Auto merge of #50615 - irinagpopa:rename-trans, r=nikomatsakis

Rename trans to codegen everywhere.

Part of #45274.
bors 2018-05-17 14:10:11 +00:00
commit dbd10f8175
264 changed files with 1547 additions and 1548 deletions


@ -1896,6 +1896,52 @@ dependencies = [
"syntax_pos 0.0.0",
]
[[package]]
name = "rustc_codegen_llvm"
version = "0.0.0"
dependencies = [
"bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"cc 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc-demangle 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_allocator 0.0.0",
"rustc_apfloat 0.0.0",
"rustc_codegen_utils 0.0.0",
"rustc_data_structures 0.0.0",
"rustc_errors 0.0.0",
"rustc_incremental 0.0.0",
"rustc_llvm 0.0.0",
"rustc_mir 0.0.0",
"rustc_platform_intrinsics 0.0.0",
"rustc_target 0.0.0",
"serialize 0.0.0",
"syntax 0.0.0",
"syntax_pos 0.0.0",
"tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rustc_codegen_utils"
version = "0.0.0"
dependencies = [
"ar 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc_data_structures 0.0.0",
"rustc_incremental 0.0.0",
"rustc_mir 0.0.0",
"rustc_target 0.0.0",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
[[package]]
name = "rustc_cratesio_shim"
version = "0.0.0"
@ -1932,6 +1978,7 @@ dependencies = [
"rustc-rayon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_allocator 0.0.0",
"rustc_borrowck 0.0.0",
"rustc_codegen_utils 0.0.0",
"rustc_data_structures 0.0.0",
"rustc_errors 0.0.0",
"rustc_incremental 0.0.0",
@ -1945,7 +1992,6 @@ dependencies = [
"rustc_save_analysis 0.0.0",
"rustc_target 0.0.0",
"rustc_traits 0.0.0",
"rustc_trans_utils 0.0.0",
"rustc_typeck 0.0.0",
"scoped-tls 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"serialize 0.0.0",
@ -2155,52 +2201,6 @@ dependencies = [
"syntax_pos 0.0.0",
]
[[package]]
name = "rustc_trans"
version = "0.0.0"
dependencies = [
"bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"cc 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc-demangle 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_allocator 0.0.0",
"rustc_apfloat 0.0.0",
"rustc_data_structures 0.0.0",
"rustc_errors 0.0.0",
"rustc_incremental 0.0.0",
"rustc_llvm 0.0.0",
"rustc_mir 0.0.0",
"rustc_platform_intrinsics 0.0.0",
"rustc_target 0.0.0",
"rustc_trans_utils 0.0.0",
"serialize 0.0.0",
"syntax 0.0.0",
"syntax_pos 0.0.0",
"tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rustc_trans_utils"
version = "0.0.0"
dependencies = [
"ar 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc_data_structures 0.0.0",
"rustc_incremental 0.0.0",
"rustc_mir 0.0.0",
"rustc_target 0.0.0",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
[[package]]
name = "rustc_tsan"
version = "0.0.0"


@ -4,7 +4,7 @@ members = [
"rustc",
"libstd",
"libtest",
"librustc_trans",
"librustc_codegen_llvm",
"tools/cargotest",
"tools/clippy",
"tools/compiletest",


@ -844,7 +844,7 @@ impl<'a> Builder<'a> {
// default via `-ldylib=winapi_foo`. That is, they're linked with the
// `dylib` type with a `winapi_` prefix (so the winapi ones don't
// conflict with the system MinGW ones). This consequently means that
// the binaries we ship of things like rustc_trans (aka the rustc_trans
// the binaries we ship of things like rustc_codegen_llvm (aka the rustc_codegen_llvm
// DLL) when linked against *again*, for example with procedural macros
// or plugins, will trigger the propagation logic of `-ldylib`, passing
// `-lwinapi_foo` to the linker again. This isn't actually available in


@ -118,7 +118,7 @@ impl Step for CodegenBackend {
const DEFAULT: bool = true;
fn should_run(run: ShouldRun) -> ShouldRun {
run.all_krates("rustc_trans")
run.all_krates("rustc_codegen_llvm")
}
fn make_run(run: RunConfig) {
@ -139,12 +139,12 @@ impl Step for CodegenBackend {
let mut cargo = builder.cargo(compiler, Mode::Librustc, target, "check");
let features = builder.rustc_features().to_string();
cargo.arg("--manifest-path").arg(builder.src.join("src/librustc_trans/Cargo.toml"));
cargo.arg("--manifest-path").arg(builder.src.join("src/librustc_codegen_llvm/Cargo.toml"));
rustc_cargo_env(builder, &mut cargo);
// We won't build LLVM if it's not available, as it shouldn't affect `check`.
let _folder = builder.fold_output(|| format!("stage{}-rustc_trans", compiler.stage));
let _folder = builder.fold_output(|| format!("stage{}-rustc_codegen_llvm", compiler.stage));
run_cargo(builder,
cargo.arg("--features").arg(features),
&codegen_backend_stamp(builder, compiler, target, backend),
@ -259,14 +259,14 @@ pub fn librustc_stamp(builder: &Builder, compiler: Compiler, target: Interned<St
builder.cargo_out(compiler, Mode::Librustc, target).join(".librustc-check.stamp")
}
/// Cargo's output path for librustc_trans in a given stage, compiled by a particular
/// Cargo's output path for librustc_codegen_llvm in a given stage, compiled by a particular
/// compiler for the specified target and backend.
fn codegen_backend_stamp(builder: &Builder,
compiler: Compiler,
target: Interned<String>,
backend: Interned<String>) -> PathBuf {
builder.cargo_out(compiler, Mode::Librustc, target)
.join(format!(".librustc_trans-{}-check.stamp", backend))
.join(format!(".librustc_codegen_llvm-{}-check.stamp", backend))
}
/// Cargo's output path for rustdoc in a given stage, compiled by a particular


@ -603,7 +603,7 @@ impl Step for CodegenBackend {
const DEFAULT: bool = true;
fn should_run(run: ShouldRun) -> ShouldRun {
run.all_krates("rustc_trans")
run.all_krates("rustc_codegen_llvm")
}
fn make_run(run: RunConfig) {
@ -637,7 +637,7 @@ impl Step for CodegenBackend {
let mut cargo = builder.cargo(compiler, Mode::Librustc, target, "build");
let mut features = builder.rustc_features().to_string();
cargo.arg("--manifest-path")
.arg(builder.src.join("src/librustc_trans/Cargo.toml"));
.arg(builder.src.join("src/librustc_codegen_llvm/Cargo.toml"));
rustc_cargo_env(builder, &mut cargo);
features += &build_codegen_backend(&builder, &mut cargo, &compiler, target, backend);
@ -645,7 +645,7 @@ impl Step for CodegenBackend {
let tmp_stamp = builder.cargo_out(compiler, Mode::Librustc, target)
.join(".tmp.stamp");
let _folder = builder.fold_output(|| format!("stage{}-rustc_trans", compiler.stage));
let _folder = builder.fold_output(|| format!("stage{}-rustc_codegen_llvm", compiler.stage));
let files = run_cargo(builder,
cargo.arg("--features").arg(features),
&tmp_stamp,
@ -656,7 +656,7 @@ impl Step for CodegenBackend {
let mut files = files.into_iter()
.filter(|f| {
let filename = f.file_name().unwrap().to_str().unwrap();
is_dylib(filename) && filename.contains("rustc_trans-")
is_dylib(filename) && filename.contains("rustc_codegen_llvm-")
});
let codegen_backend = match files.next() {
Some(f) => f,
@ -697,7 +697,7 @@ pub fn build_codegen_backend(builder: &Builder,
compiler.stage, &compiler.host, target, backend));
// Pass down configuration from the LLVM build into the build of
// librustc_llvm and librustc_trans.
// librustc_llvm and librustc_codegen_llvm.
if builder.is_rust_llvm(target) {
cargo.env("LLVM_RUSTLLVM", "1");
}
@ -762,7 +762,7 @@ fn copy_codegen_backends_to_sysroot(builder: &Builder,
t!(t!(File::open(&stamp)).read_to_string(&mut dylib));
let file = Path::new(&dylib);
let filename = file.file_name().unwrap().to_str().unwrap();
// change `librustc_trans-xxxxxx.so` to `librustc_trans-llvm.so`
// change `librustc_codegen_llvm-xxxxxx.so` to `librustc_codegen_llvm-llvm.so`
let target_filename = {
let dash = filename.find("-").unwrap();
let dot = filename.find(".").unwrap();
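A minimal standalone sketch (not part of the diff) of the rename performed here, using the `find()`-based slicing shown above; `stable_backend_filename` is a hypothetical helper name for illustration.

```rust
fn stable_backend_filename(filename: &str, backend: &str) -> String {
    // "librustc_codegen_llvm-0123abcd.so" -> "librustc_codegen_llvm-llvm.so":
    // keep everything before the first '-', insert the backend name, keep the extension.
    let dash = filename.find('-').unwrap();
    let dot = filename.find('.').unwrap();
    format!("{}-{}{}", &filename[..dash], backend, &filename[dot..])
}

fn main() {
    assert_eq!(
        stable_backend_filename("librustc_codegen_llvm-0123abcd.so", "llvm"),
        "librustc_codegen_llvm-llvm.so"
    );
}
```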
@ -808,14 +808,14 @@ pub fn librustc_stamp(builder: &Builder, compiler: Compiler, target: Interned<St
builder.cargo_out(compiler, Mode::Librustc, target).join(".librustc.stamp")
}
/// Cargo's output path for librustc_trans in a given stage, compiled by a particular
/// Cargo's output path for librustc_codegen_llvm in a given stage, compiled by a particular
/// compiler for the specified target and backend.
fn codegen_backend_stamp(builder: &Builder,
compiler: Compiler,
target: Interned<String>,
backend: Interned<String>) -> PathBuf {
builder.cargo_out(compiler, Mode::Librustc, target)
.join(format!(".librustc_trans-{}.stamp", backend))
.join(format!(".librustc_codegen_llvm-{}.stamp", backend))
}
pub fn compiler_file(builder: &Builder,


@ -69,7 +69,7 @@ for details on how to format and write long error codes.
[librustc_passes](https://github.com/rust-lang/rust/blob/master/src/librustc_passes/diagnostics.rs),
[librustc_privacy](https://github.com/rust-lang/rust/blob/master/src/librustc_privacy/diagnostics.rs),
[librustc_resolve](https://github.com/rust-lang/rust/blob/master/src/librustc_resolve/diagnostics.rs),
[librustc_trans](https://github.com/rust-lang/rust/blob/master/src/librustc_trans/diagnostics.rs),
[librustc_codegen_llvm](https://github.com/rust-lang/rust/blob/master/src/librustc_codegen_llvm/diagnostics.rs),
[librustc_plugin](https://github.com/rust-lang/rust/blob/master/src/librustc_plugin/diagnostics.rs),
[librustc_typeck](https://github.com/rust-lang/rust/blob/master/src/librustc_typeck/diagnostics.rs).
* Explanations have full markdown support. Use it, especially to highlight


@ -134,7 +134,7 @@ unsafe impl Alloc for Global {
}
/// The allocator for unique pointers.
// This function must not unwind. If it does, MIR trans will fail.
// This function must not unwind. If it does, MIR codegen will fail.
#[cfg(not(test))]
#[lang = "exchange_malloc"]
#[inline]


@ -10,7 +10,7 @@
//! rustc compiler intrinsics.
//!
//! The corresponding definitions are in librustc_trans/intrinsic.rs.
//! The corresponding definitions are in librustc_codegen_llvm/intrinsic.rs.
//!
//! # Volatiles
//!


@ -35,13 +35,13 @@ byteorder = { version = "1.1", features = ["i128"]}
# rlib/dylib pair but all crates.io crates tend to just be rlibs. This means
# we've got a problem for dependency graphs that look like:
#
#       foo - rustc_trans
#       foo - rustc_codegen_llvm
#      /                        \
# rustc ------------------------ rustc_driver
#      \                        /
#       foo - rustc_metadata
#
# Here the crate `foo` is linked into the `rustc_trans` and the
# Here the crate `foo` is linked into the `rustc_codegen_llvm` and the
# `rustc_metadata` dylibs, meaning we've got duplicate copies! When we then
# go to link `rustc_driver` the compiler notices this and gives us a compiler
# error.
@ -49,7 +49,7 @@ byteorder = { version = "1.1", features = ["i128"]}
# To work around this problem we just add these crates.io dependencies to the
# `rustc` crate which is a shared dependency above. That way the crate `foo`
# shows up in the dylib for the `rustc` crate, deduplicating it and allowing
# crates like `rustc_trans` to use `foo` *through* the `rustc` crate.
# crates like `rustc_codegen_llvm` to use `foo` *through* the `rustc` crate.
#
# tl;dr; this is not needed to get `rustc` to compile, but if you remove it then
# later crates stop compiling. If you can remove this and everything


@ -452,13 +452,13 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> {
// The CFG for match expression is quite complex, so no ASCII
// art for it (yet).
//
// The CFG generated below matches roughly what trans puts
// out. Each pattern and guard is visited in parallel, with
// The CFG generated below matches roughly what MIR contains.
// Each pattern and guard is visited in parallel, with
// arms containing multiple patterns generating multiple nodes
// for the same guard expression. The guard expressions chain
// into each other from top to bottom, with a specific
// exception to allow some additional valid programs
// (explained below). Trans differs slightly in that the
// (explained below). MIR differs slightly in that the
// pattern matching may continue after a guard but the visible
// behaviour should be the same.
//


@ -565,7 +565,7 @@ define_dep_nodes!( <'tcx>
[] IsUnreachableLocalDefinition(DefId),
[] IsMirAvailable(DefId),
[] ItemAttrs(DefId),
[] TransFnAttrs(DefId),
[] CodegenFnAttrs(DefId),
[] FnArgNames(DefId),
[] RenderedConst(DefId),
[] DylibDepFormats(CrateNum),
@ -637,8 +637,8 @@ define_dep_nodes!( <'tcx>
[eval_always] AllTraits,
[input] AllCrateNums,
[] ExportedSymbols(CrateNum),
[eval_always] CollectAndPartitionTranslationItems,
[] IsTranslatedItem(DefId),
[eval_always] CollectAndPartitionMonoItems,
[] IsCodegenedItem(DefId),
[] CodegenUnit(InternedString),
[] CompileCodegenUnit(InternedString),
[input] OutputFilenames,


@ -856,10 +856,10 @@ impl DepGraph {
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::TransPartition(P)`; the hash is the set of symbols
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::TransPartition(P)` is
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
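A conceptual sketch, not compiler code, of the reuse check described above: the work product for a codegen unit is keyed by a hash over the symbols it contains, so an unchanged symbol set means the cached object file can be reused instead of re-running codegen for that unit.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hash a canonical (sorted) ordering of the symbols in a codegen unit.
fn cgu_fingerprint(symbols: &[&str]) -> u64 {
    let mut sorted: Vec<&str> = symbols.to_vec();
    sorted.sort_unstable();
    let mut hasher = DefaultHasher::new();
    for symbol in &sorted {
        symbol.hash(&mut hasher);
    }
    hasher.finish()
}

fn main() {
    let previous = cgu_fingerprint(&["foo::bar", "foo::baz"]);
    let current = cgu_fingerprint(&["foo::baz", "foo::bar"]);
    // Same symbol set => same fingerprint => the previous object file can be reused.
    assert_eq!(previous, current);
}
```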


@ -58,7 +58,7 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> {
/// Check any attribute.
fn check_attributes(&self, item: &hir::Item, target: Target) {
if target == Target::Fn {
self.tcx.trans_fn_attrs(self.tcx.hir.local_def_id(item.id));
self.tcx.codegen_fn_attrs(self.tcx.hir.local_def_id(item.id));
} else if let Some(a) = item.attrs.iter().find(|a| a.check_name("target_feature")) {
self.tcx.sess.struct_span_err(a.span, "attribute should be applied to a function")
.span_label(item.span, "not a function")


@ -2244,8 +2244,8 @@ pub fn provide(providers: &mut Providers) {
}
#[derive(Clone, RustcEncodable, RustcDecodable, Hash)]
pub struct TransFnAttrs {
pub flags: TransFnAttrFlags,
pub struct CodegenFnAttrs {
pub flags: CodegenFnAttrFlags,
pub inline: InlineAttr,
pub export_name: Option<Symbol>,
pub target_features: Vec<Symbol>,
@ -2254,7 +2254,7 @@ pub struct TransFnAttrs {
bitflags! {
#[derive(RustcEncodable, RustcDecodable)]
pub struct TransFnAttrFlags: u8 {
pub struct CodegenFnAttrFlags: u8 {
const COLD = 0b0000_0001;
const ALLOCATOR = 0b0000_0010;
const UNWIND = 0b0000_0100;
@ -2266,10 +2266,10 @@ bitflags! {
}
}
impl TransFnAttrs {
pub fn new() -> TransFnAttrs {
TransFnAttrs {
flags: TransFnAttrFlags::empty(),
impl CodegenFnAttrs {
pub fn new() -> CodegenFnAttrs {
CodegenFnAttrs {
flags: CodegenFnAttrFlags::empty(),
inline: InlineAttr::None,
export_name: None,
target_features: vec![],
@ -2287,7 +2287,6 @@ impl TransFnAttrs {
/// True if `#[no_mangle]` or `#[export_name(...)]` is present.
pub fn contains_extern_indicator(&self) -> bool {
self.flags.contains(TransFnAttrFlags::NO_MANGLE) || self.export_name.is_some()
self.flags.contains(CodegenFnAttrFlags::NO_MANGLE) || self.export_name.is_some()
}
}
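An illustrative, self-contained sketch (outside the compiler, using the `bitflags` 1.x crate) of the renamed types above: the flags are a plain bit set, and `contains_extern_indicator` is a flag test plus an `Option` check. `String` stands in for rustc's `Symbol`, and the `NO_MANGLE` bit value is assumed, since this hunk does not show it.

```rust
#[macro_use]
extern crate bitflags; // bitflags = "1"

bitflags! {
    pub struct CodegenFnAttrFlags: u8 {
        const COLD      = 0b0000_0001;
        const ALLOCATOR = 0b0000_0010;
        const UNWIND    = 0b0000_0100;
        // Illustrative bit value; the real one is not visible in this hunk.
        const NO_MANGLE = 0b0000_1000;
    }
}

struct CodegenFnAttrs {
    flags: CodegenFnAttrFlags,
    export_name: Option<String>,
}

impl CodegenFnAttrs {
    // True if `#[no_mangle]` or `#[export_name(...)]` is present.
    fn contains_extern_indicator(&self) -> bool {
        self.flags.contains(CodegenFnAttrFlags::NO_MANGLE) || self.export_name.is_some()
    }
}

fn main() {
    let attrs = CodegenFnAttrs {
        flags: CodegenFnAttrFlags::NO_MANGLE | CodegenFnAttrFlags::COLD,
        export_name: None,
    };
    assert!(attrs.contains_extern_indicator()); // #[no_mangle] forces an extern symbol
}
```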


@ -1155,12 +1155,12 @@ impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::TraitCandidate {
}
}
impl<'hir> HashStable<StableHashingContext<'hir>> for hir::TransFnAttrs
impl<'hir> HashStable<StableHashingContext<'hir>> for hir::CodegenFnAttrs
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'hir>,
hasher: &mut StableHasher<W>) {
let hir::TransFnAttrs {
let hir::CodegenFnAttrs {
flags,
inline,
export_name,
@ -1176,7 +1176,7 @@ impl<'hir> HashStable<StableHashingContext<'hir>> for hir::TransFnAttrs
}
}
impl<'hir> HashStable<StableHashingContext<'hir>> for hir::TransFnAttrFlags
impl<'hir> HashStable<StableHashingContext<'hir>> for hir::CodegenFnAttrFlags
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'hir>,


@ -30,7 +30,7 @@ pub const ATTR_CLEAN: &'static str = "rustc_clean";
pub const ATTR_IF_THIS_CHANGED: &'static str = "rustc_if_this_changed";
pub const ATTR_THEN_THIS_WOULD_NEED: &'static str = "rustc_then_this_would_need";
pub const ATTR_PARTITION_REUSED: &'static str = "rustc_partition_reused";
pub const ATTR_PARTITION_TRANSLATED: &'static str = "rustc_partition_translated";
pub const ATTR_PARTITION_CODEGENED: &'static str = "rustc_partition_codegened";
pub const DEP_GRAPH_ASSERT_ATTRS: &'static [&'static str] = &[
@ -39,7 +39,7 @@ pub const DEP_GRAPH_ASSERT_ATTRS: &'static [&'static str] = &[
ATTR_DIRTY,
ATTR_CLEAN,
ATTR_PARTITION_REUSED,
ATTR_PARTITION_TRANSLATED,
ATTR_PARTITION_CODEGENED,
];
pub const IGNORED_ATTRIBUTES: &'static [&'static str] = &[
@ -49,5 +49,5 @@ pub const IGNORED_ATTRIBUTES: &'static [&'static str] = &[
ATTR_DIRTY,
ATTR_CLEAN,
ATTR_PARTITION_REUSED,
ATTR_PARTITION_TRANSLATED,
ATTR_PARTITION_CODEGENED,
];
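The two attribute names above are what incremental-compilation tests use to assert cache behaviour. A hedged sketch of such a test follows; the module and cfg names are made up, and the `module =`/`cfg =` argument syntax is assumed from rustc's incremental test suite rather than stated in this diff.

```rust
// revisions: rpass1 rpass2
#![feature(rustc_attrs)]
// After the edit made in revision rpass2, assert that one codegen unit was
// reused from the incremental cache while the edited one was codegened again.
#![rustc_partition_reused(module = "my_crate-untouched_mod", cfg = "rpass2")]
#![rustc_partition_codegened(module = "my_crate-edited_mod", cfg = "rpass2")]

pub mod untouched_mod {
    pub fn stable() -> u32 { 1 }
}

pub mod edited_mod {
    pub fn changed_between_revisions() -> u32 { 2 }
}
```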


@ -614,7 +614,7 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for ReverseMapper<'cx, 'gcx, 'tcx>
// compiler; those regions are ignored for the
// outlives relation, and hence don't affect trait
// selection or auto traits, and they are erased
// during trans.
// during codegen.
let generics = self.tcx.generics_of(def_id);
let substs = self.tcx.mk_substs(substs.substs.iter().enumerate().map(


@ -10,15 +10,15 @@
//! Implementation of lint checking.
//!
//! The lint checking is mostly consolidated into one pass which runs just
//! before translation to LLVM bytecode. Throughout compilation, lint warnings
//! The lint checking is mostly consolidated into one pass which runs
//! after all other analyses. Throughout compilation, lint warnings
//! can be added via the `add_lint` method on the Session structure. This
//! requires a span and an id of the node that the lint is being added to. The
//! lint isn't actually emitted at that time because it is unknown what the
//! actual lint level at that location is.
//!
//! To actually emit lint warnings/errors, a separate pass is used just before
//! translation. A context keeps track of the current state of all lint levels.
//! To actually emit lint warnings/errors, a separate pass is used.
//! A context keeps track of the current state of all lint levels.
//! Upon entering a node of the ast which can modify the lint settings, the
//! previous lint state is pushed onto a stack and the ast is then recursed
//! upon. As the ast is traversed, this keeps track of the current lint level


@ -16,15 +16,15 @@
//! other phases of the compiler, which are generally required to hold in order
//! to compile the program at all.
//!
//! Most lints can be written as `LintPass` instances. These run just before
//! translation to LLVM bytecode. The `LintPass`es built into rustc are defined
//! Most lints can be written as `LintPass` instances. These run after
//! all other analyses. The `LintPass`es built into rustc are defined
//! within `builtin.rs`, which has further comments on how to add such a lint.
//! rustc can also load user-defined lint plugins via the plugin mechanism.
//!
//! Some of rustc's lints are defined elsewhere in the compiler and work by
//! calling `add_lint()` on the overall `Session` object. This works when
//! it happens before the main lint pass, which emits the lints stored by
//! `add_lint()`. To emit lints after the main lint pass (from trans, for
//! `add_lint()`. To emit lints after the main lint pass (from codegen, for
//! example) requires more effort. See `emit_lint` and `GatherNodeLevels`
//! in `context.rs`.


@ -109,7 +109,7 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let sess = &tcx.sess;
if !sess.opts.output_types.should_trans() {
if !sess.opts.output_types.should_codegen() {
return Vec::new();
}


@ -46,9 +46,9 @@
//!
//! ## By-reference upvars
//!
//! One part of the translation which may be non-obvious is that we translate
//! One part of the codegen which may be non-obvious is that we translate
//! closure upvars into the dereference of a borrowed pointer; this more closely
//! resembles the runtime translation. So, for example, if we had:
//! resembles the runtime codegen. So, for example, if we had:
//!
//! let mut x = 3;
//! let y = 5;
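The doc-comment example above is cut off by the hunk; here is a hedged, standalone illustration of the same by-reference upvar point (the `Env` struct below is purely expository):

```rust
fn main() {
    let mut x = 3;
    let y = 5;
    let mut add_y = || x += y; // captures x by &mut and y by &
    add_y();
    assert_eq!(x, 8);

    // Conceptually the closure is compiled roughly as:
    //     struct Env<'a> { x: &'a mut i32, y: &'a i32 }
    // and every use of an upvar in the body is a dereference:
    //     *env.x += *env.y;
}
```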


@ -15,7 +15,7 @@
// makes all other generics or inline functions that it references
// reachable as well.
use hir::TransFnAttrs;
use hir::CodegenFnAttrs;
use hir::map as hir_map;
use hir::def::Def;
use hir::def_id::{DefId, CrateNum};
@ -44,7 +44,7 @@ fn generics_require_inlining(generics: &hir::Generics) -> bool {
// Returns true if the given item must be inlined because it may be
// monomorphized or it was marked with `#[inline]`. This will only return
// true for functions.
fn item_might_be_inlined(item: &hir::Item, attrs: TransFnAttrs) -> bool {
fn item_might_be_inlined(item: &hir::Item, attrs: CodegenFnAttrs) -> bool {
if attrs.requests_inline() {
return true
}
@ -61,15 +61,15 @@ fn item_might_be_inlined(item: &hir::Item, attrs: TransFnAttrs) -> bool {
fn method_might_be_inlined<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
impl_item: &hir::ImplItem,
impl_src: DefId) -> bool {
let trans_fn_attrs = tcx.trans_fn_attrs(impl_item.hir_id.owner_def_id());
if trans_fn_attrs.requests_inline() ||
let codegen_fn_attrs = tcx.codegen_fn_attrs(impl_item.hir_id.owner_def_id());
if codegen_fn_attrs.requests_inline() ||
generics_require_inlining(&impl_item.generics) {
return true
}
if let Some(impl_node_id) = tcx.hir.as_local_node_id(impl_src) {
match tcx.hir.find(impl_node_id) {
Some(hir_map::NodeItem(item)) =>
item_might_be_inlined(&item, trans_fn_attrs),
item_might_be_inlined(&item, codegen_fn_attrs),
Some(..) | None =>
span_bug!(impl_item.span, "impl did is not an item")
}
@ -163,7 +163,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> {
Some(hir_map::NodeItem(item)) => {
match item.node {
hir::ItemFn(..) =>
item_might_be_inlined(&item, self.tcx.trans_fn_attrs(def_id)),
item_might_be_inlined(&item, self.tcx.codegen_fn_attrs(def_id)),
_ => false,
}
}
@ -179,7 +179,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> {
match impl_item.node {
hir::ImplItemKind::Const(..) => true,
hir::ImplItemKind::Method(..) => {
let attrs = self.tcx.trans_fn_attrs(def_id);
let attrs = self.tcx.codegen_fn_attrs(def_id);
if generics_require_inlining(&impl_item.generics) ||
attrs.requests_inline() {
true
@ -233,7 +233,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> {
false
};
let def_id = self.tcx.hir.local_def_id(item.id);
let is_extern = self.tcx.trans_fn_attrs(def_id).contains_extern_indicator();
let is_extern = self.tcx.codegen_fn_attrs(def_id).contains_extern_indicator();
if reachable || is_extern {
self.reachable_symbols.insert(search_item);
}
@ -251,7 +251,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> {
match item.node {
hir::ItemFn(.., body) => {
let def_id = self.tcx.hir.local_def_id(item.id);
if item_might_be_inlined(&item, self.tcx.trans_fn_attrs(def_id)) {
if item_might_be_inlined(&item, self.tcx.codegen_fn_attrs(def_id)) {
self.visit_nested_body(body);
}
}
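A small, self-contained illustration of the reachability rule being renamed above: a private function is not normally part of the crate's reachable set, but if a public `#[inline]` function calls it, that body may be inlined into downstream crates, so the callee must be kept reachable too. This is a sketch for exposition, not compiler code.

```rust
// Private helper: on its own it is not reachable from outside the crate.
fn helper() -> u32 { 7 }

// May be inlined into downstream crates, so its body -- and therefore
// `helper` -- has to stay available to downstream codegen.
#[inline]
pub fn public_inline() -> u32 {
    helper()
}

// No #[inline] and not generic: only its own symbol needs to be exported.
pub fn public_plain() -> u32 {
    1
}
```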


@ -1279,7 +1279,7 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>,
loop {
// Note: give all the expressions matching `ET` with the
// extended temporary lifetime, not just the innermost rvalue,
// because in trans if we must compile e.g. `*rvalue()`
// because in codegen if we must compile e.g. `*rvalue()`
// into a temporary, we request the temporary scope of the
// outer expression.
visitor.scope_tree.record_rvalue_scope(expr.hir_id.local_id, blk_scope);


@ -249,7 +249,7 @@ pub struct Allocation {
pub undef_mask: UndefMask,
/// The alignment of the allocation to detect unaligned reads.
pub align: Align,
/// Whether the allocation (of a static) should be put into mutable memory when translating
/// Whether the allocation (of a static) should be put into mutable memory when codegenning
///
/// Only happens for `static mut` or `static` with interior mutability
pub runtime_mutability: Mutability,


@ -75,7 +75,7 @@ impl<'tcx> ConstValue<'tcx> {
///
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ByValPair`). It allows Miri to avoid making allocations for checked binary
/// operations and fat pointers. This idea was taken from rustc's trans.
/// operations and fat pointers. This idea was taken from rustc's codegen.
#[derive(Clone, Copy, Debug, Eq, PartialEq, RustcEncodable, RustcDecodable, Hash)]
pub enum Value {
ByRef(Pointer, Align),


@ -696,7 +696,7 @@ pub struct BasicBlockData<'tcx> {
pub terminator: Option<Terminator<'tcx>>,
/// If true, this block lies on an unwind path. This is used
/// during trans where distinct kinds of basic blocks may be
/// during codegen where distinct kinds of basic blocks may be
/// generated (particularly for MSVC cleanup). Unwind blocks must
/// only branch to other unwind blocks.
pub is_cleanup: bool,
@ -1614,7 +1614,7 @@ pub enum CastKind {
UnsafeFnPointer,
/// "Unsize" -- convert a thin-or-fat pointer to a fat pointer.
/// trans must figure out the details once full monomorphization
/// codegen must figure out the details once full monomorphization
/// is known. For example, this could be used to cast from a
/// `&[i32;N]` to a `&[i32]`, or a `Box<T>` to a `Box<Trait>`
/// (presuming `T: Trait`).


@ -184,11 +184,11 @@ impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for CodegenUnit<'tcx> {
name.hash_stable(hcx, hasher);
let mut items: Vec<(Fingerprint, _)> = items.iter().map(|(trans_item, &attrs)| {
let mut items: Vec<(Fingerprint, _)> = items.iter().map(|(mono_item, &attrs)| {
let mut hasher = StableHasher::new();
trans_item.hash_stable(hcx, &mut hasher);
let trans_item_fingerprint = hasher.finish();
(trans_item_fingerprint, attrs)
mono_item.hash_stable(hcx, &mut hasher);
let mono_item_fingerprint = hasher.finish();
(mono_item_fingerprint, attrs)
}).collect();
items.sort_unstable_by_key(|i| i.0);
@ -238,4 +238,3 @@ impl Stats {
self.fn_stats.extend(stats.fn_stats);
}
}


@ -270,7 +270,7 @@ impl OutputTypes {
}
// True if any of the output types require codegen or linking.
pub fn should_trans(&self) -> bool {
pub fn should_codegen(&self) -> bool {
self.0.keys().any(|k| match *k {
OutputType::Bitcode
| OutputType::Assembly
@ -1135,14 +1135,14 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"count where LLVM instrs originate"),
time_llvm_passes: bool = (false, parse_bool, [UNTRACKED_WITH_WARNING(true,
"The output of `-Z time-llvm-passes` will only reflect timings of \
re-translated modules when used with incremental compilation" )],
re-codegened modules when used with incremental compilation" )],
"measure time of each LLVM pass"),
input_stats: bool = (false, parse_bool, [UNTRACKED],
"gather statistics about the input"),
trans_stats: bool = (false, parse_bool, [UNTRACKED_WITH_WARNING(true,
"The output of `-Z trans-stats` might not be accurate when incremental \
codegen_stats: bool = (false, parse_bool, [UNTRACKED_WITH_WARNING(true,
"The output of `-Z codegen-stats` might not be accurate when incremental \
compilation is enabled")],
"gather trans statistics"),
"gather codegen statistics"),
asm_comments: bool = (false, parse_bool, [TRACKED],
"generate comments into the assembly (may change behavior)"),
no_verify: bool = (false, parse_bool, [TRACKED],
@ -1183,8 +1183,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
Use with RUST_REGION_GRAPH=help for more info"),
parse_only: bool = (false, parse_bool, [UNTRACKED],
"parse only; do not compile, assemble, or link"),
no_trans: bool = (false, parse_bool, [TRACKED],
"run all passes except translation; no output"),
no_codegen: bool = (false, parse_bool, [TRACKED],
"run all passes except codegen; no output"),
treat_err_as_bug: bool = (false, parse_bool, [TRACKED],
"treat all errors that occur as bugs"),
external_macro_backtrace: bool = (false, parse_bool, [UNTRACKED],
@ -1235,8 +1235,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"show spans for compiler debugging (expr|pat|ty)"),
print_type_sizes: bool = (false, parse_bool, [UNTRACKED],
"print layout information for each type encountered"),
print_trans_items: Option<String> = (None, parse_opt_string, [UNTRACKED],
"print the result of the translation item collection pass"),
print_mono_items: Option<String> = (None, parse_opt_string, [UNTRACKED],
"print the result of the monomorphization collection pass"),
mir_opt_level: usize = (1, parse_uint, [TRACKED],
"set the MIR optimization level (0-3, default: 1)"),
mutable_noalias: bool = (false, parse_bool, [TRACKED],
@ -1244,7 +1244,7 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
arg_align_attributes: bool = (false, parse_bool, [TRACKED],
"emit align metadata for reference arguments"),
dump_mir: Option<String> = (None, parse_opt_string, [UNTRACKED],
"dump MIR state at various points in translation"),
"dump MIR state at various points in transforms"),
dump_mir_dir: String = (String::from("mir_dump"), parse_string, [UNTRACKED],
"the directory the MIR is dumped into"),
dump_mir_graphviz: bool = (false, parse_bool, [UNTRACKED],
@ -1296,8 +1296,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"dump facts from NLL analysis into side files"),
disable_nll_user_type_assert: bool = (false, parse_bool, [UNTRACKED],
"disable user provided type assertion in NLL"),
trans_time_graph: bool = (false, parse_bool, [UNTRACKED],
"generate a graphical HTML report of time spent in trans and LLVM"),
codegen_time_graph: bool = (false, parse_bool, [UNTRACKED],
"generate a graphical HTML report of time spent in codegen and LLVM"),
thinlto: Option<bool> = (None, parse_opt_bool, [TRACKED],
"enable ThinLTO when possible"),
inline_in_all_cgus: Option<bool> = (None, parse_opt_bool, [TRACKED],
@ -1309,7 +1309,7 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
the max/min integer respectively, and NaN is mapped to 0"),
lower_128bit_ops: Option<bool> = (None, parse_opt_bool, [TRACKED],
"rewrite operators on i128 and u128 into lang item calls (typically provided \
by compiler-builtins) so translation doesn't need to support them,
by compiler-builtins) so codegen doesn't need to support them,
overriding the default for the current target"),
human_readable_cgu_names: bool = (false, parse_bool, [TRACKED],
"generate human-readable, predictable names for codegen units"),
@ -3047,7 +3047,7 @@ mod tests {
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
opts.debugging_opts.input_stats = true;
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
opts.debugging_opts.trans_stats = true;
opts.debugging_opts.codegen_stats = true;
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
opts.debugging_opts.borrowck_stats = true;
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
@ -3093,7 +3093,7 @@ mod tests {
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
opts.debugging_opts.keep_ast = true;
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
opts.debugging_opts.print_trans_items = Some(String::from("abc"));
opts.debugging_opts.print_mono_items = Some(String::from("abc"));
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
opts.debugging_opts.dump_mir = Some(String::from("abc"));
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
@ -3120,7 +3120,7 @@ mod tests {
assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash());
opts = reference.clone();
opts.debugging_opts.no_trans = true;
opts.debugging_opts.no_codegen = true;
assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash());
opts = reference.clone();


@ -98,7 +98,7 @@ pub struct Session {
/// arguments passed to the compiler. Its value together with the crate-name
/// forms a unique global identifier for the crate. It is used to allow
/// multiple crates with the same name to coexist. See the
/// trans::back::symbol_names module for more information.
/// rustc_codegen_llvm::back::symbol_names module for more information.
pub crate_disambiguator: Once<CrateDisambiguator>,
features: Once<feature_gate::Features>,
@ -504,8 +504,8 @@ impl Session {
pub fn time_llvm_passes(&self) -> bool {
self.opts.debugging_opts.time_llvm_passes
}
pub fn trans_stats(&self) -> bool {
self.opts.debugging_opts.trans_stats
pub fn codegen_stats(&self) -> bool {
self.opts.debugging_opts.codegen_stats
}
pub fn meta_stats(&self) -> bool {
self.opts.debugging_opts.meta_stats
@ -894,11 +894,11 @@ impl Session {
// Why is 16 codegen units the default all the time?
//
// The main reason for enabling multiple codegen units by default is to
// leverage the ability for the trans backend to do translation and
// codegen in parallel. This allows us, especially for large crates, to
// leverage the ability for the codegen backend to do codegen and
// optimization in parallel. This allows us, especially for large crates, to
// make good use of all available resources on the machine once we've
// hit that stage of compilation. Large crates especially then often
// take a long time in trans/codegen and this helps us amortize that
// take a long time in codegen/optimization and this helps us amortize that
// cost.
//
// Note that a high number here doesn't mean that we'll be spawning a


@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This file contains various trait resolution methods used by trans.
// This file contains various trait resolution methods used by codegen.
// They all assume regions can be erased and monomorphic types. It
// seems likely that they should eventually be merged into more
// general routines.
@ -30,7 +30,7 @@ use ty::fold::TypeFoldable;
/// that type check should guarantee to us that all nested
/// obligations *could be* resolved if we wanted to.
/// Assumes that this is run after the entire crate has been successfully type-checked.
pub fn trans_fulfill_obligation<'a, 'tcx>(ty: TyCtxt<'a, 'tcx, 'tcx>,
pub fn codegen_fulfill_obligation<'a, 'tcx>(ty: TyCtxt<'a, 'tcx, 'tcx>,
(param_env, trait_ref):
(ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>))
-> Vtable<'tcx, ()>
@ -38,7 +38,7 @@ pub fn trans_fulfill_obligation<'a, 'tcx>(ty: TyCtxt<'a, 'tcx, 'tcx>,
// Remove any references to regions; this helps improve caching.
let trait_ref = ty.erase_regions(&trait_ref);
debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
debug!("codegen_fulfill_obligation(trait_ref={:?}, def_id={:?})",
(param_env, trait_ref), trait_ref.def_id());
// Do the initial selection for the obligation. This yields the
@ -60,12 +60,12 @@ pub fn trans_fulfill_obligation<'a, 'tcx>(ty: TyCtxt<'a, 'tcx, 'tcx>,
// leading to an ambiguous result. So report this as an
// overflow bug, since I believe this is the only case
// where ambiguity can result.
bug!("Encountered ambiguity selecting `{:?}` during trans, \
bug!("Encountered ambiguity selecting `{:?}` during codegen, \
presuming due to overflow",
trait_ref)
}
Err(e) => {
bug!("Encountered error `{:?}` selecting `{:?}` during trans",
bug!("Encountered error `{:?}` selecting `{:?}` during codegen",
e, trait_ref)
}
};


@ -64,7 +64,7 @@ mod on_unimplemented;
mod select;
mod specialize;
mod structural_impls;
pub mod trans;
pub mod codegen;
mod util;
pub mod query;
@ -473,8 +473,8 @@ pub enum Vtable<'tcx, N> {
///
/// The type parameter `N` indicates the type used for "nested
/// obligations" that are required by the impl. During type check, this
/// is `Obligation`, as one might expect. During trans, however, this
/// is `()`, because trans only requires a shallow resolution of an
/// is `Obligation`, as one might expect. During codegen, however, this
/// is `()`, because codegen only requires a shallow resolution of an
/// impl, and nested obligations are satisfied later.
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)]
pub struct VtableImplData<'tcx, N> {
@ -856,7 +856,7 @@ fn vtable_methods<'a, 'tcx>(
// It's possible that the method relies on where clauses that
// do not hold for this particular set of type parameters.
// Note that this method could then never be called, so we
// do not want to try and trans it, in that case (see #23435).
// do not want to try and codegen it, in that case (see #23435).
let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, substs);
if !normalize_and_test_predicates(tcx, predicates.predicates) {
debug!("vtable_methods: predicates do not hold");
@ -992,7 +992,7 @@ pub fn provide(providers: &mut ty::maps::Providers) {
is_object_safe: object_safety::is_object_safe_provider,
specialization_graph_of: specialize::specialization_graph_provider,
specializes: specialize::specializes,
trans_fulfill_obligation: trans::trans_fulfill_obligation,
codegen_fulfill_obligation: codegen::codegen_fulfill_obligation,
vtable_methods,
substitute_normalize_and_test_predicates,
..*providers
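A standalone sketch of the provider-table pattern visible in this hunk (field types and names simplified, not rustc's real `ty::maps::Providers`): `provide` overwrites the entries this module implements and keeps the rest through functional-update syntax.

```rust
#[derive(Clone, Copy)]
struct Providers {
    codegen_fulfill_obligation: fn(u32) -> &'static str,
    vtable_methods: fn(u32) -> &'static str,
}

fn codegen_fulfill_obligation(_key: u32) -> &'static str { "selected impl" }

fn provide(providers: &mut Providers) {
    // Override only the queries this module knows how to answer.
    *providers = Providers {
        codegen_fulfill_obligation,
        ..*providers
    };
}

fn main() {
    fn unimplemented_provider(_: u32) -> &'static str { "unimplemented" }
    let mut providers = Providers {
        codegen_fulfill_obligation: unimplemented_provider,
        vtable_methods: unimplemented_provider,
    };
    provide(&mut providers);
    assert_eq!((providers.codegen_fulfill_obligation)(0), "selected impl");
}
```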


@ -65,7 +65,7 @@ pub enum Reveal {
/// }
UserFacing,
/// At trans time, all monomorphic projections will succeed.
/// At codegen time, all monomorphic projections will succeed.
/// Also, `impl Trait` is normalized to the concrete type,
/// which has to be already collected by type-checking.
///
@ -346,7 +346,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a,
let ty = ty.super_fold_with(self);
match ty.sty {
ty::TyAnon(def_id, substs) if !substs.has_escaping_regions() => { // (*)
// Only normalize `impl Trait` after type-checking, usually in trans.
// Only normalize `impl Trait` after type-checking, usually in codegen.
match self.param_env.reveal {
Reveal::UserFacing => ty,
@ -1054,7 +1054,7 @@ fn assemble_candidates_from_impls<'cx, 'gcx, 'tcx>(
super::VtableImpl(impl_data) => {
// We have to be careful when projecting out of an
// impl because of specialization. If we are not in
// trans (i.e., projection mode is not "any"), and the
// codegen (i.e., projection mode is not "any"), and the
// impl's type is declared as default, then we disable
// projection (even if the trait ref is fully
// monomorphic). In the case where trait ref is not


@ -104,7 +104,7 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for QueryNormalizer<'cx, 'gcx, 'tcx
match ty.sty {
ty::TyAnon(def_id, substs) if !substs.has_escaping_regions() => {
// (*)
// Only normalize `impl Trait` after type-checking, usually in trans.
// Only normalize `impl Trait` after type-checking, usually in codegen.
match self.param_env.reveal {
Reveal::UserFacing => ty,


@ -57,7 +57,7 @@ impl<'cx, 'tcx> TyCtxt<'cx, 'tcx, 'tcx> {
///
/// NB. Currently, higher-ranked type bounds inhibit
/// normalization. Therefore, each time we erase them in
/// translation, we need to normalize the contents.
/// codegen, we need to normalize the contents.
pub fn normalize_erasing_late_bound_regions<T>(
self,
param_env: ty::ParamEnv<'tcx>,


@ -275,7 +275,7 @@ impl<'a, 'tcx> Lift<'tcx> for traits::ObligationCause<'a> {
}
}
// For trans only.
// For codegen only.
impl<'a, 'tcx> Lift<'tcx> for traits::Vtable<'a, ()> {
type Lifted = traits::Vtable<'tcx, ()>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {


@ -91,8 +91,8 @@ pub enum Adjust<'tcx> {
/// pointers. We don't store the details of how the transform is
/// done (in fact, we don't know that, because it might depend on
/// the precise type parameters). We just store the target
/// type. Trans figures out what has to be done at monomorphization
/// time based on the precise source/target type at hand.
/// type. Codegen backends and miri figure out what has to be done
/// based on the precise source/target type at hand.
Unsize,
}
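Two source-level examples of the `Unsize` adjustment described above; whether the fat pointer gains a length or a vtable pointer depends on the concrete source type, which is why the decision is deferred to codegen (or miri).

```rust
use std::fmt::Debug;

fn main() {
    let array: &[i32; 3] = &[1, 2, 3];
    let slice: &[i32] = array;                  // thin &[i32; 3] -> fat &[i32] (ptr + length)

    let boxed: Box<dyn Debug> = Box::new(42u8); // thin Box<u8> -> fat Box<dyn Debug> (ptr + vtable)

    println!("{} {:?}", slice.len(), boxed);
}
```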


@ -9,7 +9,7 @@
// except according to those terms.
// Helpers for handling cast expressions, used in both
// typeck and trans.
// typeck and codegen.
use ty::{self, Ty};


@ -401,7 +401,7 @@ pub struct TypeckTables<'tcx> {
/// For each fn, records the "liberated" types of its arguments
/// and return type. Liberated means that all bound regions
/// (including late-bound regions) are replaced with free
/// equivalents. This table is not used in trans (since regions
/// equivalents. This table is not used in codegen (since regions
/// are erased there) and hence is not serialized to metadata.
liberated_fn_sigs: ItemLocalMap<ty::FnSig<'tcx>>,
@ -921,7 +921,7 @@ pub struct GlobalCtxt<'tcx> {
/// A general purpose channel to throw data out the back towards LLVM worker
/// threads.
///
/// This is intended to only get used during the trans phase of the compiler
/// This is intended to only get used during the codegen phase of the compiler
/// when satisfying the query for a particular codegen unit. Internally in
/// the query it'll send data along this channel to get processed later.
pub tx_to_llvm_workers: Lock<mpsc::Sender<Box<dyn Any + Send>>>,


@ -68,7 +68,7 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionEraserVisitor<'a, 'gcx, 't
//
// Note that we *CAN* replace early-bound regions -- the
// type system never "sees" those, they get substituted
// away. In trans, they will always be erased to 'erased
// away. In codegen, they will always be erased to 'erased
// whenever a substitution occurs.
match *r {
ty::ReLateBound(..) => r,
@ -76,4 +76,3 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionEraserVisitor<'a, 'gcx, 't
}
}
}


@ -424,7 +424,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
collector.regions
}
/// Replace any late-bound regions bound in `value` with `'erased`. Useful in trans but also
/// Replace any late-bound regions bound in `value` with `'erased`. Useful in codegen but also
/// method lookup and a few other places where precise region relationships are not required.
pub fn erase_late_bound_regions<T>(self, value: &Binder<T>) -> T
where T : TypeFoldable<'tcx>


@ -104,13 +104,13 @@ impl<'tcx> InstanceDef<'tcx> {
return true
}
if let ty::InstanceDef::DropGlue(..) = *self {
// Drop glue wants to be instantiated at every translation
// Drop glue wants to be instantiated at every codegen
// unit, but without an #[inline] hint. We should make this
// available to normal end-users.
return true
}
let trans_fn_attrs = tcx.trans_fn_attrs(self.def_id());
trans_fn_attrs.requests_inline() || tcx.is_const_fn(self.def_id())
let codegen_fn_attrs = tcx.codegen_fn_attrs(self.def_id());
codegen_fn_attrs.requests_inline() || tcx.is_const_fn(self.def_id())
}
}
@ -145,7 +145,7 @@ impl<'a, 'b, 'tcx> Instance<'tcx> {
pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>)
-> Instance<'tcx> {
assert!(!substs.has_escaping_regions(),
"substs of instance {:?} not normalized for trans: {:?}",
"substs of instance {:?} not normalized for codegen: {:?}",
def_id, substs);
Instance { def: InstanceDef::Item(def_id), substs: substs }
}
@ -175,7 +175,7 @@ impl<'a, 'b, 'tcx> Instance<'tcx> {
/// `RevealMode` in the parameter environment.)
///
/// Presuming that coherence and type-check have succeeded, if this method is invoked
/// in a monomorphic context (i.e., like during trans), then it is guaranteed to return
/// in a monomorphic context (i.e., like during codegen), then it is guaranteed to return
/// `Some`.
pub fn resolve(tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
@ -259,7 +259,7 @@ fn resolve_associated_item<'a, 'tcx>(
def_id, trait_id, rcvr_substs);
let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
let vtbl = tcx.trans_fulfill_obligation((param_env, ty::Binder::bind(trait_ref)));
let vtbl = tcx.codegen_fulfill_obligation((param_env, ty::Binder::bind(trait_ref)));
// Now that we know which impl is being used, we can dispatch to
// the actual function:
@ -321,7 +321,7 @@ fn needs_fn_once_adapter_shim<'a, 'tcx>(actual_closure_kind: ty::ClosureKind,
}
(ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
// The closure fn `llfn` is a `fn(&self, ...)`. We want a
// `fn(&mut self, ...)`. In fact, at trans time, these are
// `fn(&mut self, ...)`. In fact, at codegen time, these are
// basically the same thing, so we can just return llfn.
Ok(false)
}
@ -334,7 +334,7 @@ fn needs_fn_once_adapter_shim<'a, 'tcx>(actual_closure_kind: ty::ClosureKind,
// fn call_once(self, ...) { call_mut(&self, ...) }
// fn call_once(mut self, ...) { call_mut(&mut self, ...) }
//
// These are both the same at trans time.
// These are both the same at codegen time.
Ok(true)
}
(ty::ClosureKind::FnMut, _) |
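A surface-level illustration of why the `Fn` -> `FnMut` adaptation above is a no-op at codegen time: a closure that implements `Fn` is already callable through the `FnMut` and `FnOnce` interfaces without any change to its body.

```rust
fn call_as_fn_mut<F: FnMut() -> i32>(mut f: F) -> i32 { f() }
fn call_as_fn_once<F: FnOnce() -> i32>(f: F) -> i32 { f() }

fn main() {
    let x = 40;
    let closure = || x + 2;                  // only reads x, so it implements Fn (and is Copy)
    assert_eq!(call_as_fn_mut(closure), 42); // usable where FnMut is expected
    assert_eq!(call_as_fn_once(closure), 42); // and where FnOnce is expected
}
```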


@ -949,14 +949,14 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
// because this discriminant will be loaded, and then stored into variable of
// type calculated by typeck. Consider such case (a bug): typeck decided on
// byte-sized discriminant, but layout thinks we need a 16-bit to store all
// discriminant values. That would be a bug, because then, in trans, in order
// discriminant values. That would be a bug, because then, in codegen, in order
// to store this 16-bit discriminant into 8-bit sized temporary some of the
// space necessary to represent would have to be discarded (or layout is wrong
// on thinking it needs 16 bits)
bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
min_ity, typeck_ity);
// However, it is fine to make discr type however large (as an optimisation)
// after this point we'll just truncate the value we load in trans.
// after this point we'll just truncate the value we load in codegen.
}
// Check to see if we should use a different type for the
@ -1121,7 +1121,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
// If we are running with `-Zprint-type-sizes`, record layouts for
// dumping later. Ignore layouts that are done with non-empty
// environments or non-monomorphic layouts, as the user only wants
// to see the stuff resulting from the final trans session.
// to see the stuff resulting from the final codegen session.
if
!self.tcx.sess.opts.debugging_opts.print_type_sizes ||
layout.ty.has_param_types() ||


@ -349,7 +349,7 @@ impl<'tcx> QueryDescription<'tcx> for queries::is_mir_available<'tcx> {
}
}
impl<'tcx> QueryDescription<'tcx> for queries::trans_fulfill_obligation<'tcx> {
impl<'tcx> QueryDescription<'tcx> for queries::codegen_fulfill_obligation<'tcx> {
fn describe(tcx: TyCtxt, key: (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) -> String {
format!("checking if `{}` fulfills its obligations", tcx.item_path_str(key.1.def_id()))
}
@ -637,9 +637,9 @@ impl<'tcx> QueryDescription<'tcx> for queries::exported_symbols<'tcx> {
}
}
impl<'tcx> QueryDescription<'tcx> for queries::collect_and_partition_translation_items<'tcx> {
impl<'tcx> QueryDescription<'tcx> for queries::collect_and_partition_mono_items<'tcx> {
fn describe(_tcx: TyCtxt, _: CrateNum) -> String {
format!("collect_and_partition_translation_items")
format!("collect_and_partition_mono_items")
}
}
@ -795,5 +795,5 @@ impl_disk_cacheable_query!(def_symbol_name, |_| true);
impl_disk_cacheable_query!(type_of, |def_id| def_id.is_local());
impl_disk_cacheable_query!(predicates_of, |def_id| def_id.is_local());
impl_disk_cacheable_query!(used_trait_imports, |def_id| def_id.is_local());
impl_disk_cacheable_query!(trans_fn_attrs, |_| true);
impl_disk_cacheable_query!(codegen_fn_attrs, |_| true);
impl_disk_cacheable_query!(specialization_graph_of, |_| true);


@ -11,7 +11,7 @@
use dep_graph::{DepConstructor, DepNode};
use hir::def_id::{CrateNum, DefId, DefIndex};
use hir::def::{Def, Export};
use hir::{self, TraitCandidate, ItemLocalId, TransFnAttrs};
use hir::{self, TraitCandidate, ItemLocalId, CodegenFnAttrs};
use hir::svh::Svh;
use infer::canonical::{self, Canonical};
use lint;
@ -181,7 +181,7 @@ define_maps! { <'tcx>
[] fn mir_validated: MirValidated(DefId) -> &'tcx Steal<mir::Mir<'tcx>>,
/// MIR after our optimization passes have run. This is MIR that is ready
/// for trans. This is also the only query that can fetch non-local MIR, at present.
/// for codegen. This is also the only query that can fetch non-local MIR, at present.
[] fn optimized_mir: MirOptimized(DefId) -> &'tcx mir::Mir<'tcx>,
/// The result of unsafety-checking this def-id.
@ -255,7 +255,7 @@ define_maps! { <'tcx>
[] fn lookup_stability: LookupStability(DefId) -> Option<&'tcx attr::Stability>,
[] fn lookup_deprecation_entry: LookupDeprecationEntry(DefId) -> Option<DeprecationEntry>,
[] fn item_attrs: ItemAttrs(DefId) -> Lrc<[ast::Attribute]>,
[] fn trans_fn_attrs: trans_fn_attrs(DefId) -> TransFnAttrs,
[] fn codegen_fn_attrs: codegen_fn_attrs(DefId) -> CodegenFnAttrs,
[] fn fn_arg_names: FnArgNames(DefId) -> Vec<ast::Name>,
/// Gets the rendered value of the specified constant or associated constant.
/// Used by rustdoc.
@ -268,7 +268,7 @@ define_maps! { <'tcx>
[] fn vtable_methods: vtable_methods_node(ty::PolyTraitRef<'tcx>)
-> Lrc<Vec<Option<(DefId, &'tcx Substs<'tcx>)>>>,
[] fn trans_fulfill_obligation: fulfill_obligation_dep_node(
[] fn codegen_fulfill_obligation: fulfill_obligation_dep_node(
(ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) -> Vtable<'tcx, ()>,
[] fn trait_impls_of: TraitImpls(DefId) -> Lrc<ty::trait_def::TraitImpls>,
[] fn specialization_graph_of: SpecializationGraph(DefId) -> Lrc<specialization_graph::Graph>,
@ -402,10 +402,10 @@ define_maps! { <'tcx>
[] fn exported_symbols: ExportedSymbols(CrateNum)
-> Arc<Vec<(ExportedSymbol<'tcx>, SymbolExportLevel)>>,
[] fn collect_and_partition_translation_items:
collect_and_partition_translation_items_node(CrateNum)
[] fn collect_and_partition_mono_items:
collect_and_partition_mono_items_node(CrateNum)
-> (Arc<DefIdSet>, Arc<Vec<Arc<CodegenUnit<'tcx>>>>),
[] fn is_translated_item: IsTranslatedItem(DefId) -> bool,
[] fn is_codegened_item: IsCodegenedItem(DefId) -> bool,
[] fn codegen_unit: CodegenUnit(InternedString) -> Arc<CodegenUnit<'tcx>>,
[] fn compile_codegen_unit: CompileCodegenUnit(InternedString) -> Stats,
[] fn output_filenames: output_filenames_node(CrateNum)
@ -475,8 +475,8 @@ fn features_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::Features
}
fn trans_fn_attrs<'tcx>(id: DefId) -> DepConstructor<'tcx> {
DepConstructor::TransFnAttrs { 0: id }
fn codegen_fn_attrs<'tcx>(id: DefId) -> DepConstructor<'tcx> {
DepConstructor::CodegenFnAttrs { 0: id }
}
fn erase_regions_ty<'tcx>(ty: Ty<'tcx>) -> DepConstructor<'tcx> {
@ -609,8 +609,8 @@ fn all_traits_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::AllTraits
}
fn collect_and_partition_translation_items_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::CollectAndPartitionTranslationItems
fn collect_and_partition_mono_items_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::CollectAndPartitionMonoItems
}
fn output_filenames_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {


@ -224,7 +224,7 @@ impl<'sess> OnDiskCache<'sess> {
encode_query_results::<predicates_of, _>(tcx, enc, qri)?;
encode_query_results::<used_trait_imports, _>(tcx, enc, qri)?;
encode_query_results::<typeck_tables_of, _>(tcx, enc, qri)?;
encode_query_results::<trans_fulfill_obligation, _>(tcx, enc, qri)?;
encode_query_results::<codegen_fulfill_obligation, _>(tcx, enc, qri)?;
encode_query_results::<optimized_mir, _>(tcx, enc, qri)?;
encode_query_results::<unsafety_check_result, _>(tcx, enc, qri)?;
encode_query_results::<borrowck, _>(tcx, enc, qri)?;
@ -234,7 +234,7 @@ impl<'sess> OnDiskCache<'sess> {
encode_query_results::<const_is_rvalue_promotable_to_static, _>(tcx, enc, qri)?;
encode_query_results::<symbol_name, _>(tcx, enc, qri)?;
encode_query_results::<check_match, _>(tcx, enc, qri)?;
encode_query_results::<trans_fn_attrs, _>(tcx, enc, qri)?;
encode_query_results::<codegen_fn_attrs, _>(tcx, enc, qri)?;
encode_query_results::<specialization_graph_of, _>(tcx, enc, qri)?;
// const eval is special, it only encodes successfully evaluated constants


@ -871,7 +871,7 @@ pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>,
// Since we cannot reconstruct the query key of a DepNode::CodegenUnit, we
// would always end up having to evaluate the first caller of the
// `codegen_unit` query that *is* reconstructible. This might very well be
// the `compile_codegen_unit` query, thus re-translating the whole CGU just
// the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
// to re-trigger calling the `codegen_unit` query with the right key. At
// that point we would already have re-done all the work we are trying to
// avoid doing in the first place.
@ -1046,7 +1046,7 @@ pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>,
}
DepKind::IsMirAvailable => { force!(is_mir_available, def_id!()); }
DepKind::ItemAttrs => { force!(item_attrs, def_id!()); }
DepKind::TransFnAttrs => { force!(trans_fn_attrs, def_id!()); }
DepKind::CodegenFnAttrs => { force!(codegen_fn_attrs, def_id!()); }
DepKind::FnArgNames => { force!(fn_arg_names, def_id!()); }
DepKind::RenderedConst => { force!(rendered_const, def_id!()); }
DepKind::DylibDepFormats => { force!(dylib_dependency_formats, krate!()); }
@ -1121,10 +1121,10 @@ pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>,
DepKind::AllTraits => { force!(all_traits, LOCAL_CRATE); }
DepKind::AllCrateNums => { force!(all_crate_nums, LOCAL_CRATE); }
DepKind::ExportedSymbols => { force!(exported_symbols, krate!()); }
DepKind::CollectAndPartitionTranslationItems => {
force!(collect_and_partition_translation_items, LOCAL_CRATE);
DepKind::CollectAndPartitionMonoItems => {
force!(collect_and_partition_mono_items, LOCAL_CRATE);
}
DepKind::IsTranslatedItem => { force!(is_translated_item, def_id!()); }
DepKind::IsCodegenedItem => { force!(is_codegened_item, def_id!()); }
DepKind::OutputFilenames => { force!(output_filenames, LOCAL_CRATE); }
DepKind::TargetFeaturesWhitelist => { force!(target_features_whitelist, LOCAL_CRATE); }
@ -1207,6 +1207,6 @@ impl_load_from_cache!(
GenericsOfItem => generics_of,
PredicatesOfItem => predicates_of,
UsedTraitImports => used_trait_imports,
TransFnAttrs => trans_fn_attrs,
CodegenFnAttrs => codegen_fn_attrs,
SpecializationGraph => specialization_graph_of,
);


@ -118,7 +118,7 @@ mod sty;
// Data types
/// The complete set of all analyses described in this module. This is
/// produced by the driver and fed to trans and later passes.
/// produced by the driver and fed to codegen and later passes.
///
/// NB: These contents are being migrated into queries using the
/// *on-demand* infrastructure.
@ -1426,7 +1426,7 @@ pub struct ParamEnv<'tcx> {
/// into Obligations, and elaborated and normalized.
pub caller_bounds: &'tcx Slice<ty::Predicate<'tcx>>,
/// Typically, this is `Reveal::UserFacing`, but during trans we
/// Typically, this is `Reveal::UserFacing`, but during codegen we
/// want `Reveal::All` -- note that this is always paired with an
/// empty environment. To get that, use `ParamEnv::reveal()`.
pub reveal: traits::Reveal,
@ -1444,7 +1444,7 @@ impl<'tcx> ParamEnv<'tcx> {
/// Construct a trait environment with no where clauses in scope
/// where the values of all `impl Trait` and other hidden types
/// are revealed. This is suitable for monomorphized, post-typeck
/// environments like trans or doing optimizations.
/// environments like codegen or doing optimizations.
///
/// NB. If you want to have predicates in scope, use `ParamEnv::new`,
/// or invoke `param_env.with_reveal_all()`.
@ -1462,7 +1462,7 @@ impl<'tcx> ParamEnv<'tcx> {
/// Returns a new parameter environment with the same clauses, but
/// which "reveals" the true results of projections in all cases
/// (even for associated types that are specializable). This is
/// the desired behavior during trans and certain other special
/// the desired behavior during codegen and certain other special
/// contexts; normally though we want to use `Reveal::UserFacing`,
/// which is the default.
pub fn with_reveal_all(self) -> Self {
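To make the distinction between the two reveal modes concrete, here is a minimal sketch of how a post-typeck consumer might switch environments. It is illustrative pseudocode against the compiler-internal API: only `param_env` and `with_reveal_all()` come from the surrounding code, while the helper name and wrapper are invented.

```rust
// Sketch only: `env_for_codegen` is a made-up helper; the imports mirror the
// crate paths used elsewhere in this diff.
use rustc::hir::def_id::DefId;
use rustc::ty::TyCtxt;

fn env_for_codegen<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) {
    // Type checking runs against the user-facing environment, where
    // specializable projections stay opaque.
    let user_facing = tcx.param_env(def_id);
    // Monomorphized consumers (codegen, MIR optimizations) want every
    // projection revealed, so they flip the environment over wholesale.
    let revealed = user_facing.with_reveal_all();
    let _ = revealed;
}
```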

View File

@ -243,7 +243,7 @@ pub enum TypeVariants<'tcx> {
/// out later.
///
/// All right, you say, but why include the type parameters from the
/// original function then? The answer is that trans may need them
/// original function then? The answer is that codegen may need them
/// when monomorphizing, and they may not appear in the upvars. A
/// closure could capture no variables but still make use of some
/// in-scope type parameter with a bound (e.g., if our example above
@ -273,7 +273,7 @@ pub struct ClosureSubsts<'tcx> {
/// Lifetime and type parameters from the enclosing function,
/// concatenated with the types of the upvars.
///
/// These are separated out because trans wants to pass them around
/// These are separated out because codegen wants to pass them around
/// when monomorphizing.
pub substs: &'tcx Substs<'tcx>,
}
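The situation described in the comments above, a closure that captures nothing yet still depends on the enclosing function's generics, is easy to reproduce in plain user code; the standalone example below is illustrative and not part of this diff.

```rust
// The closure has an empty upvar list, but monomorphizing it still requires
// `T`, which is why `ClosureSubsts` carries the enclosing generics.
fn make_default<T: Default>() -> impl Fn() -> T {
    || T::default()
}

fn main() {
    let make_u64 = make_default::<u64>();
    assert_eq!(make_u64(), 0);
}
```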
@ -1093,7 +1093,7 @@ pub enum RegionKind {
/// variable with no constraints.
ReEmpty,
/// Erased region, used by trait selection, in MIR and during trans.
/// Erased region, used by trait selection, in MIR and during codegen.
ReErased,
/// These are regions bound in the "defining type" for a

View File

@ -1135,7 +1135,7 @@ define_print! {
})?
} else {
// cross-crate closure types should only be
// visible in trans bug reports, I imagine.
// visible in codegen bug reports, I imagine.
write!(f, "@{:?}", did)?;
let mut sep = " ";
for (index, upvar_ty) in upvar_tys.enumerate() {
@ -1175,7 +1175,7 @@ define_print! {
})?
} else {
// cross-crate closure types should only be
// visible in trans bug reports, I imagine.
// visible in codegen bug reports, I imagine.
write!(f, "@{:?}", did)?;
let mut sep = " ";
for (index, upvar_ty) in upvar_tys.enumerate() {

View File

@ -1126,7 +1126,7 @@ fn foo(a: [D; 10], b: [D; 10], i: i32, t: bool) -> D {
}
```
There are a number of ways that the trans backend could choose to
There are a number of ways that the codegen backend could choose to
compile this (e.g. a `[bool; 10]` array for each such moved array;
or an `Option<usize>` for each moved array). From the viewpoint of the
borrow-checker, the important thing is to record what kind of fragment
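A hedged sketch of the two bookkeeping layouts mentioned in that comment; the types below are purely illustrative and are not what any backend actually emits.

```rust
// Two hypothetical ways to record which elements of a `[D; 10]` were moved
// out of, mirroring the `[bool; 10]` and `Option<usize>` examples above.
struct PerElementFlags {
    // one drop flag per array element
    moved: [bool; 10],
}

struct SingleWatermark {
    // index of the single moved-out element, if any
    moved: Option<usize>,
}

fn main() {
    let flags = PerElementFlags { moved: [false; 10] };
    let mark = SingleWatermark { moved: Some(3) };
    // Drop glue would consult this state before dropping each element.
    assert!(!flags.moved[3]);
    assert_eq!(mark.moved, Some(3));
}
```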

View File

@ -1,10 +1,10 @@
[package]
authors = ["The Rust Project Developers"]
name = "rustc_trans"
name = "rustc_codegen_llvm"
version = "0.0.0"
[lib]
name = "rustc_trans"
name = "rustc_codegen_llvm"
path = "lib.rs"
crate-type = ["dylib"]
test = false
@ -27,7 +27,7 @@ rustc_errors = { path = "../librustc_errors" }
rustc_incremental = { path = "../librustc_incremental" }
rustc_llvm = { path = "../librustc_llvm" }
rustc_platform_intrinsics = { path = "../librustc_platform_intrinsics" }
rustc_trans_utils = { path = "../librustc_trans_utils" }
rustc_codegen_utils = { path = "../librustc_codegen_utils" }
rustc_mir = { path = "../librustc_mir" }
serialize = { path = "../libserialize" }
syntax = { path = "../libsyntax" }
@ -43,7 +43,7 @@ env_logger = { version = "0.5", default-features = false }
# `rustc` driver script communicate this.
jemalloc = ["rustc_target/jemalloc"]
# This is used to convince Cargo to separately cache builds of `rustc_trans`
# This is used to convince Cargo to separately cache builds of `rustc_codegen_llvm`
# when this option is enabled or not. That way we can build two, cache two
# artifacts, and have nice speedy rebuilds.
emscripten = ["rustc_llvm/emscripten"]

View File

@ -0,0 +1,7 @@
The `codegen` crate contains the code to convert from MIR into LLVM IR,
and then from LLVM IR into machine code. In general it contains code
that runs towards the end of the compilation process.
For more information about how codegen works, see the [rustc guide].
[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/codegen.html

View File

@ -20,7 +20,7 @@ use rustc_allocator::{ALLOCATOR_METHODS, AllocatorTy};
use ModuleLlvm;
use llvm::{self, False, True};
pub(crate) unsafe fn trans(tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) {
pub(crate) unsafe fn codegen(tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) {
let llcx = mods.llcx;
let llmod = mods.llmod;
let usize = match &tcx.sess.target.target.target_pointer_width[..] {

View File

@ -8,8 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # Translation of inline assembly.
use llvm::{self, ValueRef};
use common::*;
use type_::Type;
@ -26,7 +24,7 @@ use syntax::ast::AsmDialect;
use libc::{c_uint, c_char};
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'a, 'tcx>(
pub fn codegen_inline_asm<'a, 'tcx>(
bx: &Builder<'a, 'tcx>,
ia: &hir::InlineAsm,
outputs: Vec<PlaceRef<'tcx>>,
@ -120,7 +118,7 @@ pub fn trans_inline_asm<'a, 'tcx>(
}
}
pub fn trans_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
ga: &hir::GlobalAsm) {
let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap();
unsafe {

View File

@ -11,7 +11,7 @@
use std::ffi::{CStr, CString};
use rustc::hir::{self, TransFnAttrFlags};
use rustc::hir::{self, CodegenFnAttrFlags};
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::session::Session;
@ -118,34 +118,34 @@ pub fn llvm_target_features(sess: &Session) -> impl Iterator<Item = &str> {
/// Composite function which sets LLVM attributes for function depending on its AST (#[attribute])
/// attributes.
pub fn from_fn_attrs(cx: &CodegenCx, llfn: ValueRef, id: DefId) {
let trans_fn_attrs = cx.tcx.trans_fn_attrs(id);
let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(id);
inline(llfn, trans_fn_attrs.inline);
inline(llfn, codegen_fn_attrs.inline);
set_frame_pointer_elimination(cx, llfn);
set_probestack(cx, llfn);
if trans_fn_attrs.flags.contains(TransFnAttrFlags::COLD) {
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
Attribute::Cold.apply_llfn(Function, llfn);
}
if trans_fn_attrs.flags.contains(TransFnAttrFlags::NAKED) {
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
naked(llfn, true);
}
if trans_fn_attrs.flags.contains(TransFnAttrFlags::ALLOCATOR) {
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
Attribute::NoAlias.apply_llfn(
llvm::AttributePlace::ReturnValue, llfn);
}
if trans_fn_attrs.flags.contains(TransFnAttrFlags::UNWIND) {
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::UNWIND) {
unwind(llfn, true);
}
if trans_fn_attrs.flags.contains(TransFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
unwind(llfn, false);
}
let features = llvm_target_features(cx.tcx.sess)
.map(|s| s.to_string())
.chain(
trans_fn_attrs.target_features
codegen_fn_attrs.target_features
.iter()
.map(|f| {
let feature = &*f.as_str();
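For readers wondering where these flags originate, the following standalone snippet shows the kind of source-level attributes that end up in `CodegenFnAttrs` and are then lowered by `from_fn_attrs`; it is an illustration, not code from this diff.

```rust
// `#[cold]` becomes CodegenFnAttrFlags::COLD (lowered to LLVM's `cold`
// attribute above), and `#[target_feature]` feeds the `target_features`
// list that gets chained onto the session-wide feature set.
#[cold]
#[inline(never)]
fn unlikely_slow_path(msg: &str) {
    eprintln!("unexpected condition: {}", msg);
}

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx2")]
unsafe fn sum_avx2(xs: &[f32]) -> f32 {
    xs.iter().sum()
}

fn main() {
    unlikely_slow_path("demo");
}
```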

View File

@ -24,7 +24,7 @@ use rustc::session::search_paths::PathKind;
use rustc::session::Session;
use rustc::middle::cstore::{NativeLibrary, LibSource, NativeLibraryKind};
use rustc::middle::dependency_format::Linkage;
use {CrateTranslation, CrateInfo};
use {CodegenResults, CrateInfo};
use rustc::util::common::time;
use rustc::util::fs::fix_windows_verbatim_for_gcc;
use rustc::hir::def_id::CrateNum;
@ -52,7 +52,7 @@ pub const METADATA_MODULE_NAME: &'static str = "crate.metadata";
// same as for metadata above, but for allocator shim
pub const ALLOCATOR_MODULE_NAME: &'static str = "crate.allocator";
pub use rustc_trans_utils::link::{find_crate_name, filename_for_input, default_output_for_target,
pub use rustc_codegen_utils::link::{find_crate_name, filename_for_input, default_output_for_target,
invalid_output_for_target, build_link_meta, out_filename,
check_file_is_writeable};
@ -141,14 +141,14 @@ pub fn remove(sess: &Session, path: &Path) {
/// Perform the linkage portion of the compilation phase. This will generate all
/// of the requested outputs for this compilation session.
pub(crate) fn link_binary(sess: &Session,
trans: &CrateTranslation,
codegen_results: &CodegenResults,
outputs: &OutputFilenames,
crate_name: &str) -> Vec<PathBuf> {
let mut out_filenames = Vec::new();
for &crate_type in sess.crate_types.borrow().iter() {
// Ignore executable crates if we have -Z no-trans, as they will error.
// Ignore executable crates if we have -Z no-codegen, as they will error.
let output_metadata = sess.opts.output_types.contains_key(&OutputType::Metadata);
if (sess.opts.debugging_opts.no_trans || !sess.opts.output_types.should_trans()) &&
if (sess.opts.debugging_opts.no_codegen || !sess.opts.output_types.should_codegen()) &&
!output_metadata &&
crate_type == config::CrateTypeExecutable {
continue;
@ -159,7 +159,7 @@ pub(crate) fn link_binary(sess: &Session,
crate_type, sess.opts.target_triple);
}
let mut out_files = link_binary_output(sess,
trans,
codegen_results,
crate_type,
outputs,
crate_name);
@ -168,20 +168,20 @@ pub(crate) fn link_binary(sess: &Session,
// Remove the temporary object file and metadata if we aren't saving temps
if !sess.opts.cg.save_temps {
if sess.opts.output_types.should_trans() &&
if sess.opts.output_types.should_codegen() &&
!preserve_objects_for_their_debuginfo(sess)
{
for obj in trans.modules.iter().filter_map(|m| m.object.as_ref()) {
for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
remove(sess, obj);
}
}
for obj in trans.modules.iter().filter_map(|m| m.bytecode_compressed.as_ref()) {
for obj in codegen_results.modules.iter().filter_map(|m| m.bytecode_compressed.as_ref()) {
remove(sess, obj);
}
if let Some(ref obj) = trans.metadata_module.object {
if let Some(ref obj) = codegen_results.metadata_module.object {
remove(sess, obj);
}
if let Some(ref allocator) = trans.allocator_module {
if let Some(ref allocator) = codegen_results.allocator_module {
if let Some(ref obj) = allocator.object {
remove(sess, obj);
}
@ -304,11 +304,11 @@ pub(crate) fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum)
}
fn link_binary_output(sess: &Session,
trans: &CrateTranslation,
codegen_results: &CodegenResults,
crate_type: config::CrateType,
outputs: &OutputFilenames,
crate_name: &str) -> Vec<PathBuf> {
for obj in trans.modules.iter().filter_map(|m| m.object.as_ref()) {
for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
check_file_is_writeable(obj, sess);
}
@ -325,7 +325,7 @@ fn link_binary_output(sess: &Session,
Ok(tmpdir) => tmpdir,
Err(err) => sess.fatal(&format!("couldn't create a temp dir: {}", err)),
};
let metadata = emit_metadata(sess, trans, &metadata_tmpdir);
let metadata = emit_metadata(sess, codegen_results, &metadata_tmpdir);
if let Err(e) = fs::rename(metadata, &out_filename) {
sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e));
}
@ -337,21 +337,21 @@ fn link_binary_output(sess: &Session,
Err(err) => sess.fatal(&format!("couldn't create a temp dir: {}", err)),
};
if outputs.outputs.should_trans() {
if outputs.outputs.should_codegen() {
let out_filename = out_filename(sess, crate_type, outputs, crate_name);
match crate_type {
config::CrateTypeRlib => {
link_rlib(sess,
trans,
codegen_results,
RlibFlavor::Normal,
&out_filename,
&tmpdir).build();
}
config::CrateTypeStaticlib => {
link_staticlib(sess, trans, &out_filename, &tmpdir);
link_staticlib(sess, codegen_results, &out_filename, &tmpdir);
}
_ => {
link_natively(sess, crate_type, &out_filename, trans, tmpdir.path());
link_natively(sess, crate_type, &out_filename, codegen_results, tmpdir.path());
}
}
out_filenames.push(out_filename);
@ -388,10 +388,10 @@ fn archive_config<'a>(sess: &'a Session,
/// building an `.rlib` (stomping over one another), or writing an `.rmeta` into a
/// directory being searched for `extern crate` (observing an incomplete file).
/// The returned path is the temporary file containing the complete metadata.
fn emit_metadata<'a>(sess: &'a Session, trans: &CrateTranslation, tmpdir: &TempDir)
fn emit_metadata<'a>(sess: &'a Session, codegen_results: &CodegenResults, tmpdir: &TempDir)
-> PathBuf {
let out_filename = tmpdir.path().join(METADATA_FILENAME);
let result = fs::write(&out_filename, &trans.metadata.raw_data);
let result = fs::write(&out_filename, &codegen_results.metadata.raw_data);
if let Err(e) = result {
sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e));
@ -412,14 +412,14 @@ enum RlibFlavor {
// all of the object files from native libraries. This is done by unzipping
// native libraries and inserting all of the contents into this archive.
fn link_rlib<'a>(sess: &'a Session,
trans: &CrateTranslation,
codegen_results: &CodegenResults,
flavor: RlibFlavor,
out_filename: &Path,
tmpdir: &TempDir) -> ArchiveBuilder<'a> {
info!("preparing rlib to {:?}", out_filename);
let mut ab = ArchiveBuilder::new(archive_config(sess, out_filename, None));
for obj in trans.modules.iter().filter_map(|m| m.object.as_ref()) {
for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
ab.add_file(obj);
}
@ -439,7 +439,7 @@ fn link_rlib<'a>(sess: &'a Session,
// feature then we'll need to figure out how to record what objects were
// loaded from the libraries found here and then encode that into the
// metadata of the rlib we're generating somehow.
for lib in trans.crate_info.used_libraries.iter() {
for lib in codegen_results.crate_info.used_libraries.iter() {
match lib.kind {
NativeLibraryKind::NativeStatic => {}
NativeLibraryKind::NativeStaticNobundle |
@ -478,11 +478,15 @@ fn link_rlib<'a>(sess: &'a Session,
RlibFlavor::Normal => {
// Instead of putting the metadata in an object file section, rlibs
// contain the metadata in a separate file.
ab.add_file(&emit_metadata(sess, trans, tmpdir));
ab.add_file(&emit_metadata(sess, codegen_results, tmpdir));
// For LTO purposes, the bytecode of this library is also inserted
// into the archive.
for bytecode in trans.modules.iter().filter_map(|m| m.bytecode_compressed.as_ref()) {
for bytecode in codegen_results
.modules
.iter()
.filter_map(|m| m.bytecode_compressed.as_ref())
{
ab.add_file(bytecode);
}
@ -495,7 +499,7 @@ fn link_rlib<'a>(sess: &'a Session,
}
RlibFlavor::StaticlibBase => {
let obj = trans.allocator_module
let obj = codegen_results.allocator_module
.as_ref()
.and_then(|m| m.object.as_ref());
if let Some(obj) = obj {
@ -520,19 +524,19 @@ fn link_rlib<'a>(sess: &'a Session,
// link in the metadata object file (and also don't prepare the archive with a
// metadata file).
fn link_staticlib(sess: &Session,
trans: &CrateTranslation,
codegen_results: &CodegenResults,
out_filename: &Path,
tempdir: &TempDir) {
let mut ab = link_rlib(sess,
trans,
codegen_results,
RlibFlavor::StaticlibBase,
out_filename,
tempdir);
let mut all_native_libs = vec![];
let res = each_linked_rlib(sess, &trans.crate_info, &mut |cnum, path| {
let name = &trans.crate_info.crate_name[&cnum];
let native_libs = &trans.crate_info.native_libraries[&cnum];
let res = each_linked_rlib(sess, &codegen_results.crate_info, &mut |cnum, path| {
let name = &codegen_results.crate_info.crate_name[&cnum];
let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
// Here when we include the rlib into our staticlib we need to make a
// decision whether to include the extra object files along the way.
@ -554,10 +558,10 @@ fn link_staticlib(sess: &Session,
ab.add_rlib(path,
&name.as_str(),
is_full_lto_enabled(sess) &&
!ignored_for_lto(sess, &trans.crate_info, cnum),
!ignored_for_lto(sess, &codegen_results.crate_info, cnum),
skip_object_files).unwrap();
all_native_libs.extend(trans.crate_info.native_libraries[&cnum].iter().cloned());
all_native_libs.extend(codegen_results.crate_info.native_libraries[&cnum].iter().cloned());
});
if let Err(e) = res {
sess.fatal(&e);
@ -609,7 +613,7 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLibrary]) {
fn link_natively(sess: &Session,
crate_type: config::CrateType,
out_filename: &Path,
trans: &CrateTranslation,
codegen_results: &CodegenResults,
tmpdir: &Path) {
info!("preparing {:?} to {:?}", crate_type, out_filename);
let flavor = sess.linker_flavor();
@ -662,9 +666,9 @@ fn link_natively(sess: &Session,
}
{
let mut linker = trans.linker_info.to_linker(cmd, &sess);
let mut linker = codegen_results.linker_info.to_linker(cmd, &sess);
link_args(&mut *linker, sess, crate_type, tmpdir,
out_filename, trans);
out_filename, codegen_results);
cmd = linker.finalize();
}
if let Some(args) = sess.target.target.options.late_link_args.get(&flavor) {
@ -842,9 +846,9 @@ fn link_natively(sess: &Session,
}
if sess.opts.target_triple == TargetTriple::from_triple("wasm32-unknown-unknown") {
wasm::rewrite_imports(&out_filename, &trans.crate_info.wasm_imports);
wasm::rewrite_imports(&out_filename, &codegen_results.crate_info.wasm_imports);
wasm::add_custom_sections(&out_filename,
&trans.crate_info.wasm_custom_sections);
&codegen_results.crate_info.wasm_custom_sections);
}
}
@ -995,7 +999,7 @@ fn link_args(cmd: &mut Linker,
crate_type: config::CrateType,
tmpdir: &Path,
out_filename: &Path,
trans: &CrateTranslation) {
codegen_results: &CodegenResults) {
// Linker plugins should be specified early in the list of arguments
cmd.cross_lang_lto();
@ -1008,14 +1012,14 @@ fn link_args(cmd: &mut Linker,
let t = &sess.target.target;
cmd.include_path(&fix_windows_verbatim_for_gcc(&lib_path));
for obj in trans.modules.iter().filter_map(|m| m.object.as_ref()) {
for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
cmd.add_object(obj);
}
cmd.output_filename(out_filename);
if crate_type == config::CrateTypeExecutable &&
sess.target.target.options.is_like_windows {
if let Some(ref s) = trans.windows_subsystem {
if let Some(ref s) = codegen_results.windows_subsystem {
cmd.subsystem(s);
}
}
@ -1032,12 +1036,12 @@ fn link_args(cmd: &mut Linker,
// object file, so we link that in here.
if crate_type == config::CrateTypeDylib ||
crate_type == config::CrateTypeProcMacro {
if let Some(obj) = trans.metadata_module.object.as_ref() {
if let Some(obj) = codegen_results.metadata_module.object.as_ref() {
cmd.add_object(obj);
}
}
let obj = trans.allocator_module
let obj = codegen_results.allocator_module
.as_ref()
.and_then(|m| m.object.as_ref());
if let Some(obj) = obj {
@ -1051,7 +1055,7 @@ fn link_args(cmd: &mut Linker,
cmd.gc_sections(keep_metadata);
}
let used_link_args = &trans.crate_info.link_args;
let used_link_args = &codegen_results.crate_info.link_args;
if crate_type == config::CrateTypeExecutable {
let mut position_independent_executable = false;
@ -1140,9 +1144,9 @@ fn link_args(cmd: &mut Linker,
// link line. And finally upstream native libraries can't depend on anything
// in this DAG so far because they're only dylibs and dylibs can only depend
// on other dylibs (e.g. other native deps).
add_local_native_libraries(cmd, sess, trans);
add_upstream_rust_crates(cmd, sess, trans, crate_type, tmpdir);
add_upstream_native_libraries(cmd, sess, trans, crate_type);
add_local_native_libraries(cmd, sess, codegen_results);
add_upstream_rust_crates(cmd, sess, codegen_results, crate_type, tmpdir);
add_upstream_native_libraries(cmd, sess, codegen_results, crate_type);
// Tell the linker what we're doing.
if crate_type != config::CrateTypeExecutable {
@ -1171,7 +1175,7 @@ fn link_args(cmd: &mut Linker,
path
};
let mut rpath_config = RPathConfig {
used_crates: &trans.crate_info.used_crates_dynamic,
used_crates: &codegen_results.crate_info.used_crates_dynamic,
out_filename: out_filename.to_path_buf(),
has_rpath: sess.target.target.options.has_rpath,
is_like_osx: sess.target.target.options.is_like_osx,
@ -1203,7 +1207,7 @@ fn link_args(cmd: &mut Linker,
// may have their native library pulled in above.
fn add_local_native_libraries(cmd: &mut Linker,
sess: &Session,
trans: &CrateTranslation) {
codegen_results: &CodegenResults) {
sess.target_filesearch(PathKind::All).for_each_lib_search_path(|path, k| {
match k {
PathKind::Framework => { cmd.framework_path(path); }
@ -1211,7 +1215,7 @@ fn add_local_native_libraries(cmd: &mut Linker,
}
});
let relevant_libs = trans.crate_info.used_libraries.iter().filter(|l| {
let relevant_libs = codegen_results.crate_info.used_libraries.iter().filter(|l| {
relevant_lib(sess, l)
});
@ -1234,7 +1238,7 @@ fn add_local_native_libraries(cmd: &mut Linker,
// the intermediate rlib version)
fn add_upstream_rust_crates(cmd: &mut Linker,
sess: &Session,
trans: &CrateTranslation,
codegen_results: &CodegenResults,
crate_type: config::CrateType,
tmpdir: &Path) {
// All of the heavy lifting has previously been accomplished by the
@ -1250,7 +1254,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker,
// Invoke get_used_crates to ensure that we get a topological sorting of
// crates.
let deps = &trans.crate_info.used_crates_dynamic;
let deps = &codegen_results.crate_info.used_crates_dynamic;
// There's a few internal crates in the standard library (aka libcore and
// libstd) which actually have a circular dependence upon one another. This
@ -1273,7 +1277,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker,
let mut group_end = None;
let mut group_start = None;
let mut end_with = FxHashSet();
let info = &trans.crate_info;
let info = &codegen_results.crate_info;
for &(cnum, _) in deps.iter().rev() {
if let Some(missing) = info.missing_lang_items.get(&cnum) {
end_with.extend(missing.iter().cloned());
@ -1305,24 +1309,24 @@ fn add_upstream_rust_crates(cmd: &mut Linker,
// We may not pass all crates through to the linker. Some crates may
// appear statically in an existing dylib, meaning we'll pick up all the
// symbols from the dylib.
let src = &trans.crate_info.used_crate_source[&cnum];
let src = &codegen_results.crate_info.used_crate_source[&cnum];
match data[cnum.as_usize() - 1] {
_ if trans.crate_info.profiler_runtime == Some(cnum) => {
add_static_crate(cmd, sess, trans, tmpdir, crate_type, cnum);
_ if codegen_results.crate_info.profiler_runtime == Some(cnum) => {
add_static_crate(cmd, sess, codegen_results, tmpdir, crate_type, cnum);
}
_ if trans.crate_info.sanitizer_runtime == Some(cnum) => {
link_sanitizer_runtime(cmd, sess, trans, tmpdir, cnum);
_ if codegen_results.crate_info.sanitizer_runtime == Some(cnum) => {
link_sanitizer_runtime(cmd, sess, codegen_results, tmpdir, cnum);
}
// compiler-builtins are always placed last to ensure that they're
// linked correctly.
_ if trans.crate_info.compiler_builtins == Some(cnum) => {
_ if codegen_results.crate_info.compiler_builtins == Some(cnum) => {
assert!(compiler_builtins.is_none());
compiler_builtins = Some(cnum);
}
Linkage::NotLinked |
Linkage::IncludedFromDylib => {}
Linkage::Static => {
add_static_crate(cmd, sess, trans, tmpdir, crate_type, cnum);
add_static_crate(cmd, sess, codegen_results, tmpdir, crate_type, cnum);
}
Linkage::Dynamic => {
add_dynamic_crate(cmd, sess, &src.dylib.as_ref().unwrap().0)
@ -1340,7 +1344,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker,
// was already "included" in a dylib (e.g. `libstd` when `-C prefer-dynamic`
// is used)
if let Some(cnum) = compiler_builtins {
add_static_crate(cmd, sess, trans, tmpdir, crate_type, cnum);
add_static_crate(cmd, sess, codegen_results, tmpdir, crate_type, cnum);
}
// Converts a library file-stem into a cc -l argument
@ -1358,10 +1362,10 @@ fn add_upstream_rust_crates(cmd: &mut Linker,
// linking it.
fn link_sanitizer_runtime(cmd: &mut Linker,
sess: &Session,
trans: &CrateTranslation,
codegen_results: &CodegenResults,
tmpdir: &Path,
cnum: CrateNum) {
let src = &trans.crate_info.used_crate_source[&cnum];
let src = &codegen_results.crate_info.used_crate_source[&cnum];
let cratepath = &src.rlib.as_ref().unwrap().0;
if sess.target.target.options.is_like_osx {
@ -1427,23 +1431,23 @@ fn add_upstream_rust_crates(cmd: &mut Linker,
// we're at the end of the dependency chain.
fn add_static_crate(cmd: &mut Linker,
sess: &Session,
trans: &CrateTranslation,
codegen_results: &CodegenResults,
tmpdir: &Path,
crate_type: config::CrateType,
cnum: CrateNum) {
let src = &trans.crate_info.used_crate_source[&cnum];
let src = &codegen_results.crate_info.used_crate_source[&cnum];
let cratepath = &src.rlib.as_ref().unwrap().0;
// See the comment above in `link_staticlib` and `link_rlib` for why if
// there's a static library that's not relevant we skip all object
// files.
let native_libs = &trans.crate_info.native_libraries[&cnum];
let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
let skip_native = native_libs.iter().any(|lib| {
lib.kind == NativeLibraryKind::NativeStatic && !relevant_lib(sess, lib)
});
if (!is_full_lto_enabled(sess) ||
ignored_for_lto(sess, &trans.crate_info, cnum)) &&
ignored_for_lto(sess, &codegen_results.crate_info, cnum)) &&
crate_type != config::CrateTypeDylib &&
!skip_native {
cmd.link_rlib(&fix_windows_verbatim_for_gcc(cratepath));
@ -1499,7 +1503,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker,
let skip_because_lto = is_full_lto_enabled(sess) &&
is_rust_object &&
(sess.target.target.options.no_builtins ||
!trans.crate_info.is_no_builtins.contains(&cnum));
!codegen_results.crate_info.is_no_builtins.contains(&cnum));
if skip_because_cfg_say_so || skip_because_lto {
archive.remove_file(&f);
@ -1521,7 +1525,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker,
// compiler-builtins crate (e.g. compiler-rt) because it'll get
// repeatedly linked anyway.
if crate_type == config::CrateTypeDylib &&
trans.crate_info.compiler_builtins != Some(cnum) {
codegen_results.crate_info.compiler_builtins != Some(cnum) {
cmd.link_whole_rlib(&fix_windows_verbatim_for_gcc(&dst));
} else {
cmd.link_rlib(&fix_windows_verbatim_for_gcc(&dst));
@ -1567,7 +1571,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker,
// also be resolved in the target crate.
fn add_upstream_native_libraries(cmd: &mut Linker,
sess: &Session,
trans: &CrateTranslation,
codegen_results: &CodegenResults,
crate_type: config::CrateType) {
// Be sure to use a topological sorting of crates because there may be
// interdependencies between native libraries. When passing -nodefaultlibs,
@ -1581,9 +1585,9 @@ fn add_upstream_native_libraries(cmd: &mut Linker,
let formats = sess.dependency_formats.borrow();
let data = formats.get(&crate_type).unwrap();
let crates = &trans.crate_info.used_crates_static;
let crates = &codegen_results.crate_info.used_crates_static;
for &(cnum, _) in crates {
for lib in trans.crate_info.native_libraries[&cnum].iter() {
for lib in codegen_results.crate_info.native_libraries[&cnum].iter() {
if !relevant_lib(sess, &lib) {
continue
}

View File

@ -641,7 +641,7 @@ impl<'a> Linker for MsvcLinker<'a> {
//
// The linker will read this `*.def` file and export all the symbols from
// the dynamic library. Note that this is not as simple as just exporting
// all the symbols in the current crate (as specified by `trans.reachable`)
// all the symbols in the current crate (as specified by `codegen.reachable`)
// but rather we also need to possibly export the symbols of upstream
// crates. Upstream rlibs may be linked statically to this dynamic library,
// in which case they may continue to transitively be used and hence need

View File

@ -21,7 +21,7 @@ use rustc::middle::exported_symbols::SymbolExportLevel;
use rustc::session::config::{self, Lto};
use rustc::util::common::time_ext;
use time_graph::Timeline;
use {ModuleTranslation, ModuleLlvm, ModuleKind, ModuleSource};
use {ModuleCodegen, ModuleLlvm, ModuleKind, ModuleSource};
use libc;
@ -42,45 +42,45 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool {
}
}
pub(crate) enum LtoModuleTranslation {
pub(crate) enum LtoModuleCodegen {
Fat {
module: Option<ModuleTranslation>,
module: Option<ModuleCodegen>,
_serialized_bitcode: Vec<SerializedModule>,
},
Thin(ThinModule),
}
impl LtoModuleTranslation {
impl LtoModuleCodegen {
pub fn name(&self) -> &str {
match *self {
LtoModuleTranslation::Fat { .. } => "everything",
LtoModuleTranslation::Thin(ref m) => m.name(),
LtoModuleCodegen::Fat { .. } => "everything",
LtoModuleCodegen::Thin(ref m) => m.name(),
}
}
/// Optimize this module within the given codegen context.
///
/// This function is unsafe as it'll return a `ModuleTranslation` that still
/// points to LLVM data structures owned by this `LtoModuleTranslation`.
/// This function is unsafe as it'll return a `ModuleCodegen` that still
/// points to LLVM data structures owned by this `LtoModuleCodegen`.
/// It's intended that the module returned is immediately code generated and
/// dropped, and then this LTO module is dropped.
pub(crate) unsafe fn optimize(&mut self,
cgcx: &CodegenContext,
timeline: &mut Timeline)
-> Result<ModuleTranslation, FatalError>
-> Result<ModuleCodegen, FatalError>
{
match *self {
LtoModuleTranslation::Fat { ref mut module, .. } => {
let trans = module.take().unwrap();
let config = cgcx.config(trans.kind);
let llmod = trans.llvm().unwrap().llmod;
let tm = trans.llvm().unwrap().tm;
LtoModuleCodegen::Fat { ref mut module, .. } => {
let module = module.take().unwrap();
let config = cgcx.config(module.kind);
let llmod = module.llvm().unwrap().llmod;
let tm = module.llvm().unwrap().tm;
run_pass_manager(cgcx, tm, llmod, config, false);
timeline.record("fat-done");
Ok(trans)
Ok(module)
}
LtoModuleTranslation::Thin(ref mut thin) => thin.optimize(cgcx, timeline),
LtoModuleCodegen::Thin(ref mut thin) => thin.optimize(cgcx, timeline),
}
}
@ -89,16 +89,16 @@ impl LtoModuleTranslation {
pub fn cost(&self) -> u64 {
match *self {
// Only one module with fat LTO, so the cost doesn't matter.
LtoModuleTranslation::Fat { .. } => 0,
LtoModuleTranslation::Thin(ref m) => m.cost(),
LtoModuleCodegen::Fat { .. } => 0,
LtoModuleCodegen::Thin(ref m) => m.cost(),
}
}
}
pub(crate) fn run(cgcx: &CodegenContext,
modules: Vec<ModuleTranslation>,
modules: Vec<ModuleCodegen>,
timeline: &mut Timeline)
-> Result<Vec<LtoModuleTranslation>, FatalError>
-> Result<Vec<LtoModuleCodegen>, FatalError>
{
let diag_handler = cgcx.create_diag_handler();
let export_threshold = match cgcx.lto {
@ -201,11 +201,11 @@ pub(crate) fn run(cgcx: &CodegenContext,
fn fat_lto(cgcx: &CodegenContext,
diag_handler: &Handler,
mut modules: Vec<ModuleTranslation>,
mut modules: Vec<ModuleCodegen>,
mut serialized_modules: Vec<(SerializedModule, CString)>,
symbol_white_list: &[*const libc::c_char],
timeline: &mut Timeline)
-> Result<Vec<LtoModuleTranslation>, FatalError>
-> Result<Vec<LtoModuleCodegen>, FatalError>
{
info!("going for a fat lto");
@ -228,18 +228,18 @@ fn fat_lto(cgcx: &CodegenContext,
(cost, i)
})
.max()
.expect("must be trans'ing at least one module");
.expect("must be codegen'ing at least one module");
let module = modules.remove(costliest_module);
let llmod = module.llvm().expect("can't lto pre-translated modules").llmod;
let llmod = module.llvm().expect("can't lto pre-codegened modules").llmod;
info!("using {:?} as a base module", module.llmod_id);
// For all other modules we translated we'll need to link them into our own
// bitcode. All modules were translated in their own LLVM context, however,
// For all other modules we codegened we'll need to link them into our own
// bitcode. All modules were codegened in their own LLVM context, however,
// and we want to move everything to the same LLVM context. Currently the
// way we know of to do that is to serialize them to a string and then parse
// them later. Not great but hey, that's why it's "fat" LTO, right?
for module in modules {
let llvm = module.llvm().expect("can't lto pre-translated modules");
let llvm = module.llvm().expect("can't lto pre-codegened modules");
let buffer = ModuleBuffer::new(llvm.llmod);
let llmod_id = CString::new(&module.llmod_id[..]).unwrap();
serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
@ -284,7 +284,7 @@ fn fat_lto(cgcx: &CodegenContext,
}
timeline.record("passes");
Ok(vec![LtoModuleTranslation::Fat {
Ok(vec![LtoModuleCodegen::Fat {
module: Some(module),
_serialized_bitcode: serialized_bitcode,
}])
@ -344,14 +344,14 @@ impl Drop for Linker {
///
/// With all that in mind, the function here is designed at specifically just
/// calculating the *index* for ThinLTO. This index will then be shared amongst
/// all of the `LtoModuleTranslation` units returned below and destroyed once
/// all of the `LtoModuleCodegen` units returned below and destroyed once
/// they all go out of scope.
fn thin_lto(diag_handler: &Handler,
modules: Vec<ModuleTranslation>,
modules: Vec<ModuleCodegen>,
serialized_modules: Vec<(SerializedModule, CString)>,
symbol_white_list: &[*const libc::c_char],
timeline: &mut Timeline)
-> Result<Vec<LtoModuleTranslation>, FatalError>
-> Result<Vec<LtoModuleCodegen>, FatalError>
{
unsafe {
info!("going for that thin, thin LTO");
@ -369,7 +369,7 @@ fn thin_lto(diag_handler: &Handler,
// analysis!
for (i, module) in modules.iter().enumerate() {
info!("local module: {} - {}", i, module.llmod_id);
let llvm = module.llvm().expect("can't lto pretranslated module");
let llvm = module.llvm().expect("can't lto precodegened module");
let name = CString::new(module.llmod_id.clone()).unwrap();
let buffer = ThinBuffer::new(llvm.llmod);
thin_modules.push(llvm::ThinLTOModule {
@ -431,7 +431,7 @@ fn thin_lto(diag_handler: &Handler,
// Throw our data in an `Arc` as we'll be sharing it across threads. We
// also put all memory referenced by the C++ data (buffers, ids, etc)
// into the arc as well. After this we'll create a thin module
// translation per module in this data.
// codegen per module in this data.
let shared = Arc::new(ThinShared {
data,
thin_buffers,
@ -439,7 +439,7 @@ fn thin_lto(diag_handler: &Handler,
module_names,
});
Ok((0..shared.module_names.len()).map(|i| {
LtoModuleTranslation::Thin(ThinModule {
LtoModuleCodegen::Thin(ThinModule {
shared: shared.clone(),
idx: i,
})
@ -622,7 +622,7 @@ impl ThinModule {
}
unsafe fn optimize(&mut self, cgcx: &CodegenContext, timeline: &mut Timeline)
-> Result<ModuleTranslation, FatalError>
-> Result<ModuleCodegen, FatalError>
{
let diag_handler = cgcx.create_diag_handler();
let tm = (cgcx.tm_factory)().map_err(|e| {
@ -632,7 +632,7 @@ impl ThinModule {
// Right now the implementation we've got only works over serialized
// modules, so we create a fresh new LLVM context and parse the module
// into that context. One day, however, we may do this for upstream
// crates but for locally translated modules we may be able to reuse
// crates but for locally codegened modules we may be able to reuse
// that LLVM Context and Module.
let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
let llmod = llvm::LLVMRustParseBitcodeForThinLTO(
@ -645,8 +645,8 @@ impl ThinModule {
let msg = format!("failed to parse bitcode for thin LTO module");
return Err(write::llvm_err(&diag_handler, msg));
}
let mtrans = ModuleTranslation {
source: ModuleSource::Translated(ModuleLlvm {
let module = ModuleCodegen {
source: ModuleSource::Codegened(ModuleLlvm {
llmod,
llcx,
tm,
@ -655,7 +655,7 @@ impl ThinModule {
name: self.name().to_string(),
kind: ModuleKind::Regular,
};
cgcx.save_temp_bitcode(&mtrans, "thin-lto-input");
cgcx.save_temp_bitcode(&module, "thin-lto-input");
// Before we do much else find the "main" `DICompileUnit` that we'll be
// using below. If we find more than one though then rustc has changed
@ -673,7 +673,7 @@ impl ThinModule {
// are disabled by removing all landing pads.
if cgcx.no_landing_pads {
llvm::LLVMRustMarkAllFunctionsNounwind(llmod);
cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-nounwind");
cgcx.save_temp_bitcode(&module, "thin-lto-after-nounwind");
timeline.record("nounwind");
}
@ -689,25 +689,25 @@ impl ThinModule {
let msg = format!("failed to prepare thin LTO module");
return Err(write::llvm_err(&diag_handler, msg))
}
cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-rename");
cgcx.save_temp_bitcode(&module, "thin-lto-after-rename");
timeline.record("rename");
if !llvm::LLVMRustPrepareThinLTOResolveWeak(self.shared.data.0, llmod) {
let msg = format!("failed to prepare thin LTO module");
return Err(write::llvm_err(&diag_handler, msg))
}
cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-resolve");
cgcx.save_temp_bitcode(&module, "thin-lto-after-resolve");
timeline.record("resolve");
if !llvm::LLVMRustPrepareThinLTOInternalize(self.shared.data.0, llmod) {
let msg = format!("failed to prepare thin LTO module");
return Err(write::llvm_err(&diag_handler, msg))
}
cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-internalize");
cgcx.save_temp_bitcode(&module, "thin-lto-after-internalize");
timeline.record("internalize");
if !llvm::LLVMRustPrepareThinLTOImport(self.shared.data.0, llmod) {
let msg = format!("failed to prepare thin LTO module");
return Err(write::llvm_err(&diag_handler, msg))
}
cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-import");
cgcx.save_temp_bitcode(&module, "thin-lto-after-import");
timeline.record("import");
// Ok now this is a bit unfortunate. This is also something you won't
@ -740,7 +740,7 @@ impl ThinModule {
// so it appears). Hopefully we can remove this once upstream bugs are
// fixed in LLVM.
llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1);
cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-patch");
cgcx.save_temp_bitcode(&module, "thin-lto-after-patch");
timeline.record("patch");
// Alright now that we've done everything related to the ThinLTO
@ -748,10 +748,10 @@ impl ThinModule {
// `run_pass_manager` as the "fat" LTO above except that we tell it to
// populate a thin-specific pass manager, which presumably LLVM treats a
// little differently.
info!("running thin lto passes over {}", mtrans.name);
let config = cgcx.config(mtrans.kind);
info!("running thin lto passes over {}", module.name);
let config = cgcx.config(module.kind);
run_pass_manager(cgcx, tm, llmod, config, true);
cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-pm");
cgcx.save_temp_bitcode(&module, "thin-lto-after-pm");
timeline.record("thin-done");
// FIXME: this is a hack around a bug in LLVM right now. Discovered in
@ -765,9 +765,9 @@ impl ThinModule {
// shouldn't be necessary eventually and we can safely delete these few
// lines.
llvm::LLVMRustThinLTORemoveAvailableExternally(llmod);
cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-rm-ae");
cgcx.save_temp_bitcode(&module, "thin-lto-after-rm-ae");
timeline.record("no-ae");
Ok(mtrans)
Ok(module)
}
}

View File

@ -13,7 +13,7 @@ use std::sync::Arc;
use monomorphize::Instance;
use rustc::hir;
use rustc::hir::TransFnAttrFlags;
use rustc::hir::CodegenFnAttrFlags;
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX};
use rustc::ich::Fingerprint;
use rustc::middle::exported_symbols::{SymbolExportLevel, ExportedSymbol, metadata_symbol_name};
@ -63,7 +63,7 @@ fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
{
assert_eq!(cnum, LOCAL_CRATE);
if !tcx.sess.opts.output_types.should_trans() {
if !tcx.sess.opts.output_types.should_codegen() {
return Lrc::new(DefIdMap())
}
@ -118,7 +118,7 @@ fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let def_id = tcx.hir.local_def_id(node_id);
let generics = tcx.generics_of(def_id);
if !generics.requires_monomorphization(tcx) &&
// Functions marked with #[inline] are only ever translated
// Functions marked with #[inline] are only ever codegened
// with "internal" linkage and are never exported.
!Instance::mono(tcx, def_id).def.requires_local(tcx) {
Some(def_id)
@ -195,7 +195,7 @@ fn exported_symbols_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
{
assert_eq!(cnum, LOCAL_CRATE);
if !tcx.sess.opts.output_types.should_trans() {
if !tcx.sess.opts.output_types.should_codegen() {
return Arc::new(vec![])
}
@ -255,7 +255,7 @@ fn exported_symbols_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let need_visibility = tcx.sess.target.target.options.dynamic_linking &&
!tcx.sess.target.target.options.only_cdylib;
let (_, cgus) = tcx.collect_and_partition_translation_items(LOCAL_CRATE);
let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
for (mono_item, &(linkage, visibility)) in cgus.iter()
.flat_map(|cgu| cgu.items().iter()) {
@ -383,9 +383,10 @@ fn symbol_export_level(tcx: TyCtxt, sym_def_id: DefId) -> SymbolExportLevel {
// special symbols in the standard library for various plumbing between
// core/std/allocators/etc. For example symbols used to hook up allocation
// are not considered for export
let trans_fn_attrs = tcx.trans_fn_attrs(sym_def_id);
let is_extern = trans_fn_attrs.contains_extern_indicator();
let std_internal = trans_fn_attrs.flags.contains(TransFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
let codegen_fn_attrs = tcx.codegen_fn_attrs(sym_def_id);
let is_extern = codegen_fn_attrs.contains_extern_indicator();
let std_internal =
codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
if is_extern && !std_internal {
SymbolExportLevel::C
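Taken together with the `#[inline]` note earlier in this file, the classification can be illustrated from the user's side. The snippet below is a sketch of which functions typically end up exported; it is not code from this diff, and the real export rules have more cases than shown.

```rust
// With an "extern indicator" such as #[no_mangle], the function is a
// candidate for SymbolExportLevel::C. A plain #[inline] function is
// instantiated with internal linkage wherever it is used and does not
// appear in the crate's exported-symbol list.
#[no_mangle]
pub extern "C" fn exported_add(x: u32, y: u32) -> u32 {
    x.wrapping_add(y)
}

#[inline]
pub fn not_exported_add(x: u32, y: u32) -> u32 {
    x.wrapping_add(y)
}

fn main() {
    assert_eq!(exported_add(1, 2), not_exported_add(1, 2));
}
```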

View File

@ -28,7 +28,7 @@ use time_graph::{self, TimeGraph, Timeline};
use llvm;
use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef};
use llvm::{SMDiagnosticRef, ContextRef};
use {CrateTranslation, ModuleSource, ModuleTranslation, CompiledModule, ModuleKind};
use {CodegenResults, ModuleSource, ModuleCodegen, CompiledModule, ModuleKind};
use CrateInfo;
use rustc::hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc::ty::TyCtxt;
@ -383,16 +383,16 @@ impl CodegenContext {
}
}
pub(crate) fn save_temp_bitcode(&self, trans: &ModuleTranslation, name: &str) {
pub(crate) fn save_temp_bitcode(&self, module: &ModuleCodegen, name: &str) {
if !self.save_temps {
return
}
unsafe {
let ext = format!("{}.bc", name);
let cgu = Some(&trans.name[..]);
let cgu = Some(&module.name[..]);
let path = self.output_filenames.temp_path_ext(&ext, cgu);
let cstr = path2cstr(&path);
let llmod = trans.llvm().unwrap().llmod;
let llmod = module.llvm().unwrap().llmod;
llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
}
}
@ -491,13 +491,13 @@ unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_vo
// Unsafe due to LLVM calls.
unsafe fn optimize(cgcx: &CodegenContext,
diag_handler: &Handler,
mtrans: &ModuleTranslation,
module: &ModuleCodegen,
config: &ModuleConfig,
timeline: &mut Timeline)
-> Result<(), FatalError>
{
let (llmod, llcx, tm) = match mtrans.source {
ModuleSource::Translated(ref llvm) => (llvm.llmod, llvm.llcx, llvm.tm),
let (llmod, llcx, tm) = match module.source {
ModuleSource::Codegened(ref llvm) => (llvm.llmod, llvm.llcx, llvm.tm),
ModuleSource::Preexisting(_) => {
bug!("optimize_and_codegen: called with ModuleSource::Preexisting")
}
@ -505,7 +505,7 @@ unsafe fn optimize(cgcx: &CodegenContext,
let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
let module_name = mtrans.name.clone();
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
if config.emit_no_opt_bc {
@ -594,12 +594,12 @@ unsafe fn optimize(cgcx: &CodegenContext,
}
fn generate_lto_work(cgcx: &CodegenContext,
modules: Vec<ModuleTranslation>)
modules: Vec<ModuleCodegen>)
-> Vec<(WorkItem, u64)>
{
let mut timeline = cgcx.time_graph.as_ref().map(|tg| {
tg.start(TRANS_WORKER_TIMELINE,
TRANS_WORK_PACKAGE_KIND,
tg.start(CODEGEN_WORKER_TIMELINE,
CODEGEN_WORK_PACKAGE_KIND,
"generate lto")
}).unwrap_or(Timeline::noop());
let lto_modules = lto::run(cgcx, modules, &mut timeline)
@ -613,19 +613,19 @@ fn generate_lto_work(cgcx: &CodegenContext,
unsafe fn codegen(cgcx: &CodegenContext,
diag_handler: &Handler,
mtrans: ModuleTranslation,
module: ModuleCodegen,
config: &ModuleConfig,
timeline: &mut Timeline)
-> Result<CompiledModule, FatalError>
{
timeline.record("codegen");
let (llmod, llcx, tm) = match mtrans.source {
ModuleSource::Translated(ref llvm) => (llvm.llmod, llvm.llcx, llvm.tm),
let (llmod, llcx, tm) = match module.source {
ModuleSource::Codegened(ref llvm) => (llvm.llmod, llvm.llcx, llvm.tm),
ModuleSource::Preexisting(_) => {
bug!("codegen: called with ModuleSource::Preexisting")
}
};
let module_name = mtrans.name.clone();
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
@ -696,7 +696,7 @@ unsafe fn codegen(cgcx: &CodegenContext,
if config.emit_bc_compressed {
let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
let data = bytecode::encode(&mtrans.llmod_id, data);
let data = bytecode::encode(&module.llmod_id, data);
if let Err(e) = fs::write(&dst, data) {
diag_handler.err(&format!("failed to write bytecode: {}", e));
}
@ -805,7 +805,7 @@ unsafe fn codegen(cgcx: &CodegenContext,
}
drop(handlers);
Ok(mtrans.into_compiled_module(config.emit_obj,
Ok(module.into_compiled_module(config.emit_obj,
config.emit_bc,
config.emit_bc_compressed,
&cgcx.output_filenames))
@ -880,13 +880,13 @@ fn need_crate_bitcode_for_rlib(sess: &Session) -> bool {
sess.opts.output_types.contains_key(&OutputType::Exe)
}
pub fn start_async_translation(tcx: TyCtxt,
pub fn start_async_codegen(tcx: TyCtxt,
time_graph: Option<TimeGraph>,
link: LinkMeta,
metadata: EncodedMetadata,
coordinator_receive: Receiver<Box<Any + Send>>,
total_cgus: usize)
-> OngoingCrateTranslation {
-> OngoingCodegen {
let sess = tcx.sess;
let crate_name = tcx.crate_name(LOCAL_CRATE);
let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins");
@ -991,12 +991,12 @@ pub fn start_async_translation(tcx: TyCtxt,
allocator_config.time_passes = false;
let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
let (trans_worker_send, trans_worker_receive) = channel();
let (codegen_worker_send, codegen_worker_receive) = channel();
let coordinator_thread = start_executing_work(tcx,
&crate_info,
shared_emitter,
trans_worker_send,
codegen_worker_send,
coordinator_receive,
total_cgus,
sess.jobserver.clone(),
@ -1005,7 +1005,7 @@ pub fn start_async_translation(tcx: TyCtxt,
Arc::new(metadata_config),
Arc::new(allocator_config));
OngoingCrateTranslation {
OngoingCodegen {
crate_name,
link,
metadata,
@ -1015,7 +1015,7 @@ pub fn start_async_translation(tcx: TyCtxt,
time_graph,
coordinator_send: tcx.tx_to_llvm_workers.lock().clone(),
trans_worker_receive,
codegen_worker_receive,
shared_emitter_main,
future: coordinator_thread,
output_filenames: tcx.output_filenames(LOCAL_CRATE),
@ -1204,15 +1204,15 @@ fn produce_final_output_artifacts(sess: &Session,
// These are used in linking steps and will be cleaned up afterward.
}
pub(crate) fn dump_incremental_data(trans: &CrateTranslation) {
pub(crate) fn dump_incremental_data(codegen_results: &CodegenResults) {
println!("[incremental] Re-using {} out of {} modules",
trans.modules.iter().filter(|m| m.pre_existing).count(),
trans.modules.len());
codegen_results.modules.iter().filter(|m| m.pre_existing).count(),
codegen_results.modules.len());
}
enum WorkItem {
Optimize(ModuleTranslation),
LTO(lto::LtoModuleTranslation),
Optimize(ModuleCodegen),
LTO(lto::LtoModuleCodegen),
}
impl WorkItem {
@ -1233,7 +1233,7 @@ impl WorkItem {
enum WorkItemResult {
Compiled(CompiledModule),
NeedsLTO(ModuleTranslation),
NeedsLTO(ModuleCodegen),
}
fn execute_work_item(cgcx: &CodegenContext,
@ -1243,8 +1243,8 @@ fn execute_work_item(cgcx: &CodegenContext,
{
let diag_handler = cgcx.create_diag_handler();
let config = cgcx.config(work_item.kind());
let mtrans = match work_item {
WorkItem::Optimize(mtrans) => mtrans,
let module = match work_item {
WorkItem::Optimize(module) => module,
WorkItem::LTO(mut lto) => {
unsafe {
let module = lto.optimize(cgcx, timeline)?;
@ -1253,10 +1253,10 @@ fn execute_work_item(cgcx: &CodegenContext,
}
}
};
let module_name = mtrans.name.clone();
let module_name = module.name.clone();
let pre_existing = match mtrans.source {
ModuleSource::Translated(_) => None,
let pre_existing = match module.source {
ModuleSource::Codegened(_) => None,
ModuleSource::Preexisting(ref wp) => Some(wp.clone()),
};
@ -1264,7 +1264,7 @@ fn execute_work_item(cgcx: &CodegenContext,
let incr_comp_session_dir = cgcx.incr_comp_session_dir
.as_ref()
.unwrap();
let name = &mtrans.name;
let name = &module.name;
let mut object = None;
let mut bytecode = None;
let mut bytecode_compressed = None;
@ -1290,7 +1290,7 @@ fn execute_work_item(cgcx: &CodegenContext,
let source_file = in_incr_comp_dir(&incr_comp_session_dir,
&saved_file);
debug!("copying pre-existing module `{}` from {:?} to {}",
mtrans.name,
module.name,
source_file,
obj_out.display());
match link_or_copy(&source_file, &obj_out) {
@ -1308,7 +1308,7 @@ fn execute_work_item(cgcx: &CodegenContext,
assert_eq!(bytecode_compressed.is_some(), config.emit_bc_compressed);
Ok(WorkItemResult::Compiled(CompiledModule {
llmod_id: mtrans.llmod_id.clone(),
llmod_id: module.llmod_id.clone(),
name: module_name,
kind: ModuleKind::Regular,
pre_existing: true,
@ -1320,7 +1320,7 @@ fn execute_work_item(cgcx: &CodegenContext,
debug!("llvm-optimizing {:?}", module_name);
unsafe {
optimize(cgcx, &diag_handler, &mtrans, config, timeline)?;
optimize(cgcx, &diag_handler, &module, config, timeline)?;
// After we've done the initial round of optimizations we need to
// decide whether to synchronously codegen this module or ship it
@ -1328,7 +1328,7 @@ fn execute_work_item(cgcx: &CodegenContext,
// has to wait for all the initial modules to be optimized).
//
// Here we dispatch based on the `cgcx.lto` and kind of module we're
// translating...
// codegenning...
let needs_lto = match cgcx.lto {
Lto::No => false,
@ -1353,14 +1353,14 @@ fn execute_work_item(cgcx: &CodegenContext,
// Additionally here's where we also factor in the current LLVM
// version. If it doesn't support ThinLTO we skip this.
Lto::ThinLocal => {
mtrans.kind != ModuleKind::Allocator &&
module.kind != ModuleKind::Allocator &&
llvm::LLVMRustThinLTOAvailable()
}
};
// Metadata modules never participate in LTO regardless of the lto
// settings.
let needs_lto = needs_lto && mtrans.kind != ModuleKind::Metadata;
let needs_lto = needs_lto && module.kind != ModuleKind::Metadata;
// Don't run LTO passes when cross-lang LTO is enabled. The linker
// will do that for us in this case.
@ -1368,9 +1368,9 @@ fn execute_work_item(cgcx: &CodegenContext,
!cgcx.opts.debugging_opts.cross_lang_lto.embed_bitcode();
if needs_lto {
Ok(WorkItemResult::NeedsLTO(mtrans))
Ok(WorkItemResult::NeedsLTO(module))
} else {
let module = codegen(cgcx, &diag_handler, mtrans, config, timeline)?;
let module = codegen(cgcx, &diag_handler, module, config, timeline)?;
Ok(WorkItemResult::Compiled(module))
}
}
@ -1380,19 +1380,19 @@ fn execute_work_item(cgcx: &CodegenContext,
enum Message {
Token(io::Result<Acquired>),
NeedsLTO {
result: ModuleTranslation,
result: ModuleCodegen,
worker_id: usize,
},
Done {
result: Result<CompiledModule, ()>,
worker_id: usize,
},
TranslationDone {
CodegenDone {
llvm_work_item: WorkItem,
cost: u64,
},
TranslationComplete,
TranslateItem,
CodegenComplete,
CodegenItem,
}
struct Diagnostic {
@ -1404,14 +1404,14 @@ struct Diagnostic {
#[derive(PartialEq, Clone, Copy, Debug)]
enum MainThreadWorkerState {
Idle,
Translating,
Codegenning,
LLVMing,
}
fn start_executing_work(tcx: TyCtxt,
crate_info: &CrateInfo,
shared_emitter: SharedEmitter,
trans_worker_send: Sender<Message>,
codegen_worker_send: Sender<Message>,
coordinator_receive: Receiver<Box<Any + Send>>,
total_cgus: usize,
jobserver: Client,
@ -1520,7 +1520,7 @@ fn start_executing_work(tcx: TyCtxt,
// - Error reporting only can happen on the main thread because that's the
// only place where we have access to the compiler `Session`.
// - LLVM work can be done on any thread.
// - Translation can only happen on the main thread.
// - Codegen can only happen on the main thread.
// - Each thread doing substantial work must be in possession of a `Token`
// from the `Jobserver`.
// - The compiler process always holds one `Token`. Any additional `Tokens`
@ -1536,7 +1536,7 @@ fn start_executing_work(tcx: TyCtxt,
// any error messages it has received. It might even abort compilation if
// it has received a fatal error. In this case we rely on all other threads
// being torn down automatically with the main thread.
// Since the main thread will often be busy doing translation work, error
// Since the main thread will often be busy doing codegen work, error
// reporting will be somewhat delayed, since the message queue can only be
// checked in between two work packages.
//
@ -1552,10 +1552,10 @@ fn start_executing_work(tcx: TyCtxt,
// thread about what work to do when, and it will spawn off LLVM worker
// threads as open LLVM WorkItems become available.
//
// The job of the main thread is to translate CGUs into LLVM work package
// The job of the main thread is to codegen CGUs into LLVM work package
// (since the main thread is the only thread that can do this). The main
// thread will block until it receives a message from the coordinator, upon
// which it will translate one CGU, send it to the coordinator and block
// which it will codegen one CGU, send it to the coordinator and block
// again. This way the coordinator can control what the main thread is
// doing.
//
@ -1573,7 +1573,7 @@ fn start_executing_work(tcx: TyCtxt,
// if possible. These two goals are at odds with each other: If memory
// consumption were not an issue, we could just let the main thread produce
// LLVM WorkItems at full speed, assuring maximal utilization of
// Tokens/LLVM worker threads. However, since translation usual is faster
// Tokens/LLVM worker threads. However, since codegen usually is faster
// than LLVM processing, the queue of LLVM WorkItems would fill up and each
// WorkItem potentially holds on to a substantial amount of memory.
//
@ -1637,7 +1637,7 @@ fn start_executing_work(tcx: TyCtxt,
// The final job the coordinator thread is responsible for is managing LTO
// and how that works. When LTO is requested what we'll do is collect all
// optimized LLVM modules into a local vector on the coordinator. Once all
// modules have been translated and optimized we hand this to the `lto`
// modules have been codegened and optimized we hand this to the `lto`
// module for further optimization. The `lto` module will return back a list
// of more modules to work on, which the coordinator will continue to spawn
// work for.
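The scheduling idea sketched in these comments, keep a queue of LLVM work items ordered by estimated cost and hand the most expensive ones out first, is small enough to show in isolation. The toy below is a standalone illustration with made-up names; it is not the coordinator itself.

```rust
// Standalone toy: the queue stays sorted by estimated cost so that `pop()`
// always dispatches the costliest pending item, as the real coordinator does
// when it receives Message::CodegenDone further down.
struct PendingItem {
    name: &'static str,
    cost: u64,
}

fn main() {
    let mut queue = vec![
        PendingItem { name: "cgu-small", cost: 10 },
        PendingItem { name: "cgu-huge", cost: 500 },
        PendingItem { name: "cgu-medium", cost: 120 },
    ];
    // Cheap items sort to the front; expensive ones are popped first.
    queue.sort_by_key(|item| item.cost);
    while let Some(item) = queue.pop() {
        // In the real scheduler this would go to an idle LLVM worker thread
        // holding a jobserver token.
        println!("dispatching {} (estimated cost {})", item.name, item.cost);
    }
}
```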
@ -1663,15 +1663,15 @@ fn start_executing_work(tcx: TyCtxt,
};
// This is where we collect codegen units that have gone all the way
// through translation and LLVM.
// through codegen and LLVM.
let mut compiled_modules = vec![];
let mut compiled_metadata_module = None;
let mut compiled_allocator_module = None;
let mut needs_lto = Vec::new();
let mut started_lto = false;
// This flag tracks whether all items have gone through translations
let mut translation_done = false;
// This flag tracks whether all items have gone through codegens
let mut codegen_done = false;
// This is the queue of LLVM work items that still need processing.
let mut work_items = Vec::<(WorkItem, u64)>::new();
@ -1687,23 +1687,23 @@ fn start_executing_work(tcx: TyCtxt,
// Run the message loop while there's still anything that needs message
// processing:
while !translation_done ||
while !codegen_done ||
work_items.len() > 0 ||
running > 0 ||
needs_lto.len() > 0 ||
main_thread_worker_state != MainThreadWorkerState::Idle {
// While there are still CGUs to be translated, the coordinator has
// While there are still CGUs to be codegened, the coordinator has
// to decide how to utilize the compiler processes implicit Token:
// For translating more CGU or for running them through LLVM.
if !translation_done {
// For codegenning more CGU or for running them through LLVM.
if !codegen_done {
if main_thread_worker_state == MainThreadWorkerState::Idle {
if !queue_full_enough(work_items.len(), running, max_workers) {
// The queue is not full enough, translate more items:
if let Err(_) = trans_worker_send.send(Message::TranslateItem) {
panic!("Could not send Message::TranslateItem to main thread")
// The queue is not full enough, codegen more items:
if let Err(_) = codegen_worker_send.send(Message::CodegenItem) {
panic!("Could not send Message::CodegenItem to main thread")
}
main_thread_worker_state = MainThreadWorkerState::Translating;
main_thread_worker_state = MainThreadWorkerState::Codegenning;
} else {
// The queue is full enough to not let the worker
// threads starve. Use the implicit Token to do some
@ -1721,7 +1721,7 @@ fn start_executing_work(tcx: TyCtxt,
}
}
} else {
// If we've finished everything related to normal translation
// If we've finished everything related to normal codegen
// then it must be the case that we've got some LTO work to do.
// Perform the serial work here of figuring out what we're
// going to LTO and then push a bunch of work items onto our
@ -1742,7 +1742,7 @@ fn start_executing_work(tcx: TyCtxt,
}
}
// In this branch, we know that everything has been translated,
// In this branch, we know that everything has been codegened,
// so it's just a matter of determining whether the implicit
// Token is free to use for LLVM work.
match main_thread_worker_state {
@ -1768,9 +1768,9 @@ fn start_executing_work(tcx: TyCtxt,
main_thread_worker_state = MainThreadWorkerState::LLVMing;
}
}
MainThreadWorkerState::Translating => {
bug!("trans worker should not be translating after \
translation was already completed")
MainThreadWorkerState::Codegenning => {
bug!("codegen worker should not be codegenning after \
codegen was already completed")
}
MainThreadWorkerState::LLVMing => {
// Already making good use of that token
@ -1812,7 +1812,7 @@ fn start_executing_work(tcx: TyCtxt,
// If the main thread token is used for LLVM work
// at the moment, we turn that thread into a regular
// LLVM worker thread, so the main thread is free
// to react to translation demand.
// to react to codegen demand.
main_thread_worker_state = MainThreadWorkerState::Idle;
running += 1;
}
@ -1826,7 +1826,7 @@ fn start_executing_work(tcx: TyCtxt,
}
}
Message::TranslationDone { llvm_work_item, cost } => {
Message::CodegenDone { llvm_work_item, cost } => {
// We keep the queue sorted by estimated processing cost,
// so that more expensive items are processed earlier. This
// is good for throughput as it gives the main thread more
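A standalone sketch of that ordering (not the rustc code itself): insert each finished item so the vector stays sorted ascending by cost, so that popping from the back always yields the most expensive remaining item first.

fn insert_by_cost<T>(work_items: &mut Vec<(T, u64)>, item: T, cost: u64) {
    // Keep the queue sorted ascending by estimated cost; `pop()` then returns
    // the highest-cost item, which is what the scheduler wants to start first.
    let pos = work_items
        .binary_search_by_key(&cost, |&(_, c)| c)
        .unwrap_or_else(|e| e);
    work_items.insert(pos, (item, cost));
}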
@ -1844,14 +1844,14 @@ fn start_executing_work(tcx: TyCtxt,
helper.request_token();
assert_eq!(main_thread_worker_state,
MainThreadWorkerState::Translating);
MainThreadWorkerState::Codegenning);
main_thread_worker_state = MainThreadWorkerState::Idle;
}
Message::TranslationComplete => {
translation_done = true;
Message::CodegenComplete => {
codegen_done = true;
assert_eq!(main_thread_worker_state,
MainThreadWorkerState::Translating);
MainThreadWorkerState::Codegenning);
main_thread_worker_state = MainThreadWorkerState::Idle;
}
@ -1902,8 +1902,8 @@ fn start_executing_work(tcx: TyCtxt,
// Exit the coordinator thread
return Err(())
}
Message::TranslateItem => {
bug!("the coordinator should not receive translation requests")
Message::CodegenItem => {
bug!("the coordinator should not receive codegen requests")
}
}
}
@ -1934,7 +1934,7 @@ fn start_executing_work(tcx: TyCtxt,
});
// A heuristic that determines if we have enough LLVM WorkItems in the
// queue so that the main thread can do LLVM work instead of translation
// queue so that the main thread can do LLVM work instead of codegen
fn queue_full_enough(items_in_queue: usize,
workers_running: usize,
max_workers: usize) -> bool {
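The hunk cuts off before the body; one plausible shape for such a heuristic (an assumption made here for illustration, not the elided code) is to compare the queue depth against how many workers could still pick up an item:

fn queue_full_enough(items_in_queue: usize,
                     workers_running: usize,
                     max_workers: usize) -> bool {
    // "Full enough" meaning: something is queued, and the backlog already
    // covers the workers that are not currently busy.
    items_in_queue > 0
        && items_in_queue >= max_workers.saturating_sub(workers_running / 2)
}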
@ -1955,10 +1955,10 @@ fn start_executing_work(tcx: TyCtxt,
}
}
pub const TRANS_WORKER_ID: usize = ::std::usize::MAX;
pub const TRANS_WORKER_TIMELINE: time_graph::TimelineId =
time_graph::TimelineId(TRANS_WORKER_ID);
pub const TRANS_WORK_PACKAGE_KIND: time_graph::WorkPackageKind =
pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX;
pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId =
time_graph::TimelineId(CODEGEN_WORKER_ID);
pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind =
time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]);
const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind =
time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]);
@ -2231,7 +2231,7 @@ impl SharedEmitterMain {
}
}
pub struct OngoingCrateTranslation {
pub struct OngoingCodegen {
crate_name: Symbol,
link: LinkMeta,
metadata: EncodedMetadata,
@ -2240,17 +2240,17 @@ pub struct OngoingCrateTranslation {
crate_info: CrateInfo,
time_graph: Option<TimeGraph>,
coordinator_send: Sender<Box<Any + Send>>,
trans_worker_receive: Receiver<Message>,
codegen_worker_receive: Receiver<Message>,
shared_emitter_main: SharedEmitterMain,
future: thread::JoinHandle<Result<CompiledModules, ()>>,
output_filenames: Arc<OutputFilenames>,
}
impl OngoingCrateTranslation {
impl OngoingCodegen {
pub(crate) fn join(
self,
sess: &Session
) -> (CrateTranslation, FxHashMap<WorkProductId, WorkProduct>) {
) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
self.shared_emitter_main.check(sess, true);
let compiled_modules = match self.future.join() {
Ok(Ok(compiled_modules)) => compiled_modules,
@ -2259,7 +2259,7 @@ impl OngoingCrateTranslation {
panic!("expected abort due to worker thread errors")
},
Err(_) => {
sess.fatal("Error during translation/LLVM phase.");
sess.fatal("Error during codegen/LLVM phase.");
}
};
@ -2282,7 +2282,7 @@ impl OngoingCrateTranslation {
unsafe { llvm::LLVMRustPrintPassTimings(); }
}
let trans = CrateTranslation {
(CodegenResults {
crate_name: self.crate_name,
link: self.link,
metadata: self.metadata,
@ -2293,35 +2293,33 @@ impl OngoingCrateTranslation {
modules: compiled_modules.modules,
allocator_module: compiled_modules.allocator_module,
metadata_module: compiled_modules.metadata_module,
};
(trans, work_products)
}, work_products)
}
pub(crate) fn submit_pre_translated_module_to_llvm(&self,
pub(crate) fn submit_pre_codegened_module_to_llvm(&self,
tcx: TyCtxt,
mtrans: ModuleTranslation) {
self.wait_for_signal_to_translate_item();
module: ModuleCodegen) {
self.wait_for_signal_to_codegen_item();
self.check_for_errors(tcx.sess);
// These are generally cheap and won't throw off scheduling.
let cost = 0;
submit_translated_module_to_llvm(tcx, mtrans, cost);
submit_codegened_module_to_llvm(tcx, module, cost);
}
pub fn translation_finished(&self, tcx: TyCtxt) {
self.wait_for_signal_to_translate_item();
pub fn codegen_finished(&self, tcx: TyCtxt) {
self.wait_for_signal_to_codegen_item();
self.check_for_errors(tcx.sess);
drop(self.coordinator_send.send(Box::new(Message::TranslationComplete)));
drop(self.coordinator_send.send(Box::new(Message::CodegenComplete)));
}
pub fn check_for_errors(&self, sess: &Session) {
self.shared_emitter_main.check(sess, false);
}
pub fn wait_for_signal_to_translate_item(&self) {
match self.trans_worker_receive.recv() {
Ok(Message::TranslateItem) => {
pub fn wait_for_signal_to_codegen_item(&self) {
match self.codegen_worker_receive.recv() {
Ok(Message::CodegenItem) => {
// Nothing to do
}
Ok(_) => panic!("unexpected message"),
@ -2333,11 +2331,11 @@ impl OngoingCrateTranslation {
}
}
pub(crate) fn submit_translated_module_to_llvm(tcx: TyCtxt,
mtrans: ModuleTranslation,
pub(crate) fn submit_codegened_module_to_llvm(tcx: TyCtxt,
module: ModuleCodegen,
cost: u64) {
let llvm_work_item = WorkItem::Optimize(mtrans);
drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::TranslationDone {
let llvm_work_item = WorkItem::Optimize(module);
drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone {
llvm_work_item,
cost,
})));
@ -8,14 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Translate the completed AST to the LLVM IR.
//! Codegen the completed AST to the LLVM IR.
//!
//! Some functions here, such as trans_block and trans_expr, return a value --
//! the result of the translation to LLVM -- while others, such as trans_fn
//! and trans_item, are called only for the side effect of adding a
//! Some functions here, such as codegen_block and codegen_expr, return a value --
//! the result of the codegen to LLVM -- while others, such as codegen_fn
//! and mono_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about trans:
//! Hopefully useful general knowledge about codegen:
//!
//! * There's no way to find out the Ty type of a ValueRef. Doing so
//! would be "trying to get the eggs out of an omelette" (credit:
@ -25,12 +25,12 @@
use super::ModuleLlvm;
use super::ModuleSource;
use super::ModuleTranslation;
use super::ModuleCodegen;
use super::ModuleKind;
use abi;
use back::link;
use back::write::{self, OngoingCrateTranslation, create_target_machine};
use back::write::{self, OngoingCodegen, create_target_machine};
use llvm::{ContextRef, ModuleRef, ValueRef, Vector, get_param};
use llvm;
use metadata;
@ -66,9 +66,9 @@ use meth;
use mir;
use monomorphize::Instance;
use monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt};
use rustc_trans_utils::symbol_names_test;
use rustc_codegen_utils::symbol_names_test;
use time_graph;
use trans_item::{MonoItem, BaseMonoItemExt, MonoItemExt, DefPathBasedNames};
use mono_item::{MonoItem, BaseMonoItemExt, MonoItemExt, DefPathBasedNames};
use type_::Type;
use type_of::LayoutLlvmExt;
use rustc::util::nodemap::{FxHashMap, FxHashSet, DefIdSet};
@ -93,7 +93,7 @@ use syntax::ast;
use mir::operand::OperandValue;
pub use rustc_trans_utils::check_for_rustc_errors_attr;
pub use rustc_codegen_utils::check_for_rustc_errors_attr;
pub struct StatRecorder<'a, 'tcx: 'a> {
cx: &'a CodegenCx<'a, 'tcx>,
@ -114,7 +114,7 @@ impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
fn drop(&mut self) {
if self.cx.sess().trans_stats() {
if self.cx.sess().codegen_stats() {
let mut stats = self.cx.stats.borrow_mut();
let iend = stats.n_llvm_insns;
stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart));
@ -458,8 +458,8 @@ pub fn call_memset<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
}
pub fn trans_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) {
let _s = if cx.sess().trans_stats() {
pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) {
let _s = if cx.sess().codegen_stats() {
let mut instance_name = String::new();
DefPathBasedNames::new(cx.tcx, true, true)
.push_def_path(instance.def_id(), &mut instance_name);
@ -471,7 +471,7 @@ pub fn trans_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tc
// this is an info! to allow collecting monomorphization statistics
// and to allow finding the last function before LLVM aborts from
// release builds.
info!("trans_instance({})", instance);
info!("codegen_instance({})", instance);
let fn_ty = instance.ty(cx.tcx);
let sig = common::ty_fn_sig(cx, fn_ty);
@ -506,7 +506,7 @@ pub fn trans_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tc
}
let mir = cx.tcx.instance_mir(instance.def);
mir::trans_mir(cx, lldecl, &mir, instance, sig);
mir::codegen_mir(cx, lldecl, &mir, instance, sig);
}
pub fn set_link_section(cx: &CodegenCx,
@ -712,9 +712,9 @@ pub fn iter_globals(llmod: llvm::ModuleRef) -> ValueIter {
}
}
pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
rx: mpsc::Receiver<Box<Any + Send>>)
-> OngoingCrateTranslation {
-> OngoingCodegen {
check_for_rustc_errors_attr(tcx);
@ -734,17 +734,17 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let crate_hash = tcx.crate_hash(LOCAL_CRATE);
let link_meta = link::build_link_meta(crate_hash);
// Translate the metadata.
// Codegen the metadata.
let llmod_id = "metadata";
let (metadata_llcx, metadata_llmod, metadata) =
time(tcx.sess, "write metadata", || {
write_metadata(tcx, llmod_id, &link_meta)
});
let metadata_module = ModuleTranslation {
let metadata_module = ModuleCodegen {
name: link::METADATA_MODULE_NAME.to_string(),
llmod_id: llmod_id.to_string(),
source: ModuleSource::Translated(ModuleLlvm {
source: ModuleSource::Codegened(ModuleLlvm {
llcx: metadata_llcx,
llmod: metadata_llmod,
tm: create_target_machine(tcx.sess, false),
@ -752,16 +752,16 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
kind: ModuleKind::Metadata,
};
let time_graph = if tcx.sess.opts.debugging_opts.trans_time_graph {
let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph {
Some(time_graph::TimeGraph::new())
} else {
None
};
// Skip crate items and just output metadata in -Z no-trans mode.
if tcx.sess.opts.debugging_opts.no_trans ||
!tcx.sess.opts.output_types.should_trans() {
let ongoing_translation = write::start_async_translation(
// Skip crate items and just output metadata in -Z no-codegen mode.
if tcx.sess.opts.debugging_opts.no_codegen ||
!tcx.sess.opts.output_types.should_codegen() {
let ongoing_codegen = write::start_async_codegen(
tcx,
time_graph.clone(),
link_meta,
@ -769,20 +769,20 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
rx,
1);
ongoing_translation.submit_pre_translated_module_to_llvm(tcx, metadata_module);
ongoing_translation.translation_finished(tcx);
ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module);
ongoing_codegen.codegen_finished(tcx);
assert_and_save_dep_graph(tcx);
ongoing_translation.check_for_errors(tcx.sess);
ongoing_codegen.check_for_errors(tcx.sess);
return ongoing_translation;
return ongoing_codegen;
}
// Run the translation item collector and partition the collected items into
// Run the monomorphization collector and partition the collected items into
// codegen units.
let codegen_units =
tcx.collect_and_partition_translation_items(LOCAL_CRATE).1;
tcx.collect_and_partition_mono_items(LOCAL_CRATE).1;
let codegen_units = (*codegen_units).clone();
// Force all codegen_unit queries so they are already either red or green
@ -796,7 +796,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}
let ongoing_translation = write::start_async_translation(
let ongoing_codegen = write::start_async_codegen(
tcx,
time_graph.clone(),
link_meta,
@ -804,7 +804,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
rx,
codegen_units.len());
// Translate an allocator shim, if any
// Codegen an allocator shim, if any
let allocator_module = if let Some(kind) = *tcx.sess.allocator_kind.get() {
unsafe {
let llmod_id = "allocator";
@ -816,13 +816,13 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
tm: create_target_machine(tcx.sess, false),
};
time(tcx.sess, "write allocator module", || {
allocator::trans(tcx, &modules, kind)
allocator::codegen(tcx, &modules, kind)
});
Some(ModuleTranslation {
Some(ModuleCodegen {
name: link::ALLOCATOR_MODULE_NAME.to_string(),
llmod_id: llmod_id.to_string(),
source: ModuleSource::Translated(modules),
source: ModuleSource::Codegened(modules),
kind: ModuleKind::Allocator,
})
}
@ -831,10 +831,10 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
};
if let Some(allocator_module) = allocator_module {
ongoing_translation.submit_pre_translated_module_to_llvm(tcx, allocator_module);
ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
}
ongoing_translation.submit_pre_translated_module_to_llvm(tcx, metadata_module);
ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module);
// We sort the codegen units by size. This way we can schedule work for LLVM
// a bit more efficiently.
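A small sketch of that scheduling idea on plain data (hypothetical (name, size) pairs rather than the real CodegenUnit type): put the largest units first so the longest LLVM jobs start as early as possible.

fn order_cgus_for_llvm(mut cgus: Vec<(String, usize)>) -> Vec<(String, usize)> {
    // Largest estimated size first.
    cgus.sort_by_key(|&(_, size)| std::cmp::Reverse(size));
    cgus
}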
@ -844,12 +844,12 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
codegen_units
};
let mut total_trans_time = Duration::new(0, 0);
let mut total_codegen_time = Duration::new(0, 0);
let mut all_stats = Stats::default();
for cgu in codegen_units.into_iter() {
ongoing_translation.wait_for_signal_to_translate_item();
ongoing_translation.check_for_errors(tcx.sess);
ongoing_codegen.wait_for_signal_to_codegen_item();
ongoing_codegen.check_for_errors(tcx.sess);
// First, if incremental compilation is enabled, we try to re-use the
// codegen unit from the cache.
@ -878,14 +878,14 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
// 1. http://llvm.org/bugs/show_bug.cgi?id=11479
let llmod_id = format!("{}.rs", cgu.name());
let module = ModuleTranslation {
let module = ModuleCodegen {
name: cgu.name().to_string(),
source: ModuleSource::Preexisting(buf),
kind: ModuleKind::Regular,
llmod_id,
};
tcx.dep_graph.mark_loaded_from_cache(dep_node_index, true);
write::submit_translated_module_to_llvm(tcx, module, 0);
write::submit_codegened_module_to_llvm(tcx, module, 0);
// Continue to next cgu, this one is done.
continue
}
@ -896,23 +896,23 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
let _timing_guard = time_graph.as_ref().map(|time_graph| {
time_graph.start(write::TRANS_WORKER_TIMELINE,
write::TRANS_WORK_PACKAGE_KIND,
time_graph.start(write::CODEGEN_WORKER_TIMELINE,
write::CODEGEN_WORK_PACKAGE_KIND,
&format!("codegen {}", cgu.name()))
});
let start_time = Instant::now();
all_stats.extend(tcx.compile_codegen_unit(*cgu.name()));
total_trans_time += start_time.elapsed();
ongoing_translation.check_for_errors(tcx.sess);
total_codegen_time += start_time.elapsed();
ongoing_codegen.check_for_errors(tcx.sess);
}
ongoing_translation.translation_finished(tcx);
ongoing_codegen.codegen_finished(tcx);
// Since the main thread is sometimes blocked during trans, we keep track
// Since the main thread is sometimes blocked during codegen, we keep track
// of -Ztime-passes output manually.
print_time_passes_entry(tcx.sess.time_passes(),
"translate to LLVM IR",
total_trans_time);
"codegen to LLVM IR",
total_codegen_time);
if tcx.sess.opts.incremental.is_some() {
::rustc_incremental::assert_module_sources::assert_module_sources(tcx);
@ -920,8 +920,8 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
symbol_names_test::report_symbol_names(tcx);
if tcx.sess.trans_stats() {
println!("--- trans stats ---");
if tcx.sess.codegen_stats() {
println!("--- codegen stats ---");
println!("n_glues_created: {}", all_stats.n_glues_created);
println!("n_null_glues: {}", all_stats.n_null_glues);
println!("n_real_glues: {}", all_stats.n_real_glues);
@ -942,10 +942,10 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}
ongoing_translation.check_for_errors(tcx.sess);
ongoing_codegen.check_for_errors(tcx.sess);
assert_and_save_dep_graph(tcx);
ongoing_translation
ongoing_codegen
}
fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
@ -958,14 +958,14 @@ fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
|| rustc_incremental::save_dep_graph(tcx));
}
fn collect_and_partition_translation_items<'a, 'tcx>(
fn collect_and_partition_mono_items<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
cnum: CrateNum,
) -> (Arc<DefIdSet>, Arc<Vec<Arc<CodegenUnit<'tcx>>>>)
{
assert_eq!(cnum, LOCAL_CRATE);
let collection_mode = match tcx.sess.opts.debugging_opts.print_trans_items {
let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items {
Some(ref s) => {
let mode_string = s.to_lowercase();
let mode_string = mode_string.trim();
@ -992,7 +992,7 @@ fn collect_and_partition_translation_items<'a, 'tcx>(
};
let (items, inlining_map) =
time(tcx.sess, "translation item collection", || {
time(tcx.sess, "monomorphization collection", || {
collector::collect_crate_mono_items(tcx, collection_mode)
});
@ -1016,20 +1016,20 @@ fn collect_and_partition_translation_items<'a, 'tcx>(
.collect::<Vec<_>>()
});
let translation_items: DefIdSet = items.iter().filter_map(|trans_item| {
match *trans_item {
let mono_items: DefIdSet = items.iter().filter_map(|mono_item| {
match *mono_item {
MonoItem::Fn(ref instance) => Some(instance.def_id()),
MonoItem::Static(def_id) => Some(def_id),
_ => None,
}
}).collect();
if tcx.sess.opts.debugging_opts.print_trans_items.is_some() {
if tcx.sess.opts.debugging_opts.print_mono_items.is_some() {
let mut item_to_cgus = FxHashMap();
for cgu in &codegen_units {
for (&trans_item, &linkage) in cgu.items() {
item_to_cgus.entry(trans_item)
for (&mono_item, &linkage) in cgu.items() {
item_to_cgus.entry(mono_item)
.or_insert(Vec::new())
.push((cgu.name().clone(), linkage));
}
@ -1073,11 +1073,11 @@ fn collect_and_partition_translation_items<'a, 'tcx>(
item_keys.sort();
for item in item_keys {
println!("TRANS_ITEM {}", item);
println!("MONO_ITEM {}", item);
}
}
(Arc::new(translation_items), Arc::new(codegen_units))
(Arc::new(mono_items), Arc::new(codegen_units))
}
impl CrateInfo {
@ -1174,10 +1174,10 @@ impl CrateInfo {
}
}
fn is_translated_item(tcx: TyCtxt, id: DefId) -> bool {
let (all_trans_items, _) =
tcx.collect_and_partition_translation_items(LOCAL_CRATE);
all_trans_items.contains(&id)
fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool {
let (all_mono_items, _) =
tcx.collect_and_partition_mono_items(LOCAL_CRATE);
all_mono_items.contains(&id)
}
fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
@ -1185,23 +1185,23 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let cgu = tcx.codegen_unit(cgu);
let start_time = Instant::now();
let (stats, module) = module_translation(tcx, cgu);
let time_to_translate = start_time.elapsed();
let (stats, module) = module_codegen(tcx, cgu);
let time_to_codegen = start_time.elapsed();
// We assume that the cost to run LLVM on a CGU is proportional to
// the time we needed for translating it.
let cost = time_to_translate.as_secs() * 1_000_000_000 +
time_to_translate.subsec_nanos() as u64;
// the time we needed for codegenning it.
let cost = time_to_codegen.as_secs() * 1_000_000_000 +
time_to_codegen.subsec_nanos() as u64;
write::submit_translated_module_to_llvm(tcx,
write::submit_codegened_module_to_llvm(tcx,
module,
cost);
return stats;
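For reference, the same conversion written out on its own: the elapsed Duration is flattened into a u64 nanosecond count that serves as the proportional cost estimate handed to the LLVM work queue.

fn duration_to_cost(time_to_codegen: std::time::Duration) -> u64 {
    // Whole seconds scaled to nanoseconds, plus the sub-second remainder.
    time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64
}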
fn module_translation<'a, 'tcx>(
fn module_codegen<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
cgu: Arc<CodegenUnit<'tcx>>)
-> (Stats, ModuleTranslation)
-> (Stats, ModuleCodegen)
{
let cgu_name = cgu.name().to_string();
@ -1218,18 +1218,18 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
tcx.crate_disambiguator(LOCAL_CRATE)
.to_fingerprint().to_hex());
// Instantiate translation items without filling out definitions yet...
// Instantiate monomorphizations without filling out definitions yet...
let cx = CodegenCx::new(tcx, cgu, &llmod_id);
let module = {
let trans_items = cx.codegen_unit
let mono_items = cx.codegen_unit
.items_in_deterministic_order(cx.tcx);
for &(trans_item, (linkage, visibility)) in &trans_items {
trans_item.predefine(&cx, linkage, visibility);
for &(mono_item, (linkage, visibility)) in &mono_items {
mono_item.predefine(&cx, linkage, visibility);
}
// ... and now that we have everything pre-defined, fill out those definitions.
for &(trans_item, _) in &trans_items {
trans_item.define(&cx);
for &(mono_item, _) in &mono_items {
mono_item.define(&cx);
}
// If this codegen unit contains the main function, also create the
@ -1273,9 +1273,9 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
tm: create_target_machine(cx.sess(), false),
};
ModuleTranslation {
ModuleCodegen {
name: cgu_name,
source: ModuleSource::Translated(llvm_module),
source: ModuleSource::Codegened(llvm_module),
kind: ModuleKind::Regular,
llmod_id,
}
@ -1286,13 +1286,13 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
pub fn provide(providers: &mut Providers) {
providers.collect_and_partition_translation_items =
collect_and_partition_translation_items;
providers.collect_and_partition_mono_items =
collect_and_partition_mono_items;
providers.is_translated_item = is_translated_item;
providers.is_codegened_item = is_codegened_item;
providers.codegen_unit = |tcx, name| {
let (_, all) = tcx.collect_and_partition_translation_items(LOCAL_CRATE);
let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
all.iter()
.find(|cgu| *cgu.name() == name)
.cloned()
@ -1369,9 +1369,9 @@ pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
mod temp_stable_hash_impls {
use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher,
HashStable};
use ModuleTranslation;
use ModuleCodegen;
impl<HCX> HashStable<HCX> for ModuleTranslation {
impl<HCX> HashStable<HCX> for ModuleCodegen {
fn hash_stable<W: StableHasherResult>(&self,
_: &mut HCX,
_: &mut StableHasher<W>) {
@ -108,7 +108,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
fn count_insn(&self, category: &str) {
if self.cx.sess().trans_stats() {
if self.cx.sess().codegen_stats() {
self.cx.stats.borrow_mut().n_llvm_insns += 1;
}
if self.cx.sess().count_llvm_insns() {
@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Handles translation of callees as well as other call-related
//! Handles codegen of callees as well as other call-related
//! things. Callees are a superset of normal rust values and sometimes
//! have different representations. In particular, top-level fn items
//! and methods are represented as just a fn ptr and not a full
@ -28,7 +28,7 @@ use rustc::ty::layout::LayoutOf;
use rustc::ty::subst::Substs;
use rustc_target::spec::PanicStrategy;
/// Translates a reference to a fn/method item, monomorphizing and
/// Codegens a reference to a fn/method item, monomorphizing and
/// inlining as it goes.
///
/// # Parameters
@ -116,9 +116,9 @@ pub fn get_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
// just declared.
//
// This is sort of subtle. Inside our codegen unit we started off
// compilation by predefining all our own `TransItem` instances. That
// is, everything we're translating ourselves is already defined. That
// means that anything we're actually translating in this codegen unit
// compilation by predefining all our own `MonoItem` instances. That
// is, everything we're codegenning ourselves is already defined. That
// means that anything we're actually codegenning in this codegen unit
// will have hit the above branch in `get_declared_value`. As a result,
// we're guaranteed here that we're declaring a symbol that won't get
// defined, or in other words we're referencing a value from another
@ -181,7 +181,7 @@ pub fn get_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
}
} else {
// This is a non-generic function
if cx.tcx.is_translated_item(instance_def_id) {
if cx.tcx.is_codegened_item(instance_def_id) {
// This is a function that is instantiated in the local crate
if instance_def_id.is_local() {
@ -10,7 +10,7 @@
#![allow(non_camel_case_types, non_snake_case)]
//! Code that is useful in various trans modules.
//! Code that is useful in various codegen modules.
use llvm;
use llvm::{ValueRef, ContextRef, TypeKind};
@ -446,4 +446,3 @@ pub fn ty_fn_sig<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
_ => bug!("unexpected type {:?} to ty_fn_sig", ty)
}
}
@ -128,7 +128,7 @@ pub fn get_static(cx: &CodegenCx, def_id: DefId) -> ValueRef {
ref attrs, span, node: hir::ItemStatic(..), ..
}) => {
if declare::get_declared_value(cx, &sym[..]).is_some() {
span_bug!(span, "trans: Conflicting symbol names for static?");
span_bug!(span, "Conflicting symbol names for static?");
}
let g = declare::define_global(cx, &sym[..], llty).unwrap();
@ -145,7 +145,7 @@ pub fn get_static(cx: &CodegenCx, def_id: DefId) -> ValueRef {
hir_map::NodeForeignItem(&hir::ForeignItem {
ref attrs, span, node: hir::ForeignItemStatic(..), ..
}) => {
let g = if let Some(linkage) = cx.tcx.trans_fn_attrs(def_id).linkage {
let g = if let Some(linkage) = cx.tcx.codegen_fn_attrs(def_id).linkage {
// If this is a static with a linkage specified, then we need to handle
// it a little specially. The typesystem prevents things like &T and
// extern "C" fn() from being non-null, so we can't just declare a
@ -221,8 +221,8 @@ pub fn get_static(cx: &CodegenCx, def_id: DefId) -> ValueRef {
// However, in some scenarios we defer emission of statics to downstream
// crates, so there are cases where a static with an upstream DefId
// is actually present in the current crate. We can find out via the
// is_translated_item query.
if !cx.tcx.is_translated_item(def_id) {
// is_codegened_item query.
if !cx.tcx.is_codegened_item(def_id) {
unsafe {
llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
}
@ -243,14 +243,14 @@ pub fn get_static(cx: &CodegenCx, def_id: DefId) -> ValueRef {
g
}
pub fn trans_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
pub fn codegen_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
def_id: DefId,
is_mutable: bool,
attrs: &[ast::Attribute]) {
unsafe {
let g = get_static(cx, def_id);
let v = match ::mir::trans_static_initializer(cx, def_id) {
let v = match ::mir::codegen_static_initializer(cx, def_id) {
Ok(v) => v,
// Error has already been reported
Err(_) => return,
@ -290,7 +290,7 @@ pub fn trans_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
// To avoid breaking any invariants, we leave around the old
// global for the moment; we'll replace all references to it
// with the new global later. (See base::trans_crate.)
// with the new global later. (See base::codegen_backend.)
cx.statics_to_rauw.borrow_mut().push((g, new_g));
new_g
};
@ -69,7 +69,7 @@ pub struct CodegenCx<'a, 'tcx: 'a> {
/// Val is a ValueRef holding a *[T].
///
/// Needed because LLVM loses pointer->pointee association
/// when we ptrcast, and we have to ptrcast during translation
/// when we ptrcast, and we have to ptrcast during codegen
/// of a [T] const because we form a slice, a (*T,usize) pair, not
/// a pointer to an LLVM array type. Similar for trait objects.
pub const_unsized: RefCell<FxHashMap<ValueRef, ValueRef>>,
@ -81,7 +81,7 @@ pub struct CodegenCx<'a, 'tcx: 'a> {
pub statics: RefCell<FxHashMap<ValueRef, DefId>>,
/// List of globals for static variables which need to be passed to the
/// LLVM function ReplaceAllUsesWith (RAUW) when translation is complete.
/// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
/// (We have to make sure we don't invalidate any ValueRefs referring
/// to constants.)
pub statics_to_rauw: RefCell<Vec<(ValueRef, ValueRef)>>,
@ -351,7 +351,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
// within it, then we just need to translate that. Otherwise, we're
// within it, then we just need to codegen that. Otherwise, we're
// building an rlib which will depend on some upstream implementation of
// this function, so we just codegen a generic reference to it. We don't
// specify any of the types for the function, we just make it a symbol
@ -131,9 +131,9 @@
//! when generating prologue instructions we have to make sure that we don't
//! emit source location information until the 'real' function body begins. For
//! this reason, source location emission is disabled by default for any new
//! function being translated and is only activated after a call to the third
//! function being codegened and is only activated after a call to the third
//! function from the list above, `start_emitting_source_locations()`. This
//! function should be called right before regularly starting to translate the
//! function should be called right before regularly starting to codegen the
//! top-level block of the given function.
//!
//! There is one exception to the above rule: `llvm.dbg.declare` instruction
@ -23,7 +23,7 @@ use llvm::{self, ValueRef};
use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor,
DICompositeType, DILexicalBlock, DIFlags};
use rustc::hir::TransFnAttrFlags;
use rustc::hir::CodegenFnAttrFlags;
use rustc::hir::def::CtorKind;
use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE};
use rustc::ty::fold::TypeVisitor;
@ -1645,13 +1645,13 @@ pub fn create_global_var_metadata(cx: &CodegenCx,
}
let tcx = cx.tcx;
let attrs = tcx.trans_fn_attrs(def_id);
let attrs = tcx.codegen_fn_attrs(def_id);
if attrs.flags.contains(TransFnAttrFlags::NO_DEBUG) {
if attrs.flags.contains(CodegenFnAttrFlags::NO_DEBUG) {
return;
}
let no_mangle = attrs.flags.contains(TransFnAttrFlags::NO_MANGLE);
let no_mangle = attrs.flags.contains(CodegenFnAttrFlags::NO_MANGLE);
// We may want to remove the namespace scope if we're in an extern block, see:
// https://github.com/rust-lang/rust/pull/46457#issuecomment-351750952
let var_scope = get_namespace_for_item(cx, def_id);
@ -23,7 +23,7 @@ use self::source_loc::InternalDebugLocation::{self, UnknownLocation};
use llvm;
use llvm::{ModuleRef, ContextRef, ValueRef};
use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilderRef, DISubprogram, DIArray, DIFlags};
use rustc::hir::TransFnAttrFlags;
use rustc::hir::CodegenFnAttrFlags;
use rustc::hir::def_id::{DefId, CrateNum};
use rustc::ty::subst::{Substs, UnpackedKind};
@ -212,7 +212,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
}
if let InstanceDef::Item(def_id) = instance.def {
if cx.tcx.trans_fn_attrs(def_id).flags.contains(TransFnAttrFlags::NO_DEBUG) {
if cx.tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) {
return FunctionDebugContext::FunctionWithoutDebugInfo;
}
}
@ -50,9 +50,9 @@ pub fn set_source_location(
/// Enables emitting source locations for the given functions.
///
/// Since we don't want source locations to be emitted for the function prelude,
/// they are disabled when beginning to translate a new function. This function
/// they are disabled when beginning to codegen a new function. This function
/// switches source location emitting on and must therefore be called before the
/// first real statement/expression of the function is translated.
/// first real statement/expression of the function is codegened.
pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) {
match *dbg_context {
FunctionDebugContext::RegularContext(ref data) => {
@ -31,7 +31,7 @@ pub fn is_node_local_to_unit(cx: &CodegenCx, def_id: DefId) -> bool
// externally visible or by being inlined into something externally
// visible). It might be better to use the `exported_items` set from
// `driver::CrateAnalysis` in the future, but (atm) this set is not
// available in the translation pass.
// available in the codegen pass.
!cx.tcx.is_reachable_non_generic(def_id)
}
@ -11,7 +11,7 @@
//!
//! Prefer using functions and methods from this module rather than calling LLVM
//! functions directly. These functions do some additional work to ensure we do
//! the right thing given the preconceptions of trans.
//! the right thing given the preconceptions of codegen.
//!
//! Some useful guidelines:
//!
@ -84,8 +84,8 @@ fn get_simple_intrinsic(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
/// add them to librustc_codegen_llvm/context.rs
pub fn codegen_intrinsic_call<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
callee_ty: Ty<'tcx>,
fn_ty: &FnType<'tcx, Ty<'tcx>>,
args: &[OperandRef<'tcx>],
@ -386,7 +386,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
},
"discriminant_value" => {
args[0].deref(bx.cx).trans_get_discr(bx, ret_ty)
args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty)
}
"align_offset" => {
@ -743,9 +743,9 @@ fn try_intrinsic<'a, 'tcx>(
let ptr_align = bx.tcx().data_layout.pointer_align;
bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align);
} else if wants_msvc_seh(bx.sess()) {
trans_msvc_try(bx, cx, func, data, local_ptr, dest);
codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
} else {
trans_gnu_try(bx, cx, func, data, local_ptr, dest);
codegen_gnu_try(bx, cx, func, data, local_ptr, dest);
}
}
@ -756,7 +756,7 @@ fn try_intrinsic<'a, 'tcx>(
// instructions are meant to work for all targets, as of the time of this
// writing, however, LLVM does not recommend the usage of these new instructions
// as the old ones are still more optimized.
fn trans_msvc_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
fn codegen_msvc_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
cx: &CodegenCx,
func: ValueRef,
data: ValueRef,
@ -857,14 +857,14 @@ fn trans_msvc_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn trans_gnu_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
fn codegen_gnu_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
cx: &CodegenCx,
func: ValueRef,
data: ValueRef,
@ -873,7 +873,7 @@ fn trans_gnu_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
let llfn = get_rust_try_fn(cx, &mut |bx| {
let cx = bx.cx;
// Translates the shims described above:
// Codegens the shims described above:
//
// bx:
// invoke %func(%args...) normal %normal unwind %catch
@ -922,13 +922,13 @@ fn trans_gnu_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
bx.store(ret, dest, i32_align);
}
// Helper function to give a Block to a closure to translate a shim function.
// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
name: &str,
inputs: Vec<Ty<'tcx>>,
output: Ty<'tcx>,
trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
codegen: &mut for<'b> FnMut(Builder<'b, 'tcx>))
-> ValueRef {
let rust_fn_ty = cx.tcx.mk_fn_ptr(ty::Binder::bind(cx.tcx.mk_fn_sig(
inputs.into_iter(),
@ -939,7 +939,7 @@ fn gen_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
)));
let llfn = declare::define_internal_fn(cx, name, rust_fn_ty);
let bx = Builder::new_block(cx, llfn, "entry-block");
trans(bx);
codegen(bx);
llfn
}
@ -948,7 +948,7 @@ fn gen_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
codegen: &mut for<'b> FnMut(Builder<'b, 'tcx>))
-> ValueRef {
if let Some(llfn) = cx.rust_try_fn.get() {
return llfn;
@ -965,7 +965,7 @@ fn get_rust_try_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
Abi::Rust
)));
let output = tcx.types.i32;
let rust_try = gen_fn(cx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
let rust_try = gen_fn(cx, "__rust_try", vec![fn_ty, i8p, i8p], output, codegen);
cx.rust_try_fn.set(Some(rust_try));
return rust_try
}
@ -1069,7 +1069,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
let n: usize = match name["simd_shuffle".len()..].parse() {
Ok(n) => n,
Err(_) => span_bug!(span,
"bad `simd_shuffle` instruction only caught in trans?")
"bad `simd_shuffle` instruction only caught in codegen?")
};
require_simd!(ret_ty, "return");
@ -49,7 +49,7 @@ extern crate rustc_demangle;
extern crate rustc_incremental;
extern crate rustc_llvm as llvm;
extern crate rustc_platform_intrinsics as intrinsics;
extern crate rustc_trans_utils;
extern crate rustc_codegen_utils;
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
@ -79,12 +79,12 @@ use rustc::session::config::{OutputFilenames, OutputType, PrintRequest};
use rustc::ty::{self, TyCtxt};
use rustc::util::nodemap::{FxHashSet, FxHashMap};
use rustc_mir::monomorphize;
use rustc_trans_utils::trans_crate::TransCrate;
use rustc_codegen_utils::codegen_backend::CodegenBackend;
mod diagnostics;
mod back {
pub use rustc_trans_utils::symbol_names;
pub use rustc_codegen_utils::symbol_names;
mod archive;
pub mod bytecode;
mod command;
@ -116,23 +116,23 @@ mod metadata;
mod meth;
mod mir;
mod time_graph;
mod trans_item;
mod mono_item;
mod type_;
mod type_of;
mod value;
pub struct LlvmTransCrate(());
pub struct LlvmCodegenBackend(());
impl !Send for LlvmTransCrate {} // Llvm is on a per-thread basis
impl !Sync for LlvmTransCrate {}
impl !Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis
impl !Sync for LlvmCodegenBackend {}
impl LlvmTransCrate {
pub fn new() -> Box<TransCrate> {
box LlvmTransCrate(())
impl LlvmCodegenBackend {
pub fn new() -> Box<CodegenBackend> {
box LlvmCodegenBackend(())
}
}
impl TransCrate for LlvmTransCrate {
impl CodegenBackend for LlvmCodegenBackend {
fn init(&self, sess: &Session) {
llvm_util::init(sess); // Make sure llvm is inited
}
@ -197,27 +197,28 @@ impl TransCrate for LlvmTransCrate {
attributes::provide_extern(providers);
}
fn trans_crate<'a, 'tcx>(
fn codegen_crate<'a, 'tcx>(
&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
rx: mpsc::Receiver<Box<Any + Send>>
) -> Box<Any> {
box base::trans_crate(tcx, rx)
box base::codegen_crate(tcx, rx)
}
fn join_trans_and_link(
fn join_codegen_and_link(
&self,
trans: Box<Any>,
ongoing_codegen: Box<Any>,
sess: &Session,
dep_graph: &DepGraph,
outputs: &OutputFilenames,
) -> Result<(), CompileIncomplete>{
use rustc::util::common::time;
let (trans, work_products) = trans.downcast::<::back::write::OngoingCrateTranslation>()
.expect("Expected LlvmTransCrate's OngoingCrateTranslation, found Box<Any>")
.join(sess);
let (ongoing_codegen, work_products) =
ongoing_codegen.downcast::<::back::write::OngoingCodegen>()
.expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
.join(sess);
if sess.opts.debugging_opts.incremental_info {
back::write::dump_incremental_data(&trans);
back::write::dump_incremental_data(&ongoing_codegen);
}
time(sess,
@ -234,24 +235,25 @@ impl TransCrate for LlvmTransCrate {
// Run the linker on any artifacts that resulted from the LLVM run.
// This should produce either a finished executable or library.
time(sess, "linking", || {
back::link::link_binary(sess, &trans, outputs, &trans.crate_name.as_str());
back::link::link_binary(sess, &ongoing_codegen,
outputs, &ongoing_codegen.crate_name.as_str());
});
// Now that we won't touch anything in the incremental compilation directory
// any more, we can finalize it (which involves renaming it)
rustc_incremental::finalize_session_directory(sess, trans.link.crate_hash);
rustc_incremental::finalize_session_directory(sess, ongoing_codegen.link.crate_hash);
Ok(())
}
}
/// This is the entrypoint for a hot plugged rustc_trans
/// This is the entrypoint for a hot plugged rustc_codegen_llvm
#[no_mangle]
pub fn __rustc_codegen_backend() -> Box<TransCrate> {
LlvmTransCrate::new()
pub fn __rustc_codegen_backend() -> Box<CodegenBackend> {
LlvmCodegenBackend::new()
}
struct ModuleTranslation {
struct ModuleCodegen {
/// The name of the module. When the crate may be saved between
/// compilations, incremental compilation requires that name be
/// unique amongst **all** crates. Therefore, it should contain
@ -270,10 +272,10 @@ enum ModuleKind {
Allocator,
}
impl ModuleTranslation {
impl ModuleCodegen {
fn llvm(&self) -> Option<&ModuleLlvm> {
match self.source {
ModuleSource::Translated(ref llvm) => Some(llvm),
ModuleSource::Codegened(ref llvm) => Some(llvm),
ModuleSource::Preexisting(_) => None,
}
}
@ -285,7 +287,7 @@ impl ModuleTranslation {
outputs: &OutputFilenames) -> CompiledModule {
let pre_existing = match self.source {
ModuleSource::Preexisting(_) => true,
ModuleSource::Translated(_) => false,
ModuleSource::Codegened(_) => false,
};
let object = if emit_obj {
Some(outputs.temp_path(OutputType::Object, Some(&self.name)))
@ -332,7 +334,7 @@ enum ModuleSource {
Preexisting(WorkProduct),
/// Rebuild from this LLVM module.
Translated(ModuleLlvm),
Codegened(ModuleLlvm),
}
#[derive(Debug)]
@ -355,7 +357,7 @@ impl Drop for ModuleLlvm {
}
}
struct CrateTranslation {
struct CodegenResults {
crate_name: Symbol,
modules: Vec<CompiledModule>,
allocator_module: Option<CompiledModule>,
@ -387,4 +389,4 @@ struct CrateInfo {
missing_lang_items: FxHashMap<CrateNum, Vec<LangItem>>,
}
__build_diagnostic_array! { librustc_trans, DIAGNOSTICS }
__build_diagnostic_array! { librustc_codegen_llvm, DIAGNOSTICS }
@ -249,7 +249,7 @@ pub(crate) fn print(req: PrintRequest, sess: &Session) {
match req {
PrintRequest::TargetCPUs => llvm::LLVMRustPrintTargetCPUs(tm),
PrintRequest::TargetFeatures => llvm::LLVMRustPrintTargetFeatures(tm),
_ => bug!("rustc_trans can't handle print request: {:?}", req),
_ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
}
}
}
@ -34,25 +34,25 @@ use super::operand::OperandRef;
use super::operand::OperandValue::{Pair, Ref, Immediate};
impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
pub fn trans_block(&mut self, bb: mir::BasicBlock) {
pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
let mut bx = self.build_block(bb);
let data = &self.mir[bb];
debug!("trans_block({:?}={:?})", bb, data);
debug!("codegen_block({:?}={:?})", bb, data);
for statement in &data.statements {
bx = self.trans_statement(bx, statement);
bx = self.codegen_statement(bx, statement);
}
self.trans_terminator(bx, bb, data.terminator());
self.codegen_terminator(bx, bb, data.terminator());
}
fn trans_terminator(&mut self,
fn codegen_terminator(&mut self,
mut bx: Builder<'a, 'tcx>,
bb: mir::BasicBlock,
terminator: &mir::Terminator<'tcx>)
{
debug!("trans_terminator: {:?}", terminator);
debug!("codegen_terminator: {:?}", terminator);
// Create the cleanup bundle, if needed.
let tcx = bx.tcx();
@ -190,7 +190,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
let discr = self.trans_operand(&bx, discr);
let discr = self.codegen_operand(&bx, discr);
if switch_ty == bx.tcx().types.bool {
let lltrue = llblock(self, targets[0]);
let llfalse = llblock(self, targets[1]);
@ -221,7 +221,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
PassMode::Direct(_) | PassMode::Pair(..) => {
let op = self.trans_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE));
let op = self.codegen_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE));
if let Ref(llval, align) = op.val {
bx.load(llval, align)
} else {
@ -233,10 +233,10 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
let op = match self.locals[mir::RETURN_PLACE] {
LocalRef::Operand(Some(op)) => op,
LocalRef::Operand(None) => bug!("use of return before def"),
LocalRef::Place(tr_place) => {
LocalRef::Place(cg_place) => {
OperandRef {
val: Ref(tr_place.llval, tr_place.align),
layout: tr_place.layout
val: Ref(cg_place.llval, cg_place.align),
layout: cg_place.layout
}
}
};
@ -275,7 +275,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
return
}
let place = self.trans_place(&bx, location);
let place = self.codegen_place(&bx, location);
let mut args: &[_] = &[place.llval, place.llextra];
args = &args[..1 + place.has_extra() as usize];
let (drop_fn, fn_ty) = match ty.sty {
@ -301,7 +301,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
let cond = self.trans_operand(&bx, cond).immediate();
let cond = self.codegen_operand(&bx, cond).immediate();
let mut const_cond = common::const_to_opt_u128(cond, false).map(|c| c == 1);
// This case can currently arise only from functions marked
@ -317,7 +317,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
}
// Don't translate the panic block if success is known.
// Don't codegen the panic block if success is known.
if const_cond == Some(expected) {
funclet_br(self, bx, target);
return;
@ -353,8 +353,8 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
// Put together the arguments to the panic entry point.
let (lang_item, args) = match *msg {
EvalErrorKind::BoundsCheck { ref len, ref index } => {
let len = self.trans_operand(&mut bx, len).immediate();
let index = self.trans_operand(&mut bx, index).immediate();
let len = self.codegen_operand(&mut bx, len).immediate();
let index = self.codegen_operand(&mut bx, index).immediate();
let file_line_col = C_struct(bx.cx, &[filename, line, col], false);
let file_line_col = consts::addr_of(bx.cx,
@ -386,17 +386,17 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
let fn_ty = FnType::of_instance(bx.cx, &instance);
let llfn = callee::get_fn(bx.cx, instance);
// Translate the actual panic invoke/call.
// Codegen the actual panic invoke/call.
do_call(self, bx, fn_ty, llfn, &args, None, cleanup);
}
mir::TerminatorKind::DropAndReplace { .. } => {
bug!("undesugared DropAndReplace in trans: {:?}", terminator);
bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
}
mir::TerminatorKind::Call { ref func, ref args, ref destination, cleanup } => {
// Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
let callee = self.trans_operand(&bx, func);
let callee = self.codegen_operand(&bx, func);
let (instance, mut llfn) = match callee.layout.ty.sty {
ty::TyFnDef(def_id, substs) => {
@ -419,7 +419,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
);
let abi = sig.abi;
// Handle intrinsics old trans wants Expr's for, ourselves.
// Handle intrinsics old codegen wants Expr's for, ourselves.
let intrinsic = match def {
Some(ty::InstanceDef::Intrinsic(def_id))
=> Some(bx.tcx().item_name(def_id).as_str()),
@ -429,7 +429,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
if intrinsic == Some("transmute") {
let &(ref dest, target) = destination.as_ref().unwrap();
self.trans_transmute(&bx, &args[0], dest);
self.codegen_transmute(&bx, &args[0], dest);
funclet_br(self, bx, target);
return;
}
@ -467,7 +467,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
};
if intrinsic.is_some() && intrinsic != Some("drop_in_place") {
use intrinsic::trans_intrinsic_call;
use intrinsic::codegen_intrinsic_call;
let dest = match ret_dest {
_ if fn_ty.ret.is_indirect() => llargs[0],
@ -504,12 +504,12 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
}
self.trans_operand(&bx, arg)
self.codegen_operand(&bx, arg)
}).collect();
let callee_ty = instance.as_ref().unwrap().ty(bx.cx.tcx);
trans_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest,
codegen_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest,
terminator.source_info.span);
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
@ -534,7 +534,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
};
for (i, arg) in first_args.iter().enumerate() {
let mut op = self.trans_operand(&bx, arg);
let mut op = self.codegen_operand(&bx, arg);
if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
if let Pair(data_ptr, meta) = op.val {
llfn = Some(meth::VirtualIndex::from_index(idx)
@ -556,10 +556,10 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
_ => {}
}
self.trans_argument(&bx, op, &mut llargs, &fn_ty.args[i]);
self.codegen_argument(&bx, op, &mut llargs, &fn_ty.args[i]);
}
if let Some(tup) = untuple {
self.trans_arguments_untupled(&bx, tup, &mut llargs,
self.codegen_arguments_untupled(&bx, tup, &mut llargs,
&fn_ty.args[first_args.len()..])
}
@ -574,13 +574,13 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
cleanup);
}
mir::TerminatorKind::GeneratorDrop |
mir::TerminatorKind::Yield { .. } => bug!("generator ops in trans"),
mir::TerminatorKind::Yield { .. } => bug!("generator ops in codegen"),
mir::TerminatorKind::FalseEdges { .. } |
mir::TerminatorKind::FalseUnwind { .. } => bug!("borrowck false edges in trans"),
mir::TerminatorKind::FalseUnwind { .. } => bug!("borrowck false edges in codegen"),
}
}
fn trans_argument(&mut self,
fn codegen_argument(&mut self,
bx: &Builder<'a, 'tcx>,
op: OperandRef<'tcx>,
llargs: &mut Vec<ValueRef>,
@ -601,7 +601,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
llargs.push(b);
return;
}
_ => bug!("trans_argument: {:?} invalid for pair arugment", op)
_ => bug!("codegen_argument: {:?} invalid for pair arugment", op)
}
}
@ -659,25 +659,25 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
llargs.push(llval);
}
fn trans_arguments_untupled(&mut self,
fn codegen_arguments_untupled(&mut self,
bx: &Builder<'a, 'tcx>,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<ValueRef>,
args: &[ArgType<'tcx, Ty<'tcx>>]) {
let tuple = self.trans_operand(bx, operand);
let tuple = self.codegen_operand(bx, operand);
// Handle both by-ref and immediate tuples.
if let Ref(llval, align) = tuple.val {
let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align);
for i in 0..tuple.layout.fields.count() {
let field_ptr = tuple_ptr.project_field(bx, i);
self.trans_argument(bx, field_ptr.load(bx), llargs, &args[i]);
self.codegen_argument(bx, field_ptr.load(bx), llargs, &args[i]);
}
} else {
// If the tuple is immediate, the elements are as well.
for i in 0..tuple.layout.fields.count() {
let op = tuple.extract_field(bx, i);
self.trans_argument(bx, op, llargs, &args[i]);
self.codegen_argument(bx, op, llargs, &args[i]);
}
}
}
@ -792,7 +792,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
}
} else {
self.trans_place(bx, dest)
self.codegen_place(bx, dest)
};
if fn_ret.is_indirect() {
if dest.align.abi() < dest.layout.align.abi() {
@ -811,18 +811,18 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
}
fn trans_transmute(&mut self, bx: &Builder<'a, 'tcx>,
fn codegen_transmute(&mut self, bx: &Builder<'a, 'tcx>,
src: &mir::Operand<'tcx>,
dst: &mir::Place<'tcx>) {
if let mir::Place::Local(index) = *dst {
match self.locals[index] {
LocalRef::Place(place) => self.trans_transmute_into(bx, src, place),
LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
LocalRef::Operand(None) => {
let dst_layout = bx.cx.layout_of(self.monomorphized_place_ty(dst));
assert!(!dst_layout.ty.has_erasable_regions());
let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
place.storage_live(bx);
self.trans_transmute_into(bx, src, place);
self.codegen_transmute_into(bx, src, place);
let op = place.load(bx);
place.storage_dead(bx);
self.locals[index] = LocalRef::Operand(Some(op));
@ -833,15 +833,15 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
}
} else {
let dst = self.trans_place(bx, dst);
self.trans_transmute_into(bx, src, dst);
let dst = self.codegen_place(bx, dst);
self.codegen_transmute_into(bx, src, dst);
}
}
fn trans_transmute_into(&mut self, bx: &Builder<'a, 'tcx>,
fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'tcx>,
src: &mir::Operand<'tcx>,
dst: PlaceRef<'tcx>) {
let src = self.trans_operand(bx, src);
let src = self.codegen_operand(bx, src);
let llty = src.layout.llvm_type(bx.cx);
let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to());
let align = src.layout.align.min(dst.layout.align);
@ -158,7 +158,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx, alloc: &Allocation) -> ValueRef {
C_struct(cx, &llvals, true)
}
pub fn trans_static_initializer<'a, 'tcx>(
pub fn codegen_static_initializer<'a, 'tcx>(
cx: &CodegenCx<'a, 'tcx>,
def_id: DefId)
-> Result<ValueRef, ConstEvalErr<'tcx>>
@ -33,7 +33,7 @@ use std::iter;
use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
pub use self::constant::trans_static_initializer;
pub use self::constant::codegen_static_initializer;
use self::analyze::CleanupKind;
use self::place::PlaceRef;
@ -41,7 +41,7 @@ use rustc::mir::traversal;
use self::operand::{OperandRef, OperandValue};
/// Master context for translating MIR.
/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'tcx:'a> {
instance: Instance<'tcx>,
@ -197,7 +197,7 @@ impl<'a, 'tcx> LocalRef<'tcx> {
///////////////////////////////////////////////////////////////////////////
pub fn trans_mir<'a, 'tcx: 'a>(
pub fn codegen_mir<'a, 'tcx: 'a>(
cx: &'a CodegenCx<'a, 'tcx>,
llfn: ValueRef,
mir: &'a Mir<'tcx>,
@ -321,10 +321,10 @@ pub fn trans_mir<'a, 'tcx: 'a>(
let rpo = traversal::reverse_postorder(&mir);
let mut visited = BitVector::new(mir.basic_blocks().len());
// Translate the body of each block using reverse postorder
// Codegen the body of each block using reverse postorder
for (bb, _) in rpo {
visited.insert(bb.index());
fx.trans_block(bb);
fx.codegen_block(bb);
}
// Remove blocks that haven't been visited, or have no
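For reference, a self-contained reverse-postorder walk over a plain adjacency list (a generic sketch, not rustc's traversal module): visit a node's successors first, record the node afterwards, then reverse the resulting order.

fn reverse_postorder(succ: &[Vec<usize>], start: usize) -> Vec<usize> {
    fn dfs(n: usize, succ: &[Vec<usize>], seen: &mut [bool], out: &mut Vec<usize>) {
        seen[n] = true;
        for &s in &succ[n] {
            if !seen[s] {
                dfs(s, succ, seen, out);
            }
        }
        out.push(n); // postorder: a node is emitted only after all of its successors
    }
    let mut seen = vec![false; succ.len()];
    let mut out = Vec::new();
    dfs(start, succ, &mut seen, &mut out);
    out.reverse(); // reverse postorder
    out
}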
@ -332,7 +332,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
for bb in mir.basic_blocks().indices() {
// Unreachable block
if !visited.contains(bb.index()) {
debug!("trans_mir: block {:?} was not visited", bb);
debug!("codegen_mir: block {:?} was not visited", bb);
unsafe {
llvm::LLVMDeleteBasicBlock(fx.blocks[bb]);
}
@ -315,12 +315,12 @@ impl<'a, 'tcx> OperandValue {
}
impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
fn maybe_trans_consume_direct(&mut self,
fn maybe_codegen_consume_direct(&mut self,
bx: &Builder<'a, 'tcx>,
place: &mir::Place<'tcx>)
-> Option<OperandRef<'tcx>>
{
debug!("maybe_trans_consume_direct(place={:?})", place);
debug!("maybe_codegen_consume_direct(place={:?})", place);
// watch out for locals that do not have an
// alloca; they are handled somewhat differently
@ -340,7 +340,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
// Moves out of scalar and scalar pair fields are trivial.
if let &mir::Place::Projection(ref proj) = place {
if let Some(o) = self.maybe_trans_consume_direct(bx, &proj.base) {
if let Some(o) = self.maybe_codegen_consume_direct(bx, &proj.base) {
match proj.elem {
mir::ProjectionElem::Field(ref f, _) => {
return Some(o.extract_field(bx, f.index()));
@ -349,7 +349,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
mir::ProjectionElem::ConstantIndex { .. } => {
// ZSTs don't require any actual memory access.
// FIXME(eddyb) deduplicate this with the identical
// checks in `trans_consume` and `extract_field`.
// checks in `codegen_consume` and `extract_field`.
let elem = o.layout.field(bx.cx, 0);
if elem.is_zst() {
return Some(OperandRef::new_zst(bx.cx, elem));
@ -363,12 +363,12 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
None
}
pub fn trans_consume(&mut self,
pub fn codegen_consume(&mut self,
bx: &Builder<'a, 'tcx>,
place: &mir::Place<'tcx>)
-> OperandRef<'tcx>
{
debug!("trans_consume(place={:?})", place);
debug!("codegen_consume(place={:?})", place);
let ty = self.monomorphized_place_ty(place);
let layout = bx.cx.layout_of(ty);
@ -378,26 +378,26 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
return OperandRef::new_zst(bx.cx, layout);
}
if let Some(o) = self.maybe_trans_consume_direct(bx, place) {
if let Some(o) = self.maybe_codegen_consume_direct(bx, place) {
return o;
}
// for most places, to consume them we just load them
// out from their home
self.trans_place(bx, place).load(bx)
self.codegen_place(bx, place).load(bx)
}
pub fn trans_operand(&mut self,
pub fn codegen_operand(&mut self,
bx: &Builder<'a, 'tcx>,
operand: &mir::Operand<'tcx>)
-> OperandRef<'tcx>
{
debug!("trans_operand(operand={:?})", operand);
debug!("codegen_operand(operand={:?})", operand);
match *operand {
mir::Operand::Copy(ref place) |
mir::Operand::Move(ref place) => {
self.trans_consume(bx, place)
self.codegen_consume(bx, place)
}
mir::Operand::Constant(ref constant) => {

View File

@ -253,7 +253,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
}
/// Obtain the actual discriminant of a value.
pub fn trans_get_discr(self, bx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
pub fn codegen_get_discr(self, bx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx);
if self.layout.abi == layout::Abi::Uninhabited {
return C_undef(cast_to);
@ -313,7 +313,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
/// Set the discriminant for a new value of the given case of the given
/// representation.
pub fn trans_set_discr(&self, bx: &Builder<'a, 'tcx>, variant_index: usize) {
pub fn codegen_set_discr(&self, bx: &Builder<'a, 'tcx>, variant_index: usize) {
if self.layout.for_variant(bx.cx, variant_index).abi == layout::Abi::Uninhabited {
return;
}
@ -399,11 +399,11 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
}
impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
pub fn trans_place(&mut self,
pub fn codegen_place(&mut self,
bx: &Builder<'a, 'tcx>,
place: &mir::Place<'tcx>)
-> PlaceRef<'tcx> {
debug!("trans_place(place={:?})", place);
debug!("codegen_place(place={:?})", place);
let cx = bx.cx;
let tcx = cx.tcx;
@ -430,46 +430,46 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
elem: mir::ProjectionElem::Deref
}) => {
// Load the pointer from its location.
self.trans_consume(bx, base).deref(bx.cx)
self.codegen_consume(bx, base).deref(bx.cx)
}
mir::Place::Projection(ref projection) => {
let tr_base = self.trans_place(bx, &projection.base);
let cg_base = self.codegen_place(bx, &projection.base);
match projection.elem {
mir::ProjectionElem::Deref => bug!(),
mir::ProjectionElem::Field(ref field, _) => {
tr_base.project_field(bx, field.index())
cg_base.project_field(bx, field.index())
}
mir::ProjectionElem::Index(index) => {
let index = &mir::Operand::Copy(mir::Place::Local(index));
let index = self.trans_operand(bx, index);
let index = self.codegen_operand(bx, index);
let llindex = index.immediate();
tr_base.project_index(bx, llindex)
cg_base.project_index(bx, llindex)
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: false,
min_length: _ } => {
let lloffset = C_usize(bx.cx, offset as u64);
tr_base.project_index(bx, lloffset)
cg_base.project_index(bx, lloffset)
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: true,
min_length: _ } => {
let lloffset = C_usize(bx.cx, offset as u64);
let lllen = tr_base.len(bx.cx);
let lllen = cg_base.len(bx.cx);
let llindex = bx.sub(lllen, lloffset);
tr_base.project_index(bx, llindex)
cg_base.project_index(bx, llindex)
}
mir::ProjectionElem::Subslice { from, to } => {
let mut subslice = tr_base.project_index(bx,
let mut subslice = cg_base.project_index(bx,
C_usize(bx.cx, from as u64));
let projected_ty = PlaceTy::Ty { ty: tr_base.layout.ty }
let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
.projection_ty(tcx, &projection.elem).to_ty(bx.tcx());
subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty));
if subslice.layout.is_unsized() {
assert!(tr_base.has_extra());
subslice.llextra = bx.sub(tr_base.llextra,
assert!(cg_base.has_extra());
subslice.llextra = bx.sub(cg_base.llextra,
C_usize(bx.cx, (from as u64) + (to as u64)));
}
@ -481,12 +481,12 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
subslice
}
mir::ProjectionElem::Downcast(_, v) => {
tr_base.project_downcast(bx, v)
cg_base.project_downcast(bx, v)
}
}
}
};
debug!("trans_place(place={:?}) => {:?}", place, result);
debug!("codegen_place(place={:?}) => {:?}", place, result);
result
}
@ -496,4 +496,3 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
self.monomorphize(&place_ty.to_ty(tcx))
}
}

View File

@ -33,21 +33,21 @@ use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
pub fn trans_rvalue(&mut self,
pub fn codegen_rvalue(&mut self,
bx: Builder<'a, 'tcx>,
dest: PlaceRef<'tcx>,
rvalue: &mir::Rvalue<'tcx>)
-> Builder<'a, 'tcx>
{
debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})",
Value(dest.llval), rvalue);
match *rvalue {
mir::Rvalue::Use(ref operand) => {
let tr_operand = self.trans_operand(&bx, operand);
// FIXME: consider not copying constants through stack. (fixable by translating
let cg_operand = self.codegen_operand(&bx, operand);
// FIXME: consider not copying constants through stack. (fixable by codegenning
// constants into OperandValue::Ref, why dont we do that yet if we dont?)
tr_operand.val.store(&bx, dest);
cg_operand.val.store(&bx, dest);
bx
}
@ -57,16 +57,16 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
if dest.layout.is_llvm_scalar_pair() {
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let (bx, temp) = self.trans_rvalue_operand(bx, rvalue);
let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&bx, dest);
return bx;
}
// Unsize of a nontrivial struct. I would prefer for
// this to be eliminated by MIR translation, but
// this to be eliminated by MIR building, but
// `CoerceUnsized` can be passed by a where-clause,
// so the (generic) MIR may not be able to expand it.
let operand = self.trans_operand(&bx, source);
let operand = self.codegen_operand(&bx, source);
match operand.val {
OperandValue::Pair(..) |
OperandValue::Immediate(_) => {
@ -76,7 +76,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
// `coerce_unsized_into` use extractvalue to
// index into the struct, and this case isn't
// important enough for it.
debug!("trans_rvalue: creating ugly alloca");
debug!("codegen_rvalue: creating ugly alloca");
let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp");
scratch.storage_live(&bx);
operand.val.store(&bx, scratch);
@ -92,7 +92,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
mir::Rvalue::Repeat(ref elem, count) => {
let tr_elem = self.trans_operand(&bx, elem);
let cg_elem = self.codegen_operand(&bx, elem);
// Do not generate the loop for zero-sized elements or empty arrays.
if dest.layout.is_zst() {
@ -101,7 +101,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
let start = dest.project_index(&bx, C_usize(bx.cx, 0)).llval;
if let OperandValue::Immediate(v) = tr_elem.val {
if let OperandValue::Immediate(v) = cg_elem.val {
let align = C_i32(bx.cx, dest.align.abi() as i32);
let size = C_usize(bx.cx, dest.layout.size.bytes());
@ -133,8 +133,8 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
let keep_going = header_bx.icmp(llvm::IntNE, current, end);
header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
tr_elem.val.store(&body_bx,
PlaceRef::new_sized(current, tr_elem.layout, dest.align));
cg_elem.val.store(&body_bx,
PlaceRef::new_sized(current, cg_elem.layout, dest.align));
let next = body_bx.inbounds_gep(current, &[C_usize(bx.cx, 1)]);
body_bx.br(header_bx.llbb());
@ -146,7 +146,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
mir::Rvalue::Aggregate(ref kind, ref operands) => {
let (dest, active_field_index) = match **kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
dest.trans_set_discr(&bx, variant_index);
dest.codegen_set_discr(&bx, variant_index);
if adt_def.is_enum() {
(dest.project_downcast(&bx, variant_index), active_field_index)
} else {
@ -156,7 +156,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
_ => (dest, None)
};
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bx, operand);
let op = self.codegen_operand(&bx, operand);
// Do not generate stores and GEPis for zero-sized fields.
if !op.layout.is_zst() {
let field_index = active_field_index.unwrap_or(i);
@ -168,23 +168,23 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
_ => {
assert!(self.rvalue_creates_operand(rvalue));
let (bx, temp) = self.trans_rvalue_operand(bx, rvalue);
let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&bx, dest);
bx
}
}
}
pub fn trans_rvalue_operand(&mut self,
pub fn codegen_rvalue_operand(&mut self,
bx: Builder<'a, 'tcx>,
rvalue: &mir::Rvalue<'tcx>)
-> (Builder<'a, 'tcx>, OperandRef<'tcx>)
{
assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue);
match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
let operand = self.trans_operand(&bx, source);
let operand = self.codegen_operand(&bx, source);
debug!("cast operand is {:?}", operand);
let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty));
@ -242,7 +242,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
OperandValue::Pair(lldata, llextra)
}
OperandValue::Ref(..) => {
bug!("by-ref operand {:?} in trans_rvalue_operand",
bug!("by-ref operand {:?} in codegen_rvalue_operand",
operand);
}
}
@ -358,16 +358,16 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
mir::Rvalue::Ref(_, bk, ref place) => {
let tr_place = self.trans_place(&bx, place);
let cg_place = self.codegen_place(&bx, place);
let ty = tr_place.layout.ty;
let ty = cg_place.layout.ty;
// Note: places are indirect, so storing the `llval` into the
// destination effectively creates a reference.
let val = if !bx.cx.type_has_metadata(ty) {
OperandValue::Immediate(tr_place.llval)
OperandValue::Immediate(cg_place.llval)
} else {
OperandValue::Pair(tr_place.llval, tr_place.llextra)
OperandValue::Pair(cg_place.llval, cg_place.llextra)
};
(bx, OperandRef {
val,
@ -388,12 +388,12 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
let lhs = self.trans_operand(&bx, lhs);
let rhs = self.trans_operand(&bx, rhs);
let lhs = self.codegen_operand(&bx, lhs);
let rhs = self.codegen_operand(&bx, rhs);
let llresult = match (lhs.val, rhs.val) {
(OperandValue::Pair(lhs_addr, lhs_extra),
OperandValue::Pair(rhs_addr, rhs_extra)) => {
self.trans_fat_ptr_binop(&bx, op,
self.codegen_fat_ptr_binop(&bx, op,
lhs_addr, lhs_extra,
rhs_addr, rhs_extra,
lhs.layout.ty)
@ -401,7 +401,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
(OperandValue::Immediate(lhs_val),
OperandValue::Immediate(rhs_val)) => {
self.trans_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty)
self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty)
}
_ => bug!()
@ -414,9 +414,9 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
(bx, operand)
}
mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
let lhs = self.trans_operand(&bx, lhs);
let rhs = self.trans_operand(&bx, rhs);
let result = self.trans_scalar_checked_binop(&bx, op,
let lhs = self.codegen_operand(&bx, lhs);
let rhs = self.codegen_operand(&bx, rhs);
let result = self.codegen_scalar_checked_binop(&bx, op,
lhs.immediate(), rhs.immediate(),
lhs.layout.ty);
let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
@ -430,7 +430,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
mir::Rvalue::UnaryOp(op, ref operand) => {
let operand = self.trans_operand(&bx, operand);
let operand = self.codegen_operand(&bx, operand);
let lloperand = operand.immediate();
let is_float = operand.layout.ty.is_fp();
let llval = match op {
@ -449,8 +449,8 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
mir::Rvalue::Discriminant(ref place) => {
let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
let discr = self.trans_place(&bx, place)
.trans_get_discr(&bx, discr_ty);
let discr = self.codegen_place(&bx, place)
.codegen_get_discr(&bx, discr_ty);
(bx, OperandRef {
val: OperandValue::Immediate(discr),
layout: self.cx.layout_of(discr_ty)
@ -493,7 +493,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
(bx, operand)
}
mir::Rvalue::Use(ref operand) => {
let operand = self.trans_operand(&bx, operand);
let operand = self.codegen_operand(&bx, operand);
(bx, operand)
}
mir::Rvalue::Repeat(..) |
@ -512,7 +512,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
place: &mir::Place<'tcx>) -> ValueRef
{
// ZST are passed as operands and require special handling
// because trans_place() panics if Local is operand.
// because codegen_place() panics if Local is operand.
if let mir::Place::Local(index) = *place {
if let LocalRef::Operand(Some(op)) = self.locals[index] {
if let ty::TyArray(_, n) = op.layout.ty.sty {
@ -522,11 +522,11 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
}
// use common size calculation for non zero-sized types
let tr_value = self.trans_place(&bx, place);
return tr_value.len(bx.cx);
let cg_value = self.codegen_place(&bx, place);
return cg_value.len(bx.cx);
}
pub fn trans_scalar_binop(&mut self,
pub fn codegen_scalar_binop(&mut self,
bx: &Builder<'a, 'tcx>,
op: mir::BinOp,
lhs: ValueRef,
@ -592,7 +592,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
}
pub fn trans_fat_ptr_binop(&mut self,
pub fn codegen_fat_ptr_binop(&mut self,
bx: &Builder<'a, 'tcx>,
op: mir::BinOp,
lhs_addr: ValueRef,
@ -639,7 +639,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
}
pub fn trans_scalar_checked_binop(&mut self,
pub fn codegen_scalar_checked_binop(&mut self,
bx: &Builder<'a, 'tcx>,
op: mir::BinOp,
lhs: ValueRef,
@ -650,7 +650,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
// another crate (mostly core::num generic/#[inline] fns),
// while the current crate doesn't use overflow checks.
if !bx.cx.check_overflow {
let val = self.trans_scalar_binop(bx, op, lhs, rhs, input_ty);
let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
return OperandValue::Pair(val, C_bool(bx.cx, false));
}
@ -676,7 +676,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
let outer_bits = bx.and(rhs, invert_mask);
let of = bx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
let val = self.trans_scalar_binop(bx, op, lhs, rhs, input_ty);
let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
(val, of)
}
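For context on the checked-binop path renamed above: `codegen_scalar_checked_binop` produces a pair of the raw result and an overflow flag, and skips the check entirely when the crate was compiled without overflow checks. The sketch below mirrors that shape in plain Rust using `overflowing_add`; it is a simplified stand-in, not compiler code:

```rust
// Simplified stand-in for the (value, overflow-flag) pair that the checked
// binop codegen produces; not compiler code.
fn checked_binop_add(lhs: u8, rhs: u8, check_overflow: bool) -> (u8, bool) {
    if !check_overflow {
        // Mirrors the early return in the hunk above: without overflow checks
        // the plain operation is emitted and the flag is hard-wired to false.
        return (lhs.wrapping_add(rhs), false);
    }
    lhs.overflowing_add(rhs)
}

fn main() {
    assert_eq!(checked_binop_add(200, 100, true), (44, true));
    assert_eq!(checked_binop_add(200, 100, false), (44, false));
}
```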

View File

@ -17,22 +17,22 @@ use super::FunctionCx;
use super::LocalRef;
impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
pub fn trans_statement(&mut self,
pub fn codegen_statement(&mut self,
bx: Builder<'a, 'tcx>,
statement: &mir::Statement<'tcx>)
-> Builder<'a, 'tcx> {
debug!("trans_statement(statement={:?})", statement);
debug!("codegen_statement(statement={:?})", statement);
self.set_debug_loc(&bx, statement.source_info);
match statement.kind {
mir::StatementKind::Assign(ref place, ref rvalue) => {
if let mir::Place::Local(index) = *place {
match self.locals[index] {
LocalRef::Place(tr_dest) => {
self.trans_rvalue(bx, tr_dest, rvalue)
LocalRef::Place(cg_dest) => {
self.codegen_rvalue(bx, cg_dest, rvalue)
}
LocalRef::Operand(None) => {
let (bx, operand) = self.trans_rvalue_operand(bx, rvalue);
let (bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
self.locals[index] = LocalRef::Operand(Some(operand));
bx
}
@ -44,42 +44,42 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
}
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we translate the operand
self.trans_rvalue_operand(bx, rvalue).0
// but we still need to make sure we codegen the operand
self.codegen_rvalue_operand(bx, rvalue).0
}
}
} else {
let tr_dest = self.trans_place(&bx, place);
self.trans_rvalue(bx, tr_dest, rvalue)
let cg_dest = self.codegen_place(&bx, place);
self.codegen_rvalue(bx, cg_dest, rvalue)
}
}
mir::StatementKind::SetDiscriminant{ref place, variant_index} => {
self.trans_place(&bx, place)
.trans_set_discr(&bx, variant_index);
self.codegen_place(&bx, place)
.codegen_set_discr(&bx, variant_index);
bx
}
mir::StatementKind::StorageLive(local) => {
if let LocalRef::Place(tr_place) = self.locals[local] {
tr_place.storage_live(&bx);
if let LocalRef::Place(cg_place) = self.locals[local] {
cg_place.storage_live(&bx);
}
bx
}
mir::StatementKind::StorageDead(local) => {
if let LocalRef::Place(tr_place) = self.locals[local] {
tr_place.storage_dead(&bx);
if let LocalRef::Place(cg_place) = self.locals[local] {
cg_place.storage_dead(&bx);
}
bx
}
mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| {
self.trans_place(&bx, output)
self.codegen_place(&bx, output)
}).collect();
let input_vals = inputs.iter().map(|input| {
self.trans_operand(&bx, input).immediate()
self.codegen_operand(&bx, input).immediate()
}).collect();
asm::trans_inline_asm(&bx, asm, outputs, input_vals);
asm::codegen_inline_asm(&bx, asm, outputs, input_vals);
bx
}
mir::StatementKind::EndRegion(_) |

View File

@ -58,18 +58,18 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> {
};
let attrs = tcx.get_attrs(def_id);
consts::trans_static(&cx, def_id, is_mutable, &attrs);
consts::codegen_static(&cx, def_id, is_mutable, &attrs);
}
MonoItem::GlobalAsm(node_id) => {
let item = cx.tcx.hir.expect_item(node_id);
if let hir::ItemGlobalAsm(ref ga) = item.node {
asm::trans_global_asm(cx, ga);
asm::codegen_global_asm(cx, ga);
} else {
span_bug!(item.span, "Mismatch between hir::Item type and TransItem type")
span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
}
}
MonoItem::Fn(instance) => {
base::trans_instance(&cx, instance);
base::codegen_instance(&cx, instance);
}
}
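The last hunk shows the mono-item dispatch after the rename: statics go through `codegen_static`, global asm through `codegen_global_asm`, and function instances through `codegen_instance`. Below is a self-contained sketch of that dispatch shape, using stand-in types rather than the real rustc definitions:

```rust
// Stand-in types only; the real rustc MonoItem and codegen entry points take
// a codegen context, DefIds, Instances, etc.
enum MonoItem {
    Static { is_mutable: bool },
    GlobalAsm(String),
    Fn(String),
}

fn codegen_static(is_mutable: bool) {
    println!("codegen_static(is_mutable = {})", is_mutable);
}

fn codegen_global_asm(asm: &str) {
    println!("codegen_global_asm({:?})", asm);
}

fn codegen_instance(instance: &str) {
    println!("codegen_instance({})", instance);
}

fn define(item: &MonoItem) {
    match item {
        MonoItem::Static { is_mutable } => codegen_static(*is_mutable),
        MonoItem::GlobalAsm(ga) => codegen_global_asm(ga),
        MonoItem::Fn(instance) => codegen_instance(instance),
    }
}

fn main() {
    define(&MonoItem::Fn("example::main".to_string()));
}
```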

Some files were not shown because too many files have changed in this diff.