rust/src/bootstrap/compile.rs

//! Implementation of compiling various phases of the compiler and standard
//! library.
//!
//! This module contains some of the real meat in the rustbuild build system
//! which is where Cargo is used to compile the standard library, libtest, and
//! compiler. This module is also responsible for assembling the sysroot as it
//! goes along from the output of the previous stage.
use std::borrow::Cow;
use std::collections::HashSet;
use std::env;
use std::fs;
use std::io::prelude::*;
use std::io::BufReader;
use std::path::{Path, PathBuf};
use std::process::{exit, Command, Stdio};
use std::str;
use build_helper::{output, t, up_to_date};
use filetime::FileTime;
use serde::Deserialize;
use crate::builder::Cargo;
use crate::builder::{Builder, Kind, RunConfig, ShouldRun, Step};
use crate::cache::{Interned, INTERNER};
use crate::config::TargetSelection;
use crate::dist;
use crate::native;
use crate::tool::SourceType;
use crate::util::{exe, is_debug_info, is_dylib, symlink_dir};
use crate::{Compiler, DependencyType, GitRepo, Mode};
#[derive(Debug, PartialOrd, Ord, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Std {
pub target: TargetSelection,
pub compiler: Compiler,
}
impl Step for Std {
type Output = ();
const DEFAULT: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
// When downloading stage1, the standard library has already been copied to the sysroot, so
// there's no need to rebuild it.
let download_rustc = run.builder.config.download_rustc;
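// `all_krates("test")` covers every in-tree crate that `test` transitively depends on,
// i.e. the whole standard library workspace, so changing any of those crates selects this step.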
run.all_krates("test").default_condition(!download_rustc)
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Std {
compiler: run.builder.compiler(run.builder.top_stage, run.build_triple()),
target: run.target,
});
}
/// Builds the standard library.
///
/// This will build the standard library for a particular stage of the build
/// using the `compiler` targeting the `target` architecture. The artifacts
/// created will also be linked into the sysroot directory.
fn run(self, builder: &Builder<'_>) {
let target = self.target;
let compiler = self.compiler;
// These artifacts were already copied (in `impl Step for Sysroot`).
// Don't recompile them.
if builder.config.download_rustc {
return;
}
if builder.config.keep_stage.contains(&compiler.stage)
|| builder.config.keep_stage_std.contains(&compiler.stage)
{
builder.info("Warning: Using a potentially old libstd. This may not behave well.");
builder.ensure(StdLink { compiler, target_compiler: compiler, target });
return;
}
let mut target_deps = builder.ensure(StartupObjects { compiler, target });
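// `compiler_for` may return an earlier-stage compiler (for example when full bootstrap
// is disabled). In that case std is built once with that compiler and then uplifted into
// this compiler's sysroot instead of being rebuilt from scratch.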
let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target);
if compiler_to_use != compiler {
builder.ensure(Std { compiler: compiler_to_use, target });
builder.info(&format!("Uplifting stage1 std ({} -> {})", compiler_to_use.host, target));
// Even if we're not building std this stage, the new sysroot must
// still contain the third party objects needed by various targets.
copy_third_party_objects(builder, &compiler, target);
copy_self_contained_objects(builder, &compiler, target);
builder.ensure(StdLink {
compiler: compiler_to_use,
target_compiler: compiler,
target,
});
return;
}
target_deps.extend(copy_third_party_objects(builder, &compiler, target));
target_deps.extend(copy_self_contained_objects(builder, &compiler, target));
let mut cargo = builder.cargo(compiler, Mode::Std, SourceType::InTree, target, "build");
std_cargo(builder, target, compiler.stage, &mut cargo);
builder.info(&format!(
"Building stage{} std artifacts ({} -> {})",
compiler.stage, &compiler.host, target
));
run_cargo(
builder,
cargo,
vec![],
&libstd_stamp(builder, compiler, target),
target_deps,
false,
);
builder.ensure(StdLink {
compiler: builder.compiler(compiler.stage, builder.config.build),
target_compiler: compiler,
target,
});
}
}
fn copy_and_stamp(
builder: &Builder<'_>,
libdir: &Path,
sourcedir: &Path,
name: &str,
target_deps: &mut Vec<(PathBuf, DependencyType)>,
dependency_type: DependencyType,
) {
let target = libdir.join(name);
builder.copy(&sourcedir.join(name), &target);
target_deps.push((target, dependency_type));
}
/// Copies third party objects needed by various targets.
fn copy_third_party_objects(
builder: &Builder<'_>,
compiler: &Compiler,
target: TargetSelection,
) -> Vec<(PathBuf, DependencyType)> {
let mut target_deps = vec![];
// FIXME: remove this in 2021
if target == "x86_64-fortanix-unknown-sgx" {
if env::var_os("X86_FORTANIX_SGX_LIBS").is_some() {
builder.info("Warning: X86_FORTANIX_SGX_LIBS environment variable is ignored, libunwind is now compiled as part of rustbuild");
}
}
if builder.config.sanitizers_enabled(target) && compiler.stage != 0 {
// The sanitizers are only copied in stage1 or above,
// to avoid creating a dependency on LLVM.
target_deps.extend(
copy_sanitizers(builder, &compiler, target)
.into_iter()
.map(|d| (d, DependencyType::Target)),
);
}
target_deps
}
/// Copies third party objects needed by various targets for self-contained linkage.
fn copy_self_contained_objects(
builder: &Builder<'_>,
compiler: &Compiler,
target: TargetSelection,
) -> Vec<(PathBuf, DependencyType)> {
let libdir_self_contained = builder.sysroot_libdir(*compiler, target).join("self-contained");
t!(fs::create_dir_all(&libdir_self_contained));
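// Objects placed in this `self-contained` directory are only used when rustc links in
// self-contained mode (e.g. via `-C link-self-contained`), where startup objects come
// from the sysroot rather than from an external toolchain.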
let mut target_deps = vec![];
// Copies the CRT objects.
//
// rustc historically provides a more self-contained installation for musl targets
// not requiring the presence of a native musl toolchain. For example, it can fall back
// to using gcc from a glibc-targeting toolchain for linking.
// To do that we have to distribute musl startup objects as a part of the Rust toolchain
// and link with them manually in self-contained mode.
if target.contains("musl") {
let srcdir = builder.musl_libdir(target).unwrap_or_else(|| {
panic!("Target {:?} does not have a \"musl-libdir\" key", target.triple)
});
for &obj in &["crt1.o", "Scrt1.o", "rcrt1.o", "crti.o", "crtn.o"] {
copy_and_stamp(
builder,
&libdir_self_contained,
&srcdir,
obj,
&mut target_deps,
DependencyType::TargetSelfContained,
);
}
for &obj in &["crtbegin.o", "crtbeginS.o", "crtend.o", "crtendS.o"] {
let src = compiler_file(builder, builder.cc(target), target, obj);
let target = libdir_self_contained.join(obj);
builder.copy(&src, &target);
target_deps.push((target, DependencyType::TargetSelfContained));
}
} else if target.ends_with("-wasi") {
let srcdir = builder
.wasi_root(target)
.unwrap_or_else(|| {
panic!("Target {:?} does not have a \"wasi-root\" key", target.triple)
})
.join("lib/wasm32-wasi");
for &obj in &["crt1-command.o", "crt1-reactor.o"] {
copy_and_stamp(
builder,
&libdir_self_contained,
&srcdir,
obj,
&mut target_deps,
DependencyType::TargetSelfContained,
);
}
} else if target.contains("windows-gnu") {
for obj in ["crt2.o", "dllcrt2.o"].iter() {
let src = compiler_file(builder, builder.cc(target), target, obj);
let target = libdir_self_contained.join(obj);
builder.copy(&src, &target);
target_deps.push((target, DependencyType::TargetSelfContained));
}
}
target_deps
}
/// Configure cargo to compile the standard library, adding appropriate env vars
/// and such.
pub fn std_cargo(builder: &Builder<'_>, target: TargetSelection, stage: u32, cargo: &mut Cargo) {
if let Some(target) = env::var_os("MACOSX_STD_DEPLOYMENT_TARGET") {
cargo.env("MACOSX_DEPLOYMENT_TARGET", target);
}
// Determine if we're going to compile in optimized C intrinsics to
// the `compiler-builtins` crate. These intrinsics live in LLVM's
// `compiler-rt` repository, but our `src/llvm-project` submodule isn't
// always checked out, so we need to conditionally look for this. (e.g. if
// an external LLVM is used we skip the LLVM submodule checkout).
//
// Note that this shouldn't affect the correctness of `compiler-builtins`,
// but only its speed. Some intrinsics in C haven't been translated to Rust
// yet but that's pretty rare. Other intrinsics have optimized
// implementations in C which have only had slower versions ported to Rust,
// so we favor the C version where we can, but it's not critical.
//
// If `compiler-rt` is available ensure that the `c` feature of the
// `compiler-builtins` crate is enabled and it's configured to learn where
// `compiler-rt` is located.
let compiler_builtins_root = builder.src.join("src/llvm-project/compiler-rt");
let compiler_builtins_c_feature = if compiler_builtins_root.exists() {
// Note that `libprofiler_builtins/build.rs` also computes this so if
// you're changing something here please also change that.
cargo.env("RUST_COMPILER_RT_ROOT", &compiler_builtins_root);
" compiler-builtins-c"
} else {
""
};
if builder.no_std(target) == Some(true) {
let mut features = "compiler-builtins-mem".to_string();
features.push_str(compiler_builtins_c_feature);
// for no-std targets we only compile a few no_std crates
cargo
.args(&["-p", "alloc"])
.arg("--manifest-path")
.arg(builder.src.join("library/alloc/Cargo.toml"))
.arg("--features")
.arg(features);
} else {
let mut features = builder.std_features(target);
features.push_str(compiler_builtins_c_feature);
cargo
.arg("--features")
.arg(features)
.arg("--manifest-path")
.arg(builder.src.join("library/test/Cargo.toml"));
// Help the libc crate compile by assisting it in finding various
// sysroot native libraries.
if target.contains("musl") {
if let Some(p) = builder.musl_libdir(target) {
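// The `native=` prefix restricts this search path to native (non-Rust) libraries.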
let root = format!("native={}", p.to_str().unwrap());
cargo.rustflag("-L").rustflag(&root);
}
}
if target.ends_with("-wasi") {
if let Some(p) = builder.wasi_root(target) {
let root = format!("native={}/lib/wasm32-wasi", p.to_str().unwrap());
cargo.rustflag("-L").rustflag(&root);
Add a new wasm32-unknown-wasi target This commit adds a new wasm32-based target distributed through rustup, supported in the standard library, and implemented in the compiler. The `wasm32-unknown-wasi` target is intended to be a WebAssembly target which matches the [WASI proposal recently announced.][LINK]. In summary the WASI target is an effort to define a standard set of syscalls for WebAssembly modules, allowing WebAssembly modules to not only be portable across architectures but also be portable across environments implementing this standard set of system calls. The wasi target in libstd is still somewhat bare bones. This PR does not fill out the filesystem, networking, threads, etc. Instead it only provides the most basic of integration with the wasi syscalls, enabling features like: * `Instant::now` and `SystemTime::now` work * `env::args` is hooked up * `env::vars` will look up environment variables * `println!` will print to standard out * `process::{exit, abort}` should be hooked up appropriately None of these APIs can work natively on the `wasm32-unknown-unknown` target, but with the assumption of the WASI set of syscalls we're able to provide implementations of these syscalls that engines can implement. Currently the primary engine implementing wasi is [wasmtime], but more will surely emerge! In terms of future development of libstd, I think this is something we'll probably want to discuss. The purpose of the WASI target is to provide a standardized set of syscalls, but it's *also* to provide a standard C sysroot for compiling C/C++ programs. This means it's intended that functions like `read` and `write` are implemented for this target with a relatively standard definition and implementation. It's unclear, therefore, how we want to expose file descriptors and how we'll want to implement system primitives. For example should `std::fs::File` have a libc-based file descriptor underneath it? The raw wasi file descriptor? We'll see! Currently these details are all intentionally hidden and things we can change over time. A `WasiFd` sample struct was added to the standard library as part of this commit, but it's not currently used. It shows how all the wasi syscalls could be ergonomically bound in Rust, and they offer a possible implementation of primitives like `std::fs::File` if we bind wasi file descriptors exactly. Apart from the standard library, there's also the matter of how this target is integrated with respect to its C standard library. The reference sysroot, for example, provides managment of standard unix file descriptors and also standard APIs like `open` (as opposed to the relative `openat` inspiration for the wasi ssycalls). Currently the standard library relies on the C sysroot symbols for operations such as environment management, process exit, and `read`/`write` of stdio fds. We want these operations in Rust to be interoperable with C if they're used in the same process. Put another way, if Rust and C are linked into the same WebAssembly binary they should work together, but that requires that the same C standard library is used. We also, however, want the `wasm32-unknown-wasi` target to be usable-by-default with the Rust compiler without requiring a separate toolchain to get downloaded and configured. With that in mind, there's two modes of operation for the `wasm32-unknown-wasi` target: 1. By default the C standard library is statically provided inside of `liblibc.rlib` distributed as part of the sysroot. 
This means that you can `rustc foo.wasm --target wasm32-unknown-unknown` and you're good to go, a fully workable wasi binary pops out. This is incompatible with linking in C code, however, which may be compiled against a different sysroot than the Rust code was previously compiled against. In this mode the default of `rust-lld` is used to link binaries. 2. For linking with C code, the `-C target-feature=-crt-static` flag needs to be passed. This takes inspiration from the musl target for this flag, but the idea is that you're no longer using the provided static C runtime, but rather one will be provided externally. This flag is intended to also get coupled with an external `clang` compiler configured with its own sysroot. Therefore you'll typically use this flag with `-C linker=/path/to/clang-script-wrapper`. Using this mode the Rust code will continue to reference standard C symbols, but the definition will be pulled in by the linker configured. Alright so that's all the current state of this PR. I suspect we'll definitely want to discuss this before landing of course! This PR is coupled with libc changes as well which I'll be posting shortly. [LINK]: [wasmtime]:
2019-02-13 19:02:22 +01:00
}
}
}
// By default, rustc uses `-Cembed-bitcode=yes`, and Cargo overrides that
// with `-Cembed-bitcode=no` for non-LTO builds. However, libstd must be
// built with bitcode so that the produced rlibs can be used for both LTO
// builds (which use bitcode) and non-LTO builds (which use object code).
// So we override the override here!
//
// But we don't bother for the stage 0 compiler because it's never used
// with LTO.
//
// FIXME: the e2k LLVM backend does not currently support the asm parser
if stage >= 1 && !target.starts_with("e2k") {
cargo.rustflag("-Cembed-bitcode=yes");
}
// By default, rustc does not include unwind tables unless they are required
// for a particular target. They are not required by RISC-V targets, but
// compiling the standard library with them means that users can get
// backtraces without having to recompile the standard library themselves.
//
// This choice was discussed in https://github.com/rust-lang/rust/pull/69890
if target.contains("riscv") {
cargo.rustflag("-Cforce-unwind-tables=yes");
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct StdLink {
pub compiler: Compiler,
pub target_compiler: Compiler,
pub target: TargetSelection,
}
impl Step for StdLink {
type Output = ();
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.never()
}
/// Link all libstd rlibs/dylibs into the sysroot location.
///
/// Links those artifacts generated by `compiler` to the `stage` compiler's
/// sysroot for the specified `host` and `target`.
///
/// Note that this assumes that `compiler` has already generated the libstd
/// libraries for `target`, and this method will find them in the relevant
/// output directory.
fn run(self, builder: &Builder<'_>) {
let compiler = self.compiler;
let target_compiler = self.target_compiler;
let target = self.target;
builder.info(&format!(
"Copying stage{} std from stage{} ({} -> {} / {})",
target_compiler.stage, compiler.stage, &compiler.host, target_compiler.host, target
));
let libdir = builder.sysroot_libdir(target_compiler, target);
let hostdir = builder.sysroot_libdir(target_compiler, compiler.host);
add_to_sysroot(builder, &libdir, &hostdir, &libstd_stamp(builder, compiler, target));
}
}
/// Copies sanitizer runtime libraries into target libdir.
fn copy_sanitizers(
builder: &Builder<'_>,
compiler: &Compiler,
target: TargetSelection,
) -> Vec<PathBuf> {
let runtimes: Vec<native::SanitizerRuntime> = builder.ensure(native::Sanitizers { target });
if builder.config.dry_run {
return Vec::new();
}
let mut target_deps = Vec::new();
let libdir = builder.sysroot_libdir(*compiler, target);
for runtime in &runtimes {
let dst = libdir.join(&runtime.name);
builder.copy(&runtime.path, &dst);
if target == "x86_64-apple-darwin" || target == "aarch64-apple-darwin" {
// Update the library's install name to reflect that it has been renamed.
apple_darwin_update_library_name(&dst, &format!("@rpath/{}", &runtime.name));
// Renaming the install name invalidates the file's code signature,
// so we sign it again.
apple_darwin_sign_file(&dst);
}
target_deps.push(dst);
}
target_deps
}
fn apple_darwin_update_library_name(library_path: &Path, new_name: &str) {
let status = Command::new("install_name_tool")
.arg("-id")
.arg(new_name)
.arg(library_path)
.status()
.expect("failed to execute `install_name_tool`");
assert!(status.success());
}
fn apple_darwin_sign_file(file_path: &Path) {
let status = Command::new("codesign")
.arg("-f") // Force to rewrite the existing signature
.arg("-s")
.arg("-")
.arg(file_path)
.status()
.expect("failed to execute `codesign`");
assert!(status.success());
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct StartupObjects {
pub compiler: Compiler,
pub target: TargetSelection,
}
impl Step for StartupObjects {
type Output = Vec<(PathBuf, DependencyType)>;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("library/rtstartup")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(StartupObjects {
compiler: run.builder.compiler(run.builder.top_stage, run.build_triple()),
target: run.target,
});
}
/// Builds and prepares startup objects like rsbegin.o and rsend.o.
///
/// These are primarily used on Windows right now for linking executables/dlls.
/// They don't require any library support as they're just plain old object
/// files, so we just use the nightly snapshot compiler to always build them (as
/// no other compilers are guaranteed to be available).
fn run(self, builder: &Builder<'_>) -> Vec<(PathBuf, DependencyType)> {
let for_compiler = self.compiler;
let target = self.target;
if !target.contains("windows-gnu") {
return vec![];
}
let mut target_deps = vec![];
let src_dir = &builder.src.join("library").join("rtstartup");
let dst_dir = &builder.native_dir(target).join("rtstartup");
let sysroot_dir = &builder.sysroot_libdir(for_compiler, target);
t!(fs::create_dir_all(dst_dir));
for file in &["rsbegin", "rsend"] {
let src_file = &src_dir.join(file.to_string() + ".rs");
let dst_file = &dst_dir.join(file.to_string() + ".o");
if !up_to_date(src_file, dst_file) {
let mut cmd = Command::new(&builder.initial_rustc);
builder.run(
cmd.env("RUSTC_BOOTSTRAP", "1")
.arg("--cfg")
.arg("bootstrap")
.arg("--target")
.arg(target.rustc_target_arg())
.arg("--emit=obj")
.arg("-o")
.arg(dst_file)
.arg(src_file),
);
}
let target = sysroot_dir.join((*file).to_string() + ".o");
builder.copy(dst_file, &target);
target_deps.push((target, DependencyType::Target));
}
target_deps
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rustc {
pub target: TargetSelection,
pub compiler: Compiler,
}
impl Step for Rustc {
type Output = ();
const ONLY_HOSTS: bool = true;
const DEFAULT: bool = false;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("compiler/rustc")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Rustc {
compiler: run.builder.compiler(run.builder.top_stage, run.build_triple()),
target: run.target,
});
}
/// Builds the compiler.
///
/// This will build the compiler for a particular stage of the build using
/// the `compiler` targeting the `target` architecture. The artifacts
/// created will also be linked into the sysroot directory.
fn run(self, builder: &Builder<'_>) {
let compiler = self.compiler;
let target = self.target;
if builder.config.download_rustc {
// Copy the existing artifacts instead of rebuilding them.
// NOTE: this path is only taken for tools linking to rustc-dev.
builder.ensure(Sysroot { compiler });
return;
}
builder.ensure(Std { compiler, target });
if builder.config.keep_stage.contains(&compiler.stage) {
builder.info("Warning: Using a potentially old librustc. This may not behave well.");
builder.info("Warning: Use `--keep-stage-std` if you want to rebuild the compiler when it changes");
builder.ensure(RustcLink { compiler, target_compiler: compiler, target });
return;
}
let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target);
if compiler_to_use != compiler {
builder.ensure(Rustc { compiler: compiler_to_use, target });
builder
.info(&format!("Uplifting stage1 rustc ({} -> {})", builder.config.build, target));
builder.ensure(RustcLink {
compiler: compiler_to_use,
target_compiler: compiler,
target,
});
return;
}
// Ensure that build scripts and proc macros have a std / libproc_macro to link against.
builder.ensure(Std {
compiler: builder.compiler(self.compiler.stage, builder.config.build),
target: builder.config.build,
});
let mut cargo = builder.cargo(compiler, Mode::Rustc, SourceType::InTree, target, "build");
rustc_cargo(builder, &mut cargo, target);
if builder.config.rust_profile_use.is_some()
&& builder.config.rust_profile_generate.is_some()
{
panic!("Cannot use and generate PGO profiles at the same time");
}
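// PGO flags are only applied when building the stage 1 compiler. `is_collecting` records
// whether this build generates instrumentation profiles, which requires the extra
// path-stripping flag below (see the comment there) so that recorded source paths are
// relative rather than absolute.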
let is_collecting = if let Some(path) = &builder.config.rust_profile_generate {
if compiler.stage == 1 {
cargo.rustflag(&format!("-Cprofile-generate={}", path));
// Apparently necessary to avoid overflowing the counters during
// a Cargo build profile
cargo.rustflag("-Cllvm-args=-vp-counters-per-site=4");
true
} else {
false
}
} else if let Some(path) = &builder.config.rust_profile_use {
if compiler.stage == 1 {
cargo.rustflag(&format!("-Cprofile-use={}", path));
cargo.rustflag("-Cllvm-args=-pgo-warn-missing-function");
true
} else {
false
}
} else {
false
};
if is_collecting {
// Ensure paths to Rust sources are relative, not absolute.
cargo.rustflag(&format!(
"-Cllvm-args=-static-func-strip-dirname-prefix={}",
builder.config.src.components().count()
));
}
builder.info(&format!(
"Building stage{} compiler artifacts ({} -> {})",
compiler.stage, &compiler.host, target
));
run_cargo(
builder,
cargo,
vec![],
&librustc_stamp(builder, compiler, target),
vec![],
false,
);
builder.ensure(RustcLink {
compiler: builder.compiler(compiler.stage, builder.config.build),
target_compiler: compiler,
target,
});
}
}
pub fn rustc_cargo(builder: &Builder<'_>, cargo: &mut Cargo, target: TargetSelection) {
cargo
.arg("--features")
.arg(builder.rustc_features())
.arg("--manifest-path")
.arg(builder.src.join("compiler/rustc/Cargo.toml"));
rustc_cargo_env(builder, cargo, target);
}
pub fn rustc_cargo_env(builder: &Builder<'_>, cargo: &mut Cargo, target: TargetSelection) {
// Set some configuration variables picked up by build scripts and
// the compiler alike
cargo
.env("CFG_RELEASE", builder.rust_release())
.env("CFG_RELEASE_CHANNEL", &builder.config.channel)
.env("CFG_VERSION", builder.rust_version())
.env("CFG_PREFIX", builder.config.prefix.clone().unwrap_or_default());
let libdir_relative = builder.config.libdir_relative().unwrap_or_else(|| Path::new("lib"));
cargo.env("CFG_LIBDIR_RELATIVE", libdir_relative);
if let Some(ref ver_date) = builder.rust_info.commit_date() {
cargo.env("CFG_VER_DATE", ver_date);
}
if let Some(ref ver_hash) = builder.rust_info.sha() {
cargo.env("CFG_VER_HASH", ver_hash);
}
if !builder.unstable_features() {
cargo.env("CFG_DISABLE_UNSTABLE_FEATURES", "1");
}
if let Some(ref s) = builder.config.rustc_default_linker {
cargo.env("CFG_DEFAULT_LINKER", s);
}
if builder.config.rustc_parallel {
cargo.rustflag("--cfg=parallel_compiler");
}
if builder.config.rust_verify_llvm_ir {
cargo.env("RUSTC_VERIFY_LLVM_IR", "1");
}
// Pass down configuration from the LLVM build into the build of
// rustc_llvm and rustc_codegen_llvm.
//
// Note that this is disabled if LLVM itself is disabled or we're in a check
// build. If we are in a check build we still go ahead here presuming we've
// detected that LLVM is already built and good to go, which helps prevent
// busting caches (e.g. like #71152).
if builder.config.llvm_enabled()
&& (builder.kind != Kind::Check
|| crate::native::prebuilt_llvm_config(builder, target).is_ok())
{
if builder.is_rust_llvm(target) {
cargo.env("LLVM_RUSTLLVM", "1");
}
let llvm_config = builder.ensure(native::Llvm { target });
cargo.env("LLVM_CONFIG", &llvm_config);
let target_config = builder.config.target_config.get(&target);
if let Some(s) = target_config.and_then(|c| c.llvm_config.as_ref()) {
cargo.env("CFG_LLVM_ROOT", s);
}
// Some LLVM linker flags (-L and -l) may be needed to link rustc_llvm.
if let Some(ref s) = builder.config.llvm_ldflags {
cargo.env("LLVM_LINKER_FLAGS", s);
}
// Building with a static libstdc++ is only supported on Linux right now,
// not for MSVC, macOS, or FreeBSD.
if builder.config.llvm_static_stdcpp
&& !target.contains("freebsd")
&& !target.contains("msvc")
&& !target.contains("apple")
{
let file = compiler_file(builder, builder.cxx(target).unwrap(), target, "libstdc++.a");
cargo.env("LLVM_STATIC_STDCPP", file);
}
if builder.config.llvm_link_shared {
cargo.env("LLVM_LINK_SHARED", "1");
}
if builder.config.llvm_use_libcxx {
cargo.env("LLVM_USE_LIBCXX", "1");
}
if builder.config.llvm_optimize && !builder.config.llvm_release_debuginfo {
cargo.env("LLVM_NDEBUG", "1");
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct RustcLink {
pub compiler: Compiler,
pub target_compiler: Compiler,
pub target: TargetSelection,
}
impl Step for RustcLink {
type Output = ();
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.never()
}
/// Same as `std_link`, only for librustc
fn run(self, builder: &Builder<'_>) {
let compiler = self.compiler;
let target_compiler = self.target_compiler;
let target = self.target;
builder.info(&format!(
"Copying stage{} rustc from stage{} ({} -> {} / {})",
target_compiler.stage, compiler.stage, &compiler.host, target_compiler.host, target
));
add_to_sysroot(
builder,
&builder.sysroot_libdir(target_compiler, target),
&builder.sysroot_libdir(target_compiler, compiler.host),
&librustc_stamp(builder, compiler, target),
);
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CodegenBackend {
pub target: TargetSelection,
pub compiler: Compiler,
pub backend: Interned<String>,
}
impl Step for CodegenBackend {
type Output = ();
const ONLY_HOSTS: bool = true;
// Only the backends specified in the `codegen-backends` entry of `config.toml` are built.
const DEFAULT: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("compiler/rustc_codegen_cranelift")
}
fn make_run(run: RunConfig<'_>) {
for &backend in &run.builder.config.rust_codegen_backends {
if backend == "llvm" {
continue; // Already built as part of rustc
}
run.builder.ensure(CodegenBackend {
target: run.target,
compiler: run.builder.compiler(run.builder.top_stage, run.build_triple()),
backend,
});
}
}
fn run(self, builder: &Builder<'_>) {
let compiler = self.compiler;
let target = self.target;
let backend = self.backend;
builder.ensure(Rustc { compiler, target });
if builder.config.keep_stage.contains(&compiler.stage) {
builder.info(
"Warning: Using a potentially old codegen backend. \
This may not behave well.",
);
// Codegen backends are linked separately from this step today, so we don't do
// anything here.
return;
}
let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target);
if compiler_to_use != compiler {
builder.ensure(CodegenBackend { compiler: compiler_to_use, target, backend });
return;
}
let out_dir = builder.cargo_out(compiler, Mode::Codegen, target);
let mut cargo =
builder.cargo(compiler, Mode::Codegen, SourceType::Submodule, target, "build");
cargo
.arg("--manifest-path")
.arg(builder.src.join(format!("compiler/rustc_codegen_{}/Cargo.toml", backend)));
rustc_cargo_env(builder, &mut cargo, target);
let tmp_stamp = out_dir.join(".tmp.stamp");
let files = run_cargo(builder, cargo, vec![], &tmp_stamp, vec![], false);
if builder.config.dry_run {
return;
}
let mut files = files.into_iter().filter(|f| {
let filename = f.file_name().unwrap().to_str().unwrap();
is_dylib(filename) && filename.contains("rustc_codegen_")
});
let codegen_backend = match files.next() {
Some(f) => f,
None => panic!("no dylibs built for codegen backend?"),
};
if let Some(f) = files.next() {
panic!(
"codegen backend built two dylibs:\n{}\n{}",
codegen_backend.display(),
f.display()
);
}
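// Record the path of the freshly built backend dylib in a per-backend stamp
// file; `copy_codegen_backends_to_sysroot` below reads this stamp back when
// assembling the final compiler.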
let stamp = codegen_backend_stamp(builder, compiler, target, backend);
let codegen_backend = codegen_backend.to_str().unwrap();
t!(fs::write(&stamp, &codegen_backend));
}
}
/// Creates the `codegen-backends` folder for a compiler that's about to be
/// assembled as a complete compiler.
///
/// This will take the codegen artifacts produced by `compiler` and link them
/// into an appropriate location for `target_compiler` to be a functional
/// compiler.
fn copy_codegen_backends_to_sysroot(
builder: &Builder<'_>,
compiler: Compiler,
target_compiler: Compiler,
) {
let target = target_compiler.host;
// Note that this step is different than all the other `*Link` steps in
// that it's not assembling a bunch of libraries but rather is primarily
// moving the codegen backend into place. The codegen backend of rustc is
// not linked into the main compiler by default but is rather dynamically
// selected at runtime for inclusion.
//
// Here we're looking for the output dylib of the `CodegenBackend` step and
// we're copying that into the `codegen-backends` folder.
let dst = builder.sysroot_codegen_backends(target_compiler);
t!(fs::create_dir_all(&dst), dst);
if builder.config.dry_run {
return;
}
for backend in builder.config.rust_codegen_backends.iter() {
if backend == "llvm" {
continue; // Already built as part of rustc
}
let stamp = codegen_backend_stamp(builder, compiler, target, *backend);
let dylib = t!(fs::read_to_string(&stamp));
let file = Path::new(&dylib);
let filename = file.file_name().unwrap().to_str().unwrap();
// change `librustc_codegen_cranelift-xxxxxx.so` to
// `librustc_codegen_cranelift-release.so`
let target_filename = {
let dash = filename.find('-').unwrap();
let dot = filename.find('.').unwrap();
format!("{}-{}{}", &filename[..dash], builder.rust_release(), &filename[dot..])
};
builder.copy(&file, &dst.join(target_filename));
}
}
/// Cargo's output path for the standard library in a given stage, compiled
/// by a particular compiler for the specified target.
pub fn libstd_stamp(builder: &Builder<'_>, compiler: Compiler, target: TargetSelection) -> PathBuf {
builder.cargo_out(compiler, Mode::Std, target).join(".libstd.stamp")
}
/// Cargo's output path for librustc in a given stage, compiled by a particular
/// compiler for the specified target.
pub fn librustc_stamp(
builder: &Builder<'_>,
compiler: Compiler,
target: TargetSelection,
) -> PathBuf {
builder.cargo_out(compiler, Mode::Rustc, target).join(".librustc.stamp")
}
/// Cargo's output path for librustc_codegen_llvm in a given stage, compiled by a particular
/// compiler for the specified target and backend.
fn codegen_backend_stamp(
builder: &Builder<'_>,
compiler: Compiler,
target: TargetSelection,
backend: Interned<String>,
) -> PathBuf {
builder
.cargo_out(compiler, Mode::Codegen, target)
.join(format!(".librustc_codegen_{}.stamp", backend))
}
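/// Invokes `compiler` with `-print-file-name=<file>` (plus the usual cflags for
/// `target`) and returns the path it reports, e.g. the location of `libstdc++.a`
/// as queried in `rustc_cargo_env` above.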
pub fn compiler_file(
builder: &Builder<'_>,
compiler: &Path,
target: TargetSelection,
file: &str,
) -> PathBuf {
let mut cmd = Command::new(compiler);
cmd.args(builder.cflags(target, GitRepo::Rustc));
cmd.arg(format!("-print-file-name={}", file));
let out = output(&mut cmd);
PathBuf::from(out.trim())
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Sysroot {
pub compiler: Compiler,
}
impl Step for Sysroot {
type Output = Interned<PathBuf>;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.never()
}
/// Returns the sysroot for the `compiler` specified that *this build system
/// generates*.
///
/// That is, the sysroot for the stage0 compiler is not what the compiler
/// thinks it is by default, but it's the same as the default for stages
/// 1-3.
fn run(self, builder: &Builder<'_>) -> Interned<PathBuf> {
let compiler = self.compiler;
let sysroot = if compiler.stage == 0 {
builder.out.join(&compiler.host.triple).join("stage0-sysroot")
} else {
builder.out.join(&compiler.host.triple).join(format!("stage{}", compiler.stage))
};
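// For example (illustrative), with the default output directory this yields
// `build/$host/stage0-sysroot` for the stage 0 compiler and
// `build/$host/stage1`, `build/$host/stage2`, ... for later stages.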
let _ = fs::remove_dir_all(&sysroot);
t!(fs::create_dir_all(&sysroot));
// If we're downloading a compiler from CI, we can use the same compiler for all stages other than 0.
if builder.config.download_rustc {
assert_eq!(
builder.config.build, compiler.host,
"Cross-compiling is not yet supported with `download-rustc`",
);
// Copy the compiler into the correct sysroot.
let stage0_dir = builder.config.out.join(&*builder.config.build.triple).join("stage0");
builder.cp_r(&stage0_dir, &sysroot);
return INTERNER.intern_path(sysroot);
}
// Symlink the source root into the same location inside the sysroot,
// where `rust-src` component would go (`$sysroot/lib/rustlib/src/rust`),
// so that any tools relying on `rust-src` also work for local builds,
// and also for translating the virtual `/rustc/$hash` back to the real
// directory (for running tests with `rust.remap-debuginfo = true`).
let sysroot_lib_rustlib_src = sysroot.join("lib/rustlib/src");
t!(fs::create_dir_all(&sysroot_lib_rustlib_src));
let sysroot_lib_rustlib_src_rust = sysroot_lib_rustlib_src.join("rust");
if let Err(e) = symlink_dir(&builder.config, &builder.src, &sysroot_lib_rustlib_src_rust) {
eprintln!(
"warning: creating symbolic link `{}` to `{}` failed with {}",
sysroot_lib_rustlib_src_rust.display(),
builder.src.display(),
e,
);
if builder.config.rust_remap_debuginfo {
eprintln!(
"warning: some `src/test/ui` tests will fail when lacking `{}`",
sysroot_lib_rustlib_src_rust.display(),
);
}
}
INTERNER.intern_path(sysroot)
}
}
#[derive(Debug, Copy, PartialOrd, Ord, Clone, PartialEq, Eq, Hash)]
pub struct Assemble {
/// The compiler which we will produce in this step. Assemble itself will
/// take care of ensuring that the necessary prerequisites to do so exist,
/// that is, this target can be a stage2 compiler and Assemble will build
/// previous stages for you.
pub target_compiler: Compiler,
}
impl Step for Assemble {
type Output = Compiler;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.never()
}
/// Prepare a new compiler from the artifacts in `stage`
///
/// This will assemble a compiler in `build/$host/stage$stage`. The compiler
/// must have been previously produced by the `stage - 1` builder.build
/// compiler.
fn run(self, builder: &Builder<'_>) -> Compiler {
let target_compiler = self.target_compiler;
if target_compiler.stage == 0 {
assert_eq!(
builder.config.build, target_compiler.host,
"Cannot obtain compiler for non-native build triple at stage 0"
);
// The stage 0 compiler for the build triple is always pre-built.
return target_compiler;
}
// Get the compiler that we'll use to bootstrap ourselves.
//
// Note that this is where the recursive nature of the bootstrap
// happens, as this will request the previous stage's compiler on
// downwards to stage 0.
//
// Also note that we're building a compiler for the host platform. We
// only assume that we can run `build` artifacts, which means that to
// produce some other architecture compiler we need to start from
// `build` to get there.
//
// FIXME: It may be faster if we build just a stage 1 compiler and then
// use that to bootstrap this compiler forward.
let build_compiler = builder.compiler(target_compiler.stage - 1, builder.config.build);
// If we're downloading a compiler from CI, we can use the same compiler for all stages other than 0.
if builder.config.download_rustc {
builder.ensure(Sysroot { compiler: target_compiler });
return target_compiler;
}
// Build the libraries for this compiler to link to (i.e., the libraries
// it uses at runtime). NOTE: Crates the target compiler compiles don't
// link to these. (FIXME: Is that correct? It seems to be correct most
// of the time but I think we do link to these for stage2/bin compilers
// when not performing a full bootstrap).
builder.ensure(Rustc { compiler: build_compiler, target: target_compiler.host });
for &backend in builder.config.rust_codegen_backends.iter() {
if backend == "llvm" {
continue; // Already built as part of rustc
}
builder.ensure(CodegenBackend {
compiler: build_compiler,
target: target_compiler.host,
backend,
});
}
let lld_install = if builder.config.lld_enabled {
Some(builder.ensure(native::Lld { target: target_compiler.host }))
} else {
None
};
let stage = target_compiler.stage;
let host = target_compiler.host;
builder.info(&format!("Assembling stage{} compiler ({})", stage, host));
// Link in all dylibs to the libdir
let stamp = librustc_stamp(builder, build_compiler, target_compiler.host);
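// Collect the file names recorded as host dependencies in the stamp; these are
// the proc macros used while building rustc, and they are skipped below so that
// only target dylibs and debug info end up in the new compiler's libdir.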
let proc_macros = builder
.read_stamp_file(&stamp)
.into_iter()
.filter_map(|(path, dependency_type)| {
if dependency_type == DependencyType::Host {
Some(path.file_name().unwrap().to_owned().into_string().unwrap())
} else {
None
}
})
.collect::<HashSet<_>>();
let sysroot = builder.sysroot(target_compiler);
let rustc_libdir = builder.rustc_libdir(target_compiler);
t!(fs::create_dir_all(&rustc_libdir));
let src_libdir = builder.sysroot_libdir(build_compiler, host);
for f in builder.read_dir(&src_libdir) {
let filename = f.file_name().into_string().unwrap();
if (is_dylib(&filename) || is_debug_info(&filename)) && !proc_macros.contains(&filename)
{
builder.copy(&f.path(), &rustc_libdir.join(&filename));
}
}
copy_codegen_backends_to_sysroot(builder, build_compiler, target_compiler);
// We prepend this bin directory to the user PATH when linking Rust binaries. To
// avoid shadowing the system LLD we rename the LLD we provide to `rust-lld`.
let libdir = builder.sysroot_libdir(target_compiler, target_compiler.host);
let libdir_bin = libdir.parent().unwrap().join("bin");
t!(fs::create_dir_all(&libdir_bin));
if let Some(lld_install) = lld_install {
let src_exe = exe("lld", target_compiler.host);
let dst_exe = exe("rust-lld", target_compiler.host);
builder.copy(&lld_install.join("bin").join(&src_exe), &libdir_bin.join(&dst_exe));
}
// Similarly, copy `llvm-dwp` into libdir for Split DWARF. Only copy it when the LLVM
// backend is used to avoid unnecessarily building LLVM and because LLVM is not checked
// out by default when the LLVM backend is not enabled.
if builder.config.rust_codegen_backends.contains(&INTERNER.intern_str("llvm")) {
let src_exe = exe("llvm-dwp", target_compiler.host);
let dst_exe = exe("rust-llvm-dwp", target_compiler.host);
let llvm_config_bin = builder.ensure(native::Llvm { target: target_compiler.host });
if !builder.config.dry_run {
let llvm_bin_dir = output(Command::new(llvm_config_bin).arg("--bindir"));
let llvm_bin_dir = Path::new(llvm_bin_dir.trim());
builder.copy(&llvm_bin_dir.join(&src_exe), &libdir_bin.join(&dst_exe));
}
}
// Ensure that `libLLVM.so` ends up in the newly built compiler directory,
// so that it can be found when the newly built `rustc` is run.
dist::maybe_install_llvm_runtime(builder, target_compiler.host, &sysroot);
dist::maybe_install_llvm_target(builder, target_compiler.host, &sysroot);
// Link the compiler binary itself into place
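// (Cargo emits the driver binary as `rustc-main`; it is copied into the
// sysroot's `bin` directory under the name that `builder.rustc` expects.)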
let out_dir = builder.cargo_out(build_compiler, Mode::Rustc, host);
let rustc = out_dir.join(exe("rustc-main", host));
let bindir = sysroot.join("bin");
t!(fs::create_dir_all(&bindir));
let compiler = builder.rustc(target_compiler);
builder.copy(&rustc, &compiler);
target_compiler
}
}
/// Link some files into a rustc sysroot.
///
/// For a particular stage this will link the files listed in `stamp` into the
/// `sysroot_dst` provided.
pub fn add_to_sysroot(
builder: &Builder<'_>,
sysroot_dst: &Path,
sysroot_host_dst: &Path,
stamp: &Path,
) {
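// Three destinations are used: host dependencies (proc macros) go into the
// host libdir, regular target dependencies into the target libdir, and
// self-contained objects into a `self-contained` subdirectory of it.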
let self_contained_dst = &sysroot_dst.join("self-contained");
t!(fs::create_dir_all(&sysroot_dst));
t!(fs::create_dir_all(&sysroot_host_dst));
t!(fs::create_dir_all(&self_contained_dst));
for (path, dependency_type) in builder.read_stamp_file(stamp) {
let dst = match dependency_type {
DependencyType::Host => sysroot_host_dst,
DependencyType::Target => sysroot_dst,
DependencyType::TargetSelfContained => self_contained_dst,
};
builder.copy(&path, &dst.join(path.file_name().unwrap()));
}
}
pub fn run_cargo(
builder: &Builder<'_>,
cargo: Cargo,
tail_args: Vec<String>,
stamp: &Path,
additional_target_deps: Vec<(PathBuf, DependencyType)>,
is_check: bool,
) -> Vec<PathBuf> {
if builder.config.dry_run {
return Vec::new();
}
// `target_root_dir` looks like $dir/$target/release
let target_root_dir = stamp.parent().unwrap();
// `target_deps_dir` looks like $dir/$target/release/deps
let target_deps_dir = target_root_dir.join("deps");
// `host_root_dir` looks like $dir/release
let host_root_dir = target_root_dir
.parent()
.unwrap() // chop off `release`
.parent()
.unwrap() // chop off `$target`
.join(target_root_dir.file_name().unwrap());
// Spawn Cargo slurping up its JSON output. We'll start building up the
// `deps` array of all files it generated along with a `toplevel` array of
// files we need to probe for later.
let mut deps = Vec::new();
let mut toplevel = Vec::new();
let ok = stream_cargo(builder, cargo, tail_args, &mut |msg| {
let (filenames, crate_types) = match msg {
CargoMessage::CompilerArtifact {
filenames,
target: CargoTarget { crate_types },
..
} => (filenames, crate_types),
_ => return,
};
for filename in filenames {
// Skip files like executables
if !(filename.ends_with(".rlib")
|| filename.ends_with(".lib")
|| filename.ends_with(".a")
|| is_debug_info(&filename)
|| is_dylib(&filename)
|| (is_check && filename.ends_with(".rmeta")))
{
continue;
}
let filename = Path::new(&*filename);
// If this was an output file in the "host dir" we don't actually
// worry about it; it's not relevant for us.
if filename.starts_with(&host_root_dir) {
// Unless it's a proc macro used in the compiler
if crate_types.iter().any(|t| t == "proc-macro") {
deps.push((filename.to_path_buf(), DependencyType::Host));
}
continue;
}
// If this was output in the `deps` dir then this is a precise file
// name (hash included) so we start tracking it.
if filename.starts_with(&target_deps_dir) {
deps.push((filename.to_path_buf(), DependencyType::Target));
continue;
}
// Otherwise this was a "top level artifact" which right now doesn't
// have a hash in the name, but there's a version of this file in
// the `deps` folder which *does* have a hash in the name. That's
// the one we'll want, so we'll probe for it later.
//
// We do not use `Path::file_stem` or `Path::extension` here,
// because some generated files may have multiple extensions e.g.
// `std-<hash>.dll.lib` on Windows. The aforementioned methods only
// split the file name by the last extension (`.lib`) while we need
// to split by all extensions (`.dll.lib`).
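// For example (illustrative), `std-1234abcd.dll.lib` yields the stem
// `std-1234abcd` and the extension `dll.lib`.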
let expected_len = t!(filename.metadata()).len();
let filename = filename.file_name().unwrap().to_str().unwrap();
let mut parts = filename.splitn(2, '.');
let file_stem = parts.next().unwrap().to_owned();
let extension = parts.next().unwrap().to_owned();
toplevel.push((file_stem, extension, expected_len));
}
});
if !ok {
exit(1);
}
// Ok now we need to actually find all the files listed in `toplevel`. We've
// got a list of prefix/extensions and we basically just need to find the
// most recent file in the `deps` folder corresponding to each one.
let contents = t!(target_deps_dir.read_dir())
.map(|e| t!(e))
.map(|e| (e.path(), e.file_name().into_string().unwrap(), t!(e.metadata())))
.collect::<Vec<_>>();
for (prefix, extension, expected_len) in toplevel {
let candidates = contents.iter().filter(|&&(_, ref filename, ref meta)| {
meta.len() == expected_len
&& filename
.strip_prefix(&prefix[..])
.map(|s| s.starts_with('-') && s.ends_with(&extension[..]))
.unwrap_or(false)
});
let max = candidates
.max_by_key(|&&(_, _, ref metadata)| FileTime::from_last_modification_time(metadata));
let path_to_add = match max {
Some(triple) => triple.0.to_str().unwrap(),
None => panic!("no output generated for {:?} {:?}", prefix, extension),
};
if is_dylib(path_to_add) {
let candidate = format!("{}.lib", path_to_add);
let candidate = PathBuf::from(candidate);
if candidate.exists() {
deps.push((candidate, DependencyType::Target));
}
}
deps.push((path_to_add.into(), DependencyType::Target));
}
deps.extend(additional_target_deps);
deps.sort();
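// Serialize the final dependency list into the stamp file. Each entry is a
// one-byte dependency-type tag (`h`, `t`, or `s`) followed by the path and a
// NUL terminator; the builder's `read_stamp_file` parses this format back,
// e.g. in `add_to_sysroot` above.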
let mut new_contents = Vec::new();
for (dep, dependency_type) in deps.iter() {
new_contents.extend(match *dependency_type {
DependencyType::Host => b"h",
DependencyType::Target => b"t",
DependencyType::TargetSelfContained => b"s",
});
new_contents.extend(dep.to_str().unwrap().as_bytes());
new_contents.extend(b"\0");
}
t!(fs::write(&stamp, &new_contents));
deps.into_iter().map(|(d, _)| d).collect()
}
pub fn stream_cargo(
builder: &Builder<'_>,
cargo: Cargo,
tail_args: Vec<String>,
cb: &mut dyn FnMut(CargoMessage<'_>),
) -> bool {
let mut cargo = Command::from(cargo);
if builder.config.dry_run {
return true;
}
// Instruct Cargo to give us json messages on stdout, while leaving
// stderr untouched so we still get those pretty colors on the terminal.
let mut message_format = if builder.config.json_output {
String::from("json")
} else {
String::from("json-render-diagnostics")
};
if let Some(s) = &builder.config.rustc_error_format {
message_format.push_str(",json-diagnostic-");
message_format.push_str(s);
}
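// With the default configuration this ends up as, e.g. (illustrative),
// `--message-format json-render-diagnostics`, optionally suffixed with a
// `,json-diagnostic-*` flavour when `rustc_error_format` is set.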
cargo.arg("--message-format").arg(message_format).stdout(Stdio::piped());
for arg in tail_args {
cargo.arg(arg);
}
builder.verbose(&format!("running: {:?}", cargo));
let mut child = match cargo.spawn() {
Ok(child) => child,
Err(e) => panic!("failed to execute command: {:?}\nerror: {}", cargo, e),
};
// Read Cargo's JSON output line by line, forwarding raw JSON to stdout when
// requested and handing each successfully parsed message to the callback.
let stdout = BufReader::new(child.stdout.take().unwrap());
for line in stdout.lines() {
let line = t!(line);
match serde_json::from_str::<CargoMessage<'_>>(&line) {
Ok(msg) => {
if builder.config.json_output {
// Forward JSON to stdout.
println!("{}", line);
}
cb(msg)
}
// If this was informational, just print it out and continue
Err(_) => println!("{}", line),
}
}
// Make sure Cargo actually succeeded after we read all of its stdout.
let status = t!(child.wait());
if !status.success() {
eprintln!(
"command did not execute successfully: {:?}\n\
expected success, got: {}",
cargo, status
);
}
status.success()
}
#[derive(Deserialize)]
pub struct CargoTarget<'a> {
crate_types: Vec<Cow<'a, str>>,
}
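// A `compiler-artifact` message from Cargo deserializes into the variant below;
// roughly (illustrative, unrelated fields omitted):
//
//   {"reason":"compiler-artifact","package_id":"...","features":[],
//    "filenames":["/.../libfoo-<hash>.rlib"],"target":{"crate_types":["lib"]}}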
#[derive(Deserialize)]
#[serde(tag = "reason", rename_all = "kebab-case")]
pub enum CargoMessage<'a> {
CompilerArtifact {
package_id: Cow<'a, str>,
features: Vec<Cow<'a, str>>,
filenames: Vec<Cow<'a, str>>,
target: CargoTarget<'a>,
},
BuildScriptExecuted {
package_id: Cow<'a, str>,
},
BuildFinished {
success: bool,
},
}