Auto merge of #72020 - alexcrichton:fix-incremental-linker-plugin-lto, r=oli-obk

Fix disagreement about CGU reuse and LTO

This commit fixes an issue where the codegen backend's selection of LTO
disagreed with what the CGU reuse logic later thought had been done. As
discovered in #72006, we have a longstanding issue where an incremental
build with `-Clinker-plugin-lto` in optimized mode always panics on the
second compilation. The underlying issue turned out to be that the
production of the original artifact determined that LTO should not be
done (because it's being postponed to the linker), but the CGU reuse
selection thought that LTO was done, so it tried to load pre-LTO
artifacts which were never generated.
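
Roughly, the pre-fix disagreement can be modelled with the following
self-contained sketch (simplified types and flags chosen for
illustration; this is not rustc's actual API):

    // Minimal model of the pre-fix disagreement.
    #[derive(Clone, Copy, PartialEq)]
    enum Lto {
        No,
        ThinLocal,
        Thin,
        Fat,
    }

    #[derive(Debug, PartialEq)]
    enum CguReuse {
        PreLto,
        PostLto,
    }

    // Backend side (old `execute_optimize_work_item`): with
    // `-Clinker-plugin-lto`, thin LTO is skipped locally, so no pre-LTO
    // artifact is ever saved to disk.
    fn backend_runs_thin_lto(lto: Lto, linker_plugin_lto: bool) -> bool {
        matches!(lto, Lto::ThinLocal | Lto::Thin) && !linker_plugin_lto
    }

    // Reuse side (old `determine_cgu_reuse`): only consulted the session
    // LTO mode.
    fn old_cgu_reuse(lto: Lto) -> CguReuse {
        if lto != Lto::No { CguReuse::PreLto } else { CguReuse::PostLto }
    }

    fn main() {
        // `-O` with multiple CGUs defaults to local ThinLTO: the backend
        // skips it because the linker will do LTO, yet the reuse logic
        // still expects pre-LTO artifacts, which is why the second
        // incremental compilation panicked.
        let lto = Lto::ThinLocal;
        assert!(!backend_runs_thin_lto(lto, true));
        assert_eq!(old_cgu_reuse(lto), CguReuse::PreLto);
    }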

The fix here is to ensure that the logic which determines what kind of
LTO will be done for a codegen unit is shared between the CGU reuse
decision and the backend actually doing LTO. This means they'll both
agree on whether the previous compilation did indeed produce
incremental pre-LTO artifacts.

Closes #72006
bors 2020-05-10 04:41:01 +00:00
commit b3269536d0
4 changed files with 75 additions and 39 deletions


@@ -709,12 +709,53 @@ fn execute_work_item<B: ExtraBackendMethods>(
}
// Actual LTO type we end up choosing based on multiple factors.
enum ComputedLtoType {
pub enum ComputedLtoType {
No,
Thin,
Fat,
}
pub fn compute_per_cgu_lto_type(
sess_lto: &Lto,
opts: &config::Options,
sess_crate_types: &[CrateType],
module_kind: ModuleKind,
) -> ComputedLtoType {
// Metadata modules never participate in LTO regardless of the lto
// settings.
if module_kind == ModuleKind::Metadata {
return ComputedLtoType::No;
}
// If the linker does LTO, we don't have to do it. Note that we
// keep doing full LTO, if it is requested, as not to break the
// assumption that the output will be a single module.
let linker_does_lto = opts.cg.linker_plugin_lto.enabled();
// When we're automatically doing ThinLTO for multi-codegen-unit
// builds we don't actually want to LTO the allocator modules if
// it shows up. This is due to various linker shenanigans that
// we'll encounter later.
let is_allocator = module_kind == ModuleKind::Allocator;
// We ignore a request for full crate graph LTO if the crate type
// is only an rlib, as there is no full crate graph to process,
// that'll happen later.
//
// This use case currently comes up primarily for targets that
// require LTO so the request for LTO is always unconditionally
// passed down to the backend, but we don't actually want to do
// anything about it yet until we've got a final product.
let is_rlib = sess_crate_types.len() == 1 && sess_crate_types[0] == CrateType::Rlib;
match sess_lto {
Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
Lto::Fat if !is_rlib => ComputedLtoType::Fat,
_ => ComputedLtoType::No,
}
}
fn execute_optimize_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
module: ModuleCodegen<B::Module>,
@@ -731,39 +772,7 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
// back to the coordinator thread for further LTO processing (which
// has to wait for all the initial modules to be optimized).
// If the linker does LTO, we don't have to do it. Note that we
// keep doing full LTO, if it is requested, as not to break the
// assumption that the output will be a single module.
let linker_does_lto = cgcx.opts.cg.linker_plugin_lto.enabled();
// When we're automatically doing ThinLTO for multi-codegen-unit
// builds we don't actually want to LTO the allocator modules if
// it shows up. This is due to various linker shenanigans that
// we'll encounter later.
let is_allocator = module.kind == ModuleKind::Allocator;
// We ignore a request for full crate graph LTO if the crate type
// is only an rlib, as there is no full crate graph to process,
// that'll happen later.
//
// This use case currently comes up primarily for targets that
// require LTO so the request for LTO is always unconditionally
// passed down to the backend, but we don't actually want to do
// anything about it yet until we've got a final product.
let is_rlib = cgcx.crate_types.len() == 1 && cgcx.crate_types[0] == CrateType::Rlib;
// Metadata modules never participate in LTO regardless of the lto
// settings.
let lto_type = if module.kind == ModuleKind::Metadata {
ComputedLtoType::No
} else {
match cgcx.lto {
Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
Lto::Fat if !is_rlib => ComputedLtoType::Fat,
_ => ComputedLtoType::No,
}
};
let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types, module.kind);
// If we're doing some form of incremental LTO then we need to be sure to
// save our module to disk first.


@@ -14,8 +14,8 @@
//! int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`.
use crate::back::write::{
start_async_codegen, submit_codegened_module_to_llvm, submit_post_lto_module_to_llvm,
submit_pre_lto_module_to_llvm, OngoingCodegen,
compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
};
use crate::common::{IntPredicate, RealPredicate, TypeKind};
use crate::meth;
@@ -43,7 +43,7 @@ use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, EntryFnType, Lto};
use rustc_session::config::{self, EntryFnType};
use rustc_session::Session;
use rustc_span::Span;
use rustc_symbol_mangling::test as symbol_names_test;
@@ -941,8 +941,18 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
);
if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
// We can re-use either the pre- or the post-thinlto state
if tcx.sess.lto() != Lto::No { CguReuse::PreLto } else { CguReuse::PostLto }
// We can re-use either the pre- or the post-thinlto state. If no LTO is
// being performed then we can use post-LTO artifacts, otherwise we must
// reuse pre-LTO artifacts
match compute_per_cgu_lto_type(
&tcx.sess.lto(),
&tcx.sess.opts,
&tcx.sess.crate_types.borrow(),
ModuleKind::Regular,
) {
ComputedLtoType::No => CguReuse::PostLto,
_ => CguReuse::PreLto,
}
} else {
CguReuse::No
}


@@ -0,0 +1,9 @@
// revisions:cfail1 cfail2
// compile-flags: -Z query-dep-graph --crate-type rlib -C linker-plugin-lto -O
// no-prefer-dynamic
// build-pass
#![feature(rustc_attrs)]
#![rustc_partition_reused(module = "lto_in_linker", cfg = "cfail2")]
pub fn foo() {}


@@ -0,0 +1,8 @@
// revisions:cfail1 cfail2
// compile-flags: -Z query-dep-graph --crate-type rlib -C lto
// build-pass
#![feature(rustc_attrs)]
#![rustc_partition_reused(module = "rlib_lto", cfg = "cfail2")]
pub fn foo() {}