Use a QueryContext for try_mark_green.

Camille GILLOT 2021-01-18 23:53:42 +01:00
parent 3bd14c7bbe
commit b27266fdb2
10 changed files with 133 additions and 143 deletions
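At a high level, this change moves the query-forcing and diagnostics hooks off the generic `DepContext` trait and onto `QueryContext`, and gives `TyCtxt` a small `try_mark_green` wrapper so callers no longer have to build the context and unwrap the result themselves. In rough outline (a sketch of the shape of the change, mirroring the hunks below rather than reproducing them exactly):

impl<'tcx> TyCtxt<'tcx> {
    /// Convenience wrapper: construct the query context on the fly and
    /// collapse the `Option` returned by the dep-graph into a bool.
    pub fn try_mark_green(self, dep_node: &dep_graph::DepNode) -> bool {
        let qcx = QueryCtxt { tcx: self, queries: self.queries };
        self.dep_graph.try_mark_green(qcx, dep_node).is_some()
    }
}

// Call sites shrink accordingly, e.g. in determine_cgu_reuse:
//   before: tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some()
//   after:  tcx.try_mark_green(&dep_node)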

View File

@@ -465,9 +465,5 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
cgu.name()
);
if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
CguReuse::PreLto
} else {
CguReuse::No
}
if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
}

View File

@@ -867,7 +867,7 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
cgu.name()
);
if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
if tcx.try_mark_green(&dep_node) {
// We can re-use either the pre- or the post-thinlto state. If no LTO is
// being performed then we can use post-LTO artifacts, otherwise we must
// reuse pre-LTO artifacts
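For context, the comment above is choosing between pre- and post-LTO reuse once the CGU's dep-node is marked green. A minimal sketch of that decision, assuming a session-level LTO check (the `lto_requested` flag is a hypothetical stand-in; the actual condition lies outside this hunk):

if tcx.try_mark_green(&dep_node) {
    // Green: the artifacts from the previous session are still valid.
    if lto_requested { CguReuse::PreLto } else { CguReuse::PostLto }
} else {
    // Not green: the CGU has to be codegenned from scratch.
    CguReuse::No
}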

View File

@@ -135,7 +135,7 @@ pub struct DepKindStruct {
/// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
/// add it to the "We don't have enough information to reconstruct..." group in
/// the match below.
pub(super) force_from_dep_node: fn(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool,
pub(crate) force_from_dep_node: fn(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool,
/// Invoke a query to put the on-disk cached value in memory.
pub(crate) try_load_from_on_disk_cache: fn(QueryCtxt<'_>, &DepNode),
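`DepKindStruct` holds one function pointer per dep-node kind, so forcing a node or repopulating it from the on-disk cache is a plain indirect call. The visibility bump from `pub(super)` to `pub(crate)` lets the query plumbing below perform that dispatch directly; roughly (a sketch based on the call sites later in this diff):

// Per-kind dispatch, as used further down in this patch:
let forced: bool = (dep_node.kind.force_from_dep_node)(tcx, dep_node);
(dep_node.kind.try_load_from_on_disk_cache)(qcx, dep_node);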

View File

@@ -2,9 +2,6 @@ use crate::ich::StableHashingContext;
use crate::ty::{self, TyCtxt};
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::Diagnostic;
use rustc_hir::def_id::LocalDefId;
mod dep_node;
@@ -116,99 +113,7 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
&self.dep_graph
}
fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
// FIXME: This match is just a workaround for incremental bugs and should
// be removed. https://github.com/rust-lang/rust/issues/62649 is one such
// bug that must be fixed before removing this.
match dep_node.kind {
DepKind::hir_owner | DepKind::hir_owner_nodes => {
if let Some(def_id) = dep_node.extract_def_id(*self) {
if !def_id_corresponds_to_hir_dep_node(*self, def_id.expect_local()) {
// This `DefPath` does not have a
// corresponding `DepNode` (e.g. a
// struct field), and the `DefPath`
// collided with the `DefPath` of a
// proper item that existed in the
// previous compilation session.
//
// Since the given `DefPath` does not
// denote the item that previously
// existed, we just fail to mark green.
return false;
}
} else {
// If the node does not exist anymore, we
// just fail to mark green.
return false;
}
}
_ => {
// For other kinds of nodes it's OK to be
// forced.
}
}
debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
// We must avoid ever having to call `force_from_dep_node()` for a
// `DepNode::codegen_unit`:
// Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
// would always end up having to evaluate the first caller of the
// `codegen_unit` query that *is* reconstructible. This might very well be
// the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
// to re-trigger calling the `codegen_unit` query with the right key. At
// that point we would already have re-done all the work we are trying to
// avoid doing in the first place.
// The solution is simple: Just explicitly call the `codegen_unit` query for
// each CGU, right after partitioning. This way `try_mark_green` will always
// hit the cache instead of having to go through `force_from_dep_node`.
// This assertion makes sure we actually keep applying the solution above.
debug_assert!(
dep_node.kind != DepKind::codegen_unit,
"calling force_from_dep_node() on DepKind::codegen_unit"
);
(dep_node.kind.force_from_dep_node)(*self, dep_node)
}
fn has_errors_or_delayed_span_bugs(&self) -> bool {
self.sess.has_errors_or_delayed_span_bugs()
}
fn diagnostic(&self) -> &rustc_errors::Handler {
self.sess.diagnostic()
}
// Interactions with on_disk_cache
fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
self.on_disk_cache
.as_ref()
.map(|c| c.load_diagnostics(*self, prev_dep_node_index))
.unwrap_or_default()
}
fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
if let Some(c) = self.on_disk_cache.as_ref() {
c.store_diagnostics(dep_node_index, diagnostics)
}
}
fn store_diagnostics_for_anon_node(
&self,
dep_node_index: DepNodeIndex,
diagnostics: ThinVec<Diagnostic>,
) {
if let Some(c) = self.on_disk_cache.as_ref() {
c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
}
}
fn profiler(&self) -> &SelfProfilerRef {
&self.prof
}
}
fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
def_id == hir_id.owner
}

View File

@@ -119,6 +119,11 @@ impl TyCtxt<'tcx> {
pub fn at(self, span: Span) -> TyCtxtAt<'tcx> {
TyCtxtAt { tcx: self, span }
}
pub fn try_mark_green(self, dep_node: &dep_graph::DepNode) -> bool {
let qcx = QueryCtxt { tcx: self, queries: self.queries };
self.dep_graph.try_mark_green(qcx, dep_node).is_some()
}
}
macro_rules! define_callbacks {

View File

@@ -2,7 +2,7 @@
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
use crate::dep_graph;
use crate::dep_graph::{self, DepKind, DepNode, DepNodeExt, DepNodeIndex, SerializedDepNodeIndex};
use crate::ty::query::{on_disk_cache, Queries, Query};
use crate::ty::tls::{self, ImplicitCtxt};
use crate::ty::{self, TyCtxt};
@@ -72,6 +72,95 @@ impl QueryContext for QueryCtxt<'tcx> {
(dep_node.kind.try_load_from_on_disk_cache)(*self, dep_node)
}
fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
// FIXME: This match is just a workaround for incremental bugs and should
// be removed. https://github.com/rust-lang/rust/issues/62649 is one such
// bug that must be fixed before removing this.
match dep_node.kind {
DepKind::hir_owner | DepKind::hir_owner_nodes => {
if let Some(def_id) = dep_node.extract_def_id(**self) {
let def_id = def_id.expect_local();
let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
if def_id != hir_id.owner {
// This `DefPath` does not have a
// corresponding `DepNode` (e.g. a
// struct field), and the `DefPath`
// collided with the `DefPath` of a
// proper item that existed in the
// previous compilation session.
//
// Since the given `DefPath` does not
// denote the item that previously
// existed, we just fail to mark green.
return false;
}
} else {
// If the node does not exist anymore, we
// just fail to mark green.
return false;
}
}
_ => {
// For other kinds of nodes it's OK to be
// forced.
}
}
debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
// We must avoid ever having to call `force_from_dep_node()` for a
// `DepNode::codegen_unit`:
// Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
// would always end up having to evaluate the first caller of the
// `codegen_unit` query that *is* reconstructible. This might very well be
// the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
// to re-trigger calling the `codegen_unit` query with the right key. At
// that point we would already have re-done all the work we are trying to
// avoid doing in the first place.
// The solution is simple: Just explicitly call the `codegen_unit` query for
// each CGU, right after partitioning. This way `try_mark_green` will always
// hit the cache instead of having to go through `force_from_dep_node`.
// This assertion makes sure we actually keep applying the solution above.
debug_assert!(
dep_node.kind != DepKind::codegen_unit,
"calling force_from_dep_node() on DepKind::codegen_unit"
);
(dep_node.kind.force_from_dep_node)(**self, dep_node)
}
fn has_errors_or_delayed_span_bugs(&self) -> bool {
self.sess.has_errors_or_delayed_span_bugs()
}
fn diagnostic(&self) -> &rustc_errors::Handler {
self.sess.diagnostic()
}
// Interactions with on_disk_cache
fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
self.on_disk_cache
.as_ref()
.map(|c| c.load_diagnostics(**self, prev_dep_node_index))
.unwrap_or_default()
}
fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
if let Some(c) = self.on_disk_cache.as_ref() {
c.store_diagnostics(dep_node_index, diagnostics)
}
}
fn store_diagnostics_for_anon_node(
&self,
dep_node_index: DepNodeIndex,
diagnostics: ThinVec<Diagnostic>,
) {
if let Some(c) = self.on_disk_cache.as_ref() {
c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
}
}
/// Executes a job by changing the `ImplicitCtxt` to point to the
/// new query job while it executes. It returns the diagnostics
/// captured during execution and the actual result.

View File

@@ -587,7 +587,7 @@ impl<K: DepKind> DepGraph<K> {
/// A node will have an index, when it's already been marked green, or when we can mark it
/// green. This function will mark the current task as a reader of the specified node, when
/// a node index can be found for that node.
pub fn try_mark_green_and_read<Ctxt: DepContext<DepKind = K>>(
pub fn try_mark_green_and_read<Ctxt: QueryContext<DepKind = K>>(
&self,
tcx: Ctxt,
dep_node: &DepNode<K>,
@@ -599,7 +599,7 @@ impl<K: DepKind> DepGraph<K> {
})
}
pub fn try_mark_green<Ctxt: DepContext<DepKind = K>>(
pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
&self,
tcx: Ctxt,
dep_node: &DepNode<K>,
@@ -627,7 +627,7 @@ impl<K: DepKind> DepGraph<K> {
}
/// Try to mark a dep-node which existed in the previous compilation session as green.
fn try_mark_previous_green<Ctxt: DepContext<DepKind = K>>(
fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
&self,
tcx: Ctxt,
data: &DepGraphData<K>,
@@ -811,7 +811,7 @@ impl<K: DepKind> DepGraph<K> {
/// This may be called concurrently on multiple threads for the same dep node.
#[cold]
#[inline(never)]
fn emit_diagnostics<Ctxt: DepContext<DepKind = K>>(
fn emit_diagnostics<Ctxt: QueryContext<DepKind = K>>(
&self,
tcx: Ctxt,
data: &DepGraphData<K>,
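Since the marking entry points are now bounded by `QueryContext`, a caller passes the query context itself and reaches the graph through `dep_context()`. A sketch of the calling pattern, following the query-plumbing hunks later in this diff:

match tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node) {
    Some((prev_dep_node_index, dep_node_index)) => {
        // Green: the node from the previous session is still valid, and the
        // current task has been registered as a reader of it.
    }
    None => {
        // New or red node: the query has to be (re)executed.
    }
}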

View File

@@ -13,8 +13,6 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::Diagnostic;
use std::fmt;
use std::hash::Hash;
@@ -32,30 +30,8 @@ pub trait DepContext: Copy {
/// Access the DepGraph.
fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
/// Try to force a dep node to execute and see if it's green.
fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
fn register_reused_dep_node(&self, dep_node: &DepNode<Self::DepKind>);
/// Return whether the current session is tainted by errors.
fn has_errors_or_delayed_span_bugs(&self) -> bool;
/// Return the diagnostic handler.
fn diagnostic(&self) -> &rustc_errors::Handler;
/// Load diagnostics associated to the node in the previous session.
fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
/// Register diagnostics for the given node, for use in next session.
fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
/// Register diagnostics for the given node, for use in next session.
fn store_diagnostics_for_anon_node(
&self,
dep_node_index: DepNodeIndex,
diagnostics: ThinVec<Diagnostic>,
);
/// Access the profiler.
fn profiler(&self) -> &SelfProfilerRef;
}

View File

@@ -14,7 +14,7 @@ pub use self::caches::{
mod config;
pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
use crate::dep_graph::{DepNode, HasDepContext};
use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
use crate::query::job::QueryMap;
use rustc_data_structures::stable_hasher::HashStable;
@@ -40,6 +40,28 @@ pub trait QueryContext: HasDepContext {
/// Load data from the on-disk cache.
fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
/// Try to force a dep node to execute and see if it's green.
fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
/// Return whether the current session is tainted by errors.
fn has_errors_or_delayed_span_bugs(&self) -> bool;
/// Return the diagnostic handler.
fn diagnostic(&self) -> &rustc_errors::Handler;
/// Load diagnostics associated to the node in the previous session.
fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
/// Register diagnostics for the given node, for use in next session.
fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
/// Register diagnostics for the given node, for use in next session.
fn store_diagnostics_for_anon_node(
&self,
dep_node_index: DepNodeIndex,
diagnostics: ThinVec<Diagnostic>,
);
/// Executes a job by changing the `ImplicitCtxt` to point to the
/// new query job while it executes. It returns the diagnostics
/// captured during execution and the actual result.
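After this commit the division of labour between the two traits looks roughly like this (a condensed sketch of the signatures shown in the hunks above, not the complete traits):

pub trait DepContext: Copy {
    type DepKind;   // bounds omitted in this sketch
    fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
    fn register_reused_dep_node(&self, dep_node: &DepNode<Self::DepKind>);
    fn profiler(&self) -> &SelfProfilerRef;
    // ...graph access and profiling only; no forcing, no diagnostics.
}

pub trait QueryContext: HasDepContext {
    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
    fn has_errors_or_delayed_span_bugs(&self) -> bool;
    fn diagnostic(&self) -> &rustc_errors::Handler;
    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
    // ...forcing and diagnostics now live on the query side.
}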

View File

@@ -460,7 +460,7 @@ where
tcx.dep_context().dep_graph().read_index(dep_node_index);
if unlikely!(!diagnostics.is_empty()) {
tcx.dep_context().store_diagnostics_for_anon_node(dep_node_index, diagnostics);
tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
}
return job.complete(result, dep_node_index);
@@ -473,10 +473,7 @@ where
// promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
let loaded = tcx.start_query(job.id, None, || {
let marked = tcx
.dep_context()
.dep_graph()
.try_mark_green_and_read(*tcx.dep_context(), &dep_node);
let marked = tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node);
marked.map(|(prev_dep_node_index, dep_node_index)| {
(
load_from_disk_and_cache_in_memory(
@@ -641,7 +638,7 @@ where
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
if unlikely!(!diagnostics.is_empty()) && dep_node.kind != DepKind::NULL {
tcx.dep_context().store_diagnostics(dep_node_index, diagnostics);
tcx.store_diagnostics(dep_node_index, diagnostics);
}
let result = job.complete(result, dep_node_index);
@@ -676,7 +673,7 @@ where
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
fn ensure_must_run<CTX, K, V>(tcx: CTX::DepContext, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
fn ensure_must_run<CTX, K, V>(tcx: CTX, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
where
K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
@@ -688,9 +685,9 @@ where
// Ensuring an anonymous query makes no sense
assert!(!query.anon);
let dep_node = query.to_dep_node(tcx, key);
let dep_node = query.to_dep_node(*tcx.dep_context(), key);
match tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node) {
match tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node) {
None => {
// A None return from `try_mark_green_and_read` means that this is either
// a new dep node or that the dep node has already been marked red.
@@ -701,7 +698,7 @@ where
true
}
Some((_, dep_node_index)) => {
tcx.profiler().query_cache_hit(dep_node_index.into());
tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
false
}
}
@@ -768,7 +765,7 @@ where
{
let query = &Q::VTABLE;
if let QueryMode::Ensure = mode {
if !ensure_must_run(*tcx.dep_context(), &key, query) {
if !ensure_must_run(tcx, &key, query) {
return None;
}
}
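The same theme shows up in `ensure_must_run` above: it now takes the full query context and reaches the dep-graph and profiler through `dep_context()`. The new shape, condensed (body abbreviated to the parts this patch touches):

fn ensure_must_run<CTX, K, V>(tcx: CTX, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
where
    K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    // ...
    let dep_node = query.to_dep_node(*tcx.dep_context(), key);
    match tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node) {
        // New or red node: the query must run.
        None => true,
        // Green node: record the cache hit and skip running the query.
        Some((_, dep_node_index)) => {
            tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            false
        }
    }
}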