Auto merge of #70162 - cjgillot:split_query, r=Zoxc

Move the query system to a dedicated crate

The query system `rustc::ty::query` is split out into the `rustc_query_system` crate.

Some commits are left unformatted to ease rebasing.

Based on #67761 and #69910.

r? @Zoxc
This commit is contained in:
bors 2020-03-27 21:36:51 +00:00
commit 0bf7c2ad77
28 changed files with 1557 additions and 1530 deletions

View File

@ -4081,12 +4081,12 @@ version = "0.0.0"
dependencies = [
"log",
"parking_lot 0.9.0",
"rustc_ast",
"rustc-rayon-core",
"rustc_data_structures",
"rustc_errors",
"rustc_hir",
"rustc_index",
"rustc_macros",
"rustc_span",
"serialize",
"smallvec 1.0.0",
]

View File

@ -8,7 +8,6 @@ use rustc_errors::Diagnostic;
use rustc_hir::def_id::DefId;
mod dep_node;
mod safe;
pub(crate) use rustc_query_system::dep_graph::DepNodeParams;
pub use rustc_query_system::dep_graph::{
@ -17,8 +16,6 @@ pub use rustc_query_system::dep_graph::{
};
pub use dep_node::{label_strs, DepConstructor, DepKind, DepNode, DepNodeExt};
pub use safe::AssertDepGraphSafe;
pub use safe::DepGraphSafe;
pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>;
pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>;
@ -27,6 +24,8 @@ pub type PreviousDepGraph = rustc_query_system::dep_graph::PreviousDepGraph<DepK
pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>;
impl rustc_query_system::dep_graph::DepKind for DepKind {
const NULL: Self = DepKind::Null;
fn is_eval_always(&self) -> bool {
DepKind::is_eval_always(self)
}
@ -82,6 +81,10 @@ impl rustc_query_system::dep_graph::DepKind for DepKind {
op(icx.task_deps)
})
}
fn can_reconstruct_query_key(&self) -> bool {
DepKind::can_reconstruct_query_key(self)
}
}
impl<'tcx> DepContext for TyCtxt<'tcx> {
@ -92,6 +95,10 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
TyCtxt::create_stable_hashing_context(*self)
}
fn debug_dep_tasks(&self) -> bool {
self.sess.opts.debugging_opts.dep_tasks
}
fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
// FIXME: This match is just a workaround for incremental bugs and should
// be removed. https://github.com/rust-lang/rust/issues/62649 is one such
@ -160,6 +167,14 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics)
}
fn store_diagnostics_for_anon_node(
&self,
dep_node_index: DepNodeIndex,
diagnostics: ThinVec<Diagnostic>,
) {
self.queries.on_disk_cache.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
}
fn profiler(&self) -> &SelfProfilerRef {
&self.prof
}
@ -169,23 +184,3 @@ fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap();
def_id.index == hir_id.owner.local_def_index
}
impl rustc_query_system::HashStableContext for StableHashingContext<'_> {
fn debug_dep_tasks(&self) -> bool {
self.sess().opts.debugging_opts.dep_tasks
}
}
impl rustc_query_system::HashStableContextProvider<StableHashingContext<'tcx>> for TyCtxt<'tcx> {
fn get_stable_hashing_context(&self) -> StableHashingContext<'tcx> {
self.create_stable_hashing_context()
}
}
impl rustc_query_system::HashStableContextProvider<StableHashingContext<'a>>
for StableHashingContext<'a>
{
fn get_stable_hashing_context(&self) -> Self {
self.clone()
}
}
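With the split, `rustc_query_system::dep_graph` only knows about the `DepKind` and `DepContext` traits, and `rustc` keeps thin impls like the ones above that forward to the concrete `DepKind` enum and `TyCtxt`. A toy illustration of that inversion, a sketch only, with placeholder types (`DepKindTrait`, `Tcx`) standing in for the real definitions:

```rust
use std::fmt::Debug;
use std::hash::Hash;

// What the generic crate defines: traits the compiler crate implements.
trait DepKindTrait: Copy + Debug + Eq + Hash {
    const NULL: Self;
    fn is_eval_always(&self) -> bool;
}

trait DepContext: Copy {
    type DepKind: DepKindTrait;
    fn debug_dep_tasks(&self) -> bool;
}

// What the compiler crate keeps: concrete types plus forwarding impls.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
enum DepKind {
    Null,
    TypeOf,
}

impl DepKindTrait for DepKind {
    const NULL: Self = DepKind::Null;
    fn is_eval_always(&self) -> bool {
        // A real compiler would consult per-query metadata here.
        false
    }
}

#[derive(Copy, Clone)]
struct Tcx {
    debug_dep_tasks: bool,
}

impl DepContext for Tcx {
    type DepKind = DepKind;
    fn debug_dep_tasks(&self) -> bool {
        self.debug_dep_tasks
    }
}

fn main() {
    let tcx = Tcx { debug_dep_tasks: true };
    println!(
        "{:?} eval_always={} debug={}",
        DepKind::TypeOf,
        DepKind::TypeOf.is_eval_always(),
        tcx.debug_dep_tasks()
    );
}
```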

View File

@ -1,9 +0,0 @@
//! The `DepGraphSafe` trait
use crate::ty::TyCtxt;
pub use rustc_query_system::dep_graph::{AssertDepGraphSafe, DepGraphSafe};
/// The type context itself can be used to access all kinds of tracked
/// state, but those accesses should always generate read events.
impl<'tcx> DepGraphSafe for TyCtxt<'tcx> {}

View File

@ -194,8 +194,6 @@ impl<'a> StableHashingContextProvider<'a> for StableHashingContext<'a> {
}
}
impl<'a> crate::dep_graph::DepGraphSafe for StableHashingContext<'a> {}
impl<'a> HashStable<StableHashingContext<'a>> for ast::NodeId {
fn hash_stable(&self, _: &mut StableHashingContext<'a>, _: &mut StableHasher) {
panic!("Node IDs should not appear in incremental state");

View File

@ -1603,7 +1603,7 @@ nop_list_lift! {substs; GenericArg<'a> => GenericArg<'tcx>}
pub mod tls {
use super::{ptr_eq, GlobalCtxt, TyCtxt};
use crate::dep_graph::TaskDeps;
use crate::dep_graph::{DepKind, TaskDeps};
use crate::ty::query;
use rustc_data_structures::sync::{self, Lock};
use rustc_data_structures::thin_vec::ThinVec;
@ -1630,7 +1630,7 @@ pub mod tls {
/// The current query job, if any. This is updated by `JobOwner::start` in
/// `ty::query::plumbing` when executing a query.
pub query: Option<query::QueryJobId>,
pub query: Option<query::QueryJobId<DepKind>>,
/// Where to store diagnostics for the current query job, if any.
/// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query.

View File

@ -1,82 +0,0 @@
use crate::dep_graph::SerializedDepNodeIndex;
use crate::dep_graph::{DepKind, DepNode};
use crate::ty::query::caches::QueryCache;
use crate::ty::query::plumbing::CycleError;
use crate::ty::query::QueryState;
use crate::ty::TyCtxt;
use rustc_data_structures::profiling::ProfileCategory;
use rustc_hir::def_id::DefId;
use crate::ich::StableHashingContext;
use rustc_data_structures::fingerprint::Fingerprint;
use std::borrow::Cow;
use std::fmt::Debug;
use std::hash::Hash;
// Query configuration and description traits.
// FIXME(eddyb) false positive, the lifetime parameter is used for `Key`/`Value`.
#[allow(unused_lifetimes)]
pub trait QueryConfig<'tcx> {
const NAME: &'static str;
const CATEGORY: ProfileCategory;
type Key: Eq + Hash + Clone + Debug;
type Value: Clone;
}
pub(crate) trait QueryAccessors<'tcx>: QueryConfig<'tcx> {
const ANON: bool;
const EVAL_ALWAYS: bool;
const DEP_KIND: DepKind;
type Cache: QueryCache<Key = Self::Key, Value = Self::Value>;
// Don't use this method to access query results, instead use the methods on TyCtxt
fn query_state<'a>(tcx: TyCtxt<'tcx>) -> &'a QueryState<'tcx, Self::Cache>;
fn to_dep_node(tcx: TyCtxt<'tcx>, key: &Self::Key) -> DepNode;
// Don't use this method to compute query results, instead use the methods on TyCtxt
fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value;
fn hash_result(hcx: &mut StableHashingContext<'_>, result: &Self::Value)
-> Option<Fingerprint>;
fn handle_cycle_error(tcx: TyCtxt<'tcx>, error: CycleError<'tcx>) -> Self::Value;
}
pub(crate) trait QueryDescription<'tcx>: QueryAccessors<'tcx> {
fn describe(tcx: TyCtxt<'_>, key: Self::Key) -> Cow<'static, str>;
#[inline]
fn cache_on_disk(_: TyCtxt<'tcx>, _: Self::Key, _: Option<&Self::Value>) -> bool {
false
}
fn try_load_from_disk(_: TyCtxt<'tcx>, _: SerializedDepNodeIndex) -> Option<Self::Value> {
bug!("QueryDescription::load_from_disk() called for an unsupported query.")
}
}
impl<'tcx, M: QueryAccessors<'tcx, Key = DefId>> QueryDescription<'tcx> for M {
default fn describe(tcx: TyCtxt<'_>, def_id: DefId) -> Cow<'static, str> {
if !tcx.sess.verbose() {
format!("processing `{}`", tcx.def_path_str(def_id)).into()
} else {
let name = ::std::any::type_name::<M>();
format!("processing {:?} with query `{}`", def_id, name).into()
}
}
default fn cache_on_disk(_: TyCtxt<'tcx>, _: Self::Key, _: Option<&Self::Value>) -> bool {
false
}
default fn try_load_from_disk(
_: TyCtxt<'tcx>,
_: SerializedDepNodeIndex,
) -> Option<Self::Value> {
bug!("QueryDescription::load_from_disk() called for an unsupported query.")
}
}
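The deleted `config.rs` shows the trait stack that moves into `rustc_query_system`: `QueryConfig` carries the key/value types, `QueryAccessors` adds the per-query constants and plumbing hooks, and `QueryDescription` layers human-readable descriptions and disk-cache hooks on top. A minimal standalone sketch of that layering, with simplified stand-ins (a unit `Ctx` instead of `TyCtxt`, a hypothetical `TypeOf` query):

```rust
use std::borrow::Cow;
use std::fmt::Debug;
use std::hash::Hash;

// Simplified stand-in for the query context (rustc uses TyCtxt).
#[derive(Copy, Clone)]
struct Ctx;

// Base trait: only the key/value types.
trait QueryConfig {
    const NAME: &'static str;
    type Key: Eq + Hash + Clone + Debug;
    type Value: Clone;
}

// Adds the constants and hooks the plumbing needs.
trait QueryAccessors: QueryConfig {
    const ANON: bool;
    const EVAL_ALWAYS: bool;
    fn compute(ctx: Ctx, key: Self::Key) -> Self::Value;
}

// Adds a human-readable description and optional disk caching.
trait QueryDescription: QueryAccessors {
    fn describe(key: &Self::Key) -> Cow<'static, str> {
        format!("processing `{:?}`", key).into()
    }
    fn cache_on_disk(_key: &Self::Key) -> bool {
        false
    }
}

// A hypothetical query: map a "def id" (here just a u32) to a string.
struct TypeOf;

impl QueryConfig for TypeOf {
    const NAME: &'static str = "type_of";
    type Key = u32;
    type Value = String;
}

impl QueryAccessors for TypeOf {
    const ANON: bool = false;
    const EVAL_ALWAYS: bool = false;
    fn compute(_ctx: Ctx, key: Self::Key) -> Self::Value {
        format!("Ty#{}", key)
    }
}

impl QueryDescription for TypeOf {}

fn main() {
    let value = TypeOf::compute(Ctx, 7);
    println!("{}: {} -> {}", TypeOf::NAME, TypeOf::describe(&7), value);
}
```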

View File

@ -1,531 +1,12 @@
use crate::dep_graph::DepKind;
use crate::ty::context::TyCtxt;
use crate::ty::query::plumbing::CycleError;
use crate::ty::query::Query;
use crate::ty::tls;
use rustc_data_structures::fx::FxHashMap;
use rustc_span::Span;
use std::convert::TryFrom;
use std::marker::PhantomData;
use std::num::NonZeroU32;
#[cfg(parallel_compiler)]
use {
parking_lot::{Condvar, Mutex},
rustc_data_structures::fx::FxHashSet,
rustc_data_structures::stable_hasher::{HashStable, StableHasher},
rustc_data_structures::sync::Lock,
rustc_data_structures::sync::Lrc,
rustc_data_structures::{jobserver, OnDrop},
rustc_rayon_core as rayon_core,
rustc_span::DUMMY_SP,
std::iter::FromIterator,
std::{mem, process, thread},
};
/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<'tcx> {
/// The span corresponding to the reason for which this query was required.
pub span: Span,
pub query: Query<'tcx>,
}
type QueryMap<'tcx> = FxHashMap<QueryJobId, QueryJobInfo<'tcx>>;
/// A value uniquely identifying an active query job within a shard in the query cache.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryShardJobId(pub NonZeroU32);
/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId {
/// Which job within a shard is this
pub job: QueryShardJobId,
/// In which shard is this job
pub shard: u16,
/// What kind of query this job is
pub kind: DepKind,
}
impl QueryJobId {
pub fn new(job: QueryShardJobId, shard: usize, kind: DepKind) -> Self {
QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
}
fn query<'tcx>(self, map: &QueryMap<'tcx>) -> Query<'tcx> {
map.get(&self).unwrap().info.query.clone()
}
#[cfg(parallel_compiler)]
fn span(self, map: &QueryMap<'_>) -> Span {
map.get(&self).unwrap().job.span
}
#[cfg(parallel_compiler)]
fn parent(self, map: &QueryMap<'_>) -> Option<QueryJobId> {
map.get(&self).unwrap().job.parent
}
#[cfg(parallel_compiler)]
fn latch<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Option<&'a QueryLatch<'tcx>> {
map.get(&self).unwrap().job.latch.as_ref()
}
}
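`QueryJobId` is the global handle for an active job: a per-shard counter (`QueryShardJobId`), the shard index, and the query's `DepKind`. A small sketch of that composition, with a placeholder `DepKind` enum:

```rust
use std::convert::TryFrom;
use std::num::NonZeroU32;

// Placeholder for rustc's generated DepKind enum.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
enum DepKind {
    TypeOf,
}

// Unique within one shard of a query's state.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct QueryShardJobId(NonZeroU32);

// Globally unique: shard-local id + shard index + query kind.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct QueryJobId {
    job: QueryShardJobId,
    shard: u16,
    kind: DepKind,
}

impl QueryJobId {
    fn new(job: QueryShardJobId, shard: usize, kind: DepKind) -> Self {
        // Panics if there are ever more than u16::MAX shards, mirroring the unwrap above.
        QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
    }
}

fn main() {
    let shard_local = QueryShardJobId(NonZeroU32::new(1).unwrap());
    let id = QueryJobId::new(shard_local, 3, DepKind::TypeOf);
    println!("{:?}", id);
}
```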
pub struct QueryJobInfo<'tcx> {
pub info: QueryInfo<'tcx>,
pub job: QueryJob<'tcx>,
}
/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob<'tcx> {
pub id: QueryShardJobId,
/// The span corresponding to the reason for which this query was required.
pub span: Span,
/// The parent query job which created this job and is implicitly waiting on it.
pub parent: Option<QueryJobId>,
/// The latch that is used to wait on this job.
#[cfg(parallel_compiler)]
latch: Option<QueryLatch<'tcx>>,
dummy: PhantomData<QueryLatch<'tcx>>,
}
impl<'tcx> QueryJob<'tcx> {
/// Creates a new query job.
pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId>) -> Self {
QueryJob {
id,
span,
parent,
#[cfg(parallel_compiler)]
latch: None,
dummy: PhantomData,
}
}
#[cfg(parallel_compiler)]
pub(super) fn latch(&mut self, _id: QueryJobId) -> QueryLatch<'tcx> {
if self.latch.is_none() {
self.latch = Some(QueryLatch::new());
}
self.latch.as_ref().unwrap().clone()
}
#[cfg(not(parallel_compiler))]
pub(super) fn latch(&mut self, id: QueryJobId) -> QueryLatch<'tcx> {
QueryLatch { id, dummy: PhantomData }
}
/// Signals to waiters that the query is complete.
///
/// This does nothing for single threaded rustc,
/// as there are no concurrent jobs which could be waiting on us
pub fn signal_complete(self) {
#[cfg(parallel_compiler)]
self.latch.map(|latch| latch.set());
}
}
#[cfg(not(parallel_compiler))]
#[derive(Clone)]
pub(super) struct QueryLatch<'tcx> {
id: QueryJobId,
dummy: PhantomData<&'tcx ()>,
}
#[cfg(not(parallel_compiler))]
impl<'tcx> QueryLatch<'tcx> {
pub(super) fn find_cycle_in_stack(&self, tcx: TyCtxt<'tcx>, span: Span) -> CycleError<'tcx> {
let query_map = tcx.queries.try_collect_active_jobs().unwrap();
// Get the current executing query (waiter) and find the waitee amongst its parents
let mut current_job = tls::with_related_context(tcx, |icx| icx.query);
let mut cycle = Vec::new();
while let Some(job) = current_job {
let info = query_map.get(&job).unwrap();
cycle.push(info.info.clone());
if job == self.id {
cycle.reverse();
// This is the end of the cycle
// The span entry we included was for the usage
// of the cycle itself, and not part of the cycle
// Replace it with the span which caused the cycle to form
cycle[0].span = span;
// Find out why the cycle itself was used
let usage = info
.job
.parent
.as_ref()
.map(|parent| (info.info.span, parent.query(&query_map)));
return CycleError { usage, cycle };
}
current_job = info.job.parent;
}
panic!("did not find a cycle")
}
}
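In the non-parallel compiler a cycle can only involve the current thread's own stack of active queries, so `find_cycle_in_stack` just walks the `parent` chain from the current job until it reaches the job being waited on. A simplified standalone version of that walk, assuming plain integer ids and a `HashMap` in place of the real job map:

```rust
use std::collections::HashMap;

// A very small stand-in for QueryJobInfo: just the parent link and a label.
struct Job {
    parent: Option<u32>,
    label: &'static str,
}

// Walk from `current` up the parent chain; if we hit `waited_on`, the chain
// from `waited_on` down to `current` is the cycle.
fn find_cycle(
    jobs: &HashMap<u32, Job>,
    mut current: Option<u32>,
    waited_on: u32,
) -> Vec<&'static str> {
    let mut cycle = Vec::new();
    while let Some(id) = current {
        let job = &jobs[&id];
        cycle.push(job.label);
        if id == waited_on {
            cycle.reverse();
            return cycle;
        }
        current = job.parent;
    }
    panic!("did not find a cycle")
}

fn main() {
    // Parent chain: 3's parent is 2, 2's parent is 1.
    let mut jobs = HashMap::new();
    jobs.insert(1, Job { parent: None, label: "layout_of" });
    jobs.insert(2, Job { parent: Some(1), label: "type_of" });
    jobs.insert(3, Job { parent: Some(2), label: "mir_built" });
    // Query 3 now tries to wait on query 1, which is already on its parent chain.
    println!("{:?}", find_cycle(&jobs, Some(3), 1));
}
```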
#[cfg(parallel_compiler)]
struct QueryWaiter<'tcx> {
query: Option<QueryJobId>,
condvar: Condvar,
span: Span,
cycle: Lock<Option<CycleError<'tcx>>>,
}
#[cfg(parallel_compiler)]
impl<'tcx> QueryWaiter<'tcx> {
fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry);
self.condvar.notify_one();
}
}
#[cfg(parallel_compiler)]
struct QueryLatchInfo<'tcx> {
complete: bool,
waiters: Vec<Lrc<QueryWaiter<'tcx>>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch<'tcx> {
info: Lrc<Mutex<QueryLatchInfo<'tcx>>>,
}
#[cfg(parallel_compiler)]
impl<'tcx> QueryLatch<'tcx> {
fn new() -> Self {
QueryLatch {
info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
}
}
/// Waits for the query job to complete.
#[cfg(parallel_compiler)]
pub(super) fn wait_on(&self, tcx: TyCtxt<'tcx>, span: Span) -> Result<(), CycleError<'tcx>> {
tls::with_related_context(tcx, move |icx| {
let waiter = Lrc::new(QueryWaiter {
query: icx.query,
span,
cycle: Lock::new(None),
condvar: Condvar::new(),
});
self.wait_on_inner(&waiter);
// FIXME: Get rid of this lock. We have ownership of the QueryWaiter
// although another thread may still have a Lrc reference so we cannot
// use Lrc::get_mut
let mut cycle = waiter.cycle.lock();
match cycle.take() {
None => Ok(()),
Some(cycle) => Err(cycle),
}
})
}
/// Awaits the caller on this latch by blocking the current thread.
fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<'tcx>>) {
let mut info = self.info.lock();
if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside
// the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
// Both of these will remove it from the `waiters` list before resuming
// this thread.
info.waiters.push(waiter.clone());
// If this detects a deadlock and the deadlock handler wants to resume this thread
// we have to be in the `wait` call. This is ensured by the deadlock handler
// getting the self.info lock.
rayon_core::mark_blocked();
jobserver::release_thread();
waiter.condvar.wait(&mut info);
// Release the lock before we potentially block in `acquire_thread`
mem::drop(info);
jobserver::acquire_thread();
}
}
/// Sets the latch and resumes all waiters on it
fn set(&self) {
let mut info = self.info.lock();
debug_assert!(!info.complete);
info.complete = true;
let registry = rayon_core::Registry::current();
for waiter in info.waiters.drain(..) {
waiter.notify(&registry);
}
}
/// Removes a single waiter from the list of waiters.
/// This is used to break query cycles.
fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<'tcx>> {
let mut info = self.info.lock();
debug_assert!(!info.complete);
// Remove the waiter from the list of waiters
info.waiters.remove(waiter)
}
}
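The parallel `QueryLatch` is essentially a one-shot latch: waiters register and block, and `set` marks the query complete and wakes them. A minimal sketch with `std::sync` primitives, dropping the rayon/jobserver bookkeeping and the explicit waiter list of the real code:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

struct LatchInfo {
    complete: bool,
}

#[derive(Clone)]
struct Latch {
    info: Arc<(Mutex<LatchInfo>, Condvar)>,
}

impl Latch {
    fn new() -> Self {
        Latch { info: Arc::new((Mutex::new(LatchInfo { complete: false }), Condvar::new())) }
    }

    // Block the current thread until the latch is set.
    fn wait(&self) {
        let (lock, cvar) = &*self.info;
        let mut info = lock.lock().unwrap();
        while !info.complete {
            info = cvar.wait(info).unwrap();
        }
    }

    // Mark the query complete and resume all waiters.
    fn set(&self) {
        let (lock, cvar) = &*self.info;
        let mut info = lock.lock().unwrap();
        assert!(!info.complete);
        info.complete = true;
        cvar.notify_all();
    }
}

fn main() {
    let latch = Latch::new();
    let waiter = latch.clone();
    let handle = thread::spawn(move || {
        waiter.wait();
        println!("query result is ready");
    });
    thread::sleep(Duration::from_millis(10));
    latch.set();
    handle.join().unwrap();
}
```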
/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
#[cfg(parallel_compiler)]
type Waiter = (QueryJobId, usize);
/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
/// and a span indicating the reason the query waited on `query_ref`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<'tcx, F>(
query_map: &QueryMap<'tcx>,
query: QueryJobId,
mut visit: F,
) -> Option<Option<Waiter>>
where
F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
{
// Visit the parent query which is a non-resumable waiter since it's on the same stack
if let Some(parent) = query.parent(query_map) {
if let Some(cycle) = visit(query.span(query_map), parent) {
return Some(cycle);
}
}
// Visit the explicit waiters which use condvars and are resumable
if let Some(latch) = query.latch(query_map) {
for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
if let Some(waiter_query) = waiter.query {
if visit(waiter.span, waiter_query).is_some() {
// Return a value which indicates that this waiter can be resumed
return Some(Some((query, i)));
}
}
}
}
None
}
/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check<'tcx>(
query_map: &QueryMap<'tcx>,
query: QueryJobId,
span: Span,
stack: &mut Vec<(Span, QueryJobId)>,
visited: &mut FxHashSet<QueryJobId>,
) -> Option<Option<Waiter>> {
if !visited.insert(query) {
return if let Some(p) = stack.iter().position(|q| q.1 == query) {
// We detected a query cycle, fix up the initial span and return Some
// Remove previous stack entries
stack.drain(0..p);
// Replace the span for the first query with the cycle cause
stack[0].0 = span;
Some(None)
} else {
None
};
}
// Mark the query as visited and add it to the stack
stack.push((span, query));
// Visit all the waiters
let r = visit_waiters(query_map, query, |span, successor| {
cycle_check(query_map, successor, span, stack, visited)
});
// Remove the entry in our stack if we didn't find a cycle
if r.is_none() {
stack.pop();
}
r
}
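`cycle_check` is a depth-first search over the waiter graph: the current DFS path lives in `stack`, and revisiting a query that is still on that path yields the cycle. A self-contained sketch over a plain adjacency map:

```rust
use std::collections::{HashMap, HashSet};

// Returns the cycle as a list of query ids, if one is reachable from `query`.
fn cycle_check(
    waiters: &HashMap<u32, Vec<u32>>,
    query: u32,
    stack: &mut Vec<u32>,
    visited: &mut HashSet<u32>,
) -> Option<Vec<u32>> {
    if !visited.insert(query) {
        // Already visited: it is a cycle only if it is still on the current path.
        return stack.iter().position(|&q| q == query).map(|p| stack[p..].to_vec());
    }
    stack.push(query);
    for &succ in waiters.get(&query).into_iter().flatten() {
        if let Some(cycle) = cycle_check(waiters, succ, stack, visited) {
            return Some(cycle);
        }
    }
    // No cycle through this node; remove it from the current path.
    stack.pop();
    None
}

fn main() {
    // 1 waits on 2, 2 waits on 3, 3 waits on 1: a cycle.
    let mut waiters = HashMap::new();
    waiters.insert(1, vec![2]);
    waiters.insert(2, vec![3]);
    waiters.insert(3, vec![1]);
    let cycle = cycle_check(&waiters, 1, &mut Vec::new(), &mut HashSet::new());
    println!("{:?}", cycle); // Some([1, 2, 3])
}
```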
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<'tcx>(
query_map: &QueryMap<'tcx>,
query: QueryJobId,
visited: &mut FxHashSet<QueryJobId>,
) -> bool {
// We already visited this or we're deliberately ignoring it
if !visited.insert(query) {
return false;
}
// This query is connected to the root (it has no query parent), return true
if query.parent(query_map).is_none() {
return true;
}
visit_waiters(query_map, query, |_, successor| {
connected_to_root(query_map, successor, visited).then_some(None)
})
.is_some()
}
// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, QueryJobId)>(
query_map: &QueryMap<'tcx>,
tcx: TyCtxt<'tcx>,
queries: &'a [T],
f: F,
) -> &'a T {
// Deterministically pick an entry point
// FIXME: Sort this instead
let mut hcx = tcx.create_stable_hashing_context();
queries
.iter()
.min_by_key(|v| {
let (span, query) = f(v);
let mut stable_hasher = StableHasher::new();
query.query(query_map).hash_stable(&mut hcx, &mut stable_hasher);
// Prefer entry points which have valid spans for nicer error messages
// We add an integer to the tuple ensuring that entry points
// with valid spans are picked first
let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
(span_cmp, stable_hasher.finish::<u64>())
})
.unwrap()
}
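`pick_query` must make a deterministic choice so that cycle errors are stable across runs: it minimizes over a tuple whose first component prefers entries with a real span and whose second is a stable hash of the query. The same idea with plain values standing in for spans and stable hashes:

```rust
// Each candidate is (has_dummy_span, stable_hash_of_query, name).
// Minimizing the (span_cmp, hash) tuple prefers entries with a real span,
// then breaks ties by the stable hash, so the pick is deterministic.
fn pick_query<'a>(candidates: &'a [(bool, u64, &'a str)]) -> &'a (bool, u64, &'a str) {
    candidates
        .iter()
        .min_by_key(|&&(dummy_span, hash, _)| (if dummy_span { 1 } else { 0 }, hash))
        .unwrap()
}

fn main() {
    let candidates = [
        (true, 0x11, "no_span_query"),
        (false, 0x99, "spanned_query_b"),
        (false, 0x42, "spanned_query_a"),
    ];
    // Picks "spanned_query_a": it has a real span and the smaller hash.
    println!("{}", pick_query(&candidates).2);
}
```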
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<'tcx>(
query_map: &QueryMap<'tcx>,
jobs: &mut Vec<QueryJobId>,
wakelist: &mut Vec<Lrc<QueryWaiter<'tcx>>>,
tcx: TyCtxt<'tcx>,
) -> bool {
let mut visited = FxHashSet::default();
let mut stack = Vec::new();
// Look for a cycle starting with the last query in `jobs`
if let Some(waiter) =
cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
{
// The stack is a vector of pairs of spans and queries; reverse it so that
// the earlier entries require later entries
let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();
// Shift the spans so that queries are matched with the span for their waitee
spans.rotate_right(1);
// Zip them back together
let mut stack: Vec<_> = spans.into_iter().zip(queries).collect();
// Remove the queries in our cycle from the list of jobs to look at
for r in &stack {
jobs.remove_item(&r.1);
}
// Find the queries in the cycle which are
// connected to queries outside the cycle
let entry_points = stack
.iter()
.filter_map(|&(span, query)| {
if query.parent(query_map).is_none() {
// This query is connected to the root (it has no query parent)
Some((span, query, None))
} else {
let mut waiters = Vec::new();
// Find all the direct waiters who lead to the root
visit_waiters(query_map, query, |span, waiter| {
// Mark all the other queries in the cycle as already visited
let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));
if connected_to_root(query_map, waiter, &mut visited) {
waiters.push((span, waiter));
}
None
});
if waiters.is_empty() {
None
} else {
// Deterministically pick one of the waiters to show to the user
let waiter = *pick_query(query_map, tcx, &waiters, |s| *s);
Some((span, query, Some(waiter)))
}
}
})
.collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();
// Deterministically pick an entry point
let (_, entry_point, usage) = pick_query(query_map, tcx, &entry_points, |e| (e.0, e.1));
// Shift the stack so that our entry point is first
let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
if let Some(pos) = entry_point_pos {
stack.rotate_left(pos);
}
let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));
// Create the cycle error
let error = CycleError {
usage,
cycle: stack
.iter()
.map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
.collect(),
};
// We unwrap `waiter` here since there must always be one
// edge which is resumable / waited using a query latch
let (waitee_query, waiter_idx) = waiter.unwrap();
// Extract the waiter we want to resume
let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);
// Set the cycle error so it will be picked up when resumed
*waiter.cycle.lock() = Some(error);
// Put the waiter on the list of things to resume
wakelist.push(waiter);
true
} else {
false
}
}
use rustc_query_system::query::deadlock;
use rustc_rayon_core as rayon_core;
use std::thread;
/// Creates a new thread and forwards information in thread locals to it.
/// The new thread runs the deadlock handler.
/// Must only be called when a deadlock is about to happen.
#[cfg(parallel_compiler)]
pub unsafe fn handle_deadlock() {
let registry = rayon_core::Registry::current();
@ -546,44 +27,3 @@ pub unsafe fn handle_deadlock() {
})
});
}
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
fn deadlock(tcx: TyCtxt<'_>, registry: &rayon_core::Registry) {
let on_panic = OnDrop(|| {
eprintln!("deadlock handler panicked, aborting process");
process::abort();
});
let mut wakelist = Vec::new();
let query_map = tcx.queries.try_collect_active_jobs().unwrap();
let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
let mut found_cycle = false;
while jobs.len() > 0 {
if remove_cycle(&query_map, &mut jobs, &mut wakelist, tcx) {
found_cycle = true;
}
}
// Check that a cycle was found. It is possible for a deadlock to occur without
// a query cycle if a query which can be waited on uses Rayon to do multithreading
// internally. Such a query (X) may be executing on 2 threads (A and B) and A may
// wait using Rayon on B. Rayon may then switch to executing another query (Y)
// which in turn will wait on X causing a deadlock. We have a false dependency from
// X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
// only considers the true dependency and won't detect a cycle.
assert!(found_cycle);
// FIXME: Ensure this won't cause a deadlock before we return
for waiter in wakelist.into_iter() {
waiter.notify(registry);
}
on_panic.disable();
}

View File

@ -4,10 +4,10 @@ use crate::infer::canonical::Canonical;
use crate::mir;
use crate::traits;
use crate::ty::fast_reject::SimplifiedType;
use crate::ty::query::caches::DefaultCacheSelector;
use crate::ty::subst::{GenericArg, SubstsRef};
use crate::ty::{self, Ty, TyCtxt};
use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
use rustc_query_system::query::DefaultCacheSelector;
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};

View File

@ -61,17 +61,17 @@ use std::sync::Arc;
#[macro_use]
mod plumbing;
pub(crate) use self::plumbing::CycleError;
use self::plumbing::*;
pub(crate) use rustc_query_system::query::CycleError;
use rustc_query_system::query::*;
mod stats;
pub use self::stats::print_stats;
#[cfg(parallel_compiler)]
mod job;
#[cfg(parallel_compiler)]
pub use self::job::handle_deadlock;
use self::job::QueryJobInfo;
pub use self::job::{QueryInfo, QueryJob, QueryJobId};
pub use rustc_query_system::query::{QueryInfo, QueryJob, QueryJobId};
mod keys;
use self::keys::Key;
@ -79,13 +79,9 @@ use self::keys::Key;
mod values;
use self::values::Value;
mod caches;
use self::caches::CacheSelector;
mod config;
use self::config::QueryAccessors;
pub use self::config::QueryConfig;
pub(crate) use self::config::QueryDescription;
use rustc_query_system::query::QueryAccessors;
pub use rustc_query_system::query::QueryConfig;
pub(crate) use rustc_query_system::query::QueryDescription;
mod on_disk_cache;
pub use self::on_disk_cache::OnDiskCache;

View File

@ -994,7 +994,8 @@ fn encode_query_results<'a, 'tcx, Q, E>(
query_result_index: &mut EncodedQueryResultIndex,
) -> Result<(), E::Error>
where
Q: super::config::QueryDescription<'tcx, Value: Encodable>,
Q: super::QueryDescription<TyCtxt<'tcx>>,
Q::Value: Encodable,
E: 'a + TyEncoder,
{
let _timer = tcx

View File

@ -2,372 +2,65 @@
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
use crate::dep_graph::{DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex};
use crate::ty::query::caches::QueryCache;
use crate::ty::query::config::QueryDescription;
use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
use crate::dep_graph::DepGraph;
use crate::ty::query::Query;
use crate::ty::tls;
use crate::ty::tls::{self, ImplicitCtxt};
use crate::ty::{self, TyCtxt};
use rustc_query_system::query::QueryContext;
use rustc_query_system::query::{CycleError, QueryJobId, QueryJobInfo};
#[cfg(not(parallel_compiler))]
use rustc_data_structures::cold_path;
use rustc_data_structures::fx::{FxHashMap, FxHasher};
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, FatalError, Handler, Level};
use rustc_span::source_map::DUMMY_SP;
use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level};
use rustc_span::def_id::DefId;
use rustc_span::Span;
use std::collections::hash_map::Entry;
use std::convert::TryFrom;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::mem;
use std::num::NonZeroU32;
use std::ptr;
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
pub(crate) struct QueryStateShard<'tcx, K, C> {
cache: C,
active: FxHashMap<K, QueryResult<'tcx>>,
impl QueryContext for TyCtxt<'tcx> {
type Query = Query<'tcx>;
/// Used to generate unique ids for active jobs.
jobs: u32,
}
impl<'tcx, K, C> QueryStateShard<'tcx, K, C> {
fn get_cache(&mut self) -> &mut C {
&mut self.cache
fn incremental_verify_ich(&self) -> bool {
self.sess.opts.debugging_opts.incremental_verify_ich
}
}
impl<'tcx, K, C: Default> Default for QueryStateShard<'tcx, K, C> {
fn default() -> QueryStateShard<'tcx, K, C> {
QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
fn verbose(&self) -> bool {
self.sess.verbose()
}
}
pub(crate) struct QueryState<'tcx, C: QueryCache> {
cache: C,
shards: Sharded<QueryStateShard<'tcx, C::Key, C::Sharded>>,
#[cfg(debug_assertions)]
pub(super) cache_hits: AtomicUsize,
}
impl<'tcx, C: QueryCache> QueryState<'tcx, C> {
pub(super) fn get_lookup<K2: Hash>(
&'tcx self,
key: &K2,
) -> QueryLookup<'tcx, C::Key, C::Sharded> {
// We compute the key's hash once and then use it for both the
// shard lookup and the hashmap lookup. This relies on the fact
// that both of them use `FxHasher`.
let mut hasher = FxHasher::default();
key.hash(&mut hasher);
let key_hash = hasher.finish();
let shard = self.shards.get_shard_index_by_hash(key_hash);
let lock = self.shards.get_shard_by_index(shard).lock();
QueryLookup { key_hash, shard, lock }
fn def_path_str(&self, def_id: DefId) -> String {
TyCtxt::def_path_str(*self, def_id)
}
}
/// Indicates the state of a query for a given key in a query map.
enum QueryResult<'tcx> {
/// An already executing query. The query job can be used to await for its completion.
Started(QueryJob<'tcx>),
fn dep_graph(&self) -> &DepGraph {
&self.dep_graph
}
/// The query panicked. Queries trying to wait on this will raise a fatal error which will
/// silently panic.
Poisoned,
}
fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>> {
tls::with_related_context(*self, |icx| icx.query)
}
impl<'tcx, C: QueryCache> QueryState<'tcx, C> {
pub(super) fn iter_results<R>(
fn try_collect_active_jobs(
&self,
f: impl for<'a> FnOnce(
Box<dyn Iterator<Item = (&'a C::Key, &'a C::Value, DepNodeIndex)> + 'a>,
) -> R,
) -> R {
self.cache.iter(&self.shards, |shard| &mut shard.cache, f)
}
pub(super) fn all_inactive(&self) -> bool {
let shards = self.shards.lock_shards();
shards.iter().all(|shard| shard.active.is_empty())
) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>> {
self.queries.try_collect_active_jobs()
}
pub(super) fn try_collect_active_jobs(
&self,
kind: DepKind,
make_query: fn(C::Key) -> Query<'tcx>,
jobs: &mut FxHashMap<QueryJobId, QueryJobInfo<'tcx>>,
) -> Option<()>
where
C::Key: Clone,
{
// We use try_lock_shards here since we are called from the
// deadlock handler, and this shouldn't be locked.
let shards = self.shards.try_lock_shards()?;
let shards = shards.iter().enumerate();
jobs.extend(shards.flat_map(|(shard_id, shard)| {
shard.active.iter().filter_map(move |(k, v)| {
if let QueryResult::Started(ref job) = *v {
let id =
QueryJobId { job: job.id, shard: u16::try_from(shard_id).unwrap(), kind };
let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
Some((id, QueryJobInfo { info, job: job.clone() }))
} else {
None
}
})
}));
Some(())
}
}
impl<'tcx, C: QueryCache> Default for QueryState<'tcx, C> {
fn default() -> QueryState<'tcx, C> {
QueryState {
cache: C::default(),
shards: Default::default(),
#[cfg(debug_assertions)]
cache_hits: AtomicUsize::new(0),
}
}
}
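`QueryState` spreads the active-job table and cache across shards to cut lock contention, and `get_lookup` hashes the key once, reusing the hash both to pick the shard and for the per-shard hash map. A simplified std-only sketch of the sharding idea (the real code also reuses the `FxHasher` hash for the map lookup itself):

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

const SHARDS: usize = 4;

// Each shard has its own lock, so concurrent queries with different keys
// rarely contend on the same mutex.
struct Sharded<K, V> {
    shards: Vec<Mutex<HashMap<K, V>>>,
}

impl<K: Hash + Eq, V> Sharded<K, V> {
    fn new() -> Self {
        Sharded { shards: (0..SHARDS).map(|_| Mutex::new(HashMap::new())).collect() }
    }

    // Hash the key once and derive the shard index from it.
    fn shard_for(&self, key: &K) -> usize {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        (hasher.finish() as usize) % SHARDS
    }

    fn insert(&self, key: K, value: V) {
        let shard = self.shard_for(&key);
        self.shards[shard].lock().unwrap().insert(key, value);
    }

    fn get_cloned(&self, key: &K) -> Option<V>
    where
        V: Clone,
    {
        let shard = self.shard_for(key);
        self.shards[shard].lock().unwrap().get(key).cloned()
    }
}

fn main() {
    let cache: Sharded<&'static str, u32> = Sharded::new();
    cache.insert("type_of(foo)", 1);
    cache.insert("layout_of(Bar)", 2);
    println!("{:?}", cache.get_cloned(&"type_of(foo)"));
}
```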
/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
pub(crate) struct QueryLookup<'tcx, K, C> {
pub(super) key_hash: u64,
shard: usize,
pub(super) lock: LockGuard<'tcx, QueryStateShard<'tcx, K, C>>,
}
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, C>
where
C: QueryCache,
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
state: &'tcx QueryState<'tcx, C>,
key: C::Key,
id: QueryJobId,
}
impl<'tcx, C: QueryCache> JobOwner<'tcx, C>
where
C: QueryCache,
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
/// Either gets a `JobOwner` corresponding to the query, allowing us to
/// start executing the query, or returns with the result of the query.
/// This function assumes that `try_get_cached` is already called and returned `lookup`.
/// If the query is executing elsewhere, this will wait for it and return the result.
/// If the query panicked, this will silently panic.
///
/// This function is inlined because that results in a noticeable speed-up
/// for some compile-time benchmarks.
#[inline(always)]
fn try_start<Q>(
tcx: TyCtxt<'tcx>,
span: Span,
key: &C::Key,
mut lookup: QueryLookup<'tcx, C::Key, C::Sharded>,
) -> TryGetJob<'tcx, C>
where
Q: QueryDescription<'tcx, Key = C::Key, Value = C::Value, Cache = C>,
{
let lock = &mut *lookup.lock;
let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
Entry::Occupied(mut entry) => {
match entry.get_mut() {
QueryResult::Started(job) => {
// For parallel queries, we'll block and wait until the query running
// in another thread has completed. Record how long we wait in the
// self-profiler.
let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
Some(tcx.prof.query_blocked())
} else {
None
};
// Create the id of the job we're waiting for
let id = QueryJobId::new(job.id, lookup.shard, Q::DEP_KIND);
(job.latch(id), _query_blocked_prof_timer)
}
QueryResult::Poisoned => FatalError.raise(),
}
}
Entry::Vacant(entry) => {
// No job entry for this query. Return a new one to be started later.
// Generate an id unique within this shard.
let id = lock.jobs.checked_add(1).unwrap();
lock.jobs = id;
let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
let global_id = QueryJobId::new(id, lookup.shard, Q::DEP_KIND);
let job = tls::with_related_context(tcx, |icx| QueryJob::new(id, span, icx.query));
entry.insert(QueryResult::Started(job));
let owner =
JobOwner { state: Q::query_state(tcx), id: global_id, key: (*key).clone() };
return TryGetJob::NotYetStarted(owner);
}
};
mem::drop(lookup.lock);
// If we are single-threaded we know that we have a cycle error,
// so we just return the error.
#[cfg(not(parallel_compiler))]
return TryGetJob::Cycle(cold_path(|| {
Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span))
}));
// With parallel queries we might just have to wait on some other
// thread.
#[cfg(parallel_compiler)]
{
let result = latch.wait_on(tcx, span);
if let Err(cycle) = result {
return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
}
let cached = tcx.try_get_cached(
Q::query_state(tcx),
(*key).clone(),
|value, index| (value.clone(), index),
|_, _| panic!("value must be in cache after waiting"),
);
if let Some(prof_timer) = _query_blocked_prof_timer.take() {
prof_timer.finish_with_query_invocation_id(cached.1.into());
}
return TryGetJob::JobCompleted(cached);
}
}
/// Completes the query by updating the query cache with the `result`,
/// signaling the waiter, and forgetting the `JobOwner` so it won't poison the query.
#[inline(always)]
fn complete(self, tcx: TyCtxt<'tcx>, result: &C::Value, dep_node_index: DepNodeIndex) {
// We can move out of `self` here because we `mem::forget` it below
let key = unsafe { ptr::read(&self.key) };
let state = self.state;
// Forget ourself so our destructor won't poison the query
mem::forget(self);
let job = {
let result = result.clone();
let mut lock = state.shards.get_shard_by_value(&key).lock();
let job = match lock.active.remove(&key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
};
state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
job
};
job.signal_complete();
}
}
#[inline(always)]
fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
where
F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
{
let diagnostics = Lock::new(ThinVec::new());
let result = f(Some(&diagnostics));
(result, diagnostics.into_inner())
}
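`with_diagnostics` captures diagnostics emitted while a query runs by handing the closure a side-channel buffer and returning its contents next to the result. The same pattern in miniature, with a plain `Mutex<Vec<String>>` in place of `Lock<ThinVec<Diagnostic>>`:

```rust
use std::sync::Mutex;

// Run `f`, collecting anything it pushes into the shared buffer.
fn with_diagnostics<F, R>(f: F) -> (R, Vec<String>)
where
    F: FnOnce(&Mutex<Vec<String>>) -> R,
{
    let diagnostics = Mutex::new(Vec::new());
    let result = f(&diagnostics);
    (result, diagnostics.into_inner().unwrap())
}

fn main() {
    let (value, diags) = with_diagnostics(|diags| {
        diags.lock().unwrap().push("warning: unused variable".to_string());
        42
    });
    println!("result={} diagnostics={:?}", value, diags);
}
```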
impl<'tcx, C: QueryCache> Drop for JobOwner<'tcx, C>
where
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
#[inline(never)]
#[cold]
fn drop(&mut self) {
// Poison the query so jobs waiting on it panic.
let state = self.state;
let shard = state.shards.get_shard_by_value(&self.key);
let job = {
let mut shard = shard.lock();
let job = match shard.active.remove(&self.key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
};
shard.active.insert(self.key.clone(), QueryResult::Poisoned);
job
};
// Also signal the completion of the job, so waiters
// will continue execution.
job.signal_complete();
}
}
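`JobOwner` is a guard: `complete` removes the active entry, stores the result, and `mem::forget`s the guard, so the `Drop` impl only runs when the provider panicked, poisoning the entry so waiters fail loudly instead of hanging. A standalone sketch of that poison-on-drop pattern:

```rust
use std::collections::HashMap;
use std::mem;
use std::sync::Mutex;

#[derive(Debug)]
enum QueryResult {
    Started,
    Poisoned,
}

struct JobOwner<'a> {
    active: &'a Mutex<HashMap<&'static str, QueryResult>>,
    key: &'static str,
}

impl<'a> JobOwner<'a> {
    fn start(active: &'a Mutex<HashMap<&'static str, QueryResult>>, key: &'static str) -> Self {
        active.lock().unwrap().insert(key, QueryResult::Started);
        JobOwner { active, key }
    }

    // Normal completion: remove the entry and forget the guard so Drop never runs.
    fn complete(self) {
        self.active.lock().unwrap().remove(self.key);
        mem::forget(self);
    }
}

impl Drop for JobOwner<'_> {
    fn drop(&mut self) {
        // Only reached when the query panicked: poison the entry for waiters.
        self.active.lock().unwrap().insert(self.key, QueryResult::Poisoned);
    }
}

fn main() {
    let active = Mutex::new(HashMap::new());

    // Happy path: the entry is gone afterwards.
    JobOwner::start(&active, "type_of").complete();

    // Panicking path: the Drop impl poisons the entry.
    let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
        let _job = JobOwner::start(&active, "layout_of");
        panic!("provider panicked");
    }));

    println!("{:?}", active.lock().unwrap());
}
```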
#[derive(Clone)]
pub(crate) struct CycleError<'tcx> {
/// The query and related span that uses the cycle.
pub(super) usage: Option<(Span, Query<'tcx>)>,
pub(super) cycle: Vec<QueryInfo<'tcx>>,
}
/// The result of `try_start`.
enum TryGetJob<'tcx, C: QueryCache>
where
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
NotYetStarted(JobOwner<'tcx, C>),
/// The query was already completed.
/// Returns the result of the query and its dep-node index
/// if it succeeded or a cycle error if it failed.
#[cfg(parallel_compiler)]
JobCompleted((C::Value, DepNodeIndex)),
/// Trying to execute the query resulted in a cycle.
Cycle(C::Value),
}
impl<'tcx> TyCtxt<'tcx> {
/// Executes a job by changing the `ImplicitCtxt` to point to the
/// new query job while it executes. It returns the diagnostics
/// captured during execution and the actual result.
#[inline(always)]
fn start_query<F, R>(
self,
token: QueryJobId,
fn start_query<R>(
&self,
token: QueryJobId<Self::DepKind>,
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
compute: F,
) -> R
where
F: FnOnce(TyCtxt<'tcx>) -> R,
{
compute: impl FnOnce(Self) -> R,
) -> R {
// The `TyCtxt` stored in TLS has the same global interner lifetime
// as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
// when accessing the `ImplicitCtxt`.
tls::with_related_context(self, move |current_icx| {
tls::with_related_context(*self, move |current_icx| {
// Update the `ImplicitCtxt` to point to our new query job.
let new_icx = tls::ImplicitCtxt {
tcx: self,
let new_icx = ImplicitCtxt {
tcx: *self,
query: Some(token),
diagnostics,
layout_depth: current_icx.layout_depth,
@ -375,15 +68,17 @@ impl<'tcx> TyCtxt<'tcx> {
};
// Use the `ImplicitCtxt` while we execute the query.
tls::enter_context(&new_icx, |_| compute(self))
tls::enter_context(&new_icx, |_| compute(*self))
})
}
}
impl<'tcx> TyCtxt<'tcx> {
#[inline(never)]
#[cold]
pub(super) fn report_cycle(
self,
CycleError { usage, cycle: stack }: CycleError<'tcx>,
CycleError { usage, cycle: stack }: CycleError<Query<'tcx>>,
) -> DiagnosticBuilder<'tcx> {
assert!(!stack.is_empty());
@ -433,7 +128,7 @@ impl<'tcx> TyCtxt<'tcx> {
// Be careful relying on global state here: this code is called from
// a panic hook, which means that the global `Handler` may be in a weird
// state if it was responsible for triggering the panic.
tls::with_context_opt(|icx| {
ty::tls::with_context_opt(|icx| {
if let Some(icx) = icx {
let query_map = icx.tcx.queries.try_collect_active_jobs();
@ -468,337 +163,6 @@ impl<'tcx> TyCtxt<'tcx> {
eprintln!("end of query stack");
}
/// Checks if the query is already computed and in the cache.
/// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need
/// to compute it.
#[inline(always)]
fn try_get_cached<C, R, OnHit, OnMiss>(
self,
state: &'tcx QueryState<'tcx, C>,
key: C::Key,
// `on_hit` can be called while holding a lock to the query cache
on_hit: OnHit,
on_miss: OnMiss,
) -> R
where
C: QueryCache,
OnHit: FnOnce(&C::Value, DepNodeIndex) -> R,
OnMiss: FnOnce(C::Key, QueryLookup<'tcx, C::Key, C::Sharded>) -> R,
{
state.cache.lookup(
state,
QueryStateShard::<C::Key, C::Sharded>::get_cache,
key,
|value, index| {
if unlikely!(self.prof.enabled()) {
self.prof.query_cache_hit(index.into());
}
#[cfg(debug_assertions)]
{
state.cache_hits.fetch_add(1, Ordering::Relaxed);
}
on_hit(value, index)
},
on_miss,
)
}
#[inline(never)]
pub(super) fn get_query<Q: QueryDescription<'tcx> + 'tcx>(
self,
span: Span,
key: Q::Key,
) -> Q::Value {
debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
self.try_get_cached(
Q::query_state(self),
key,
|value, index| {
self.dep_graph.read_index(index);
value.clone()
},
|key, lookup| self.try_execute_query::<Q>(span, key, lookup),
)
}
#[inline(always)]
fn try_execute_query<Q: QueryDescription<'tcx> + 'tcx>(
self,
span: Span,
key: Q::Key,
lookup: QueryLookup<'tcx, Q::Key, <Q::Cache as QueryCache>::Sharded>,
) -> Q::Value {
let job = match JobOwner::try_start::<Q>(self, span, &key, lookup) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(result) => return result,
#[cfg(parallel_compiler)]
TryGetJob::JobCompleted((v, index)) => {
self.dep_graph.read_index(index);
return v;
}
};
// Fast path for when incr. comp. is off. `to_dep_node` is
// expensive for some `DepKind`s.
if !self.dep_graph.is_fully_enabled() {
let null_dep_node = DepNode::new_no_params(crate::dep_graph::DepKind::Null);
return self.force_query_with_job::<Q>(key, job, null_dep_node).0;
}
if Q::ANON {
let prof_timer = self.prof.query_provider();
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
self.start_query(job.id, diagnostics, |tcx| {
tcx.dep_graph.with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key))
})
});
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
self.dep_graph.read_index(dep_node_index);
if unlikely!(!diagnostics.is_empty()) {
self.queries
.on_disk_cache
.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
}
job.complete(self, &result, dep_node_index);
return result;
}
let dep_node = Q::to_dep_node(self, &key);
if !Q::EVAL_ALWAYS {
// The diagnostics for this query will be
// promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
let loaded = self.start_query(job.id, None, |tcx| {
let marked = tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node);
marked.map(|(prev_dep_node_index, dep_node_index)| {
(
tcx.load_from_disk_and_cache_in_memory::<Q>(
key.clone(),
prev_dep_node_index,
dep_node_index,
&dep_node,
),
dep_node_index,
)
})
});
if let Some((result, dep_node_index)) = loaded {
job.complete(self, &result, dep_node_index);
return result;
}
}
let (result, dep_node_index) = self.force_query_with_job::<Q>(key, job, dep_node);
self.dep_graph.read_index(dep_node_index);
result
}
fn load_from_disk_and_cache_in_memory<Q: QueryDescription<'tcx>>(
self,
key: Q::Key,
prev_dep_node_index: SerializedDepNodeIndex,
dep_node_index: DepNodeIndex,
dep_node: &DepNode,
) -> Q::Value {
// Note this function can be called concurrently from the same query
// We must ensure that this is handled correctly.
debug_assert!(self.dep_graph.is_green(dep_node));
// First we try to load the result from the on-disk cache.
let result = if Q::cache_on_disk(self, key.clone(), None) {
let prof_timer = self.prof.incr_cache_loading();
let result = Q::try_load_from_disk(self, prev_dep_node_index);
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
// We always expect to find a cached result for things that
// can be forced from `DepNode`.
debug_assert!(
!dep_node.kind.can_reconstruct_query_key() || result.is_some(),
"missing on-disk cache entry for {:?}",
dep_node
);
result
} else {
// Some things are never cached on disk.
None
};
let result = if let Some(result) = result {
result
} else {
// We could not load a result from the on-disk cache, so
// recompute.
let prof_timer = self.prof.query_provider();
// The dep-graph for this computation is already in-place.
let result = self.dep_graph.with_ignore(|| Q::compute(self, key));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
result
};
// If `-Zincremental-verify-ich` is specified, re-hash results from
// the cache and make sure that they have the expected fingerprint.
if unlikely!(self.sess.opts.debugging_opts.incremental_verify_ich) {
self.incremental_verify_ich::<Q>(&result, dep_node, dep_node_index);
}
result
}
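`load_from_disk_and_cache_in_memory` only runs for nodes that were just marked green: it first tries the on-disk cache from the previous session and recomputes (with dependency tracking ignored) only when no cached value exists. The decision logic reduced to a standalone function, with hypothetical closures standing in for the two paths:

```rust
// `try_load_from_disk` and `recompute` stand in for the disk-cache lookup and
// the provider re-execution; the real code also records profiling timers and
// optionally verifies the result fingerprint.
fn load_green_result<V>(
    cache_on_disk: bool,
    try_load_from_disk: impl FnOnce() -> Option<V>,
    recompute: impl FnOnce() -> V,
) -> V {
    let cached = if cache_on_disk {
        // The dep node is green, so a disk-cached value from the previous
        // session is still valid if one exists.
        try_load_from_disk()
    } else {
        // Some query results are never cached on disk.
        None
    };
    cached.unwrap_or_else(recompute)
}

fn main() {
    let hit = load_green_result(true, || Some("from disk".to_string()), || "recomputed".to_string());
    let miss = load_green_result(false, || None, || "recomputed".to_string());
    println!("{} / {}", hit, miss);
}
```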
#[inline(never)]
#[cold]
fn incremental_verify_ich<Q: QueryDescription<'tcx>>(
self,
result: &Q::Value,
dep_node: &DepNode,
dep_node_index: DepNodeIndex,
) {
use rustc_data_structures::fingerprint::Fingerprint;
assert!(
Some(self.dep_graph.fingerprint_of(dep_node_index))
== self.dep_graph.prev_fingerprint_of(dep_node),
"fingerprint for green query instance not loaded from cache: {:?}",
dep_node,
);
debug!("BEGIN verify_ich({:?})", dep_node);
let mut hcx = self.create_stable_hashing_context();
let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
debug!("END verify_ich({:?})", dep_node);
let old_hash = self.dep_graph.fingerprint_of(dep_node_index);
assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,);
}
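`-Zincremental-verify-ich` re-hashes a result obtained for a green node and asserts that it matches the fingerprint recorded in the dep graph, catching unstable hashing. A toy version using a standard hasher in place of the stable hashing context:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn fingerprint<T: Hash>(value: &T) -> u64 {
    let mut hasher = DefaultHasher::new();
    value.hash(&mut hasher);
    hasher.finish()
}

// Re-hash the result and compare against the fingerprint stored in the dep graph.
fn verify_ich<T: Hash + std::fmt::Debug>(result: &T, stored_fingerprint: u64) {
    let new_hash = fingerprint(result);
    assert!(
        new_hash == stored_fingerprint,
        "found unstable fingerprints for {:?}",
        result
    );
}

fn main() {
    let result = vec!["fn main() {}"];
    let stored = fingerprint(&result); // What the dep graph recorded earlier.
    verify_ich(&result, stored); // Passes: the hash is reproducible.
    println!("fingerprints match");
}
```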
#[inline(always)]
fn force_query_with_job<Q: QueryDescription<'tcx> + 'tcx>(
self,
key: Q::Key,
job: JobOwner<'tcx, Q::Cache>,
dep_node: DepNode,
) -> (Q::Value, DepNodeIndex) {
// If the following assertion triggers, it can have two reasons:
// 1. Something is wrong with DepNode creation, either here or
// in `DepGraph::try_mark_green()`.
// 2. Two distinct query keys get mapped to the same `DepNode`
// (see for example #48923).
assert!(
!self.dep_graph.dep_node_exists(&dep_node),
"forcing query with already existing `DepNode`\n\
- query-key: {:?}\n\
- dep-node: {:?}",
key,
dep_node
);
let prof_timer = self.prof.query_provider();
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
self.start_query(job.id, diagnostics, |tcx| {
if Q::EVAL_ALWAYS {
tcx.dep_graph.with_eval_always_task(
dep_node,
tcx,
key,
Q::compute,
Q::hash_result,
)
} else {
tcx.dep_graph.with_task(dep_node, tcx, key, Q::compute, Q::hash_result)
}
})
});
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
if unlikely!(!diagnostics.is_empty()) {
if dep_node.kind != crate::dep_graph::DepKind::Null {
self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics);
}
}
job.complete(self, &result, dep_node_index);
(result, dep_node_index)
}
/// Ensure that either this query has all green inputs or been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
pub(super) fn ensure_query<Q: QueryDescription<'tcx> + 'tcx>(self, key: Q::Key) {
if Q::EVAL_ALWAYS {
let _ = self.get_query::<Q>(DUMMY_SP, key);
return;
}
// Ensuring an anonymous query makes no sense
assert!(!Q::ANON);
let dep_node = Q::to_dep_node(self, &key);
match self.dep_graph.try_mark_green_and_read(self, &dep_node) {
None => {
// A None return from `try_mark_green_and_read` means that this is either
// a new dep node or that the dep node has already been marked red.
// Either way, we can't call `dep_graph.read()` as we don't have the
// DepNodeIndex. We must invoke the query itself. The performance cost
// this introduces should be negligible as we'll immediately hit the
// in-memory cache, or another query down the line will.
let _ = self.get_query::<Q>(DUMMY_SP, key);
}
Some((_, dep_node_index)) => {
self.prof.query_cache_hit(dep_node_index.into());
}
}
}
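`ensure_query` runs a query only for its side effects: if the dep node can be marked green from the previous session it just records the read, otherwise it falls back to executing the query. Its control flow, sketched with hypothetical closures:

```rust
// Outcomes of trying to mark the query's dep node green.
enum MarkGreen {
    Green,   // All inputs unchanged: recording the read is enough.
    Unknown, // New or red node: we must execute the query.
}

fn ensure_query(
    eval_always: bool,
    try_mark_green_and_read: impl FnOnce() -> MarkGreen,
    run_query: impl FnOnce(),
) {
    if eval_always {
        // eval_always queries can never be green; just run them.
        run_query();
        return;
    }
    match try_mark_green_and_read() {
        MarkGreen::Green => {
            // Inputs are unchanged; nothing more to do beyond the recorded read.
        }
        MarkGreen::Unknown => run_query(),
    }
}

fn main() {
    ensure_query(false, || MarkGreen::Unknown, || println!("executed the query"));
    ensure_query(false, || MarkGreen::Green, || println!("not reached"));
}
```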
#[allow(dead_code)]
pub(super) fn force_query<Q: QueryDescription<'tcx> + 'tcx>(
self,
key: Q::Key,
span: Span,
dep_node: DepNode,
) {
// We may be concurrently trying both execute and force a query.
// Ensure that only one of them runs the query.
self.try_get_cached(
Q::query_state(self),
key,
|_, _| {
// Cache hit, do nothing
},
|key, lookup| {
let job = match JobOwner::try_start::<Q>(self, span, &key, lookup) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(_) => return,
#[cfg(parallel_compiler)]
TryGetJob::JobCompleted(_) => return,
};
self.force_query_with_job::<Q>(key, job, dep_node);
},
);
}
}
macro_rules! handle_cycle_error {
@ -909,7 +273,7 @@ macro_rules! define_queries_inner {
}
}
pub fn describe(&self, tcx: TyCtxt<'_>) -> Cow<'static, str> {
pub fn describe(&self, tcx: TyCtxt<$tcx>) -> Cow<'static, str> {
let (r, name) = match *self {
$(Query::$name(key) => {
(queries::$name::describe(tcx, key), stringify!($name))
@ -956,14 +320,14 @@ macro_rules! define_queries_inner {
})*
}
$(impl<$tcx> QueryConfig<$tcx> for queries::$name<$tcx> {
$(impl<$tcx> QueryConfig<TyCtxt<$tcx>> for queries::$name<$tcx> {
type Key = $K;
type Value = $V;
const NAME: &'static str = stringify!($name);
const CATEGORY: ProfileCategory = $category;
}
impl<$tcx> QueryAccessors<$tcx> for queries::$name<$tcx> {
impl<$tcx> QueryAccessors<TyCtxt<$tcx>> for queries::$name<$tcx> {
const ANON: bool = is_anon!([$($modifiers)*]);
const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node;
@ -971,7 +335,7 @@ macro_rules! define_queries_inner {
type Cache = query_storage!([$($modifiers)*][$K, $V]);
#[inline(always)]
fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<$tcx, Self::Cache> {
fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<TyCtxt<$tcx>, Self::Cache> {
&tcx.queries.$name
}
@ -1002,7 +366,7 @@ macro_rules! define_queries_inner {
fn handle_cycle_error(
tcx: TyCtxt<'tcx>,
error: CycleError<'tcx>
error: CycleError<Query<'tcx>>
) -> Self::Value {
handle_cycle_error!([$($modifiers)*][tcx, error])
}
@ -1017,7 +381,7 @@ macro_rules! define_queries_inner {
$($(#[$attr])*
#[inline(always)]
pub fn $name(self, key: $K) {
self.tcx.ensure_query::<queries::$name<'_>>(key)
ensure_query::<queries::$name<'_>, _>(self.tcx, key)
})*
}
@ -1095,7 +459,7 @@ macro_rules! define_queries_inner {
$($(#[$attr])*
#[inline(always)]
pub fn $name(self, key: $K) -> $V {
self.tcx.get_query::<queries::$name<'_>>(self.span, key)
get_query::<queries::$name<'_>, _>(self.tcx, self.span, key)
})*
}
@ -1124,8 +488,8 @@ macro_rules! define_queries_struct {
fallback_extern_providers: Box<Providers<$tcx>>,
$($(#[$attr])* $name: QueryState<
$tcx,
<queries::$name<$tcx> as QueryAccessors<'tcx>>::Cache,
TyCtxt<$tcx>,
<queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache,
>,)*
}
@ -1145,12 +509,12 @@ macro_rules! define_queries_struct {
pub(crate) fn try_collect_active_jobs(
&self
) -> Option<FxHashMap<QueryJobId, QueryJobInfo<'tcx>>> {
) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<TyCtxt<'tcx>>>> {
let mut jobs = FxHashMap::default();
$(
self.$name.try_collect_active_jobs(
<queries::$name<'tcx> as QueryAccessors<'tcx>>::DEP_KIND,
<queries::$name<'tcx> as QueryAccessors<TyCtxt<'tcx>>>::DEP_KIND,
Query::$name,
&mut jobs,
)?;

View File

@ -1,11 +1,11 @@
use crate::ty::context::TyCtxt;
use crate::ty::query::caches::QueryCache;
use crate::ty::query::plumbing::QueryState;
use measureme::{StringComponent, StringId};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfiler;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::definitions::DefPathData;
use rustc_query_system::query::QueryCache;
use rustc_query_system::query::QueryState;
use std::fmt::Debug;
use std::io::Write;
@ -160,7 +160,7 @@ where
pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
tcx: TyCtxt<'tcx>,
query_name: &'static str,
query_state: &QueryState<'tcx, C>,
query_state: &QueryState<TyCtxt<'tcx>, C>,
string_cache: &mut QueryKeyStringCache,
) where
C: QueryCache,

View File

@ -1,9 +1,9 @@
use crate::ty::query::caches::QueryCache;
use crate::ty::query::config::QueryAccessors;
use crate::ty::query::plumbing::QueryState;
use crate::ty::query::queries;
use crate::ty::TyCtxt;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_query_system::query::QueryCache;
use rustc_query_system::query::QueryState;
use rustc_query_system::query::{QueryAccessors, QueryContext};
use std::any::type_name;
use std::mem;
@ -38,7 +38,10 @@ struct QueryStats {
local_def_id_keys: Option<usize>,
}
fn stats<'tcx, C: QueryCache>(name: &'static str, map: &QueryState<'tcx, C>) -> QueryStats {
fn stats<CTX: QueryContext, C: QueryCache>(
name: &'static str,
map: &QueryState<CTX, C>,
) -> QueryStats {
let mut stats = QueryStats {
name,
#[cfg(debug_assertions)]
@ -124,7 +127,8 @@ macro_rules! print_stats {
$($(
queries.push(stats::<
<queries::$name<'_> as QueryAccessors<'_>>::Cache,
TyCtxt<'_>,
<queries::$name<'_> as QueryAccessors<TyCtxt<'_>>>::Cache,
>(
stringify!($name),
&tcx.queries.$name,

View File

@ -7,7 +7,6 @@ use crate::type_::Type;
use crate::value::Value;
use rustc::bug;
use rustc::dep_graph::DepGraphSafe;
use rustc::mir::mono::CodegenUnit;
use rustc::ty::layout::{
HasParamEnv, LayoutError, LayoutOf, PointeeInfo, Size, TyLayout, VariantIdx,
@ -90,8 +89,6 @@ pub struct CodegenCx<'ll, 'tcx> {
local_gen_sym_counter: Cell<usize>,
}
impl<'ll, 'tcx> DepGraphSafe for CodegenCx<'ll, 'tcx> {}
pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode {
let reloc_model_arg = match sess.opts.cg.relocation_model {
Some(ref s) => &s[..],

View File

@ -380,7 +380,7 @@ fn add_query_description_impl(
quote! {
#[allow(unused_variables)]
fn describe(
#tcx: TyCtxt<'_>,
#tcx: TyCtxt<'tcx>,
#key: #arg,
) -> Cow<'static, str> {
format!(#desc).into()
@ -393,7 +393,7 @@ fn add_query_description_impl(
let desc = desc.unwrap_or(quote! {});
impls.extend(quote! {
impl<'tcx> QueryDescription<'tcx> for queries::#name<'tcx> {
impl<'tcx> QueryDescription<TyCtxt<'tcx>> for queries::#name<'tcx> {
#desc
#cache
}
@ -489,7 +489,8 @@ pub fn rustc_queries(input: TokenStream) -> TokenStream {
::rustc::dep_graph::DepKind::#name => {
if <#arg as DepNodeParams<TyCtxt<'_>>>::CAN_RECONSTRUCT_QUERY_KEY {
if let Some(key) = <#arg as DepNodeParams<TyCtxt<'_>>>::recover($tcx, $dep_node) {
$tcx.force_query::<crate::ty::query::queries::#name<'_>>(
force_query::<crate::ty::query::queries::#name<'_>, _>(
$tcx,
key,
DUMMY_SP,
*$dep_node

View File

@ -37,7 +37,7 @@ macro_rules! provide {
$(fn $name<$lt: $lt, T: IntoArgs>(
$tcx: TyCtxt<$lt>,
def_id_arg: T,
) -> <ty::queries::$name<$lt> as QueryConfig<$lt>>::Value {
) -> <ty::queries::$name<$lt> as QueryConfig<TyCtxt<$lt>>>::Value {
let _prof_timer =
$tcx.prof.generic_activity("metadata_decode_entry");

View File

@ -11,12 +11,12 @@ doctest = false
[dependencies]
log = { version = "0.4", features = ["release_max_level_info", "std"] }
rustc_ast = { path = "../librustc_ast" }
rustc-rayon-core = "0.3.0"
rustc_data_structures = { path = "../librustc_data_structures" }
rustc_errors = { path = "../librustc_errors" }
rustc_hir = { path = "../librustc_hir" }
rustc_index = { path = "../librustc_index" }
rustc_macros = { path = "../librustc_macros" }
rustc_serialize = { path = "../libserialize", package = "serialize" }
rustc_span = { path = "../librustc_span" }
parking_lot = "0.9"
smallvec = { version = "1.0", features = ["union", "may_dangle"] }

View File

@ -46,7 +46,6 @@ use super::{DepContext, DepKind};
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_macros::HashStable_Generic;
use std::fmt;
use std::hash::Hash;
@ -127,7 +126,6 @@ where
/// the need to be mapped or unmapped. (This ensures we can serialize
/// them even in the absence of a tcx.)
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
#[derive(HashStable_Generic)]
pub struct WorkProductId {
hash: Fingerprint,
}
@ -144,3 +142,10 @@ impl WorkProductId {
WorkProductId { hash: fingerprint }
}
}
impl<HCX> HashStable<HCX> for WorkProductId {
#[inline]
fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
self.hash.hash_stable(hcx, hasher)
}
}

View File

@ -20,10 +20,8 @@ use std::sync::atomic::Ordering::Relaxed;
use super::debug::EdgeFilter;
use super::prev::PreviousDepGraph;
use super::query::DepGraphQuery;
use super::safe::DepGraphSafe;
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, WorkProductId};
use crate::{HashStableContext, HashStableContextProvider};
#[derive(Clone)]
pub struct DepGraph<K: DepKind> {
@ -191,18 +189,14 @@ impl<K: DepKind> DepGraph<K> {
/// `arg` parameter.
///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
pub fn with_task<H, C, A, R>(
pub fn with_task<Ctxt: DepContext<DepKind = K>, A, R>(
&self,
key: DepNode<K>,
cx: C,
cx: Ctxt,
arg: A,
task: fn(C, A) -> R,
hash_result: impl FnOnce(&mut H, &R) -> Option<Fingerprint>,
) -> (R, DepNodeIndex)
where
C: DepGraphSafe + HashStableContextProvider<H>,
H: HashStableContext,
{
task: fn(Ctxt, A) -> R,
hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
) -> (R, DepNodeIndex) {
self.with_task_impl(
key,
cx,
@ -223,13 +217,13 @@ impl<K: DepKind> DepGraph<K> {
)
}
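The key change in `graph.rs`: `with_task` and friends drop the separate `DepGraphSafe`/`HashStableContextProvider` bounds and instead accept any `Ctxt: DepContext<DepKind = K>`, whose associated `StableHashingContext` feeds `hash_result`. A reduced sketch of that shape, with a trivial `u64` in place of `Fingerprint`:

```rust
// Minimal stand-ins for the generic dep-graph pieces.
trait DepContext: Copy {
    type StableHashingContext;
    fn create_stable_hashing_context(&self) -> Self::StableHashingContext;
}

struct DepGraph;

impl DepGraph {
    // Execute `task`, then hash its result with the context's hashing context.
    fn with_task<Ctxt: DepContext, A, R>(
        &self,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<u64>,
    ) -> (R, Option<u64>) {
        let mut hcx = cx.create_stable_hashing_context();
        let result = task(cx, arg);
        let fingerprint = hash_result(&mut hcx, &result);
        (result, fingerprint)
    }
}

// A hypothetical compiler context implementing the trait.
#[derive(Copy, Clone)]
struct Tcx;

impl DepContext for Tcx {
    type StableHashingContext = ();
    fn create_stable_hashing_context(&self) -> Self::StableHashingContext {}
}

fn main() {
    let graph = DepGraph;
    let (result, fingerprint) =
        graph.with_task(Tcx, 21u64, |_tcx, n| n * 2, |_hcx, r| Some(*r));
    println!("result={} fingerprint={:?}", result, fingerprint);
}
```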
fn with_task_impl<H, C, A, R>(
fn with_task_impl<Ctxt: DepContext<DepKind = K>, A, R>(
&self,
key: DepNode<K>,
cx: C,
cx: Ctxt,
arg: A,
no_tcx: bool,
task: fn(C, A) -> R,
task: fn(Ctxt, A) -> R,
create_task: fn(DepNode<K>) -> Option<TaskDeps<K>>,
finish_task_and_alloc_depnode: fn(
&CurrentDepGraph<K>,
@ -237,12 +231,8 @@ impl<K: DepKind> DepGraph<K> {
Fingerprint,
Option<TaskDeps<K>>,
) -> DepNodeIndex,
hash_result: impl FnOnce(&mut H, &R) -> Option<Fingerprint>,
) -> (R, DepNodeIndex)
where
C: DepGraphSafe + HashStableContextProvider<H>,
H: HashStableContext,
{
hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
) -> (R, DepNodeIndex) {
if let Some(ref data) = self.data {
let task_deps = create_task(key).map(Lock::new);
@ -251,7 +241,7 @@ impl<K: DepKind> DepGraph<K> {
// anyway so that
// - we make sure that the infrastructure works and
// - we can get an idea of the runtime cost.
let mut hcx = cx.get_stable_hashing_context();
let mut hcx = cx.create_stable_hashing_context();
let result = if no_tcx {
task(cx, arg)
@ -268,7 +258,7 @@ impl<K: DepKind> DepGraph<K> {
task_deps.map(|lock| lock.into_inner()),
);
let print_status = cfg!(debug_assertions) && hcx.debug_dep_tasks();
let print_status = cfg!(debug_assertions) && cx.debug_dep_tasks();
// Determine the color of the new DepNode.
if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
@ -335,18 +325,14 @@ impl<K: DepKind> DepGraph<K> {
/// Executes something within an "eval-always" task which is a task
/// that runs whenever anything changes.
pub fn with_eval_always_task<H, C, A, R>(
pub fn with_eval_always_task<Ctxt: DepContext<DepKind = K>, A, R>(
&self,
key: DepNode<K>,
cx: C,
cx: Ctxt,
arg: A,
task: fn(C, A) -> R,
hash_result: impl FnOnce(&mut H, &R) -> Option<Fingerprint>,
) -> (R, DepNodeIndex)
where
C: DepGraphSafe + HashStableContextProvider<H>,
H: HashStableContext,
{
task: fn(Ctxt, A) -> R,
hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
) -> (R, DepNodeIndex) {
self.with_task_impl(
key,
cx,

View File

@ -3,7 +3,6 @@ mod dep_node;
mod graph;
mod prev;
mod query;
mod safe;
mod serialized;
pub use dep_node::{DepNode, DepNodeParams, WorkProductId};
@ -11,8 +10,6 @@ pub use graph::WorkProductFileKind;
pub use graph::{hash_result, DepGraph, DepNodeColor, DepNodeIndex, TaskDeps, WorkProduct};
pub use prev::PreviousDepGraph;
pub use query::DepGraphQuery;
pub use safe::AssertDepGraphSafe;
pub use safe::DepGraphSafe;
pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use rustc_data_structures::profiling::SelfProfilerRef;
@ -25,11 +22,13 @@ use std::hash::Hash;
pub trait DepContext: Copy {
type DepKind: self::DepKind;
type StableHashingContext: crate::HashStableContext;
type StableHashingContext;
/// Create a hashing context for hashing new results.
fn create_stable_hashing_context(&self) -> Self::StableHashingContext;
fn debug_dep_tasks(&self) -> bool;
/// Try to force a dep node to execute and see if it's green.
fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
@ -48,12 +47,21 @@ pub trait DepContext: Copy {
/// Register diagnostics for the given node, for use in next session.
fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
/// Register diagnostics for the given node, for use in next session.
fn store_diagnostics_for_anon_node(
&self,
dep_node_index: DepNodeIndex,
diagnostics: ThinVec<Diagnostic>,
);
/// Access the profiler.
fn profiler(&self) -> &SelfProfilerRef;
}
/// Describe the different families of dependency nodes.
pub trait DepKind: Copy + fmt::Debug + Eq + Ord + Hash {
const NULL: Self;
/// Return whether this kind always require evaluation.
fn is_eval_always(&self) -> bool;
@ -72,4 +80,6 @@ pub trait DepKind: Copy + fmt::Debug + Eq + Ord + Hash {
fn read_deps<OP>(op: OP) -> ()
where
OP: for<'a> FnOnce(Option<&'a Lock<TaskDeps<Self>>>) -> ();
fn can_reconstruct_query_key(&self) -> bool;
}

View File

@ -1,51 +0,0 @@
//! The `DepGraphSafe` trait
use rustc_ast::ast::NodeId;
use rustc_hir::def_id::DefId;
use rustc_hir::BodyId;
/// The `DepGraphSafe` trait is used to specify what kinds of values
/// are safe to "leak" into a task. The idea is that this should be
/// only be implemented for things like the tcx as well as various id
/// types, which will create reads in the dep-graph whenever the trait
/// loads anything that might depend on the input program.
pub trait DepGraphSafe {}
/// A `BodyId` on its own doesn't give access to any particular state.
/// You must fetch the state from the various maps or generate
/// on-demand queries, all of which create reads.
impl DepGraphSafe for BodyId {}
/// A `NodeId` on its own doesn't give access to any particular state.
/// You must fetch the state from the various maps or generate
/// on-demand queries, all of which create reads.
impl DepGraphSafe for NodeId {}
/// A `DefId` on its own doesn't give access to any particular state.
/// You must fetch the state from the various maps or generate
/// on-demand queries, all of which create reads.
impl DepGraphSafe for DefId {}
/// Tuples make it easy to build up state.
impl<A, B> DepGraphSafe for (A, B)
where
A: DepGraphSafe,
B: DepGraphSafe,
{
}
/// Shared ref to dep-graph-safe stuff should still be dep-graph-safe.
impl<'a, A> DepGraphSafe for &'a A where A: DepGraphSafe {}
/// Mut ref to dep-graph-safe stuff should still be dep-graph-safe.
impl<'a, A> DepGraphSafe for &'a mut A where A: DepGraphSafe {}
/// No data here! :)
impl DepGraphSafe for () {}
/// A convenient override that lets you pass arbitrary state into a
/// task. Every use should be accompanied by a comment explaining why
/// it makes sense (or how it could be refactored away in the future).
pub struct AssertDepGraphSafe<T>(pub T);
impl<T> DepGraphSafe for AssertDepGraphSafe<T> {}

View File

@ -1,32 +1,17 @@
#![feature(bool_to_option)]
#![feature(const_fn)]
#![feature(const_if_match)]
#![feature(const_panic)]
#![feature(core_intrinsics)]
#![feature(hash_raw_entry)]
#![feature(specialization)]
#![feature(stmt_expr_attributes)]
#![feature(vec_remove_item)]
#[macro_use]
extern crate log;
#[macro_use]
extern crate rustc_data_structures;
pub mod dep_graph;
pub trait HashStableContext {
fn debug_dep_tasks(&self) -> bool;
}
/// Something that can provide a stable hashing context.
pub trait HashStableContextProvider<Ctxt> {
fn get_stable_hashing_context(&self) -> Ctxt;
}
impl<Ctxt, T: HashStableContextProvider<Ctxt>> HashStableContextProvider<Ctxt> for &T {
fn get_stable_hashing_context(&self) -> Ctxt {
(**self).get_stable_hashing_context()
}
}
impl<Ctxt, T: HashStableContextProvider<Ctxt>> HashStableContextProvider<Ctxt> for &mut T {
fn get_stable_hashing_context(&self) -> Ctxt {
(**self).get_stable_hashing_context()
}
}
pub mod query;

View File

@ -0,0 +1,3 @@
For more information about how the query system works, see the [rustc dev guide].
[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/query.html

View File

@ -1,6 +1,6 @@
use crate::dep_graph::DepNodeIndex;
use crate::ty::query::plumbing::{QueryLookup, QueryState, QueryStateShard};
use crate::ty::TyCtxt;
use crate::query::plumbing::{QueryLookup, QueryState};
use crate::query::QueryContext;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sharded::Sharded;
@ -8,12 +8,12 @@ use std::default::Default;
use std::hash::Hash;
use std::marker::PhantomData;
pub(crate) trait CacheSelector<K, V> {
pub trait CacheSelector<K: Hash, V> {
type Cache: QueryCache<Key = K, Value = V>;
}
pub(crate) trait QueryCache: Default {
type Key;
pub trait QueryCache: Default {
type Key: Hash;
type Value;
type Sharded: Default;
@ -21,25 +21,21 @@ pub(crate) trait QueryCache: Default {
/// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need
/// to compute it.
fn lookup<'tcx, R, GetCache, OnHit, OnMiss>(
fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
&self,
state: &'tcx QueryState<'tcx, Self>,
get_cache: GetCache,
state: &QueryState<CTX, Self>,
key: Self::Key,
// `on_hit` can be called while holding a lock to the query state shard.
on_hit: OnHit,
on_miss: OnMiss,
) -> R
where
GetCache: for<'a> Fn(
&'a mut QueryStateShard<'tcx, Self::Key, Self::Sharded>,
) -> &'a mut Self::Sharded,
OnHit: FnOnce(&Self::Value, DepNodeIndex) -> R,
OnMiss: FnOnce(Self::Key, QueryLookup<'tcx, Self::Key, Self::Sharded>) -> R;
OnMiss: FnOnce(Self::Key, QueryLookup<'_, CTX, Self::Key, Self::Sharded>) -> R;
fn complete(
fn complete<CTX: QueryContext>(
&self,
tcx: TyCtxt<'tcx>,
tcx: CTX,
lock_sharded_storage: &mut Self::Sharded,
key: Self::Key,
value: Self::Value,
@ -76,32 +72,29 @@ impl<K: Eq + Hash, V: Clone> QueryCache for DefaultCache<K, V> {
type Sharded = FxHashMap<K, (V, DepNodeIndex)>;
#[inline(always)]
fn lookup<'tcx, R, GetCache, OnHit, OnMiss>(
fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
&self,
state: &'tcx QueryState<'tcx, Self>,
get_cache: GetCache,
state: &QueryState<CTX, Self>,
key: K,
on_hit: OnHit,
on_miss: OnMiss,
) -> R
where
GetCache:
for<'a> Fn(&'a mut QueryStateShard<'tcx, K, Self::Sharded>) -> &'a mut Self::Sharded,
OnHit: FnOnce(&V, DepNodeIndex) -> R,
OnMiss: FnOnce(K, QueryLookup<'tcx, K, Self::Sharded>) -> R,
OnMiss: FnOnce(K, QueryLookup<'_, CTX, K, Self::Sharded>) -> R,
{
let mut lookup = state.get_lookup(&key);
let lock = &mut *lookup.lock;
let result = get_cache(lock).raw_entry().from_key_hashed_nocheck(lookup.key_hash, &key);
let result = lock.cache.raw_entry().from_key_hashed_nocheck(lookup.key_hash, &key);
if let Some((_, value)) = result { on_hit(&value.0, value.1) } else { on_miss(key, lookup) }
}
#[inline]
fn complete(
fn complete<CTX: QueryContext>(
&self,
_: TyCtxt<'tcx>,
_: CTX,
lock_sharded_storage: &mut Self::Sharded,
key: K,
value: V,

View File

@ -0,0 +1,82 @@
//! Query configuration and description traits.
use crate::dep_graph::DepNode;
use crate::dep_graph::SerializedDepNodeIndex;
use crate::query::caches::QueryCache;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryState};
use rustc_data_structures::profiling::ProfileCategory;
use rustc_span::def_id::DefId;
use rustc_data_structures::fingerprint::Fingerprint;
use std::borrow::Cow;
use std::fmt::Debug;
use std::hash::Hash;
// The parameter `CTX` is required in librustc: implementations may need to access the `'tcx`
// lifetime in `CTX = TyCtxt<'tcx>`.
pub trait QueryConfig<CTX> {
const NAME: &'static str;
const CATEGORY: ProfileCategory;
type Key: Eq + Hash + Clone + Debug;
type Value: Clone;
}
pub trait QueryAccessors<CTX: QueryContext>: QueryConfig<CTX> {
const ANON: bool;
const EVAL_ALWAYS: bool;
const DEP_KIND: CTX::DepKind;
type Cache: QueryCache<Key = Self::Key, Value = Self::Value>;
// Don't use this method to access query results; instead use the methods on TyCtxt
fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX, Self::Cache>;
fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode<CTX::DepKind>;
// Don't use this method to compute query results; instead use the methods on TyCtxt
fn compute(tcx: CTX, key: Self::Key) -> Self::Value;
fn hash_result(
hcx: &mut CTX::StableHashingContext,
result: &Self::Value,
) -> Option<Fingerprint>;
fn handle_cycle_error(tcx: CTX, error: CycleError<CTX::Query>) -> Self::Value;
}
pub trait QueryDescription<CTX: QueryContext>: QueryAccessors<CTX> {
fn describe(tcx: CTX, key: Self::Key) -> Cow<'static, str>;
#[inline]
fn cache_on_disk(_: CTX, _: Self::Key, _: Option<&Self::Value>) -> bool {
false
}
fn try_load_from_disk(_: CTX, _: SerializedDepNodeIndex) -> Option<Self::Value> {
panic!("QueryDescription::load_from_disk() called for an unsupported query.")
}
}
impl<CTX: QueryContext, M> QueryDescription<CTX> for M
where
M: QueryAccessors<CTX, Key = DefId>,
{
default fn describe(tcx: CTX, def_id: DefId) -> Cow<'static, str> {
if !tcx.verbose() {
format!("processing `{}`", tcx.def_path_str(def_id)).into()
} else {
let name = ::std::any::type_name::<M>();
format!("processing {:?} with query `{}`", def_id, name).into()
}
}
default fn cache_on_disk(_: CTX, _: Self::Key, _: Option<&Self::Value>) -> bool {
false
}
default fn try_load_from_disk(_: CTX, _: SerializedDepNodeIndex) -> Option<Self::Value> {
panic!("QueryDescription::load_from_disk() called for an unsupported query.")
}
}
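
As an aside (not part of this diff): the comment on `QueryConfig` above notes that the `CTX` type parameter exists so that librustc can reach the `'tcx` lifetime through `CTX = TyCtxt<'tcx>`. Below is a minimal, self-contained toy sketch of that pattern; every name in it (`Config`, `Ctx`, `TypeName`, `run`) is made up for illustration and is not part of the crate.

// Toy sketch of a trait generic over a context type: the impl binds a lifetime
// through the context, so its associated `Value` type can borrow from it,
// just as librustc's queries borrow from `TyCtxt<'tcx>`.
use std::fmt::Debug;
use std::hash::Hash;

trait Config<CTX> {
    type Key: Eq + Hash + Clone + Debug;
    type Value: Clone;
}

// A stand-in for `TyCtxt<'tcx>`: a context that hands out borrowed, interned data.
#[derive(Copy, Clone)]
struct Ctx<'tcx> {
    strings: &'tcx [&'tcx str],
}

// A stand-in for one query whose value borrows from the context.
struct TypeName;

impl<'tcx> Config<Ctx<'tcx>> for TypeName {
    type Key = usize;
    type Value = &'tcx str; // `'tcx` is reachable only because `CTX` names it
}

fn run<'tcx>(cx: Ctx<'tcx>, key: usize) -> <TypeName as Config<Ctx<'tcx>>>::Value {
    cx.strings[key]
}

fn main() {
    let interned = ["bool", "i32"];
    let cx = Ctx { strings: &interned };
    println!("type 1 is {}", run(cx, 1));
}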

View File

@ -0,0 +1,564 @@
use crate::dep_graph::{DepContext, DepKind};
use crate::query::plumbing::CycleError;
use crate::query::QueryContext;
use rustc_data_structures::fx::FxHashMap;
use rustc_span::Span;
use std::convert::TryFrom;
use std::marker::PhantomData;
use std::num::NonZeroU32;
#[cfg(parallel_compiler)]
use {
parking_lot::{Condvar, Mutex},
rustc_data_structures::fx::FxHashSet,
rustc_data_structures::stable_hasher::{HashStable, StableHasher},
rustc_data_structures::sync::Lock,
rustc_data_structures::sync::Lrc,
rustc_data_structures::{jobserver, OnDrop},
rustc_rayon_core as rayon_core,
rustc_span::DUMMY_SP,
std::iter::FromIterator,
std::{mem, process},
};
/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<Q> {
/// The span corresponding to the reason for which this query was required.
pub span: Span,
pub query: Q,
}
type QueryMap<CTX> = FxHashMap<QueryJobId<<CTX as DepContext>::DepKind>, QueryJobInfo<CTX>>;
/// A value uniquely identifying an active query job within a shard in the query cache.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryShardJobId(pub NonZeroU32);
/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId<K> {
/// Which job within a shard is this
pub job: QueryShardJobId,
/// In which shard is this job
pub shard: u16,
/// What kind of query this job is
pub kind: K,
}
impl<K: DepKind> QueryJobId<K> {
pub fn new(job: QueryShardJobId, shard: usize, kind: K) -> Self {
QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
}
fn query<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> CTX::Query {
map.get(&self).unwrap().info.query.clone()
}
#[cfg(parallel_compiler)]
fn span<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> Span {
map.get(&self).unwrap().job.span
}
#[cfg(parallel_compiler)]
fn parent<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> Option<QueryJobId<K>> {
map.get(&self).unwrap().job.parent
}
#[cfg(parallel_compiler)]
fn latch<'a, CTX: QueryContext<DepKind = K>>(
self,
map: &'a QueryMap<CTX>,
) -> Option<&'a QueryLatch<CTX>> {
map.get(&self).unwrap().job.latch.as_ref()
}
}
pub struct QueryJobInfo<CTX: QueryContext> {
pub info: QueryInfo<CTX::Query>,
pub job: QueryJob<CTX>,
}
/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob<CTX: QueryContext> {
pub id: QueryShardJobId,
/// The span corresponding to the reason for which this query was required.
pub span: Span,
/// The parent query job which created this job and is implicitly waiting on it.
pub parent: Option<QueryJobId<CTX::DepKind>>,
/// The latch that is used to wait on this job.
#[cfg(parallel_compiler)]
latch: Option<QueryLatch<CTX>>,
dummy: PhantomData<QueryLatch<CTX>>,
}
impl<CTX: QueryContext> QueryJob<CTX> {
/// Creates a new query job.
pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<CTX::DepKind>>) -> Self {
QueryJob {
id,
span,
parent,
#[cfg(parallel_compiler)]
latch: None,
dummy: PhantomData,
}
}
#[cfg(parallel_compiler)]
pub(super) fn latch(&mut self, _id: QueryJobId<CTX::DepKind>) -> QueryLatch<CTX> {
if self.latch.is_none() {
self.latch = Some(QueryLatch::new());
}
self.latch.as_ref().unwrap().clone()
}
#[cfg(not(parallel_compiler))]
pub(super) fn latch(&mut self, id: QueryJobId<CTX::DepKind>) -> QueryLatch<CTX> {
QueryLatch { id, dummy: PhantomData }
}
/// Signals to waiters that the query is complete.
///
/// This does nothing for single-threaded rustc,
/// as there are no concurrent jobs which could be waiting on us.
pub fn signal_complete(self) {
#[cfg(parallel_compiler)]
self.latch.map(|latch| latch.set());
}
}
#[cfg(not(parallel_compiler))]
#[derive(Clone)]
pub(super) struct QueryLatch<CTX: QueryContext> {
id: QueryJobId<CTX::DepKind>,
dummy: PhantomData<CTX>,
}
#[cfg(not(parallel_compiler))]
impl<CTX: QueryContext> QueryLatch<CTX> {
pub(super) fn find_cycle_in_stack(&self, tcx: CTX, span: Span) -> CycleError<CTX::Query> {
let query_map = tcx.try_collect_active_jobs().unwrap();
// Get the current executing query (waiter) and find the waitee amongst its parents
let mut current_job = tcx.current_query_job();
let mut cycle = Vec::new();
while let Some(job) = current_job {
let info = query_map.get(&job).unwrap();
cycle.push(info.info.clone());
if job == self.id {
cycle.reverse();
// This is the end of the cycle
// The span entry we included was for the usage
// of the cycle itself, and not part of the cycle
// Replace it with the span which caused the cycle to form
cycle[0].span = span;
// Find out why the cycle itself was used
let usage = info
.job
.parent
.as_ref()
.map(|parent| (info.info.span, parent.query(&query_map)));
return CycleError { usage, cycle };
}
current_job = info.job.parent;
}
panic!("did not find a cycle")
}
}
#[cfg(parallel_compiler)]
struct QueryWaiter<CTX: QueryContext> {
query: Option<QueryJobId<CTX::DepKind>>,
condvar: Condvar,
span: Span,
cycle: Lock<Option<CycleError<CTX::Query>>>,
}
#[cfg(parallel_compiler)]
impl<CTX: QueryContext> QueryWaiter<CTX> {
fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry);
self.condvar.notify_one();
}
}
#[cfg(parallel_compiler)]
struct QueryLatchInfo<CTX: QueryContext> {
complete: bool,
waiters: Vec<Lrc<QueryWaiter<CTX>>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch<CTX: QueryContext> {
info: Lrc<Mutex<QueryLatchInfo<CTX>>>,
}
#[cfg(parallel_compiler)]
impl<CTX: QueryContext> QueryLatch<CTX> {
fn new() -> Self {
QueryLatch {
info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
}
}
}
#[cfg(parallel_compiler)]
impl<CTX: QueryContext> QueryLatch<CTX> {
/// Waits for the query job to complete.
pub(super) fn wait_on(&self, tcx: CTX, span: Span) -> Result<(), CycleError<CTX::Query>> {
let query = tcx.current_query_job();
let waiter =
Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
self.wait_on_inner(&waiter);
// FIXME: Get rid of this lock. We have ownership of the QueryWaiter
// although another thread may still have a Lrc reference so we cannot
// use Lrc::get_mut
let mut cycle = waiter.cycle.lock();
match cycle.take() {
None => Ok(()),
Some(cycle) => Err(cycle),
}
}
}
#[cfg(parallel_compiler)]
impl<CTX: QueryContext> QueryLatch<CTX> {
/// Waits on this latch by blocking the current thread.
fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<CTX>>) {
let mut info = self.info.lock();
if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside
// the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
// Both of these will remove it from the `waiters` list before resuming
// this thread.
info.waiters.push(waiter.clone());
// If this detects a deadlock and the deadlock handler wants to resume this thread
// we have to be in the `wait` call. This is ensured by the deadlock handler
// getting the self.info lock.
rayon_core::mark_blocked();
jobserver::release_thread();
waiter.condvar.wait(&mut info);
// Release the lock before we potentially block in `acquire_thread`
mem::drop(info);
jobserver::acquire_thread();
}
}
/// Sets the latch and resumes all waiters on it
fn set(&self) {
let mut info = self.info.lock();
debug_assert!(!info.complete);
info.complete = true;
let registry = rayon_core::Registry::current();
for waiter in info.waiters.drain(..) {
waiter.notify(&registry);
}
}
/// Removes a single waiter from the list of waiters.
/// This is used to break query cycles.
fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<CTX>> {
let mut info = self.info.lock();
debug_assert!(!info.complete);
// Remove the waiter from the list of waiters
info.waiters.remove(waiter)
}
}
/// A resumable waiter of a query. The usize is the index into waiters in the query's latch.
#[cfg(parallel_compiler)]
type Waiter<K> = (QueryJobId<K>, usize);
/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
/// and a span indicating the reason the query waited on `query_ref`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<CTX: QueryContext, F>(
query_map: &QueryMap<CTX>,
query: QueryJobId<CTX::DepKind>,
mut visit: F,
) -> Option<Option<Waiter<CTX::DepKind>>>
where
F: FnMut(Span, QueryJobId<CTX::DepKind>) -> Option<Option<Waiter<CTX::DepKind>>>,
{
// Visit the parent query which is a non-resumable waiter since it's on the same stack
if let Some(parent) = query.parent(query_map) {
if let Some(cycle) = visit(query.span(query_map), parent) {
return Some(cycle);
}
}
// Visit the explicit waiters which use condvars and are resumable
if let Some(latch) = query.latch(query_map) {
for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
if let Some(waiter_query) = waiter.query {
if visit(waiter.span, waiter_query).is_some() {
// Return a value which indicates that this waiter can be resumed
return Some(Some((query, i)));
}
}
}
}
None
}
/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check<CTX: QueryContext>(
query_map: &QueryMap<CTX>,
query: QueryJobId<CTX::DepKind>,
span: Span,
stack: &mut Vec<(Span, QueryJobId<CTX::DepKind>)>,
visited: &mut FxHashSet<QueryJobId<CTX::DepKind>>,
) -> Option<Option<Waiter<CTX::DepKind>>> {
if !visited.insert(query) {
return if let Some(p) = stack.iter().position(|q| q.1 == query) {
// We detected a query cycle, fix up the initial span and return Some
// Remove previous stack entries
stack.drain(0..p);
// Replace the span for the first query with the cycle cause
stack[0].0 = span;
Some(None)
} else {
None
};
}
// Mark this query as visited and add it to the stack
stack.push((span, query));
// Visit all the waiters
let r = visit_waiters(query_map, query, |span, successor| {
cycle_check(query_map, successor, span, stack, visited)
});
// Remove the entry in our stack if we didn't find a cycle
if r.is_none() {
stack.pop();
}
r
}
/// Finds out if there's a path to the compiler root (i.e. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<CTX: QueryContext>(
query_map: &QueryMap<CTX>,
query: QueryJobId<CTX::DepKind>,
visited: &mut FxHashSet<QueryJobId<CTX::DepKind>>,
) -> bool {
// We already visited this or we're deliberately ignoring it
if !visited.insert(query) {
return false;
}
// This query is connected to the root (it has no query parent), return true
if query.parent(query_map).is_none() {
return true;
}
visit_waiters(query_map, query, |_, successor| {
connected_to_root(query_map, successor, visited).then_some(None)
})
.is_some()
}
// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
fn pick_query<'a, CTX, T, F>(query_map: &QueryMap<CTX>, tcx: CTX, queries: &'a [T], f: F) -> &'a T
where
CTX: QueryContext,
F: Fn(&T) -> (Span, QueryJobId<CTX::DepKind>),
{
// Deterministically pick an entry point
// FIXME: Sort this instead
let mut hcx = tcx.create_stable_hashing_context();
queries
.iter()
.min_by_key(|v| {
let (span, query) = f(v);
let mut stable_hasher = StableHasher::new();
query.query(query_map).hash_stable(&mut hcx, &mut stable_hasher);
// Prefer entry points which have valid spans for nicer error messages
// We add an integer to the tuple ensuring that entry points
// with valid spans are picked first
let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
(span_cmp, stable_hasher.finish::<u64>())
})
.unwrap()
}
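
As an aside (not part of this diff): `pick_query` above makes an order-independent, deterministic choice by hashing each candidate with the stable hasher and taking the minimum. The following std-only sketch shows the same idea; it uses `DefaultHasher`, which is deterministic within one binary (rustc uses its own `StableHasher` for cross-session stability), and the `pick` helper is made up for illustration.

// Std-only sketch of order-independent deterministic selection:
// hash each candidate and take the one with the smallest hash.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn pick<'a, T: Hash>(items: &'a [T]) -> &'a T {
    items
        .iter()
        .min_by_key(|item| {
            let mut hasher = DefaultHasher::new();
            item.hash(&mut hasher);
            hasher.finish()
        })
        .expect("pick called on an empty slice")
}

fn main() {
    // The same set of items yields the same pick regardless of order.
    assert_eq!(pick(&[3u32, 1, 2]), pick(&[2u32, 3, 1]));
    println!("picked {}", pick(&["b", "a", "c"]));
}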
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<CTX: QueryContext>(
query_map: &QueryMap<CTX>,
jobs: &mut Vec<QueryJobId<CTX::DepKind>>,
wakelist: &mut Vec<Lrc<QueryWaiter<CTX>>>,
tcx: CTX,
) -> bool {
let mut visited = FxHashSet::default();
let mut stack = Vec::new();
// Look for a cycle starting with the last query in `jobs`
if let Some(waiter) =
cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
{
// The stack is a vector of pairs of spans and queries; reverse it so that
// the earlier entries require later entries
let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();
// Shift the spans so that queries are matched with the span for their waitee
spans.rotate_right(1);
// Zip them back together
let mut stack: Vec<_> = spans.into_iter().zip(queries).collect();
// Remove the queries in our cycle from the list of jobs to look at
for r in &stack {
jobs.remove_item(&r.1);
}
// Find the queries in the cycle which are
// connected to queries outside the cycle
let entry_points = stack
.iter()
.filter_map(|&(span, query)| {
if query.parent(query_map).is_none() {
// This query is connected to the root (it has no query parent)
Some((span, query, None))
} else {
let mut waiters = Vec::new();
// Find all the direct waiters who lead to the root
visit_waiters(query_map, query, |span, waiter| {
// Mark all the other queries in the cycle as already visited
let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));
if connected_to_root(query_map, waiter, &mut visited) {
waiters.push((span, waiter));
}
None
});
if waiters.is_empty() {
None
} else {
// Deterministically pick one of the waiters to show to the user
let waiter = *pick_query(query_map, tcx, &waiters, |s| *s);
Some((span, query, Some(waiter)))
}
}
})
.collect::<Vec<(Span, QueryJobId<CTX::DepKind>, Option<(Span, QueryJobId<CTX::DepKind>)>)>>();
// Deterministically pick an entry point
let (_, entry_point, usage) = pick_query(query_map, tcx, &entry_points, |e| (e.0, e.1));
// Shift the stack so that our entry point is first
let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
if let Some(pos) = entry_point_pos {
stack.rotate_left(pos);
}
let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));
// Create the cycle error
let error = CycleError {
usage,
cycle: stack
.iter()
.map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
.collect(),
};
// We unwrap `waiter` here since there must always be one
// edge which is resumable / waited using a query latch
let (waitee_query, waiter_idx) = waiter.unwrap();
// Extract the waiter we want to resume
let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);
// Set the cycle error so it will be picked up when resumed
*waiter.cycle.lock() = Some(error);
// Put the waiter on the list of things to resume
wakelist.push(waiter);
true
} else {
false
}
}
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock<CTX: QueryContext>(tcx: CTX, registry: &rayon_core::Registry) {
let on_panic = OnDrop(|| {
eprintln!("deadlock handler panicked, aborting process");
process::abort();
});
let mut wakelist = Vec::new();
let query_map = tcx.try_collect_active_jobs().unwrap();
let mut jobs: Vec<QueryJobId<CTX::DepKind>> = query_map.keys().cloned().collect();
let mut found_cycle = false;
while jobs.len() > 0 {
if remove_cycle(&query_map, &mut jobs, &mut wakelist, tcx) {
found_cycle = true;
}
}
// Check that a cycle was found. It is possible for a deadlock to occur without
// a query cycle if a query which can be waited on uses Rayon to do multithreading
// internally. Such a query (X) may be executing on 2 threads (A and B) and A may
// wait using Rayon on B. Rayon may then switch to executing another query (Y)
// which in turn will wait on X causing a deadlock. We have a false dependency from
// X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
// only considers the true dependency and won't detect a cycle.
assert!(found_cycle);
// FIXME: Ensure this won't cause a deadlock before we return
for waiter in wakelist.into_iter() {
waiter.notify(registry);
}
on_panic.disable();
}
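
As an aside (not part of this diff): the deadlock handler above breaks deadlocks by running a depth-first search over the "waits-for" edges between active query jobs and extracting a cycle. The real code also tracks spans and distinguishes resumable waiters; the std-only sketch below keeps only the cycle search itself, with made-up job ids and a made-up edge map.

// Minimal sketch of the cycle search: DFS over "waits-for" edges,
// reporting the jobs on the cycle if the search revisits a job on the stack.
use std::collections::{HashMap, HashSet};

type JobId = u32;

fn find_cycle(
    waits_for: &HashMap<JobId, Vec<JobId>>,
    start: JobId,
    stack: &mut Vec<JobId>,
    visited: &mut HashSet<JobId>,
) -> Option<Vec<JobId>> {
    if !visited.insert(start) {
        // Already seen: a cycle exists iff `start` is on the current DFS stack.
        return stack
            .iter()
            .position(|&job| job == start)
            .map(|pos| stack[pos..].to_vec());
    }
    stack.push(start);
    if let Some(successors) = waits_for.get(&start) {
        for &next in successors {
            if let Some(cycle) = find_cycle(waits_for, next, stack, visited) {
                return Some(cycle);
            }
        }
    }
    stack.pop();
    None
}

fn main() {
    // Job 1 waits on 2, 2 waits on 3, 3 waits on 1: a deadlocked cycle.
    let mut waits_for = HashMap::new();
    waits_for.insert(1, vec![2]);
    waits_for.insert(2, vec![3]);
    waits_for.insert(3, vec![1]);
    let cycle = find_cycle(&waits_for, 1, &mut Vec::new(), &mut HashSet::new());
    println!("cycle: {:?}", cycle); // Some([1, 2, 3])
}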

View File

@ -0,0 +1,52 @@
mod plumbing;
pub use self::plumbing::*;
mod job;
#[cfg(parallel_compiler)]
pub use self::job::deadlock;
pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
mod caches;
pub use self::caches::{CacheSelector, DefaultCacheSelector, QueryCache};
mod config;
pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
use crate::dep_graph::{DepContext, DepGraph};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::HashStable;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::Diagnostic;
use rustc_span::def_id::DefId;
pub trait QueryContext: DepContext {
type Query: Clone + HashStable<Self::StableHashingContext>;
fn incremental_verify_ich(&self) -> bool;
fn verbose(&self) -> bool;
/// Get string representation from DefPath.
fn def_path_str(&self, def_id: DefId) -> String;
/// Access the DepGraph.
fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
/// Get the query information from the TLS context.
fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
fn try_collect_active_jobs(
&self,
) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>>;
/// Executes a job by changing the `ImplicitCtxt` to point to the
/// new query job while it executes. It returns the diagnostics
/// captured during execution and the actual result.
fn start_query<R>(
&self,
token: QueryJobId<Self::DepKind>,
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
compute: impl FnOnce(Self) -> R,
) -> R;
}
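
As an aside (not part of this diff): the `QueryContext` trait above is the seam this PR introduces, letting the query engine be written against a small trait while librustc supplies the concrete context. A self-contained toy sketch of that inversion is below; every type and method name in it (`QueryCtx`, `describe`, `Compiler`) is made up for illustration, loosely echoing the default `describe` impl in config.rs.

// Toy sketch: generic "engine" code depends only on a context trait;
// the "compiler side" implements the trait for its own context type.
use std::collections::HashMap;

trait QueryCtx: Copy {
    fn verbose(&self) -> bool;
    fn def_path_str(&self, def_id: u32) -> String;
}

// Engine-side code, knowing nothing about the concrete compiler context.
fn describe<CTX: QueryCtx>(cx: CTX, def_id: u32) -> String {
    if cx.verbose() {
        format!("processing {:?} (verbose)", def_id)
    } else {
        format!("processing `{}`", cx.def_path_str(def_id))
    }
}

// Compiler-side context implementing the trait.
#[derive(Copy, Clone)]
struct Compiler<'a> {
    names: &'a HashMap<u32, &'static str>,
}

impl<'a> QueryCtx for Compiler<'a> {
    fn verbose(&self) -> bool {
        false
    }
    fn def_path_str(&self, def_id: u32) -> String {
        self.names.get(&def_id).copied().unwrap_or("<unknown>").to_string()
    }
}

fn main() {
    let mut names = HashMap::new();
    names.insert(7, "std::vec::Vec");
    let cx = Compiler { names: &names };
    println!("{}", describe(cx, 7));
}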

View File

@ -0,0 +1,693 @@
//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
use crate::dep_graph::{DepKind, DepNode};
use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
use crate::query::caches::QueryCache;
use crate::query::config::QueryDescription;
use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
use crate::query::QueryContext;
#[cfg(not(parallel_compiler))]
use rustc_data_structures::cold_path;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHasher};
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::{Diagnostic, FatalError};
use rustc_span::source_map::DUMMY_SP;
use rustc_span::Span;
use std::collections::hash_map::Entry;
use std::convert::TryFrom;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::mem;
use std::num::NonZeroU32;
use std::ptr;
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
pub struct QueryStateShard<CTX: QueryContext, K, C> {
pub(super) cache: C,
active: FxHashMap<K, QueryResult<CTX>>,
/// Used to generate unique ids for active jobs.
jobs: u32,
}
impl<CTX: QueryContext, K, C: Default> Default for QueryStateShard<CTX, K, C> {
fn default() -> QueryStateShard<CTX, K, C> {
QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
}
}
pub struct QueryState<CTX: QueryContext, C: QueryCache> {
cache: C,
shards: Sharded<QueryStateShard<CTX, C::Key, C::Sharded>>,
#[cfg(debug_assertions)]
pub cache_hits: AtomicUsize,
}
impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
pub(super) fn get_lookup<'tcx>(
&'tcx self,
key: &C::Key,
) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> {
// We compute the key's hash once and then use it for both the
// shard lookup and the hashmap lookup. This relies on the fact
// that both of them use `FxHasher`.
let mut hasher = FxHasher::default();
key.hash(&mut hasher);
let key_hash = hasher.finish();
let shard = self.shards.get_shard_index_by_hash(key_hash);
let lock = self.shards.get_shard_by_index(shard).lock();
QueryLookup { key_hash, shard, lock }
}
}
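
As an aside (not part of this diff): `get_lookup` above hashes the key once and reuses the hash both to select a shard and for the raw-entry map lookup inside that shard. The std-only sketch below shows the hash-once sharding idea without the raw-entry optimization; the `Sharded` type and its methods are made up for illustration.

// Std-only sketch of hash-once sharding: compute the key's hash a single time,
// use it to pick a shard, then look the key up inside that shard's map.
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

const SHARDS: usize = 4;

struct Sharded<K, V> {
    shards: [Mutex<HashMap<K, V>>; SHARDS],
}

impl<K: Eq + Hash, V: Clone> Sharded<K, V> {
    fn new() -> Self {
        Sharded { shards: [(); SHARDS].map(|_| Mutex::new(HashMap::new())) }
    }

    fn shard_for(&self, key_hash: u64) -> &Mutex<HashMap<K, V>> {
        // Use the precomputed hash to pick a shard (rustc uses the top bits;
        // a simple modulo is enough for this sketch).
        &self.shards[(key_hash as usize) % SHARDS]
    }

    fn get_or_insert_with(&self, key: K, compute: impl FnOnce() -> V) -> V {
        // Hash once, then reuse the hash for shard selection.
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        let key_hash = hasher.finish();
        let mut shard = self.shard_for(key_hash).lock().unwrap();
        shard.entry(key).or_insert_with(compute).clone()
    }
}

fn main() {
    let cache: Sharded<String, usize> = Sharded::new();
    let len = cache.get_or_insert_with("hello".to_string(), || "hello".len());
    println!("cached length: {}", len);
}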
/// Indicates the state of a query for a given key in a query map.
enum QueryResult<CTX: QueryContext> {
/// An already executing query. The query job can be used to await for its completion.
Started(QueryJob<CTX>),
/// The query panicked. Queries trying to wait on this will raise a fatal error which will
/// silently panic.
Poisoned,
}
impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
pub fn iter_results<R>(
&self,
f: impl for<'a> FnOnce(
Box<dyn Iterator<Item = (&'a C::Key, &'a C::Value, DepNodeIndex)> + 'a>,
) -> R,
) -> R {
self.cache.iter(&self.shards, |shard| &mut shard.cache, f)
}
pub fn all_inactive(&self) -> bool {
let shards = self.shards.lock_shards();
shards.iter().all(|shard| shard.active.is_empty())
}
pub fn try_collect_active_jobs(
&self,
kind: CTX::DepKind,
make_query: fn(C::Key) -> CTX::Query,
jobs: &mut FxHashMap<QueryJobId<CTX::DepKind>, QueryJobInfo<CTX>>,
) -> Option<()>
where
C::Key: Clone,
{
// We use try_lock_shards here since we are called from the
// deadlock handler, and this shouldn't be locked.
let shards = self.shards.try_lock_shards()?;
let shards = shards.iter().enumerate();
jobs.extend(shards.flat_map(|(shard_id, shard)| {
shard.active.iter().filter_map(move |(k, v)| {
if let QueryResult::Started(ref job) = *v {
let id =
QueryJobId { job: job.id, shard: u16::try_from(shard_id).unwrap(), kind };
let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
Some((id, QueryJobInfo { info, job: job.clone() }))
} else {
None
}
})
}));
Some(())
}
}
impl<CTX: QueryContext, C: QueryCache> Default for QueryState<CTX, C> {
fn default() -> QueryState<CTX, C> {
QueryState {
cache: C::default(),
shards: Default::default(),
#[cfg(debug_assertions)]
cache_hits: AtomicUsize::new(0),
}
}
}
/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
pub struct QueryLookup<'tcx, CTX: QueryContext, K, C> {
pub(super) key_hash: u64,
shard: usize,
pub(super) lock: LockGuard<'tcx, QueryStateShard<CTX, K, C>>,
}
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, CTX: QueryContext, C>
where
C: QueryCache,
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
state: &'tcx QueryState<CTX, C>,
key: C::Key,
id: QueryJobId<CTX::DepKind>,
}
impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C>
where
C: QueryCache,
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
/// Either gets a `JobOwner` corresponding to the query, allowing us to
/// start executing the query, or returns with the result of the query.
/// This function assumes that `try_get_cached` has already been called and returned `lookup`.
/// If the query is executing elsewhere, this will wait for it and return the result.
/// If the query panicked, this will silently panic.
///
/// This function is inlined because that results in a noticeable speed-up
/// for some compile-time benchmarks.
#[inline(always)]
fn try_start<'a, 'b, Q>(
tcx: CTX,
span: Span,
key: &C::Key,
mut lookup: QueryLookup<'a, CTX, C::Key, C::Sharded>,
) -> TryGetJob<'b, CTX, C>
where
Q: QueryDescription<CTX, Key = C::Key, Value = C::Value, Cache = C>,
CTX: QueryContext,
{
let lock = &mut *lookup.lock;
let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
Entry::Occupied(mut entry) => {
match entry.get_mut() {
QueryResult::Started(job) => {
// For parallel queries, we'll block and wait until the query running
// in another thread has completed. Record how long we wait in the
// self-profiler.
let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
Some(tcx.profiler().query_blocked())
} else {
None
};
// Create the id of the job we're waiting for
let id = QueryJobId::new(job.id, lookup.shard, Q::DEP_KIND);
(job.latch(id), _query_blocked_prof_timer)
}
QueryResult::Poisoned => FatalError.raise(),
}
}
Entry::Vacant(entry) => {
// No job entry for this query. Return a new one to be started later.
// Generate an id unique within this shard.
let id = lock.jobs.checked_add(1).unwrap();
lock.jobs = id;
let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
let global_id = QueryJobId::new(id, lookup.shard, Q::DEP_KIND);
let job = tcx.current_query_job();
let job = QueryJob::new(id, span, job);
entry.insert(QueryResult::Started(job));
let owner =
JobOwner { state: Q::query_state(tcx), id: global_id, key: (*key).clone() };
return TryGetJob::NotYetStarted(owner);
}
};
mem::drop(lookup.lock);
// If we are single-threaded we know that we have a cycle error,
// so we just return the error.
#[cfg(not(parallel_compiler))]
return TryGetJob::Cycle(cold_path(|| {
Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span))
}));
// With parallel queries we might just have to wait on some other
// thread.
#[cfg(parallel_compiler)]
{
let result = latch.wait_on(tcx, span);
if let Err(cycle) = result {
return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
}
let cached = try_get_cached(
tcx,
Q::query_state(tcx),
(*key).clone(),
|value, index| (value.clone(), index),
|_, _| panic!("value must be in cache after waiting"),
);
if let Some(prof_timer) = _query_blocked_prof_timer.take() {
prof_timer.finish_with_query_invocation_id(cached.1.into());
}
return TryGetJob::JobCompleted(cached);
}
}
/// Completes the query by updating the query cache with the `result`,
/// signaling the waiter, and forgetting the `JobOwner` so it won't poison the query.
#[inline(always)]
fn complete(self, tcx: CTX, result: &C::Value, dep_node_index: DepNodeIndex) {
// We can move out of `self` here because we `mem::forget` it below
let key = unsafe { ptr::read(&self.key) };
let state = self.state;
// Forget ourself so our destructor won't poison the query
mem::forget(self);
let job = {
let result = result.clone();
let mut lock = state.shards.get_shard_by_value(&key).lock();
let job = match lock.active.remove(&key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
};
state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
job
};
job.signal_complete();
}
}
#[inline(always)]
fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
where
F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
{
let diagnostics = Lock::new(ThinVec::new());
let result = f(Some(&diagnostics));
(result, diagnostics.into_inner())
}
impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C>
where
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
#[inline(never)]
#[cold]
fn drop(&mut self) {
// Poison the query so jobs waiting on it panic.
let state = self.state;
let shard = state.shards.get_shard_by_value(&self.key);
let job = {
let mut shard = shard.lock();
let job = match shard.active.remove(&self.key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
};
shard.active.insert(self.key.clone(), QueryResult::Poisoned);
job
};
// Also signal the completion of the job, so waiters
// will continue execution.
job.signal_complete();
}
}
#[derive(Clone)]
pub struct CycleError<Q> {
/// The query and related span that uses the cycle.
pub usage: Option<(Span, Q)>,
pub cycle: Vec<QueryInfo<Q>>,
}
/// The result of `try_start`.
enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache>
where
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
NotYetStarted(JobOwner<'tcx, CTX, C>),
/// The query was already completed.
/// Returns the result of the query and its dep-node index
/// if it succeeded or a cycle error if it failed.
#[cfg(parallel_compiler)]
JobCompleted((C::Value, DepNodeIndex)),
/// Trying to execute the query resulted in a cycle.
Cycle(C::Value),
}
/// Checks if the query is already computed and in the cache.
/// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need
/// to compute it.
#[inline(always)]
fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
tcx: CTX,
state: &QueryState<CTX, C>,
key: C::Key,
// `on_hit` can be called while holding a lock to the query cache
on_hit: OnHit,
on_miss: OnMiss,
) -> R
where
C: QueryCache,
CTX: QueryContext,
OnHit: FnOnce(&C::Value, DepNodeIndex) -> R,
OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R,
{
state.cache.lookup(
state,
key,
|value, index| {
if unlikely!(tcx.profiler().enabled()) {
tcx.profiler().query_cache_hit(index.into());
}
#[cfg(debug_assertions)]
{
state.cache_hits.fetch_add(1, Ordering::Relaxed);
}
on_hit(value, index)
},
on_miss,
)
}
#[inline(always)]
fn try_execute_query<Q, CTX>(
tcx: CTX,
span: Span,
key: Q::Key,
lookup: QueryLookup<'_, CTX, Q::Key, <Q::Cache as QueryCache>::Sharded>,
) -> Q::Value
where
Q: QueryDescription<CTX>,
CTX: QueryContext,
{
let job = match JobOwner::try_start::<Q>(tcx, span, &key, lookup) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(result) => return result,
#[cfg(parallel_compiler)]
TryGetJob::JobCompleted((v, index)) => {
tcx.dep_graph().read_index(index);
return v;
}
};
// Fast path for when incr. comp. is off. `to_dep_node` is
// expensive for some `DepKind`s.
if !tcx.dep_graph().is_fully_enabled() {
let null_dep_node = DepNode::new_no_params(DepKind::NULL);
return force_query_with_job::<Q, _>(tcx, key, job, null_dep_node).0;
}
if Q::ANON {
let prof_timer = tcx.profiler().query_provider();
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
tcx.start_query(job.id, diagnostics, |tcx| {
tcx.dep_graph().with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key))
})
});
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
tcx.dep_graph().read_index(dep_node_index);
if unlikely!(!diagnostics.is_empty()) {
tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
}
job.complete(tcx, &result, dep_node_index);
return result;
}
let dep_node = Q::to_dep_node(tcx, &key);
if !Q::EVAL_ALWAYS {
// The diagnostics for this query will be
// promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
let loaded = tcx.start_query(job.id, None, |tcx| {
let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node);
marked.map(|(prev_dep_node_index, dep_node_index)| {
(
load_from_disk_and_cache_in_memory::<Q, _>(
tcx,
key.clone(),
prev_dep_node_index,
dep_node_index,
&dep_node,
),
dep_node_index,
)
})
});
if let Some((result, dep_node_index)) = loaded {
job.complete(tcx, &result, dep_node_index);
return result;
}
}
let (result, dep_node_index) = force_query_with_job::<Q, _>(tcx, key, job, dep_node);
tcx.dep_graph().read_index(dep_node_index);
result
}
fn load_from_disk_and_cache_in_memory<Q, CTX>(
tcx: CTX,
key: Q::Key,
prev_dep_node_index: SerializedDepNodeIndex,
dep_node_index: DepNodeIndex,
dep_node: &DepNode<CTX::DepKind>,
) -> Q::Value
where
CTX: QueryContext,
Q: QueryDescription<CTX>,
{
// Note: this function can be called concurrently from the same query.
// We must ensure that this is handled correctly.
debug_assert!(tcx.dep_graph().is_green(dep_node));
// First we try to load the result from the on-disk cache.
let result = if Q::cache_on_disk(tcx, key.clone(), None) {
let prof_timer = tcx.profiler().incr_cache_loading();
let result = Q::try_load_from_disk(tcx, prev_dep_node_index);
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
// We always expect to find a cached result for things that
// can be forced from `DepNode`.
debug_assert!(
!dep_node.kind.can_reconstruct_query_key() || result.is_some(),
"missing on-disk cache entry for {:?}",
dep_node
);
result
} else {
// Some things are never cached on disk.
None
};
let result = if let Some(result) = result {
result
} else {
// We could not load a result from the on-disk cache, so
// recompute.
let prof_timer = tcx.profiler().query_provider();
// The dep-graph for this computation is already in-place.
let result = tcx.dep_graph().with_ignore(|| Q::compute(tcx, key));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
result
};
// If `-Zincremental-verify-ich` is specified, re-hash results from
// the cache and make sure that they have the expected fingerprint.
if unlikely!(tcx.incremental_verify_ich()) {
incremental_verify_ich::<Q, _>(tcx, &result, dep_node, dep_node_index);
}
result
}
#[inline(never)]
#[cold]
fn incremental_verify_ich<Q, CTX>(
tcx: CTX,
result: &Q::Value,
dep_node: &DepNode<CTX::DepKind>,
dep_node_index: DepNodeIndex,
) where
CTX: QueryContext,
Q: QueryDescription<CTX>,
{
assert!(
Some(tcx.dep_graph().fingerprint_of(dep_node_index))
== tcx.dep_graph().prev_fingerprint_of(dep_node),
"fingerprint for green query instance not loaded from cache: {:?}",
dep_node,
);
debug!("BEGIN verify_ich({:?})", dep_node);
let mut hcx = tcx.create_stable_hashing_context();
let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
debug!("END verify_ich({:?})", dep_node);
let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index);
assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,);
}
#[inline(always)]
fn force_query_with_job<Q, CTX>(
tcx: CTX,
key: Q::Key,
job: JobOwner<'_, CTX, Q::Cache>,
dep_node: DepNode<CTX::DepKind>,
) -> (Q::Value, DepNodeIndex)
where
Q: QueryDescription<CTX>,
CTX: QueryContext,
{
// If the following assertion triggers, it can have two reasons:
// 1. Something is wrong with DepNode creation, either here or
// in `DepGraph::try_mark_green()`.
// 2. Two distinct query keys get mapped to the same `DepNode`
// (see for example #48923).
assert!(
!tcx.dep_graph().dep_node_exists(&dep_node),
"forcing query with already existing `DepNode`\n\
- query-key: {:?}\n\
- dep-node: {:?}",
key,
dep_node
);
let prof_timer = tcx.profiler().query_provider();
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
tcx.start_query(job.id, diagnostics, |tcx| {
if Q::EVAL_ALWAYS {
tcx.dep_graph().with_eval_always_task(
dep_node,
tcx,
key,
Q::compute,
Q::hash_result,
)
} else {
tcx.dep_graph().with_task(dep_node, tcx, key, Q::compute, Q::hash_result)
}
})
});
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
if unlikely!(!diagnostics.is_empty()) {
if dep_node.kind != DepKind::NULL {
tcx.store_diagnostics(dep_node_index, diagnostics);
}
}
job.complete(tcx, &result, dep_node_index);
(result, dep_node_index)
}
#[inline(never)]
pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key) -> Q::Value
where
Q: QueryDescription<CTX>,
CTX: QueryContext,
{
debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
try_get_cached(
tcx,
Q::query_state(tcx),
key,
|value, index| {
tcx.dep_graph().read_index(index);
value.clone()
},
|key, lookup| try_execute_query::<Q, _>(tcx, span, key, lookup),
)
}
/// Ensure that either this query has all green inputs or has been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
pub fn ensure_query<Q, CTX>(tcx: CTX, key: Q::Key)
where
Q: QueryDescription<CTX>,
CTX: QueryContext,
{
if Q::EVAL_ALWAYS {
let _ = get_query::<Q, _>(tcx, DUMMY_SP, key);
return;
}
// Ensuring an anonymous query makes no sense
assert!(!Q::ANON);
let dep_node = Q::to_dep_node(tcx, &key);
match tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node) {
None => {
// A None return from `try_mark_green_and_read` means that this is either
// a new dep node or that the dep node has already been marked red.
// Either way, we can't call `dep_graph.read()` as we don't have the
// DepNodeIndex. We must invoke the query itself. The performance cost
// this introduces should be negligible as we'll immediately hit the
// in-memory cache, or another query down the line will.
let _ = get_query::<Q, _>(tcx, DUMMY_SP, key);
}
Some((_, dep_node_index)) => {
tcx.profiler().query_cache_hit(dep_node_index.into());
}
}
}
pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, span: Span, dep_node: DepNode<CTX::DepKind>)
where
Q: QueryDescription<CTX>,
CTX: QueryContext,
{
// We may be concurrently trying to both execute and force a query.
// Ensure that only one of them runs the query.
try_get_cached(
tcx,
Q::query_state(tcx),
key,
|_, _| {
// Cache hit, do nothing
},
|key, lookup| {
let job = match JobOwner::try_start::<Q>(tcx, span, &key, lookup) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(_) => return,
#[cfg(parallel_compiler)]
TryGetJob::JobCompleted(_) => return,
};
force_query_with_job::<Q, _>(tcx, key, job, dep_node);
},
);
}