Auto merge of #81855 - cjgillot:ensure-cache, r=oli-obk
Check the result cache before the DepGraph when ensuring queries.

Split out of https://github.com/rust-lang/rust/pull/70951.

Calling `ensure` on already forced queries is a common operation. Looking at the result cache first is faster than checking the DepGraph for a green node.
Commit d1206f950f
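In outline, the change makes `ensure` probe the in-memory result cache first and only fall back to the dep-graph path on a miss. The following standalone mock is not rustc code: `ResultCache`, `ensure_slow_path`, and the `u32`/`String` key and value types are invented stand-ins; only the `Ok`-on-hit / `Err(QueryLookup)`-on-miss shape mirrors the `try_get_cached` / `lookup` signatures introduced in the diff below.

```rust
use std::collections::HashMap;

// Hypothetical stand-in for rustc's QueryLookup: the data a cache miss hands
// back so the slow path can reuse the already-computed key hash / shard.
struct QueryLookup {
    key_hash: u64,
}

// Hypothetical stand-in for the result cache (QueryCacheStore in the diff).
struct ResultCache {
    results: HashMap<u32, String>,
}

impl ResultCache {
    // Shaped like the new `try_get_cached`: Ok(on_hit(..)) on a hit,
    // Err(QueryLookup) on a miss.
    fn try_get_cached<R>(&self, key: u32, on_hit: impl FnOnce(&String) -> R) -> Result<R, QueryLookup> {
        match self.results.get(&key) {
            Some(value) => Ok(on_hit(value)),
            None => Err(QueryLookup { key_hash: u64::from(key) }),
        }
    }
}

// Stand-in for the slow path (`get_query` with `QueryMode::Ensure`): consult
// the dep graph and, if the node cannot be marked green, run the query.
fn ensure_slow_path(cache: &mut ResultCache, key: u32, lookup: QueryLookup) {
    let _reused_hash = lookup.key_hash; // the real code reuses this for sharding
    cache.results.insert(key, format!("computed {}", key));
}

fn ensure(cache: &mut ResultCache, key: u32) {
    // Fast path added by this PR: an already-forced query is answered from the
    // result cache without touching the dep graph at all.
    let lookup = match cache.try_get_cached(key, |_| {}) {
        Ok(()) => return,
        Err(lookup) => lookup,
    };
    ensure_slow_path(cache, key, lookup);
}

fn main() {
    let mut cache = ResultCache { results: HashMap::new() };
    ensure(&mut cache, 1); // miss: takes the slow path once
    ensure(&mut cache, 1); // hit: returns straight from the result cache
    println!("{:?}", cache.results.get(&1));
}
```

In the real diff, the miss path hands the precomputed `QueryLookup` (key hash and shard index) to `get_query` with `QueryMode::Ensure`, so the hash is not recomputed on the slow path. The full diff follows.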
@@ -63,23 +63,9 @@ impl<T> Sharded<T> {
         if SHARDS == 1 { &self.shards[0].0 } else { self.get_shard_by_hash(make_hash(val)) }
     }
 
-    /// Get a shard with a pre-computed hash value. If `get_shard_by_value` is
-    /// ever used in combination with `get_shard_by_hash` on a single `Sharded`
-    /// instance, then `hash` must be computed with `FxHasher`. Otherwise,
-    /// `hash` can be computed with any hasher, so long as that hasher is used
-    /// consistently for each `Sharded` instance.
-    #[inline]
-    pub fn get_shard_index_by_hash(&self, hash: u64) -> usize {
-        let hash_len = mem::size_of::<usize>();
-        // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
-        // hashbrown also uses the lowest bits, so we can't use those
-        let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
-        bits % SHARDS
-    }
-
     #[inline]
     pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
-        &self.shards[self.get_shard_index_by_hash(hash)].0
+        &self.shards[get_shard_index_by_hash(hash)].0
     }
 
     #[inline]
@@ -166,3 +152,17 @@ fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
     val.hash(&mut state);
     state.finish()
 }
+
+/// Get a shard with a pre-computed hash value. If `get_shard_by_value` is
+/// ever used in combination with `get_shard_by_hash` on a single `Sharded`
+/// instance, then `hash` must be computed with `FxHasher`. Otherwise,
+/// `hash` can be computed with any hasher, so long as that hasher is used
+/// consistently for each `Sharded` instance.
+#[inline]
+pub fn get_shard_index_by_hash(hash: u64) -> usize {
+    let hash_len = mem::size_of::<usize>();
+    // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
+    // hashbrown also uses the lowest bits, so we can't use those
+    let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
+    bits % SHARDS
+}
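The two hunks above turn `get_shard_index_by_hash` from a method on `Sharded<T>` into a free function, so the query plumbing can compute a shard index from a hash without holding a `Sharded` value. A minimal standalone sketch of the bit selection it performs is below; the `SHARD_BITS = 5` value and the example hashes are assumptions for illustration, not taken from this diff.

```rust
// Standalone illustration (not the rustc sources) of the bit selection done by
// `get_shard_index_by_hash` above. SHARD_BITS = 5 is an assumed example value.
const SHARD_BITS: usize = 5;
const SHARDS: usize = 1 << SHARD_BITS;

fn get_shard_index_by_hash(hash: u64) -> usize {
    let hash_len = std::mem::size_of::<usize>();
    // Skip the top 7 bits (used by hashbrown) and take the next SHARD_BITS bits;
    // the low bits are also left alone because hashbrown uses those for probing.
    let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
    bits % SHARDS
}

fn main() {
    let base: u64 = 0xABCD_EF01_2345_6789;
    let same_low_bits = base ^ 0xFF;             // differs only in the low bits
    let different_high = base ^ (0xFFu64 << 48); // flips bits inside the selected window
    println!("{}", get_shard_index_by_hash(base));
    println!("{}", get_shard_index_by_hash(same_low_bits));  // same shard
    println!("{}", get_shard_index_by_hash(different_high)); // different shard
}
```

Because hashbrown already consumes the top 7 bits and the low bits for its own probing, picking the window just below the top bits keeps the shard choice independent of the bits the hash map itself relies on.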
@@ -963,6 +963,7 @@ pub struct GlobalCtxt<'tcx> {
     pub(crate) definitions: &'tcx Definitions,
 
     pub queries: query::Queries<'tcx>,
+    pub query_caches: query::QueryCaches<'tcx>,
 
     maybe_unused_trait_imports: FxHashSet<LocalDefId>,
     maybe_unused_extern_crates: Vec<(LocalDefId, Span)>,
@@ -1154,6 +1155,7 @@ impl<'tcx> TyCtxt<'tcx> {
             untracked_crate: krate,
             definitions,
             queries: query::Queries::new(providers, extern_providers, on_disk_query_result_cache),
+            query_caches: query::QueryCaches::default(),
            ty_rcache: Default::default(),
            pred_rcache: Default::default(),
            selection_cache: Default::default(),
@@ -1244,10 +1244,9 @@ where
         .prof
         .extra_verbose_generic_activity("encode_query_results_for", std::any::type_name::<Q>());
 
-    let state = Q::query_state(tcx);
-    assert!(state.all_inactive());
-
-    state.iter_results(|results| {
+    assert!(Q::query_state(tcx).all_inactive());
+    let cache = Q::query_cache(tcx);
+    cache.iter_results(|results| {
         for (key, value, dep_node) in results {
             if Q::cache_on_disk(tcx, &key, Some(value)) {
                 let dep_node = SerializedDepNodeIndex::new(dep_node.index());
@@ -342,14 +342,28 @@ macro_rules! define_queries {
 
         $(pub type $name<$tcx> = $V;)*
     }
 
+    #[allow(nonstandard_style, unused_lifetimes)]
+    pub mod query_storage {
+        use super::*;
+
+        $(pub type $name<$tcx> = query_storage!([$($modifiers)*][$($K)*, $V]);)*
+    }
+
+    #[allow(nonstandard_style, unused_lifetimes)]
+    pub mod query_stored {
+        use super::*;
+
+        $(pub type $name<$tcx> = <query_storage::$name<$tcx> as QueryStorage>::Stored;)*
+    }
+
+    #[derive(Default)]
+    pub struct QueryCaches<$tcx> {
+        $($(#[$attr])* $name: QueryCacheStore<query_storage::$name<$tcx>>,)*
+    }
+
     $(impl<$tcx> QueryConfig for queries::$name<$tcx> {
         type Key = $($K)*;
         type Value = $V;
-        type Stored = <
-            query_storage!([$($modifiers)*][$($K)*, $V])
-            as QueryStorage
-        >::Stored;
+        type Stored = query_stored::$name<$tcx>;
 
         const NAME: &'static str = stringify!($name);
     })*
 
@@ -358,13 +372,20 @@ macro_rules! define_queries {
             const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
             const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$name;
 
-            type Cache = query_storage!([$($modifiers)*][$($K)*, $V]);
+            type Cache = query_storage::$name<$tcx>;
 
             #[inline(always)]
-            fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query, Self::Cache> {
+            fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, Query<$tcx>, Self::Key> {
                 &tcx.queries.$name
             }
 
+            #[inline(always)]
+            fn query_cache<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryCacheStore<Self::Cache>
+                where 'tcx:'a
+            {
+                &tcx.query_caches.$name
+            }
+
             #[inline]
             fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
                 let provider = tcx.queries.providers.get(key.query_crate())
@@ -401,7 +422,15 @@ macro_rules! define_queries {
         $($(#[$attr])*
         #[inline(always)]
         pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
-            ensure_query::<queries::$name<'_>, _>(self.tcx, key.into_query_param())
+            let key = key.into_query_param();
+            let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |_| {});
+
+            let lookup = match cached {
+                Ok(()) => return,
+                Err(lookup) => lookup,
+            };
+
+            get_query::<queries::$name<'_>, _>(self.tcx, DUMMY_SP, key, lookup, QueryMode::Ensure);
         })*
     }
 
@@ -442,10 +471,9 @@ macro_rules! define_queries {
         $($(#[$attr])*
        #[inline(always)]
        #[must_use]
-        pub fn $name(self, key: query_helper_param_ty!($($K)*))
-            -> <queries::$name<$tcx> as QueryConfig>::Stored
+        pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
        {
-            self.at(DUMMY_SP).$name(key.into_query_param())
+            self.at(DUMMY_SP).$name(key)
        })*
 
        /// All self-profiling events generated by the query engine use
@@ -471,7 +499,7 @@ macro_rules! define_queries {
                 alloc_self_profile_query_strings_for_query_cache(
                     self,
                     stringify!($name),
-                    &self.queries.$name,
+                    &self.query_caches.$name,
                     &mut string_cache,
                 );
             })*
@@ -481,10 +509,19 @@ macro_rules! define_queries {
     impl TyCtxtAt<$tcx> {
         $($(#[$attr])*
         #[inline(always)]
-        pub fn $name(self, key: query_helper_param_ty!($($K)*))
-            -> <queries::$name<$tcx> as QueryConfig>::Stored
+        pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
         {
-            get_query::<queries::$name<'_>, _>(self.tcx, self.span, key.into_query_param())
+            let key = key.into_query_param();
+            let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |value| {
+                value.clone()
+            });
+
+            let lookup = match cached {
+                Ok(value) => return value,
+                Err(lookup) => lookup,
+            };
+
+            get_query::<queries::$name<'_>, _>(self.tcx, self.span, key, lookup, QueryMode::Get).unwrap()
         })*
     }
 
@@ -518,8 +555,8 @@ macro_rules! define_queries_struct {
 
         $($(#[$attr])* $name: QueryState<
             crate::dep_graph::DepKind,
-            <TyCtxt<$tcx> as QueryContext>::Query,
-            <queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache,
+            Query<$tcx>,
+            query_keys::$name<$tcx>,
         >,)*
     }
 
@@ -5,7 +5,7 @@ use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::profiling::SelfProfiler;
 use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
 use rustc_hir::definitions::DefPathData;
-use rustc_query_system::query::{QueryCache, QueryContext, QueryState};
+use rustc_query_system::query::{QueryCache, QueryCacheStore};
 use std::fmt::Debug;
 use std::io::Write;
 
@@ -230,7 +230,7 @@ where
 pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
     tcx: TyCtxt<'tcx>,
     query_name: &'static str,
-    query_state: &QueryState<crate::dep_graph::DepKind, <TyCtxt<'tcx> as QueryContext>::Query, C>,
+    query_cache: &QueryCacheStore<C>,
     string_cache: &mut QueryKeyStringCache,
 ) where
     C: QueryCache,
@@ -251,7 +251,7 @@ pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
     // need to invoke queries itself, we cannot keep the query caches
     // locked while doing so. Instead we copy out the
     // `(query_key, dep_node_index)` pairs and release the lock again.
-    let query_keys_and_indices: Vec<_> = query_state
+    let query_keys_and_indices: Vec<_> = query_cache
         .iter_results(|results| results.map(|(k, _, i)| (k.clone(), i)).collect());
 
     // Now actually allocate the strings. If allocating the strings
@@ -276,7 +276,7 @@ pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
     let query_name = profiler.get_or_alloc_cached_string(query_name);
     let event_id = event_id_builder.from_label(query_name).to_string_id();
 
-    query_state.iter_results(|results| {
+    query_cache.iter_results(|results| {
         let query_invocation_ids: Vec<_> = results.map(|v| v.2.into()).collect();
 
         profiler.bulk_map_query_invocation_id_to_single_string(
@@ -1,10 +1,9 @@
 use crate::ty::query::queries;
 use crate::ty::TyCtxt;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
-use rustc_query_system::query::{QueryAccessors, QueryCache, QueryContext, QueryState};
+use rustc_query_system::query::{QueryAccessors, QueryCache, QueryCacheStore};
 
 use std::any::type_name;
-use std::hash::Hash;
 use std::mem;
 #[cfg(debug_assertions)]
 use std::sync::atomic::Ordering;
@@ -37,10 +36,8 @@ struct QueryStats {
     local_def_id_keys: Option<usize>,
 }
 
-fn stats<D, Q, C>(name: &'static str, map: &QueryState<D, Q, C>) -> QueryStats
+fn stats<C>(name: &'static str, map: &QueryCacheStore<C>) -> QueryStats
 where
-    D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     C: QueryCache,
 {
     let mut stats = QueryStats {
@@ -128,12 +125,10 @@ macro_rules! print_stats {
 
             $(
                 queries.push(stats::<
-                    crate::dep_graph::DepKind,
-                    <TyCtxt<'_> as QueryContext>::Query,
                     <queries::$name<'_> as QueryAccessors<TyCtxt<'_>>>::Cache,
                 >(
                     stringify!($name),
-                    &tcx.queries.$name,
+                    &tcx.query_caches.$name,
                 ));
             )*
 
@@ -1,5 +1,5 @@
 use crate::dep_graph::DepNodeIndex;
-use crate::query::plumbing::{QueryLookup, QueryState};
+use crate::query::plumbing::{QueryCacheStore, QueryLookup};
 
 use rustc_arena::TypedArena;
 use rustc_data_structures::fx::FxHashMap;
@@ -31,17 +31,15 @@ pub trait QueryCache: QueryStorage {
     /// It returns the shard index and a lock guard to the shard,
     /// which will be used if the query is not in the cache and we need
     /// to compute it.
-    fn lookup<D, Q, R, OnHit, OnMiss>(
+    fn lookup<'s, R, OnHit>(
         &self,
-        state: &QueryState<D, Q, Self>,
-        key: Self::Key,
+        state: &'s QueryCacheStore<Self>,
+        key: &Self::Key,
         // `on_hit` can be called while holding a lock to the query state shard.
         on_hit: OnHit,
-        on_miss: OnMiss,
-    ) -> R
+    ) -> Result<R, QueryLookup>
     where
-        OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R,
-        OnMiss: FnOnce(Self::Key, QueryLookup<'_, D, Q, Self::Key, Self::Sharded>) -> R;
+        OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R;
 
     fn complete(
         &self,
@@ -95,23 +93,24 @@ where
     type Sharded = FxHashMap<K, (V, DepNodeIndex)>;
 
     #[inline(always)]
-    fn lookup<D, Q, R, OnHit, OnMiss>(
+    fn lookup<'s, R, OnHit>(
         &self,
-        state: &QueryState<D, Q, Self>,
-        key: K,
+        state: &'s QueryCacheStore<Self>,
+        key: &K,
         on_hit: OnHit,
-        on_miss: OnMiss,
-    ) -> R
+    ) -> Result<R, QueryLookup>
     where
         OnHit: FnOnce(&V, DepNodeIndex) -> R,
-        OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
     {
-        let mut lookup = state.get_lookup(&key);
-        let lock = &mut *lookup.lock;
-
-        let result = lock.cache.raw_entry().from_key_hashed_nocheck(lookup.key_hash, &key);
-
-        if let Some((_, value)) = result { on_hit(&value.0, value.1) } else { on_miss(key, lookup) }
+        let (lookup, lock) = state.get_lookup(key);
+        let result = lock.raw_entry().from_key_hashed_nocheck(lookup.key_hash, key);
+
+        if let Some((_, value)) = result {
+            let hit_result = on_hit(&value.0, value.1);
+            Ok(hit_result)
+        } else {
+            Err(lookup)
+        }
     }
 
     #[inline]
@@ -177,26 +176,23 @@ where
     type Sharded = FxHashMap<K, &'tcx (V, DepNodeIndex)>;
 
     #[inline(always)]
-    fn lookup<D, Q, R, OnHit, OnMiss>(
+    fn lookup<'s, R, OnHit>(
         &self,
-        state: &QueryState<D, Q, Self>,
-        key: K,
+        state: &'s QueryCacheStore<Self>,
+        key: &K,
         on_hit: OnHit,
-        on_miss: OnMiss,
-    ) -> R
+    ) -> Result<R, QueryLookup>
     where
         OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
-        OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
     {
-        let mut lookup = state.get_lookup(&key);
-        let lock = &mut *lookup.lock;
-
-        let result = lock.cache.raw_entry().from_key_hashed_nocheck(lookup.key_hash, &key);
-
+        let (lookup, lock) = state.get_lookup(key);
+        let result = lock.raw_entry().from_key_hashed_nocheck(lookup.key_hash, key);
+
         if let Some((_, value)) = result {
-            on_hit(&&value.0, value.1)
+            let hit_result = on_hit(&&value.0, value.1);
+            Ok(hit_result)
         } else {
-            on_miss(key, lookup)
+            Err(lookup)
         }
     }
 
@@ -4,7 +4,7 @@ use crate::dep_graph::DepNode;
 use crate::dep_graph::SerializedDepNodeIndex;
 use crate::query::caches::QueryCache;
 use crate::query::plumbing::CycleError;
-use crate::query::{QueryContext, QueryState};
+use crate::query::{QueryCacheStore, QueryContext, QueryState};
 
 use rustc_data_structures::fingerprint::Fingerprint;
 use std::fmt::Debug;
@@ -73,7 +73,12 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
     type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, CTX::Query, Self::Cache>;
+    fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, CTX::Query, Self::Key>;
+
+    // Don't use this method to access query results, instead use the methods on TyCtxt
+    fn query_cache<'a>(tcx: CTX) -> &'a QueryCacheStore<Self::Cache>
+    where
+        CTX: 'a;
 
     fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode<CTX::DepKind>
     where
@@ -13,11 +13,10 @@ use crate::query::{QueryContext, QueryMap};
 use rustc_data_structures::cold_path;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHasher};
-use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
 use rustc_data_structures::sync::{Lock, LockGuard};
 use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::{Diagnostic, FatalError};
-use rustc_span::source_map::DUMMY_SP;
 use rustc_span::Span;
 use std::collections::hash_map::Entry;
 use std::fmt::Debug;
@@ -28,43 +27,75 @@ use std::ptr;
 #[cfg(debug_assertions)]
 use std::sync::atomic::{AtomicUsize, Ordering};
 
-pub(super) struct QueryStateShard<D, Q, K, C> {
-    pub(super) cache: C,
+pub struct QueryCacheStore<C: QueryCache> {
+    cache: C,
+    shards: Sharded<C::Sharded>,
+    #[cfg(debug_assertions)]
+    pub cache_hits: AtomicUsize,
+}
+
+impl<C: QueryCache> Default for QueryCacheStore<C> {
+    fn default() -> Self {
+        Self {
+            cache: C::default(),
+            shards: Default::default(),
+            #[cfg(debug_assertions)]
+            cache_hits: AtomicUsize::new(0),
+        }
+    }
+}
+
+/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
+pub struct QueryLookup {
+    pub(super) key_hash: u64,
+    shard: usize,
+}
+
+// We compute the key's hash once and then use it for both the
+// shard lookup and the hashmap lookup. This relies on the fact
+// that both of them use `FxHasher`.
+fn hash_for_shard<K: Hash>(key: &K) -> u64 {
+    let mut hasher = FxHasher::default();
+    key.hash(&mut hasher);
+    hasher.finish()
+}
+
+impl<C: QueryCache> QueryCacheStore<C> {
+    pub(super) fn get_lookup<'tcx>(
+        &'tcx self,
+        key: &C::Key,
+    ) -> (QueryLookup, LockGuard<'tcx, C::Sharded>) {
+        let key_hash = hash_for_shard(key);
+        let shard = get_shard_index_by_hash(key_hash);
+        let lock = self.shards.get_shard_by_index(shard).lock();
+        (QueryLookup { key_hash, shard }, lock)
+    }
+
+    pub fn iter_results<R>(
+        &self,
+        f: impl for<'a> FnOnce(
+            Box<dyn Iterator<Item = (&'a C::Key, &'a C::Value, DepNodeIndex)> + 'a>,
+        ) -> R,
+    ) -> R {
+        self.cache.iter(&self.shards, |shard| &mut *shard, f)
+    }
+}
+
+struct QueryStateShard<D, Q, K> {
     active: FxHashMap<K, QueryResult<D, Q>>,
 
     /// Used to generate unique ids for active jobs.
     jobs: u32,
 }
 
-impl<D, Q, K, C: Default> Default for QueryStateShard<D, Q, K, C> {
-    fn default() -> QueryStateShard<D, Q, K, C> {
-        QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
+impl<D, Q, K> Default for QueryStateShard<D, Q, K> {
+    fn default() -> QueryStateShard<D, Q, K> {
+        QueryStateShard { active: Default::default(), jobs: 0 }
     }
 }
 
-pub struct QueryState<D, Q, C: QueryCache> {
-    cache: C,
-    shards: Sharded<QueryStateShard<D, Q, C::Key, C::Sharded>>,
-    #[cfg(debug_assertions)]
-    pub cache_hits: AtomicUsize,
-}
-
-impl<D, Q, C: QueryCache> QueryState<D, Q, C> {
-    pub(super) fn get_lookup<'tcx>(
-        &'tcx self,
-        key: &C::Key,
-    ) -> QueryLookup<'tcx, D, Q, C::Key, C::Sharded> {
-        // We compute the key's hash once and then use it for both the
-        // shard lookup and the hashmap lookup. This relies on the fact
-        // that both of them use `FxHasher`.
-        let mut hasher = FxHasher::default();
-        key.hash(&mut hasher);
-        let key_hash = hasher.finish();
-
-        let shard = self.shards.get_shard_index_by_hash(key_hash);
-        let lock = self.shards.get_shard_by_index(shard).lock();
-        QueryLookup { key_hash, shard, lock }
-    }
+pub struct QueryState<D, Q, K> {
+    shards: Sharded<QueryStateShard<D, Q, K>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
@@ -77,21 +108,12 @@ enum QueryResult<D, Q> {
     Poisoned,
 }
 
-impl<D, Q, C> QueryState<D, Q, C>
+impl<D, Q, K> QueryState<D, Q, K>
 where
     D: Copy + Clone + Eq + Hash,
     Q: Clone,
-    C: QueryCache,
+    K: Eq + Hash + Clone + Debug,
 {
-    pub fn iter_results<R>(
-        &self,
-        f: impl for<'a> FnOnce(
-            Box<dyn Iterator<Item = (&'a C::Key, &'a C::Value, DepNodeIndex)> + 'a>,
-        ) -> R,
-    ) -> R {
-        self.cache.iter(&self.shards, |shard| &mut shard.cache, f)
-    }
-
     pub fn all_inactive(&self) -> bool {
         let shards = self.shards.lock_shards();
         shards.iter().all(|shard| shard.active.is_empty())
@@ -100,7 +122,7 @@ where
     pub fn try_collect_active_jobs(
         &self,
         kind: D,
-        make_query: fn(C::Key) -> Q,
+        make_query: fn(K) -> Q,
         jobs: &mut QueryMap<D, Q>,
     ) -> Option<()> {
         // We use try_lock_shards here since we are called from the
@@ -123,24 +145,12 @@ where
     }
 }
 
-impl<D, Q, C: QueryCache> Default for QueryState<D, Q, C> {
-    fn default() -> QueryState<D, Q, C> {
-        QueryState {
-            cache: C::default(),
-            shards: Default::default(),
-            #[cfg(debug_assertions)]
-            cache_hits: AtomicUsize::new(0),
-        }
+impl<D, Q, K> Default for QueryState<D, Q, K> {
+    fn default() -> QueryState<D, Q, K> {
+        QueryState { shards: Default::default() }
     }
 }
 
-/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
-pub struct QueryLookup<'tcx, D, Q, K, C> {
-    pub(super) key_hash: u64,
-    shard: usize,
-    pub(super) lock: LockGuard<'tcx, QueryStateShard<D, Q, K, C>>,
-}
-
 /// A type representing the responsibility to execute the job in the `job` field.
 /// This will poison the relevant query if dropped.
 struct JobOwner<'tcx, D, Q, C>
@@ -149,7 +159,8 @@ where
     Q: Clone,
     C: QueryCache,
 {
-    state: &'tcx QueryState<D, Q, C>,
+    state: &'tcx QueryState<D, Q, C::Key>,
+    cache: &'tcx QueryCacheStore<C>,
     key: C::Key,
     id: QueryJobId<D>,
 }
@@ -169,18 +180,21 @@ where
    /// This function is inlined because that results in a noticeable speed-up
    /// for some compile-time benchmarks.
    #[inline(always)]
-    fn try_start<'a, 'b, CTX>(
+    fn try_start<'b, CTX>(
        tcx: CTX,
-        state: &'b QueryState<CTX::DepKind, CTX::Query, C>,
+        state: &'b QueryState<CTX::DepKind, CTX::Query, C::Key>,
+        cache: &'b QueryCacheStore<C>,
        span: Span,
        key: &C::Key,
-        mut lookup: QueryLookup<'a, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
+        lookup: QueryLookup,
        query: &QueryVtable<CTX, C::Key, C::Value>,
    ) -> TryGetJob<'b, CTX::DepKind, CTX::Query, C>
    where
        CTX: QueryContext,
    {
-        let lock = &mut *lookup.lock;
+        let shard = lookup.shard;
+        let mut state_lock = state.shards.get_shard_by_index(shard).lock();
+        let lock = &mut *state_lock;
 
        let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
            Entry::Occupied(mut entry) => {
@@ -196,7 +210,7 @@ where
                };
 
                // Create the id of the job we're waiting for
-                let id = QueryJobId::new(job.id, lookup.shard, query.dep_kind);
+                let id = QueryJobId::new(job.id, shard, query.dep_kind);
 
                (job.latch(id), _query_blocked_prof_timer)
            }
@@ -211,18 +225,18 @@ where
                lock.jobs = id;
                let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
 
-                let global_id = QueryJobId::new(id, lookup.shard, query.dep_kind);
+                let global_id = QueryJobId::new(id, shard, query.dep_kind);
 
                let job = tcx.current_query_job();
                let job = QueryJob::new(id, span, job);
 
                entry.insert(QueryResult::Started(job));
 
-                let owner = JobOwner { state, id: global_id, key: (*key).clone() };
+                let owner = JobOwner { state, cache, id: global_id, key: (*key).clone() };
                return TryGetJob::NotYetStarted(owner);
            }
        };
-        mem::drop(lookup.lock);
+        mem::drop(state_lock);
 
        // If we are single-threaded we know that we have cycle error,
        // so we just return the error.
@@ -234,7 +248,7 @@ where
                span,
            );
            let value = query.handle_cycle_error(tcx, error);
-            state.cache.store_nocache(value)
+            cache.cache.store_nocache(value)
        }));
 
        // With parallel queries we might just have to wait on some other
@@ -245,17 +259,23 @@ where
 
        if let Err(cycle) = result {
            let value = query.handle_cycle_error(tcx, cycle);
-            let value = state.cache.store_nocache(value);
+            let value = cache.cache.store_nocache(value);
            return TryGetJob::Cycle(value);
        }
 
-        let cached = try_get_cached(
-            tcx,
-            state,
-            (*key).clone(),
-            |value, index| (value.clone(), index),
-            |_, _| panic!("value must be in cache after waiting"),
-        );
+        let cached = cache
+            .cache
+            .lookup(cache, &key, |value, index| {
+                if unlikely!(tcx.profiler().enabled()) {
+                    tcx.profiler().query_cache_hit(index.into());
+                }
+                #[cfg(debug_assertions)]
+                {
+                    cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+                }
+                (value.clone(), index)
+            })
+            .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
 
        if let Some(prof_timer) = _query_blocked_prof_timer.take() {
            prof_timer.finish_with_query_invocation_id(cached.1.into());
@@ -271,17 +291,25 @@ where
        // We can move out of `self` here because we `mem::forget` it below
        let key = unsafe { ptr::read(&self.key) };
        let state = self.state;
+        let cache = self.cache;
 
        // Forget ourself so our destructor won't poison the query
        mem::forget(self);
 
        let (job, result) = {
-            let mut lock = state.shards.get_shard_by_value(&key).lock();
-            let job = match lock.active.remove(&key).unwrap() {
-                QueryResult::Started(job) => job,
-                QueryResult::Poisoned => panic!(),
+            let key_hash = hash_for_shard(&key);
+            let shard = get_shard_index_by_hash(key_hash);
+            let job = {
+                let mut lock = state.shards.get_shard_by_index(shard).lock();
+                match lock.active.remove(&key).unwrap() {
+                    QueryResult::Started(job) => job,
+                    QueryResult::Poisoned => panic!(),
+                }
+            };
+            let result = {
+                let mut lock = cache.shards.get_shard_by_index(shard).lock();
+                cache.cache.complete(&mut lock, key, result, dep_node_index)
            };
-            let result = state.cache.complete(&mut lock.cache, key, result, dep_node_index);
            (job, result)
        };
 
@@ -357,43 +385,38 @@ where
 /// It returns the shard index and a lock guard to the shard,
 /// which will be used if the query is not in the cache and we need
 /// to compute it.
-fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
+pub fn try_get_cached<'a, CTX, C, R, OnHit>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C>,
-    key: C::Key,
+    cache: &'a QueryCacheStore<C>,
+    key: &C::Key,
     // `on_hit` can be called while holding a lock to the query cache
     on_hit: OnHit,
-    on_miss: OnMiss,
-) -> R
+) -> Result<R, QueryLookup>
 where
     C: QueryCache,
     CTX: QueryContext,
-    OnHit: FnOnce(&C::Stored, DepNodeIndex) -> R,
-    OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>) -> R,
+    OnHit: FnOnce(&C::Stored) -> R,
 {
-    state.cache.lookup(
-        state,
-        key,
-        |value, index| {
-            if unlikely!(tcx.profiler().enabled()) {
-                tcx.profiler().query_cache_hit(index.into());
-            }
-            #[cfg(debug_assertions)]
-            {
-                state.cache_hits.fetch_add(1, Ordering::Relaxed);
-            }
-            on_hit(value, index)
-        },
-        on_miss,
-    )
+    cache.cache.lookup(cache, &key, |value, index| {
+        if unlikely!(tcx.profiler().enabled()) {
+            tcx.profiler().query_cache_hit(index.into());
+        }
+        #[cfg(debug_assertions)]
+        {
+            cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+        }
+        tcx.dep_graph().read_index(index);
+        on_hit(value)
+    })
 }
 
 fn try_execute_query<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C>,
+    state: &QueryState<CTX::DepKind, CTX::Query, C::Key>,
+    cache: &QueryCacheStore<C>,
     span: Span,
     key: C::Key,
-    lookup: QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
+    lookup: QueryLookup,
     query: &QueryVtable<CTX, C::Key, C::Value>,
 ) -> C::Stored
 where
@@ -402,7 +425,7 @@ where
     CTX: QueryContext,
 {
     let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
-        tcx, state, span, &key, lookup, query,
+        tcx, state, cache, span, &key, lookup, query,
     ) {
         TryGetJob::NotYetStarted(job) => job,
         TryGetJob::Cycle(result) => return result,
@@ -617,9 +640,11 @@ where
 #[inline(never)]
 fn get_query_impl<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C>,
+    state: &QueryState<CTX::DepKind, CTX::Query, C::Key>,
+    cache: &QueryCacheStore<C>,
     span: Span,
     key: C::Key,
+    lookup: QueryLookup,
     query: &QueryVtable<CTX, C::Key, C::Value>,
 ) -> C::Stored
 where
@@ -627,45 +652,31 @@ where
     C: QueryCache,
     C::Key: crate::dep_graph::DepNodeParams<CTX>,
 {
-    try_get_cached(
-        tcx,
-        state,
-        key,
-        |value, index| {
-            tcx.dep_graph().read_index(index);
-            value.clone()
-        },
-        |key, lookup| try_execute_query(tcx, state, span, key, lookup, query),
-    )
+    try_execute_query(tcx, state, cache, span, key, lookup, query)
 }
 
 /// Ensure that either this query has all green inputs or been executed.
 /// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
+/// Returns true if the query should still run.
 ///
 /// This function is particularly useful when executing passes for their
 /// side-effects -- e.g., in order to report errors for erroneous programs.
 ///
 /// Note: The optimization is only available during incr. comp.
 #[inline(never)]
-fn ensure_query_impl<CTX, C>(
-    tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C>,
-    key: C::Key,
-    query: &QueryVtable<CTX, C::Key, C::Value>,
-) where
-    C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX>,
+fn ensure_must_run<CTX, K, V>(tcx: CTX, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
+where
+    K: crate::dep_graph::DepNodeParams<CTX>,
     CTX: QueryContext,
 {
     if query.eval_always {
-        let _ = get_query_impl(tcx, state, DUMMY_SP, key, query);
-        return;
+        return true;
     }
 
     // Ensuring an anonymous query makes no sense
     assert!(!query.anon);
 
-    let dep_node = query.to_dep_node(tcx, &key);
+    let dep_node = query.to_dep_node(tcx, key);
 
     match tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node) {
         None => {
@@ -675,10 +686,11 @@ fn ensure_query_impl<CTX, C>(
             // DepNodeIndex. We must invoke the query itself. The performance cost
             // this introduces should be negligible as we'll immediately hit the
             // in-memory cache, or another query down the line will.
-            let _ = get_query_impl(tcx, state, DUMMY_SP, key, query);
+            true
         }
         Some((_, dep_node_index)) => {
             tcx.profiler().query_cache_hit(dep_node_index.into());
+            false
         }
     }
 }
@@ -686,7 +698,8 @@ fn ensure_query_impl<CTX, C>(
 #[inline(never)]
 fn force_query_impl<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C>,
+    state: &QueryState<CTX::DepKind, CTX::Query, C::Key>,
+    cache: &QueryCacheStore<C>,
     key: C::Key,
     span: Span,
     dep_node: DepNode<CTX::DepKind>,
@@ -698,46 +711,60 @@ fn force_query_impl<CTX, C>(
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
-    try_get_cached(
-        tcx,
-        state,
-        key,
-        |_, _| {
-            // Cache hit, do nothing
-        },
-        |key, lookup| {
-            let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
-                tcx, state, span, &key, lookup, query,
-            ) {
-                TryGetJob::NotYetStarted(job) => job,
-                TryGetJob::Cycle(_) => return,
-                #[cfg(parallel_compiler)]
-                TryGetJob::JobCompleted(_) => return,
-            };
-            force_query_with_job(tcx, key, job, dep_node, query);
-        },
-    );
+    let cached = cache.cache.lookup(cache, &key, |_, index| {
+        if unlikely!(tcx.profiler().enabled()) {
+            tcx.profiler().query_cache_hit(index.into());
+        }
+        #[cfg(debug_assertions)]
+        {
+            cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+        }
+    });
+
+    let lookup = match cached {
+        Ok(()) => return,
+        Err(lookup) => lookup,
+    };
+
+    let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+        tcx, state, cache, span, &key, lookup, query,
+    ) {
+        TryGetJob::NotYetStarted(job) => job,
+        TryGetJob::Cycle(_) => return,
+        #[cfg(parallel_compiler)]
+        TryGetJob::JobCompleted(_) => return,
+    };
+    force_query_with_job(tcx, key, job, dep_node, query);
 }
 
-pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key) -> Q::Stored
+pub enum QueryMode {
+    Get,
+    Ensure,
+}
+
+pub fn get_query<Q, CTX>(
+    tcx: CTX,
+    span: Span,
+    key: Q::Key,
+    lookup: QueryLookup,
+    mode: QueryMode,
+) -> Option<Q::Stored>
 where
     Q: QueryDescription<CTX>,
     Q::Key: crate::dep_graph::DepNodeParams<CTX>,
     CTX: QueryContext,
 {
+    let query = &Q::VTABLE;
+    if let QueryMode::Ensure = mode {
+        if !ensure_must_run(tcx, &key, query) {
+            return None;
+        }
+    }
+
     debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
-    get_query_impl(tcx, Q::query_state(tcx), span, key, &Q::VTABLE)
-}
-
-pub fn ensure_query<Q, CTX>(tcx: CTX, key: Q::Key)
-where
-    Q: QueryDescription<CTX>,
-    Q::Key: crate::dep_graph::DepNodeParams<CTX>,
-    CTX: QueryContext,
-{
-    ensure_query_impl(tcx, Q::query_state(tcx), key, &Q::VTABLE)
+    let value =
+        get_query_impl(tcx, Q::query_state(tcx), Q::query_cache(tcx), span, key, lookup, query);
+    Some(value)
 }
 
 pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, span: Span, dep_node: DepNode<CTX::DepKind>)
@@ -746,5 +773,5 @@ where
     Q::Key: crate::dep_graph::DepNodeParams<CTX>,
     CTX: QueryContext,
 {
-    force_query_impl(tcx, Q::query_state(tcx), key, span, dep_node, &Q::VTABLE)
+    force_query_impl(tcx, Q::query_state(tcx), Q::query_cache(tcx), key, span, dep_node, &Q::VTABLE)
 }