From 8c1c90bb5c351d3577a803d0ac2abf9ea213e56b Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sun, 8 Mar 2020 17:24:56 +0100 Subject: [PATCH 01/31] Make QueryConfig argument a type. --- src/librustc/ty/query/config.rs | 10 ++++------ src/librustc/ty/query/plumbing.rs | 2 +- src/librustc_metadata/rmeta/decoder/cstore_impl.rs | 2 +- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index 72a0fdf1567..fc8387ba1ba 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -1,3 +1,5 @@ +//! Query configuration and description traits. + use crate::dep_graph::SerializedDepNodeIndex; use crate::dep_graph::{DepKind, DepNode}; use crate::ty::query::caches::QueryCache; @@ -13,11 +15,7 @@ use std::borrow::Cow; use std::fmt::Debug; use std::hash::Hash; -// Query configuration and description traits. - -// FIXME(eddyb) false positive, the lifetime parameter is used for `Key`/`Value`. -#[allow(unused_lifetimes)] -pub trait QueryConfig<'tcx> { +pub trait QueryConfig { const NAME: &'static str; const CATEGORY: ProfileCategory; @@ -25,7 +23,7 @@ pub trait QueryConfig<'tcx> { type Value: Clone; } -pub(crate) trait QueryAccessors<'tcx>: QueryConfig<'tcx> { +pub(crate) trait QueryAccessors<'tcx>: QueryConfig> { const ANON: bool; const EVAL_ALWAYS: bool; const DEP_KIND: DepKind; diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index c0cc119c5a1..82c955778bd 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -956,7 +956,7 @@ macro_rules! define_queries_inner { })* } - $(impl<$tcx> QueryConfig<$tcx> for queries::$name<$tcx> { + $(impl<$tcx> QueryConfig> for queries::$name<$tcx> { type Key = $K; type Value = $V; const NAME: &'static str = stringify!($name); diff --git a/src/librustc_metadata/rmeta/decoder/cstore_impl.rs b/src/librustc_metadata/rmeta/decoder/cstore_impl.rs index b9f1dd1663e..7a1ac9e0a60 100644 --- a/src/librustc_metadata/rmeta/decoder/cstore_impl.rs +++ b/src/librustc_metadata/rmeta/decoder/cstore_impl.rs @@ -37,7 +37,7 @@ macro_rules! provide { $(fn $name<$lt: $lt, T: IntoArgs>( $tcx: TyCtxt<$lt>, def_id_arg: T, - ) -> as QueryConfig<$lt>>::Value { + ) -> as QueryConfig>>::Value { let _prof_timer = $tcx.prof.generic_activity("metadata_decode_entry"); From f74fd03999bed3686195a1a7d42259ae11331e32 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sun, 8 Mar 2020 18:20:18 +0100 Subject: [PATCH 02/31] Make QueryAccessor argument a type. --- src/librustc/ty/query/caches.rs | 15 ++-- src/librustc/ty/query/config.rs | 22 +++-- src/librustc/ty/query/job.rs | 74 ++++++++++------- src/librustc/ty/query/plumbing.rs | 93 ++++++++++++---------- src/librustc/ty/query/profiling_support.rs | 2 +- src/librustc/ty/query/stats.rs | 10 ++- 6 files changed, 127 insertions(+), 89 deletions(-) diff --git a/src/librustc/ty/query/caches.rs b/src/librustc/ty/query/caches.rs index a11b3bcba3e..7dd858142da 100644 --- a/src/librustc/ty/query/caches.rs +++ b/src/librustc/ty/query/caches.rs @@ -23,7 +23,7 @@ pub(crate) trait QueryCache: Default { /// to compute it. fn lookup<'tcx, R, GetCache, OnHit, OnMiss>( &self, - state: &'tcx QueryState<'tcx, Self>, + state: &'tcx QueryState, Self>, get_cache: GetCache, key: Self::Key, // `on_hit` can be called while holding a lock to the query state shard. 
@@ -32,10 +32,10 @@ pub(crate) trait QueryCache: Default { ) -> R where GetCache: for<'a> Fn( - &'a mut QueryStateShard<'tcx, Self::Key, Self::Sharded>, + &'a mut QueryStateShard, Self::Key, Self::Sharded>, ) -> &'a mut Self::Sharded, OnHit: FnOnce(&Self::Value, DepNodeIndex) -> R, - OnMiss: FnOnce(Self::Key, QueryLookup<'tcx, Self::Key, Self::Sharded>) -> R; + OnMiss: FnOnce(Self::Key, QueryLookup<'tcx, TyCtxt<'tcx>, Self::Key, Self::Sharded>) -> R; fn complete( &self, @@ -78,17 +78,18 @@ impl QueryCache for DefaultCache { #[inline(always)] fn lookup<'tcx, R, GetCache, OnHit, OnMiss>( &self, - state: &'tcx QueryState<'tcx, Self>, + state: &'tcx QueryState, Self>, get_cache: GetCache, key: K, on_hit: OnHit, on_miss: OnMiss, ) -> R where - GetCache: - for<'a> Fn(&'a mut QueryStateShard<'tcx, K, Self::Sharded>) -> &'a mut Self::Sharded, + GetCache: for<'a> Fn( + &'a mut QueryStateShard, K, Self::Sharded>, + ) -> &'a mut Self::Sharded, OnHit: FnOnce(&V, DepNodeIndex) -> R, - OnMiss: FnOnce(K, QueryLookup<'tcx, K, Self::Sharded>) -> R, + OnMiss: FnOnce(K, QueryLookup<'tcx, TyCtxt<'tcx>, K, Self::Sharded>) -> R, { let mut lookup = state.get_lookup(&key); let lock = &mut *lookup.lock; diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index fc8387ba1ba..eaa1006791b 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -23,7 +23,11 @@ pub trait QueryConfig { type Value: Clone; } -pub(crate) trait QueryAccessors<'tcx>: QueryConfig> { +pub trait QueryContext: Copy { + type Query; +} + +pub(crate) trait QueryAccessors: QueryConfig { const ANON: bool; const EVAL_ALWAYS: bool; const DEP_KIND: DepKind; @@ -31,20 +35,20 @@ pub(crate) trait QueryAccessors<'tcx>: QueryConfig> { type Cache: QueryCache; // Don't use this method to access query results, instead use the methods on TyCtxt - fn query_state<'a>(tcx: TyCtxt<'tcx>) -> &'a QueryState<'tcx, Self::Cache>; + fn query_state<'a>(tcx: CTX) -> &'a QueryState; - fn to_dep_node(tcx: TyCtxt<'tcx>, key: &Self::Key) -> DepNode; + fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode; // Don't use this method to compute query results, instead use the methods on TyCtxt - fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value; + fn compute(tcx: CTX, key: Self::Key) -> Self::Value; fn hash_result(hcx: &mut StableHashingContext<'_>, result: &Self::Value) -> Option; - fn handle_cycle_error(tcx: TyCtxt<'tcx>, error: CycleError<'tcx>) -> Self::Value; + fn handle_cycle_error(tcx: CTX, error: CycleError) -> Self::Value; } -pub(crate) trait QueryDescription<'tcx>: QueryAccessors<'tcx> { +pub(crate) trait QueryDescription<'tcx>: QueryAccessors> { fn describe(tcx: TyCtxt<'_>, key: Self::Key) -> Cow<'static, str>; #[inline] @@ -57,7 +61,11 @@ pub(crate) trait QueryDescription<'tcx>: QueryAccessors<'tcx> { } } -impl<'tcx, M: QueryAccessors<'tcx, Key = DefId>> QueryDescription<'tcx> for M { +impl<'tcx, M> QueryDescription<'tcx> for M +where + M: QueryAccessors, Key = DefId>, + //M::Cache: QueryCache, +{ default fn describe(tcx: TyCtxt<'_>, def_id: DefId) -> Cow<'static, str> { if !tcx.sess.verbose() { format!("processing `{}`", tcx.def_path_str(def_id)).into() diff --git a/src/librustc/ty/query/job.rs b/src/librustc/ty/query/job.rs index 4e88fc54637..e52c25d86b1 100644 --- a/src/librustc/ty/query/job.rs +++ b/src/librustc/ty/query/job.rs @@ -1,5 +1,6 @@ use crate::dep_graph::DepKind; use crate::ty::context::TyCtxt; +use crate::ty::query::config::QueryContext; use crate::ty::query::plumbing::CycleError; use 
crate::ty::query::Query; use crate::ty::tls; @@ -27,13 +28,13 @@ use { /// Represents a span and a query key. #[derive(Clone, Debug)] -pub struct QueryInfo<'tcx> { +pub struct QueryInfo { /// The span corresponding to the reason for which this query was required. pub span: Span, - pub query: Query<'tcx>, + pub query: CTX::Query, } -type QueryMap<'tcx> = FxHashMap>; +type QueryMap<'tcx> = FxHashMap>>; /// A value uniquely identifiying an active query job within a shard in the query cache. #[derive(Copy, Clone, Eq, PartialEq, Hash)] @@ -72,19 +73,19 @@ impl QueryJobId { } #[cfg(parallel_compiler)] - fn latch<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Option<&'a QueryLatch<'tcx>> { + fn latch<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Option<&'a QueryLatch>> { map.get(&self).unwrap().job.latch.as_ref() } } -pub struct QueryJobInfo<'tcx> { - pub info: QueryInfo<'tcx>, - pub job: QueryJob<'tcx>, +pub struct QueryJobInfo { + pub info: QueryInfo, + pub job: QueryJob, } /// Represents an active query job. #[derive(Clone)] -pub struct QueryJob<'tcx> { +pub struct QueryJob { pub id: QueryShardJobId, /// The span corresponding to the reason for which this query was required. @@ -95,12 +96,12 @@ pub struct QueryJob<'tcx> { /// The latch that is used to wait on this job. #[cfg(parallel_compiler)] - latch: Option>, + latch: Option>, - dummy: PhantomData>, + dummy: PhantomData>, } -impl<'tcx> QueryJob<'tcx> { +impl QueryJob { /// Creates a new query job. pub fn new(id: QueryShardJobId, span: Span, parent: Option) -> Self { QueryJob { @@ -114,7 +115,7 @@ impl<'tcx> QueryJob<'tcx> { } #[cfg(parallel_compiler)] - pub(super) fn latch(&mut self, _id: QueryJobId) -> QueryLatch<'tcx> { + pub(super) fn latch(&mut self, _id: QueryJobId) -> QueryLatch { if self.latch.is_none() { self.latch = Some(QueryLatch::new()); } @@ -122,7 +123,7 @@ impl<'tcx> QueryJob<'tcx> { } #[cfg(not(parallel_compiler))] - pub(super) fn latch(&mut self, id: QueryJobId) -> QueryLatch<'tcx> { + pub(super) fn latch(&mut self, id: QueryJobId) -> QueryLatch { QueryLatch { id, dummy: PhantomData } } @@ -138,14 +139,18 @@ impl<'tcx> QueryJob<'tcx> { #[cfg(not(parallel_compiler))] #[derive(Clone)] -pub(super) struct QueryLatch<'tcx> { +pub(super) struct QueryLatch { id: QueryJobId, - dummy: PhantomData<&'tcx ()>, + dummy: PhantomData, } #[cfg(not(parallel_compiler))] -impl<'tcx> QueryLatch<'tcx> { - pub(super) fn find_cycle_in_stack(&self, tcx: TyCtxt<'tcx>, span: Span) -> CycleError<'tcx> { +impl<'tcx> QueryLatch> { + pub(super) fn find_cycle_in_stack( + &self, + tcx: TyCtxt<'tcx>, + span: Span, + ) -> CycleError> { let query_map = tcx.queries.try_collect_active_jobs().unwrap(); // Get the current executing query (waiter) and find the waitee amongst its parents @@ -181,15 +186,15 @@ impl<'tcx> QueryLatch<'tcx> { } #[cfg(parallel_compiler)] -struct QueryWaiter<'tcx> { +struct QueryWaiter { query: Option, condvar: Condvar, span: Span, - cycle: Lock>>, + cycle: Lock>>, } #[cfg(parallel_compiler)] -impl<'tcx> QueryWaiter<'tcx> { +impl QueryWaiter { fn notify(&self, registry: &rayon_core::Registry) { rayon_core::mark_unblocked(registry); self.condvar.notify_one(); @@ -197,28 +202,34 @@ impl<'tcx> QueryWaiter<'tcx> { } #[cfg(parallel_compiler)] -struct QueryLatchInfo<'tcx> { +struct QueryLatchInfo { complete: bool, - waiters: Vec>>, + waiters: Vec>>, } #[cfg(parallel_compiler)] #[derive(Clone)] -pub(super) struct QueryLatch<'tcx> { - info: Lrc>>, +pub(super) struct QueryLatch { + info: Lrc>>, } #[cfg(parallel_compiler)] -impl<'tcx> QueryLatch<'tcx> 
{ +impl QueryLatch { fn new() -> Self { QueryLatch { info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })), } } +} +#[cfg(parallel_compiler)] +impl<'tcx> QueryLatch> { /// Awaits for the query job to complete. - #[cfg(parallel_compiler)] - pub(super) fn wait_on(&self, tcx: TyCtxt<'tcx>, span: Span) -> Result<(), CycleError<'tcx>> { + pub(super) fn wait_on( + &self, + tcx: TyCtxt<'tcx>, + span: Span, + ) -> Result<(), CycleError>> { tls::with_related_context(tcx, move |icx| { let waiter = Lrc::new(QueryWaiter { query: icx.query, @@ -237,9 +248,12 @@ impl<'tcx> QueryLatch<'tcx> { } }) } +} +#[cfg(parallel_compiler)] +impl QueryLatch { /// Awaits the caller on this latch by blocking the current thread. - fn wait_on_inner(&self, waiter: &Lrc>) { + fn wait_on_inner(&self, waiter: &Lrc>) { let mut info = self.info.lock(); if !info.complete { // We push the waiter on to the `waiters` list. It can be accessed inside @@ -273,7 +287,7 @@ impl<'tcx> QueryLatch<'tcx> { /// Removes a single waiter from the list of waiters. /// This is used to break query cycles. - fn extract_waiter(&self, waiter: usize) -> Lrc> { + fn extract_waiter(&self, waiter: usize) -> Lrc> { let mut info = self.info.lock(); debug_assert!(!info.complete); // Remove the waiter from the list of waiters @@ -427,7 +441,7 @@ fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, QueryJobId)>( fn remove_cycle<'tcx>( query_map: &QueryMap<'tcx>, jobs: &mut Vec, - wakelist: &mut Vec>>, + wakelist: &mut Vec>>>, tcx: TyCtxt<'tcx>, ) -> bool { let mut visited = FxHashSet::default(); diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 82c955778bd..6506e1f83b4 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -4,7 +4,7 @@ use crate::dep_graph::{DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex}; use crate::ty::query::caches::QueryCache; -use crate::ty::query::config::QueryDescription; +use crate::ty::query::config::{QueryContext, QueryDescription}; use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId}; use crate::ty::query::Query; use crate::ty::tls; @@ -29,38 +29,38 @@ use std::ptr; #[cfg(debug_assertions)] use std::sync::atomic::{AtomicUsize, Ordering}; -pub(crate) struct QueryStateShard<'tcx, K, C> { +pub(crate) struct QueryStateShard { cache: C, - active: FxHashMap>, + active: FxHashMap>, /// Used to generate unique ids for active jobs. jobs: u32, } -impl<'tcx, K, C> QueryStateShard<'tcx, K, C> { +impl QueryStateShard { fn get_cache(&mut self) -> &mut C { &mut self.cache } } -impl<'tcx, K, C: Default> Default for QueryStateShard<'tcx, K, C> { - fn default() -> QueryStateShard<'tcx, K, C> { +impl Default for QueryStateShard { + fn default() -> QueryStateShard { QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 } } } -pub(crate) struct QueryState<'tcx, C: QueryCache> { +pub(crate) struct QueryState { cache: C, - shards: Sharded>, + shards: Sharded>, #[cfg(debug_assertions)] pub(super) cache_hits: AtomicUsize, } -impl<'tcx, C: QueryCache> QueryState<'tcx, C> { +impl QueryState { pub(super) fn get_lookup( &'tcx self, key: &K2, - ) -> QueryLookup<'tcx, C::Key, C::Sharded> { + ) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> { // We compute the key's hash once and then use it for both the // shard lookup and the hashmap lookup. This relies on the fact // that both of them use `FxHasher`. 
@@ -75,16 +75,16 @@ impl<'tcx, C: QueryCache> QueryState<'tcx, C> { } /// Indicates the state of a query for a given key in a query map. -enum QueryResult<'tcx> { +enum QueryResult { /// An already executing query. The query job can be used to await for its completion. - Started(QueryJob<'tcx>), + Started(QueryJob), /// The query panicked. Queries trying to wait on this will raise a fatal error which will /// silently panic. Poisoned, } -impl<'tcx, C: QueryCache> QueryState<'tcx, C> { +impl QueryState { pub(super) fn iter_results( &self, f: impl for<'a> FnOnce( @@ -101,8 +101,8 @@ impl<'tcx, C: QueryCache> QueryState<'tcx, C> { pub(super) fn try_collect_active_jobs( &self, kind: DepKind, - make_query: fn(C::Key) -> Query<'tcx>, - jobs: &mut FxHashMap>, + make_query: fn(C::Key) -> CTX::Query, + jobs: &mut FxHashMap>, ) -> Option<()> where C::Key: Clone, @@ -128,8 +128,8 @@ impl<'tcx, C: QueryCache> QueryState<'tcx, C> { } } -impl<'tcx, C: QueryCache> Default for QueryState<'tcx, C> { - fn default() -> QueryState<'tcx, C> { +impl Default for QueryState { + fn default() -> QueryState { QueryState { cache: C::default(), shards: Default::default(), @@ -140,26 +140,26 @@ impl<'tcx, C: QueryCache> Default for QueryState<'tcx, C> { } /// Values used when checking a query cache which can be reused on a cache-miss to execute the query. -pub(crate) struct QueryLookup<'tcx, K, C> { +pub(crate) struct QueryLookup<'tcx, CTX: QueryContext, K, C> { pub(super) key_hash: u64, shard: usize, - pub(super) lock: LockGuard<'tcx, QueryStateShard<'tcx, K, C>>, + pub(super) lock: LockGuard<'tcx, QueryStateShard>, } /// A type representing the responsibility to execute the job in the `job` field. /// This will poison the relevant query if dropped. -struct JobOwner<'tcx, C> +struct JobOwner<'tcx, CTX: QueryContext, C> where C: QueryCache, C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, { - state: &'tcx QueryState<'tcx, C>, + state: &'tcx QueryState, key: C::Key, id: QueryJobId, } -impl<'tcx, C: QueryCache> JobOwner<'tcx, C> +impl<'tcx, C: QueryCache> JobOwner<'tcx, TyCtxt<'tcx>, C> where C: QueryCache, C::Key: Eq + Hash + Clone + Debug, @@ -178,7 +178,7 @@ where tcx: TyCtxt<'tcx>, span: Span, key: &C::Key, - mut lookup: QueryLookup<'tcx, C::Key, C::Sharded>, + mut lookup: QueryLookup<'tcx, TyCtxt<'tcx>, C::Key, C::Sharded>, ) -> TryGetJob<'tcx, C> where Q: QueryDescription<'tcx, Key = C::Key, Value = C::Value, Cache = C>, @@ -258,7 +258,14 @@ where return TryGetJob::JobCompleted(cached); } } +} +impl<'tcx, CTX: QueryContext, C: QueryCache> JobOwner<'tcx, CTX, C> +where + C: QueryCache, + C::Key: Eq + Hash + Clone + Debug, + C::Value: Clone, +{ /// Completes the query by updating the query cache with the `result`, /// signals the waiter and forgets the JobOwner, so it won't poison the query #[inline(always)] @@ -295,7 +302,7 @@ where (result, diagnostics.into_inner()) } -impl<'tcx, C: QueryCache> Drop for JobOwner<'tcx, C> +impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C> where C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, @@ -322,10 +329,10 @@ where } #[derive(Clone)] -pub(crate) struct CycleError<'tcx> { +pub(crate) struct CycleError { /// The query and related span that uses the cycle. - pub(super) usage: Option<(Span, Query<'tcx>)>, - pub(super) cycle: Vec>, + pub(super) usage: Option<(Span, CTX::Query)>, + pub(super) cycle: Vec>, } /// The result of `try_start`. @@ -335,7 +342,7 @@ where C::Value: Clone, { /// The query is not yet started. 
Contains a guard to the cache eventually used to start it. - NotYetStarted(JobOwner<'tcx, C>), + NotYetStarted(JobOwner<'tcx, TyCtxt<'tcx>, C>), /// The query was already completed. /// Returns the result of the query and its dep-node index @@ -347,6 +354,10 @@ where Cycle(C::Value), } +impl QueryContext for TyCtxt<'tcx> { + type Query = Query<'tcx>; +} + impl<'tcx> TyCtxt<'tcx> { /// Executes a job by changing the `ImplicitCtxt` to point to the /// new query job while it executes. It returns the diagnostics @@ -383,7 +394,7 @@ impl<'tcx> TyCtxt<'tcx> { #[cold] pub(super) fn report_cycle( self, - CycleError { usage, cycle: stack }: CycleError<'tcx>, + CycleError { usage, cycle: stack }: CycleError>, ) -> DiagnosticBuilder<'tcx> { assert!(!stack.is_empty()); @@ -476,7 +487,7 @@ impl<'tcx> TyCtxt<'tcx> { #[inline(always)] fn try_get_cached( self, - state: &'tcx QueryState<'tcx, C>, + state: &'tcx QueryState, C>, key: C::Key, // `on_hit` can be called while holding a lock to the query cache on_hit: OnHit, @@ -485,11 +496,11 @@ impl<'tcx> TyCtxt<'tcx> { where C: QueryCache, OnHit: FnOnce(&C::Value, DepNodeIndex) -> R, - OnMiss: FnOnce(C::Key, QueryLookup<'tcx, C::Key, C::Sharded>) -> R, + OnMiss: FnOnce(C::Key, QueryLookup<'tcx, TyCtxt<'tcx>, C::Key, C::Sharded>) -> R, { state.cache.lookup( state, - QueryStateShard::::get_cache, + QueryStateShard::, C::Key, C::Sharded>::get_cache, key, |value, index| { if unlikely!(self.prof.enabled()) { @@ -529,7 +540,7 @@ impl<'tcx> TyCtxt<'tcx> { self, span: Span, key: Q::Key, - lookup: QueryLookup<'tcx, Q::Key, ::Sharded>, + lookup: QueryLookup<'tcx, TyCtxt<'tcx>, Q::Key, ::Sharded>, ) -> Q::Value { let job = match JobOwner::try_start::(self, span, &key, lookup) { TryGetJob::NotYetStarted(job) => job, @@ -690,7 +701,7 @@ impl<'tcx> TyCtxt<'tcx> { fn force_query_with_job + 'tcx>( self, key: Q::Key, - job: JobOwner<'tcx, Q::Cache>, + job: JobOwner<'tcx, TyCtxt<'tcx>, Q::Cache>, dep_node: DepNode, ) -> (Q::Value, DepNodeIndex) { // If the following assertion triggers, it can have two reasons: @@ -963,7 +974,7 @@ macro_rules! define_queries_inner { const CATEGORY: ProfileCategory = $category; } - impl<$tcx> QueryAccessors<$tcx> for queries::$name<$tcx> { + impl<$tcx> QueryAccessors> for queries::$name<$tcx> { const ANON: bool = is_anon!([$($modifiers)*]); const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]); const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node; @@ -971,7 +982,7 @@ macro_rules! define_queries_inner { type Cache = query_storage!([$($modifiers)*][$K, $V]); #[inline(always)] - fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<$tcx, Self::Cache> { + fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState, Self::Cache> { &tcx.queries.$name } @@ -1002,7 +1013,7 @@ macro_rules! define_queries_inner { fn handle_cycle_error( tcx: TyCtxt<'tcx>, - error: CycleError<'tcx> + error: CycleError> ) -> Self::Value { handle_cycle_error!([$($modifiers)*][tcx, error]) } @@ -1124,8 +1135,8 @@ macro_rules! define_queries_struct { fallback_extern_providers: Box>, $($(#[$attr])* $name: QueryState< - $tcx, - as QueryAccessors<'tcx>>::Cache, + TyCtxt<$tcx>, + as QueryAccessors>>::Cache, >,)* } @@ -1145,12 +1156,12 @@ macro_rules! 
define_queries_struct { pub(crate) fn try_collect_active_jobs( &self - ) -> Option>> { + ) -> Option>>> { let mut jobs = FxHashMap::default(); $( self.$name.try_collect_active_jobs( - as QueryAccessors<'tcx>>::DEP_KIND, + as QueryAccessors>>::DEP_KIND, Query::$name, &mut jobs, )?; diff --git a/src/librustc/ty/query/profiling_support.rs b/src/librustc/ty/query/profiling_support.rs index 0081794051f..a540a18a19f 100644 --- a/src/librustc/ty/query/profiling_support.rs +++ b/src/librustc/ty/query/profiling_support.rs @@ -160,7 +160,7 @@ where pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>( tcx: TyCtxt<'tcx>, query_name: &'static str, - query_state: &QueryState<'tcx, C>, + query_state: &QueryState, C>, string_cache: &mut QueryKeyStringCache, ) where C: QueryCache, diff --git a/src/librustc/ty/query/stats.rs b/src/librustc/ty/query/stats.rs index 527bb46c908..e6578d1eb5f 100644 --- a/src/librustc/ty/query/stats.rs +++ b/src/librustc/ty/query/stats.rs @@ -1,5 +1,5 @@ use crate::ty::query::caches::QueryCache; -use crate::ty::query::config::QueryAccessors; +use crate::ty::query::config::{QueryAccessors, QueryContext}; use crate::ty::query::plumbing::QueryState; use crate::ty::query::queries; use crate::ty::TyCtxt; @@ -38,7 +38,10 @@ struct QueryStats { local_def_id_keys: Option, } -fn stats<'tcx, C: QueryCache>(name: &'static str, map: &QueryState<'tcx, C>) -> QueryStats { +fn stats( + name: &'static str, + map: &QueryState, +) -> QueryStats { let mut stats = QueryStats { name, #[cfg(debug_assertions)] @@ -124,7 +127,8 @@ macro_rules! print_stats { $($( queries.push(stats::< - as QueryAccessors<'_>>::Cache, + TyCtxt<'_>, + as QueryAccessors>>::Cache, >( stringify!($name), &tcx.queries.$name, From 57c3177b31ab56fa00ca919ad3154a7c12348b94 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sun, 8 Mar 2020 19:42:11 +0100 Subject: [PATCH 03/31] Make QueryDescription parameter a type. --- src/librustc/ty/query/config.rs | 32 ++++++++++++++------------ src/librustc/ty/query/on_disk_cache.rs | 3 ++- src/librustc/ty/query/plumbing.rs | 28 ++++++++++++++-------- src/librustc_macros/src/query.rs | 4 ++-- 4 files changed, 40 insertions(+), 27 deletions(-) diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index eaa1006791b..e65f3094820 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -5,12 +5,12 @@ use crate::dep_graph::{DepKind, DepNode}; use crate::ty::query::caches::QueryCache; use crate::ty::query::plumbing::CycleError; use crate::ty::query::QueryState; -use crate::ty::TyCtxt; use rustc_data_structures::profiling::ProfileCategory; use rustc_hir::def_id::DefId; use crate::ich::StableHashingContext; use rustc_data_structures::fingerprint::Fingerprint; +use rustc_session::Session; use std::borrow::Cow; use std::fmt::Debug; use std::hash::Hash; @@ -25,6 +25,12 @@ pub trait QueryConfig { pub trait QueryContext: Copy { type Query; + + /// Access the session. + fn session(&self) -> &Session; + + /// Get string representation from DefPath. 
+ fn def_path_str(&self, def_id: DefId) -> String; } pub(crate) trait QueryAccessors: QueryConfig { @@ -48,26 +54,25 @@ pub(crate) trait QueryAccessors: QueryConfig { fn handle_cycle_error(tcx: CTX, error: CycleError) -> Self::Value; } -pub(crate) trait QueryDescription<'tcx>: QueryAccessors> { - fn describe(tcx: TyCtxt<'_>, key: Self::Key) -> Cow<'static, str>; +pub(crate) trait QueryDescription: QueryAccessors { + fn describe(tcx: CTX, key: Self::Key) -> Cow<'static, str>; #[inline] - fn cache_on_disk(_: TyCtxt<'tcx>, _: Self::Key, _: Option<&Self::Value>) -> bool { + fn cache_on_disk(_: CTX, _: Self::Key, _: Option<&Self::Value>) -> bool { false } - fn try_load_from_disk(_: TyCtxt<'tcx>, _: SerializedDepNodeIndex) -> Option { + fn try_load_from_disk(_: CTX, _: SerializedDepNodeIndex) -> Option { bug!("QueryDescription::load_from_disk() called for an unsupported query.") } } -impl<'tcx, M> QueryDescription<'tcx> for M +impl QueryDescription for M where - M: QueryAccessors, Key = DefId>, - //M::Cache: QueryCache, + M: QueryAccessors, { - default fn describe(tcx: TyCtxt<'_>, def_id: DefId) -> Cow<'static, str> { - if !tcx.sess.verbose() { + default fn describe(tcx: CTX, def_id: DefId) -> Cow<'static, str> { + if !tcx.session().verbose() { format!("processing `{}`", tcx.def_path_str(def_id)).into() } else { let name = ::std::any::type_name::(); @@ -75,14 +80,11 @@ where } } - default fn cache_on_disk(_: TyCtxt<'tcx>, _: Self::Key, _: Option<&Self::Value>) -> bool { + default fn cache_on_disk(_: CTX, _: Self::Key, _: Option<&Self::Value>) -> bool { false } - default fn try_load_from_disk( - _: TyCtxt<'tcx>, - _: SerializedDepNodeIndex, - ) -> Option { + default fn try_load_from_disk(_: CTX, _: SerializedDepNodeIndex) -> Option { bug!("QueryDescription::load_from_disk() called for an unsupported query.") } } diff --git a/src/librustc/ty/query/on_disk_cache.rs b/src/librustc/ty/query/on_disk_cache.rs index 155f792bd9d..14839e6ad50 100644 --- a/src/librustc/ty/query/on_disk_cache.rs +++ b/src/librustc/ty/query/on_disk_cache.rs @@ -994,7 +994,8 @@ fn encode_query_results<'a, 'tcx, Q, E>( query_result_index: &mut EncodedQueryResultIndex, ) -> Result<(), E::Error> where - Q: super::config::QueryDescription<'tcx, Value: Encodable>, + Q: super::config::QueryDescription>, + Q::Value: Encodable, E: 'a + TyEncoder, { let _timer = tcx diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 6506e1f83b4..c0f2c4a11bc 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -17,6 +17,8 @@ use rustc_data_structures::sharded::Sharded; use rustc_data_structures::sync::{Lock, LockGuard}; use rustc_data_structures::thin_vec::ThinVec; use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, FatalError, Handler, Level}; +use rustc_session::Session; +use rustc_span::def_id::DefId; use rustc_span::source_map::DUMMY_SP; use rustc_span::Span; use std::collections::hash_map::Entry; @@ -181,7 +183,7 @@ where mut lookup: QueryLookup<'tcx, TyCtxt<'tcx>, C::Key, C::Sharded>, ) -> TryGetJob<'tcx, C> where - Q: QueryDescription<'tcx, Key = C::Key, Value = C::Value, Cache = C>, + Q: QueryDescription, Key = C::Key, Value = C::Value, Cache = C>, { let lock = &mut *lookup.lock; @@ -356,6 +358,14 @@ where impl QueryContext for TyCtxt<'tcx> { type Query = Query<'tcx>; + + fn session(&self) -> &Session { + &self.sess + } + + fn def_path_str(&self, def_id: DefId) -> String { + TyCtxt::def_path_str(*self, def_id) + } } impl<'tcx> TyCtxt<'tcx> { @@ -517,7 
+527,7 @@ impl<'tcx> TyCtxt<'tcx> { } #[inline(never)] - pub(super) fn get_query + 'tcx>( + pub(super) fn get_query> + 'tcx>( self, span: Span, key: Q::Key, @@ -536,7 +546,7 @@ impl<'tcx> TyCtxt<'tcx> { } #[inline(always)] - fn try_execute_query + 'tcx>( + fn try_execute_query> + 'tcx>( self, span: Span, key: Q::Key, @@ -614,7 +624,7 @@ impl<'tcx> TyCtxt<'tcx> { result } - fn load_from_disk_and_cache_in_memory>( + fn load_from_disk_and_cache_in_memory>>( self, key: Q::Key, prev_dep_node_index: SerializedDepNodeIndex, @@ -671,7 +681,7 @@ impl<'tcx> TyCtxt<'tcx> { #[inline(never)] #[cold] - fn incremental_verify_ich>( + fn incremental_verify_ich>>( self, result: &Q::Value, dep_node: &DepNode, @@ -698,7 +708,7 @@ impl<'tcx> TyCtxt<'tcx> { } #[inline(always)] - fn force_query_with_job + 'tcx>( + fn force_query_with_job> + 'tcx>( self, key: Q::Key, job: JobOwner<'tcx, TyCtxt<'tcx>, Q::Cache>, @@ -756,7 +766,7 @@ impl<'tcx> TyCtxt<'tcx> { /// side-effects -- e.g., in order to report errors for erroneous programs. /// /// Note: The optimization is only available during incr. comp. - pub(super) fn ensure_query + 'tcx>(self, key: Q::Key) { + pub(super) fn ensure_query> + 'tcx>(self, key: Q::Key) { if Q::EVAL_ALWAYS { let _ = self.get_query::(DUMMY_SP, key); return; @@ -784,7 +794,7 @@ impl<'tcx> TyCtxt<'tcx> { } #[allow(dead_code)] - pub(super) fn force_query + 'tcx>( + pub(super) fn force_query> + 'tcx>( self, key: Q::Key, span: Span, @@ -920,7 +930,7 @@ macro_rules! define_queries_inner { } } - pub fn describe(&self, tcx: TyCtxt<'_>) -> Cow<'static, str> { + pub fn describe(&self, tcx: TyCtxt<$tcx>) -> Cow<'static, str> { let (r, name) = match *self { $(Query::$name(key) => { (queries::$name::describe(tcx, key), stringify!($name)) diff --git a/src/librustc_macros/src/query.rs b/src/librustc_macros/src/query.rs index e7005f2f5ba..57fe8ede9d1 100644 --- a/src/librustc_macros/src/query.rs +++ b/src/librustc_macros/src/query.rs @@ -380,7 +380,7 @@ fn add_query_description_impl( quote! { #[allow(unused_variables)] fn describe( - #tcx: TyCtxt<'_>, + #tcx: TyCtxt<'tcx>, #key: #arg, ) -> Cow<'static, str> { format!(#desc).into() @@ -393,7 +393,7 @@ fn add_query_description_impl( let desc = desc.unwrap_or(quote! {}); impls.extend(quote! { - impl<'tcx> QueryDescription<'tcx> for queries::#name<'tcx> { + impl<'tcx> QueryDescription> for queries::#name<'tcx> { #desc #cache } From c4a451e5af8ed1153a5a7a127c594265bd44e624 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Wed, 11 Mar 2020 22:02:50 +0100 Subject: [PATCH 04/31] Make QueryCache generic on the context. 
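The point of this patch: `QueryCache` previously hard-coded `TyCtxt<'tcx>`; after this change it takes the query context as a type parameter (`CTX: QueryContext`), so the cache machinery no longer depends on librustc types directly. Below is a rough, self-contained sketch of the pattern only — the method signatures are simplified (the real trait keeps its sharded storage and the `GetCache`/`OnHit`/`OnMiss` callbacks), and `DummyCtx` plus the `main` usage are invented placeholders, not the real rustc API:

    use std::collections::HashMap;
    use std::hash::Hash;

    trait QueryContext: Copy {}

    // The cache trait is generic over the context instead of naming TyCtxt.
    trait QueryCache<CTX: QueryContext>: Default {
        type Key;
        type Value;

        fn lookup(&self, key: &Self::Key) -> Option<&Self::Value>;
        fn complete(&mut self, _tcx: CTX, key: Self::Key, value: Self::Value);
    }

    struct DefaultCache<K, V>(HashMap<K, V>);

    // Manual Default avoids requiring K: Default + V: Default.
    impl<K, V> Default for DefaultCache<K, V> {
        fn default() -> Self {
            DefaultCache(HashMap::new())
        }
    }

    impl<CTX: QueryContext, K: Eq + Hash, V> QueryCache<CTX> for DefaultCache<K, V> {
        type Key = K;
        type Value = V;

        fn lookup(&self, key: &K) -> Option<&V> {
            self.0.get(key)
        }

        fn complete(&mut self, _tcx: CTX, key: K, value: V) {
            self.0.insert(key, value);
        }
    }

    // Placeholder context type for the example.
    #[derive(Copy, Clone)]
    struct DummyCtx;
    impl QueryContext for DummyCtx {}

    fn main() {
        let mut cache = DefaultCache::<&str, u32>::default();
        cache.complete(DummyCtx, "typeck", 1);
        let hit = <DefaultCache<&str, u32> as QueryCache<DummyCtx>>::lookup(&cache, &"typeck");
        assert_eq!(hit, Some(&1));
    }

Only the `TyCtxt`-specific parts of the trait (for example the context passed to `complete`) move behind the `CTX` parameter; the caching logic itself is unchanged.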
--- src/librustc/ty/query/caches.rs | 35 +++++++------- src/librustc/ty/query/config.rs | 2 +- src/librustc/ty/query/plumbing.rs | 55 ++++++++++++---------- src/librustc/ty/query/profiling_support.rs | 2 +- src/librustc/ty/query/stats.rs | 2 +- 5 files changed, 50 insertions(+), 46 deletions(-) diff --git a/src/librustc/ty/query/caches.rs b/src/librustc/ty/query/caches.rs index 7dd858142da..f740fada1e5 100644 --- a/src/librustc/ty/query/caches.rs +++ b/src/librustc/ty/query/caches.rs @@ -1,6 +1,6 @@ use crate::dep_graph::DepNodeIndex; +use crate::ty::query::config::QueryContext; use crate::ty::query::plumbing::{QueryLookup, QueryState, QueryStateShard}; -use crate::ty::TyCtxt; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sharded::Sharded; @@ -8,11 +8,11 @@ use std::default::Default; use std::hash::Hash; use std::marker::PhantomData; -pub(crate) trait CacheSelector { - type Cache: QueryCache; +pub(crate) trait CacheSelector { + type Cache: QueryCache; } -pub(crate) trait QueryCache: Default { +pub(crate) trait QueryCache: Default { type Key; type Value; type Sharded: Default; @@ -21,9 +21,9 @@ pub(crate) trait QueryCache: Default { /// It returns the shard index and a lock guard to the shard, /// which will be used if the query is not in the cache and we need /// to compute it. - fn lookup<'tcx, R, GetCache, OnHit, OnMiss>( + fn lookup( &self, - state: &'tcx QueryState, Self>, + state: &QueryState, get_cache: GetCache, key: Self::Key, // `on_hit` can be called while holding a lock to the query state shard. @@ -32,14 +32,14 @@ pub(crate) trait QueryCache: Default { ) -> R where GetCache: for<'a> Fn( - &'a mut QueryStateShard, Self::Key, Self::Sharded>, + &'a mut QueryStateShard, ) -> &'a mut Self::Sharded, OnHit: FnOnce(&Self::Value, DepNodeIndex) -> R, - OnMiss: FnOnce(Self::Key, QueryLookup<'tcx, TyCtxt<'tcx>, Self::Key, Self::Sharded>) -> R; + OnMiss: FnOnce(Self::Key, QueryLookup<'_, CTX, Self::Key, Self::Sharded>) -> R; fn complete( &self, - tcx: TyCtxt<'tcx>, + tcx: CTX, lock_sharded_storage: &mut Self::Sharded, key: Self::Key, value: Self::Value, @@ -58,7 +58,7 @@ pub(crate) trait QueryCache: Default { pub struct DefaultCacheSelector; -impl CacheSelector for DefaultCacheSelector { +impl CacheSelector for DefaultCacheSelector { type Cache = DefaultCache; } @@ -70,26 +70,25 @@ impl Default for DefaultCache { } } -impl QueryCache for DefaultCache { +impl QueryCache for DefaultCache { type Key = K; type Value = V; type Sharded = FxHashMap; #[inline(always)] - fn lookup<'tcx, R, GetCache, OnHit, OnMiss>( + fn lookup( &self, - state: &'tcx QueryState, Self>, + state: &QueryState, get_cache: GetCache, key: K, on_hit: OnHit, on_miss: OnMiss, ) -> R where - GetCache: for<'a> Fn( - &'a mut QueryStateShard, K, Self::Sharded>, - ) -> &'a mut Self::Sharded, + GetCache: + for<'a> Fn(&'a mut QueryStateShard) -> &'a mut Self::Sharded, OnHit: FnOnce(&V, DepNodeIndex) -> R, - OnMiss: FnOnce(K, QueryLookup<'tcx, TyCtxt<'tcx>, K, Self::Sharded>) -> R, + OnMiss: FnOnce(K, QueryLookup<'_, CTX, K, Self::Sharded>) -> R, { let mut lookup = state.get_lookup(&key); let lock = &mut *lookup.lock; @@ -102,7 +101,7 @@ impl QueryCache for DefaultCache { #[inline] fn complete( &self, - _: TyCtxt<'tcx>, + _: CTX, lock_sharded_storage: &mut Self::Sharded, key: K, value: V, diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index e65f3094820..fd2f855d5b1 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -38,7 +38,7 @@ pub(crate) 
trait QueryAccessors: QueryConfig { const EVAL_ALWAYS: bool; const DEP_KIND: DepKind; - type Cache: QueryCache; + type Cache: QueryCache; // Don't use this method to access query results, instead use the methods on TyCtxt fn query_state<'a>(tcx: CTX) -> &'a QueryState; diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index c0f2c4a11bc..8462610a6b6 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -51,14 +51,14 @@ impl Default for QueryStateShard { } } -pub(crate) struct QueryState { +pub(crate) struct QueryState> { cache: C, shards: Sharded>, #[cfg(debug_assertions)] pub(super) cache_hits: AtomicUsize, } -impl QueryState { +impl> QueryState { pub(super) fn get_lookup( &'tcx self, key: &K2, @@ -86,7 +86,7 @@ enum QueryResult { Poisoned, } -impl QueryState { +impl> QueryState { pub(super) fn iter_results( &self, f: impl for<'a> FnOnce( @@ -130,7 +130,7 @@ impl QueryState { } } -impl Default for QueryState { +impl> Default for QueryState { fn default() -> QueryState { QueryState { cache: C::default(), @@ -152,7 +152,7 @@ pub(crate) struct QueryLookup<'tcx, CTX: QueryContext, K, C> { /// This will poison the relevant query if dropped. struct JobOwner<'tcx, CTX: QueryContext, C> where - C: QueryCache, + C: QueryCache, C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, { @@ -161,9 +161,9 @@ where id: QueryJobId, } -impl<'tcx, C: QueryCache> JobOwner<'tcx, TyCtxt<'tcx>, C> +impl<'tcx, C> JobOwner<'tcx, TyCtxt<'tcx>, C> where - C: QueryCache, + C: QueryCache> + 'tcx, C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, { @@ -176,12 +176,12 @@ where /// This function is inlined because that results in a noticeable speed-up /// for some compile-time benchmarks. #[inline(always)] - fn try_start( + fn try_start<'a, 'b, Q>( tcx: TyCtxt<'tcx>, span: Span, key: &C::Key, - mut lookup: QueryLookup<'tcx, TyCtxt<'tcx>, C::Key, C::Sharded>, - ) -> TryGetJob<'tcx, C> + mut lookup: QueryLookup<'a, TyCtxt<'tcx>, C::Key, C::Sharded>, + ) -> TryGetJob<'b, TyCtxt<'tcx>, C> where Q: QueryDescription, Key = C::Key, Value = C::Value, Cache = C>, { @@ -262,16 +262,16 @@ where } } -impl<'tcx, CTX: QueryContext, C: QueryCache> JobOwner<'tcx, CTX, C> +impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C> where - C: QueryCache, + C: QueryCache, C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, { /// Completes the query by updating the query cache with the `result`, /// signals the waiter and forgets the JobOwner, so it won't poison the query #[inline(always)] - fn complete(self, tcx: TyCtxt<'tcx>, result: &C::Value, dep_node_index: DepNodeIndex) { + fn complete(self, tcx: CTX, result: &C::Value, dep_node_index: DepNodeIndex) { // We can move out of `self` here because we `mem::forget` it below let key = unsafe { ptr::read(&self.key) }; let state = self.state; @@ -304,7 +304,7 @@ where (result, diagnostics.into_inner()) } -impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C> +impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C> where C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, @@ -338,13 +338,13 @@ pub(crate) struct CycleError { } /// The result of `try_start`. -enum TryGetJob<'tcx, C: QueryCache> +enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache> where C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, { /// The query is not yet started. Contains a guard to the cache eventually used to start it. 
- NotYetStarted(JobOwner<'tcx, TyCtxt<'tcx>, C>), + NotYetStarted(JobOwner<'tcx, CTX, C>), /// The query was already completed. /// Returns the result of the query and its dep-node index @@ -504,9 +504,9 @@ impl<'tcx> TyCtxt<'tcx> { on_miss: OnMiss, ) -> R where - C: QueryCache, + C: QueryCache>, OnHit: FnOnce(&C::Value, DepNodeIndex) -> R, - OnMiss: FnOnce(C::Key, QueryLookup<'tcx, TyCtxt<'tcx>, C::Key, C::Sharded>) -> R, + OnMiss: FnOnce(C::Key, QueryLookup<'_, TyCtxt<'tcx>, C::Key, C::Sharded>) -> R, { state.cache.lookup( state, @@ -550,7 +550,12 @@ impl<'tcx> TyCtxt<'tcx> { self, span: Span, key: Q::Key, - lookup: QueryLookup<'tcx, TyCtxt<'tcx>, Q::Key, ::Sharded>, + lookup: QueryLookup< + '_, + TyCtxt<'tcx>, + Q::Key, + >>::Sharded, + >, ) -> Q::Value { let job = match JobOwner::try_start::(self, span, &key, lookup) { TryGetJob::NotYetStarted(job) => job, @@ -866,14 +871,14 @@ macro_rules! is_eval_always { } macro_rules! query_storage { - ([][$K:ty, $V:ty]) => { - <<$K as Key>::CacheSelector as CacheSelector<$K, $V>>::Cache + (<$tcx:tt>[][$K:ty, $V:ty]) => { + <<$K as Key>::CacheSelector as CacheSelector, $K, $V>>::Cache }; - ([storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => { + (<$tcx:tt>[storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => { $ty }; - ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { - query_storage!([$($($modifiers)*)*][$($args)*]) + (<$tcx:tt>[$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { + query_storage!(<$tcx>[$($($modifiers)*)*][$($args)*]) }; } @@ -989,7 +994,7 @@ macro_rules! define_queries_inner { const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]); const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node; - type Cache = query_storage!([$($modifiers)*][$K, $V]); + type Cache = query_storage!(<$tcx>[$($modifiers)*][$K, $V]); #[inline(always)] fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState, Self::Cache> { diff --git a/src/librustc/ty/query/profiling_support.rs b/src/librustc/ty/query/profiling_support.rs index a540a18a19f..616fbaafab9 100644 --- a/src/librustc/ty/query/profiling_support.rs +++ b/src/librustc/ty/query/profiling_support.rs @@ -163,7 +163,7 @@ pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>( query_state: &QueryState, C>, string_cache: &mut QueryKeyStringCache, ) where - C: QueryCache, + C: QueryCache>, C::Key: Debug + Clone, { tcx.prof.with_profiler(|profiler| { diff --git a/src/librustc/ty/query/stats.rs b/src/librustc/ty/query/stats.rs index e6578d1eb5f..a13f00dc6d4 100644 --- a/src/librustc/ty/query/stats.rs +++ b/src/librustc/ty/query/stats.rs @@ -38,7 +38,7 @@ struct QueryStats { local_def_id_keys: Option, } -fn stats( +fn stats>( name: &'static str, map: &QueryState, ) -> QueryStats { From ee9781cea03142b23ee609096b18d03aeaf1648b Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Wed, 18 Mar 2020 20:19:44 +0100 Subject: [PATCH 05/31] Make QueryContext a subtrait of DepContext. --- src/librustc/ty/query/config.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index fd2f855d5b1..77feb7f4df3 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -1,15 +1,15 @@ //! Query configuration and description traits. 
+use crate::dep_graph::DepKind; use crate::dep_graph::SerializedDepNodeIndex; -use crate::dep_graph::{DepKind, DepNode}; use crate::ty::query::caches::QueryCache; use crate::ty::query::plumbing::CycleError; use crate::ty::query::QueryState; use rustc_data_structures::profiling::ProfileCategory; use rustc_hir::def_id::DefId; -use crate::ich::StableHashingContext; use rustc_data_structures::fingerprint::Fingerprint; +use rustc_query_system::dep_graph::{DepContext, DepNode}; use rustc_session::Session; use std::borrow::Cow; use std::fmt::Debug; @@ -23,7 +23,7 @@ pub trait QueryConfig { type Value: Clone; } -pub trait QueryContext: Copy { +pub trait QueryContext: DepContext { type Query; /// Access the session. @@ -36,20 +36,22 @@ pub trait QueryContext: Copy { pub(crate) trait QueryAccessors: QueryConfig { const ANON: bool; const EVAL_ALWAYS: bool; - const DEP_KIND: DepKind; + const DEP_KIND: CTX::DepKind; type Cache: QueryCache; // Don't use this method to access query results, instead use the methods on TyCtxt fn query_state<'a>(tcx: CTX) -> &'a QueryState; - fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode; + fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode; // Don't use this method to compute query results, instead use the methods on TyCtxt fn compute(tcx: CTX, key: Self::Key) -> Self::Value; - fn hash_result(hcx: &mut StableHashingContext<'_>, result: &Self::Value) - -> Option; + fn hash_result( + hcx: &mut CTX::StableHashingContext, + result: &Self::Value, + ) -> Option; fn handle_cycle_error(tcx: CTX, error: CycleError) -> Self::Value; } From 2a52436619d94e42aa1e0ca57f412e5ce06ef561 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Wed, 18 Mar 2020 20:30:02 +0100 Subject: [PATCH 06/31] Generalise QueryJobId. --- src/librustc/ty/context.rs | 4 +-- src/librustc/ty/query/job.rs | 48 +++++++++++++++---------------- src/librustc/ty/query/plumbing.rs | 8 +++--- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index c1d13b0353e..3d4806f53be 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -1603,7 +1603,7 @@ nop_list_lift! {substs; GenericArg<'a> => GenericArg<'tcx>} pub mod tls { use super::{ptr_eq, GlobalCtxt, TyCtxt}; - use crate::dep_graph::TaskDeps; + use crate::dep_graph::{DepKind, TaskDeps}; use crate::ty::query; use rustc_data_structures::sync::{self, Lock}; use rustc_data_structures::thin_vec::ThinVec; @@ -1630,7 +1630,7 @@ pub mod tls { /// The current query job, if any. This is updated by `JobOwner::start` in /// `ty::query::plumbing` when executing a query. - pub query: Option, + pub query: Option>, /// Where to store diagnostics for the current query job, if any. /// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query. diff --git a/src/librustc/ty/query/job.rs b/src/librustc/ty/query/job.rs index e52c25d86b1..488615c7443 100644 --- a/src/librustc/ty/query/job.rs +++ b/src/librustc/ty/query/job.rs @@ -34,7 +34,7 @@ pub struct QueryInfo { pub query: CTX::Query, } -type QueryMap<'tcx> = FxHashMap>>; +type QueryMap<'tcx> = FxHashMap, QueryJobInfo>>; /// A value uniquely identifiying an active query job within a shard in the query cache. #[derive(Copy, Clone, Eq, PartialEq, Hash)] @@ -42,7 +42,7 @@ pub struct QueryShardJobId(pub NonZeroU32); /// A value uniquely identifiying an active query job. 
#[derive(Copy, Clone, Eq, PartialEq, Hash)] -pub struct QueryJobId { +pub struct QueryJobId { /// Which job within a shard is this pub job: QueryShardJobId, @@ -50,10 +50,10 @@ pub struct QueryJobId { pub shard: u16, /// What kind of query this job is - pub kind: DepKind, + pub kind: K, } -impl QueryJobId { +impl QueryJobId { pub fn new(job: QueryShardJobId, shard: usize, kind: DepKind) -> Self { QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind } } @@ -68,7 +68,7 @@ impl QueryJobId { } #[cfg(parallel_compiler)] - fn parent(self, map: &QueryMap<'_>) -> Option { + fn parent(self, map: &QueryMap<'_>) -> Option> { map.get(&self).unwrap().job.parent } @@ -92,7 +92,7 @@ pub struct QueryJob { pub span: Span, /// The parent query job which created this job and is implicitly waiting on it. - pub parent: Option, + pub parent: Option>, /// The latch that is used to wait on this job. #[cfg(parallel_compiler)] @@ -103,7 +103,7 @@ pub struct QueryJob { impl QueryJob { /// Creates a new query job. - pub fn new(id: QueryShardJobId, span: Span, parent: Option) -> Self { + pub fn new(id: QueryShardJobId, span: Span, parent: Option>) -> Self { QueryJob { id, span, @@ -115,7 +115,7 @@ impl QueryJob { } #[cfg(parallel_compiler)] - pub(super) fn latch(&mut self, _id: QueryJobId) -> QueryLatch { + pub(super) fn latch(&mut self, _id: QueryJobId) -> QueryLatch { if self.latch.is_none() { self.latch = Some(QueryLatch::new()); } @@ -123,7 +123,7 @@ impl QueryJob { } #[cfg(not(parallel_compiler))] - pub(super) fn latch(&mut self, id: QueryJobId) -> QueryLatch { + pub(super) fn latch(&mut self, id: QueryJobId) -> QueryLatch { QueryLatch { id, dummy: PhantomData } } @@ -139,8 +139,8 @@ impl QueryJob { #[cfg(not(parallel_compiler))] #[derive(Clone)] -pub(super) struct QueryLatch { - id: QueryJobId, +pub(super) struct QueryLatch { + id: QueryJobId, dummy: PhantomData, } @@ -187,7 +187,7 @@ impl<'tcx> QueryLatch> { #[cfg(parallel_compiler)] struct QueryWaiter { - query: Option, + query: Option>, condvar: Condvar, span: Span, cycle: Lock>>, @@ -297,7 +297,7 @@ impl QueryLatch { /// A resumable waiter of a query. The usize is the index into waiters in the query's latch #[cfg(parallel_compiler)] -type Waiter = (QueryJobId, usize); +type Waiter = (QueryJobId, usize); /// Visits all the non-resumable and resumable waiters of a query. /// Only waiters in a query are visited. 
@@ -311,11 +311,11 @@ type Waiter = (QueryJobId, usize); #[cfg(parallel_compiler)] fn visit_waiters<'tcx, F>( query_map: &QueryMap<'tcx>, - query: QueryJobId, + query: QueryJobId, mut visit: F, ) -> Option> where - F: FnMut(Span, QueryJobId) -> Option>, + F: FnMut(Span, QueryJobId) -> Option>, { // Visit the parent query which is a non-resumable waiter since it's on the same stack if let Some(parent) = query.parent(query_map) { @@ -346,10 +346,10 @@ where #[cfg(parallel_compiler)] fn cycle_check<'tcx>( query_map: &QueryMap<'tcx>, - query: QueryJobId, + query: QueryJobId, span: Span, - stack: &mut Vec<(Span, QueryJobId)>, - visited: &mut FxHashSet, + stack: &mut Vec<(Span, QueryJobId)>, + visited: &mut FxHashSet>, ) -> Option> { if !visited.insert(query) { return if let Some(p) = stack.iter().position(|q| q.1 == query) { @@ -387,8 +387,8 @@ fn cycle_check<'tcx>( #[cfg(parallel_compiler)] fn connected_to_root<'tcx>( query_map: &QueryMap<'tcx>, - query: QueryJobId, - visited: &mut FxHashSet, + query: QueryJobId, + visited: &mut FxHashSet>, ) -> bool { // We already visited this or we're deliberately ignoring it if !visited.insert(query) { @@ -408,7 +408,7 @@ fn connected_to_root<'tcx>( // Deterministically pick an query from a list #[cfg(parallel_compiler)] -fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, QueryJobId)>( +fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, QueryJobId)>( query_map: &QueryMap<'tcx>, tcx: TyCtxt<'tcx>, queries: &'a [T], @@ -440,7 +440,7 @@ fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, QueryJobId)>( #[cfg(parallel_compiler)] fn remove_cycle<'tcx>( query_map: &QueryMap<'tcx>, - jobs: &mut Vec, + jobs: &mut Vec>, wakelist: &mut Vec>>>, tcx: TyCtxt<'tcx>, ) -> bool { @@ -495,7 +495,7 @@ fn remove_cycle<'tcx>( } } }) - .collect::)>>(); + .collect::, Option<(Span, QueryJobId)>)>>(); // Deterministically pick an entry point let (_, entry_point, usage) = pick_query(query_map, tcx, &entry_points, |e| (e.0, e.1)); @@ -575,7 +575,7 @@ fn deadlock(tcx: TyCtxt<'_>, registry: &rayon_core::Registry) { let mut wakelist = Vec::new(); let query_map = tcx.queries.try_collect_active_jobs().unwrap(); - let mut jobs: Vec = query_map.keys().cloned().collect(); + let mut jobs: Vec> = query_map.keys().cloned().collect(); let mut found_cycle = false; diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 8462610a6b6..2d1614d6ccd 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -104,7 +104,7 @@ impl> QueryState { &self, kind: DepKind, make_query: fn(C::Key) -> CTX::Query, - jobs: &mut FxHashMap>, + jobs: &mut FxHashMap, QueryJobInfo>, ) -> Option<()> where C::Key: Clone, @@ -158,7 +158,7 @@ where { state: &'tcx QueryState, key: C::Key, - id: QueryJobId, + id: QueryJobId, } impl<'tcx, C> JobOwner<'tcx, TyCtxt<'tcx>, C> @@ -375,7 +375,7 @@ impl<'tcx> TyCtxt<'tcx> { #[inline(always)] fn start_query( self, - token: QueryJobId, + token: QueryJobId, diagnostics: Option<&Lock>>, compute: F, ) -> R @@ -1171,7 +1171,7 @@ macro_rules! define_queries_struct { pub(crate) fn try_collect_active_jobs( &self - ) -> Option>>> { + ) -> Option, QueryJobInfo>>> { let mut jobs = FxHashMap::default(); $( From a51ad889dd712e8b665656ebf08a2f85034f7415 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Wed, 18 Mar 2020 21:02:02 +0100 Subject: [PATCH 07/31] Decouple from DepKind. 
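What "decoupling" means here: the query plumbing stops naming librustc's concrete `crate::dep_graph::DepKind` enum (e.g. `DepKind::Null`) and instead goes through the `rustc_query_system::dep_graph::DepKind` trait, which this patch extends with a `NULL` constant and `can_reconstruct_query_key()`; librustc's enum then implements those items (`NULL` maps to `DepKind::Null`). The following is a minimal sketch of that pattern, not the real implementation — the trait items mirror the patch, while `ToyDepKind`, `is_null`, and `main` are invented purely for illustration:

    use std::fmt::Debug;
    use std::hash::Hash;

    trait DepKind: Copy + Debug + Eq + Hash {
        const NULL: Self;
        fn is_eval_always(&self) -> bool;
        fn can_reconstruct_query_key(&self) -> bool;
    }

    // Toy stand-in for the compiler's dep-kind enum.
    #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
    enum ToyDepKind {
        Null,
        TypeOf,
    }

    impl DepKind for ToyDepKind {
        const NULL: Self = ToyDepKind::Null;

        fn is_eval_always(&self) -> bool {
            false
        }

        fn can_reconstruct_query_key(&self) -> bool {
            matches!(self, ToyDepKind::TypeOf)
        }
    }

    // Generic code names the "null" kind through the trait, not the enum.
    fn is_null<K: DepKind>(kind: K) -> bool {
        kind == K::NULL
    }

    fn main() {
        assert!(is_null(ToyDepKind::Null));
        assert!(!is_null(ToyDepKind::TypeOf));
    }

In the diff that follows, plumbing.rs accordingly switches from `crate::dep_graph::DepKind::Null` to `DepKind::NULL` on the generic `CTX::DepKind`.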
--- src/librustc/dep_graph/mod.rs | 6 ++++++ src/librustc/ty/query/config.rs | 3 +-- src/librustc/ty/query/plumbing.rs | 21 +++++++++++---------- src/librustc_query_system/dep_graph/mod.rs | 4 ++++ 4 files changed, 22 insertions(+), 12 deletions(-) diff --git a/src/librustc/dep_graph/mod.rs b/src/librustc/dep_graph/mod.rs index 3c39597584d..dfb962227ff 100644 --- a/src/librustc/dep_graph/mod.rs +++ b/src/librustc/dep_graph/mod.rs @@ -27,6 +27,8 @@ pub type PreviousDepGraph = rustc_query_system::dep_graph::PreviousDepGraph; impl rustc_query_system::dep_graph::DepKind for DepKind { + const NULL: Self = DepKind::Null; + fn is_eval_always(&self) -> bool { DepKind::is_eval_always(self) } @@ -82,6 +84,10 @@ impl rustc_query_system::dep_graph::DepKind for DepKind { op(icx.task_deps) }) } + + fn can_reconstruct_query_key(&self) -> bool { + DepKind::can_reconstruct_query_key(self) + } } impl<'tcx> DepContext for TyCtxt<'tcx> { diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index 77feb7f4df3..c3b0103cc0c 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -1,6 +1,5 @@ //! Query configuration and description traits. -use crate::dep_graph::DepKind; use crate::dep_graph::SerializedDepNodeIndex; use crate::ty::query::caches::QueryCache; use crate::ty::query::plumbing::CycleError; @@ -23,7 +22,7 @@ pub trait QueryConfig { type Value: Clone; } -pub trait QueryContext: DepContext { +pub trait QueryContext: DepContext { type Query; /// Access the session. diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 2d1614d6ccd..ede1ccdbc04 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -2,7 +2,7 @@ //! generate the actual methods on tcx which find and execute the provider, //! manage the caches, and so forth. -use crate::dep_graph::{DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex}; +use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; use crate::ty::query::caches::QueryCache; use crate::ty::query::config::{QueryContext, QueryDescription}; use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId}; @@ -17,6 +17,7 @@ use rustc_data_structures::sharded::Sharded; use rustc_data_structures::sync::{Lock, LockGuard}; use rustc_data_structures::thin_vec::ThinVec; use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, FatalError, Handler, Level}; +use rustc_query_system::dep_graph::{DepKind, DepNode}; use rustc_session::Session; use rustc_span::def_id::DefId; use rustc_span::source_map::DUMMY_SP; @@ -102,7 +103,7 @@ impl> QueryState { pub(super) fn try_collect_active_jobs( &self, - kind: DepKind, + kind: CTX::DepKind, make_query: fn(C::Key) -> CTX::Query, jobs: &mut FxHashMap, QueryJobInfo>, ) -> Option<()> @@ -375,7 +376,7 @@ impl<'tcx> TyCtxt<'tcx> { #[inline(always)] fn start_query( self, - token: QueryJobId, + token: QueryJobId, diagnostics: Option<&Lock>>, compute: F, ) -> R @@ -570,7 +571,7 @@ impl<'tcx> TyCtxt<'tcx> { // Fast path for when incr. comp. is off. `to_dep_node` is // expensive for some `DepKind`s. 
if !self.dep_graph.is_fully_enabled() { - let null_dep_node = DepNode::new_no_params(crate::dep_graph::DepKind::Null); + let null_dep_node = DepNode::new_no_params(DepKind::NULL); return self.force_query_with_job::(key, job, null_dep_node).0; } @@ -634,7 +635,7 @@ impl<'tcx> TyCtxt<'tcx> { key: Q::Key, prev_dep_node_index: SerializedDepNodeIndex, dep_node_index: DepNodeIndex, - dep_node: &DepNode, + dep_node: &DepNode, ) -> Q::Value { // Note this function can be called concurrently from the same query // We must ensure that this is handled correctly. @@ -689,7 +690,7 @@ impl<'tcx> TyCtxt<'tcx> { fn incremental_verify_ich>>( self, result: &Q::Value, - dep_node: &DepNode, + dep_node: &DepNode, dep_node_index: DepNodeIndex, ) { use rustc_data_structures::fingerprint::Fingerprint; @@ -716,8 +717,8 @@ impl<'tcx> TyCtxt<'tcx> { fn force_query_with_job> + 'tcx>( self, key: Q::Key, - job: JobOwner<'tcx, TyCtxt<'tcx>, Q::Cache>, - dep_node: DepNode, + job: JobOwner<'tcx, Self, Q::Cache>, + dep_node: DepNode, ) -> (Q::Value, DepNodeIndex) { // If the following assertion triggers, it can have two reasons: // 1. Something is wrong with DepNode creation, either here or @@ -754,7 +755,7 @@ impl<'tcx> TyCtxt<'tcx> { prof_timer.finish_with_query_invocation_id(dep_node_index.into()); if unlikely!(!diagnostics.is_empty()) { - if dep_node.kind != crate::dep_graph::DepKind::Null { + if dep_node.kind != DepKind::NULL { self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics); } } @@ -803,7 +804,7 @@ impl<'tcx> TyCtxt<'tcx> { self, key: Q::Key, span: Span, - dep_node: DepNode, + dep_node: DepNode, ) { // We may be concurrently trying both execute and force a query. // Ensure that only one of them runs the query. diff --git a/src/librustc_query_system/dep_graph/mod.rs b/src/librustc_query_system/dep_graph/mod.rs index 825b341cd14..888151782c7 100644 --- a/src/librustc_query_system/dep_graph/mod.rs +++ b/src/librustc_query_system/dep_graph/mod.rs @@ -54,6 +54,8 @@ pub trait DepContext: Copy { /// Describe the different families of dependency nodes. pub trait DepKind: Copy + fmt::Debug + Eq + Ord + Hash { + const NULL: Self; + /// Return whether this kind always require evaluation. fn is_eval_always(&self) -> bool; @@ -72,4 +74,6 @@ pub trait DepKind: Copy + fmt::Debug + Eq + Ord + Hash { fn read_deps(op: OP) -> () where OP: for<'a> FnOnce(Option<&'a Lock>>) -> (); + + fn can_reconstruct_query_key(&self) -> bool; } From 232364a580fb60199db8e3c9c1cfd391d0046a2a Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Wed, 18 Mar 2020 23:58:19 +0100 Subject: [PATCH 08/31] Generalise QueryLatch. --- src/librustc/ty/query/config.rs | 4 ++++ src/librustc/ty/query/job.rs | 22 ++++++++++++---------- src/librustc/ty/query/plumbing.rs | 4 ++++ 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index c3b0103cc0c..a4fb70f281c 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -2,6 +2,7 @@ use crate::dep_graph::SerializedDepNodeIndex; use crate::ty::query::caches::QueryCache; +use crate::ty::query::job::QueryJobId; use crate::ty::query::plumbing::CycleError; use crate::ty::query::QueryState; use rustc_data_structures::profiling::ProfileCategory; @@ -30,6 +31,9 @@ pub trait QueryContext: DepContext { /// Get string representation from DefPath. fn def_path_str(&self, def_id: DefId) -> String; + + /// Get the query information from the TLS context. 
+ fn read_query_job(&self, op: impl FnOnce(Option>) -> R) -> R; } pub(crate) trait QueryAccessors: QueryConfig { diff --git a/src/librustc/ty/query/job.rs b/src/librustc/ty/query/job.rs index 488615c7443..b4d986074a4 100644 --- a/src/librustc/ty/query/job.rs +++ b/src/librustc/ty/query/job.rs @@ -53,11 +53,13 @@ pub struct QueryJobId { pub kind: K, } -impl QueryJobId { - pub fn new(job: QueryShardJobId, shard: usize, kind: DepKind) -> Self { +impl QueryJobId { + pub fn new(job: QueryShardJobId, shard: usize, kind: K) -> Self { QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind } } +} +impl QueryJobId { fn query<'tcx>(self, map: &QueryMap<'tcx>) -> Query<'tcx> { map.get(&self).unwrap().info.query.clone() } @@ -223,16 +225,16 @@ impl QueryLatch { } #[cfg(parallel_compiler)] -impl<'tcx> QueryLatch> { +impl QueryLatch +where + K: rustc_query_system::dep_graph::DepKind, + CTX: QueryContext, +{ /// Awaits for the query job to complete. - pub(super) fn wait_on( - &self, - tcx: TyCtxt<'tcx>, - span: Span, - ) -> Result<(), CycleError>> { - tls::with_related_context(tcx, move |icx| { + pub(super) fn wait_on(&self, tcx: CTX, span: Span) -> Result<(), CycleError> { + tcx.read_query_job(move |query| { let waiter = Lrc::new(QueryWaiter { - query: icx.query, + query, span, cycle: Lock::new(None), condvar: Condvar::new(), diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index ede1ccdbc04..6d01f34c630 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -367,6 +367,10 @@ impl QueryContext for TyCtxt<'tcx> { fn def_path_str(&self, def_id: DefId) -> String { TyCtxt::def_path_str(*self, def_id) } + + fn read_query_job(&self, op: impl FnOnce(Option>) -> R) -> R { + tls::with_related_context(*self, move |icx| op(icx.query)) + } } impl<'tcx> TyCtxt<'tcx> { From 63087b6b15bff0346fda199e1a077ab3cdc30ba3 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 19 Mar 2020 08:28:24 +0100 Subject: [PATCH 09/31] Parametrise by try_collect_active_jobs. --- src/librustc/ty/query/config.rs | 11 ++- src/librustc/ty/query/job.rs | 120 +++++++++++++++--------------- src/librustc/ty/query/plumbing.rs | 16 ++-- 3 files changed, 78 insertions(+), 69 deletions(-) diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index a4fb70f281c..9495d9bfc6b 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -2,13 +2,14 @@ use crate::dep_graph::SerializedDepNodeIndex; use crate::ty::query::caches::QueryCache; -use crate::ty::query::job::QueryJobId; +use crate::ty::query::job::{QueryJobId, QueryJobInfo}; use crate::ty::query::plumbing::CycleError; use crate::ty::query::QueryState; use rustc_data_structures::profiling::ProfileCategory; use rustc_hir::def_id::DefId; use rustc_data_structures::fingerprint::Fingerprint; +use rustc_data_structures::fx::FxHashMap; use rustc_query_system::dep_graph::{DepContext, DepNode}; use rustc_session::Session; use std::borrow::Cow; @@ -24,7 +25,7 @@ pub trait QueryConfig { } pub trait QueryContext: DepContext { - type Query; + type Query: Clone; /// Access the session. fn session(&self) -> &Session; @@ -34,6 +35,10 @@ pub trait QueryContext: DepContext { /// Get the query information from the TLS context. 
fn read_query_job(&self, op: impl FnOnce(Option>) -> R) -> R; + + fn try_collect_active_jobs( + &self, + ) -> Option, QueryJobInfo>>; } pub(crate) trait QueryAccessors: QueryConfig { @@ -56,7 +61,7 @@ pub(crate) trait QueryAccessors: QueryConfig { result: &Self::Value, ) -> Option; - fn handle_cycle_error(tcx: CTX, error: CycleError) -> Self::Value; + fn handle_cycle_error(tcx: CTX, error: CycleError) -> Self::Value; } pub(crate) trait QueryDescription: QueryAccessors { diff --git a/src/librustc/ty/query/job.rs b/src/librustc/ty/query/job.rs index b4d986074a4..253c814588f 100644 --- a/src/librustc/ty/query/job.rs +++ b/src/librustc/ty/query/job.rs @@ -1,11 +1,10 @@ -use crate::dep_graph::DepKind; -use crate::ty::context::TyCtxt; use crate::ty::query::config::QueryContext; use crate::ty::query::plumbing::CycleError; -use crate::ty::query::Query; +#[cfg(parallel_compiler)] use crate::ty::tls; use rustc_data_structures::fx::FxHashMap; +use rustc_query_system::dep_graph::DepContext; use rustc_span::Span; use std::convert::TryFrom; @@ -28,13 +27,13 @@ use { /// Represents a span and a query key. #[derive(Clone, Debug)] -pub struct QueryInfo { +pub struct QueryInfo { /// The span corresponding to the reason for which this query was required. pub span: Span, - pub query: CTX::Query, + pub query: Q, } -type QueryMap<'tcx> = FxHashMap, QueryJobInfo>>; +type QueryMap = FxHashMap::DepKind>, QueryJobInfo>; /// A value uniquely identifiying an active query job within a shard in the query cache. #[derive(Copy, Clone, Eq, PartialEq, Hash)] @@ -53,35 +52,36 @@ pub struct QueryJobId { pub kind: K, } -impl QueryJobId { +impl QueryJobId { pub fn new(job: QueryShardJobId, shard: usize, kind: K) -> Self { QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind } } -} -impl QueryJobId { - fn query<'tcx>(self, map: &QueryMap<'tcx>) -> Query<'tcx> { + fn query>(self, map: &QueryMap) -> CTX::Query { map.get(&self).unwrap().info.query.clone() } #[cfg(parallel_compiler)] - fn span(self, map: &QueryMap<'_>) -> Span { + fn span>(self, map: &QueryMap) -> Span { map.get(&self).unwrap().job.span } #[cfg(parallel_compiler)] - fn parent(self, map: &QueryMap<'_>) -> Option> { + fn parent>(self, map: &QueryMap) -> Option> { map.get(&self).unwrap().job.parent } #[cfg(parallel_compiler)] - fn latch<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Option<&'a QueryLatch>> { + fn latch<'a, CTX: QueryContext>( + self, + map: &'a QueryMap, + ) -> Option<&'a QueryLatch> { map.get(&self).unwrap().job.latch.as_ref() } } pub struct QueryJobInfo { - pub info: QueryInfo, + pub info: QueryInfo, pub job: QueryJob, } @@ -147,16 +147,12 @@ pub(super) struct QueryLatch { } #[cfg(not(parallel_compiler))] -impl<'tcx> QueryLatch> { - pub(super) fn find_cycle_in_stack( - &self, - tcx: TyCtxt<'tcx>, - span: Span, - ) -> CycleError> { - let query_map = tcx.queries.try_collect_active_jobs().unwrap(); +impl QueryLatch { + pub(super) fn find_cycle_in_stack(&self, tcx: CTX, span: Span) -> CycleError { + let query_map = tcx.try_collect_active_jobs().unwrap(); // Get the current executing query (waiter) and find the waitee amongst its parents - let mut current_job = tls::with_related_context(tcx, |icx| icx.query); + let mut current_job = tcx.read_query_job(|query| query); let mut cycle = Vec::new(); while let Some(job) = current_job { @@ -192,7 +188,7 @@ struct QueryWaiter { query: Option>, condvar: Condvar, span: Span, - cycle: Lock>>, + cycle: Lock>>, } #[cfg(parallel_compiler)] @@ -225,13 +221,9 @@ impl QueryLatch { } #[cfg(parallel_compiler)] -impl 
QueryLatch -where - K: rustc_query_system::dep_graph::DepKind, - CTX: QueryContext, -{ +impl QueryLatch { /// Awaits for the query job to complete. - pub(super) fn wait_on(&self, tcx: CTX, span: Span) -> Result<(), CycleError> { + pub(super) fn wait_on(&self, tcx: CTX, span: Span) -> Result<(), CycleError> { tcx.read_query_job(move |query| { let waiter = Lrc::new(QueryWaiter { query, @@ -299,7 +291,7 @@ impl QueryLatch { /// A resumable waiter of a query. The usize is the index into waiters in the query's latch #[cfg(parallel_compiler)] -type Waiter = (QueryJobId, usize); +type Waiter = (QueryJobId, usize); /// Visits all the non-resumable and resumable waiters of a query. /// Only waiters in a query are visited. @@ -311,13 +303,13 @@ type Waiter = (QueryJobId, usize); /// required information to resume the waiter. /// If all `visit` calls returns None, this function also returns None. #[cfg(parallel_compiler)] -fn visit_waiters<'tcx, F>( - query_map: &QueryMap<'tcx>, - query: QueryJobId, +fn visit_waiters( + query_map: &QueryMap, + query: QueryJobId, mut visit: F, -) -> Option> +) -> Option>> where - F: FnMut(Span, QueryJobId) -> Option>, + F: FnMut(Span, QueryJobId) -> Option>>, { // Visit the parent query which is a non-resumable waiter since it's on the same stack if let Some(parent) = query.parent(query_map) { @@ -346,13 +338,13 @@ where /// If a cycle is detected, this initial value is replaced with the span causing /// the cycle. #[cfg(parallel_compiler)] -fn cycle_check<'tcx>( - query_map: &QueryMap<'tcx>, - query: QueryJobId, +fn cycle_check( + query_map: &QueryMap, + query: QueryJobId, span: Span, - stack: &mut Vec<(Span, QueryJobId)>, - visited: &mut FxHashSet>, -) -> Option> { + stack: &mut Vec<(Span, QueryJobId)>, + visited: &mut FxHashSet>, +) -> Option>> { if !visited.insert(query) { return if let Some(p) = stack.iter().position(|q| q.1 == query) { // We detected a query cycle, fix up the initial span and return Some @@ -387,10 +379,10 @@ fn cycle_check<'tcx>( /// from `query` without going through any of the queries in `visited`. /// This is achieved with a depth first search. #[cfg(parallel_compiler)] -fn connected_to_root<'tcx>( - query_map: &QueryMap<'tcx>, - query: QueryJobId, - visited: &mut FxHashSet>, +fn connected_to_root( + query_map: &QueryMap, + query: QueryJobId, + visited: &mut FxHashSet>, ) -> bool { // We already visited this or we're deliberately ignoring it if !visited.insert(query) { @@ -410,12 +402,12 @@ fn connected_to_root<'tcx>( // Deterministically pick an query from a list #[cfg(parallel_compiler)] -fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, QueryJobId)>( - query_map: &QueryMap<'tcx>, - tcx: TyCtxt<'tcx>, - queries: &'a [T], - f: F, -) -> &'a T { +fn pick_query<'a, CTX, T, F>(query_map: &QueryMap, tcx: CTX, queries: &'a [T], f: F) -> &'a T +where + CTX: QueryContext, + CTX::Query: HashStable, + F: Fn(&T) -> (Span, QueryJobId), +{ // Deterministically pick an entry point // FIXME: Sort this instead let mut hcx = tcx.create_stable_hashing_context(); @@ -440,12 +432,15 @@ fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, QueryJobId)>( /// If a cycle was not found, the starting query is removed from `jobs` and /// the function returns false. 
#[cfg(parallel_compiler)] -fn remove_cycle<'tcx>( - query_map: &QueryMap<'tcx>, - jobs: &mut Vec>, - wakelist: &mut Vec>>>, - tcx: TyCtxt<'tcx>, -) -> bool { +fn remove_cycle( + query_map: &QueryMap, + jobs: &mut Vec>, + wakelist: &mut Vec>>, + tcx: CTX, +) -> bool +where + CTX::Query: HashStable, +{ let mut visited = FxHashSet::default(); let mut stack = Vec::new(); // Look for a cycle starting with the last query in `jobs` @@ -497,7 +492,7 @@ fn remove_cycle<'tcx>( } } }) - .collect::, Option<(Span, QueryJobId)>)>>(); + .collect::, Option<(Span, QueryJobId)>)>>(); // Deterministically pick an entry point let (_, entry_point, usage) = pick_query(query_map, tcx, &entry_points, |e| (e.0, e.1)); @@ -569,15 +564,18 @@ pub unsafe fn handle_deadlock() { /// There may be multiple cycles involved in a deadlock, so this searches /// all active queries for cycles before finally resuming all the waiters at once. #[cfg(parallel_compiler)] -fn deadlock(tcx: TyCtxt<'_>, registry: &rayon_core::Registry) { +fn deadlock(tcx: CTX, registry: &rayon_core::Registry) +where + CTX::Query: HashStable, +{ let on_panic = OnDrop(|| { eprintln!("deadlock handler panicked, aborting process"); process::abort(); }); let mut wakelist = Vec::new(); - let query_map = tcx.queries.try_collect_active_jobs().unwrap(); - let mut jobs: Vec> = query_map.keys().cloned().collect(); + let query_map = tcx.try_collect_active_jobs().unwrap(); + let mut jobs: Vec> = query_map.keys().cloned().collect(); let mut found_cycle = false; diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 6d01f34c630..14c0aea7c8f 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -332,10 +332,10 @@ where } #[derive(Clone)] -pub(crate) struct CycleError { +pub(crate) struct CycleError { /// The query and related span that uses the cycle. - pub(super) usage: Option<(Span, CTX::Query)>, - pub(super) cycle: Vec>, + pub(super) usage: Option<(Span, Q)>, + pub(super) cycle: Vec>, } /// The result of `try_start`. @@ -371,6 +371,12 @@ impl QueryContext for TyCtxt<'tcx> { fn read_query_job(&self, op: impl FnOnce(Option>) -> R) -> R { tls::with_related_context(*self, move |icx| op(icx.query)) } + + fn try_collect_active_jobs( + &self, + ) -> Option, QueryJobInfo>> { + self.queries.try_collect_active_jobs() + } } impl<'tcx> TyCtxt<'tcx> { @@ -409,7 +415,7 @@ impl<'tcx> TyCtxt<'tcx> { #[cold] pub(super) fn report_cycle( self, - CycleError { usage, cycle: stack }: CycleError>, + CycleError { usage, cycle: stack }: CycleError>, ) -> DiagnosticBuilder<'tcx> { assert!(!stack.is_empty()); @@ -1033,7 +1039,7 @@ macro_rules! define_queries_inner { fn handle_cycle_error( tcx: TyCtxt<'tcx>, - error: CycleError> + error: CycleError> ) -> Self::Value { handle_cycle_error!([$($modifiers)*][tcx, error]) } From 42f0db59873ab47d5c8a00620e94bea63dea35a0 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 19 Mar 2020 13:15:34 +0100 Subject: [PATCH 10/31] Move HashStable bound to the trait definition. 
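
Requiring `HashStable` directly on the `Query` associated type of `QueryContext` lets the bound be stated once instead of being repeated as a separate `HashStable` where-clause on `pick_query`, `remove_cycle` and `deadlock`.

A minimal standalone sketch of the pattern, using `std::hash::Hash` in place of `HashStable` and invented names, so it is an illustration rather than the actual rustc types:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // The bound lives on the associated type, so every generic user of
    // `C::Query` gets it without restating a `where` clause.
    trait Ctx {
        type Query: Clone + Hash;
    }

    // Before this change the equivalent function needed `where C::Query: Hash`.
    fn fingerprint<C: Ctx>(q: &C::Query) -> u64 {
        let mut h = DefaultHasher::new();
        q.hash(&mut h);
        h.finish()
    }

    struct Tcx;
    impl Ctx for Tcx {
        type Query = String;
    }

    fn main() {
        println!("{:x}", fingerprint::<Tcx>(&String::from("typeck")));
    }
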
--- src/librustc/ty/query/config.rs | 3 ++- src/librustc/ty/query/job.rs | 11 ++--------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index 9495d9bfc6b..04662986702 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -10,6 +10,7 @@ use rustc_hir::def_id::DefId; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::stable_hasher::HashStable; use rustc_query_system::dep_graph::{DepContext, DepNode}; use rustc_session::Session; use std::borrow::Cow; @@ -25,7 +26,7 @@ pub trait QueryConfig { } pub trait QueryContext: DepContext { - type Query: Clone; + type Query: Clone + HashStable; /// Access the session. fn session(&self) -> &Session; diff --git a/src/librustc/ty/query/job.rs b/src/librustc/ty/query/job.rs index 253c814588f..7f0156abdea 100644 --- a/src/librustc/ty/query/job.rs +++ b/src/librustc/ty/query/job.rs @@ -405,7 +405,6 @@ fn connected_to_root( fn pick_query<'a, CTX, T, F>(query_map: &QueryMap, tcx: CTX, queries: &'a [T], f: F) -> &'a T where CTX: QueryContext, - CTX::Query: HashStable, F: Fn(&T) -> (Span, QueryJobId), { // Deterministically pick an entry point @@ -437,10 +436,7 @@ fn remove_cycle( jobs: &mut Vec>, wakelist: &mut Vec>>, tcx: CTX, -) -> bool -where - CTX::Query: HashStable, -{ +) -> bool { let mut visited = FxHashSet::default(); let mut stack = Vec::new(); // Look for a cycle starting with the last query in `jobs` @@ -564,10 +560,7 @@ pub unsafe fn handle_deadlock() { /// There may be multiple cycles involved in a deadlock, so this searches /// all active queries for cycles before finally resuming all the waiters at once. #[cfg(parallel_compiler)] -fn deadlock(tcx: CTX, registry: &rayon_core::Registry) -where - CTX::Query: HashStable, -{ +fn deadlock(tcx: CTX, registry: &rayon_core::Registry) { let on_panic = OnDrop(|| { eprintln!("deadlock handler panicked, aborting process"); process::abort(); From decfd704fe17b4da16d57cb133ff3d29f9bcf295 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 19 Mar 2020 17:53:31 +0100 Subject: [PATCH 11/31] Generalise try_get_cached. --- src/librustc/ty/query/plumbing.rs | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 14c0aea7c8f..dbbfc4af825 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -247,7 +247,8 @@ where return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle)); } - let cached = tcx.try_get_cached( + let cached = try_get_cached( + tcx, Q::query_state(tcx), (*key).clone(), |value, index| (value.clone(), index), @@ -500,32 +501,34 @@ impl<'tcx> TyCtxt<'tcx> { eprintln!("end of query stack"); } +} /// Checks if the query is already computed and in the cache. /// It returns the shard index and a lock guard to the shard, /// which will be used if the query is not in the cache and we need /// to compute it. 
#[inline(always)] - fn try_get_cached( - self, - state: &'tcx QueryState, C>, + fn try_get_cached( + tcx: CTX, + state: &QueryState, key: C::Key, // `on_hit` can be called while holding a lock to the query cache on_hit: OnHit, on_miss: OnMiss, ) -> R where - C: QueryCache>, + C: QueryCache, + CTX: QueryContext, OnHit: FnOnce(&C::Value, DepNodeIndex) -> R, - OnMiss: FnOnce(C::Key, QueryLookup<'_, TyCtxt<'tcx>, C::Key, C::Sharded>) -> R, + OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R, { state.cache.lookup( state, - QueryStateShard::, C::Key, C::Sharded>::get_cache, + QueryStateShard::::get_cache, key, |value, index| { - if unlikely!(self.prof.enabled()) { - self.prof.query_cache_hit(index.into()); + if unlikely!(tcx.profiler().enabled()) { + tcx.profiler().query_cache_hit(index.into()); } #[cfg(debug_assertions)] { @@ -537,6 +540,7 @@ impl<'tcx> TyCtxt<'tcx> { ) } +impl<'tcx> TyCtxt<'tcx> { #[inline(never)] pub(super) fn get_query> + 'tcx>( self, @@ -545,7 +549,8 @@ impl<'tcx> TyCtxt<'tcx> { ) -> Q::Value { debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span); - self.try_get_cached( + try_get_cached( + self, Q::query_state(self), key, |value, index| { @@ -819,7 +824,8 @@ impl<'tcx> TyCtxt<'tcx> { // We may be concurrently trying both execute and force a query. // Ensure that only one of them runs the query. - self.try_get_cached( + try_get_cached( + self, Q::query_state(self), key, |_, _| { From 4ac4ccd727e8eaccae1e58b303347e7991f4d355 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 19 Mar 2020 17:59:19 +0100 Subject: [PATCH 12/31] Generalise JobOwner::try_start. --- src/librustc/ty/query/plumbing.rs | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index dbbfc4af825..75420634a5a 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -162,9 +162,9 @@ where id: QueryJobId, } -impl<'tcx, C> JobOwner<'tcx, TyCtxt<'tcx>, C> +impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C> where - C: QueryCache> + 'tcx, + C: QueryCache, C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, { @@ -177,14 +177,16 @@ where /// This function is inlined because that results in a noticeable speed-up /// for some compile-time benchmarks. #[inline(always)] - fn try_start<'a, 'b, Q>( - tcx: TyCtxt<'tcx>, + fn try_start<'a, 'b, Q, K>( + tcx: CTX, span: Span, key: &C::Key, - mut lookup: QueryLookup<'a, TyCtxt<'tcx>, C::Key, C::Sharded>, - ) -> TryGetJob<'b, TyCtxt<'tcx>, C> + mut lookup: QueryLookup<'a, CTX, C::Key, C::Sharded>, + ) -> TryGetJob<'b, CTX, C> where - Q: QueryDescription, Key = C::Key, Value = C::Value, Cache = C>, + K: DepKind, + Q: QueryDescription, + CTX: QueryContext, { let lock = &mut *lookup.lock; @@ -196,7 +198,7 @@ where // in another thread has completed. Record how long we wait in the // self-profiler. 
let _query_blocked_prof_timer = if cfg!(parallel_compiler) { - Some(tcx.prof.query_blocked()) + Some(tcx.profiler().query_blocked()) } else { None }; @@ -219,7 +221,7 @@ where let global_id = QueryJobId::new(id, lookup.shard, Q::DEP_KIND); - let job = tls::with_related_context(tcx, |icx| QueryJob::new(id, span, icx.query)); + let job = tcx.read_query_job(|query| QueryJob::new(id, span, query)); entry.insert(QueryResult::Started(job)); @@ -262,14 +264,7 @@ where return TryGetJob::JobCompleted(cached); } } -} -impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C> -where - C: QueryCache, - C::Key: Eq + Hash + Clone + Debug, - C::Value: Clone, -{ /// Completes the query by updating the query cache with the `result`, /// signals the waiter and forgets the JobOwner, so it won't poison the query #[inline(always)] @@ -573,7 +568,7 @@ impl<'tcx> TyCtxt<'tcx> { >>::Sharded, >, ) -> Q::Value { - let job = match JobOwner::try_start::(self, span, &key, lookup) { + let job = match JobOwner::try_start::(self, span, &key, lookup) { TryGetJob::NotYetStarted(job) => job, TryGetJob::Cycle(result) => return result, #[cfg(parallel_compiler)] @@ -832,7 +827,7 @@ impl<'tcx> TyCtxt<'tcx> { // Cache hit, do nothing }, |key, lookup| { - let job = match JobOwner::try_start::(self, span, &key, lookup) { + let job = match JobOwner::try_start::(self, span, &key, lookup) { TryGetJob::NotYetStarted(job) => job, TryGetJob::Cycle(_) => return, #[cfg(parallel_compiler)] From 27e8a95717c9f807ea1b4fc0e85da6813433011e Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 19 Mar 2020 18:31:17 +0100 Subject: [PATCH 13/31] Generalise Query starting. --- src/librustc/dep_graph/mod.rs | 4 + src/librustc/ty/query/config.rs | 18 ++- src/librustc/ty/query/plumbing.rs | 171 ++++++++++++--------- src/librustc_query_system/dep_graph/mod.rs | 5 +- 4 files changed, 122 insertions(+), 76 deletions(-) diff --git a/src/librustc/dep_graph/mod.rs b/src/librustc/dep_graph/mod.rs index dfb962227ff..556b1479b61 100644 --- a/src/librustc/dep_graph/mod.rs +++ b/src/librustc/dep_graph/mod.rs @@ -166,6 +166,10 @@ impl<'tcx> DepContext for TyCtxt<'tcx> { self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics) } + fn store_diagnostics_for_anon_node(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec) { + self.queries.on_disk_cache.store_diagnostics_for_anon_node(dep_node_index, diagnostics) + } + fn profiler(&self) -> &SelfProfilerRef { &self.prof } diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index 04662986702..91e82858b0b 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -11,7 +11,10 @@ use rustc_hir::def_id::DefId; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::stable_hasher::HashStable; -use rustc_query_system::dep_graph::{DepContext, DepNode}; +use rustc_data_structures::sync::Lock; +use rustc_data_structures::thin_vec::ThinVec; +use rustc_errors::Diagnostic; +use rustc_query_system::dep_graph::{DepContext, DepGraph, DepNode}; use rustc_session::Session; use std::borrow::Cow; use std::fmt::Debug; @@ -34,12 +37,25 @@ pub trait QueryContext: DepContext { /// Get string representation from DefPath. fn def_path_str(&self, def_id: DefId) -> String; + /// Access the DepGraph. + fn dep_graph(&self) -> &DepGraph; + /// Get the query information from the TLS context. 
fn read_query_job(&self, op: impl FnOnce(Option>) -> R) -> R; fn try_collect_active_jobs( &self, ) -> Option, QueryJobInfo>>; + + /// Executes a job by changing the `ImplicitCtxt` to point to the + /// new query job while it executes. It returns the diagnostics + /// captured during execution and the actual result. + fn start_query( + &self, + token: QueryJobId, + diagnostics: Option<&Lock>>, + compute: impl FnOnce(Self) -> R, + ) -> R; } pub(crate) trait QueryAccessors: QueryConfig { diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 75420634a5a..ae0ca080dac 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -7,7 +7,7 @@ use crate::ty::query::caches::QueryCache; use crate::ty::query::config::{QueryContext, QueryDescription}; use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId}; use crate::ty::query::Query; -use crate::ty::tls; +use crate::ty::tls::{self, ImplicitCtxt}; use crate::ty::{self, TyCtxt}; #[cfg(not(parallel_compiler))] @@ -17,7 +17,8 @@ use rustc_data_structures::sharded::Sharded; use rustc_data_structures::sync::{Lock, LockGuard}; use rustc_data_structures::thin_vec::ThinVec; use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, FatalError, Handler, Level}; -use rustc_query_system::dep_graph::{DepKind, DepNode}; +use rustc_query_system::dep_graph::{DepContext, DepGraph, DepKind, DepNode}; +use rustc_query_system::HashStableContextProvider; use rustc_session::Session; use rustc_span::def_id::DefId; use rustc_span::source_map::DUMMY_SP; @@ -364,6 +365,10 @@ impl QueryContext for TyCtxt<'tcx> { TyCtxt::def_path_str(*self, def_id) } + fn dep_graph(&self) -> &DepGraph { + &self.dep_graph + } + fn read_query_job(&self, op: impl FnOnce(Option>) -> R) -> R { tls::with_related_context(*self, move |icx| op(icx.query)) } @@ -373,29 +378,24 @@ impl QueryContext for TyCtxt<'tcx> { ) -> Option, QueryJobInfo>> { self.queries.try_collect_active_jobs() } -} -impl<'tcx> TyCtxt<'tcx> { /// Executes a job by changing the `ImplicitCtxt` to point to the /// new query job while it executes. It returns the diagnostics /// captured during execution and the actual result. #[inline(always)] - fn start_query( - self, - token: QueryJobId, + fn start_query( + &self, + token: QueryJobId, diagnostics: Option<&Lock>>, - compute: F, - ) -> R - where - F: FnOnce(TyCtxt<'tcx>) -> R, - { + compute: impl FnOnce(Self) -> R, + ) -> R { // The `TyCtxt` stored in TLS has the same global interner lifetime // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes // when accessing the `ImplicitCtxt`. - tls::with_related_context(self, move |current_icx| { + tls::with_related_context(*self, move |current_icx| { // Update the `ImplicitCtxt` to point to our new query job. - let new_icx = tls::ImplicitCtxt { - tcx: self, + let new_icx = ImplicitCtxt { + tcx: *self, query: Some(token), diagnostics, layout_depth: current_icx.layout_depth, @@ -403,10 +403,12 @@ impl<'tcx> TyCtxt<'tcx> { }; // Use the `ImplicitCtxt` while we execute the query. 
- tls::enter_context(&new_icx, |_| compute(self)) + tls::enter_context(&new_icx, |_| compute(*self)) }) } +} +impl<'tcx> TyCtxt<'tcx> { #[inline(never)] #[cold] pub(super) fn report_cycle( @@ -552,74 +554,80 @@ impl<'tcx> TyCtxt<'tcx> { self.dep_graph.read_index(index); value.clone() }, - |key, lookup| self.try_execute_query::(span, key, lookup), + |key, lookup| try_execute_query::(self, span, key, lookup), ) } +} #[inline(always)] - fn try_execute_query> + 'tcx>( - self, + fn try_execute_query( + tcx: CTX, span: Span, key: Q::Key, lookup: QueryLookup< '_, - TyCtxt<'tcx>, + CTX, Q::Key, - >>::Sharded, + >::Sharded, >, - ) -> Q::Value { - let job = match JobOwner::try_start::(self, span, &key, lookup) { + ) -> Q::Value + where + Q: QueryDescription, + CTX: QueryContext, + CTX: HashStableContextProvider<::StableHashingContext>, + K: DepKind, + { + let job = match JobOwner::try_start::(tcx, span, &key, lookup) { TryGetJob::NotYetStarted(job) => job, TryGetJob::Cycle(result) => return result, #[cfg(parallel_compiler)] TryGetJob::JobCompleted((v, index)) => { - self.dep_graph.read_index(index); + tcx.dep_graph().read_index(index); return v; } }; // Fast path for when incr. comp. is off. `to_dep_node` is // expensive for some `DepKind`s. - if !self.dep_graph.is_fully_enabled() { + if !tcx.dep_graph().is_fully_enabled() { let null_dep_node = DepNode::new_no_params(DepKind::NULL); - return self.force_query_with_job::(key, job, null_dep_node).0; + return force_query_with_job::(tcx, key, job, null_dep_node).0; } if Q::ANON { - let prof_timer = self.prof.query_provider(); + let prof_timer = tcx.profiler().query_provider(); let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| { - self.start_query(job.id, diagnostics, |tcx| { - tcx.dep_graph.with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key)) + tcx.start_query(job.id, diagnostics, |tcx| { + tcx.dep_graph().with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key)) }) }); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); - self.dep_graph.read_index(dep_node_index); + tcx.dep_graph().read_index(dep_node_index); if unlikely!(!diagnostics.is_empty()) { - self.queries - .on_disk_cache - .store_diagnostics_for_anon_node(dep_node_index, diagnostics); + tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics); } - job.complete(self, &result, dep_node_index); + job.complete(tcx, &result, dep_node_index); return result; } - let dep_node = Q::to_dep_node(self, &key); + let dep_node = Q::to_dep_node(tcx, &key); if !Q::EVAL_ALWAYS { // The diagnostics for this query will be // promoted to the current session during // `try_mark_green()`, so we can ignore them here. 
- let loaded = self.start_query(job.id, None, |tcx| { - let marked = tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node); + let loaded = tcx.start_query(job.id, None, |tcx| { + let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node); marked.map(|(prev_dep_node_index, dep_node_index)| { ( - tcx.load_from_disk_and_cache_in_memory::( + load_from_disk_and_cache_in_memory::( + tcx, key.clone(), prev_dep_node_index, dep_node_index, @@ -630,32 +638,36 @@ impl<'tcx> TyCtxt<'tcx> { }) }); if let Some((result, dep_node_index)) = loaded { - job.complete(self, &result, dep_node_index); + job.complete(tcx, &result, dep_node_index); return result; } } - let (result, dep_node_index) = self.force_query_with_job::(key, job, dep_node); - self.dep_graph.read_index(dep_node_index); + let (result, dep_node_index) = force_query_with_job::(tcx, key, job, dep_node); + tcx.dep_graph().read_index(dep_node_index); result } - fn load_from_disk_and_cache_in_memory>>( - self, + fn load_from_disk_and_cache_in_memory( + tcx: CTX, key: Q::Key, prev_dep_node_index: SerializedDepNodeIndex, dep_node_index: DepNodeIndex, - dep_node: &DepNode, - ) -> Q::Value { + dep_node: &DepNode, + ) -> Q::Value + where + CTX: QueryContext, + Q: QueryDescription, + { // Note this function can be called concurrently from the same query // We must ensure that this is handled correctly. - debug_assert!(self.dep_graph.is_green(dep_node)); + debug_assert!(tcx.dep_graph().is_green(dep_node)); // First we try to load the result from the on-disk cache. - let result = if Q::cache_on_disk(self, key.clone(), None) { - let prof_timer = self.prof.incr_cache_loading(); - let result = Q::try_load_from_disk(self, prev_dep_node_index); + let result = if Q::cache_on_disk(tcx, key.clone(), None) { + let prof_timer = tcx.profiler().incr_cache_loading(); + let result = Q::try_load_from_disk(tcx, prev_dep_node_index); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); // We always expect to find a cached result for things that @@ -676,10 +688,10 @@ impl<'tcx> TyCtxt<'tcx> { } else { // We could not load a result from the on-disk cache, so // recompute. - let prof_timer = self.prof.query_provider(); + let prof_timer = tcx.profiler().query_provider(); // The dep-graph for this computation is already in-place. - let result = self.dep_graph.with_ignore(|| Q::compute(self, key)); + let result = tcx.dep_graph().with_ignore(|| Q::compute(tcx, key)); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -688,8 +700,8 @@ impl<'tcx> TyCtxt<'tcx> { // If `-Zincremental-verify-ich` is specified, re-hash results from // the cache and make sure that they have the expected fingerprint. 
- if unlikely!(self.sess.opts.debugging_opts.incremental_verify_ich) { - self.incremental_verify_ich::(&result, dep_node, dep_node_index); + if unlikely!(tcx.session().opts.debugging_opts.incremental_verify_ich) { + incremental_verify_ich::(tcx, &result, dep_node, dep_node_index); } result @@ -697,46 +709,56 @@ impl<'tcx> TyCtxt<'tcx> { #[inline(never)] #[cold] - fn incremental_verify_ich>>( - self, + fn incremental_verify_ich( + tcx: CTX, result: &Q::Value, - dep_node: &DepNode, + dep_node: &DepNode, dep_node_index: DepNodeIndex, - ) { + ) + where + CTX: QueryContext, + Q: QueryDescription, + { use rustc_data_structures::fingerprint::Fingerprint; assert!( - Some(self.dep_graph.fingerprint_of(dep_node_index)) - == self.dep_graph.prev_fingerprint_of(dep_node), + Some(tcx.dep_graph().fingerprint_of(dep_node_index)) + == tcx.dep_graph().prev_fingerprint_of(dep_node), "fingerprint for green query instance not loaded from cache: {:?}", dep_node, ); debug!("BEGIN verify_ich({:?})", dep_node); - let mut hcx = self.create_stable_hashing_context(); + let mut hcx = tcx.create_stable_hashing_context(); let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO); debug!("END verify_ich({:?})", dep_node); - let old_hash = self.dep_graph.fingerprint_of(dep_node_index); + let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index); assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,); } #[inline(always)] - fn force_query_with_job> + 'tcx>( - self, + fn force_query_with_job( + tcx: CTX, key: Q::Key, - job: JobOwner<'tcx, Self, Q::Cache>, - dep_node: DepNode, - ) -> (Q::Value, DepNodeIndex) { + job: JobOwner<'tcx, CTX, Q::Cache>, + dep_node: DepNode, + ) -> (Q::Value, DepNodeIndex) + where + Q: QueryDescription, + CTX: QueryContext, + CTX: HashStableContextProvider<::StableHashingContext>, + K: DepKind, + { // If the following assertion triggers, it can have two reasons: // 1. Something is wrong with DepNode creation, either here or // in `DepGraph::try_mark_green()`. // 2. Two distinct query keys get mapped to the same `DepNode` // (see for example #48923). assert!( - !self.dep_graph.dep_node_exists(&dep_node), + !tcx.dep_graph().dep_node_exists(&dep_node), "forcing query with already existing `DepNode`\n\ - query-key: {:?}\n\ - dep-node: {:?}", @@ -744,12 +766,12 @@ impl<'tcx> TyCtxt<'tcx> { dep_node ); - let prof_timer = self.prof.query_provider(); + let prof_timer = tcx.profiler().query_provider(); let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| { - self.start_query(job.id, diagnostics, |tcx| { + tcx.start_query(job.id, diagnostics, |tcx| { if Q::EVAL_ALWAYS { - tcx.dep_graph.with_eval_always_task( + tcx.dep_graph().with_eval_always_task( dep_node, tcx, key, @@ -757,7 +779,7 @@ impl<'tcx> TyCtxt<'tcx> { Q::hash_result, ) } else { - tcx.dep_graph.with_task(dep_node, tcx, key, Q::compute, Q::hash_result) + tcx.dep_graph().with_task(dep_node, tcx, key, Q::compute, Q::hash_result) } }) }); @@ -766,15 +788,16 @@ impl<'tcx> TyCtxt<'tcx> { if unlikely!(!diagnostics.is_empty()) { if dep_node.kind != DepKind::NULL { - self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics); + tcx.store_diagnostics(dep_node_index, diagnostics); } } - job.complete(self, &result, dep_node_index); + job.complete(tcx, &result, dep_node_index); (result, dep_node_index) } +impl<'tcx> TyCtxt<'tcx> { /// Ensure that either this query has all green inputs or been executed. 
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`. /// @@ -833,7 +856,7 @@ impl<'tcx> TyCtxt<'tcx> { #[cfg(parallel_compiler)] TryGetJob::JobCompleted(_) => return, }; - self.force_query_with_job::(key, job, dep_node); + force_query_with_job::(self, key, job, dep_node); }, ); } diff --git a/src/librustc_query_system/dep_graph/mod.rs b/src/librustc_query_system/dep_graph/mod.rs index 888151782c7..ca4377e783d 100644 --- a/src/librustc_query_system/dep_graph/mod.rs +++ b/src/librustc_query_system/dep_graph/mod.rs @@ -23,7 +23,7 @@ use rustc_errors::Diagnostic; use std::fmt; use std::hash::Hash; -pub trait DepContext: Copy { +pub trait DepContext: Copy + DepGraphSafe { type DepKind: self::DepKind; type StableHashingContext: crate::HashStableContext; @@ -48,6 +48,9 @@ pub trait DepContext: Copy { /// Register diagnostics for the given node, for use in next session. fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec); + /// Register diagnostics for the given node, for use in next session. + fn store_diagnostics_for_anon_node(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec); + /// Access the profiler. fn profiler(&self) -> &SelfProfilerRef; } From 6184a71a391c3bbbf0f4fa3dcabcd6a2ec1a4a46 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 19 Mar 2020 18:43:01 +0100 Subject: [PATCH 14/31] Make get_query into an extension trait. --- src/librustc/ty/query/plumbing.rs | 84 ++++++++++++++++++++----------- 1 file changed, 55 insertions(+), 29 deletions(-) diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index ae0ca080dac..65a7081bd38 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -537,28 +537,6 @@ impl<'tcx> TyCtxt<'tcx> { ) } -impl<'tcx> TyCtxt<'tcx> { - #[inline(never)] - pub(super) fn get_query> + 'tcx>( - self, - span: Span, - key: Q::Key, - ) -> Q::Value { - debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span); - - try_get_cached( - self, - Q::query_state(self), - key, - |value, index| { - self.dep_graph.read_index(index); - value.clone() - }, - |key, lookup| try_execute_query::(self, span, key, lookup), - ) - } -} - #[inline(always)] fn try_execute_query( tcx: CTX, @@ -797,7 +775,13 @@ impl<'tcx> TyCtxt<'tcx> { (result, dep_node_index) } -impl<'tcx> TyCtxt<'tcx> { +pub(super) trait QueryGetter: QueryContext { + fn get_query>( + self, + span: Span, + key: Q::Key, + ) -> Q::Value; + /// Ensure that either this query has all green inputs or been executed. /// Executing `query::ensure(D)` is considered a read of the dep-node `D`. /// @@ -805,7 +789,50 @@ impl<'tcx> TyCtxt<'tcx> { /// side-effects -- e.g., in order to report errors for erroneous programs. /// /// Note: The optimization is only available during incr. comp. 
- pub(super) fn ensure_query> + 'tcx>(self, key: Q::Key) { + fn ensure_query>(self, key: Q::Key); + + fn force_query>( + self, + key: Q::Key, + span: Span, + dep_node: DepNode, + ); +} + +impl QueryGetter for CTX +where + CTX: QueryContext, + CTX: HashStableContextProvider<::StableHashingContext>, + K: DepKind, +{ + #[inline(never)] + fn get_query>( + self, + span: Span, + key: Q::Key, + ) -> Q::Value { + debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span); + + try_get_cached( + self, + Q::query_state(self), + key, + |value, index| { + self.dep_graph().read_index(index); + value.clone() + }, + |key, lookup| try_execute_query::(self, span, key, lookup), + ) + } + + /// Ensure that either this query has all green inputs or been executed. + /// Executing `query::ensure(D)` is considered a read of the dep-node `D`. + /// + /// This function is particularly useful when executing passes for their + /// side-effects -- e.g., in order to report errors for erroneous programs. + /// + /// Note: The optimization is only available during incr. comp. + fn ensure_query>(self, key: Q::Key) { if Q::EVAL_ALWAYS { let _ = self.get_query::(DUMMY_SP, key); return; @@ -816,7 +843,7 @@ impl<'tcx> TyCtxt<'tcx> { let dep_node = Q::to_dep_node(self, &key); - match self.dep_graph.try_mark_green_and_read(self, &dep_node) { + match self.dep_graph().try_mark_green_and_read(self, &dep_node) { None => { // A None return from `try_mark_green_and_read` means that this is either // a new dep node or that the dep node has already been marked red. @@ -827,17 +854,16 @@ impl<'tcx> TyCtxt<'tcx> { let _ = self.get_query::(DUMMY_SP, key); } Some((_, dep_node_index)) => { - self.prof.query_cache_hit(dep_node_index.into()); + self.profiler().query_cache_hit(dep_node_index.into()); } } } - #[allow(dead_code)] - pub(super) fn force_query> + 'tcx>( + fn force_query>( self, key: Q::Key, span: Span, - dep_node: DepNode, + dep_node: DepNode, ) { // We may be concurrently trying both execute and force a query. // Ensure that only one of them runs the query. From 5b8dac3c39a5f5551e995e7a6f44b4ed72c4923d Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 19 Mar 2020 08:49:03 +0100 Subject: [PATCH 15/31] Move query system to librustc_query_system. 
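
This commit is a pure move: the six query files are renamed from src/librustc/ty/query to src/librustc_query_system/query with no content changes (the diffstat below shows 100% renames), which keeps the move reviewable; the follow-up commit adapts the code so it actually builds in the new crate. Once re-exported there, callers are expected to reach the generic machinery through the new path, roughly:

    // Illustrative only; the exact re-exports are settled in the next commits.
    use rustc_query_system::query::{QueryCache, QueryContext, QueryState};
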
--- src/{librustc/ty => librustc_query_system}/query/README.md | 0 src/{librustc/ty => librustc_query_system}/query/caches.rs | 0 src/{librustc/ty => librustc_query_system}/query/config.rs | 0 src/{librustc/ty => librustc_query_system}/query/job.rs | 0 src/{librustc/ty => librustc_query_system}/query/mod.rs | 0 src/{librustc/ty => librustc_query_system}/query/plumbing.rs | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename src/{librustc/ty => librustc_query_system}/query/README.md (100%) rename src/{librustc/ty => librustc_query_system}/query/caches.rs (100%) rename src/{librustc/ty => librustc_query_system}/query/config.rs (100%) rename src/{librustc/ty => librustc_query_system}/query/job.rs (100%) rename src/{librustc/ty => librustc_query_system}/query/mod.rs (100%) rename src/{librustc/ty => librustc_query_system}/query/plumbing.rs (100%) diff --git a/src/librustc/ty/query/README.md b/src/librustc_query_system/query/README.md similarity index 100% rename from src/librustc/ty/query/README.md rename to src/librustc_query_system/query/README.md diff --git a/src/librustc/ty/query/caches.rs b/src/librustc_query_system/query/caches.rs similarity index 100% rename from src/librustc/ty/query/caches.rs rename to src/librustc_query_system/query/caches.rs diff --git a/src/librustc/ty/query/config.rs b/src/librustc_query_system/query/config.rs similarity index 100% rename from src/librustc/ty/query/config.rs rename to src/librustc_query_system/query/config.rs diff --git a/src/librustc/ty/query/job.rs b/src/librustc_query_system/query/job.rs similarity index 100% rename from src/librustc/ty/query/job.rs rename to src/librustc_query_system/query/job.rs diff --git a/src/librustc/ty/query/mod.rs b/src/librustc_query_system/query/mod.rs similarity index 100% rename from src/librustc/ty/query/mod.rs rename to src/librustc_query_system/query/mod.rs diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs similarity index 100% rename from src/librustc/ty/query/plumbing.rs rename to src/librustc_query_system/query/plumbing.rs From 8e873c3c646b7739268f139ac140b6ac4218e979 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 19 Mar 2020 14:13:31 +0100 Subject: [PATCH 16/31] Make librustc_query_system compile. 
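
The moved code can no longer reach `TyCtxt`, `tls`, or librustc's `bug!` macro, so this commit adds the missing crate dependencies (rustc-rayon-core, rustc_session, rustc_span), turns `pub(crate)` items into `pub` so librustc can keep using them, replaces `bug!` with `panic!`, and drops the rustc-specific pieces (the `QueryContext` impl for `TyCtxt`, cycle reporting, the query-definition macros) from the moved files, since those belong with the compiler itself.

The intended layering, as a rough standalone sketch in which the module names stand in for the two crates and all names are invented for illustration:

    // Stands in for librustc_query_system: only generic items, all `pub`.
    mod query_system {
        pub trait QueryCtx: Copy {
            fn crate_name(&self) -> &'static str;
        }

        pub fn run_query<C: QueryCtx>(ctx: C) -> String {
            format!("running a query for {}", ctx.crate_name())
        }
    }

    // Stands in for librustc: defines the concrete context and implements the trait.
    mod compiler {
        use super::query_system::{run_query, QueryCtx};

        #[derive(Copy, Clone)]
        pub struct Tcx;

        impl QueryCtx for Tcx {
            fn crate_name(&self) -> &'static str {
                "rustc"
            }
        }

        pub fn demo() -> String {
            run_query(Tcx)
        }
    }

    fn main() {
        assert_eq!(compiler::demo(), "running a query for rustc");
    }
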
--- Cargo.lock | 3 + src/librustc_query_system/Cargo.toml | 3 + src/librustc_query_system/lib.rs | 6 + src/librustc_query_system/query/caches.rs | 8 +- src/librustc_query_system/query/config.rs | 18 +- src/librustc_query_system/query/job.rs | 39 +- src/librustc_query_system/query/mod.rs | 192 +------ src/librustc_query_system/query/plumbing.rs | 573 +------------------- 8 files changed, 57 insertions(+), 785 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8bf61989135..6d70ab32c9d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4042,12 +4042,15 @@ version = "0.0.0" dependencies = [ "log", "parking_lot 0.9.0", + "rustc-rayon-core", "rustc_ast", "rustc_data_structures", "rustc_errors", "rustc_hir", "rustc_index", "rustc_macros", + "rustc_session", + "rustc_span", "serialize", "smallvec 1.0.0", ] diff --git a/src/librustc_query_system/Cargo.toml b/src/librustc_query_system/Cargo.toml index a01bb5e5ea3..065c54bb85a 100644 --- a/src/librustc_query_system/Cargo.toml +++ b/src/librustc_query_system/Cargo.toml @@ -11,6 +11,7 @@ doctest = false [dependencies] log = { version = "0.4", features = ["release_max_level_info", "std"] } +rustc-rayon-core = "0.3.0" rustc_ast = { path = "../librustc_ast" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } @@ -18,5 +19,7 @@ rustc_hir = { path = "../librustc_hir" } rustc_index = { path = "../librustc_index" } rustc_macros = { path = "../librustc_macros" } rustc_serialize = { path = "../libserialize", package = "serialize" } +rustc_session = { path = "../librustc_session" } +rustc_span = { path = "../librustc_span" } parking_lot = "0.9" smallvec = { version = "1.0", features = ["union", "may_dangle"] } diff --git a/src/librustc_query_system/lib.rs b/src/librustc_query_system/lib.rs index ef4886828c4..5750d8e8c35 100644 --- a/src/librustc_query_system/lib.rs +++ b/src/librustc_query_system/lib.rs @@ -1,14 +1,20 @@ +#![feature(bool_to_option)] #![feature(const_fn)] #![feature(const_if_match)] #![feature(const_panic)] #![feature(core_intrinsics)] +#![feature(hash_raw_entry)] #![feature(specialization)] #![feature(stmt_expr_attributes)] +#![feature(vec_remove_item)] #[macro_use] extern crate log; +#[macro_use] +extern crate rustc_data_structures; pub mod dep_graph; +pub mod query; pub trait HashStableContext { fn debug_dep_tasks(&self) -> bool; diff --git a/src/librustc_query_system/query/caches.rs b/src/librustc_query_system/query/caches.rs index f740fada1e5..efde51c4db6 100644 --- a/src/librustc_query_system/query/caches.rs +++ b/src/librustc_query_system/query/caches.rs @@ -1,6 +1,6 @@ use crate::dep_graph::DepNodeIndex; -use crate::ty::query::config::QueryContext; -use crate::ty::query::plumbing::{QueryLookup, QueryState, QueryStateShard}; +use crate::query::config::QueryContext; +use crate::query::plumbing::{QueryLookup, QueryState, QueryStateShard}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sharded::Sharded; @@ -8,11 +8,11 @@ use std::default::Default; use std::hash::Hash; use std::marker::PhantomData; -pub(crate) trait CacheSelector { +pub trait CacheSelector { type Cache: QueryCache; } -pub(crate) trait QueryCache: Default { +pub trait QueryCache: Default { type Key; type Value; type Sharded: Default; diff --git a/src/librustc_query_system/query/config.rs b/src/librustc_query_system/query/config.rs index 91e82858b0b..53adcbdeea7 100644 --- a/src/librustc_query_system/query/config.rs +++ b/src/librustc_query_system/query/config.rs @@ -1,10 +1,11 @@ //! 
Query configuration and description traits. use crate::dep_graph::SerializedDepNodeIndex; -use crate::ty::query::caches::QueryCache; -use crate::ty::query::job::{QueryJobId, QueryJobInfo}; -use crate::ty::query::plumbing::CycleError; -use crate::ty::query::QueryState; +use crate::dep_graph::{DepContext, DepGraph, DepNode}; +use crate::query::caches::QueryCache; +use crate::query::job::{QueryJobId, QueryJobInfo}; +use crate::query::plumbing::CycleError; +use crate::query::QueryState; use rustc_data_structures::profiling::ProfileCategory; use rustc_hir::def_id::DefId; @@ -14,7 +15,6 @@ use rustc_data_structures::stable_hasher::HashStable; use rustc_data_structures::sync::Lock; use rustc_data_structures::thin_vec::ThinVec; use rustc_errors::Diagnostic; -use rustc_query_system::dep_graph::{DepContext, DepGraph, DepNode}; use rustc_session::Session; use std::borrow::Cow; use std::fmt::Debug; @@ -58,7 +58,7 @@ pub trait QueryContext: DepContext { ) -> R; } -pub(crate) trait QueryAccessors: QueryConfig { +pub trait QueryAccessors: QueryConfig { const ANON: bool; const EVAL_ALWAYS: bool; const DEP_KIND: CTX::DepKind; @@ -81,7 +81,7 @@ pub(crate) trait QueryAccessors: QueryConfig { fn handle_cycle_error(tcx: CTX, error: CycleError) -> Self::Value; } -pub(crate) trait QueryDescription: QueryAccessors { +pub trait QueryDescription: QueryAccessors { fn describe(tcx: CTX, key: Self::Key) -> Cow<'static, str>; #[inline] @@ -90,7 +90,7 @@ pub(crate) trait QueryDescription: QueryAccessors { } fn try_load_from_disk(_: CTX, _: SerializedDepNodeIndex) -> Option { - bug!("QueryDescription::load_from_disk() called for an unsupported query.") + panic!("QueryDescription::load_from_disk() called for an unsupported query.") } } @@ -112,6 +112,6 @@ where } default fn try_load_from_disk(_: CTX, _: SerializedDepNodeIndex) -> Option { - bug!("QueryDescription::load_from_disk() called for an unsupported query.") + panic!("QueryDescription::load_from_disk() called for an unsupported query.") } } diff --git a/src/librustc_query_system/query/job.rs b/src/librustc_query_system/query/job.rs index 7f0156abdea..9068760d323 100644 --- a/src/librustc_query_system/query/job.rs +++ b/src/librustc_query_system/query/job.rs @@ -1,10 +1,8 @@ -use crate::ty::query::config::QueryContext; -use crate::ty::query::plumbing::CycleError; -#[cfg(parallel_compiler)] -use crate::ty::tls; +use crate::dep_graph::{DepKind, DepContext}; +use crate::query::config::QueryContext; +use crate::query::plumbing::CycleError; use rustc_data_structures::fx::FxHashMap; -use rustc_query_system::dep_graph::DepContext; use rustc_span::Span; use std::convert::TryFrom; @@ -22,7 +20,7 @@ use { rustc_rayon_core as rayon_core, rustc_span::DUMMY_SP, std::iter::FromIterator, - std::{mem, process, thread}, + std::{mem, process}, }; /// Represents a span and a query key. @@ -52,7 +50,7 @@ pub struct QueryJobId { pub kind: K, } -impl QueryJobId { +impl QueryJobId { pub fn new(job: QueryShardJobId, shard: usize, kind: K) -> Self { QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind } } @@ -529,38 +527,13 @@ fn remove_cycle( } } -/// Creates a new thread and forwards information in thread locals to it. -/// The new thread runs the deadlock handler. -/// Must only be called when a deadlock is about to happen. 
-#[cfg(parallel_compiler)] -pub unsafe fn handle_deadlock() { - let registry = rayon_core::Registry::current(); - - let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| gcx_ptr as *const _); - let gcx_ptr = &*gcx_ptr; - - let rustc_span_globals = - rustc_span::GLOBALS.with(|rustc_span_globals| rustc_span_globals as *const _); - let rustc_span_globals = &*rustc_span_globals; - let syntax_globals = rustc_ast::attr::GLOBALS.with(|syntax_globals| syntax_globals as *const _); - let syntax_globals = &*syntax_globals; - thread::spawn(move || { - tls::GCX_PTR.set(gcx_ptr, || { - rustc_ast::attr::GLOBALS.set(syntax_globals, || { - rustc_span::GLOBALS - .set(rustc_span_globals, || tls::with_global(|tcx| deadlock(tcx, ®istry))) - }); - }) - }); -} - /// Detects query cycles by using depth first search over all active query jobs. /// If a query cycle is found it will break the cycle by finding an edge which /// uses a query latch and then resuming that waiter. /// There may be multiple cycles involved in a deadlock, so this searches /// all active queries for cycles before finally resuming all the waiters at once. #[cfg(parallel_compiler)] -fn deadlock(tcx: CTX, registry: &rayon_core::Registry) { +pub fn deadlock(tcx: CTX, registry: &rayon_core::Registry) { let on_panic = OnDrop(|| { eprintln!("deadlock handler panicked, aborting process"); process::abort(); diff --git a/src/librustc_query_system/query/mod.rs b/src/librustc_query_system/query/mod.rs index c75e0d95e8f..0b8ad5c16a5 100644 --- a/src/librustc_query_system/query/mod.rs +++ b/src/librustc_query_system/query/mod.rs @@ -1,195 +1,13 @@ -use crate::dep_graph::{self, DepConstructor, DepNode, DepNodeParams}; -use crate::hir::exports::Export; -use crate::hir::map; -use crate::infer::canonical::{self, Canonical}; -use crate::lint::LintLevelMap; -use crate::middle::codegen_fn_attrs::CodegenFnAttrs; -use crate::middle::cstore::{CrateSource, DepKind, NativeLibraryKind}; -use crate::middle::cstore::{ExternCrate, ForeignModule, LinkagePreference, NativeLibrary}; -use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportLevel}; -use crate::middle::lang_items::{LangItem, LanguageItems}; -use crate::middle::lib_features::LibFeatures; -use crate::middle::privacy::AccessLevels; -use crate::middle::region; -use crate::middle::resolve_lifetime::{ObjectLifetimeDefault, Region, ResolveLifetimes}; -use crate::middle::stability::{self, DeprecationEntry}; -use crate::mir; -use crate::mir::interpret::GlobalId; -use crate::mir::interpret::{ConstEvalRawResult, ConstEvalResult, ConstValue}; -use crate::mir::interpret::{LitToConstError, LitToConstInput}; -use crate::mir::mono::CodegenUnit; -use crate::traits::query::{ - CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal, - CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal, - CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal, NoSolution, -}; -use crate::traits::query::{ - DropckOutlivesResult, DtorckConstraint, MethodAutoderefStepsResult, NormalizationResult, - OutlivesBound, -}; -use crate::traits::specialization_graph; -use crate::traits::Clauses; -use crate::traits::{self, Vtable}; -use crate::ty::steal::Steal; -use crate::ty::subst::{GenericArg, SubstsRef}; -use crate::ty::util::AlwaysRequiresDrop; -use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt}; -use crate::util::common::ErrorReported; -use rustc_data_structures::fingerprint::Fingerprint; -use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap}; -use 
rustc_data_structures::profiling::ProfileCategory::*; -use rustc_data_structures::stable_hasher::StableVec; -use rustc_data_structures::svh::Svh; -use rustc_data_structures::sync::Lrc; -use rustc_hir as hir; -use rustc_hir::def::DefKind; -use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId}; -use rustc_hir::{Crate, HirIdSet, ItemLocalId, TraitCandidate}; -use rustc_index::vec::IndexVec; -use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion}; -use rustc_session::CrateDisambiguator; -use rustc_target::spec::PanicStrategy; - -use rustc_ast::ast; -use rustc_attr as attr; -use rustc_span::symbol::Symbol; -use rustc_span::{Span, DUMMY_SP}; -use std::borrow::Cow; -use std::collections::BTreeMap; -use std::ops::Deref; -use std::sync::Arc; - -#[macro_use] mod plumbing; -pub(crate) use self::plumbing::CycleError; -use self::plumbing::*; - -mod stats; -pub use self::stats::print_stats; +pub use self::plumbing::*; mod job; +pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo}; #[cfg(parallel_compiler)] -pub use self::job::handle_deadlock; -use self::job::QueryJobInfo; -pub use self::job::{QueryInfo, QueryJob, QueryJobId}; - -mod keys; -use self::keys::Key; - -mod values; -use self::values::Value; +pub use self::job::deadlock; mod caches; -use self::caches::CacheSelector; +pub use self::caches::{CacheSelector, DefaultCacheSelector, QueryCache}; mod config; -use self::config::QueryAccessors; -pub use self::config::QueryConfig; -pub(crate) use self::config::QueryDescription; - -mod on_disk_cache; -pub use self::on_disk_cache::OnDiskCache; - -mod profiling_support; -pub use self::profiling_support::{IntoSelfProfilingString, QueryKeyStringBuilder}; - -// Each of these queries corresponds to a function pointer field in the -// `Providers` struct for requesting a value of that type, and a method -// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way -// which memoizes and does dep-graph tracking, wrapping around the actual -// `Providers` that the driver creates (using several `rustc_*` crates). -// -// The result type of each query must implement `Clone`, and additionally -// `ty::query::values::Value`, which produces an appropriate placeholder -// (error) value if the query resulted in a query cycle. -// Queries marked with `fatal_cycle` do not need the latter implementation, -// as they will raise an fatal error on query cycles instead. - -rustc_query_append! { [define_queries!][<'tcx>] } - -/// The red/green evaluation system will try to mark a specific DepNode in the -/// dependency graph as green by recursively trying to mark the dependencies of -/// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode` -/// where we don't know if it is red or green and we therefore actually have -/// to recompute its value in order to find out. Since the only piece of -/// information that we have at that point is the `DepNode` we are trying to -/// re-evaluate, we need some way to re-run a query from just that. This is what -/// `force_from_dep_node()` implements. -/// -/// In the general case, a `DepNode` consists of a `DepKind` and an opaque -/// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint -/// is usually constructed by computing a stable hash of the query-key that the -/// `DepNode` corresponds to. Consequently, it is not in general possible to go -/// back from hash to query-key (since hash functions are not reversible). 
For -/// this reason `force_from_dep_node()` is expected to fail from time to time -/// because we just cannot find out, from the `DepNode` alone, what the -/// corresponding query-key is and therefore cannot re-run the query. -/// -/// The system deals with this case letting `try_mark_green` fail which forces -/// the root query to be re-evaluated. -/// -/// Now, if `force_from_dep_node()` would always fail, it would be pretty useless. -/// Fortunately, we can use some contextual information that will allow us to -/// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we -/// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a -/// valid `DefPathHash`. Since we also always build a huge table that maps every -/// `DefPathHash` in the current codebase to the corresponding `DefId`, we have -/// everything we need to re-run the query. -/// -/// Take the `mir_validated` query as an example. Like many other queries, it -/// just has a single parameter: the `DefId` of the item it will compute the -/// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode` -/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode` -/// is actually a `DefPathHash`, and can therefore just look up the corresponding -/// `DefId` in `tcx.def_path_hash_to_def_id`. -/// -/// When you implement a new query, it will likely have a corresponding new -/// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As -/// a rule of thumb, if your query takes a `DefId` or `LocalDefId` as sole parameter, -/// then `force_from_dep_node()` should not fail for it. Otherwise, you can just -/// add it to the "We don't have enough information to reconstruct..." group in -/// the match below. -pub fn force_from_dep_node<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> bool { - // We must avoid ever having to call `force_from_dep_node()` for a - // `DepNode::codegen_unit`: - // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we - // would always end up having to evaluate the first caller of the - // `codegen_unit` query that *is* reconstructible. This might very well be - // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just - // to re-trigger calling the `codegen_unit` query with the right key. At - // that point we would already have re-done all the work we are trying to - // avoid doing in the first place. - // The solution is simple: Just explicitly call the `codegen_unit` query for - // each CGU, right after partitioning. This way `try_mark_green` will always - // hit the cache instead of having to go through `force_from_dep_node`. - // This assertion makes sure, we actually keep applying the solution above. - debug_assert!( - dep_node.kind != crate::dep_graph::DepKind::codegen_unit, - "calling force_from_dep_node() on DepKind::codegen_unit" - ); - - if !dep_node.kind.can_reconstruct_query_key() { - return false; - } - - rustc_dep_node_force!([dep_node, tcx] - // These are inputs that are expected to be pre-allocated and that - // should therefore always be red or green already. - crate::dep_graph::DepKind::CrateMetadata | - - // These are anonymous nodes. - crate::dep_graph::DepKind::TraitSelect | - - // We don't have enough information to reconstruct the query key of - // these. 
- crate::dep_graph::DepKind::CompileCodegenUnit => { - bug!("force_from_dep_node: encountered {:?}", dep_node) - } - ); - - false -} - -pub(crate) fn try_load_from_on_disk_cache<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) { - rustc_dep_node_try_load_from_on_disk_cache!(dep_node, tcx) -} +pub use self::config::{QueryAccessors, QueryConfig, QueryContext, QueryDescription}; diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index 65a7081bd38..0bae613fcfb 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -2,25 +2,21 @@ //! generate the actual methods on tcx which find and execute the provider, //! manage the caches, and so forth. +use crate::dep_graph::{DepKind, DepContext, DepNode}; use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; -use crate::ty::query::caches::QueryCache; -use crate::ty::query::config::{QueryContext, QueryDescription}; -use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId}; -use crate::ty::query::Query; -use crate::ty::tls::{self, ImplicitCtxt}; -use crate::ty::{self, TyCtxt}; +use crate::query::caches::QueryCache; +use crate::query::config::{QueryContext, QueryDescription}; +use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId}; +use crate::HashStableContextProvider; #[cfg(not(parallel_compiler))] use rustc_data_structures::cold_path; +use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::{FxHashMap, FxHasher}; use rustc_data_structures::sharded::Sharded; use rustc_data_structures::sync::{Lock, LockGuard}; use rustc_data_structures::thin_vec::ThinVec; -use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, FatalError, Handler, Level}; -use rustc_query_system::dep_graph::{DepContext, DepGraph, DepKind, DepNode}; -use rustc_query_system::HashStableContextProvider; -use rustc_session::Session; -use rustc_span::def_id::DefId; +use rustc_errors::{Diagnostic, FatalError}; use rustc_span::source_map::DUMMY_SP; use rustc_span::Span; use std::collections::hash_map::Entry; @@ -33,7 +29,7 @@ use std::ptr; #[cfg(debug_assertions)] use std::sync::atomic::{AtomicUsize, Ordering}; -pub(crate) struct QueryStateShard { +pub struct QueryStateShard { cache: C, active: FxHashMap>, @@ -53,15 +49,15 @@ impl Default for QueryStateShard { } } -pub(crate) struct QueryState> { +pub struct QueryState> { cache: C, shards: Sharded>, #[cfg(debug_assertions)] - pub(super) cache_hits: AtomicUsize, + pub cache_hits: AtomicUsize, } impl> QueryState { - pub(super) fn get_lookup( + pub(super) fn get_lookup<'tcx, K2: Hash>( &'tcx self, key: &K2, ) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> { @@ -89,7 +85,7 @@ enum QueryResult { } impl> QueryState { - pub(super) fn iter_results( + pub fn iter_results( &self, f: impl for<'a> FnOnce( Box + 'a>, @@ -97,12 +93,13 @@ impl> QueryState { ) -> R { self.cache.iter(&self.shards, |shard| &mut shard.cache, f) } - pub(super) fn all_inactive(&self) -> bool { + + pub fn all_inactive(&self) -> bool { let shards = self.shards.lock_shards(); shards.iter().all(|shard| shard.active.is_empty()) } - pub(super) fn try_collect_active_jobs( + pub fn try_collect_active_jobs( &self, kind: CTX::DepKind, make_query: fn(C::Key) -> CTX::Query, @@ -144,7 +141,7 @@ impl> Default for QueryState { } /// Values used when checking a query cache which can be reused on a cache-miss to execute the query. 
-pub(crate) struct QueryLookup<'tcx, CTX: QueryContext, K, C> { +pub struct QueryLookup<'tcx, CTX: QueryContext, K, C> { pub(super) key_hash: u64, shard: usize, pub(super) lock: LockGuard<'tcx, QueryStateShard>, @@ -329,10 +326,10 @@ where } #[derive(Clone)] -pub(crate) struct CycleError { +pub struct CycleError { /// The query and related span that uses the cycle. - pub(super) usage: Option<(Span, Q)>, - pub(super) cycle: Vec>, + pub usage: Option<(Span, Q)>, + pub cycle: Vec>, } /// The result of `try_start`. @@ -354,152 +351,6 @@ where Cycle(C::Value), } -impl QueryContext for TyCtxt<'tcx> { - type Query = Query<'tcx>; - - fn session(&self) -> &Session { - &self.sess - } - - fn def_path_str(&self, def_id: DefId) -> String { - TyCtxt::def_path_str(*self, def_id) - } - - fn dep_graph(&self) -> &DepGraph { - &self.dep_graph - } - - fn read_query_job(&self, op: impl FnOnce(Option>) -> R) -> R { - tls::with_related_context(*self, move |icx| op(icx.query)) - } - - fn try_collect_active_jobs( - &self, - ) -> Option, QueryJobInfo>> { - self.queries.try_collect_active_jobs() - } - - /// Executes a job by changing the `ImplicitCtxt` to point to the - /// new query job while it executes. It returns the diagnostics - /// captured during execution and the actual result. - #[inline(always)] - fn start_query( - &self, - token: QueryJobId, - diagnostics: Option<&Lock>>, - compute: impl FnOnce(Self) -> R, - ) -> R { - // The `TyCtxt` stored in TLS has the same global interner lifetime - // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes - // when accessing the `ImplicitCtxt`. - tls::with_related_context(*self, move |current_icx| { - // Update the `ImplicitCtxt` to point to our new query job. - let new_icx = ImplicitCtxt { - tcx: *self, - query: Some(token), - diagnostics, - layout_depth: current_icx.layout_depth, - task_deps: current_icx.task_deps, - }; - - // Use the `ImplicitCtxt` while we execute the query. - tls::enter_context(&new_icx, |_| compute(*self)) - }) - } -} - -impl<'tcx> TyCtxt<'tcx> { - #[inline(never)] - #[cold] - pub(super) fn report_cycle( - self, - CycleError { usage, cycle: stack }: CycleError>, - ) -> DiagnosticBuilder<'tcx> { - assert!(!stack.is_empty()); - - let fix_span = |span: Span, query: &Query<'tcx>| { - self.sess.source_map().guess_head_span(query.default_span(self, span)) - }; - - // Disable naming impls with types in this path, since that - // sometimes cycles itself, leading to extra cycle errors. - // (And cycle errors around impls tend to occur during the - // collect/coherence phases anyhow.) 
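`CycleError` becomes a plain data structure here: it only records which query frames participated in the cycle, while the `report_cycle` logic that turns those frames into the E0391 diagnostic stays behind on the `librustc` side (it reappears in `librustc/ty/query/plumbing.rs` later in this series). A reduced sketch of that reporting shape, using plain strings in place of the real `QueryInfo` frames and returning a `String` instead of building a `DiagnosticBuilder`:

    // A reduced model of how a query cycle becomes a readable report: the
    // first frame names the query that started the cycle, every following
    // frame is a "...which requires..." step, and the report closes the
    // loop by naming the first query again.
    fn format_cycle_report(cycle: &[String]) -> String {
        assert!(!cycle.is_empty(), "a cycle report needs at least one frame");

        let mut report = format!("cycle detected when {}\n", cycle[0]);
        for frame in &cycle[1..] {
            report.push_str(&format!("...which requires {}...\n", frame));
        }
        report.push_str(&format!(
            "...which again requires {}, completing the cycle",
            cycle[0]
        ));
        report
    }

    fn main() {
        let cycle = vec![
            "computing the supertraits of `A`".to_string(),
            "computing the supertraits of `B`".to_string(),
        ];
        println!("{}", format_cycle_report(&cycle));
    }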
- ty::print::with_forced_impl_filename_line(|| { - let span = fix_span(stack[1 % stack.len()].span, &stack[0].query); - let mut err = struct_span_err!( - self.sess, - span, - E0391, - "cycle detected when {}", - stack[0].query.describe(self) - ); - - for i in 1..stack.len() { - let query = &stack[i].query; - let span = fix_span(stack[(i + 1) % stack.len()].span, query); - err.span_note(span, &format!("...which requires {}...", query.describe(self))); - } - - err.note(&format!( - "...which again requires {}, completing the cycle", - stack[0].query.describe(self) - )); - - if let Some((span, query)) = usage { - err.span_note( - fix_span(span, &query), - &format!("cycle used when {}", query.describe(self)), - ); - } - - err - }) - } - - pub fn try_print_query_stack(handler: &Handler) { - eprintln!("query stack during panic:"); - - // Be careful reyling on global state here: this code is called from - // a panic hook, which means that the global `Handler` may be in a weird - // state if it was responsible for triggering the panic. - tls::with_context_opt(|icx| { - if let Some(icx) = icx { - let query_map = icx.tcx.queries.try_collect_active_jobs(); - - let mut current_query = icx.query; - let mut i = 0; - - while let Some(query) = current_query { - let query_info = - if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) { - info - } else { - break; - }; - let mut diag = Diagnostic::new( - Level::FailureNote, - &format!( - "#{} [{}] {}", - i, - query_info.info.query.name(), - query_info.info.query.describe(icx.tcx) - ), - ); - diag.span = - icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into(); - handler.force_print_diagnostic(diag); - - current_query = query_info.job.parent; - i += 1; - } - } - }); - - eprintln!("end of query stack"); - } -} - /// Checks if the query is already computed and in the cache. /// It returns the shard index and a lock guard to the shard, /// which will be used if the query is not in the cache and we need @@ -697,8 +548,6 @@ impl<'tcx> TyCtxt<'tcx> { CTX: QueryContext, Q: QueryDescription, { - use rustc_data_structures::fingerprint::Fingerprint; - assert!( Some(tcx.dep_graph().fingerprint_of(dep_node_index)) == tcx.dep_graph().prev_fingerprint_of(dep_node), @@ -721,7 +570,7 @@ impl<'tcx> TyCtxt<'tcx> { fn force_query_with_job( tcx: CTX, key: Q::Key, - job: JobOwner<'tcx, CTX, Q::Cache>, + job: JobOwner<'_, CTX, Q::Cache>, dep_node: DepNode, ) -> (Q::Value, DepNodeIndex) where @@ -775,7 +624,7 @@ impl<'tcx> TyCtxt<'tcx> { (result, dep_node_index) } -pub(super) trait QueryGetter: QueryContext { +pub trait QueryGetter: QueryContext { fn get_query>( self, span: Span, @@ -887,383 +736,3 @@ where ); } } - -macro_rules! handle_cycle_error { - ([][$tcx: expr, $error:expr]) => {{ - $tcx.report_cycle($error).emit(); - Value::from_cycle_error($tcx) - }}; - ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{ - $tcx.report_cycle($error).emit(); - $tcx.sess.abort_if_errors(); - unreachable!() - }}; - ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{ - $tcx.report_cycle($error).delay_as_bug(); - Value::from_cycle_error($tcx) - }}; - ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { - handle_cycle_error!([$($($modifiers)*)*][$($args)*]) - }; -} - -macro_rules! is_anon { - ([]) => {{ - false - }}; - ([anon $($rest:tt)*]) => {{ - true - }}; - ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => { - is_anon!([$($($modifiers)*)*]) - }; -} - -macro_rules! 
is_eval_always { - ([]) => {{ - false - }}; - ([eval_always $($rest:tt)*]) => {{ - true - }}; - ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => { - is_eval_always!([$($($modifiers)*)*]) - }; -} - -macro_rules! query_storage { - (<$tcx:tt>[][$K:ty, $V:ty]) => { - <<$K as Key>::CacheSelector as CacheSelector, $K, $V>>::Cache - }; - (<$tcx:tt>[storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => { - $ty - }; - (<$tcx:tt>[$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { - query_storage!(<$tcx>[$($($modifiers)*)*][$($args)*]) - }; -} - -macro_rules! hash_result { - ([][$hcx:expr, $result:expr]) => {{ - dep_graph::hash_result($hcx, &$result) - }}; - ([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{ - None - }}; - ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { - hash_result!([$($($modifiers)*)*][$($args)*]) - }; -} - -macro_rules! define_queries { - (<$tcx:tt> $($category:tt { - $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)* - },)*) => { - define_queries_inner! { <$tcx> - $($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($K) -> $V,)*)* - } - } -} - -macro_rules! define_queries_inner { - (<$tcx:tt> - $($(#[$attr:meta])* category<$category:tt> - [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => { - - use std::mem; - use crate::{ - rustc_data_structures::stable_hasher::HashStable, - rustc_data_structures::stable_hasher::StableHasher, - ich::StableHashingContext - }; - use rustc_data_structures::profiling::ProfileCategory; - - define_queries_struct! { - tcx: $tcx, - input: ($(([$($modifiers)*] [$($attr)*] [$name]))*) - } - - #[allow(nonstandard_style)] - #[derive(Clone, Debug)] - pub enum Query<$tcx> { - $($(#[$attr])* $name($K)),* - } - - impl<$tcx> Query<$tcx> { - pub fn name(&self) -> &'static str { - match *self { - $(Query::$name(_) => stringify!($name),)* - } - } - - pub fn describe(&self, tcx: TyCtxt<$tcx>) -> Cow<'static, str> { - let (r, name) = match *self { - $(Query::$name(key) => { - (queries::$name::describe(tcx, key), stringify!($name)) - })* - }; - if tcx.sess.verbose() { - format!("{} [{}]", r, name).into() - } else { - r - } - } - - // FIXME(eddyb) Get more valid `Span`s on queries. - pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span { - if !span.is_dummy() { - return span; - } - // The `def_span` query is used to calculate `default_span`, - // so exit to avoid infinite recursion. - if let Query::def_span(..) 
= *self { - return span - } - match *self { - $(Query::$name(key) => key.default_span(tcx),)* - } - } - } - - impl<'a, $tcx> HashStable> for Query<$tcx> { - fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - mem::discriminant(self).hash_stable(hcx, hasher); - match *self { - $(Query::$name(key) => key.hash_stable(hcx, hasher),)* - } - } - } - - pub mod queries { - use std::marker::PhantomData; - - $(#[allow(nonstandard_style)] - pub struct $name<$tcx> { - data: PhantomData<&$tcx ()> - })* - } - - $(impl<$tcx> QueryConfig> for queries::$name<$tcx> { - type Key = $K; - type Value = $V; - const NAME: &'static str = stringify!($name); - const CATEGORY: ProfileCategory = $category; - } - - impl<$tcx> QueryAccessors> for queries::$name<$tcx> { - const ANON: bool = is_anon!([$($modifiers)*]); - const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]); - const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node; - - type Cache = query_storage!(<$tcx>[$($modifiers)*][$K, $V]); - - #[inline(always)] - fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState, Self::Cache> { - &tcx.queries.$name - } - - #[allow(unused)] - #[inline(always)] - fn to_dep_node(tcx: TyCtxt<$tcx>, key: &Self::Key) -> DepNode { - DepConstructor::$node(tcx, *key) - } - - #[inline] - fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value { - let provider = tcx.queries.providers.get(key.query_crate()) - // HACK(eddyb) it's possible crates may be loaded after - // the query engine is created, and because crate loading - // is not yet integrated with the query engine, such crates - // would be missing appropriate entries in `providers`. - .unwrap_or(&tcx.queries.fallback_extern_providers) - .$name; - provider(tcx, key) - } - - fn hash_result( - _hcx: &mut StableHashingContext<'_>, - _result: &Self::Value - ) -> Option { - hash_result!([$($modifiers)*][_hcx, _result]) - } - - fn handle_cycle_error( - tcx: TyCtxt<'tcx>, - error: CycleError> - ) -> Self::Value { - handle_cycle_error!([$($modifiers)*][tcx, error]) - } - })* - - #[derive(Copy, Clone)] - pub struct TyCtxtEnsure<'tcx> { - pub tcx: TyCtxt<'tcx>, - } - - impl TyCtxtEnsure<$tcx> { - $($(#[$attr])* - #[inline(always)] - pub fn $name(self, key: $K) { - self.tcx.ensure_query::>(key) - })* - } - - #[derive(Copy, Clone)] - pub struct TyCtxtAt<'tcx> { - pub tcx: TyCtxt<'tcx>, - pub span: Span, - } - - impl Deref for TyCtxtAt<'tcx> { - type Target = TyCtxt<'tcx>; - #[inline(always)] - fn deref(&self) -> &Self::Target { - &self.tcx - } - } - - impl TyCtxt<$tcx> { - /// Returns a transparent wrapper for `TyCtxt`, which ensures queries - /// are executed instead of just returning their results. - #[inline(always)] - pub fn ensure(self) -> TyCtxtEnsure<$tcx> { - TyCtxtEnsure { - tcx: self, - } - } - - /// Returns a transparent wrapper for `TyCtxt` which uses - /// `span` as the location of queries performed through it. - #[inline(always)] - pub fn at(self, span: Span) -> TyCtxtAt<$tcx> { - TyCtxtAt { - tcx: self, - span - } - } - - $($(#[$attr])* - #[inline(always)] - pub fn $name(self, key: $K) -> $V { - self.at(DUMMY_SP).$name(key) - })* - - /// All self-profiling events generated by the query engine use - /// virtual `StringId`s for their `event_id`. This method makes all - /// those virtual `StringId`s point to actual strings. - /// - /// If we are recording only summary data, the ids will point to - /// just the query names. If we are recording query keys too, we - /// allocate the corresponding strings here. 
- pub fn alloc_self_profile_query_strings(self) { - use crate::ty::query::profiling_support::{ - alloc_self_profile_query_strings_for_query_cache, - QueryKeyStringCache, - }; - - if !self.prof.enabled() { - return; - } - - let mut string_cache = QueryKeyStringCache::new(); - - $({ - alloc_self_profile_query_strings_for_query_cache( - self, - stringify!($name), - &self.queries.$name, - &mut string_cache, - ); - })* - } - } - - impl TyCtxtAt<$tcx> { - $($(#[$attr])* - #[inline(always)] - pub fn $name(self, key: $K) -> $V { - self.tcx.get_query::>(self.span, key) - })* - } - - define_provider_struct! { - tcx: $tcx, - input: ($(([$($modifiers)*] [$name] [$K] [$V]))*) - } - - impl<$tcx> Copy for Providers<$tcx> {} - impl<$tcx> Clone for Providers<$tcx> { - fn clone(&self) -> Self { *self } - } - } -} - -macro_rules! define_queries_struct { - (tcx: $tcx:tt, - input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => { - pub struct Queries<$tcx> { - /// This provides access to the incrimental comilation on-disk cache for query results. - /// Do not access this directly. It is only meant to be used by - /// `DepGraph::try_mark_green()` and the query infrastructure. - pub(crate) on_disk_cache: OnDiskCache<'tcx>, - - providers: IndexVec>, - fallback_extern_providers: Box>, - - $($(#[$attr])* $name: QueryState< - TyCtxt<$tcx>, - as QueryAccessors>>::Cache, - >,)* - } - - impl<$tcx> Queries<$tcx> { - pub(crate) fn new( - providers: IndexVec>, - fallback_extern_providers: Providers<$tcx>, - on_disk_cache: OnDiskCache<'tcx>, - ) -> Self { - Queries { - providers, - fallback_extern_providers: Box::new(fallback_extern_providers), - on_disk_cache, - $($name: Default::default()),* - } - } - - pub(crate) fn try_collect_active_jobs( - &self - ) -> Option, QueryJobInfo>>> { - let mut jobs = FxHashMap::default(); - - $( - self.$name.try_collect_active_jobs( - as QueryAccessors>>::DEP_KIND, - Query::$name, - &mut jobs, - )?; - )* - - Some(jobs) - } - } - }; -} - -macro_rules! define_provider_struct { - (tcx: $tcx:tt, - input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => { - pub struct Providers<$tcx> { - $(pub $name: fn(TyCtxt<$tcx>, $K) -> $R,)* - } - - impl<$tcx> Default for Providers<$tcx> { - fn default() -> Self { - $(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R { - bug!("`tcx.{}({:?})` unsupported by its crate", - stringify!($name), key); - })* - Providers { $($name),* } - } - } - }; -} From dca03443a02793aed40d3796460d541311300877 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 19 Mar 2020 14:24:21 +0100 Subject: [PATCH 17/31] Make librustc compile. --- src/librustc/ty/query/README.md | 3 + src/librustc/ty/query/job.rs | 29 ++ src/librustc/ty/query/keys.rs | 2 +- src/librustc/ty/query/mod.rs | 191 ++++++++ src/librustc/ty/query/on_disk_cache.rs | 2 +- src/librustc/ty/query/plumbing.rs | 543 +++++++++++++++++++++ src/librustc/ty/query/profiling_support.rs | 4 +- src/librustc/ty/query/stats.rs | 6 +- 8 files changed, 773 insertions(+), 7 deletions(-) create mode 100644 src/librustc/ty/query/README.md create mode 100644 src/librustc/ty/query/job.rs create mode 100644 src/librustc/ty/query/mod.rs create mode 100644 src/librustc/ty/query/plumbing.rs diff --git a/src/librustc/ty/query/README.md b/src/librustc/ty/query/README.md new file mode 100644 index 00000000000..8ec07b9fdeb --- /dev/null +++ b/src/librustc/ty/query/README.md @@ -0,0 +1,3 @@ +For more information about how the query system works, see the [rustc dev guide]. 
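At its core, every query behaves like a pure function from a key to a value whose result the compiler memoizes; the few lines below illustrate only that idea, not the actual rustc machinery (the `Memo` type and its provider closure are invented for the example):

    use std::collections::HashMap;

    // A toy "query engine": results are computed on demand and memoized,
    // so asking for the same key twice only runs the provider once.
    struct Memo {
        cache: HashMap<u32, u64>,
    }

    impl Memo {
        fn new() -> Self {
            Memo { cache: HashMap::new() }
        }

        // `provider` plays the role of the provider function a crate
        // registers for the query.
        fn get(&mut self, key: u32, provider: impl Fn(u32) -> u64) -> u64 {
            if let Some(&value) = self.cache.get(&key) {
                return value; // cache hit: no recomputation
            }
            let value = provider(key);
            self.cache.insert(key, value);
            value
        }
    }

    fn main() {
        let mut memo = Memo::new();
        let square = |k: u32| u64::from(k) * u64::from(k);
        assert_eq!(memo.get(7, square), 49);
        assert_eq!(memo.get(7, square), 49); // served from the cache
    }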
+ +[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/query.html diff --git a/src/librustc/ty/query/job.rs b/src/librustc/ty/query/job.rs new file mode 100644 index 00000000000..5f7a9e81158 --- /dev/null +++ b/src/librustc/ty/query/job.rs @@ -0,0 +1,29 @@ +use crate::ty::tls; + +use rustc_query_system::query::deadlock; +use rustc_rayon_core as rayon_core; +use std::thread; + +/// Creates a new thread and forwards information in thread locals to it. +/// The new thread runs the deadlock handler. +/// Must only be called when a deadlock is about to happen. +pub unsafe fn handle_deadlock() { + let registry = rayon_core::Registry::current(); + + let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| gcx_ptr as *const _); + let gcx_ptr = &*gcx_ptr; + + let rustc_span_globals = + rustc_span::GLOBALS.with(|rustc_span_globals| rustc_span_globals as *const _); + let rustc_span_globals = &*rustc_span_globals; + let syntax_globals = rustc_ast::attr::GLOBALS.with(|syntax_globals| syntax_globals as *const _); + let syntax_globals = &*syntax_globals; + thread::spawn(move || { + tls::GCX_PTR.set(gcx_ptr, || { + rustc_ast::attr::GLOBALS.set(syntax_globals, || { + rustc_span::GLOBALS + .set(rustc_span_globals, || tls::with_global(|tcx| deadlock(tcx, ®istry))) + }); + }) + }); +} diff --git a/src/librustc/ty/query/keys.rs b/src/librustc/ty/query/keys.rs index 6be1f04efca..a261e484a85 100644 --- a/src/librustc/ty/query/keys.rs +++ b/src/librustc/ty/query/keys.rs @@ -4,10 +4,10 @@ use crate::infer::canonical::Canonical; use crate::mir; use crate::traits; use crate::ty::fast_reject::SimplifiedType; -use crate::ty::query::caches::DefaultCacheSelector; use crate::ty::subst::{GenericArg, SubstsRef}; use crate::ty::{self, Ty, TyCtxt}; use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE}; +use rustc_query_system::query::DefaultCacheSelector; use rustc_span::symbol::Symbol; use rustc_span::{Span, DUMMY_SP}; diff --git a/src/librustc/ty/query/mod.rs b/src/librustc/ty/query/mod.rs new file mode 100644 index 00000000000..744237520fb --- /dev/null +++ b/src/librustc/ty/query/mod.rs @@ -0,0 +1,191 @@ +use crate::dep_graph::{self, DepConstructor, DepNode, DepNodeParams}; +use crate::hir::exports::Export; +use crate::hir::map; +use crate::infer::canonical::{self, Canonical}; +use crate::lint::LintLevelMap; +use crate::middle::codegen_fn_attrs::CodegenFnAttrs; +use crate::middle::cstore::{CrateSource, DepKind, NativeLibraryKind}; +use crate::middle::cstore::{ExternCrate, ForeignModule, LinkagePreference, NativeLibrary}; +use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportLevel}; +use crate::middle::lang_items::{LangItem, LanguageItems}; +use crate::middle::lib_features::LibFeatures; +use crate::middle::privacy::AccessLevels; +use crate::middle::region; +use crate::middle::resolve_lifetime::{ObjectLifetimeDefault, Region, ResolveLifetimes}; +use crate::middle::stability::{self, DeprecationEntry}; +use crate::mir; +use crate::mir::interpret::GlobalId; +use crate::mir::interpret::{ConstEvalRawResult, ConstEvalResult, ConstValue}; +use crate::mir::interpret::{LitToConstError, LitToConstInput}; +use crate::mir::mono::CodegenUnit; +use crate::traits::query::{ + CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal, + CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal, + CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal, NoSolution, +}; +use crate::traits::query::{ + DropckOutlivesResult, DtorckConstraint, MethodAutoderefStepsResult, 
NormalizationResult, + OutlivesBound, +}; +use crate::traits::specialization_graph; +use crate::traits::Clauses; +use crate::traits::{self, Vtable}; +use crate::ty::steal::Steal; +use crate::ty::subst::{GenericArg, SubstsRef}; +use crate::ty::util::AlwaysRequiresDrop; +use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt}; +use crate::util::common::ErrorReported; +use rustc_data_structures::fingerprint::Fingerprint; +use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap}; +use rustc_data_structures::profiling::ProfileCategory::*; +use rustc_data_structures::stable_hasher::StableVec; +use rustc_data_structures::svh::Svh; +use rustc_data_structures::sync::Lrc; +use rustc_hir as hir; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId}; +use rustc_hir::{Crate, HirIdSet, ItemLocalId, TraitCandidate}; +use rustc_index::vec::IndexVec; +use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion}; +use rustc_session::CrateDisambiguator; +use rustc_target::spec::PanicStrategy; + +use rustc_ast::ast; +use rustc_attr as attr; +use rustc_span::symbol::Symbol; +use rustc_span::{Span, DUMMY_SP}; +use std::borrow::Cow; +use std::collections::BTreeMap; +use std::ops::Deref; +use std::sync::Arc; + +#[macro_use] +mod plumbing; +pub(crate) use rustc_query_system::query::CycleError; +use rustc_query_system::query::*; + +mod stats; +pub use self::stats::print_stats; + +#[cfg(parallel_compiler)] +mod job; +#[cfg(parallel_compiler)] +pub use self::job::handle_deadlock; +pub use rustc_query_system::query::{QueryInfo, QueryJob, QueryJobId}; + +mod keys; +use self::keys::Key; + +mod values; +use self::values::Value; + +use rustc_query_system::query::QueryAccessors; +pub use rustc_query_system::query::QueryConfig; +pub(crate) use rustc_query_system::query::QueryDescription; + +mod on_disk_cache; +pub use self::on_disk_cache::OnDiskCache; + +mod profiling_support; +pub use self::profiling_support::{IntoSelfProfilingString, QueryKeyStringBuilder}; + +// Each of these queries corresponds to a function pointer field in the +// `Providers` struct for requesting a value of that type, and a method +// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way +// which memoizes and does dep-graph tracking, wrapping around the actual +// `Providers` that the driver creates (using several `rustc_*` crates). +// +// The result type of each query must implement `Clone`, and additionally +// `ty::query::values::Value`, which produces an appropriate placeholder +// (error) value if the query resulted in a query cycle. +// Queries marked with `fatal_cycle` do not need the latter implementation, +// as they will raise an fatal error on query cycles instead. + +rustc_query_append! { [define_queries!][<'tcx>] } + +/// The red/green evaluation system will try to mark a specific DepNode in the +/// dependency graph as green by recursively trying to mark the dependencies of +/// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode` +/// where we don't know if it is red or green and we therefore actually have +/// to recompute its value in order to find out. Since the only piece of +/// information that we have at that point is the `DepNode` we are trying to +/// re-evaluate, we need some way to re-run a query from just that. This is what +/// `force_from_dep_node()` implements. 
+/// +/// In the general case, a `DepNode` consists of a `DepKind` and an opaque +/// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint +/// is usually constructed by computing a stable hash of the query-key that the +/// `DepNode` corresponds to. Consequently, it is not in general possible to go +/// back from hash to query-key (since hash functions are not reversible). For +/// this reason `force_from_dep_node()` is expected to fail from time to time +/// because we just cannot find out, from the `DepNode` alone, what the +/// corresponding query-key is and therefore cannot re-run the query. +/// +/// The system deals with this case letting `try_mark_green` fail which forces +/// the root query to be re-evaluated. +/// +/// Now, if `force_from_dep_node()` would always fail, it would be pretty useless. +/// Fortunately, we can use some contextual information that will allow us to +/// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we +/// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a +/// valid `DefPathHash`. Since we also always build a huge table that maps every +/// `DefPathHash` in the current codebase to the corresponding `DefId`, we have +/// everything we need to re-run the query. +/// +/// Take the `mir_validated` query as an example. Like many other queries, it +/// just has a single parameter: the `DefId` of the item it will compute the +/// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode` +/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode` +/// is actually a `DefPathHash`, and can therefore just look up the corresponding +/// `DefId` in `tcx.def_path_hash_to_def_id`. +/// +/// When you implement a new query, it will likely have a corresponding new +/// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As +/// a rule of thumb, if your query takes a `DefId` or `LocalDefId` as sole parameter, +/// then `force_from_dep_node()` should not fail for it. Otherwise, you can just +/// add it to the "We don't have enough information to reconstruct..." group in +/// the match below. +pub fn force_from_dep_node<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> bool { + // We must avoid ever having to call `force_from_dep_node()` for a + // `DepNode::codegen_unit`: + // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we + // would always end up having to evaluate the first caller of the + // `codegen_unit` query that *is* reconstructible. This might very well be + // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just + // to re-trigger calling the `codegen_unit` query with the right key. At + // that point we would already have re-done all the work we are trying to + // avoid doing in the first place. + // The solution is simple: Just explicitly call the `codegen_unit` query for + // each CGU, right after partitioning. This way `try_mark_green` will always + // hit the cache instead of having to go through `force_from_dep_node`. + // This assertion makes sure, we actually keep applying the solution above. + debug_assert!( + dep_node.kind != crate::dep_graph::DepKind::codegen_unit, + "calling force_from_dep_node() on DepKind::codegen_unit" + ); + + if !dep_node.kind.can_reconstruct_query_key() { + return false; + } + + rustc_dep_node_force!([dep_node, tcx] + // These are inputs that are expected to be pre-allocated and that + // should therefore always be red or green already. 
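A minimal, self-contained sketch of the reconstruction strategy described in the comment above: for a dep node whose kind is known to use a `DefPathHash` as its fingerprint, a side table recovers the `DefId` and the query can simply be forced again, while anonymous nodes have no recoverable key. Every type here is a simplified stand-in rather than the real rustc definition:

    use std::collections::HashMap;

    // Simplified stand-ins: a dep node is a kind plus an opaque fingerprint.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct Fingerprint(u64);

    #[derive(Clone, Copy, Debug)]
    struct DefId(u32);

    #[derive(Clone, Copy)]
    enum DepKind {
        MirValidated, // keyed by a DefId: the key is reconstructible
        TraitSelect,  // anonymous: no recoverable key
    }

    struct DepNode {
        kind: DepKind,
        fingerprint: Fingerprint,
    }

    // Tries to re-run the query behind `dep_node`. Returns `false` when the
    // query key cannot be recovered from the fingerprint alone, in which
    // case the caller falls back to re-evaluating the root query.
    fn force_from_dep_node(
        def_path_hash_to_def_id: &HashMap<Fingerprint, DefId>,
        dep_node: &DepNode,
        force_mir_validated: impl Fn(DefId),
    ) -> bool {
        match dep_node.kind {
            DepKind::MirValidated => {
                // For DefId-keyed queries the fingerprint doubles as a
                // DefPathHash, so the key can be looked up and the query forced.
                if let Some(&def_id) = def_path_hash_to_def_id.get(&dep_node.fingerprint) {
                    force_mir_validated(def_id);
                    true
                } else {
                    false
                }
            }
            // Anonymous nodes carry no recoverable key.
            DepKind::TraitSelect => false,
        }
    }

    fn main() {
        let mut table = HashMap::new();
        table.insert(Fingerprint(42), DefId(7));

        let node = DepNode { kind: DepKind::MirValidated, fingerprint: Fingerprint(42) };
        let forced = force_from_dep_node(&table, &node, |def_id| {
            println!("re-running `mir_validated({:?})`", def_id);
        });
        assert!(forced);

        let anon = DepNode { kind: DepKind::TraitSelect, fingerprint: Fingerprint(0) };
        assert!(!force_from_dep_node(&table, &anon, |_| {}));
    }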
+ crate::dep_graph::DepKind::CrateMetadata | + + // These are anonymous nodes. + crate::dep_graph::DepKind::TraitSelect | + + // We don't have enough information to reconstruct the query key of + // these. + crate::dep_graph::DepKind::CompileCodegenUnit => { + bug!("force_from_dep_node: encountered {:?}", dep_node) + } + ); + + false +} + +pub(crate) fn try_load_from_on_disk_cache<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) { + rustc_dep_node_try_load_from_on_disk_cache!(dep_node, tcx) +} diff --git a/src/librustc/ty/query/on_disk_cache.rs b/src/librustc/ty/query/on_disk_cache.rs index 14839e6ad50..8aecc0e698a 100644 --- a/src/librustc/ty/query/on_disk_cache.rs +++ b/src/librustc/ty/query/on_disk_cache.rs @@ -994,7 +994,7 @@ fn encode_query_results<'a, 'tcx, Q, E>( query_result_index: &mut EncodedQueryResultIndex, ) -> Result<(), E::Error> where - Q: super::config::QueryDescription>, + Q: super::QueryDescription>, Q::Value: Encodable, E: 'a + TyEncoder, { diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs new file mode 100644 index 00000000000..ef60ac893d2 --- /dev/null +++ b/src/librustc/ty/query/plumbing.rs @@ -0,0 +1,543 @@ +//! The implementation of the query system itself. This defines the macros that +//! generate the actual methods on tcx which find and execute the provider, +//! manage the caches, and so forth. + +use crate::dep_graph::DepGraph; +use crate::ty::query::Query; +use crate::ty::tls::{self, ImplicitCtxt}; +use crate::ty::{self, TyCtxt}; +use rustc_query_system::query::QueryContext; +use rustc_query_system::query::{CycleError, QueryJobId, QueryJobInfo}; + +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::sync::Lock; +use rustc_data_structures::thin_vec::ThinVec; +use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level}; +use rustc_session::Session; +use rustc_span::def_id::DefId; +use rustc_span::Span; + +impl QueryContext for TyCtxt<'tcx> { + type Query = Query<'tcx>; + + fn session(&self) -> &Session { + &self.sess + } + + fn def_path_str(&self, def_id: DefId) -> String { + TyCtxt::def_path_str(*self, def_id) + } + + fn dep_graph(&self) -> &DepGraph { + &self.dep_graph + } + + fn read_query_job(&self, op: impl FnOnce(Option>) -> R) -> R { + tls::with_related_context(*self, move |icx| op(icx.query)) + } + + fn try_collect_active_jobs( + &self, + ) -> Option, QueryJobInfo>> { + self.queries.try_collect_active_jobs() + } + + /// Executes a job by changing the `ImplicitCtxt` to point to the + /// new query job while it executes. It returns the diagnostics + /// captured during execution and the actual result. + #[inline(always)] + fn start_query( + &self, + token: QueryJobId, + diagnostics: Option<&Lock>>, + compute: impl FnOnce(Self) -> R, + ) -> R { + // The `TyCtxt` stored in TLS has the same global interner lifetime + // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes + // when accessing the `ImplicitCtxt`. + tls::with_related_context(*self, move |current_icx| { + // Update the `ImplicitCtxt` to point to our new query job. + let new_icx = ImplicitCtxt { + tcx: *self, + query: Some(token), + diagnostics, + layout_depth: current_icx.layout_depth, + task_deps: current_icx.task_deps, + }; + + // Use the `ImplicitCtxt` while we execute the query. 
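The heart of `start_query` above is a scoped swap of thread-local state: the new job becomes the current one for the duration of `compute`, and the enclosing job becomes visible again afterwards. The sketch below models that with a bare `u64` token instead of a `QueryJobId`, and it leaves out the diagnostics capture and the panic safety that the real TLS helpers provide:

    use std::cell::Cell;

    // A scoped, thread-local "current query job", standing in for the
    // `ImplicitCtxt` that rustc threads through TLS. `start_query` installs
    // the new job for the duration of `compute` and restores the previous
    // one afterwards, so nested query invocations can see their parent.
    thread_local! {
        static CURRENT_JOB: Cell<Option<u64>> = Cell::new(None);
    }

    fn start_query<R>(token: u64, compute: impl FnOnce() -> R) -> R {
        CURRENT_JOB.with(|current| {
            let previous = current.replace(Some(token));
            let result = compute();
            current.set(previous); // restore the enclosing job, if any
            result
        })
    }

    fn current_job() -> Option<u64> {
        CURRENT_JOB.with(|current| current.get())
    }

    fn main() {
        assert_eq!(current_job(), None);
        start_query(1, || {
            assert_eq!(current_job(), Some(1));
            start_query(2, || assert_eq!(current_job(), Some(2)));
            assert_eq!(current_job(), Some(1)); // parent restored after nesting
        });
        assert_eq!(current_job(), None);
    }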
+ tls::enter_context(&new_icx, |_| compute(*self)) + }) + } +} + +impl<'tcx> TyCtxt<'tcx> { + #[inline(never)] + #[cold] + pub(super) fn report_cycle( + self, + CycleError { usage, cycle: stack }: CycleError>, + ) -> DiagnosticBuilder<'tcx> { + assert!(!stack.is_empty()); + + let fix_span = |span: Span, query: &Query<'tcx>| { + self.sess.source_map().guess_head_span(query.default_span(self, span)) + }; + + // Disable naming impls with types in this path, since that + // sometimes cycles itself, leading to extra cycle errors. + // (And cycle errors around impls tend to occur during the + // collect/coherence phases anyhow.) + ty::print::with_forced_impl_filename_line(|| { + let span = fix_span(stack[1 % stack.len()].span, &stack[0].query); + let mut err = struct_span_err!( + self.sess, + span, + E0391, + "cycle detected when {}", + stack[0].query.describe(self) + ); + + for i in 1..stack.len() { + let query = &stack[i].query; + let span = fix_span(stack[(i + 1) % stack.len()].span, query); + err.span_note(span, &format!("...which requires {}...", query.describe(self))); + } + + err.note(&format!( + "...which again requires {}, completing the cycle", + stack[0].query.describe(self) + )); + + if let Some((span, query)) = usage { + err.span_note( + fix_span(span, &query), + &format!("cycle used when {}", query.describe(self)), + ); + } + + err + }) + } + + pub fn try_print_query_stack(handler: &Handler) { + eprintln!("query stack during panic:"); + + // Be careful reyling on global state here: this code is called from + // a panic hook, which means that the global `Handler` may be in a weird + // state if it was responsible for triggering the panic. + ty::tls::with_context_opt(|icx| { + if let Some(icx) = icx { + let query_map = icx.tcx.queries.try_collect_active_jobs(); + + let mut current_query = icx.query; + let mut i = 0; + + while let Some(query) = current_query { + let query_info = + if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) { + info + } else { + break; + }; + let mut diag = Diagnostic::new( + Level::FailureNote, + &format!( + "#{} [{}] {}", + i, + query_info.info.query.name(), + query_info.info.query.describe(icx.tcx) + ), + ); + diag.span = icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into(); + handler.force_print_diagnostic(diag); + + current_query = query_info.job.parent; + i += 1; + } + } + }); + + eprintln!("end of query stack"); + } +} + +macro_rules! handle_cycle_error { + ([][$tcx: expr, $error:expr]) => {{ + $tcx.report_cycle($error).emit(); + Value::from_cycle_error($tcx) + }}; + ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{ + $tcx.report_cycle($error).emit(); + $tcx.sess.abort_if_errors(); + unreachable!() + }}; + ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{ + $tcx.report_cycle($error).delay_as_bug(); + Value::from_cycle_error($tcx) + }}; + ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { + handle_cycle_error!([$($($modifiers)*)*][$($args)*]) + }; +} + +macro_rules! is_anon { + ([]) => {{ + false + }}; + ([anon $($rest:tt)*]) => {{ + true + }}; + ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => { + is_anon!([$($($modifiers)*)*]) + }; +} + +macro_rules! is_eval_always { + ([]) => {{ + false + }}; + ([eval_always $($rest:tt)*]) => {{ + true + }}; + ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => { + is_eval_always!([$($($modifiers)*)*]) + }; +} + +macro_rules! 
query_storage { + (<$tcx:tt>[][$K:ty, $V:ty]) => { + <<$K as Key>::CacheSelector as CacheSelector, $K, $V>>::Cache + }; + (<$tcx:tt>[storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => { + $ty + }; + (<$tcx:tt>[$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { + query_storage!(<$tcx>[$($($modifiers)*)*][$($args)*]) + }; +} + +macro_rules! hash_result { + ([][$hcx:expr, $result:expr]) => {{ + dep_graph::hash_result($hcx, &$result) + }}; + ([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{ + None + }}; + ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { + hash_result!([$($($modifiers)*)*][$($args)*]) + }; +} + +macro_rules! define_queries { + (<$tcx:tt> $($category:tt { + $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)* + },)*) => { + define_queries_inner! { <$tcx> + $($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($K) -> $V,)*)* + } + } +} + +macro_rules! define_queries_inner { + (<$tcx:tt> + $($(#[$attr:meta])* category<$category:tt> + [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => { + + use std::mem; + use crate::{ + rustc_data_structures::stable_hasher::HashStable, + rustc_data_structures::stable_hasher::StableHasher, + ich::StableHashingContext + }; + use rustc_data_structures::profiling::ProfileCategory; + + define_queries_struct! { + tcx: $tcx, + input: ($(([$($modifiers)*] [$($attr)*] [$name]))*) + } + + #[allow(nonstandard_style)] + #[derive(Clone, Debug)] + pub enum Query<$tcx> { + $($(#[$attr])* $name($K)),* + } + + impl<$tcx> Query<$tcx> { + pub fn name(&self) -> &'static str { + match *self { + $(Query::$name(_) => stringify!($name),)* + } + } + + pub fn describe(&self, tcx: TyCtxt<$tcx>) -> Cow<'static, str> { + let (r, name) = match *self { + $(Query::$name(key) => { + (queries::$name::describe(tcx, key), stringify!($name)) + })* + }; + if tcx.sess.verbose() { + format!("{} [{}]", r, name).into() + } else { + r + } + } + + // FIXME(eddyb) Get more valid `Span`s on queries. + pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span { + if !span.is_dummy() { + return span; + } + // The `def_span` query is used to calculate `default_span`, + // so exit to avoid infinite recursion. + if let Query::def_span(..) 
= *self { + return span + } + match *self { + $(Query::$name(key) => key.default_span(tcx),)* + } + } + } + + impl<'a, $tcx> HashStable> for Query<$tcx> { + fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match *self { + $(Query::$name(key) => key.hash_stable(hcx, hasher),)* + } + } + } + + pub mod queries { + use std::marker::PhantomData; + + $(#[allow(nonstandard_style)] + pub struct $name<$tcx> { + data: PhantomData<&$tcx ()> + })* + } + + $(impl<$tcx> QueryConfig> for queries::$name<$tcx> { + type Key = $K; + type Value = $V; + const NAME: &'static str = stringify!($name); + const CATEGORY: ProfileCategory = $category; + } + + impl<$tcx> QueryAccessors> for queries::$name<$tcx> { + const ANON: bool = is_anon!([$($modifiers)*]); + const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]); + const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node; + + type Cache = query_storage!(<$tcx>[$($modifiers)*][$K, $V]); + + #[inline(always)] + fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState, Self::Cache> { + &tcx.queries.$name + } + + #[allow(unused)] + #[inline(always)] + fn to_dep_node(tcx: TyCtxt<$tcx>, key: &Self::Key) -> DepNode { + DepConstructor::$node(tcx, *key) + } + + #[inline] + fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value { + let provider = tcx.queries.providers.get(key.query_crate()) + // HACK(eddyb) it's possible crates may be loaded after + // the query engine is created, and because crate loading + // is not yet integrated with the query engine, such crates + // would be missing appropriate entries in `providers`. + .unwrap_or(&tcx.queries.fallback_extern_providers) + .$name; + provider(tcx, key) + } + + fn hash_result( + _hcx: &mut StableHashingContext<'_>, + _result: &Self::Value + ) -> Option { + hash_result!([$($modifiers)*][_hcx, _result]) + } + + fn handle_cycle_error( + tcx: TyCtxt<'tcx>, + error: CycleError> + ) -> Self::Value { + handle_cycle_error!([$($modifiers)*][tcx, error]) + } + })* + + #[derive(Copy, Clone)] + pub struct TyCtxtEnsure<'tcx> { + pub tcx: TyCtxt<'tcx>, + } + + impl TyCtxtEnsure<$tcx> { + $($(#[$attr])* + #[inline(always)] + pub fn $name(self, key: $K) { + self.tcx.ensure_query::>(key) + })* + } + + #[derive(Copy, Clone)] + pub struct TyCtxtAt<'tcx> { + pub tcx: TyCtxt<'tcx>, + pub span: Span, + } + + impl Deref for TyCtxtAt<'tcx> { + type Target = TyCtxt<'tcx>; + #[inline(always)] + fn deref(&self) -> &Self::Target { + &self.tcx + } + } + + impl TyCtxt<$tcx> { + /// Returns a transparent wrapper for `TyCtxt`, which ensures queries + /// are executed instead of just returning their results. + #[inline(always)] + pub fn ensure(self) -> TyCtxtEnsure<$tcx> { + TyCtxtEnsure { + tcx: self, + } + } + + /// Returns a transparent wrapper for `TyCtxt` which uses + /// `span` as the location of queries performed through it. + #[inline(always)] + pub fn at(self, span: Span) -> TyCtxtAt<$tcx> { + TyCtxtAt { + tcx: self, + span + } + } + + $($(#[$attr])* + #[inline(always)] + pub fn $name(self, key: $K) -> $V { + self.at(DUMMY_SP).$name(key) + })* + + /// All self-profiling events generated by the query engine use + /// virtual `StringId`s for their `event_id`. This method makes all + /// those virtual `StringId`s point to actual strings. + /// + /// If we are recording only summary data, the ids will point to + /// just the query names. If we are recording query keys too, we + /// allocate the corresponding strings here. 
+ pub fn alloc_self_profile_query_strings(self) { + use crate::ty::query::profiling_support::{ + alloc_self_profile_query_strings_for_query_cache, + QueryKeyStringCache, + }; + + if !self.prof.enabled() { + return; + } + + let mut string_cache = QueryKeyStringCache::new(); + + $({ + alloc_self_profile_query_strings_for_query_cache( + self, + stringify!($name), + &self.queries.$name, + &mut string_cache, + ); + })* + } + } + + impl TyCtxtAt<$tcx> { + $($(#[$attr])* + #[inline(always)] + pub fn $name(self, key: $K) -> $V { + self.tcx.get_query::>(self.span, key) + })* + } + + define_provider_struct! { + tcx: $tcx, + input: ($(([$($modifiers)*] [$name] [$K] [$V]))*) + } + + impl<$tcx> Copy for Providers<$tcx> {} + impl<$tcx> Clone for Providers<$tcx> { + fn clone(&self) -> Self { *self } + } + } +} + +macro_rules! define_queries_struct { + (tcx: $tcx:tt, + input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => { + pub struct Queries<$tcx> { + /// This provides access to the incrimental comilation on-disk cache for query results. + /// Do not access this directly. It is only meant to be used by + /// `DepGraph::try_mark_green()` and the query infrastructure. + pub(crate) on_disk_cache: OnDiskCache<'tcx>, + + providers: IndexVec>, + fallback_extern_providers: Box>, + + $($(#[$attr])* $name: QueryState< + TyCtxt<$tcx>, + as QueryAccessors>>::Cache, + >,)* + } + + impl<$tcx> Queries<$tcx> { + pub(crate) fn new( + providers: IndexVec>, + fallback_extern_providers: Providers<$tcx>, + on_disk_cache: OnDiskCache<'tcx>, + ) -> Self { + Queries { + providers, + fallback_extern_providers: Box::new(fallback_extern_providers), + on_disk_cache, + $($name: Default::default()),* + } + } + + pub(crate) fn try_collect_active_jobs( + &self + ) -> Option, QueryJobInfo>>> { + let mut jobs = FxHashMap::default(); + + $( + self.$name.try_collect_active_jobs( + as QueryAccessors>>::DEP_KIND, + Query::$name, + &mut jobs, + )?; + )* + + Some(jobs) + } + } + }; +} + +macro_rules! 
define_provider_struct { + (tcx: $tcx:tt, + input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => { + pub struct Providers<$tcx> { + $(pub $name: fn(TyCtxt<$tcx>, $K) -> $R,)* + } + + impl<$tcx> Default for Providers<$tcx> { + fn default() -> Self { + $(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R { + bug!("`tcx.{}({:?})` unsupported by its crate", + stringify!($name), key); + })* + Providers { $($name),* } + } + } + }; +} diff --git a/src/librustc/ty/query/profiling_support.rs b/src/librustc/ty/query/profiling_support.rs index 616fbaafab9..d7972045d12 100644 --- a/src/librustc/ty/query/profiling_support.rs +++ b/src/librustc/ty/query/profiling_support.rs @@ -1,11 +1,11 @@ use crate::ty::context::TyCtxt; -use crate::ty::query::caches::QueryCache; -use crate::ty::query::plumbing::QueryState; use measureme::{StringComponent, StringId}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::profiling::SelfProfiler; use rustc_hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE}; use rustc_hir::definitions::DefPathData; +use rustc_query_system::query::QueryCache; +use rustc_query_system::query::QueryState; use std::fmt::Debug; use std::io::Write; diff --git a/src/librustc/ty/query/stats.rs b/src/librustc/ty/query/stats.rs index a13f00dc6d4..12e9094fba6 100644 --- a/src/librustc/ty/query/stats.rs +++ b/src/librustc/ty/query/stats.rs @@ -1,9 +1,9 @@ -use crate::ty::query::caches::QueryCache; -use crate::ty::query::config::{QueryAccessors, QueryContext}; -use crate::ty::query::plumbing::QueryState; use crate::ty::query::queries; use crate::ty::TyCtxt; use rustc_hir::def_id::{DefId, LOCAL_CRATE}; +use rustc_query_system::query::QueryCache; +use rustc_query_system::query::QueryState; +use rustc_query_system::query::{QueryAccessors, QueryContext}; use std::any::type_name; use std::mem; From 301ad11e9b6d33594a08aade7332af3b40f2d7b2 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 26 Mar 2020 09:40:50 +0100 Subject: [PATCH 18/31] Rustfmt. 
--- src/librustc/dep_graph/mod.rs | 6 +- src/librustc/ty/query/plumbing.rs | 3 +- src/librustc_query_system/dep_graph/mod.rs | 6 +- src/librustc_query_system/query/job.rs | 2 +- src/librustc_query_system/query/mod.rs | 2 +- src/librustc_query_system/query/plumbing.rs | 508 ++++++++++---------- 6 files changed, 261 insertions(+), 266 deletions(-) diff --git a/src/librustc/dep_graph/mod.rs b/src/librustc/dep_graph/mod.rs index 556b1479b61..4d9d439c526 100644 --- a/src/librustc/dep_graph/mod.rs +++ b/src/librustc/dep_graph/mod.rs @@ -166,7 +166,11 @@ impl<'tcx> DepContext for TyCtxt<'tcx> { self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics) } - fn store_diagnostics_for_anon_node(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec) { + fn store_diagnostics_for_anon_node( + &self, + dep_node_index: DepNodeIndex, + diagnostics: ThinVec, + ) { self.queries.on_disk_cache.store_diagnostics_for_anon_node(dep_node_index, diagnostics) } diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index ef60ac893d2..fb699c6fae0 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -149,7 +149,8 @@ impl<'tcx> TyCtxt<'tcx> { query_info.info.query.describe(icx.tcx) ), ); - diag.span = icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into(); + diag.span = + icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into(); handler.force_print_diagnostic(diag); current_query = query_info.job.parent; diff --git a/src/librustc_query_system/dep_graph/mod.rs b/src/librustc_query_system/dep_graph/mod.rs index ca4377e783d..2faca546213 100644 --- a/src/librustc_query_system/dep_graph/mod.rs +++ b/src/librustc_query_system/dep_graph/mod.rs @@ -49,7 +49,11 @@ pub trait DepContext: Copy + DepGraphSafe { fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec); /// Register diagnostics for the given node, for use in next session. - fn store_diagnostics_for_anon_node(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec); + fn store_diagnostics_for_anon_node( + &self, + dep_node_index: DepNodeIndex, + diagnostics: ThinVec, + ); /// Access the profiler. fn profiler(&self) -> &SelfProfilerRef; diff --git a/src/librustc_query_system/query/job.rs b/src/librustc_query_system/query/job.rs index 9068760d323..92ab97f210a 100644 --- a/src/librustc_query_system/query/job.rs +++ b/src/librustc_query_system/query/job.rs @@ -1,4 +1,4 @@ -use crate::dep_graph::{DepKind, DepContext}; +use crate::dep_graph::{DepContext, DepKind}; use crate::query::config::QueryContext; use crate::query::plumbing::CycleError; diff --git a/src/librustc_query_system/query/mod.rs b/src/librustc_query_system/query/mod.rs index 0b8ad5c16a5..9d0a6665eac 100644 --- a/src/librustc_query_system/query/mod.rs +++ b/src/librustc_query_system/query/mod.rs @@ -2,9 +2,9 @@ mod plumbing; pub use self::plumbing::*; mod job; -pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo}; #[cfg(parallel_compiler)] pub use self::job::deadlock; +pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo}; mod caches; pub use self::caches::{CacheSelector, DefaultCacheSelector, QueryCache}; diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index 0bae613fcfb..6fd86d65c1d 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -2,7 +2,7 @@ //! generate the actual methods on tcx which find and execute the provider, //! 
manage the caches, and so forth. -use crate::dep_graph::{DepKind, DepContext, DepNode}; +use crate::dep_graph::{DepContext, DepKind, DepNode}; use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; use crate::query::caches::QueryCache; use crate::query::config::{QueryContext, QueryDescription}; @@ -351,285 +351,275 @@ where Cycle(C::Value), } - /// Checks if the query is already computed and in the cache. - /// It returns the shard index and a lock guard to the shard, - /// which will be used if the query is not in the cache and we need - /// to compute it. - #[inline(always)] - fn try_get_cached( - tcx: CTX, - state: &QueryState, - key: C::Key, - // `on_hit` can be called while holding a lock to the query cache - on_hit: OnHit, - on_miss: OnMiss, - ) -> R - where - C: QueryCache, - CTX: QueryContext, - OnHit: FnOnce(&C::Value, DepNodeIndex) -> R, - OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R, - { - state.cache.lookup( - state, - QueryStateShard::::get_cache, - key, - |value, index| { - if unlikely!(tcx.profiler().enabled()) { - tcx.profiler().query_cache_hit(index.into()); - } - #[cfg(debug_assertions)] - { - state.cache_hits.fetch_add(1, Ordering::Relaxed); - } - on_hit(value, index) - }, - on_miss, - ) - } - - #[inline(always)] - fn try_execute_query( - tcx: CTX, - span: Span, - key: Q::Key, - lookup: QueryLookup< - '_, - CTX, - Q::Key, - >::Sharded, - >, - ) -> Q::Value - where - Q: QueryDescription, - CTX: QueryContext, - CTX: HashStableContextProvider<::StableHashingContext>, - K: DepKind, - { - let job = match JobOwner::try_start::(tcx, span, &key, lookup) { - TryGetJob::NotYetStarted(job) => job, - TryGetJob::Cycle(result) => return result, - #[cfg(parallel_compiler)] - TryGetJob::JobCompleted((v, index)) => { - tcx.dep_graph().read_index(index); - return v; +/// Checks if the query is already computed and in the cache. +/// It returns the shard index and a lock guard to the shard, +/// which will be used if the query is not in the cache and we need +/// to compute it. +#[inline(always)] +fn try_get_cached( + tcx: CTX, + state: &QueryState, + key: C::Key, + // `on_hit` can be called while holding a lock to the query cache + on_hit: OnHit, + on_miss: OnMiss, +) -> R +where + C: QueryCache, + CTX: QueryContext, + OnHit: FnOnce(&C::Value, DepNodeIndex) -> R, + OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R, +{ + state.cache.lookup( + state, + QueryStateShard::::get_cache, + key, + |value, index| { + if unlikely!(tcx.profiler().enabled()) { + tcx.profiler().query_cache_hit(index.into()); } - }; - - // Fast path for when incr. comp. is off. `to_dep_node` is - // expensive for some `DepKind`s. 
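Stripped of sharding, locking and profiling, the control flow of `try_get_cached` hands off to exactly one of two continuations: `on_hit` borrows the cached value in place, while `on_miss` receives the key back so the caller can compute and store the result. A self-contained sketch over a plain `HashMap`, not the real `QueryCache` API:

    use std::collections::HashMap;
    use std::hash::Hash;

    // Exactly one of the two continuations runs: `on_hit` for a cached
    // value, `on_miss` when the caller still has to compute it.
    fn lookup_or_compute<K, V, R>(
        cache: &mut HashMap<K, V>,
        key: K,
        on_hit: impl FnOnce(&V) -> R,
        on_miss: impl FnOnce(K, &mut HashMap<K, V>) -> R,
    ) -> R
    where
        K: Hash + Eq,
    {
        match cache.get(&key) {
            Some(value) => on_hit(value),
            None => on_miss(key, cache),
        }
    }

    fn main() {
        let mut cache: HashMap<&str, usize> = HashMap::new();
        let len = lookup_or_compute(
            &mut cache,
            "hello",
            |hit| *hit,
            |key, cache| {
                let computed = key.len();
                cache.insert(key, computed);
                computed
            },
        );
        assert_eq!(len, 5);
    }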
- if !tcx.dep_graph().is_fully_enabled() { - let null_dep_node = DepNode::new_no_params(DepKind::NULL); - return force_query_with_job::(tcx, key, job, null_dep_node).0; - } - - if Q::ANON { - let prof_timer = tcx.profiler().query_provider(); - - let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| { - tcx.start_query(job.id, diagnostics, |tcx| { - tcx.dep_graph().with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key)) - }) - }); - - prof_timer.finish_with_query_invocation_id(dep_node_index.into()); - - tcx.dep_graph().read_index(dep_node_index); - - if unlikely!(!diagnostics.is_empty()) { - tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics); + #[cfg(debug_assertions)] + { + state.cache_hits.fetch_add(1, Ordering::Relaxed); } - - job.complete(tcx, &result, dep_node_index); - - return result; - } - - let dep_node = Q::to_dep_node(tcx, &key); - - if !Q::EVAL_ALWAYS { - // The diagnostics for this query will be - // promoted to the current session during - // `try_mark_green()`, so we can ignore them here. - let loaded = tcx.start_query(job.id, None, |tcx| { - let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node); - marked.map(|(prev_dep_node_index, dep_node_index)| { - ( - load_from_disk_and_cache_in_memory::( - tcx, - key.clone(), - prev_dep_node_index, - dep_node_index, - &dep_node, - ), - dep_node_index, - ) - }) - }); - if let Some((result, dep_node_index)) = loaded { - job.complete(tcx, &result, dep_node_index); - return result; - } - } - - let (result, dep_node_index) = force_query_with_job::(tcx, key, job, dep_node); - tcx.dep_graph().read_index(dep_node_index); - result - } - - fn load_from_disk_and_cache_in_memory( - tcx: CTX, - key: Q::Key, - prev_dep_node_index: SerializedDepNodeIndex, - dep_node_index: DepNodeIndex, - dep_node: &DepNode, - ) -> Q::Value - where - CTX: QueryContext, - Q: QueryDescription, - { - // Note this function can be called concurrently from the same query - // We must ensure that this is handled correctly. - - debug_assert!(tcx.dep_graph().is_green(dep_node)); - - // First we try to load the result from the on-disk cache. - let result = if Q::cache_on_disk(tcx, key.clone(), None) { - let prof_timer = tcx.profiler().incr_cache_loading(); - let result = Q::try_load_from_disk(tcx, prev_dep_node_index); - prof_timer.finish_with_query_invocation_id(dep_node_index.into()); - - // We always expect to find a cached result for things that - // can be forced from `DepNode`. - debug_assert!( - !dep_node.kind.can_reconstruct_query_key() || result.is_some(), - "missing on-disk cache entry for {:?}", - dep_node - ); - result - } else { - // Some things are never cached on disk. - None - }; - - let result = if let Some(result) = result { - result - } else { - // We could not load a result from the on-disk cache, so - // recompute. - let prof_timer = tcx.profiler().query_provider(); - - // The dep-graph for this computation is already in-place. - let result = tcx.dep_graph().with_ignore(|| Q::compute(tcx, key)); - - prof_timer.finish_with_query_invocation_id(dep_node_index.into()); - - result - }; - - // If `-Zincremental-verify-ich` is specified, re-hash results from - // the cache and make sure that they have the expected fingerprint. 
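The recovery path reformatted here prefers a result serialized by the previous session and only recomputes when nothing was cached on disk. The same decision, reduced to two closures standing in for the query's `try_load_from_disk` and `compute` hooks:

    // Prefer the value from the on-disk cache of the previous session and
    // fall back to running the provider when nothing was serialized
    // (some queries are never cached on disk at all).
    fn load_or_recompute<V>(
        try_load_from_disk: impl FnOnce() -> Option<V>,
        compute: impl FnOnce() -> V,
    ) -> V {
        match try_load_from_disk() {
            Some(cached) => cached,
            None => compute(),
        }
    }

    fn main() {
        // Simulate a disk hit and a disk miss.
        assert_eq!(load_or_recompute(|| Some(3), || 7), 3);
        assert_eq!(load_or_recompute(|| None, || 7), 7);
    }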
- if unlikely!(tcx.session().opts.debugging_opts.incremental_verify_ich) { - incremental_verify_ich::(tcx, &result, dep_node, dep_node_index); - } - - result - } - - #[inline(never)] - #[cold] - fn incremental_verify_ich( - tcx: CTX, - result: &Q::Value, - dep_node: &DepNode, - dep_node_index: DepNodeIndex, + on_hit(value, index) + }, + on_miss, ) - where - CTX: QueryContext, - Q: QueryDescription, - { - assert!( - Some(tcx.dep_graph().fingerprint_of(dep_node_index)) - == tcx.dep_graph().prev_fingerprint_of(dep_node), - "fingerprint for green query instance not loaded from cache: {:?}", - dep_node, - ); +} - debug!("BEGIN verify_ich({:?})", dep_node); - let mut hcx = tcx.create_stable_hashing_context(); +#[inline(always)] +fn try_execute_query( + tcx: CTX, + span: Span, + key: Q::Key, + lookup: QueryLookup<'_, CTX, Q::Key, >::Sharded>, +) -> Q::Value +where + Q: QueryDescription, + CTX: QueryContext, + CTX: HashStableContextProvider<::StableHashingContext>, + K: DepKind, +{ + let job = match JobOwner::try_start::(tcx, span, &key, lookup) { + TryGetJob::NotYetStarted(job) => job, + TryGetJob::Cycle(result) => return result, + #[cfg(parallel_compiler)] + TryGetJob::JobCompleted((v, index)) => { + tcx.dep_graph().read_index(index); + return v; + } + }; - let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO); - debug!("END verify_ich({:?})", dep_node); - - let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index); - - assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,); + // Fast path for when incr. comp. is off. `to_dep_node` is + // expensive for some `DepKind`s. + if !tcx.dep_graph().is_fully_enabled() { + let null_dep_node = DepNode::new_no_params(DepKind::NULL); + return force_query_with_job::(tcx, key, job, null_dep_node).0; } - #[inline(always)] - fn force_query_with_job( - tcx: CTX, - key: Q::Key, - job: JobOwner<'_, CTX, Q::Cache>, - dep_node: DepNode, - ) -> (Q::Value, DepNodeIndex) - where - Q: QueryDescription, - CTX: QueryContext, - CTX: HashStableContextProvider<::StableHashingContext>, - K: DepKind, - { - // If the following assertion triggers, it can have two reasons: - // 1. Something is wrong with DepNode creation, either here or - // in `DepGraph::try_mark_green()`. - // 2. Two distinct query keys get mapped to the same `DepNode` - // (see for example #48923). 
- assert!( - !tcx.dep_graph().dep_node_exists(&dep_node), - "forcing query with already existing `DepNode`\n\ - - query-key: {:?}\n\ - - dep-node: {:?}", - key, - dep_node - ); - + if Q::ANON { let prof_timer = tcx.profiler().query_provider(); let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| { tcx.start_query(job.id, diagnostics, |tcx| { - if Q::EVAL_ALWAYS { - tcx.dep_graph().with_eval_always_task( - dep_node, - tcx, - key, - Q::compute, - Q::hash_result, - ) - } else { - tcx.dep_graph().with_task(dep_node, tcx, key, Q::compute, Q::hash_result) - } + tcx.dep_graph().with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key)) }) }); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); + tcx.dep_graph().read_index(dep_node_index); + if unlikely!(!diagnostics.is_empty()) { - if dep_node.kind != DepKind::NULL { - tcx.store_diagnostics(dep_node_index, diagnostics); - } + tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics); } job.complete(tcx, &result, dep_node_index); - (result, dep_node_index) + return result; } + let dep_node = Q::to_dep_node(tcx, &key); + + if !Q::EVAL_ALWAYS { + // The diagnostics for this query will be + // promoted to the current session during + // `try_mark_green()`, so we can ignore them here. + let loaded = tcx.start_query(job.id, None, |tcx| { + let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node); + marked.map(|(prev_dep_node_index, dep_node_index)| { + ( + load_from_disk_and_cache_in_memory::( + tcx, + key.clone(), + prev_dep_node_index, + dep_node_index, + &dep_node, + ), + dep_node_index, + ) + }) + }); + if let Some((result, dep_node_index)) = loaded { + job.complete(tcx, &result, dep_node_index); + return result; + } + } + + let (result, dep_node_index) = force_query_with_job::(tcx, key, job, dep_node); + tcx.dep_graph().read_index(dep_node_index); + result +} + +fn load_from_disk_and_cache_in_memory( + tcx: CTX, + key: Q::Key, + prev_dep_node_index: SerializedDepNodeIndex, + dep_node_index: DepNodeIndex, + dep_node: &DepNode, +) -> Q::Value +where + CTX: QueryContext, + Q: QueryDescription, +{ + // Note this function can be called concurrently from the same query + // We must ensure that this is handled correctly. + + debug_assert!(tcx.dep_graph().is_green(dep_node)); + + // First we try to load the result from the on-disk cache. + let result = if Q::cache_on_disk(tcx, key.clone(), None) { + let prof_timer = tcx.profiler().incr_cache_loading(); + let result = Q::try_load_from_disk(tcx, prev_dep_node_index); + prof_timer.finish_with_query_invocation_id(dep_node_index.into()); + + // We always expect to find a cached result for things that + // can be forced from `DepNode`. + debug_assert!( + !dep_node.kind.can_reconstruct_query_key() || result.is_some(), + "missing on-disk cache entry for {:?}", + dep_node + ); + result + } else { + // Some things are never cached on disk. + None + }; + + let result = if let Some(result) = result { + result + } else { + // We could not load a result from the on-disk cache, so + // recompute. + let prof_timer = tcx.profiler().query_provider(); + + // The dep-graph for this computation is already in-place. + let result = tcx.dep_graph().with_ignore(|| Q::compute(tcx, key)); + + prof_timer.finish_with_query_invocation_id(dep_node_index.into()); + + result + }; + + // If `-Zincremental-verify-ich` is specified, re-hash results from + // the cache and make sure that they have the expected fingerprint. 
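The `-Zincremental-verify-ich` check mentioned above re-hashes the value loaded from the incremental cache and insists that it matches the fingerprint recorded for the same dep node in the previous session; a mismatch means the query result is not stable across runs. The sketch below shows only that control flow using std's `DefaultHasher`; the real code hashes through `Q::hash_result` with a `StableHasher` and compares `Fingerprint`s:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Re-hash a value and compare it against the fingerprint recorded for
    // it earlier; panic if the result turned out not to be stable.
    fn verify_fingerprint<T: Hash + std::fmt::Debug>(result: &T, expected_fingerprint: u64) {
        let mut hasher = DefaultHasher::new();
        result.hash(&mut hasher);
        let new_fingerprint = hasher.finish();

        assert_eq!(
            new_fingerprint, expected_fingerprint,
            "found unstable fingerprint for {:?}",
            result
        );
    }

    fn main() {
        // Record a fingerprint for a value, then verify it the way a later
        // session would.
        let result = vec![1u32, 2, 3];
        let mut hasher = DefaultHasher::new();
        result.hash(&mut hasher);
        let recorded = hasher.finish();

        verify_fingerprint(&result, recorded);
    }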
+ if unlikely!(tcx.session().opts.debugging_opts.incremental_verify_ich) { + incremental_verify_ich::(tcx, &result, dep_node, dep_node_index); + } + + result +} + +#[inline(never)] +#[cold] +fn incremental_verify_ich( + tcx: CTX, + result: &Q::Value, + dep_node: &DepNode, + dep_node_index: DepNodeIndex, +) where + CTX: QueryContext, + Q: QueryDescription, +{ + assert!( + Some(tcx.dep_graph().fingerprint_of(dep_node_index)) + == tcx.dep_graph().prev_fingerprint_of(dep_node), + "fingerprint for green query instance not loaded from cache: {:?}", + dep_node, + ); + + debug!("BEGIN verify_ich({:?})", dep_node); + let mut hcx = tcx.create_stable_hashing_context(); + + let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO); + debug!("END verify_ich({:?})", dep_node); + + let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index); + + assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,); +} + +#[inline(always)] +fn force_query_with_job( + tcx: CTX, + key: Q::Key, + job: JobOwner<'_, CTX, Q::Cache>, + dep_node: DepNode, +) -> (Q::Value, DepNodeIndex) +where + Q: QueryDescription, + CTX: QueryContext, + CTX: HashStableContextProvider<::StableHashingContext>, + K: DepKind, +{ + // If the following assertion triggers, it can have two reasons: + // 1. Something is wrong with DepNode creation, either here or + // in `DepGraph::try_mark_green()`. + // 2. Two distinct query keys get mapped to the same `DepNode` + // (see for example #48923). + assert!( + !tcx.dep_graph().dep_node_exists(&dep_node), + "forcing query with already existing `DepNode`\n\ + - query-key: {:?}\n\ + - dep-node: {:?}", + key, + dep_node + ); + + let prof_timer = tcx.profiler().query_provider(); + + let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| { + tcx.start_query(job.id, diagnostics, |tcx| { + if Q::EVAL_ALWAYS { + tcx.dep_graph().with_eval_always_task( + dep_node, + tcx, + key, + Q::compute, + Q::hash_result, + ) + } else { + tcx.dep_graph().with_task(dep_node, tcx, key, Q::compute, Q::hash_result) + } + }) + }); + + prof_timer.finish_with_query_invocation_id(dep_node_index.into()); + + if unlikely!(!diagnostics.is_empty()) { + if dep_node.kind != DepKind::NULL { + tcx.store_diagnostics(dep_node_index, diagnostics); + } + } + + job.complete(tcx, &result, dep_node_index); + + (result, dep_node_index) +} + pub trait QueryGetter: QueryContext { - fn get_query>( - self, - span: Span, - key: Q::Key, - ) -> Q::Value; + fn get_query>(self, span: Span, key: Q::Key) -> Q::Value; /// Ensure that either this query has all green inputs or been executed. /// Executing `query::ensure(D)` is considered a read of the dep-node `D`. @@ -655,11 +645,7 @@ where K: DepKind, { #[inline(never)] - fn get_query>( - self, - span: Span, - key: Q::Key, - ) -> Q::Value { + fn get_query>(self, span: Span, key: Q::Key) -> Q::Value { debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span); try_get_cached( From d305b2ccc61c6faf6c10271122fcf3a76a94a5bf Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Tue, 24 Mar 2020 09:27:29 +0100 Subject: [PATCH 19/31] Unify key types in get_lookup. 
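`QueryState::get_lookup` used to accept any `K2: Hash`, even though its callers pass the cache's own key type anyway. Require `Hash` on `QueryCache::Key` instead and take `&C::Key` directly, so the lookup key type is fixed by the cache implementation.

A minimal sketch of the pattern, using hypothetical `Cache`/`State` types rather than the real query machinery:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    trait Cache {
        // The bound moves onto the associated type...
        type Key: Hash;
    }

    struct State<C: Cache> {
        cache: C,
    }

    impl<C: Cache> State<C> {
        // ...so the lookup no longer needs its own `K2: Hash` parameter.
        fn get_lookup(&self, key: &C::Key) -> u64 {
            let mut hasher = DefaultHasher::new();
            key.hash(&mut hasher);
            hasher.finish()
        }
    }
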
--- src/librustc_query_system/query/caches.rs | 4 ++-- src/librustc_query_system/query/plumbing.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/librustc_query_system/query/caches.rs b/src/librustc_query_system/query/caches.rs index efde51c4db6..400e6fe84a8 100644 --- a/src/librustc_query_system/query/caches.rs +++ b/src/librustc_query_system/query/caches.rs @@ -8,12 +8,12 @@ use std::default::Default; use std::hash::Hash; use std::marker::PhantomData; -pub trait CacheSelector { +pub trait CacheSelector { type Cache: QueryCache; } pub trait QueryCache: Default { - type Key; + type Key: Hash; type Value; type Sharded: Default; diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index 6fd86d65c1d..dbe7b9c385d 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -57,9 +57,9 @@ pub struct QueryState> { } impl> QueryState { - pub(super) fn get_lookup<'tcx, K2: Hash>( + pub(super) fn get_lookup<'tcx>( &'tcx self, - key: &K2, + key: &C::Key, ) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> { // We compute the key's hash once and then use it for both the // shard lookup and the hashmap lookup. This relies on the fact From 228ca8ef0a2087d5000aa28f821f31c0d675be1f Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Tue, 24 Mar 2020 09:30:13 +0100 Subject: [PATCH 20/31] Access QueryStateShard directly. --- src/librustc_query_system/query/caches.rs | 15 ++++----------- src/librustc_query_system/query/plumbing.rs | 9 +-------- 2 files changed, 5 insertions(+), 19 deletions(-) diff --git a/src/librustc_query_system/query/caches.rs b/src/librustc_query_system/query/caches.rs index 400e6fe84a8..f79aa992fd2 100644 --- a/src/librustc_query_system/query/caches.rs +++ b/src/librustc_query_system/query/caches.rs @@ -1,6 +1,6 @@ use crate::dep_graph::DepNodeIndex; use crate::query::config::QueryContext; -use crate::query::plumbing::{QueryLookup, QueryState, QueryStateShard}; +use crate::query::plumbing::{QueryLookup, QueryState}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sharded::Sharded; @@ -21,19 +21,15 @@ pub trait QueryCache: Default { /// It returns the shard index and a lock guard to the shard, /// which will be used if the query is not in the cache and we need /// to compute it. - fn lookup( + fn lookup( &self, state: &QueryState, - get_cache: GetCache, key: Self::Key, // `on_hit` can be called while holding a lock to the query state shard. 
on_hit: OnHit, on_miss: OnMiss, ) -> R where - GetCache: for<'a> Fn( - &'a mut QueryStateShard, - ) -> &'a mut Self::Sharded, OnHit: FnOnce(&Self::Value, DepNodeIndex) -> R, OnMiss: FnOnce(Self::Key, QueryLookup<'_, CTX, Self::Key, Self::Sharded>) -> R; @@ -76,24 +72,21 @@ impl QueryCache for DefaultCache type Sharded = FxHashMap; #[inline(always)] - fn lookup( + fn lookup( &self, state: &QueryState, - get_cache: GetCache, key: K, on_hit: OnHit, on_miss: OnMiss, ) -> R where - GetCache: - for<'a> Fn(&'a mut QueryStateShard) -> &'a mut Self::Sharded, OnHit: FnOnce(&V, DepNodeIndex) -> R, OnMiss: FnOnce(K, QueryLookup<'_, CTX, K, Self::Sharded>) -> R, { let mut lookup = state.get_lookup(&key); let lock = &mut *lookup.lock; - let result = get_cache(lock).raw_entry().from_key_hashed_nocheck(lookup.key_hash, &key); + let result = lock.cache.raw_entry().from_key_hashed_nocheck(lookup.key_hash, &key); if let Some((_, value)) = result { on_hit(&value.0, value.1) } else { on_miss(key, lookup) } } diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index dbe7b9c385d..cf23467cf99 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -30,19 +30,13 @@ use std::ptr; use std::sync::atomic::{AtomicUsize, Ordering}; pub struct QueryStateShard { - cache: C, + pub(super) cache: C, active: FxHashMap>, /// Used to generate unique ids for active jobs. jobs: u32, } -impl QueryStateShard { - fn get_cache(&mut self) -> &mut C { - &mut self.cache - } -} - impl Default for QueryStateShard { fn default() -> QueryStateShard { QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 } @@ -372,7 +366,6 @@ where { state.cache.lookup( state, - QueryStateShard::::get_cache, key, |value, index| { if unlikely!(tcx.profiler().enabled()) { From b6033fca02d87955ac7f391cdcbeb4707d2c8f02 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Tue, 24 Mar 2020 20:09:06 +0100 Subject: [PATCH 21/31] Retire DepGraphSafe and HashStableContext. 
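`with_task`, `with_eval_always_task` and `with_task_impl` no longer accept an arbitrary `C: DepGraphSafe + HashStableContextProvider<H>`; they take a `Ctxt: DepContext<DepKind = K>` and obtain the hashing context through `Ctxt::create_stable_hashing_context()`. With the context threaded through a single trait, the `DepGraphSafe` marker trait and the `HashStableContextProvider` indirection have no remaining users and are deleted.

Roughly the shape of the change, as an illustrative sketch rather than the real signatures:

    // Before: two ad-hoc bounds on the task context.
    //   fn with_task<C, A, R, H>(cx: C, ...) -> R
    //   where C: DepGraphSafe + HashStableContextProvider<H>, H: HashStableContext
    //
    // After: one context trait with an associated hashing type.
    trait DepContext: Copy {
        type StableHashingContext;
        fn create_stable_hashing_context(&self) -> Self::StableHashingContext;
    }

    fn with_task<Ctxt: DepContext, A, R>(cx: Ctxt, arg: A, task: fn(Ctxt, A) -> R) -> R {
        // The context itself now provides the hashing context.
        let _hcx = cx.create_stable_hashing_context();
        task(cx, arg)
    }
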
--- src/librustc/dep_graph/mod.rs | 17 ------- src/librustc/dep_graph/safe.rs | 9 ---- src/librustc/ich/hcx.rs | 2 - src/librustc_codegen_llvm/context.rs | 3 -- src/librustc_query_system/dep_graph/graph.rs | 47 +++++++----------- src/librustc_query_system/dep_graph/mod.rs | 5 +- src/librustc_query_system/dep_graph/safe.rs | 51 -------------------- src/librustc_query_system/lib.rs | 17 ------- src/librustc_query_system/query/plumbing.rs | 29 +++++------ 9 files changed, 29 insertions(+), 151 deletions(-) delete mode 100644 src/librustc/dep_graph/safe.rs delete mode 100644 src/librustc_query_system/dep_graph/safe.rs diff --git a/src/librustc/dep_graph/mod.rs b/src/librustc/dep_graph/mod.rs index 4d9d439c526..de94b6b1850 100644 --- a/src/librustc/dep_graph/mod.rs +++ b/src/librustc/dep_graph/mod.rs @@ -8,7 +8,6 @@ use rustc_errors::Diagnostic; use rustc_hir::def_id::DefId; mod dep_node; -mod safe; pub(crate) use rustc_query_system::dep_graph::DepNodeParams; pub use rustc_query_system::dep_graph::{ @@ -17,8 +16,6 @@ pub use rustc_query_system::dep_graph::{ }; pub use dep_node::{label_strs, DepConstructor, DepKind, DepNode, DepNodeExt}; -pub use safe::AssertDepGraphSafe; -pub use safe::DepGraphSafe; pub type DepGraph = rustc_query_system::dep_graph::DepGraph; pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps; @@ -189,17 +186,3 @@ impl rustc_query_system::HashStableContext for StableHashingContext<'_> { self.sess().opts.debugging_opts.dep_tasks } } - -impl rustc_query_system::HashStableContextProvider> for TyCtxt<'tcx> { - fn get_stable_hashing_context(&self) -> StableHashingContext<'tcx> { - self.create_stable_hashing_context() - } -} - -impl rustc_query_system::HashStableContextProvider> - for StableHashingContext<'a> -{ - fn get_stable_hashing_context(&self) -> Self { - self.clone() - } -} diff --git a/src/librustc/dep_graph/safe.rs b/src/librustc/dep_graph/safe.rs deleted file mode 100644 index 47a1c09672f..00000000000 --- a/src/librustc/dep_graph/safe.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! The `DepGraphSafe` trait - -use crate::ty::TyCtxt; - -pub use rustc_query_system::dep_graph::{AssertDepGraphSafe, DepGraphSafe}; - -/// The type context itself can be used to access all kinds of tracked -/// state, but those accesses should always generate read events. 
-impl<'tcx> DepGraphSafe for TyCtxt<'tcx> {} diff --git a/src/librustc/ich/hcx.rs b/src/librustc/ich/hcx.rs index a9466e8252d..ac3faae072b 100644 --- a/src/librustc/ich/hcx.rs +++ b/src/librustc/ich/hcx.rs @@ -194,8 +194,6 @@ impl<'a> StableHashingContextProvider<'a> for StableHashingContext<'a> { } } -impl<'a> crate::dep_graph::DepGraphSafe for StableHashingContext<'a> {} - impl<'a> HashStable> for ast::NodeId { fn hash_stable(&self, _: &mut StableHashingContext<'a>, _: &mut StableHasher) { panic!("Node IDs should not appear in incremental state"); diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 92f59feef90..7e87f45ba4b 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -7,7 +7,6 @@ use crate::type_::Type; use crate::value::Value; use rustc::bug; -use rustc::dep_graph::DepGraphSafe; use rustc::mir::mono::CodegenUnit; use rustc::ty::layout::{ HasParamEnv, LayoutError, LayoutOf, PointeeInfo, Size, TyLayout, VariantIdx, @@ -90,8 +89,6 @@ pub struct CodegenCx<'ll, 'tcx> { local_gen_sym_counter: Cell, } -impl<'ll, 'tcx> DepGraphSafe for CodegenCx<'ll, 'tcx> {} - pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { let reloc_model_arg = match sess.opts.cg.relocation_model { Some(ref s) => &s[..], diff --git a/src/librustc_query_system/dep_graph/graph.rs b/src/librustc_query_system/dep_graph/graph.rs index 7352551559c..60c5dcda425 100644 --- a/src/librustc_query_system/dep_graph/graph.rs +++ b/src/librustc_query_system/dep_graph/graph.rs @@ -20,10 +20,9 @@ use std::sync::atomic::Ordering::Relaxed; use super::debug::EdgeFilter; use super::prev::PreviousDepGraph; use super::query::DepGraphQuery; -use super::safe::DepGraphSafe; use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex}; use super::{DepContext, DepKind, DepNode, WorkProductId}; -use crate::{HashStableContext, HashStableContextProvider}; +use crate::HashStableContext; #[derive(Clone)] pub struct DepGraph { @@ -191,18 +190,14 @@ impl DepGraph { /// `arg` parameter. /// /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html - pub fn with_task( + pub fn with_task, A, R>( &self, key: DepNode, - cx: C, + cx: Ctxt, arg: A, - task: fn(C, A) -> R, - hash_result: impl FnOnce(&mut H, &R) -> Option, - ) -> (R, DepNodeIndex) - where - C: DepGraphSafe + HashStableContextProvider, - H: HashStableContext, - { + task: fn(Ctxt, A) -> R, + hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option, + ) -> (R, DepNodeIndex) { self.with_task_impl( key, cx, @@ -223,13 +218,13 @@ impl DepGraph { ) } - fn with_task_impl( + fn with_task_impl, A, R>( &self, key: DepNode, - cx: C, + cx: Ctxt, arg: A, no_tcx: bool, - task: fn(C, A) -> R, + task: fn(Ctxt, A) -> R, create_task: fn(DepNode) -> Option>, finish_task_and_alloc_depnode: fn( &CurrentDepGraph, @@ -237,12 +232,8 @@ impl DepGraph { Fingerprint, Option>, ) -> DepNodeIndex, - hash_result: impl FnOnce(&mut H, &R) -> Option, - ) -> (R, DepNodeIndex) - where - C: DepGraphSafe + HashStableContextProvider, - H: HashStableContext, - { + hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option, + ) -> (R, DepNodeIndex) { if let Some(ref data) = self.data { let task_deps = create_task(key).map(Lock::new); @@ -251,7 +242,7 @@ impl DepGraph { // anyway so that // - we make sure that the infrastructure works and // - we can get an idea of the runtime cost. 
- let mut hcx = cx.get_stable_hashing_context(); + let mut hcx = cx.create_stable_hashing_context(); let result = if no_tcx { task(cx, arg) @@ -335,18 +326,14 @@ impl DepGraph { /// Executes something within an "eval-always" task which is a task /// that runs whenever anything changes. - pub fn with_eval_always_task( + pub fn with_eval_always_task, A, R>( &self, key: DepNode, - cx: C, + cx: Ctxt, arg: A, - task: fn(C, A) -> R, - hash_result: impl FnOnce(&mut H, &R) -> Option, - ) -> (R, DepNodeIndex) - where - C: DepGraphSafe + HashStableContextProvider, - H: HashStableContext, - { + task: fn(Ctxt, A) -> R, + hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option, + ) -> (R, DepNodeIndex) { self.with_task_impl( key, cx, diff --git a/src/librustc_query_system/dep_graph/mod.rs b/src/librustc_query_system/dep_graph/mod.rs index 2faca546213..f215dadc660 100644 --- a/src/librustc_query_system/dep_graph/mod.rs +++ b/src/librustc_query_system/dep_graph/mod.rs @@ -3,7 +3,6 @@ mod dep_node; mod graph; mod prev; mod query; -mod safe; mod serialized; pub use dep_node::{DepNode, DepNodeParams, WorkProductId}; @@ -11,8 +10,6 @@ pub use graph::WorkProductFileKind; pub use graph::{hash_result, DepGraph, DepNodeColor, DepNodeIndex, TaskDeps, WorkProduct}; pub use prev::PreviousDepGraph; pub use query::DepGraphQuery; -pub use safe::AssertDepGraphSafe; -pub use safe::DepGraphSafe; pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex}; use rustc_data_structures::profiling::SelfProfilerRef; @@ -23,7 +20,7 @@ use rustc_errors::Diagnostic; use std::fmt; use std::hash::Hash; -pub trait DepContext: Copy + DepGraphSafe { +pub trait DepContext: Copy { type DepKind: self::DepKind; type StableHashingContext: crate::HashStableContext; diff --git a/src/librustc_query_system/dep_graph/safe.rs b/src/librustc_query_system/dep_graph/safe.rs deleted file mode 100644 index 7bba348f884..00000000000 --- a/src/librustc_query_system/dep_graph/safe.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! The `DepGraphSafe` trait - -use rustc_ast::ast::NodeId; -use rustc_hir::def_id::DefId; -use rustc_hir::BodyId; - -/// The `DepGraphSafe` trait is used to specify what kinds of values -/// are safe to "leak" into a task. The idea is that this should be -/// only be implemented for things like the tcx as well as various id -/// types, which will create reads in the dep-graph whenever the trait -/// loads anything that might depend on the input program. -pub trait DepGraphSafe {} - -/// A `BodyId` on its own doesn't give access to any particular state. -/// You must fetch the state from the various maps or generate -/// on-demand queries, all of which create reads. -impl DepGraphSafe for BodyId {} - -/// A `NodeId` on its own doesn't give access to any particular state. -/// You must fetch the state from the various maps or generate -/// on-demand queries, all of which create reads. -impl DepGraphSafe for NodeId {} - -/// A `DefId` on its own doesn't give access to any particular state. -/// You must fetch the state from the various maps or generate -/// on-demand queries, all of which create reads. -impl DepGraphSafe for DefId {} - -/// Tuples make it easy to build up state. -impl DepGraphSafe for (A, B) -where - A: DepGraphSafe, - B: DepGraphSafe, -{ -} - -/// Shared ref to dep-graph-safe stuff should still be dep-graph-safe. -impl<'a, A> DepGraphSafe for &'a A where A: DepGraphSafe {} - -/// Mut ref to dep-graph-safe stuff should still be dep-graph-safe. 
-impl<'a, A> DepGraphSafe for &'a mut A where A: DepGraphSafe {} - -/// No data here! :) -impl DepGraphSafe for () {} - -/// A convenient override that lets you pass arbitrary state into a -/// task. Every use should be accompanied by a comment explaining why -/// it makes sense (or how it could be refactored away in the future). -pub struct AssertDepGraphSafe(pub T); - -impl DepGraphSafe for AssertDepGraphSafe {} diff --git a/src/librustc_query_system/lib.rs b/src/librustc_query_system/lib.rs index 5750d8e8c35..1f7fde642eb 100644 --- a/src/librustc_query_system/lib.rs +++ b/src/librustc_query_system/lib.rs @@ -19,20 +19,3 @@ pub mod query; pub trait HashStableContext { fn debug_dep_tasks(&self) -> bool; } - -/// Something that can provide a stable hashing context. -pub trait HashStableContextProvider { - fn get_stable_hashing_context(&self) -> Ctxt; -} - -impl> HashStableContextProvider for &T { - fn get_stable_hashing_context(&self) -> Ctxt { - (**self).get_stable_hashing_context() - } -} - -impl> HashStableContextProvider for &mut T { - fn get_stable_hashing_context(&self) -> Ctxt { - (**self).get_stable_hashing_context() - } -} diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index cf23467cf99..f025a056512 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -2,12 +2,11 @@ //! generate the actual methods on tcx which find and execute the provider, //! manage the caches, and so forth. -use crate::dep_graph::{DepContext, DepKind, DepNode}; +use crate::dep_graph::{DepKind, DepNode}; use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; use crate::query::caches::QueryCache; use crate::query::config::{QueryContext, QueryDescription}; use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId}; -use crate::HashStableContextProvider; #[cfg(not(parallel_compiler))] use rustc_data_structures::cold_path; @@ -382,7 +381,7 @@ where } #[inline(always)] -fn try_execute_query( +fn try_execute_query( tcx: CTX, span: Span, key: Q::Key, @@ -390,9 +389,7 @@ fn try_execute_query( ) -> Q::Value where Q: QueryDescription, - CTX: QueryContext, - CTX: HashStableContextProvider<::StableHashingContext>, - K: DepKind, + CTX: QueryContext, { let job = match JobOwner::try_start::(tcx, span, &key, lookup) { TryGetJob::NotYetStarted(job) => job, @@ -408,7 +405,7 @@ where // expensive for some `DepKind`s. if !tcx.dep_graph().is_fully_enabled() { let null_dep_node = DepNode::new_no_params(DepKind::NULL); - return force_query_with_job::(tcx, key, job, null_dep_node).0; + return force_query_with_job::(tcx, key, job, null_dep_node).0; } if Q::ANON { @@ -460,7 +457,7 @@ where } } - let (result, dep_node_index) = force_query_with_job::(tcx, key, job, dep_node); + let (result, dep_node_index) = force_query_with_job::(tcx, key, job, dep_node); tcx.dep_graph().read_index(dep_node_index); result } @@ -554,7 +551,7 @@ fn incremental_verify_ich( } #[inline(always)] -fn force_query_with_job( +fn force_query_with_job( tcx: CTX, key: Q::Key, job: JobOwner<'_, CTX, Q::Cache>, @@ -562,9 +559,7 @@ fn force_query_with_job( ) -> (Q::Value, DepNodeIndex) where Q: QueryDescription, - CTX: QueryContext, - CTX: HashStableContextProvider<::StableHashingContext>, - K: DepKind, + CTX: QueryContext, { // If the following assertion triggers, it can have two reasons: // 1. 
Something is wrong with DepNode creation, either here or @@ -631,11 +626,9 @@ pub trait QueryGetter: QueryContext { ); } -impl QueryGetter for CTX +impl QueryGetter for CTX where - CTX: QueryContext, - CTX: HashStableContextProvider<::StableHashingContext>, - K: DepKind, + CTX: QueryContext, { #[inline(never)] fn get_query>(self, span: Span, key: Q::Key) -> Q::Value { @@ -649,7 +642,7 @@ where self.dep_graph().read_index(index); value.clone() }, - |key, lookup| try_execute_query::(self, span, key, lookup), + |key, lookup| try_execute_query::(self, span, key, lookup), ) } @@ -710,7 +703,7 @@ where #[cfg(parallel_compiler)] TryGetJob::JobCompleted(_) => return, }; - force_query_with_job::(self, key, job, dep_node); + force_query_with_job::(self, key, job, dep_node); }, ); } From 0e8b59a2f4d006943424de9363421b7e2ed45e89 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Tue, 24 Mar 2020 20:37:19 +0100 Subject: [PATCH 22/31] Prune dependencies. --- Cargo.lock | 2 -- src/librustc_query_system/Cargo.toml | 2 -- src/librustc_query_system/query/config.rs | 2 +- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6d70ab32c9d..f0a5bed778e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4043,10 +4043,8 @@ dependencies = [ "log", "parking_lot 0.9.0", "rustc-rayon-core", - "rustc_ast", "rustc_data_structures", "rustc_errors", - "rustc_hir", "rustc_index", "rustc_macros", "rustc_session", diff --git a/src/librustc_query_system/Cargo.toml b/src/librustc_query_system/Cargo.toml index 065c54bb85a..6304e632b51 100644 --- a/src/librustc_query_system/Cargo.toml +++ b/src/librustc_query_system/Cargo.toml @@ -12,10 +12,8 @@ doctest = false [dependencies] log = { version = "0.4", features = ["release_max_level_info", "std"] } rustc-rayon-core = "0.3.0" -rustc_ast = { path = "../librustc_ast" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } -rustc_hir = { path = "../librustc_hir" } rustc_index = { path = "../librustc_index" } rustc_macros = { path = "../librustc_macros" } rustc_serialize = { path = "../libserialize", package = "serialize" } diff --git a/src/librustc_query_system/query/config.rs b/src/librustc_query_system/query/config.rs index 53adcbdeea7..35828da05bf 100644 --- a/src/librustc_query_system/query/config.rs +++ b/src/librustc_query_system/query/config.rs @@ -7,7 +7,7 @@ use crate::query::job::{QueryJobId, QueryJobInfo}; use crate::query::plumbing::CycleError; use crate::query::QueryState; use rustc_data_structures::profiling::ProfileCategory; -use rustc_hir::def_id::DefId; +use rustc_span::def_id::DefId; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxHashMap; From fa06cfd25b2f53d01eb92605caac8d39cbb57ab0 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Tue, 24 Mar 2020 23:46:47 +0100 Subject: [PATCH 23/31] Move generics on QueryCache. --- src/librustc/ty/query/plumbing.rs | 2 +- src/librustc/ty/query/profiling_support.rs | 2 +- src/librustc/ty/query/stats.rs | 2 +- src/librustc_query_system/query/caches.rs | 18 +++++++++--------- src/librustc_query_system/query/config.rs | 2 +- src/librustc_query_system/query/plumbing.rs | 20 ++++++++++---------- 6 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index fb699c6fae0..8e34aba8a9e 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -208,7 +208,7 @@ macro_rules! is_eval_always { macro_rules! 
query_storage { (<$tcx:tt>[][$K:ty, $V:ty]) => { - <<$K as Key>::CacheSelector as CacheSelector, $K, $V>>::Cache + <<$K as Key>::CacheSelector as CacheSelector<$K, $V>>::Cache }; (<$tcx:tt>[storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => { $ty diff --git a/src/librustc/ty/query/profiling_support.rs b/src/librustc/ty/query/profiling_support.rs index d7972045d12..e0d3e764dad 100644 --- a/src/librustc/ty/query/profiling_support.rs +++ b/src/librustc/ty/query/profiling_support.rs @@ -163,7 +163,7 @@ pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>( query_state: &QueryState, C>, string_cache: &mut QueryKeyStringCache, ) where - C: QueryCache>, + C: QueryCache, C::Key: Debug + Clone, { tcx.prof.with_profiler(|profiler| { diff --git a/src/librustc/ty/query/stats.rs b/src/librustc/ty/query/stats.rs index 12e9094fba6..b496bf839ab 100644 --- a/src/librustc/ty/query/stats.rs +++ b/src/librustc/ty/query/stats.rs @@ -38,7 +38,7 @@ struct QueryStats { local_def_id_keys: Option, } -fn stats>( +fn stats( name: &'static str, map: &QueryState, ) -> QueryStats { diff --git a/src/librustc_query_system/query/caches.rs b/src/librustc_query_system/query/caches.rs index f79aa992fd2..51bea58fd80 100644 --- a/src/librustc_query_system/query/caches.rs +++ b/src/librustc_query_system/query/caches.rs @@ -8,11 +8,11 @@ use std::default::Default; use std::hash::Hash; use std::marker::PhantomData; -pub trait CacheSelector { - type Cache: QueryCache; +pub trait CacheSelector { + type Cache: QueryCache; } -pub trait QueryCache: Default { +pub trait QueryCache: Default { type Key: Hash; type Value; type Sharded: Default; @@ -21,7 +21,7 @@ pub trait QueryCache: Default { /// It returns the shard index and a lock guard to the shard, /// which will be used if the query is not in the cache and we need /// to compute it. 
- fn lookup( + fn lookup( &self, state: &QueryState, key: Self::Key, @@ -33,7 +33,7 @@ pub trait QueryCache: Default { OnHit: FnOnce(&Self::Value, DepNodeIndex) -> R, OnMiss: FnOnce(Self::Key, QueryLookup<'_, CTX, Self::Key, Self::Sharded>) -> R; - fn complete( + fn complete( &self, tcx: CTX, lock_sharded_storage: &mut Self::Sharded, @@ -54,7 +54,7 @@ pub trait QueryCache: Default { pub struct DefaultCacheSelector; -impl CacheSelector for DefaultCacheSelector { +impl CacheSelector for DefaultCacheSelector { type Cache = DefaultCache; } @@ -66,13 +66,13 @@ impl Default for DefaultCache { } } -impl QueryCache for DefaultCache { +impl QueryCache for DefaultCache { type Key = K; type Value = V; type Sharded = FxHashMap; #[inline(always)] - fn lookup( + fn lookup( &self, state: &QueryState, key: K, @@ -92,7 +92,7 @@ impl QueryCache for DefaultCache } #[inline] - fn complete( + fn complete( &self, _: CTX, lock_sharded_storage: &mut Self::Sharded, diff --git a/src/librustc_query_system/query/config.rs b/src/librustc_query_system/query/config.rs index 35828da05bf..4800b66d889 100644 --- a/src/librustc_query_system/query/config.rs +++ b/src/librustc_query_system/query/config.rs @@ -63,7 +63,7 @@ pub trait QueryAccessors: QueryConfig { const EVAL_ALWAYS: bool; const DEP_KIND: CTX::DepKind; - type Cache: QueryCache; + type Cache: QueryCache; // Don't use this method to access query results, instead use the methods on TyCtxt fn query_state<'a>(tcx: CTX) -> &'a QueryState; diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index f025a056512..1bba4bd7e88 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -42,14 +42,14 @@ impl Default for QueryStateShard { } } -pub struct QueryState> { +pub struct QueryState { cache: C, shards: Sharded>, #[cfg(debug_assertions)] pub cache_hits: AtomicUsize, } -impl> QueryState { +impl QueryState { pub(super) fn get_lookup<'tcx>( &'tcx self, key: &C::Key, @@ -77,7 +77,7 @@ enum QueryResult { Poisoned, } -impl> QueryState { +impl QueryState { pub fn iter_results( &self, f: impl for<'a> FnOnce( @@ -122,7 +122,7 @@ impl> QueryState { } } -impl> Default for QueryState { +impl Default for QueryState { fn default() -> QueryState { QueryState { cache: C::default(), @@ -144,7 +144,7 @@ pub struct QueryLookup<'tcx, CTX: QueryContext, K, C> { /// This will poison the relevant query if dropped. struct JobOwner<'tcx, CTX: QueryContext, C> where - C: QueryCache, + C: QueryCache, C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, { @@ -155,7 +155,7 @@ where impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C> where - C: QueryCache, + C: QueryCache, C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, { @@ -292,7 +292,7 @@ where (result, diagnostics.into_inner()) } -impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C> +impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C> where C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, @@ -326,7 +326,7 @@ pub struct CycleError { } /// The result of `try_start`. 
-enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache> +enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache> where C::Key: Eq + Hash + Clone + Debug, C::Value: Clone, @@ -358,7 +358,7 @@ fn try_get_cached( on_miss: OnMiss, ) -> R where - C: QueryCache, + C: QueryCache, CTX: QueryContext, OnHit: FnOnce(&C::Value, DepNodeIndex) -> R, OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R, @@ -385,7 +385,7 @@ fn try_execute_query( tcx: CTX, span: Span, key: Q::Key, - lookup: QueryLookup<'_, CTX, Q::Key, >::Sharded>, + lookup: QueryLookup<'_, CTX, Q::Key, ::Sharded>, ) -> Q::Value where Q: QueryDescription, From 5dfed41812b3904c1f50bd11aa7047b29ee9f47a Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Tue, 24 Mar 2020 23:48:37 +0100 Subject: [PATCH 24/31] Simplify generics on try_start. --- src/librustc_query_system/query/plumbing.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index 1bba4bd7e88..e1b86e55ce8 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -168,16 +168,15 @@ where /// This function is inlined because that results in a noticeable speed-up /// for some compile-time benchmarks. #[inline(always)] - fn try_start<'a, 'b, Q, K>( + fn try_start<'a, 'b, Q>( tcx: CTX, span: Span, key: &C::Key, mut lookup: QueryLookup<'a, CTX, C::Key, C::Sharded>, ) -> TryGetJob<'b, CTX, C> where - K: DepKind, Q: QueryDescription, - CTX: QueryContext, + CTX: QueryContext, { let lock = &mut *lookup.lock; @@ -391,7 +390,7 @@ where Q: QueryDescription, CTX: QueryContext, { - let job = match JobOwner::try_start::(tcx, span, &key, lookup) { + let job = match JobOwner::try_start::(tcx, span, &key, lookup) { TryGetJob::NotYetStarted(job) => job, TryGetJob::Cycle(result) => return result, #[cfg(parallel_compiler)] @@ -697,7 +696,7 @@ where // Cache hit, do nothing }, |key, lookup| { - let job = match JobOwner::try_start::(self, span, &key, lookup) { + let job = match JobOwner::try_start::(self, span, &key, lookup) { TryGetJob::NotYetStarted(job) => job, TryGetJob::Cycle(_) => return, #[cfg(parallel_compiler)] From fce0d37619736b0e56eabab8f9064fad471a2b5f Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Tue, 24 Mar 2020 23:50:47 +0100 Subject: [PATCH 25/31] Add comment. --- src/librustc_query_system/query/config.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/librustc_query_system/query/config.rs b/src/librustc_query_system/query/config.rs index 4800b66d889..10338f65471 100644 --- a/src/librustc_query_system/query/config.rs +++ b/src/librustc_query_system/query/config.rs @@ -20,6 +20,8 @@ use std::borrow::Cow; use std::fmt::Debug; use std::hash::Hash; +// The parameter `CTX` is required in librustc: implementations may need to access the `'tcx` +// lifetime in `CTX = TyCtxt<'tcx>`. pub trait QueryConfig { const NAME: &'static str; const CATEGORY: ProfileCategory; From d224e214e051a92c5313a2d4ec0c94d41c4ba01d Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Wed, 25 Mar 2020 07:52:12 +0100 Subject: [PATCH 26/31] Rename read_query_job -> current_query_job and simplify it. 
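The TLS accessor only ever handed the current job straight to its closure, so the continuation-passing form buys nothing. Replace `read_query_job(|query| ...)` with a plain getter, `current_query_job()`, and let `wait_on` and `try_start` use the returned value in straight-line code.

A minimal sketch of the simplification, with a stand-in `Tls` type rather than the real `ImplicitCtxt`:

    #[derive(Clone, Copy)]
    struct JobId(u32);

    struct Tls {
        query: Option<JobId>,
    }

    impl Tls {
        // Before: callers wrap their logic in a closure just to receive the value.
        fn read_query_job<R>(&self, op: impl FnOnce(Option<JobId>) -> R) -> R {
            op(self.query)
        }

        // After: return the current job and let callers proceed directly.
        fn current_query_job(&self) -> Option<JobId> {
            self.query
        }
    }
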
--- src/librustc/ty/query/plumbing.rs | 4 +-- src/librustc_query_system/query/config.rs | 2 +- src/librustc_query_system/query/job.rs | 31 +++++++++------------ src/librustc_query_system/query/plumbing.rs | 3 +- 4 files changed, 18 insertions(+), 22 deletions(-) diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 8e34aba8a9e..8cdc1ae27ee 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -32,8 +32,8 @@ impl QueryContext for TyCtxt<'tcx> { &self.dep_graph } - fn read_query_job(&self, op: impl FnOnce(Option>) -> R) -> R { - tls::with_related_context(*self, move |icx| op(icx.query)) + fn current_query_job(&self) -> Option> { + tls::with_related_context(*self, |icx| icx.query) } fn try_collect_active_jobs( diff --git a/src/librustc_query_system/query/config.rs b/src/librustc_query_system/query/config.rs index 10338f65471..106688d2b54 100644 --- a/src/librustc_query_system/query/config.rs +++ b/src/librustc_query_system/query/config.rs @@ -43,7 +43,7 @@ pub trait QueryContext: DepContext { fn dep_graph(&self) -> &DepGraph; /// Get the query information from the TLS context. - fn read_query_job(&self, op: impl FnOnce(Option>) -> R) -> R; + fn current_query_job(&self) -> Option>; fn try_collect_active_jobs( &self, diff --git a/src/librustc_query_system/query/job.rs b/src/librustc_query_system/query/job.rs index 92ab97f210a..a7488b6fdff 100644 --- a/src/librustc_query_system/query/job.rs +++ b/src/librustc_query_system/query/job.rs @@ -150,7 +150,7 @@ impl QueryLatch { let query_map = tcx.try_collect_active_jobs().unwrap(); // Get the current executing query (waiter) and find the waitee amongst its parents - let mut current_job = tcx.read_query_job(|query| query); + let mut current_job = tcx.current_query_job(); let mut cycle = Vec::new(); while let Some(job) = current_job { @@ -222,23 +222,18 @@ impl QueryLatch { impl QueryLatch { /// Awaits for the query job to complete. pub(super) fn wait_on(&self, tcx: CTX, span: Span) -> Result<(), CycleError> { - tcx.read_query_job(move |query| { - let waiter = Lrc::new(QueryWaiter { - query, - span, - cycle: Lock::new(None), - condvar: Condvar::new(), - }); - self.wait_on_inner(&waiter); - // FIXME: Get rid of this lock. We have ownership of the QueryWaiter - // although another thread may still have a Lrc reference so we cannot - // use Lrc::get_mut - let mut cycle = waiter.cycle.lock(); - match cycle.take() { - None => Ok(()), - Some(cycle) => Err(cycle), - } - }) + let query = tcx.current_query_job(); + let waiter = + Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() }); + self.wait_on_inner(&waiter); + // FIXME: Get rid of this lock. 
We have ownership of the QueryWaiter + // although another thread may still have a Lrc reference so we cannot + // use Lrc::get_mut + let mut cycle = waiter.cycle.lock(); + match cycle.take() { + None => Ok(()), + Some(cycle) => Err(cycle), + } } } diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index e1b86e55ce8..b3187ba9189 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -211,7 +211,8 @@ where let global_id = QueryJobId::new(id, lookup.shard, Q::DEP_KIND); - let job = tcx.read_query_job(|query| QueryJob::new(id, span, query)); + let job = tcx.current_query_job(); + let job = QueryJob::new(id, span, job); entry.insert(QueryResult::Started(job)); From 260cfaba1259e8551aec02ef4b846900517be8e5 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Wed, 25 Mar 2020 08:02:27 +0100 Subject: [PATCH 27/31] Don't allow access to the Session. --- Cargo.lock | 1 - src/librustc/ty/query/plumbing.rs | 8 +++++--- src/librustc_query_system/Cargo.toml | 1 - src/librustc_query_system/query/config.rs | 7 +++---- src/librustc_query_system/query/plumbing.rs | 2 +- 5 files changed, 9 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f0a5bed778e..f61f333b6c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4047,7 +4047,6 @@ dependencies = [ "rustc_errors", "rustc_index", "rustc_macros", - "rustc_session", "rustc_span", "serialize", "smallvec 1.0.0", diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 8cdc1ae27ee..c60fdd2f4fc 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -13,15 +13,17 @@ use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::Lock; use rustc_data_structures::thin_vec::ThinVec; use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level}; -use rustc_session::Session; use rustc_span::def_id::DefId; use rustc_span::Span; impl QueryContext for TyCtxt<'tcx> { type Query = Query<'tcx>; - fn session(&self) -> &Session { - &self.sess + fn incremental_verify_ich(&self) -> bool { + self.sess.opts.debugging_opts.incremental_verify_ich + } + fn verbose(&self) -> bool { + self.sess.verbose() } fn def_path_str(&self, def_id: DefId) -> String { diff --git a/src/librustc_query_system/Cargo.toml b/src/librustc_query_system/Cargo.toml index 6304e632b51..e1657a8f3c6 100644 --- a/src/librustc_query_system/Cargo.toml +++ b/src/librustc_query_system/Cargo.toml @@ -17,7 +17,6 @@ rustc_errors = { path = "../librustc_errors" } rustc_index = { path = "../librustc_index" } rustc_macros = { path = "../librustc_macros" } rustc_serialize = { path = "../libserialize", package = "serialize" } -rustc_session = { path = "../librustc_session" } rustc_span = { path = "../librustc_span" } parking_lot = "0.9" smallvec = { version = "1.0", features = ["union", "may_dangle"] } diff --git a/src/librustc_query_system/query/config.rs b/src/librustc_query_system/query/config.rs index 106688d2b54..46a48a31161 100644 --- a/src/librustc_query_system/query/config.rs +++ b/src/librustc_query_system/query/config.rs @@ -15,7 +15,6 @@ use rustc_data_structures::stable_hasher::HashStable; use rustc_data_structures::sync::Lock; use rustc_data_structures::thin_vec::ThinVec; use rustc_errors::Diagnostic; -use rustc_session::Session; use std::borrow::Cow; use std::fmt::Debug; use std::hash::Hash; @@ -33,8 +32,8 @@ pub trait QueryConfig { pub trait QueryContext: DepContext { type Query: Clone + 
HashStable; - /// Access the session. - fn session(&self) -> &Session; + fn incremental_verify_ich(&self) -> bool; + fn verbose(&self) -> bool; /// Get string representation from DefPath. fn def_path_str(&self, def_id: DefId) -> String; @@ -101,7 +100,7 @@ where M: QueryAccessors, { default fn describe(tcx: CTX, def_id: DefId) -> Cow<'static, str> { - if !tcx.session().verbose() { + if !tcx.verbose() { format!("processing `{}`", tcx.def_path_str(def_id)).into() } else { let name = ::std::any::type_name::(); diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index b3187ba9189..97b68042039 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -514,7 +514,7 @@ where // If `-Zincremental-verify-ich` is specified, re-hash results from // the cache and make sure that they have the expected fingerprint. - if unlikely!(tcx.session().opts.debugging_opts.incremental_verify_ich) { + if unlikely!(tcx.incremental_verify_ich()) { incremental_verify_ich::(tcx, &result, dep_node, dep_node_index); } From 4faf701d2028dd6031a3c772ae4b3e844d51c3c3 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Fri, 27 Mar 2020 07:35:32 +0100 Subject: [PATCH 28/31] Remove the QueryGetter trait. --- src/librustc/ty/query/plumbing.rs | 4 +- src/librustc_macros/src/query.rs | 3 +- src/librustc_query_system/query/plumbing.rs | 166 +++++++++----------- 3 files changed, 78 insertions(+), 95 deletions(-) diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index c60fdd2f4fc..0fabacb5f69 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -381,7 +381,7 @@ macro_rules! define_queries_inner { $($(#[$attr])* #[inline(always)] pub fn $name(self, key: $K) { - self.tcx.ensure_query::>(key) + ensure_query::, _>(self.tcx, key) })* } @@ -459,7 +459,7 @@ macro_rules! define_queries_inner { $($(#[$attr])* #[inline(always)] pub fn $name(self, key: $K) -> $V { - self.tcx.get_query::>(self.span, key) + get_query::, _>(self.tcx, self.span, key) })* } diff --git a/src/librustc_macros/src/query.rs b/src/librustc_macros/src/query.rs index 57fe8ede9d1..26c3bce4a9a 100644 --- a/src/librustc_macros/src/query.rs +++ b/src/librustc_macros/src/query.rs @@ -489,7 +489,8 @@ pub fn rustc_queries(input: TokenStream) -> TokenStream { ::rustc::dep_graph::DepKind::#name => { if <#arg as DepNodeParams>>::CAN_RECONSTRUCT_QUERY_KEY { if let Some(key) = <#arg as DepNodeParams>>::recover($tcx, $dep_node) { - $tcx.force_query::>( + force_query::, _>( + $tcx, key, DUMMY_SP, *$dep_node diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index 97b68042039..bec45b29d30 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -606,105 +606,87 @@ where (result, dep_node_index) } -pub trait QueryGetter: QueryContext { - fn get_query>(self, span: Span, key: Q::Key) -> Q::Value; - - /// Ensure that either this query has all green inputs or been executed. - /// Executing `query::ensure(D)` is considered a read of the dep-node `D`. - /// - /// This function is particularly useful when executing passes for their - /// side-effects -- e.g., in order to report errors for erroneous programs. - /// - /// Note: The optimization is only available during incr. comp. 
- fn ensure_query>(self, key: Q::Key); - - fn force_query>( - self, - key: Q::Key, - span: Span, - dep_node: DepNode, - ); -} - -impl QueryGetter for CTX +#[inline(never)] +pub fn get_query(tcx: CTX, span: Span, key: Q::Key) -> Q::Value where + Q: QueryDescription, CTX: QueryContext, { - #[inline(never)] - fn get_query>(self, span: Span, key: Q::Key) -> Q::Value { - debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span); + debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span); - try_get_cached( - self, - Q::query_state(self), - key, - |value, index| { - self.dep_graph().read_index(index); - value.clone() - }, - |key, lookup| try_execute_query::(self, span, key, lookup), - ) + try_get_cached( + tcx, + Q::query_state(tcx), + key, + |value, index| { + tcx.dep_graph().read_index(index); + value.clone() + }, + |key, lookup| try_execute_query::(tcx, span, key, lookup), + ) +} + +/// Ensure that either this query has all green inputs or been executed. +/// Executing `query::ensure(D)` is considered a read of the dep-node `D`. +/// +/// This function is particularly useful when executing passes for their +/// side-effects -- e.g., in order to report errors for erroneous programs. +/// +/// Note: The optimization is only available during incr. comp. +pub fn ensure_query(tcx: CTX, key: Q::Key) +where + Q: QueryDescription, + CTX: QueryContext, +{ + if Q::EVAL_ALWAYS { + let _ = get_query::(tcx, DUMMY_SP, key); + return; } - /// Ensure that either this query has all green inputs or been executed. - /// Executing `query::ensure(D)` is considered a read of the dep-node `D`. - /// - /// This function is particularly useful when executing passes for their - /// side-effects -- e.g., in order to report errors for erroneous programs. - /// - /// Note: The optimization is only available during incr. comp. - fn ensure_query>(self, key: Q::Key) { - if Q::EVAL_ALWAYS { - let _ = self.get_query::(DUMMY_SP, key); - return; + // Ensuring an anonymous query makes no sense + assert!(!Q::ANON); + + let dep_node = Q::to_dep_node(tcx, &key); + + match tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node) { + None => { + // A None return from `try_mark_green_and_read` means that this is either + // a new dep node or that the dep node has already been marked red. + // Either way, we can't call `dep_graph.read()` as we don't have the + // DepNodeIndex. We must invoke the query itself. The performance cost + // this introduces should be negligible as we'll immediately hit the + // in-memory cache, or another query down the line will. + let _ = get_query::(tcx, DUMMY_SP, key); } - - // Ensuring an anonymous query makes no sense - assert!(!Q::ANON); - - let dep_node = Q::to_dep_node(self, &key); - - match self.dep_graph().try_mark_green_and_read(self, &dep_node) { - None => { - // A None return from `try_mark_green_and_read` means that this is either - // a new dep node or that the dep node has already been marked red. - // Either way, we can't call `dep_graph.read()` as we don't have the - // DepNodeIndex. We must invoke the query itself. The performance cost - // this introduces should be negligible as we'll immediately hit the - // in-memory cache, or another query down the line will. 
- let _ = self.get_query::(DUMMY_SP, key); - } - Some((_, dep_node_index)) => { - self.profiler().query_cache_hit(dep_node_index.into()); - } + Some((_, dep_node_index)) => { + tcx.profiler().query_cache_hit(dep_node_index.into()); } } - - fn force_query>( - self, - key: Q::Key, - span: Span, - dep_node: DepNode, - ) { - // We may be concurrently trying both execute and force a query. - // Ensure that only one of them runs the query. - - try_get_cached( - self, - Q::query_state(self), - key, - |_, _| { - // Cache hit, do nothing - }, - |key, lookup| { - let job = match JobOwner::try_start::(self, span, &key, lookup) { - TryGetJob::NotYetStarted(job) => job, - TryGetJob::Cycle(_) => return, - #[cfg(parallel_compiler)] - TryGetJob::JobCompleted(_) => return, - }; - force_query_with_job::(self, key, job, dep_node); - }, - ); - } +} + +pub fn force_query(tcx: CTX, key: Q::Key, span: Span, dep_node: DepNode) +where + Q: QueryDescription, + CTX: QueryContext, +{ + // We may be concurrently trying both execute and force a query. + // Ensure that only one of them runs the query. + + try_get_cached( + tcx, + Q::query_state(tcx), + key, + |_, _| { + // Cache hit, do nothing + }, + |key, lookup| { + let job = match JobOwner::try_start::(tcx, span, &key, lookup) { + TryGetJob::NotYetStarted(job) => job, + TryGetJob::Cycle(_) => return, + #[cfg(parallel_compiler)] + TryGetJob::JobCompleted(_) => return, + }; + force_query_with_job::(tcx, key, job, dep_node); + }, + ); } From db5be1fe208d0cbdde750a0eb9f01bbd8afda1ee Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Fri, 27 Mar 2020 07:43:11 +0100 Subject: [PATCH 29/31] Move QueryContext to the parent module. --- src/librustc_query_system/query/caches.rs | 2 +- src/librustc_query_system/query/config.rs | 40 +------------------- src/librustc_query_system/query/job.rs | 2 +- src/librustc_query_system/query/mod.rs | 41 ++++++++++++++++++++- src/librustc_query_system/query/plumbing.rs | 3 +- 5 files changed, 46 insertions(+), 42 deletions(-) diff --git a/src/librustc_query_system/query/caches.rs b/src/librustc_query_system/query/caches.rs index 51bea58fd80..0c0335ba04f 100644 --- a/src/librustc_query_system/query/caches.rs +++ b/src/librustc_query_system/query/caches.rs @@ -1,6 +1,6 @@ use crate::dep_graph::DepNodeIndex; -use crate::query::config::QueryContext; use crate::query::plumbing::{QueryLookup, QueryState}; +use crate::query::QueryContext; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sharded::Sharded; diff --git a/src/librustc_query_system/query/config.rs b/src/librustc_query_system/query/config.rs index 46a48a31161..20dad0bd47e 100644 --- a/src/librustc_query_system/query/config.rs +++ b/src/librustc_query_system/query/config.rs @@ -1,20 +1,14 @@ //! Query configuration and description traits. 
+use crate::dep_graph::DepNode; use crate::dep_graph::SerializedDepNodeIndex; -use crate::dep_graph::{DepContext, DepGraph, DepNode}; use crate::query::caches::QueryCache; -use crate::query::job::{QueryJobId, QueryJobInfo}; use crate::query::plumbing::CycleError; -use crate::query::QueryState; +use crate::query::{QueryContext, QueryState}; use rustc_data_structures::profiling::ProfileCategory; use rustc_span::def_id::DefId; use rustc_data_structures::fingerprint::Fingerprint; -use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::stable_hasher::HashStable; -use rustc_data_structures::sync::Lock; -use rustc_data_structures::thin_vec::ThinVec; -use rustc_errors::Diagnostic; use std::borrow::Cow; use std::fmt::Debug; use std::hash::Hash; @@ -29,36 +23,6 @@ pub trait QueryConfig { type Value: Clone; } -pub trait QueryContext: DepContext { - type Query: Clone + HashStable; - - fn incremental_verify_ich(&self) -> bool; - fn verbose(&self) -> bool; - - /// Get string representation from DefPath. - fn def_path_str(&self, def_id: DefId) -> String; - - /// Access the DepGraph. - fn dep_graph(&self) -> &DepGraph; - - /// Get the query information from the TLS context. - fn current_query_job(&self) -> Option>; - - fn try_collect_active_jobs( - &self, - ) -> Option, QueryJobInfo>>; - - /// Executes a job by changing the `ImplicitCtxt` to point to the - /// new query job while it executes. It returns the diagnostics - /// captured during execution and the actual result. - fn start_query( - &self, - token: QueryJobId, - diagnostics: Option<&Lock>>, - compute: impl FnOnce(Self) -> R, - ) -> R; -} - pub trait QueryAccessors: QueryConfig { const ANON: bool; const EVAL_ALWAYS: bool; diff --git a/src/librustc_query_system/query/job.rs b/src/librustc_query_system/query/job.rs index a7488b6fdff..de6dc81d868 100644 --- a/src/librustc_query_system/query/job.rs +++ b/src/librustc_query_system/query/job.rs @@ -1,6 +1,6 @@ use crate::dep_graph::{DepContext, DepKind}; -use crate::query::config::QueryContext; use crate::query::plumbing::CycleError; +use crate::query::QueryContext; use rustc_data_structures::fx::FxHashMap; use rustc_span::Span; diff --git a/src/librustc_query_system/query/mod.rs b/src/librustc_query_system/query/mod.rs index 9d0a6665eac..b1677c5c93d 100644 --- a/src/librustc_query_system/query/mod.rs +++ b/src/librustc_query_system/query/mod.rs @@ -10,4 +10,43 @@ mod caches; pub use self::caches::{CacheSelector, DefaultCacheSelector, QueryCache}; mod config; -pub use self::config::{QueryAccessors, QueryConfig, QueryContext, QueryDescription}; +pub use self::config::{QueryAccessors, QueryConfig, QueryDescription}; + +use crate::dep_graph::{DepContext, DepGraph}; + +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::stable_hasher::HashStable; +use rustc_data_structures::sync::Lock; +use rustc_data_structures::thin_vec::ThinVec; +use rustc_errors::Diagnostic; +use rustc_span::def_id::DefId; + +pub trait QueryContext: DepContext { + type Query: Clone + HashStable; + + fn incremental_verify_ich(&self) -> bool; + fn verbose(&self) -> bool; + + /// Get string representation from DefPath. + fn def_path_str(&self, def_id: DefId) -> String; + + /// Access the DepGraph. + fn dep_graph(&self) -> &DepGraph; + + /// Get the query information from the TLS context. + fn current_query_job(&self) -> Option>; + + fn try_collect_active_jobs( + &self, + ) -> Option, QueryJobInfo>>; + + /// Executes a job by changing the `ImplicitCtxt` to point to the + /// new query job while it executes. 
It returns the diagnostics + /// captured during execution and the actual result. + fn start_query( + &self, + token: QueryJobId, + diagnostics: Option<&Lock>>, + compute: impl FnOnce(Self) -> R, + ) -> R; +} diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index bec45b29d30..b371a914c6f 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -5,8 +5,9 @@ use crate::dep_graph::{DepKind, DepNode}; use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; use crate::query::caches::QueryCache; -use crate::query::config::{QueryContext, QueryDescription}; +use crate::query::config::QueryDescription; use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId}; +use crate::query::QueryContext; #[cfg(not(parallel_compiler))] use rustc_data_structures::cold_path; From 222d01025581174b4f6c1afacf673bc6eade3a6a Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Fri, 27 Mar 2020 07:50:28 +0100 Subject: [PATCH 30/31] Cleanups. --- src/librustc/dep_graph/mod.rs | 10 +++++----- src/librustc/ty/query/plumbing.rs | 10 +++++----- src/librustc_query_system/dep_graph/graph.rs | 3 +-- src/librustc_query_system/dep_graph/mod.rs | 2 ++ src/librustc_query_system/lib.rs | 4 +--- 5 files changed, 14 insertions(+), 15 deletions(-) diff --git a/src/librustc/dep_graph/mod.rs b/src/librustc/dep_graph/mod.rs index de94b6b1850..7b05a575b93 100644 --- a/src/librustc/dep_graph/mod.rs +++ b/src/librustc/dep_graph/mod.rs @@ -95,6 +95,10 @@ impl<'tcx> DepContext for TyCtxt<'tcx> { TyCtxt::create_stable_hashing_context(*self) } + fn debug_dep_tasks(&self) -> bool { + self.sess.opts.debugging_opts.dep_tasks + } + fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool { // FIXME: This match is just a workaround for incremental bugs and should // be removed. https://github.com/rust-lang/rust/issues/62649 is one such @@ -181,8 +185,4 @@ fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: DefId) -> bool { def_id.index == hir_id.owner.local_def_index } -impl rustc_query_system::HashStableContext for StableHashingContext<'_> { - fn debug_dep_tasks(&self) -> bool { - self.sess().opts.debugging_opts.dep_tasks - } -} +impl rustc_query_system::HashStableContext for StableHashingContext<'_> {} diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 0fabacb5f69..1bb392f436f 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -209,14 +209,14 @@ macro_rules! is_eval_always { } macro_rules! query_storage { - (<$tcx:tt>[][$K:ty, $V:ty]) => { + ([][$K:ty, $V:ty]) => { <<$K as Key>::CacheSelector as CacheSelector<$K, $V>>::Cache }; - (<$tcx:tt>[storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => { + ([storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => { $ty }; - (<$tcx:tt>[$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { - query_storage!(<$tcx>[$($($modifiers)*)*][$($args)*]) + ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { + query_storage!([$($($modifiers)*)*][$($args)*]) }; } @@ -332,7 +332,7 @@ macro_rules! 
define_queries_inner { const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]); const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node; - type Cache = query_storage!(<$tcx>[$($modifiers)*][$K, $V]); + type Cache = query_storage!([$($modifiers)*][$K, $V]); #[inline(always)] fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState, Self::Cache> { diff --git a/src/librustc_query_system/dep_graph/graph.rs b/src/librustc_query_system/dep_graph/graph.rs index 60c5dcda425..73983e1644c 100644 --- a/src/librustc_query_system/dep_graph/graph.rs +++ b/src/librustc_query_system/dep_graph/graph.rs @@ -22,7 +22,6 @@ use super::prev::PreviousDepGraph; use super::query::DepGraphQuery; use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex}; use super::{DepContext, DepKind, DepNode, WorkProductId}; -use crate::HashStableContext; #[derive(Clone)] pub struct DepGraph { @@ -259,7 +258,7 @@ impl DepGraph { task_deps.map(|lock| lock.into_inner()), ); - let print_status = cfg!(debug_assertions) && hcx.debug_dep_tasks(); + let print_status = cfg!(debug_assertions) && cx.debug_dep_tasks(); // Determine the color of the new DepNode. if let Some(prev_index) = data.previous.node_to_index_opt(&key) { diff --git a/src/librustc_query_system/dep_graph/mod.rs b/src/librustc_query_system/dep_graph/mod.rs index f215dadc660..232efa8795d 100644 --- a/src/librustc_query_system/dep_graph/mod.rs +++ b/src/librustc_query_system/dep_graph/mod.rs @@ -27,6 +27,8 @@ pub trait DepContext: Copy { /// Create a hashing context for hashing new results. fn create_stable_hashing_context(&self) -> Self::StableHashingContext; + fn debug_dep_tasks(&self) -> bool; + /// Try to force a dep node to execute and see if it's green. fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool; diff --git a/src/librustc_query_system/lib.rs b/src/librustc_query_system/lib.rs index 1f7fde642eb..6623e74b5cb 100644 --- a/src/librustc_query_system/lib.rs +++ b/src/librustc_query_system/lib.rs @@ -16,6 +16,4 @@ extern crate rustc_data_structures; pub mod dep_graph; pub mod query; -pub trait HashStableContext { - fn debug_dep_tasks(&self) -> bool; -} +pub trait HashStableContext {} From 2d7bbda966744f5eff12135bb523ac9c1d561cf0 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Fri, 27 Mar 2020 08:33:37 +0100 Subject: [PATCH 31/31] Implement HashStable directly. 
--- src/librustc/dep_graph/mod.rs | 2 -- src/librustc_query_system/dep_graph/dep_node.rs | 9 +++++++-- src/librustc_query_system/dep_graph/mod.rs | 2 +- src/librustc_query_system/lib.rs | 2 -- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/librustc/dep_graph/mod.rs b/src/librustc/dep_graph/mod.rs index 7b05a575b93..f56df19bfb0 100644 --- a/src/librustc/dep_graph/mod.rs +++ b/src/librustc/dep_graph/mod.rs @@ -184,5 +184,3 @@ fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: DefId) -> bool { let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); def_id.index == hir_id.owner.local_def_index } - -impl rustc_query_system::HashStableContext for StableHashingContext<'_> {} diff --git a/src/librustc_query_system/dep_graph/dep_node.rs b/src/librustc_query_system/dep_graph/dep_node.rs index c6fff2f0164..b1d332da115 100644 --- a/src/librustc_query_system/dep_graph/dep_node.rs +++ b/src/librustc_query_system/dep_graph/dep_node.rs @@ -46,7 +46,6 @@ use super::{DepContext, DepKind}; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; -use rustc_macros::HashStable_Generic; use std::fmt; use std::hash::Hash; @@ -127,7 +126,6 @@ where /// the need to be mapped or unmapped. (This ensures we can serialize /// them even in the absence of a tcx.) #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] -#[derive(HashStable_Generic)] pub struct WorkProductId { hash: Fingerprint, } @@ -144,3 +142,10 @@ impl WorkProductId { WorkProductId { hash: fingerprint } } } + +impl HashStable for WorkProductId { + #[inline] + fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) { + self.hash.hash_stable(hcx, hasher) + } +} diff --git a/src/librustc_query_system/dep_graph/mod.rs b/src/librustc_query_system/dep_graph/mod.rs index 232efa8795d..fbc91575ede 100644 --- a/src/librustc_query_system/dep_graph/mod.rs +++ b/src/librustc_query_system/dep_graph/mod.rs @@ -22,7 +22,7 @@ use std::hash::Hash; pub trait DepContext: Copy { type DepKind: self::DepKind; - type StableHashingContext: crate::HashStableContext; + type StableHashingContext; /// Create a hashing context for hashing new results. fn create_stable_hashing_context(&self) -> Self::StableHashingContext; diff --git a/src/librustc_query_system/lib.rs b/src/librustc_query_system/lib.rs index 6623e74b5cb..0e6a07e06d0 100644 --- a/src/librustc_query_system/lib.rs +++ b/src/librustc_query_system/lib.rs @@ -15,5 +15,3 @@ extern crate rustc_data_structures; pub mod dep_graph; pub mod query; - -pub trait HashStableContext {}