diff --git a/Cargo.lock b/Cargo.lock index 8bf61989135..6d70ab32c9d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4042,12 +4042,15 @@ version = "0.0.0" dependencies = [ "log", "parking_lot 0.9.0", + "rustc-rayon-core", "rustc_ast", "rustc_data_structures", "rustc_errors", "rustc_hir", "rustc_index", "rustc_macros", + "rustc_session", + "rustc_span", "serialize", "smallvec 1.0.0", ] diff --git a/src/librustc_query_system/Cargo.toml b/src/librustc_query_system/Cargo.toml index a01bb5e5ea3..065c54bb85a 100644 --- a/src/librustc_query_system/Cargo.toml +++ b/src/librustc_query_system/Cargo.toml @@ -11,6 +11,7 @@ doctest = false [dependencies] log = { version = "0.4", features = ["release_max_level_info", "std"] } +rustc-rayon-core = "0.3.0" rustc_ast = { path = "../librustc_ast" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } @@ -18,5 +19,7 @@ rustc_hir = { path = "../librustc_hir" } rustc_index = { path = "../librustc_index" } rustc_macros = { path = "../librustc_macros" } rustc_serialize = { path = "../libserialize", package = "serialize" } +rustc_session = { path = "../librustc_session" } +rustc_span = { path = "../librustc_span" } parking_lot = "0.9" smallvec = { version = "1.0", features = ["union", "may_dangle"] } diff --git a/src/librustc_query_system/lib.rs b/src/librustc_query_system/lib.rs index ef4886828c4..5750d8e8c35 100644 --- a/src/librustc_query_system/lib.rs +++ b/src/librustc_query_system/lib.rs @@ -1,14 +1,20 @@ +#![feature(bool_to_option)] #![feature(const_fn)] #![feature(const_if_match)] #![feature(const_panic)] #![feature(core_intrinsics)] +#![feature(hash_raw_entry)] #![feature(specialization)] #![feature(stmt_expr_attributes)] +#![feature(vec_remove_item)] #[macro_use] extern crate log; +#[macro_use] +extern crate rustc_data_structures; pub mod dep_graph; +pub mod query; pub trait HashStableContext { fn debug_dep_tasks(&self) -> bool; diff --git a/src/librustc_query_system/query/caches.rs b/src/librustc_query_system/query/caches.rs index f740fada1e5..efde51c4db6 100644 --- a/src/librustc_query_system/query/caches.rs +++ b/src/librustc_query_system/query/caches.rs @@ -1,6 +1,6 @@ use crate::dep_graph::DepNodeIndex; -use crate::ty::query::config::QueryContext; -use crate::ty::query::plumbing::{QueryLookup, QueryState, QueryStateShard}; +use crate::query::config::QueryContext; +use crate::query::plumbing::{QueryLookup, QueryState, QueryStateShard}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sharded::Sharded; @@ -8,11 +8,11 @@ use std::default::Default; use std::hash::Hash; use std::marker::PhantomData; -pub(crate) trait CacheSelector { +pub trait CacheSelector { type Cache: QueryCache; } -pub(crate) trait QueryCache: Default { +pub trait QueryCache: Default { type Key; type Value; type Sharded: Default; diff --git a/src/librustc_query_system/query/config.rs b/src/librustc_query_system/query/config.rs index 91e82858b0b..53adcbdeea7 100644 --- a/src/librustc_query_system/query/config.rs +++ b/src/librustc_query_system/query/config.rs @@ -1,10 +1,11 @@ //! Query configuration and description traits. 
use crate::dep_graph::SerializedDepNodeIndex; -use crate::ty::query::caches::QueryCache; -use crate::ty::query::job::{QueryJobId, QueryJobInfo}; -use crate::ty::query::plumbing::CycleError; -use crate::ty::query::QueryState; +use crate::dep_graph::{DepContext, DepGraph, DepNode}; +use crate::query::caches::QueryCache; +use crate::query::job::{QueryJobId, QueryJobInfo}; +use crate::query::plumbing::CycleError; +use crate::query::QueryState; use rustc_data_structures::profiling::ProfileCategory; use rustc_hir::def_id::DefId; @@ -14,7 +15,6 @@ use rustc_data_structures::stable_hasher::HashStable; use rustc_data_structures::sync::Lock; use rustc_data_structures::thin_vec::ThinVec; use rustc_errors::Diagnostic; -use rustc_query_system::dep_graph::{DepContext, DepGraph, DepNode}; use rustc_session::Session; use std::borrow::Cow; use std::fmt::Debug; @@ -58,7 +58,7 @@ pub trait QueryContext: DepContext { ) -> R; } -pub(crate) trait QueryAccessors: QueryConfig { +pub trait QueryAccessors: QueryConfig { const ANON: bool; const EVAL_ALWAYS: bool; const DEP_KIND: CTX::DepKind; @@ -81,7 +81,7 @@ pub(crate) trait QueryAccessors: QueryConfig { fn handle_cycle_error(tcx: CTX, error: CycleError) -> Self::Value; } -pub(crate) trait QueryDescription: QueryAccessors { +pub trait QueryDescription: QueryAccessors { fn describe(tcx: CTX, key: Self::Key) -> Cow<'static, str>; #[inline] @@ -90,7 +90,7 @@ pub(crate) trait QueryDescription: QueryAccessors { } fn try_load_from_disk(_: CTX, _: SerializedDepNodeIndex) -> Option { - bug!("QueryDescription::load_from_disk() called for an unsupported query.") + panic!("QueryDescription::load_from_disk() called for an unsupported query.") } } @@ -112,6 +112,6 @@ where } default fn try_load_from_disk(_: CTX, _: SerializedDepNodeIndex) -> Option { - bug!("QueryDescription::load_from_disk() called for an unsupported query.") + panic!("QueryDescription::load_from_disk() called for an unsupported query.") } } diff --git a/src/librustc_query_system/query/job.rs b/src/librustc_query_system/query/job.rs index 7f0156abdea..9068760d323 100644 --- a/src/librustc_query_system/query/job.rs +++ b/src/librustc_query_system/query/job.rs @@ -1,10 +1,8 @@ -use crate::ty::query::config::QueryContext; -use crate::ty::query::plumbing::CycleError; -#[cfg(parallel_compiler)] -use crate::ty::tls; +use crate::dep_graph::{DepKind, DepContext}; +use crate::query::config::QueryContext; +use crate::query::plumbing::CycleError; use rustc_data_structures::fx::FxHashMap; -use rustc_query_system::dep_graph::DepContext; use rustc_span::Span; use std::convert::TryFrom; @@ -22,7 +20,7 @@ use { rustc_rayon_core as rayon_core, rustc_span::DUMMY_SP, std::iter::FromIterator, - std::{mem, process, thread}, + std::{mem, process}, }; /// Represents a span and a query key. @@ -52,7 +50,7 @@ pub struct QueryJobId { pub kind: K, } -impl QueryJobId { +impl QueryJobId { pub fn new(job: QueryShardJobId, shard: usize, kind: K) -> Self { QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind } } @@ -529,38 +527,13 @@ fn remove_cycle( } } -/// Creates a new thread and forwards information in thread locals to it. -/// The new thread runs the deadlock handler. -/// Must only be called when a deadlock is about to happen. 
-#[cfg(parallel_compiler)] -pub unsafe fn handle_deadlock() { - let registry = rayon_core::Registry::current(); - - let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| gcx_ptr as *const _); - let gcx_ptr = &*gcx_ptr; - - let rustc_span_globals = - rustc_span::GLOBALS.with(|rustc_span_globals| rustc_span_globals as *const _); - let rustc_span_globals = &*rustc_span_globals; - let syntax_globals = rustc_ast::attr::GLOBALS.with(|syntax_globals| syntax_globals as *const _); - let syntax_globals = &*syntax_globals; - thread::spawn(move || { - tls::GCX_PTR.set(gcx_ptr, || { - rustc_ast::attr::GLOBALS.set(syntax_globals, || { - rustc_span::GLOBALS - .set(rustc_span_globals, || tls::with_global(|tcx| deadlock(tcx, ®istry))) - }); - }) - }); -} - /// Detects query cycles by using depth first search over all active query jobs. /// If a query cycle is found it will break the cycle by finding an edge which /// uses a query latch and then resuming that waiter. /// There may be multiple cycles involved in a deadlock, so this searches /// all active queries for cycles before finally resuming all the waiters at once. #[cfg(parallel_compiler)] -fn deadlock(tcx: CTX, registry: &rayon_core::Registry) { +pub fn deadlock(tcx: CTX, registry: &rayon_core::Registry) { let on_panic = OnDrop(|| { eprintln!("deadlock handler panicked, aborting process"); process::abort(); diff --git a/src/librustc_query_system/query/mod.rs b/src/librustc_query_system/query/mod.rs index c75e0d95e8f..0b8ad5c16a5 100644 --- a/src/librustc_query_system/query/mod.rs +++ b/src/librustc_query_system/query/mod.rs @@ -1,195 +1,13 @@ -use crate::dep_graph::{self, DepConstructor, DepNode, DepNodeParams}; -use crate::hir::exports::Export; -use crate::hir::map; -use crate::infer::canonical::{self, Canonical}; -use crate::lint::LintLevelMap; -use crate::middle::codegen_fn_attrs::CodegenFnAttrs; -use crate::middle::cstore::{CrateSource, DepKind, NativeLibraryKind}; -use crate::middle::cstore::{ExternCrate, ForeignModule, LinkagePreference, NativeLibrary}; -use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportLevel}; -use crate::middle::lang_items::{LangItem, LanguageItems}; -use crate::middle::lib_features::LibFeatures; -use crate::middle::privacy::AccessLevels; -use crate::middle::region; -use crate::middle::resolve_lifetime::{ObjectLifetimeDefault, Region, ResolveLifetimes}; -use crate::middle::stability::{self, DeprecationEntry}; -use crate::mir; -use crate::mir::interpret::GlobalId; -use crate::mir::interpret::{ConstEvalRawResult, ConstEvalResult, ConstValue}; -use crate::mir::interpret::{LitToConstError, LitToConstInput}; -use crate::mir::mono::CodegenUnit; -use crate::traits::query::{ - CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal, - CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal, - CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal, NoSolution, -}; -use crate::traits::query::{ - DropckOutlivesResult, DtorckConstraint, MethodAutoderefStepsResult, NormalizationResult, - OutlivesBound, -}; -use crate::traits::specialization_graph; -use crate::traits::Clauses; -use crate::traits::{self, Vtable}; -use crate::ty::steal::Steal; -use crate::ty::subst::{GenericArg, SubstsRef}; -use crate::ty::util::AlwaysRequiresDrop; -use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt}; -use crate::util::common::ErrorReported; -use rustc_data_structures::fingerprint::Fingerprint; -use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap}; -use 
rustc_data_structures::profiling::ProfileCategory::*; -use rustc_data_structures::stable_hasher::StableVec; -use rustc_data_structures::svh::Svh; -use rustc_data_structures::sync::Lrc; -use rustc_hir as hir; -use rustc_hir::def::DefKind; -use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId}; -use rustc_hir::{Crate, HirIdSet, ItemLocalId, TraitCandidate}; -use rustc_index::vec::IndexVec; -use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion}; -use rustc_session::CrateDisambiguator; -use rustc_target::spec::PanicStrategy; - -use rustc_ast::ast; -use rustc_attr as attr; -use rustc_span::symbol::Symbol; -use rustc_span::{Span, DUMMY_SP}; -use std::borrow::Cow; -use std::collections::BTreeMap; -use std::ops::Deref; -use std::sync::Arc; - -#[macro_use] mod plumbing; -pub(crate) use self::plumbing::CycleError; -use self::plumbing::*; - -mod stats; -pub use self::stats::print_stats; +pub use self::plumbing::*; mod job; +pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo}; #[cfg(parallel_compiler)] -pub use self::job::handle_deadlock; -use self::job::QueryJobInfo; -pub use self::job::{QueryInfo, QueryJob, QueryJobId}; - -mod keys; -use self::keys::Key; - -mod values; -use self::values::Value; +pub use self::job::deadlock; mod caches; -use self::caches::CacheSelector; +pub use self::caches::{CacheSelector, DefaultCacheSelector, QueryCache}; mod config; -use self::config::QueryAccessors; -pub use self::config::QueryConfig; -pub(crate) use self::config::QueryDescription; - -mod on_disk_cache; -pub use self::on_disk_cache::OnDiskCache; - -mod profiling_support; -pub use self::profiling_support::{IntoSelfProfilingString, QueryKeyStringBuilder}; - -// Each of these queries corresponds to a function pointer field in the -// `Providers` struct for requesting a value of that type, and a method -// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way -// which memoizes and does dep-graph tracking, wrapping around the actual -// `Providers` that the driver creates (using several `rustc_*` crates). -// -// The result type of each query must implement `Clone`, and additionally -// `ty::query::values::Value`, which produces an appropriate placeholder -// (error) value if the query resulted in a query cycle. -// Queries marked with `fatal_cycle` do not need the latter implementation, -// as they will raise an fatal error on query cycles instead. - -rustc_query_append! { [define_queries!][<'tcx>] } - -/// The red/green evaluation system will try to mark a specific DepNode in the -/// dependency graph as green by recursively trying to mark the dependencies of -/// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode` -/// where we don't know if it is red or green and we therefore actually have -/// to recompute its value in order to find out. Since the only piece of -/// information that we have at that point is the `DepNode` we are trying to -/// re-evaluate, we need some way to re-run a query from just that. This is what -/// `force_from_dep_node()` implements. -/// -/// In the general case, a `DepNode` consists of a `DepKind` and an opaque -/// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint -/// is usually constructed by computing a stable hash of the query-key that the -/// `DepNode` corresponds to. Consequently, it is not in general possible to go -/// back from hash to query-key (since hash functions are not reversible). 
For -/// this reason `force_from_dep_node()` is expected to fail from time to time -/// because we just cannot find out, from the `DepNode` alone, what the -/// corresponding query-key is and therefore cannot re-run the query. -/// -/// The system deals with this case letting `try_mark_green` fail which forces -/// the root query to be re-evaluated. -/// -/// Now, if `force_from_dep_node()` would always fail, it would be pretty useless. -/// Fortunately, we can use some contextual information that will allow us to -/// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we -/// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a -/// valid `DefPathHash`. Since we also always build a huge table that maps every -/// `DefPathHash` in the current codebase to the corresponding `DefId`, we have -/// everything we need to re-run the query. -/// -/// Take the `mir_validated` query as an example. Like many other queries, it -/// just has a single parameter: the `DefId` of the item it will compute the -/// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode` -/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode` -/// is actually a `DefPathHash`, and can therefore just look up the corresponding -/// `DefId` in `tcx.def_path_hash_to_def_id`. -/// -/// When you implement a new query, it will likely have a corresponding new -/// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As -/// a rule of thumb, if your query takes a `DefId` or `LocalDefId` as sole parameter, -/// then `force_from_dep_node()` should not fail for it. Otherwise, you can just -/// add it to the "We don't have enough information to reconstruct..." group in -/// the match below. -pub fn force_from_dep_node<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> bool { - // We must avoid ever having to call `force_from_dep_node()` for a - // `DepNode::codegen_unit`: - // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we - // would always end up having to evaluate the first caller of the - // `codegen_unit` query that *is* reconstructible. This might very well be - // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just - // to re-trigger calling the `codegen_unit` query with the right key. At - // that point we would already have re-done all the work we are trying to - // avoid doing in the first place. - // The solution is simple: Just explicitly call the `codegen_unit` query for - // each CGU, right after partitioning. This way `try_mark_green` will always - // hit the cache instead of having to go through `force_from_dep_node`. - // This assertion makes sure, we actually keep applying the solution above. - debug_assert!( - dep_node.kind != crate::dep_graph::DepKind::codegen_unit, - "calling force_from_dep_node() on DepKind::codegen_unit" - ); - - if !dep_node.kind.can_reconstruct_query_key() { - return false; - } - - rustc_dep_node_force!([dep_node, tcx] - // These are inputs that are expected to be pre-allocated and that - // should therefore always be red or green already. - crate::dep_graph::DepKind::CrateMetadata | - - // These are anonymous nodes. - crate::dep_graph::DepKind::TraitSelect | - - // We don't have enough information to reconstruct the query key of - // these. 
- crate::dep_graph::DepKind::CompileCodegenUnit => { - bug!("force_from_dep_node: encountered {:?}", dep_node) - } - ); - - false -} - -pub(crate) fn try_load_from_on_disk_cache<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) { - rustc_dep_node_try_load_from_on_disk_cache!(dep_node, tcx) -} +pub use self::config::{QueryAccessors, QueryConfig, QueryContext, QueryDescription}; diff --git a/src/librustc_query_system/query/plumbing.rs b/src/librustc_query_system/query/plumbing.rs index 65a7081bd38..0bae613fcfb 100644 --- a/src/librustc_query_system/query/plumbing.rs +++ b/src/librustc_query_system/query/plumbing.rs @@ -2,25 +2,21 @@ //! generate the actual methods on tcx which find and execute the provider, //! manage the caches, and so forth. +use crate::dep_graph::{DepKind, DepContext, DepNode}; use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; -use crate::ty::query::caches::QueryCache; -use crate::ty::query::config::{QueryContext, QueryDescription}; -use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId}; -use crate::ty::query::Query; -use crate::ty::tls::{self, ImplicitCtxt}; -use crate::ty::{self, TyCtxt}; +use crate::query::caches::QueryCache; +use crate::query::config::{QueryContext, QueryDescription}; +use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId}; +use crate::HashStableContextProvider; #[cfg(not(parallel_compiler))] use rustc_data_structures::cold_path; +use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::{FxHashMap, FxHasher}; use rustc_data_structures::sharded::Sharded; use rustc_data_structures::sync::{Lock, LockGuard}; use rustc_data_structures::thin_vec::ThinVec; -use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, FatalError, Handler, Level}; -use rustc_query_system::dep_graph::{DepContext, DepGraph, DepKind, DepNode}; -use rustc_query_system::HashStableContextProvider; -use rustc_session::Session; -use rustc_span::def_id::DefId; +use rustc_errors::{Diagnostic, FatalError}; use rustc_span::source_map::DUMMY_SP; use rustc_span::Span; use std::collections::hash_map::Entry; @@ -33,7 +29,7 @@ use std::ptr; #[cfg(debug_assertions)] use std::sync::atomic::{AtomicUsize, Ordering}; -pub(crate) struct QueryStateShard { +pub struct QueryStateShard { cache: C, active: FxHashMap>, @@ -53,15 +49,15 @@ impl Default for QueryStateShard { } } -pub(crate) struct QueryState> { +pub struct QueryState> { cache: C, shards: Sharded>, #[cfg(debug_assertions)] - pub(super) cache_hits: AtomicUsize, + pub cache_hits: AtomicUsize, } impl> QueryState { - pub(super) fn get_lookup( + pub(super) fn get_lookup<'tcx, K2: Hash>( &'tcx self, key: &K2, ) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> { @@ -89,7 +85,7 @@ enum QueryResult { } impl> QueryState { - pub(super) fn iter_results( + pub fn iter_results( &self, f: impl for<'a> FnOnce( Box + 'a>, @@ -97,12 +93,13 @@ impl> QueryState { ) -> R { self.cache.iter(&self.shards, |shard| &mut shard.cache, f) } - pub(super) fn all_inactive(&self) -> bool { + + pub fn all_inactive(&self) -> bool { let shards = self.shards.lock_shards(); shards.iter().all(|shard| shard.active.is_empty()) } - pub(super) fn try_collect_active_jobs( + pub fn try_collect_active_jobs( &self, kind: CTX::DepKind, make_query: fn(C::Key) -> CTX::Query, @@ -144,7 +141,7 @@ impl> Default for QueryState { } /// Values used when checking a query cache which can be reused on a cache-miss to execute the query. 
-pub(crate) struct QueryLookup<'tcx, CTX: QueryContext, K, C> { +pub struct QueryLookup<'tcx, CTX: QueryContext, K, C> { pub(super) key_hash: u64, shard: usize, pub(super) lock: LockGuard<'tcx, QueryStateShard>, @@ -329,10 +326,10 @@ where } #[derive(Clone)] -pub(crate) struct CycleError { +pub struct CycleError { /// The query and related span that uses the cycle. - pub(super) usage: Option<(Span, Q)>, - pub(super) cycle: Vec>, + pub usage: Option<(Span, Q)>, + pub cycle: Vec>, } /// The result of `try_start`. @@ -354,152 +351,6 @@ where Cycle(C::Value), } -impl QueryContext for TyCtxt<'tcx> { - type Query = Query<'tcx>; - - fn session(&self) -> &Session { - &self.sess - } - - fn def_path_str(&self, def_id: DefId) -> String { - TyCtxt::def_path_str(*self, def_id) - } - - fn dep_graph(&self) -> &DepGraph { - &self.dep_graph - } - - fn read_query_job(&self, op: impl FnOnce(Option>) -> R) -> R { - tls::with_related_context(*self, move |icx| op(icx.query)) - } - - fn try_collect_active_jobs( - &self, - ) -> Option, QueryJobInfo>> { - self.queries.try_collect_active_jobs() - } - - /// Executes a job by changing the `ImplicitCtxt` to point to the - /// new query job while it executes. It returns the diagnostics - /// captured during execution and the actual result. - #[inline(always)] - fn start_query( - &self, - token: QueryJobId, - diagnostics: Option<&Lock>>, - compute: impl FnOnce(Self) -> R, - ) -> R { - // The `TyCtxt` stored in TLS has the same global interner lifetime - // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes - // when accessing the `ImplicitCtxt`. - tls::with_related_context(*self, move |current_icx| { - // Update the `ImplicitCtxt` to point to our new query job. - let new_icx = ImplicitCtxt { - tcx: *self, - query: Some(token), - diagnostics, - layout_depth: current_icx.layout_depth, - task_deps: current_icx.task_deps, - }; - - // Use the `ImplicitCtxt` while we execute the query. - tls::enter_context(&new_icx, |_| compute(*self)) - }) - } -} - -impl<'tcx> TyCtxt<'tcx> { - #[inline(never)] - #[cold] - pub(super) fn report_cycle( - self, - CycleError { usage, cycle: stack }: CycleError>, - ) -> DiagnosticBuilder<'tcx> { - assert!(!stack.is_empty()); - - let fix_span = |span: Span, query: &Query<'tcx>| { - self.sess.source_map().guess_head_span(query.default_span(self, span)) - }; - - // Disable naming impls with types in this path, since that - // sometimes cycles itself, leading to extra cycle errors. - // (And cycle errors around impls tend to occur during the - // collect/coherence phases anyhow.) 
- ty::print::with_forced_impl_filename_line(|| { - let span = fix_span(stack[1 % stack.len()].span, &stack[0].query); - let mut err = struct_span_err!( - self.sess, - span, - E0391, - "cycle detected when {}", - stack[0].query.describe(self) - ); - - for i in 1..stack.len() { - let query = &stack[i].query; - let span = fix_span(stack[(i + 1) % stack.len()].span, query); - err.span_note(span, &format!("...which requires {}...", query.describe(self))); - } - - err.note(&format!( - "...which again requires {}, completing the cycle", - stack[0].query.describe(self) - )); - - if let Some((span, query)) = usage { - err.span_note( - fix_span(span, &query), - &format!("cycle used when {}", query.describe(self)), - ); - } - - err - }) - } - - pub fn try_print_query_stack(handler: &Handler) { - eprintln!("query stack during panic:"); - - // Be careful reyling on global state here: this code is called from - // a panic hook, which means that the global `Handler` may be in a weird - // state if it was responsible for triggering the panic. - tls::with_context_opt(|icx| { - if let Some(icx) = icx { - let query_map = icx.tcx.queries.try_collect_active_jobs(); - - let mut current_query = icx.query; - let mut i = 0; - - while let Some(query) = current_query { - let query_info = - if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) { - info - } else { - break; - }; - let mut diag = Diagnostic::new( - Level::FailureNote, - &format!( - "#{} [{}] {}", - i, - query_info.info.query.name(), - query_info.info.query.describe(icx.tcx) - ), - ); - diag.span = - icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into(); - handler.force_print_diagnostic(diag); - - current_query = query_info.job.parent; - i += 1; - } - } - }); - - eprintln!("end of query stack"); - } -} - /// Checks if the query is already computed and in the cache. /// It returns the shard index and a lock guard to the shard, /// which will be used if the query is not in the cache and we need @@ -697,8 +548,6 @@ impl<'tcx> TyCtxt<'tcx> { CTX: QueryContext, Q: QueryDescription, { - use rustc_data_structures::fingerprint::Fingerprint; - assert!( Some(tcx.dep_graph().fingerprint_of(dep_node_index)) == tcx.dep_graph().prev_fingerprint_of(dep_node), @@ -721,7 +570,7 @@ impl<'tcx> TyCtxt<'tcx> { fn force_query_with_job( tcx: CTX, key: Q::Key, - job: JobOwner<'tcx, CTX, Q::Cache>, + job: JobOwner<'_, CTX, Q::Cache>, dep_node: DepNode, ) -> (Q::Value, DepNodeIndex) where @@ -775,7 +624,7 @@ impl<'tcx> TyCtxt<'tcx> { (result, dep_node_index) } -pub(super) trait QueryGetter: QueryContext { +pub trait QueryGetter: QueryContext { fn get_query>( self, span: Span, @@ -887,383 +736,3 @@ where ); } } - -macro_rules! handle_cycle_error { - ([][$tcx: expr, $error:expr]) => {{ - $tcx.report_cycle($error).emit(); - Value::from_cycle_error($tcx) - }}; - ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{ - $tcx.report_cycle($error).emit(); - $tcx.sess.abort_if_errors(); - unreachable!() - }}; - ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{ - $tcx.report_cycle($error).delay_as_bug(); - Value::from_cycle_error($tcx) - }}; - ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { - handle_cycle_error!([$($($modifiers)*)*][$($args)*]) - }; -} - -macro_rules! is_anon { - ([]) => {{ - false - }}; - ([anon $($rest:tt)*]) => {{ - true - }}; - ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => { - is_anon!([$($($modifiers)*)*]) - }; -} - -macro_rules! 
is_eval_always { - ([]) => {{ - false - }}; - ([eval_always $($rest:tt)*]) => {{ - true - }}; - ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => { - is_eval_always!([$($($modifiers)*)*]) - }; -} - -macro_rules! query_storage { - (<$tcx:tt>[][$K:ty, $V:ty]) => { - <<$K as Key>::CacheSelector as CacheSelector, $K, $V>>::Cache - }; - (<$tcx:tt>[storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => { - $ty - }; - (<$tcx:tt>[$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { - query_storage!(<$tcx>[$($($modifiers)*)*][$($args)*]) - }; -} - -macro_rules! hash_result { - ([][$hcx:expr, $result:expr]) => {{ - dep_graph::hash_result($hcx, &$result) - }}; - ([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{ - None - }}; - ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => { - hash_result!([$($($modifiers)*)*][$($args)*]) - }; -} - -macro_rules! define_queries { - (<$tcx:tt> $($category:tt { - $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)* - },)*) => { - define_queries_inner! { <$tcx> - $($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($K) -> $V,)*)* - } - } -} - -macro_rules! define_queries_inner { - (<$tcx:tt> - $($(#[$attr:meta])* category<$category:tt> - [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => { - - use std::mem; - use crate::{ - rustc_data_structures::stable_hasher::HashStable, - rustc_data_structures::stable_hasher::StableHasher, - ich::StableHashingContext - }; - use rustc_data_structures::profiling::ProfileCategory; - - define_queries_struct! { - tcx: $tcx, - input: ($(([$($modifiers)*] [$($attr)*] [$name]))*) - } - - #[allow(nonstandard_style)] - #[derive(Clone, Debug)] - pub enum Query<$tcx> { - $($(#[$attr])* $name($K)),* - } - - impl<$tcx> Query<$tcx> { - pub fn name(&self) -> &'static str { - match *self { - $(Query::$name(_) => stringify!($name),)* - } - } - - pub fn describe(&self, tcx: TyCtxt<$tcx>) -> Cow<'static, str> { - let (r, name) = match *self { - $(Query::$name(key) => { - (queries::$name::describe(tcx, key), stringify!($name)) - })* - }; - if tcx.sess.verbose() { - format!("{} [{}]", r, name).into() - } else { - r - } - } - - // FIXME(eddyb) Get more valid `Span`s on queries. - pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span { - if !span.is_dummy() { - return span; - } - // The `def_span` query is used to calculate `default_span`, - // so exit to avoid infinite recursion. - if let Query::def_span(..) 
= *self { - return span - } - match *self { - $(Query::$name(key) => key.default_span(tcx),)* - } - } - } - - impl<'a, $tcx> HashStable> for Query<$tcx> { - fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - mem::discriminant(self).hash_stable(hcx, hasher); - match *self { - $(Query::$name(key) => key.hash_stable(hcx, hasher),)* - } - } - } - - pub mod queries { - use std::marker::PhantomData; - - $(#[allow(nonstandard_style)] - pub struct $name<$tcx> { - data: PhantomData<&$tcx ()> - })* - } - - $(impl<$tcx> QueryConfig> for queries::$name<$tcx> { - type Key = $K; - type Value = $V; - const NAME: &'static str = stringify!($name); - const CATEGORY: ProfileCategory = $category; - } - - impl<$tcx> QueryAccessors> for queries::$name<$tcx> { - const ANON: bool = is_anon!([$($modifiers)*]); - const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]); - const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node; - - type Cache = query_storage!(<$tcx>[$($modifiers)*][$K, $V]); - - #[inline(always)] - fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState, Self::Cache> { - &tcx.queries.$name - } - - #[allow(unused)] - #[inline(always)] - fn to_dep_node(tcx: TyCtxt<$tcx>, key: &Self::Key) -> DepNode { - DepConstructor::$node(tcx, *key) - } - - #[inline] - fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value { - let provider = tcx.queries.providers.get(key.query_crate()) - // HACK(eddyb) it's possible crates may be loaded after - // the query engine is created, and because crate loading - // is not yet integrated with the query engine, such crates - // would be missing appropriate entries in `providers`. - .unwrap_or(&tcx.queries.fallback_extern_providers) - .$name; - provider(tcx, key) - } - - fn hash_result( - _hcx: &mut StableHashingContext<'_>, - _result: &Self::Value - ) -> Option { - hash_result!([$($modifiers)*][_hcx, _result]) - } - - fn handle_cycle_error( - tcx: TyCtxt<'tcx>, - error: CycleError> - ) -> Self::Value { - handle_cycle_error!([$($modifiers)*][tcx, error]) - } - })* - - #[derive(Copy, Clone)] - pub struct TyCtxtEnsure<'tcx> { - pub tcx: TyCtxt<'tcx>, - } - - impl TyCtxtEnsure<$tcx> { - $($(#[$attr])* - #[inline(always)] - pub fn $name(self, key: $K) { - self.tcx.ensure_query::>(key) - })* - } - - #[derive(Copy, Clone)] - pub struct TyCtxtAt<'tcx> { - pub tcx: TyCtxt<'tcx>, - pub span: Span, - } - - impl Deref for TyCtxtAt<'tcx> { - type Target = TyCtxt<'tcx>; - #[inline(always)] - fn deref(&self) -> &Self::Target { - &self.tcx - } - } - - impl TyCtxt<$tcx> { - /// Returns a transparent wrapper for `TyCtxt`, which ensures queries - /// are executed instead of just returning their results. - #[inline(always)] - pub fn ensure(self) -> TyCtxtEnsure<$tcx> { - TyCtxtEnsure { - tcx: self, - } - } - - /// Returns a transparent wrapper for `TyCtxt` which uses - /// `span` as the location of queries performed through it. - #[inline(always)] - pub fn at(self, span: Span) -> TyCtxtAt<$tcx> { - TyCtxtAt { - tcx: self, - span - } - } - - $($(#[$attr])* - #[inline(always)] - pub fn $name(self, key: $K) -> $V { - self.at(DUMMY_SP).$name(key) - })* - - /// All self-profiling events generated by the query engine use - /// virtual `StringId`s for their `event_id`. This method makes all - /// those virtual `StringId`s point to actual strings. - /// - /// If we are recording only summary data, the ids will point to - /// just the query names. If we are recording query keys too, we - /// allocate the corresponding strings here. 
- pub fn alloc_self_profile_query_strings(self) { - use crate::ty::query::profiling_support::{ - alloc_self_profile_query_strings_for_query_cache, - QueryKeyStringCache, - }; - - if !self.prof.enabled() { - return; - } - - let mut string_cache = QueryKeyStringCache::new(); - - $({ - alloc_self_profile_query_strings_for_query_cache( - self, - stringify!($name), - &self.queries.$name, - &mut string_cache, - ); - })* - } - } - - impl TyCtxtAt<$tcx> { - $($(#[$attr])* - #[inline(always)] - pub fn $name(self, key: $K) -> $V { - self.tcx.get_query::>(self.span, key) - })* - } - - define_provider_struct! { - tcx: $tcx, - input: ($(([$($modifiers)*] [$name] [$K] [$V]))*) - } - - impl<$tcx> Copy for Providers<$tcx> {} - impl<$tcx> Clone for Providers<$tcx> { - fn clone(&self) -> Self { *self } - } - } -} - -macro_rules! define_queries_struct { - (tcx: $tcx:tt, - input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => { - pub struct Queries<$tcx> { - /// This provides access to the incrimental comilation on-disk cache for query results. - /// Do not access this directly. It is only meant to be used by - /// `DepGraph::try_mark_green()` and the query infrastructure. - pub(crate) on_disk_cache: OnDiskCache<'tcx>, - - providers: IndexVec>, - fallback_extern_providers: Box>, - - $($(#[$attr])* $name: QueryState< - TyCtxt<$tcx>, - as QueryAccessors>>::Cache, - >,)* - } - - impl<$tcx> Queries<$tcx> { - pub(crate) fn new( - providers: IndexVec>, - fallback_extern_providers: Providers<$tcx>, - on_disk_cache: OnDiskCache<'tcx>, - ) -> Self { - Queries { - providers, - fallback_extern_providers: Box::new(fallback_extern_providers), - on_disk_cache, - $($name: Default::default()),* - } - } - - pub(crate) fn try_collect_active_jobs( - &self - ) -> Option, QueryJobInfo>>> { - let mut jobs = FxHashMap::default(); - - $( - self.$name.try_collect_active_jobs( - as QueryAccessors>>::DEP_KIND, - Query::$name, - &mut jobs, - )?; - )* - - Some(jobs) - } - } - }; -} - -macro_rules! define_provider_struct { - (tcx: $tcx:tt, - input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => { - pub struct Providers<$tcx> { - $(pub $name: fn(TyCtxt<$tcx>, $K) -> $R,)* - } - - impl<$tcx> Default for Providers<$tcx> { - fn default() -> Self { - $(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R { - bug!("`tcx.{}({:?})` unsupported by its crate", - stringify!($name), key); - })* - Providers { $($name),* } - } - } - }; -}
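
The modifier macros removed from plumbing.rs above (is_anon!, is_eval_always!, query_storage!, hash_result!, handle_cycle_error!) all share the same token-munching shape: match the empty modifier list, match the one modifier they care about, and otherwise drop the head modifier and recurse on the tail. Below is a minimal, self-contained sketch of that shape; the has_modifier! name and the main() driver are illustrative only and are not part of this diff.

// Sketch of the tt-muncher pattern shared by is_anon! / is_eval_always!:
// scan the bracketed modifier list for one flag, defaulting to false.
macro_rules! has_modifier {
    // Empty modifier list: the flag is not present.
    ([]) => {{
        false
    }};
    // The flag we are looking for sits at the head of the list.
    ([anon $($rest:tt)*]) => {{
        true
    }};
    // Any other modifier, possibly with arguments: skip it and keep scanning.
    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
        has_modifier!([$($($modifiers)*)*])
    };
}

fn main() {
    let empty = has_modifier!([]);
    let anon = has_modifier!([anon]);
    let other = has_modifier!([eval_always]);
    assert_eq!((empty, anon, other), (false, true, false));
}

As the removed define_queries_inner! expansion shows, each query's [$($modifiers)*] list is fed through these matchers to compute the associated consts ANON and EVAL_ALWAYS and to select the hashing and cycle-error strategy for that query.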