Skip to content

Commit e240783

Browse files
committed Feb 8, 2022
Switch QueryJobId to a single global counter
This replaces the per-shard counters with a single global counter, simplifying the JobId struct down to just a u64 and removing the need to pipe a DepKind generic through a bunch of code. The performance implications on non-parallel compilers are likely minimal (this switches to `Cell<u64>` as the backing storage over a `u64`, but the latter was already inside a `RefCell` so it's not really a significant divergence). On parallel compilers, the cost of a single global u64 counter may be more significant: it adds a serialization point in theory. On the other hand, we can imagine changing the counter to have a thread-local component if it becomes worrisome, or some similar structure. The new design is sufficiently simpler that it warrants the potential for slight changes down the line if/when we get parallel compilation to be more of a default. A u64 counter, instead of u32 (the old per-shard width), is chosen to avoid possibly overflowing it and causing problems; it is effectively impossible that we would overflow a u64 counter in this context.
1 parent 88fb06a commit e240783

File tree

7 files changed

+115
-177
lines changed

7 files changed

+115
-177
lines changed
 

‎compiler/rustc_middle/src/ty/context.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1668,7 +1668,7 @@ CloneLiftImpls! { for<'tcx> { Constness, traits::WellFormedLoc, } }
16681668
pub mod tls {
16691669
use super::{ptr_eq, GlobalCtxt, TyCtxt};
16701670

1671-
use crate::dep_graph::{DepKind, TaskDepsRef};
1671+
use crate::dep_graph::TaskDepsRef;
16721672
use crate::ty::query;
16731673
use rustc_data_structures::sync::{self, Lock};
16741674
use rustc_data_structures::thin_vec::ThinVec;
@@ -1693,7 +1693,7 @@ pub mod tls {
16931693

16941694
/// The current query job, if any. This is updated by `JobOwner::start` in
16951695
/// `ty::query::plumbing` when executing a query.
1696-
pub query: Option<query::QueryJobId<DepKind>>,
1696+
pub query: Option<query::QueryJobId>,
16971697

16981698
/// Where to store diagnostics for the current query job, if any.
16991699
/// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query.

‎compiler/rustc_query_impl/src/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ extern crate rustc_macros;
1515
extern crate rustc_middle;
1616

1717
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
18+
use rustc_data_structures::sync::AtomicU64;
1819
use rustc_middle::arena::Arena;
1920
use rustc_middle::dep_graph::{self, DepKindStruct, SerializedDepNodeIndex};
2021
use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values};

‎compiler/rustc_query_impl/src/plumbing.rs

Lines changed: 21 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
//! manage the caches, and so forth.
44
55
use crate::{on_disk_cache, Queries};
6-
use rustc_middle::dep_graph::{DepKind, DepNodeIndex, SerializedDepNodeIndex};
6+
use rustc_middle::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
77
use rustc_middle::ty::tls::{self, ImplicitCtxt};
88
use rustc_middle::ty::TyCtxt;
99
use rustc_query_system::dep_graph::HasDepContext;
@@ -15,6 +15,7 @@ use rustc_errors::{Diagnostic, Handler};
1515
use rustc_serialize::opaque;
1616

1717
use std::any::Any;
18+
use std::num::NonZeroU64;
1819

1920
#[derive(Copy, Clone)]
2021
pub struct QueryCtxt<'tcx> {
@@ -42,11 +43,20 @@ impl<'tcx> HasDepContext for QueryCtxt<'tcx> {
4243
}
4344

4445
impl QueryContext for QueryCtxt<'_> {
45-
fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>> {
46+
fn next_job_id(&self) -> QueryJobId {
47+
QueryJobId(
48+
NonZeroU64::new(
49+
self.queries.jobs.fetch_add(1, rustc_data_structures::sync::Ordering::Relaxed),
50+
)
51+
.unwrap(),
52+
)
53+
}
54+
55+
fn current_query_job(&self) -> Option<QueryJobId> {
4656
tls::with_related_context(**self, |icx| icx.query)
4757
}
4858

49-
fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>> {
59+
fn try_collect_active_jobs(&self) -> Option<QueryMap> {
5060
self.queries.try_collect_active_jobs(**self)
5161
}
5262

@@ -81,7 +91,7 @@ impl QueryContext for QueryCtxt<'_> {
8191
#[inline(always)]
8292
fn start_query<R>(
8393
&self,
84-
token: QueryJobId<Self::DepKind>,
94+
token: QueryJobId,
8595
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
8696
compute: impl FnOnce() -> R,
8797
) -> R {
@@ -152,7 +162,7 @@ impl<'tcx> QueryCtxt<'tcx> {
152162

153163
pub fn try_print_query_stack(
154164
self,
155-
query: Option<QueryJobId<DepKind>>,
165+
query: Option<QueryJobId>,
156166
handler: &Handler,
157167
num_frames: Option<usize>,
158168
) -> usize {
@@ -320,7 +330,7 @@ macro_rules! define_queries {
320330
type Cache = query_storage::$name<$tcx>;
321331

322332
#[inline(always)]
323-
fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, Self::Key>
333+
fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<Self::Key>
324334
where QueryCtxt<$tcx>: 'a
325335
{
326336
&tcx.queries.$name
@@ -471,10 +481,9 @@ macro_rules! define_queries_struct {
471481

472482
pub on_disk_cache: Option<OnDiskCache<$tcx>>,
473483

474-
$($(#[$attr])* $name: QueryState<
475-
crate::dep_graph::DepKind,
476-
query_keys::$name<$tcx>,
477-
>,)*
484+
jobs: AtomicU64,
485+
486+
$($(#[$attr])* $name: QueryState<query_keys::$name<$tcx>>,)*
478487
}
479488

480489
impl<$tcx> Queries<$tcx> {
@@ -487,21 +496,21 @@ macro_rules! define_queries_struct {
487496
local_providers: Box::new(local_providers),
488497
extern_providers: Box::new(extern_providers),
489498
on_disk_cache,
499+
jobs: AtomicU64::new(1),
490500
$($name: Default::default()),*
491501
}
492502
}
493503

494504
pub(crate) fn try_collect_active_jobs(
495505
&$tcx self,
496506
tcx: TyCtxt<$tcx>,
497-
) -> Option<QueryMap<crate::dep_graph::DepKind>> {
507+
) -> Option<QueryMap> {
498508
let tcx = QueryCtxt { tcx, queries: self };
499509
let mut jobs = QueryMap::default();
500510

501511
$(
502512
self.$name.try_collect_active_jobs(
503513
tcx,
504-
dep_graph::DepKind::$name,
505514
make_query::$name,
506515
&mut jobs,
507516
)?;

‎compiler/rustc_query_system/src/query/config.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ pub trait QueryDescription<CTX: QueryContext>: QueryConfig {
5959
fn describe(tcx: CTX, key: Self::Key) -> String;
6060

6161
// Don't use this method to access query results, instead use the methods on TyCtxt
62-
fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, Self::Key>
62+
fn query_state<'a>(tcx: CTX) -> &'a QueryState<Self::Key>
6363
where
6464
CTX: 'a;
6565

‎compiler/rustc_query_system/src/query/job.rs

Lines changed: 54 additions & 101 deletions
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,11 @@ use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Leve
77
use rustc_session::Session;
88
use rustc_span::Span;
99

10-
use std::convert::TryFrom;
1110
use std::hash::Hash;
12-
use std::num::NonZeroU32;
11+
use std::num::NonZeroU64;
1312

1413
#[cfg(parallel_compiler)]
1514
use {
16-
crate::dep_graph::DepKind,
1715
parking_lot::{Condvar, Mutex},
1816
rustc_data_structures::fx::FxHashSet,
1917
rustc_data_structures::sync::Lock,
@@ -33,80 +31,57 @@ pub struct QueryInfo {
3331
pub query: QueryStackFrame,
3432
}
3533

36-
pub type QueryMap<D> = FxHashMap<QueryJobId<D>, QueryJobInfo<D>>;
37-
38-
/// A value uniquely identifying an active query job within a shard in the query cache.
39-
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
40-
pub struct QueryShardJobId(pub NonZeroU32);
34+
pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
4135

4236
/// A value uniquely identifying an active query job.
4337
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
44-
pub struct QueryJobId<D> {
45-
/// Which job within a shard is this
46-
pub job: QueryShardJobId,
47-
48-
/// In which shard is this job
49-
pub shard: u16,
38+
pub struct QueryJobId(pub NonZeroU64);
5039

51-
/// What kind of query this job is.
52-
pub kind: D,
53-
}
54-
55-
impl<D> QueryJobId<D>
56-
where
57-
D: Copy + Clone + Eq + Hash,
58-
{
59-
pub fn new(job: QueryShardJobId, shard: usize, kind: D) -> Self {
60-
QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
61-
}
62-
63-
fn query(self, map: &QueryMap<D>) -> QueryStackFrame {
40+
impl QueryJobId {
41+
fn query(self, map: &QueryMap) -> QueryStackFrame {
6442
map.get(&self).unwrap().query.clone()
6543
}
6644

6745
#[cfg(parallel_compiler)]
68-
fn span(self, map: &QueryMap<D>) -> Span {
46+
fn span(self, map: &QueryMap) -> Span {
6947
map.get(&self).unwrap().job.span
7048
}
7149

7250
#[cfg(parallel_compiler)]
73-
fn parent(self, map: &QueryMap<D>) -> Option<QueryJobId<D>> {
51+
fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
7452
map.get(&self).unwrap().job.parent
7553
}
7654

7755
#[cfg(parallel_compiler)]
78-
fn latch<'a>(self, map: &'a QueryMap<D>) -> Option<&'a QueryLatch<D>> {
56+
fn latch<'a>(self, map: &'a QueryMap) -> Option<&'a QueryLatch> {
7957
map.get(&self).unwrap().job.latch.as_ref()
8058
}
8159
}
8260

83-
pub struct QueryJobInfo<D> {
61+
pub struct QueryJobInfo {
8462
pub query: QueryStackFrame,
85-
pub job: QueryJob<D>,
63+
pub job: QueryJob,
8664
}
8765

8866
/// Represents an active query job.
8967
#[derive(Clone)]
90-
pub struct QueryJob<D> {
91-
pub id: QueryShardJobId,
68+
pub struct QueryJob {
69+
pub id: QueryJobId,
9270

9371
/// The span corresponding to the reason for which this query was required.
9472
pub span: Span,
9573

9674
/// The parent query job which created this job and is implicitly waiting on it.
97-
pub parent: Option<QueryJobId<D>>,
75+
pub parent: Option<QueryJobId>,
9876

9977
/// The latch that is used to wait on this job.
10078
#[cfg(parallel_compiler)]
101-
latch: Option<QueryLatch<D>>,
79+
latch: Option<QueryLatch>,
10280
}
10381

104-
impl<D> QueryJob<D>
105-
where
106-
D: Copy + Clone + Eq + Hash,
107-
{
82+
impl QueryJob {
10883
/// Creates a new query job.
109-
pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
84+
pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
11085
QueryJob {
11186
id,
11287
span,
@@ -117,7 +92,7 @@ where
11792
}
11893

11994
#[cfg(parallel_compiler)]
120-
pub(super) fn latch(&mut self) -> QueryLatch<D> {
95+
pub(super) fn latch(&mut self) -> QueryLatch {
12196
if self.latch.is_none() {
12297
self.latch = Some(QueryLatch::new());
12398
}
@@ -139,16 +114,13 @@ where
139114
}
140115

141116
#[cfg(not(parallel_compiler))]
142-
impl<D> QueryJobId<D>
143-
where
144-
D: Copy + Clone + Eq + Hash,
145-
{
117+
impl QueryJobId {
146118
#[cold]
147119
#[inline(never)]
148120
pub(super) fn find_cycle_in_stack(
149121
&self,
150-
query_map: QueryMap<D>,
151-
current_job: &Option<QueryJobId<D>>,
122+
query_map: QueryMap,
123+
current_job: &Option<QueryJobId>,
152124
span: Span,
153125
) -> CycleError {
154126
// Find the waitee amongst `current_job` parents
@@ -184,50 +156,43 @@ where
184156
}
185157

186158
#[cfg(parallel_compiler)]
187-
struct QueryWaiter<D> {
188-
query: Option<QueryJobId<D>>,
159+
struct QueryWaiter {
160+
query: Option<QueryJobId>,
189161
condvar: Condvar,
190162
span: Span,
191163
cycle: Lock<Option<CycleError>>,
192164
}
193165

194166
#[cfg(parallel_compiler)]
195-
impl<D> QueryWaiter<D> {
167+
impl QueryWaiter {
196168
fn notify(&self, registry: &rayon_core::Registry) {
197169
rayon_core::mark_unblocked(registry);
198170
self.condvar.notify_one();
199171
}
200172
}
201173

202174
#[cfg(parallel_compiler)]
203-
struct QueryLatchInfo<D> {
175+
struct QueryLatchInfo {
204176
complete: bool,
205-
waiters: Vec<Lrc<QueryWaiter<D>>>,
177+
waiters: Vec<Lrc<QueryWaiter>>,
206178
}
207179

208180
#[cfg(parallel_compiler)]
209181
#[derive(Clone)]
210-
pub(super) struct QueryLatch<D> {
211-
info: Lrc<Mutex<QueryLatchInfo<D>>>,
182+
pub(super) struct QueryLatch {
183+
info: Lrc<Mutex<QueryLatchInfo>>,
212184
}
213185

214186
#[cfg(parallel_compiler)]
215-
impl<D: Eq + Hash> QueryLatch<D> {
187+
impl QueryLatch {
216188
fn new() -> Self {
217189
QueryLatch {
218190
info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
219191
}
220192
}
221-
}
222193

223-
#[cfg(parallel_compiler)]
224-
impl<D> QueryLatch<D> {
225194
/// Awaits for the query job to complete.
226-
pub(super) fn wait_on(
227-
&self,
228-
query: Option<QueryJobId<D>>,
229-
span: Span,
230-
) -> Result<(), CycleError> {
195+
pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
231196
let waiter =
232197
Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
233198
self.wait_on_inner(&waiter);
@@ -242,7 +207,7 @@ impl<D> QueryLatch<D> {
242207
}
243208

244209
/// Awaits the caller on this latch by blocking the current thread.
245-
fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
210+
fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter>) {
246211
let mut info = self.info.lock();
247212
if !info.complete {
248213
// We push the waiter on to the `waiters` list. It can be accessed inside
@@ -276,7 +241,7 @@ impl<D> QueryLatch<D> {
276241

277242
/// Removes a single waiter from the list of waiters.
278243
/// This is used to break query cycles.
279-
fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
244+
fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter> {
280245
let mut info = self.info.lock();
281246
debug_assert!(!info.complete);
282247
// Remove the waiter from the list of waiters
@@ -286,7 +251,7 @@ impl<D> QueryLatch<D> {
286251

287252
/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
288253
#[cfg(parallel_compiler)]
289-
type Waiter<D> = (QueryJobId<D>, usize);
254+
type Waiter = (QueryJobId, usize);
290255

291256
/// Visits all the non-resumable and resumable waiters of a query.
292257
/// Only waiters in a query are visited.
@@ -298,14 +263,9 @@ type Waiter<D> = (QueryJobId<D>, usize);
298263
/// required information to resume the waiter.
299264
/// If all `visit` calls returns None, this function also returns None.
300265
#[cfg(parallel_compiler)]
301-
fn visit_waiters<D, F>(
302-
query_map: &QueryMap<D>,
303-
query: QueryJobId<D>,
304-
mut visit: F,
305-
) -> Option<Option<Waiter<D>>>
266+
fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
306267
where
307-
D: Copy + Clone + Eq + Hash,
308-
F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
268+
F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
309269
{
310270
// Visit the parent query which is a non-resumable waiter since it's on the same stack
311271
if let Some(parent) = query.parent(query_map) {
@@ -334,16 +294,13 @@ where
334294
/// If a cycle is detected, this initial value is replaced with the span causing
335295
/// the cycle.
336296
#[cfg(parallel_compiler)]
337-
fn cycle_check<D>(
338-
query_map: &QueryMap<D>,
339-
query: QueryJobId<D>,
297+
fn cycle_check(
298+
query_map: &QueryMap,
299+
query: QueryJobId,
340300
span: Span,
341-
stack: &mut Vec<(Span, QueryJobId<D>)>,
342-
visited: &mut FxHashSet<QueryJobId<D>>,
343-
) -> Option<Option<Waiter<D>>>
344-
where
345-
D: Copy + Clone + Eq + Hash,
346-
{
301+
stack: &mut Vec<(Span, QueryJobId)>,
302+
visited: &mut FxHashSet<QueryJobId>,
303+
) -> Option<Option<Waiter>> {
347304
if !visited.insert(query) {
348305
return if let Some(p) = stack.iter().position(|q| q.1 == query) {
349306
// We detected a query cycle, fix up the initial span and return Some
@@ -378,14 +335,11 @@ where
378335
/// from `query` without going through any of the queries in `visited`.
379336
/// This is achieved with a depth first search.
380337
#[cfg(parallel_compiler)]
381-
fn connected_to_root<D>(
382-
query_map: &QueryMap<D>,
383-
query: QueryJobId<D>,
384-
visited: &mut FxHashSet<QueryJobId<D>>,
385-
) -> bool
386-
where
387-
D: Copy + Clone + Eq + Hash,
388-
{
338+
fn connected_to_root(
339+
query_map: &QueryMap,
340+
query: QueryJobId,
341+
visited: &mut FxHashSet<QueryJobId>,
342+
) -> bool {
389343
// We already visited this or we're deliberately ignoring it
390344
if !visited.insert(query) {
391345
return false;
@@ -404,10 +358,9 @@ where
404358

405359
// Deterministically pick an query from a list
406360
#[cfg(parallel_compiler)]
407-
fn pick_query<'a, D, T, F>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
361+
fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
408362
where
409-
D: Copy + Clone + Eq + Hash,
410-
F: Fn(&T) -> (Span, QueryJobId<D>),
363+
F: Fn(&T) -> (Span, QueryJobId),
411364
{
412365
// Deterministically pick an entry point
413366
// FIXME: Sort this instead
@@ -431,10 +384,10 @@ where
431384
/// If a cycle was not found, the starting query is removed from `jobs` and
432385
/// the function returns false.
433386
#[cfg(parallel_compiler)]
434-
fn remove_cycle<D: DepKind>(
435-
query_map: &QueryMap<D>,
436-
jobs: &mut Vec<QueryJobId<D>>,
437-
wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
387+
fn remove_cycle(
388+
query_map: &QueryMap,
389+
jobs: &mut Vec<QueryJobId>,
390+
wakelist: &mut Vec<Lrc<QueryWaiter>>,
438391
) -> bool {
439392
let mut visited = FxHashSet::default();
440393
let mut stack = Vec::new();
@@ -489,7 +442,7 @@ fn remove_cycle<D: DepKind>(
489442
}
490443
}
491444
})
492-
.collect::<Vec<(Span, QueryJobId<D>, Option<(Span, QueryJobId<D>)>)>>();
445+
.collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();
493446

494447
// Deterministically pick an entry point
495448
let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));
@@ -544,7 +497,7 @@ pub fn deadlock<CTX: QueryContext>(tcx: CTX, registry: &rayon_core::Registry) {
544497

545498
let mut wakelist = Vec::new();
546499
let query_map = tcx.try_collect_active_jobs().unwrap();
547-
let mut jobs: Vec<QueryJobId<CTX::DepKind>> = query_map.keys().cloned().collect();
500+
let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
548501

549502
let mut found_cycle = false;
550503

@@ -630,7 +583,7 @@ pub(crate) fn report_cycle<'a>(
630583

631584
pub fn print_query_stack<CTX: QueryContext>(
632585
tcx: CTX,
633-
mut current_query: Option<QueryJobId<CTX::DepKind>>,
586+
mut current_query: Option<QueryJobId>,
634587
handler: &Handler,
635588
num_frames: Option<usize>,
636589
) -> usize {

‎compiler/rustc_query_system/src/query/mod.rs

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -117,10 +117,12 @@ impl QuerySideEffects {
117117
}
118118

119119
pub trait QueryContext: HasDepContext {
120+
fn next_job_id(&self) -> QueryJobId;
121+
120122
/// Get the query information from the TLS context.
121-
fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
123+
fn current_query_job(&self) -> Option<QueryJobId>;
122124

123-
fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;
125+
fn try_collect_active_jobs(&self) -> Option<QueryMap>;
124126

125127
/// Load side effects associated to the node in the previous session.
126128
fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
@@ -140,7 +142,7 @@ pub trait QueryContext: HasDepContext {
140142
/// captured during execution and the actual result.
141143
fn start_query<R>(
142144
&self,
143-
token: QueryJobId<Self::DepKind>,
145+
token: QueryJobId,
144146
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
145147
compute: impl FnOnce() -> R,
146148
) -> R;

‎compiler/rustc_query_system/src/query/plumbing.rs

Lines changed: 31 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,7 @@
55
use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
66
use crate::query::caches::QueryCache;
77
use crate::query::config::{QueryDescription, QueryVtable};
8-
use crate::query::job::{
9-
report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId,
10-
};
8+
use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
119
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
1210
use rustc_data_structures::fingerprint::Fingerprint;
1311
use rustc_data_structures::fx::{FxHashMap, FxHasher};
@@ -24,7 +22,6 @@ use std::collections::hash_map::Entry;
2422
use std::fmt::Debug;
2523
use std::hash::{Hash, Hasher};
2624
use std::mem;
27-
use std::num::NonZeroU32;
2825
use std::ptr;
2926

3027
pub struct QueryCacheStore<C: QueryCache> {
@@ -69,36 +66,32 @@ impl<C: QueryCache> QueryCacheStore<C> {
6966
}
7067
}
7168

72-
struct QueryStateShard<D, K> {
73-
active: FxHashMap<K, QueryResult<D>>,
74-
75-
/// Used to generate unique ids for active jobs.
76-
jobs: u32,
69+
struct QueryStateShard<K> {
70+
active: FxHashMap<K, QueryResult>,
7771
}
7872

79-
impl<D, K> Default for QueryStateShard<D, K> {
80-
fn default() -> QueryStateShard<D, K> {
81-
QueryStateShard { active: Default::default(), jobs: 0 }
73+
impl<K> Default for QueryStateShard<K> {
74+
fn default() -> QueryStateShard<K> {
75+
QueryStateShard { active: Default::default() }
8276
}
8377
}
8478

85-
pub struct QueryState<D, K> {
86-
shards: Sharded<QueryStateShard<D, K>>,
79+
pub struct QueryState<K> {
80+
shards: Sharded<QueryStateShard<K>>,
8781
}
8882

8983
/// Indicates the state of a query for a given key in a query map.
90-
enum QueryResult<D> {
84+
enum QueryResult {
9185
/// An already executing query. The query job can be used to await for its completion.
92-
Started(QueryJob<D>),
86+
Started(QueryJob),
9387

9488
/// The query panicked. Queries trying to wait on this will raise a fatal error which will
9589
/// silently panic.
9690
Poisoned,
9791
}
9892

99-
impl<D, K> QueryState<D, K>
93+
impl<K> QueryState<K>
10094
where
101-
D: Copy + Clone + Eq + Hash,
10295
K: Eq + Hash + Clone + Debug,
10396
{
10497
pub fn all_inactive(&self) -> bool {
@@ -109,19 +102,17 @@ where
109102
pub fn try_collect_active_jobs<CTX: Copy>(
110103
&self,
111104
tcx: CTX,
112-
kind: D,
113105
make_query: fn(CTX, K) -> QueryStackFrame,
114-
jobs: &mut QueryMap<D>,
106+
jobs: &mut QueryMap,
115107
) -> Option<()> {
116108
// We use try_lock_shards here since we are called from the
117109
// deadlock handler, and this shouldn't be locked.
118110
let shards = self.shards.try_lock_shards()?;
119-
for (shard_id, shard) in shards.iter().enumerate() {
111+
for shard in shards.iter() {
120112
for (k, v) in shard.active.iter() {
121113
if let QueryResult::Started(ref job) = *v {
122-
let id = QueryJobId::new(job.id, shard_id, kind);
123114
let query = make_query(tcx, k.clone());
124-
jobs.insert(id, QueryJobInfo { query, job: job.clone() });
115+
jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
125116
}
126117
}
127118
}
@@ -130,22 +121,21 @@ where
130121
}
131122
}
132123

133-
impl<D, K> Default for QueryState<D, K> {
134-
fn default() -> QueryState<D, K> {
124+
impl<K> Default for QueryState<K> {
125+
fn default() -> QueryState<K> {
135126
QueryState { shards: Default::default() }
136127
}
137128
}
138129

139130
/// A type representing the responsibility to execute the job in the `job` field.
140131
/// This will poison the relevant query if dropped.
141-
struct JobOwner<'tcx, D, K>
132+
struct JobOwner<'tcx, K>
142133
where
143-
D: Copy + Clone + Eq + Hash,
144134
K: Eq + Hash + Clone,
145135
{
146-
state: &'tcx QueryState<D, K>,
136+
state: &'tcx QueryState<K>,
147137
key: K,
148-
id: QueryJobId<D>,
138+
id: QueryJobId,
149139
}
150140

151141
#[cold]
@@ -166,9 +156,8 @@ where
166156
cache.store_nocache(value)
167157
}
168158

169-
impl<'tcx, D, K> JobOwner<'tcx, D, K>
159+
impl<'tcx, K> JobOwner<'tcx, K>
170160
where
171-
D: Copy + Clone + Eq + Hash,
172161
K: Eq + Hash + Clone,
173162
{
174163
/// Either gets a `JobOwner` corresponding the query, allowing us to
@@ -182,12 +171,11 @@ where
182171
#[inline(always)]
183172
fn try_start<'b, CTX>(
184173
tcx: &'b CTX,
185-
state: &'b QueryState<CTX::DepKind, K>,
174+
state: &'b QueryState<K>,
186175
span: Span,
187176
key: K,
188177
lookup: QueryLookup,
189-
dep_kind: CTX::DepKind,
190-
) -> TryGetJob<'b, CTX::DepKind, K>
178+
) -> TryGetJob<'b, K>
191179
where
192180
CTX: QueryContext,
193181
{
@@ -197,27 +185,21 @@ where
197185

198186
match lock.active.entry(key) {
199187
Entry::Vacant(entry) => {
200-
// Generate an id unique within this shard.
201-
let id = lock.jobs.checked_add(1).unwrap();
202-
lock.jobs = id;
203-
let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
204-
188+
let id = tcx.next_job_id();
205189
let job = tcx.current_query_job();
206190
let job = QueryJob::new(id, span, job);
207191

208192
let key = entry.key().clone();
209193
entry.insert(QueryResult::Started(job));
210194

211-
let global_id = QueryJobId::new(id, shard, dep_kind);
212-
let owner = JobOwner { state, id: global_id, key };
195+
let owner = JobOwner { state, id, key };
213196
return TryGetJob::NotYetStarted(owner);
214197
}
215198
Entry::Occupied(mut entry) => {
216199
match entry.get_mut() {
217200
#[cfg(not(parallel_compiler))]
218201
QueryResult::Started(job) => {
219-
let id = QueryJobId::new(job.id, shard, dep_kind);
220-
202+
let id = job.id;
221203
drop(state_lock);
222204

223205
// If we are single-threaded we know that we have cycle error,
@@ -295,9 +277,8 @@ where
295277
}
296278
}
297279

298-
impl<'tcx, D, K> Drop for JobOwner<'tcx, D, K>
280+
impl<'tcx, K> Drop for JobOwner<'tcx, K>
299281
where
300-
D: Copy + Clone + Eq + Hash,
301282
K: Eq + Hash + Clone,
302283
{
303284
#[inline(never)]
@@ -329,13 +310,12 @@ pub(crate) struct CycleError {
329310
}
330311

331312
/// The result of `try_start`.
332-
enum TryGetJob<'tcx, D, K>
313+
enum TryGetJob<'tcx, K>
333314
where
334-
D: Copy + Clone + Eq + Hash,
335315
K: Eq + Hash + Clone,
336316
{
337317
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
338-
NotYetStarted(JobOwner<'tcx, D, K>),
318+
NotYetStarted(JobOwner<'tcx, K>),
339319

340320
/// The query was already completed.
341321
/// Returns the result of the query and its dep-node index
@@ -375,7 +355,7 @@ where
375355

376356
fn try_execute_query<CTX, C>(
377357
tcx: CTX,
378-
state: &QueryState<CTX::DepKind, C::Key>,
358+
state: &QueryState<C::Key>,
379359
cache: &QueryCacheStore<C>,
380360
span: Span,
381361
key: C::Key,
@@ -388,14 +368,7 @@ where
388368
C::Key: Clone + DepNodeParams<CTX::DepContext>,
389369
CTX: QueryContext,
390370
{
391-
match JobOwner::<'_, CTX::DepKind, C::Key>::try_start(
392-
&tcx,
393-
state,
394-
span,
395-
key.clone(),
396-
lookup,
397-
query.dep_kind,
398-
) {
371+
match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone(), lookup) {
399372
TryGetJob::NotYetStarted(job) => {
400373
let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
401374
let result = job.complete(cache, result, dep_node_index);
@@ -427,7 +400,7 @@ fn execute_job<CTX, K, V>(
427400
key: K,
428401
mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
429402
query: &QueryVtable<CTX, K, V>,
430-
job_id: QueryJobId<CTX::DepKind>,
403+
job_id: QueryJobId,
431404
) -> (V, DepNodeIndex)
432405
where
433406
K: Clone + DepNodeParams<CTX::DepContext>,

0 commit comments

Comments (0)
Please sign in to comment.