// compiler/rustc_query_system/src/query/plumbing.rs
//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.

use std::cell::Cell;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;

use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::hash_table::{self, Entry, HashTable};
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::LockGuard;
use rustc_data_structures::{outline, sync};
use rustc_errors::{Diag, FatalError, StashKey};
use rustc_span::{DUMMY_SP, Span};
use tracing::instrument;

use super::{QueryDispatcher, QueryStackDeferred, QueryStackFrameExtra};
use crate::dep_graph::{
    DepContext, DepGraphData, DepNode, DepNodeIndex, DepNodeParams, HasDepContext,
};
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryLatch, report_cycle};
use crate::query::{
    CycleErrorHandling, QueryContext, QueryMap, QueryStackFrame, SerializedDepNodeIndex,
};

/// Builds a predicate over `(key, value)` pairs that is true exactly when the
/// pair's key equals `k`. Used to probe the active-key hash tables, which are
/// keyed only on `K`.
#[inline]
fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
    move |entry| entry.0 == *k
}

/// For a particular query, keeps track of "active" keys, i.e. keys whose
/// evaluation has started but has not yet finished successfully.
///
/// (Successful query evaluation for a key is represented by an entry in the
/// query's in-memory cache.)
pub struct QueryState<'tcx, K> {
    // Sharded hash table mapping each in-flight key to the status of its
    // evaluation: a running [`QueryJob`] or a poisoned-by-panic marker.
    active: Sharded<hash_table::HashTable<(K, ActiveKeyStatus<'tcx>)>>,
}

/// For a particular query and key, tracks the status of a query evaluation
/// that has started, but has not yet finished successfully.
///
/// (Successful query evaluation for a key is represented by an entry in the
/// query's in-memory cache.)
enum ActiveKeyStatus<'tcx> {
    /// Some thread is already evaluating the query for this key.
    ///
    /// The enclosed [`QueryJob`] can be used to wait for it to finish.
    Started(QueryJob<'tcx>),

    /// The query panicked. Queries that try to wait on this key raise a
    /// `FatalError` (a silent unwind) instead of producing a value.
    Poisoned,
}

61impl<'tcx> ActiveKeyStatus<'tcx> {
62    /// Obtains the enclosed [`QueryJob`], or panics if this query evaluation
63    /// was poisoned by a panic.
64    fn expect_job(self) -> QueryJob<'tcx> {
65        match self {
66            Self::Started(job) => job,
67            Self::Poisoned => {
68                {
    ::core::panicking::panic_fmt(format_args!("job for query failed to start and was poisoned"));
}panic!("job for query failed to start and was poisoned")
69            }
70        }
71    }
72}
73
impl<'tcx, K> QueryState<'tcx, K>
where
    K: Eq + Hash + Copy + Debug,
{
    /// Returns true if no query evaluation is currently in flight for any key
    /// of this query, i.e. every shard of the active table is empty.
    pub fn all_inactive(&self) -> bool {
        self.active.lock_shards().all(|shard| shard.is_empty())
    }

    /// Collects all currently running jobs for this query into `jobs`, keyed
    /// by job id. When `require_complete` is false the shard locks are only
    /// tried (not blocked on) and `None` is returned if any shard is already
    /// locked, so the deadlock handler never blocks here itself.
    pub fn collect_active_jobs<Qcx: Copy>(
        &self,
        qcx: Qcx,
        make_query: fn(Qcx, K) -> QueryStackFrame<QueryStackDeferred<'tcx>>,
        jobs: &mut QueryMap<'tcx>,
        require_complete: bool,
    ) -> Option<()> {
        let mut active = Vec::new();

        // Clone out the (key, job) pairs of every started entry; poisoned
        // entries carry no job and are skipped.
        let mut collect = |iter: LockGuard<'_, HashTable<(K, ActiveKeyStatus<'tcx>)>>| {
            for (k, v) in iter.iter() {
                if let ActiveKeyStatus::Started(ref job) = *v {
                    active.push((*k, job.clone()));
                }
            }
        };

        if require_complete {
            for shard in self.active.lock_shards() {
                collect(shard);
            }
        } else {
            // We use try_lock_shards here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            for shard in self.active.try_lock_shards() {
                collect(shard?);
            }
        }

        // Call `make_query` while we're not holding a `self.active` lock as `make_query` may call
        // queries leading to a deadlock.
        for (key, job) in active {
            let query = make_query(qcx, key);
            jobs.insert(job.id, QueryJobInfo { query, job });
        }

        Some(())
    }
}

122impl<'tcx, K> Default for QueryState<'tcx, K> {
123    fn default() -> QueryState<'tcx, K> {
124        QueryState { active: Default::default() }
125    }
126}
127
/// Represents the responsibility to finish executing the active query job for
/// `key`. Dropping a `JobOwner` (e.g. during unwinding) poisons the query's
/// entry so that waiters observe the panic; the normal path is to call
/// [`JobOwner::complete`], which forgets the owner instead.
struct JobOwner<'a, 'tcx, K>
where
    K: Eq + Hash + Copy,
{
    // The per-query table of active keys this job is registered in.
    state: &'a QueryState<'tcx, K>,
    // The key whose evaluation this job owns.
    key: K,
}

138#[cold]
139#[inline(never)]
140fn mk_cycle<'tcx, Q>(query: Q, qcx: Q::Qcx, cycle_error: CycleError) -> Q::Value
141where
142    Q: QueryDispatcher<'tcx>,
143{
144    let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
145    handle_cycle_error(query, qcx, &cycle_error, error)
146}
147
148fn handle_cycle_error<'tcx, Q>(
149    query: Q,
150    qcx: Q::Qcx,
151    cycle_error: &CycleError,
152    error: Diag<'_>,
153) -> Q::Value
154where
155    Q: QueryDispatcher<'tcx>,
156{
157    match query.cycle_error_handling() {
158        CycleErrorHandling::Error => {
159            let guar = error.emit();
160            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
161        }
162        CycleErrorHandling::Fatal => {
163            error.emit();
164            qcx.dep_context().sess().dcx().abort_if_errors();
165            ::core::panicking::panic("internal error: entered unreachable code")unreachable!()
166        }
167        CycleErrorHandling::DelayBug => {
168            let guar = error.delay_as_bug();
169            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
170        }
171        CycleErrorHandling::Stash => {
172            let guar = if let Some(root) = cycle_error.cycle.first()
173                && let Some(span) = root.query.info.span
174            {
175                error.stash(span, StashKey::Cycle).unwrap()
176            } else {
177                error.emit()
178            };
179            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
180        }
181    }
182}
183
impl<'a, 'tcx, K> JobOwner<'a, 'tcx, K>
where
    K: Eq + Hash + Copy,
{
    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiter and forgets the JobOwner, so it won't poison the query.
    ///
    /// `key_hash` must be the `sharded::make_hash` of `self.key`, so the same
    /// shard is addressed as when the job was registered.
    fn complete<C>(self, cache: &C, key_hash: u64, result: C::Value, dep_node_index: DepNodeIndex)
    where
        C: QueryCache<Key = K>,
    {
        let key = self.key;
        let state = self.state;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        // Mark as complete before we remove the job from the active state
        // so no other thread can re-execute this query.
        cache.complete(key, result, dep_node_index);

        let job = {
            // don't keep the lock during the `unwrap()` of the retrieved value, or we taint the
            // underlying shard.
            // since unwinding also wants to look at this map, this can also prevent a double
            // panic.
            let mut shard = state.active.lock_shard_by_hash(key_hash);
            match shard.find_entry(key_hash, equivalent_key(&key)) {
                Err(_) => None,
                Ok(occupied) => Some(occupied.remove().0.1),
            }
        };
        let job = job.expect("active query job entry").expect_job();

        job.signal_complete();
    }
}

221impl<'a, 'tcx, K> Drop for JobOwner<'a, 'tcx, K>
222where
223    K: Eq + Hash + Copy,
224{
225    #[inline(never)]
226    #[cold]
227    fn drop(&mut self) {
228        // Poison the query so jobs waiting on it panic.
229        let state = self.state;
230        let job = {
231            let key_hash = sharded::make_hash(&self.key);
232            let mut shard = state.active.lock_shard_by_hash(key_hash);
233            match shard.find_entry(key_hash, equivalent_key(&self.key)) {
234                Err(_) => ::core::panicking::panic("explicit panic")panic!(),
235                Ok(occupied) => {
236                    let ((key, value), vacant) = occupied.remove();
237                    vacant.insert((key, ActiveKeyStatus::Poisoned));
238                    value.expect_job()
239                }
240            }
241        };
242        // Also signal the completion of the job, so waiters
243        // will continue execution.
244        job.signal_complete();
245    }
246}
247
248#[derive(#[automatically_derived]
impl<I: ::core::clone::Clone> ::core::clone::Clone for CycleError<I> {
    #[inline]
    fn clone(&self) -> CycleError<I> {
        CycleError {
            usage: ::core::clone::Clone::clone(&self.usage),
            cycle: ::core::clone::Clone::clone(&self.cycle),
        }
    }
}Clone, #[automatically_derived]
impl<I: ::core::fmt::Debug> ::core::fmt::Debug for CycleError<I> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field2_finish(f, "CycleError",
            "usage", &self.usage, "cycle", &&self.cycle)
    }
}Debug)]
249pub struct CycleError<I = QueryStackFrameExtra> {
250    /// The query and related span that uses the cycle.
251    pub usage: Option<(Span, QueryStackFrame<I>)>,
252    pub cycle: Vec<QueryInfo<I>>,
253}
254
255impl<'tcx> CycleError<QueryStackDeferred<'tcx>> {
256    fn lift<Qcx: QueryContext<'tcx>>(&self, qcx: Qcx) -> CycleError<QueryStackFrameExtra> {
257        CycleError {
258            usage: self.usage.as_ref().map(|(span, frame)| (*span, frame.lift(qcx))),
259            cycle: self.cycle.iter().map(|info| info.lift(qcx)).collect(),
260        }
261    }
262}
263
264/// Checks whether there is already a value for this key in the in-memory
265/// query cache, returning that value if present.
266///
267/// (Also performs some associated bookkeeping, if a value was found.)
268#[inline(always)]
269pub fn try_get_cached<Tcx, C>(tcx: Tcx, cache: &C, key: &C::Key) -> Option<C::Value>
270where
271    C: QueryCache,
272    Tcx: DepContext,
273{
274    match cache.lookup(key) {
275        Some((value, index)) => {
276            tcx.profiler().query_cache_hit(index.into());
277            tcx.dep_graph().read_index(index);
278            Some(value)
279        }
280        None => None,
281    }
282}
283
/// Builds and reports a query-cycle error for the already-running job
/// `try_execute`, returning the query's cycle-recovery value (and no
/// dep-node index, since nothing was computed).
#[cold]
#[inline(never)]
fn cycle_error<'tcx, Q>(
    query: Q,
    qcx: Q::Qcx,
    try_execute: QueryJobId,
    span: Span,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryDispatcher<'tcx>,
{
    // Ensure there were no errors collecting all active jobs.
    // We need the complete map to ensure we find a cycle to break.
    // (`.ok()` discards the error value before the `expect`.)
    let query_map = qcx.collect_active_jobs(false).ok().expect("failed to collect active queries");

    let error = try_execute.find_cycle_in_stack(query_map, &qcx.current_query_job(), span);
    (mk_cycle(query, qcx, error.lift(qcx)), None)
}

303#[inline(always)]
304fn wait_for_query<'tcx, Q>(
305    query: Q,
306    qcx: Q::Qcx,
307    span: Span,
308    key: Q::Key,
309    latch: QueryLatch<'tcx>,
310    current: Option<QueryJobId>,
311) -> (Q::Value, Option<DepNodeIndex>)
312where
313    Q: QueryDispatcher<'tcx>,
314{
315    // For parallel queries, we'll block and wait until the query running
316    // in another thread has completed. Record how long we wait in the
317    // self-profiler.
318    let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();
319
320    // With parallel queries we might just have to wait on some other
321    // thread.
322    let result = latch.wait_on(qcx, current, span);
323
324    match result {
325        Ok(()) => {
326            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
327                outline(|| {
328                    // We didn't find the query result in the query cache. Check if it was
329                    // poisoned due to a panic instead.
330                    let key_hash = sharded::make_hash(&key);
331                    let shard = query.query_state(qcx).active.lock_shard_by_hash(key_hash);
332                    match shard.find(key_hash, equivalent_key(&key)) {
333                        // The query we waited on panicked. Continue unwinding here.
334                        Some((_, ActiveKeyStatus::Poisoned)) => FatalError.raise(),
335                        _ => {
    ::core::panicking::panic_fmt(format_args!("query \'{0}\' result must be in the cache or the query must be poisoned after a wait",
            query.name()));
}panic!(
336                            "query '{}' result must be in the cache or the query must be poisoned after a wait",
337                            query.name()
338                        ),
339                    }
340                })
341            };
342
343            qcx.dep_context().profiler().query_cache_hit(index.into());
344            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
345
346            (v, Some(index))
347        }
348        Err(cycle) => (mk_cycle(query, qcx, cycle.lift(qcx)), None),
349    }
350}
351
/// Looks up the query's active-job table for `key` and either starts a new
/// job (nothing is computing it), waits for another thread's in-flight
/// computation, or reports a cycle / re-raises a poisoning panic.
#[inline(never)]
fn try_execute_query<'tcx, Q, const INCR: bool>(
    query: Q,
    qcx: Q::Qcx,
    span: Span,
    key: Q::Key,
    dep_node: Option<DepNode>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryDispatcher<'tcx>,
{
    let state = query.query_state(qcx);
    let key_hash = sharded::make_hash(&key);
    let mut state_lock = state.active.lock_shard_by_hash(key_hash);

    // For the parallel compiler we need to check both the query cache and query state structures
    // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
    // query is not still executing. Without checking the query cache here, we can end up
    // re-executing the query since `try_start` only checks that the query is not currently
    // executing, but another thread may have already completed the query and stores it result
    // in the query cache.
    if qcx.dep_context().sess().threads() > 1 {
        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
            qcx.dep_context().profiler().query_cache_hit(index.into());
            return (value, Some(index));
        }
    }

    let current_job_id = qcx.current_query_job();

    match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
        Entry::Vacant(entry) => {
            // Nothing has computed or is computing the query, so we start a new job and insert it in the
            // state map.
            let id = qcx.next_job_id();
            let job = QueryJob::new(id, span, current_job_id);
            entry.insert((key, ActiveKeyStatus::Started(job)));

            // Drop the lock before we start executing the query
            drop(state_lock);

            execute_job::<Q, INCR>(query, qcx, state, key, key_hash, id, dep_node)
        }
        Entry::Occupied(mut entry) => {
            match &mut entry.get_mut().1 {
                ActiveKeyStatus::Started(job) => {
                    if sync::is_dyn_thread_safe() {
                        // Get the latch out
                        let latch = job.latch();
                        drop(state_lock);

                        // Only call `wait_for_query` if we're using a Rayon thread pool
                        // as it will attempt to mark the worker thread as blocked.
                        return wait_for_query(query, qcx, span, key, latch, current_job_id);
                    }

                    let id = job.id;
                    drop(state_lock);

                    // If we are single-threaded we know that we have cycle error,
                    // so we just return the error.
                    cycle_error(query, qcx, id, span)
                }
                ActiveKeyStatus::Poisoned => FatalError.raise(),
            }
        }
    }
}

421#[inline(always)]
422fn execute_job<'tcx, Q, const INCR: bool>(
423    query: Q,
424    qcx: Q::Qcx,
425    state: &QueryState<'tcx, Q::Key>,
426    key: Q::Key,
427    key_hash: u64,
428    id: QueryJobId,
429    dep_node: Option<DepNode>,
430) -> (Q::Value, Option<DepNodeIndex>)
431where
432    Q: QueryDispatcher<'tcx>,
433{
434    // Use `JobOwner` so the query will be poisoned if executing it panics.
435    let job_owner = JobOwner { state, key };
436
437    if true {
    match (&qcx.dep_context().dep_graph().is_fully_enabled(), &INCR) {
        (left_val, right_val) => {
            if !(*left_val == *right_val) {
                let kind = ::core::panicking::AssertKind::Eq;
                ::core::panicking::assert_failed(kind, &*left_val,
                    &*right_val, ::core::option::Option::None);
            }
        }
    };
};debug_assert_eq!(qcx.dep_context().dep_graph().is_fully_enabled(), INCR);
438
439    let (result, dep_node_index) = if INCR {
440        execute_job_incr(
441            query,
442            qcx,
443            qcx.dep_context().dep_graph().data().unwrap(),
444            key,
445            dep_node,
446            id,
447        )
448    } else {
449        execute_job_non_incr(query, qcx, key, id)
450    };
451
452    let cache = query.query_cache(qcx);
453    if query.feedable() {
454        // We should not compute queries that also got a value via feeding.
455        // This can't happen, as query feeding adds the very dependencies to the fed query
456        // as its feeding query had. So if the fed query is red, so is its feeder, which will
457        // get evaluated first, and re-feed the query.
458        if let Some((cached_result, _)) = cache.lookup(&key) {
459            let Some(hasher) = query.hash_result() else {
460                {
    ::core::panicking::panic_fmt(format_args!("no_hash fed query later has its value computed.\nRemove `no_hash` modifier to allow recomputation.\nThe already cached value: {0}",
            (query.format_value())(&cached_result)));
};panic!(
461                    "no_hash fed query later has its value computed.\n\
462                    Remove `no_hash` modifier to allow recomputation.\n\
463                    The already cached value: {}",
464                    (query.format_value())(&cached_result)
465                );
466            };
467
468            let (old_hash, new_hash) = qcx.dep_context().with_stable_hashing_context(|mut hcx| {
469                (hasher(&mut hcx, &cached_result), hasher(&mut hcx, &result))
470            });
471            let formatter = query.format_value();
472            if old_hash != new_hash {
473                // We have an inconsistency. This can happen if one of the two
474                // results is tainted by errors.
475                if !qcx.dep_context().sess().dcx().has_errors().is_some() {
    {
        ::core::panicking::panic_fmt(format_args!("Computed query value for {0:?}({1:?}) is inconsistent with fed value,\ncomputed={2:#?}\nfed={3:#?}",
                query.dep_kind(), key, formatter(&result),
                formatter(&cached_result)));
    }
};assert!(
476                    qcx.dep_context().sess().dcx().has_errors().is_some(),
477                    "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
478                        computed={:#?}\nfed={:#?}",
479                    query.dep_kind(),
480                    key,
481                    formatter(&result),
482                    formatter(&cached_result),
483                );
484            }
485        }
486    }
487    job_owner.complete(cache, key_hash, result, dep_node_index);
488
489    (result, Some(dep_node_index))
490}
491
492// Fast path for when incr. comp. is off.
493#[inline(always)]
494fn execute_job_non_incr<'tcx, Q>(
495    query: Q,
496    qcx: Q::Qcx,
497    key: Q::Key,
498    job_id: QueryJobId,
499) -> (Q::Value, DepNodeIndex)
500where
501    Q: QueryDispatcher<'tcx>,
502{
503    if true {
    if !!qcx.dep_context().dep_graph().is_fully_enabled() {
        ::core::panicking::panic("assertion failed: !qcx.dep_context().dep_graph().is_fully_enabled()")
    };
};debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());
504
505    // Fingerprint the key, just to assert that it doesn't
506    // have anything we don't consider hashable
507    if truecfg!(debug_assertions) {
508        let _ = key.to_fingerprint(*qcx.dep_context());
509    }
510
511    let prof_timer = qcx.dep_context().profiler().query_provider();
512    let result = qcx.start_query(job_id, query.depth_limit(), || query.compute(qcx, key));
513    let dep_node_index = qcx.dep_context().dep_graph().next_virtual_depnode_index();
514    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
515
516    // Similarly, fingerprint the result to assert that
517    // it doesn't have anything not considered hashable.
518    if truecfg!(debug_assertions)
519        && let Some(hash_result) = query.hash_result()
520    {
521        qcx.dep_context().with_stable_hashing_context(|mut hcx| {
522            hash_result(&mut hcx, &result);
523        });
524    }
525
526    (result, dep_node_index)
527}
528
/// Executes a query with incremental compilation enabled: first tries to mark
/// the dep node green and reuse a previous result via
/// `try_load_from_disk_and_cache_in_memory`; otherwise recomputes inside a
/// tracked (or, for `anon` queries, anonymous) dep-graph task.
#[inline(always)]
fn execute_job_incr<'tcx, Q>(
    query: Q,
    qcx: Q::Qcx,
    dep_graph_data: &DepGraphData<<Q::Qcx as HasDepContext>::Deps>,
    key: Q::Key,
    mut dep_node_opt: Option<DepNode>,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryDispatcher<'tcx>,
{
    if !query.anon() && !query.eval_always() {
        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));

        // The diagnostics for this query will be promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        if let Some(ret) = qcx.start_query(job_id, false, || {
            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, dep_node)
        }) {
            return ret;
        }
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();

    let (result, dep_node_index) = qcx.start_query(job_id, query.depth_limit(), || {
        if query.anon() {
            return dep_graph_data.with_anon_task_inner(
                *qcx.dep_context(),
                query.dep_kind(),
                || query.compute(qcx, key),
            );
        }

        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));

        dep_graph_data.with_task(
            dep_node,
            (qcx, query),
            key,
            |(qcx, query), key| query.compute(qcx, key),
            query.hash_result(),
        )
    });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    (result, dep_node_index)
}

584#[inline(always)]
585fn try_load_from_disk_and_cache_in_memory<'tcx, Q>(
586    query: Q,
587    dep_graph_data: &DepGraphData<<Q::Qcx as HasDepContext>::Deps>,
588    qcx: Q::Qcx,
589    key: &Q::Key,
590    dep_node: &DepNode,
591) -> Option<(Q::Value, DepNodeIndex)>
592where
593    Q: QueryDispatcher<'tcx>,
594{
595    // Note this function can be called concurrently from the same query
596    // We must ensure that this is handled correctly.
597
598    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, dep_node)?;
599
600    if true {
    if !dep_graph_data.is_index_green(prev_dep_node_index) {
        ::core::panicking::panic("assertion failed: dep_graph_data.is_index_green(prev_dep_node_index)")
    };
};debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));
601
602    // First we try to load the result from the on-disk cache.
603    // Some things are never cached on disk.
604    if let Some(result) = query.try_load_from_disk(qcx, key, prev_dep_node_index, dep_node_index) {
605        if std::intrinsics::unlikely(qcx.dep_context().sess().opts.unstable_opts.query_dep_graph) {
606            dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
607        }
608
609        let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
610        // If `-Zincremental-verify-ich` is specified, re-hash results from
611        // the cache and make sure that they have the expected fingerprint.
612        //
613        // If not, we still seek to verify a subset of fingerprints loaded
614        // from disk. Re-hashing results is fairly expensive, so we can't
615        // currently afford to verify every hash. This subset should still
616        // give us some coverage of potential bugs though.
617        let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32);
618        if std::intrinsics::unlikely(
619            try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
620        ) {
621            incremental_verify_ich(
622                *qcx.dep_context(),
623                dep_graph_data,
624                &result,
625                prev_dep_node_index,
626                query.hash_result(),
627                query.format_value(),
628            );
629        }
630
631        return Some((result, dep_node_index));
632    }
633
634    // We always expect to find a cached result for things that
635    // can be forced from `DepNode`.
636    if true {
    if !(!query.will_cache_on_disk_for_key(*qcx.dep_context(), key) ||
                !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible())
        {
        {
            ::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for {0:?}",
                    dep_node));
        }
    };
};debug_assert!(
637        !query.will_cache_on_disk_for_key(*qcx.dep_context(), key)
638            || !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
639        "missing on-disk cache entry for {dep_node:?}"
640    );
641
642    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
643    // we should actually be able to load it.
644    if true {
    if !!query.is_loadable_from_disk(qcx, key, prev_dep_node_index) {
        {
            ::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for loadable {0:?}",
                    dep_node));
        }
    };
};debug_assert!(
645        !query.is_loadable_from_disk(qcx, key, prev_dep_node_index),
646        "missing on-disk cache entry for loadable {dep_node:?}"
647    );
648
649    // We could not load a result from the on-disk cache, so
650    // recompute.
651    let prof_timer = qcx.dep_context().profiler().query_provider();
652
653    // The dep-graph for this computation is already in-place.
654    let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));
655
656    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
657
658    // Verify that re-running the query produced a result with the expected hash
659    // This catches bugs in query implementations, turning them into ICEs.
660    // For example, a query might sort its result by `DefId` - since `DefId`s are
661    // not stable across compilation sessions, the result could get up getting sorted
662    // in a different order when the query is re-run, even though all of the inputs
663    // (e.g. `DefPathHash` values) were green.
664    //
665    // See issue #82920 for an example of a miscompilation that would get turned into
666    // an ICE by this check
667    incremental_verify_ich(
668        *qcx.dep_context(),
669        dep_graph_data,
670        &result,
671        prev_dep_node_index,
672        query.hash_result(),
673        query.format_value(),
674    );
675
676    Some((result, dep_node_index))
677}
678
#[inline]
#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
681pub(crate) fn incremental_verify_ich<Tcx, V>(
682    tcx: Tcx,
683    dep_graph_data: &DepGraphData<Tcx::Deps>,
684    result: &V,
685    prev_index: SerializedDepNodeIndex,
686    hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
687    format_value: fn(&V) -> String,
688) where
689    Tcx: DepContext,
690{
691    if !dep_graph_data.is_index_green(prev_index) {
692        incremental_verify_ich_not_green(tcx, prev_index)
693    }
694
695    let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
696        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
697    });
698
699    let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);
700
701    if new_hash != old_hash {
702        incremental_verify_ich_failed(tcx, prev_index, &|| format_value(result));
703    }
704}
705
706#[cold]
707#[inline(never)]
708fn incremental_verify_ich_not_green<Tcx>(tcx: Tcx, prev_index: SerializedDepNodeIndex)
709where
710    Tcx: DepContext,
711{
712    {
    ::core::panicking::panic_fmt(format_args!("fingerprint for green query instance not loaded from cache: {0:?}",
            tcx.dep_graph().data().unwrap().prev_node_of(prev_index)));
}panic!(
713        "fingerprint for green query instance not loaded from cache: {:?}",
714        tcx.dep_graph().data().unwrap().prev_node_of(prev_index)
715    )
716}
717
718// Note that this is marked #[cold] and intentionally takes `dyn Debug` for `result`,
719// as we want to avoid generating a bunch of different implementations for LLVM to
720// chew on (and filling up the final binary, too).
721#[cold]
722#[inline(never)]
723fn incremental_verify_ich_failed<Tcx>(
724    tcx: Tcx,
725    prev_index: SerializedDepNodeIndex,
726    result: &dyn Fn() -> String,
727) where
728    Tcx: DepContext,
729{
730    // When we emit an error message and panic, we try to debug-print the `DepNode`
731    // and query result. Unfortunately, this can cause us to run additional queries,
732    // which may result in another fingerprint mismatch while we're in the middle
733    // of processing this one. To avoid a double-panic (which kills the process
734    // before we can print out the query static), we print out a terse
735    // but 'safe' message if we detect a reentrant call to this method.
736    const INSIDE_VERIFY_PANIC: ::std::thread::LocalKey<Cell<bool>> =
    {
        const __RUST_STD_INTERNAL_INIT: Cell<bool> = { Cell::new(false) };
        unsafe {
            ::std::thread::LocalKey::new(const {
                        if ::std::mem::needs_drop::<Cell<bool>>() {
                            |_|
                                {
                                    #[thread_local]
                                    static __RUST_STD_INTERNAL_VAL:
                                        ::std::thread::local_impl::EagerStorage<Cell<bool>> =
                                        ::std::thread::local_impl::EagerStorage::new(__RUST_STD_INTERNAL_INIT);
                                    __RUST_STD_INTERNAL_VAL.get()
                                }
                        } else {
                            |_|
                                {
                                    #[thread_local]
                                    static __RUST_STD_INTERNAL_VAL: Cell<bool> =
                                        __RUST_STD_INTERNAL_INIT;
                                    &__RUST_STD_INTERNAL_VAL
                                }
                        }
                    })
        }
    };thread_local! {
737        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
738    };
739
740    let old_in_panic = INSIDE_VERIFY_PANIC.replace(true);
741
742    if old_in_panic {
743        tcx.sess().dcx().emit_err(crate::error::Reentrant);
744    } else {
745        let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
746            ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("`cargo clean -p {0}` or `cargo clean`",
                crate_name))
    })format!("`cargo clean -p {crate_name}` or `cargo clean`")
747        } else {
748            "`cargo clean`".to_string()
749        };
750
751        let dep_node = tcx.dep_graph().data().unwrap().prev_node_of(prev_index);
752        tcx.sess().dcx().emit_err(crate::error::IncrementCompilation {
753            run_cmd,
754            dep_node: ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{0:?}", dep_node))
    })format!("{dep_node:?}"),
755        });
756        {
    ::core::panicking::panic_fmt(format_args!("Found unstable fingerprints for {1:?}: {0}",
            result(), dep_node));
};panic!("Found unstable fingerprints for {dep_node:?}: {}", result());
757    }
758
759    INSIDE_VERIFY_PANIC.set(old_in_panic);
760}
761
762/// Ensure that either this query has all green inputs or been executed.
763/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
764/// Returns true if the query should still run.
765///
766/// This function is particularly useful when executing passes for their
767/// side-effects -- e.g., in order to report errors for erroneous programs.
768///
769/// Note: The optimization is only available during incr. comp.
770#[inline(never)]
771fn ensure_must_run<'tcx, Q>(
772    query: Q,
773    qcx: Q::Qcx,
774    key: &Q::Key,
775    check_cache: bool,
776) -> (bool, Option<DepNode>)
777where
778    Q: QueryDispatcher<'tcx>,
779{
780    if query.eval_always() {
781        return (true, None);
782    }
783
784    // Ensuring an anonymous query makes no sense
785    if !!query.anon() {
    ::core::panicking::panic("assertion failed: !query.anon()")
};assert!(!query.anon());
786
787    let dep_node = query.construct_dep_node(*qcx.dep_context(), key);
788
789    let dep_graph = qcx.dep_context().dep_graph();
790    let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
791        None => {
792            // A None return from `try_mark_green` means that this is either
793            // a new dep node or that the dep node has already been marked red.
794            // Either way, we can't call `dep_graph.read()` as we don't have the
795            // DepNodeIndex. We must invoke the query itself. The performance cost
796            // this introduces should be negligible as we'll immediately hit the
797            // in-memory cache, or another query down the line will.
798            return (true, Some(dep_node));
799        }
800        Some((serialized_dep_node_index, dep_node_index)) => {
801            dep_graph.read_index(dep_node_index);
802            qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
803            serialized_dep_node_index
804        }
805    };
806
807    // We do not need the value at all, so do not check the cache.
808    if !check_cache {
809        return (false, None);
810    }
811
812    let loadable = query.is_loadable_from_disk(qcx, key, serialized_dep_node_index);
813    (!loadable, Some(dep_node))
814}
815
/// How a query invocation should be performed: `Get` actually computes (or
/// loads) the value, while `Ensure` only guarantees the query has run,
/// optionally checking the in-memory/disk cache first.
#[derive(Debug)]
pub enum QueryMode {
    Get,
    Ensure { check_cache: bool },
}
821
822#[inline(always)]
823pub fn get_query_non_incr<'tcx, Q>(query: Q, qcx: Q::Qcx, span: Span, key: Q::Key) -> Q::Value
824where
825    Q: QueryDispatcher<'tcx>,
826{
827    if true {
    if !!qcx.dep_context().dep_graph().is_fully_enabled() {
        ::core::panicking::panic("assertion failed: !qcx.dep_context().dep_graph().is_fully_enabled()")
    };
};debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());
828
829    ensure_sufficient_stack(|| try_execute_query::<Q, false>(query, qcx, span, key, None).0)
830}
831
832#[inline(always)]
833pub fn get_query_incr<'tcx, Q>(
834    query: Q,
835    qcx: Q::Qcx,
836    span: Span,
837    key: Q::Key,
838    mode: QueryMode,
839) -> Option<Q::Value>
840where
841    Q: QueryDispatcher<'tcx>,
842{
843    if true {
    if !qcx.dep_context().dep_graph().is_fully_enabled() {
        ::core::panicking::panic("assertion failed: qcx.dep_context().dep_graph().is_fully_enabled()")
    };
};debug_assert!(qcx.dep_context().dep_graph().is_fully_enabled());
844
845    let dep_node = if let QueryMode::Ensure { check_cache } = mode {
846        let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
847        if !must_run {
848            return None;
849        }
850        dep_node
851    } else {
852        None
853    };
854
855    let (result, dep_node_index) =
856        ensure_sufficient_stack(|| try_execute_query::<Q, true>(query, qcx, span, key, dep_node));
857    if let Some(dep_node_index) = dep_node_index {
858        qcx.dep_context().dep_graph().read_index(dep_node_index)
859    }
860    Some(result)
861}
862
863pub fn force_query<'tcx, Q>(query: Q, qcx: Q::Qcx, key: Q::Key, dep_node: DepNode)
864where
865    Q: QueryDispatcher<'tcx>,
866{
867    // We may be concurrently trying both execute and force a query.
868    // Ensure that only one of them runs the query.
869    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
870        qcx.dep_context().profiler().query_cache_hit(index.into());
871        return;
872    }
873
874    if true {
    if !!query.anon() {
        ::core::panicking::panic("assertion failed: !query.anon()")
    };
};debug_assert!(!query.anon());
875
876    ensure_sufficient_stack(|| {
877        try_execute_query::<Q, true>(query, qcx, DUMMY_SP, key, Some(dep_node))
878    });
879}