//! Query execution plumbing (`rustc_query_impl/execution.rs`).
use std::hash::Hash;
use std::mem;

use rustc_data_structures::hash_table::{Entry, HashTable};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::{DynSend, DynSync};
use rustc_data_structures::{outline, sharded, sync};
use rustc_errors::{FatalError, StashKey};
use rustc_middle::dep_graph::{DepGraphData, DepNodeKey, SerializedDepNodeIndex};
use rustc_middle::query::plumbing::QueryVTable;
use rustc_middle::query::{
    ActiveKeyStatus, CycleError, CycleErrorHandling, EnsureMode, QueryCache, QueryJob, QueryJobId,
    QueryKey, QueryLatch, QueryMode, QueryState,
};
use rustc_middle::ty::TyCtxt;
use rustc_middle::verify_ich::incremental_verify_ich;
use rustc_span::{DUMMY_SP, Span};

use crate::collect_active_jobs_from_all_queries;
use crate::dep_graph::{DepNode, DepNodeIndex};
use crate::job::{QueryJobInfo, QueryJobMap, find_cycle_in_stack, report_cycle};
use crate::plumbing::{current_query_job, next_job_id, start_query};

/// Returns a predicate that matches a `(key, value)` table entry by its key,
/// for use with `HashTable` lookups keyed only by `K`.
#[inline]
fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
    move |x| x.0 == *k
}

29/// Obtains the enclosed [`QueryJob`], or panics if this query evaluation
30/// was poisoned by a panic.
31fn expect_job<'tcx>(status: ActiveKeyStatus<'tcx>) -> QueryJob<'tcx> {
32    match status {
33        ActiveKeyStatus::Started(job) => job,
34        ActiveKeyStatus::Poisoned => {
35            {
    ::core::panicking::panic_fmt(format_args!("job for query failed to start and was poisoned"));
}panic!("job for query failed to start and was poisoned")
36        }
37    }
38}
39
40pub(crate) fn all_inactive<'tcx, K>(state: &QueryState<'tcx, K>) -> bool {
41    state.active.lock_shards().all(|shard| shard.is_empty())
42}
43
44/// Internal plumbing for collecting the set of active jobs for this query.
45///
46/// Should only be called from `collect_active_jobs_from_all_queries`.
47///
48/// (We arbitrarily use the word "gather" when collecting the jobs for
49/// each individual query, so that we have distinct function names to
50/// grep for.)
51pub(crate) fn gather_active_jobs<'tcx, C>(
52    query: &'tcx QueryVTable<'tcx, C>,
53    tcx: TyCtxt<'tcx>,
54    require_complete: bool,
55    job_map_out: &mut QueryJobMap<'tcx>, // Out-param; job info is gathered into this map
56) -> Option<()>
57where
58    C: QueryCache<Key: QueryKey + DynSend + DynSync>,
59    QueryVTable<'tcx, C>: DynSync,
60{
61    let mut active = Vec::new();
62
63    // Helper to gather active jobs from a single shard.
64    let mut gather_shard_jobs = |shard: &HashTable<(C::Key, ActiveKeyStatus<'tcx>)>| {
65        for (k, v) in shard.iter() {
66            if let ActiveKeyStatus::Started(ref job) = *v {
67                active.push((*k, job.clone()));
68            }
69        }
70    };
71
72    // Lock shards and gather jobs from each shard.
73    if require_complete {
74        for shard in query.state.active.lock_shards() {
75            gather_shard_jobs(&shard);
76        }
77    } else {
78        // We use try_lock_shards here since we are called from the
79        // deadlock handler, and this shouldn't be locked.
80        for shard in query.state.active.try_lock_shards() {
81            // This can be called during unwinding, and the function has a `try_`-prefix, so
82            // don't `unwrap()` here, just manually check for `None` and do best-effort error
83            // reporting.
84            match shard {
85                None => {
86                    {
    use ::tracing::__macro_support::Callsite as _;
    static __CALLSITE: ::tracing::callsite::DefaultCallsite =
        {
            static META: ::tracing::Metadata<'static> =
                {
                    ::tracing_core::metadata::Metadata::new("event compiler/rustc_query_impl/src/execution.rs:86",
                        "rustc_query_impl::execution", ::tracing::Level::WARN,
                        ::tracing_core::__macro_support::Option::Some("compiler/rustc_query_impl/src/execution.rs"),
                        ::tracing_core::__macro_support::Option::Some(86u32),
                        ::tracing_core::__macro_support::Option::Some("rustc_query_impl::execution"),
                        ::tracing_core::field::FieldSet::new(&["message"],
                            ::tracing_core::callsite::Identifier(&__CALLSITE)),
                        ::tracing::metadata::Kind::EVENT)
                };
            ::tracing::callsite::DefaultCallsite::new(&META)
        };
    let enabled =
        ::tracing::Level::WARN <= ::tracing::level_filters::STATIC_MAX_LEVEL
                &&
                ::tracing::Level::WARN <=
                    ::tracing::level_filters::LevelFilter::current() &&
            {
                let interest = __CALLSITE.interest();
                !interest.is_never() &&
                    ::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
                        interest)
            };
    if enabled {
        (|value_set: ::tracing::field::ValueSet|
                    {
                        let meta = __CALLSITE.metadata();
                        ::tracing::Event::dispatch(meta, &value_set);
                        ;
                    })({
                #[allow(unused_imports)]
                use ::tracing::field::{debug, display, Value};
                let mut iter = __CALLSITE.metadata().fields().iter();
                __CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
                                    ::tracing::__macro_support::Option::Some(&format_args!("Failed to collect active jobs for query with name `{0}`!",
                                                    query.name) as &dyn Value))])
            });
    } else { ; }
};tracing::warn!(
87                        "Failed to collect active jobs for query with name `{}`!",
88                        query.name
89                    );
90                    return None;
91                }
92                Some(shard) => gather_shard_jobs(&shard),
93            }
94        }
95    }
96
97    // Call `make_frame` while we're not holding a `state.active` lock as `make_frame` may call
98    // queries leading to a deadlock.
99    for (key, job) in active {
100        let frame = crate::plumbing::create_deferred_query_stack_frame(tcx, query, key);
101        job_map_out.insert(job.id, QueryJobInfo { frame, job });
102    }
103
104    Some(())
105}
106
107/// Guard object representing the responsibility to execute a query job and
108/// mark it as completed.
109///
110/// This will poison the relevant query key if it is dropped without calling
111/// [`Self::complete`].
112struct ActiveJobGuard<'tcx, K>
113where
114    K: Eq + Hash + Copy,
115{
116    state: &'tcx QueryState<'tcx, K>,
117    key: K,
118    key_hash: u64,
119}
120
121#[cold]
122#[inline(never)]
123fn mk_cycle<'tcx, C: QueryCache>(
124    query: &'tcx QueryVTable<'tcx, C>,
125    tcx: TyCtxt<'tcx>,
126    cycle_error: CycleError,
127) -> C::Value {
128    let error = report_cycle(tcx.sess, &cycle_error);
129    match query.cycle_error_handling {
130        CycleErrorHandling::Error => {
131            let guar = error.emit();
132            (query.value_from_cycle_error)(tcx, cycle_error, guar)
133        }
134        CycleErrorHandling::DelayBug => {
135            let guar = error.delay_as_bug();
136            (query.value_from_cycle_error)(tcx, cycle_error, guar)
137        }
138        CycleErrorHandling::Stash => {
139            let guar = if let Some(root) = cycle_error.cycle.first()
140                && let Some(span) = root.frame.info.span
141            {
142                error.stash(span, StashKey::Cycle).unwrap()
143            } else {
144                error.emit()
145            };
146            (query.value_from_cycle_error)(tcx, cycle_error, guar)
147        }
148    }
149}
150
151impl<'tcx, K> ActiveJobGuard<'tcx, K>
152where
153    K: Eq + Hash + Copy,
154{
155    /// Completes the query by updating the query cache with the `result`,
156    /// signals the waiter, and forgets the guard so it won't poison the query.
157    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
158    where
159        C: QueryCache<Key = K>,
160    {
161        // Forget ourself so our destructor won't poison the query.
162        // (Extract fields by value first to make sure we don't leak anything.)
163        let Self { state, key, key_hash }: Self = self;
164        mem::forget(self);
165
166        // Mark as complete before we remove the job from the active state
167        // so no other thread can re-execute this query.
168        cache.complete(key, result, dep_node_index);
169
170        let job = {
171            // don't keep the lock during the `unwrap()` of the retrieved value, or we taint the
172            // underlying shard.
173            // since unwinding also wants to look at this map, this can also prevent a double
174            // panic.
175            let mut shard = state.active.lock_shard_by_hash(key_hash);
176            match shard.find_entry(key_hash, equivalent_key(&key)) {
177                Err(_) => None,
178                Ok(occupied) => Some(occupied.remove().0.1),
179            }
180        };
181        let job = expect_job(job.expect("active query job entry"));
182
183        job.signal_complete();
184    }
185}
186
187impl<'tcx, K> Drop for ActiveJobGuard<'tcx, K>
188where
189    K: Eq + Hash + Copy,
190{
191    #[inline(never)]
192    #[cold]
193    fn drop(&mut self) {
194        // Poison the query so jobs waiting on it panic.
195        let Self { state, key, key_hash } = *self;
196        let job = {
197            let mut shard = state.active.lock_shard_by_hash(key_hash);
198            match shard.find_entry(key_hash, equivalent_key(&key)) {
199                Err(_) => ::core::panicking::panic("explicit panic")panic!(),
200                Ok(occupied) => {
201                    let ((key, value), vacant) = occupied.remove();
202                    vacant.insert((key, ActiveKeyStatus::Poisoned));
203                    expect_job(value)
204                }
205            }
206        };
207        // Also signal the completion of the job, so waiters
208        // will continue execution.
209        job.signal_complete();
210    }
211}
212
213#[cold]
214#[inline(never)]
215fn cycle_error<'tcx, C: QueryCache>(
216    query: &'tcx QueryVTable<'tcx, C>,
217    tcx: TyCtxt<'tcx>,
218    try_execute: QueryJobId,
219    span: Span,
220) -> (C::Value, Option<DepNodeIndex>) {
221    // Ensure there was no errors collecting all active jobs.
222    // We need the complete map to ensure we find a cycle to break.
223    let job_map = collect_active_jobs_from_all_queries(tcx, false)
224        .ok()
225        .expect("failed to collect active queries");
226
227    let error = find_cycle_in_stack(try_execute, job_map, &current_query_job(tcx), span);
228    (mk_cycle(query, tcx, error.lift()), None)
229}
230
231#[inline(always)]
232fn wait_for_query<'tcx, C: QueryCache>(
233    query: &'tcx QueryVTable<'tcx, C>,
234    tcx: TyCtxt<'tcx>,
235    span: Span,
236    key: C::Key,
237    latch: QueryLatch<'tcx>,
238    current: Option<QueryJobId>,
239) -> (C::Value, Option<DepNodeIndex>) {
240    // For parallel queries, we'll block and wait until the query running
241    // in another thread has completed. Record how long we wait in the
242    // self-profiler.
243    let query_blocked_prof_timer = tcx.prof.query_blocked();
244
245    // With parallel queries we might just have to wait on some other
246    // thread.
247    let result = latch.wait_on(tcx, current, span);
248
249    match result {
250        Ok(()) => {
251            let Some((v, index)) = query.cache.lookup(&key) else {
252                outline(|| {
253                    // We didn't find the query result in the query cache. Check if it was
254                    // poisoned due to a panic instead.
255                    let key_hash = sharded::make_hash(&key);
256                    let shard = query.state.active.lock_shard_by_hash(key_hash);
257                    match shard.find(key_hash, equivalent_key(&key)) {
258                        // The query we waited on panicked. Continue unwinding here.
259                        Some((_, ActiveKeyStatus::Poisoned)) => FatalError.raise(),
260                        _ => {
    ::core::panicking::panic_fmt(format_args!("query \'{0}\' result must be in the cache or the query must be poisoned after a wait",
            query.name));
}panic!(
261                            "query '{}' result must be in the cache or the query must be poisoned after a wait",
262                            query.name
263                        ),
264                    }
265                })
266            };
267
268            tcx.prof.query_cache_hit(index.into());
269            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
270
271            (v, Some(index))
272        }
273        Err(cycle) => (mk_cycle(query, tcx, cycle.lift()), None),
274    }
275}
276
277/// Shared main part of both [`execute_query_incr_inner`] and [`execute_query_non_incr_inner`].
278#[inline(never)]
279fn try_execute_query<'tcx, C: QueryCache, const INCR: bool>(
280    query: &'tcx QueryVTable<'tcx, C>,
281    tcx: TyCtxt<'tcx>,
282    span: Span,
283    key: C::Key,
284    // If present, some previous step has already created a `DepNode` for this
285    // query+key, which we should reuse instead of creating a new one.
286    dep_node: Option<DepNode>,
287) -> (C::Value, Option<DepNodeIndex>) {
288    let key_hash = sharded::make_hash(&key);
289    let mut state_lock = query.state.active.lock_shard_by_hash(key_hash);
290
291    // For the parallel compiler we need to check both the query cache and query state structures
292    // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
293    // query is not still executing. Without checking the query cache here, we can end up
294    // re-executing the query since `try_start` only checks that the query is not currently
295    // executing, but another thread may have already completed the query and stores it result
296    // in the query cache.
297    if tcx.sess.threads() > 1 {
298        if let Some((value, index)) = query.cache.lookup(&key) {
299            tcx.prof.query_cache_hit(index.into());
300            return (value, Some(index));
301        }
302    }
303
304    let current_job_id = current_query_job(tcx);
305
306    match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
307        Entry::Vacant(entry) => {
308            // Nothing has computed or is computing the query, so we start a new job and insert it in the
309            // state map.
310            let id = next_job_id(tcx);
311            let job = QueryJob::new(id, span, current_job_id);
312            entry.insert((key, ActiveKeyStatus::Started(job)));
313
314            // Drop the lock before we start executing the query
315            drop(state_lock);
316
317            execute_job::<C, INCR>(query, tcx, key, key_hash, id, dep_node)
318        }
319        Entry::Occupied(mut entry) => {
320            match &mut entry.get_mut().1 {
321                ActiveKeyStatus::Started(job) => {
322                    if sync::is_dyn_thread_safe() {
323                        // Get the latch out
324                        let latch = job.latch();
325                        drop(state_lock);
326
327                        // Only call `wait_for_query` if we're using a Rayon thread pool
328                        // as it will attempt to mark the worker thread as blocked.
329                        wait_for_query(query, tcx, span, key, latch, current_job_id)
330                    } else {
331                        let id = job.id;
332                        drop(state_lock);
333
334                        // If we are single-threaded we know that we have cycle error,
335                        // so we just return the error.
336                        cycle_error(query, tcx, id, span)
337                    }
338                }
339                ActiveKeyStatus::Poisoned => FatalError.raise(),
340            }
341        }
342    }
343}
344
345#[inline(always)]
346fn execute_job<'tcx, C: QueryCache, const INCR: bool>(
347    query: &'tcx QueryVTable<'tcx, C>,
348    tcx: TyCtxt<'tcx>,
349    key: C::Key,
350    key_hash: u64,
351    id: QueryJobId,
352    dep_node: Option<DepNode>,
353) -> (C::Value, Option<DepNodeIndex>) {
354    // Set up a guard object that will automatically poison the query if a
355    // panic occurs while executing the query (or any intermediate plumbing).
356    let job_guard = ActiveJobGuard { state: &query.state, key, key_hash };
357
358    if true {
    match (&tcx.dep_graph.is_fully_enabled(), &INCR) {
        (left_val, right_val) => {
            if !(*left_val == *right_val) {
                let kind = ::core::panicking::AssertKind::Eq;
                ::core::panicking::assert_failed(kind, &*left_val,
                    &*right_val, ::core::option::Option::None);
            }
        }
    };
};debug_assert_eq!(tcx.dep_graph.is_fully_enabled(), INCR);
359
360    // Delegate to another function to actually execute the query job.
361    let (value, dep_node_index) = if INCR {
362        execute_job_incr(query, tcx, key, dep_node, id)
363    } else {
364        execute_job_non_incr(query, tcx, key, id)
365    };
366
367    let cache = &query.cache;
368    if query.feedable {
369        // We should not compute queries that also got a value via feeding.
370        // This can't happen, as query feeding adds the very dependencies to the fed query
371        // as its feeding query had. So if the fed query is red, so is its feeder, which will
372        // get evaluated first, and re-feed the query.
373        if let Some((cached_value, _)) = cache.lookup(&key) {
374            let Some(hash_value_fn) = query.hash_value_fn else {
375                {
    ::core::panicking::panic_fmt(format_args!("no_hash fed query later has its value computed.\nRemove `no_hash` modifier to allow recomputation.\nThe already cached value: {0}",
            (query.format_value)(&cached_value)));
};panic!(
376                    "no_hash fed query later has its value computed.\n\
377                    Remove `no_hash` modifier to allow recomputation.\n\
378                    The already cached value: {}",
379                    (query.format_value)(&cached_value)
380                );
381            };
382
383            let (old_hash, new_hash) = tcx.with_stable_hashing_context(|mut hcx| {
384                (hash_value_fn(&mut hcx, &cached_value), hash_value_fn(&mut hcx, &value))
385            });
386            let formatter = query.format_value;
387            if old_hash != new_hash {
388                // We have an inconsistency. This can happen if one of the two
389                // results is tainted by errors.
390                if !tcx.dcx().has_errors().is_some() {
    {
        ::core::panicking::panic_fmt(format_args!("Computed query value for {0:?}({1:?}) is inconsistent with fed value,\ncomputed={2:#?}\nfed={3:#?}",
                query.dep_kind, key, formatter(&value),
                formatter(&cached_value)));
    }
};assert!(
391                    tcx.dcx().has_errors().is_some(),
392                    "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
393                        computed={:#?}\nfed={:#?}",
394                    query.dep_kind,
395                    key,
396                    formatter(&value),
397                    formatter(&cached_value),
398                );
399            }
400        }
401    }
402
403    // Tell the guard to perform completion bookkeeping, and also to not poison the query.
404    job_guard.complete(cache, value, dep_node_index);
405
406    (value, Some(dep_node_index))
407}
408
409// Fast path for when incr. comp. is off.
410#[inline(always)]
411fn execute_job_non_incr<'tcx, C: QueryCache>(
412    query: &'tcx QueryVTable<'tcx, C>,
413    tcx: TyCtxt<'tcx>,
414    key: C::Key,
415    job_id: QueryJobId,
416) -> (C::Value, DepNodeIndex) {
417    if true {
    if !!tcx.dep_graph.is_fully_enabled() {
        ::core::panicking::panic("assertion failed: !tcx.dep_graph.is_fully_enabled()")
    };
};debug_assert!(!tcx.dep_graph.is_fully_enabled());
418
419    let prof_timer = tcx.prof.query_provider();
420    // Call the query provider.
421    let value =
422        start_query(tcx, job_id, query.depth_limit, || (query.invoke_provider_fn)(tcx, key));
423    let dep_node_index = tcx.dep_graph.next_virtual_depnode_index();
424    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
425
426    // Sanity: Fingerprint the key and the result to assert they don't contain anything unhashable.
427    if truecfg!(debug_assertions) {
428        let _ = key.to_fingerprint(tcx);
429        if let Some(hash_value_fn) = query.hash_value_fn {
430            tcx.with_stable_hashing_context(|mut hcx| {
431                hash_value_fn(&mut hcx, &value);
432            });
433        }
434    }
435
436    (value, dep_node_index)
437}
438
439#[inline(always)]
440fn execute_job_incr<'tcx, C: QueryCache>(
441    query: &'tcx QueryVTable<'tcx, C>,
442    tcx: TyCtxt<'tcx>,
443    key: C::Key,
444    mut dep_node_opt: Option<DepNode>,
445    job_id: QueryJobId,
446) -> (C::Value, DepNodeIndex) {
447    let dep_graph_data =
448        tcx.dep_graph.data().expect("should always be present in incremental mode");
449
450    if !query.anon && !query.eval_always {
451        // `to_dep_node` is expensive for some `DepKind`s.
452        let dep_node =
453            dep_node_opt.get_or_insert_with(|| DepNode::construct(tcx, query.dep_kind, &key));
454
455        // The diagnostics for this query will be promoted to the current session during
456        // `try_mark_green()`, so we can ignore them here.
457        if let Some(ret) = start_query(tcx, job_id, false, || try {
458            let (prev_index, dep_node_index) = dep_graph_data.try_mark_green(tcx, dep_node)?;
459            let value = load_from_disk_or_invoke_provider_green(
460                tcx,
461                dep_graph_data,
462                query,
463                &key,
464                dep_node,
465                prev_index,
466                dep_node_index,
467            );
468            (value, dep_node_index)
469        }) {
470            return ret;
471        }
472    }
473
474    let prof_timer = tcx.prof.query_provider();
475
476    let (result, dep_node_index) = start_query(tcx, job_id, query.depth_limit, || {
477        if query.anon {
478            // Call the query provider inside an anon task.
479            return dep_graph_data.with_anon_task_inner(tcx, query.dep_kind, || {
480                (query.invoke_provider_fn)(tcx, key)
481            });
482        }
483
484        // `to_dep_node` is expensive for some `DepKind`s.
485        let dep_node =
486            dep_node_opt.unwrap_or_else(|| DepNode::construct(tcx, query.dep_kind, &key));
487
488        // Call the query provider.
489        dep_graph_data.with_task(
490            dep_node,
491            tcx,
492            (query, key),
493            |tcx, (query, key)| (query.invoke_provider_fn)(tcx, key),
494            query.hash_value_fn,
495        )
496    });
497
498    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
499
500    (result, dep_node_index)
501}
502
503/// Given that the dep node for this query+key is green, obtain a value for it
504/// by loading one from disk if possible, or by invoking its query provider if
505/// necessary.
506#[inline(always)]
507fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>(
508    tcx: TyCtxt<'tcx>,
509    dep_graph_data: &DepGraphData,
510    query: &'tcx QueryVTable<'tcx, C>,
511    key: &C::Key,
512    dep_node: &DepNode,
513    prev_index: SerializedDepNodeIndex,
514    dep_node_index: DepNodeIndex,
515) -> C::Value {
516    // Note this function can be called concurrently from the same query
517    // We must ensure that this is handled correctly.
518
519    if true {
    if !dep_graph_data.is_index_green(prev_index) {
        ::core::panicking::panic("assertion failed: dep_graph_data.is_index_green(prev_index)")
    };
};debug_assert!(dep_graph_data.is_index_green(prev_index));
520
521    // First we try to load the result from the on-disk cache.
522    // Some things are never cached on disk.
523    if let Some(value) = (query.try_load_from_disk_fn)(tcx, key, prev_index, dep_node_index) {
524        if std::intrinsics::unlikely(tcx.sess.opts.unstable_opts.query_dep_graph) {
525            dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
526        }
527
528        let prev_fingerprint = dep_graph_data.prev_value_fingerprint_of(prev_index);
529        // If `-Zincremental-verify-ich` is specified, re-hash results from
530        // the cache and make sure that they have the expected fingerprint.
531        //
532        // If not, we still seek to verify a subset of fingerprints loaded
533        // from disk. Re-hashing results is fairly expensive, so we can't
534        // currently afford to verify every hash. This subset should still
535        // give us some coverage of potential bugs though.
536        let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32);
537        if std::intrinsics::unlikely(
538            try_verify || tcx.sess.opts.unstable_opts.incremental_verify_ich,
539        ) {
540            incremental_verify_ich(
541                tcx,
542                dep_graph_data,
543                &value,
544                prev_index,
545                query.hash_value_fn,
546                query.format_value,
547            );
548        }
549
550        return value;
551    }
552
553    // We always expect to find a cached result for things that
554    // can be forced from `DepNode`.
555    if true {
    if !(!(query.will_cache_on_disk_for_key_fn)(tcx, key) ||
                !tcx.key_fingerprint_style(dep_node.kind).is_maybe_recoverable())
        {
        {
            ::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for {0:?}",
                    dep_node));
        }
    };
};debug_assert!(
556        !(query.will_cache_on_disk_for_key_fn)(tcx, key)
557            || !tcx.key_fingerprint_style(dep_node.kind).is_maybe_recoverable(),
558        "missing on-disk cache entry for {dep_node:?}"
559    );
560
561    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
562    // we should actually be able to load it.
563    if true {
    if !!(query.is_loadable_from_disk_fn)(tcx, key, prev_index) {
        {
            ::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for loadable {0:?}",
                    dep_node));
        }
    };
};debug_assert!(
564        !(query.is_loadable_from_disk_fn)(tcx, key, prev_index),
565        "missing on-disk cache entry for loadable {dep_node:?}"
566    );
567
568    // We could not load a result from the on-disk cache, so
569    // recompute.
570    let prof_timer = tcx.prof.query_provider();
571
572    // The dep-graph for this computation is already in-place.
573    // Call the query provider.
574    let value = tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, *key));
575
576    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
577
578    // Verify that re-running the query produced a result with the expected hash
579    // This catches bugs in query implementations, turning them into ICEs.
580    // For example, a query might sort its result by `DefId` - since `DefId`s are
581    // not stable across compilation sessions, the result could get up getting sorted
582    // in a different order when the query is re-run, even though all of the inputs
583    // (e.g. `DefPathHash` values) were green.
584    //
585    // See issue #82920 for an example of a miscompilation that would get turned into
586    // an ICE by this check
587    incremental_verify_ich(
588        tcx,
589        dep_graph_data,
590        &value,
591        prev_index,
592        query.hash_value_fn,
593        query.format_value,
594    );
595
596    value
597}
598
599/// Return value struct for [`check_if_ensure_can_skip_execution`].
600struct EnsureCanSkip {
601    /// If true, the current `tcx.ensure_ok()` or `tcx.ensure_done()` query
602    /// can return early without actually trying to execute.
603    skip_execution: bool,
604    /// A dep node that was prepared while checking whether execution can be
605    /// skipped, to be reused by execution itself if _not_ skipped.
606    dep_node: Option<DepNode>,
607}
608
609/// Checks whether a `tcx.ensure_ok()` or `tcx.ensure_done()` query call can
610/// return early without actually trying to execute.
611///
612/// This only makes sense during incremental compilation, because it relies
613/// on having the dependency graph (and in some cases a disk-cached value)
614/// from the previous incr-comp session.
615#[inline(never)]
616fn check_if_ensure_can_skip_execution<'tcx, C: QueryCache>(
617    query: &'tcx QueryVTable<'tcx, C>,
618    tcx: TyCtxt<'tcx>,
619    key: &C::Key,
620    ensure_mode: EnsureMode,
621) -> EnsureCanSkip {
622    // Queries with `eval_always` should never skip execution.
623    if query.eval_always {
624        return EnsureCanSkip { skip_execution: false, dep_node: None };
625    }
626
627    // Ensuring an anonymous query makes no sense
628    if !!query.anon { ::core::panicking::panic("assertion failed: !query.anon") };assert!(!query.anon);
629
630    let dep_node = DepNode::construct(tcx, query.dep_kind, key);
631
632    let dep_graph = &tcx.dep_graph;
633    let serialized_dep_node_index = match dep_graph.try_mark_green(tcx, &dep_node) {
634        None => {
635            // A None return from `try_mark_green` means that this is either
636            // a new dep node or that the dep node has already been marked red.
637            // Either way, we can't call `dep_graph.read()` as we don't have the
638            // DepNodeIndex. We must invoke the query itself. The performance cost
639            // this introduces should be negligible as we'll immediately hit the
640            // in-memory cache, or another query down the line will.
641            return EnsureCanSkip { skip_execution: false, dep_node: Some(dep_node) };
642        }
643        Some((serialized_dep_node_index, dep_node_index)) => {
644            dep_graph.read_index(dep_node_index);
645            tcx.prof.query_cache_hit(dep_node_index.into());
646            serialized_dep_node_index
647        }
648    };
649
650    match ensure_mode {
651        EnsureMode::Ok => {
652            // In ensure-ok mode, we can skip execution for this key if the node
653            // is green. It must have succeeded in the previous session, and
654            // therefore would succeed in the current session if executed.
655            EnsureCanSkip { skip_execution: true, dep_node: None }
656        }
657        EnsureMode::Done => {
658            // In ensure-done mode, we can only skip execution for this key if
659            // there's a disk-cached value available to load later if needed,
660            // which guarantees the query provider will never run for this key.
661            let is_loadable = (query.is_loadable_from_disk_fn)(tcx, key, serialized_dep_node_index);
662            EnsureCanSkip { skip_execution: is_loadable, dep_node: Some(dep_node) }
663        }
664    }
665}
666
667/// Called by a macro-generated impl of [`QueryVTable::execute_query_fn`],
668/// in non-incremental mode.
669#[inline(always)]
670pub(super) fn execute_query_non_incr_inner<'tcx, C: QueryCache>(
671    query: &'tcx QueryVTable<'tcx, C>,
672    tcx: TyCtxt<'tcx>,
673    span: Span,
674    key: C::Key,
675) -> C::Value {
676    if true {
    if !!tcx.dep_graph.is_fully_enabled() {
        ::core::panicking::panic("assertion failed: !tcx.dep_graph.is_fully_enabled()")
    };
};debug_assert!(!tcx.dep_graph.is_fully_enabled());
677
678    ensure_sufficient_stack(|| try_execute_query::<C, false>(query, tcx, span, key, None).0)
679}
680
681/// Called by a macro-generated impl of [`QueryVTable::execute_query_fn`],
682/// in incremental mode.
683#[inline(always)]
684pub(super) fn execute_query_incr_inner<'tcx, C: QueryCache>(
685    query: &'tcx QueryVTable<'tcx, C>,
686    tcx: TyCtxt<'tcx>,
687    span: Span,
688    key: C::Key,
689    mode: QueryMode,
690) -> Option<C::Value> {
691    if true {
    if !tcx.dep_graph.is_fully_enabled() {
        ::core::panicking::panic("assertion failed: tcx.dep_graph.is_fully_enabled()")
    };
};debug_assert!(tcx.dep_graph.is_fully_enabled());
692
693    // Check if query execution can be skipped, for `ensure_ok` or `ensure_done`.
694    // This might have the side-effect of creating a suitable DepNode, which
695    // we should reuse for execution instead of creating a new one.
696    let dep_node: Option<DepNode> = match mode {
697        QueryMode::Ensure { ensure_mode } => {
698            let EnsureCanSkip { skip_execution, dep_node } =
699                check_if_ensure_can_skip_execution(query, tcx, &key, ensure_mode);
700            if skip_execution {
701                // Return early to skip execution.
702                return None;
703            }
704            dep_node
705        }
706        QueryMode::Get => None,
707    };
708
709    let (result, dep_node_index) =
710        ensure_sufficient_stack(|| try_execute_query::<C, true>(query, tcx, span, key, dep_node));
711    if let Some(dep_node_index) = dep_node_index {
712        tcx.dep_graph.read_index(dep_node_index)
713    }
714    Some(result)
715}
716
717pub(crate) fn force_query<'tcx, C: QueryCache>(
718    query: &'tcx QueryVTable<'tcx, C>,
719    tcx: TyCtxt<'tcx>,
720    key: C::Key,
721    dep_node: DepNode,
722) {
723    // We may be concurrently trying both execute and force a query.
724    // Ensure that only one of them runs the query.
725    if let Some((_, index)) = query.cache.lookup(&key) {
726        tcx.prof.query_cache_hit(index.into());
727        return;
728    }
729
730    if true {
    if !!query.anon {
        ::core::panicking::panic("assertion failed: !query.anon")
    };
};debug_assert!(!query.anon);
731
732    ensure_sufficient_stack(|| {
733        try_execute_query::<C, true>(query, tcx, DUMMY_SP, key, Some(dep_node))
734    });
735}