//! Query-execution plumbing for `rustc_query_impl` (`execution.rs`).

use std::hash::Hash;
use std::mem;

use rustc_data_structures::hash_table::{Entry, HashTable};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::{outline, sharded, sync};
use rustc_errors::{Diag, FatalError, StashKey};
use rustc_middle::dep_graph::DepsType;
use rustc_middle::ty::TyCtxt;
use rustc_query_system::dep_graph::{DepGraphData, DepNodeKey, HasDepContext};
use rustc_query_system::query::{
    ActiveKeyStatus, CycleError, CycleErrorHandling, QueryCache, QueryJob, QueryJobId, QueryLatch,
    QueryMode, QueryStackDeferred, QueryStackFrame, QueryState, incremental_verify_ich,
};
use rustc_span::{DUMMY_SP, Span};

use crate::dep_graph::{DepContext, DepNode, DepNodeIndex};
use crate::job::{QueryJobInfo, QueryMap, find_cycle_in_stack, report_cycle};
use crate::{QueryCtxt, QueryFlags, SemiDynamicQueryDispatcher};

/// Builds an equality predicate over `(key, value)` pairs that matches on the
/// key alone. Used with `HashTable::find`/`find_entry`/`entry`, which take a
/// caller-supplied equality closure instead of hashing the key themselves.
#[inline]
fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
    move |x| x.0 == *k
}

26/// Obtains the enclosed [`QueryJob`], or panics if this query evaluation
27/// was poisoned by a panic.
28fn expect_job<'tcx>(status: ActiveKeyStatus<'tcx>) -> QueryJob<'tcx> {
29    match status {
30        ActiveKeyStatus::Started(job) => job,
31        ActiveKeyStatus::Poisoned => {
32            {
    ::core::panicking::panic_fmt(format_args!("job for query failed to start and was poisoned"));
}panic!("job for query failed to start and was poisoned")
33        }
34    }
35}
36
37pub(crate) fn all_inactive<'tcx, K>(state: &QueryState<'tcx, K>) -> bool {
38    state.active.lock_shards().all(|shard| shard.is_empty())
39}
40
41/// Internal plumbing for collecting the set of active jobs for this query.
42///
43/// Should only be called from `gather_active_jobs`.
44pub(crate) fn gather_active_jobs_inner<'tcx, K: Copy>(
45    state: &QueryState<'tcx, K>,
46    tcx: TyCtxt<'tcx>,
47    make_frame: fn(TyCtxt<'tcx>, K) -> QueryStackFrame<QueryStackDeferred<'tcx>>,
48    jobs: &mut QueryMap<'tcx>,
49    require_complete: bool,
50) -> Option<()> {
51    let mut active = Vec::new();
52
53    // Helper to gather active jobs from a single shard.
54    let mut gather_shard_jobs = |shard: &HashTable<(K, ActiveKeyStatus<'tcx>)>| {
55        for (k, v) in shard.iter() {
56            if let ActiveKeyStatus::Started(ref job) = *v {
57                active.push((*k, job.clone()));
58            }
59        }
60    };
61
62    // Lock shards and gather jobs from each shard.
63    if require_complete {
64        for shard in state.active.lock_shards() {
65            gather_shard_jobs(&shard);
66        }
67    } else {
68        // We use try_lock_shards here since we are called from the
69        // deadlock handler, and this shouldn't be locked.
70        for shard in state.active.try_lock_shards() {
71            let shard = shard?;
72            gather_shard_jobs(&shard);
73        }
74    }
75
76    // Call `make_frame` while we're not holding a `state.active` lock as `make_frame` may call
77    // queries leading to a deadlock.
78    for (key, job) in active {
79        let frame = make_frame(tcx, key);
80        jobs.insert(job.id, QueryJobInfo { frame, job });
81    }
82
83    Some(())
84}
85
86/// Guard object representing the responsibility to execute a query job and
87/// mark it as completed.
88///
89/// This will poison the relevant query key if it is dropped without calling
90/// [`Self::complete`].
91struct ActiveJobGuard<'tcx, K>
92where
93    K: Eq + Hash + Copy,
94{
95    state: &'tcx QueryState<'tcx, K>,
96    key: K,
97    key_hash: u64,
98}
99
100#[cold]
101#[inline(never)]
102fn mk_cycle<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
103    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
104    qcx: QueryCtxt<'tcx>,
105    cycle_error: CycleError,
106) -> C::Value {
107    let error = report_cycle(qcx.tcx.sess, &cycle_error);
108    handle_cycle_error(query, qcx, &cycle_error, error)
109}
110
111fn handle_cycle_error<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
112    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
113    qcx: QueryCtxt<'tcx>,
114    cycle_error: &CycleError,
115    error: Diag<'_>,
116) -> C::Value {
117    match query.cycle_error_handling() {
118        CycleErrorHandling::Error => {
119            let guar = error.emit();
120            query.value_from_cycle_error(qcx.tcx, cycle_error, guar)
121        }
122        CycleErrorHandling::Fatal => {
123            error.emit();
124            qcx.tcx.dcx().abort_if_errors();
125            ::core::panicking::panic("internal error: entered unreachable code")unreachable!()
126        }
127        CycleErrorHandling::DelayBug => {
128            let guar = error.delay_as_bug();
129            query.value_from_cycle_error(qcx.tcx, cycle_error, guar)
130        }
131        CycleErrorHandling::Stash => {
132            let guar = if let Some(root) = cycle_error.cycle.first()
133                && let Some(span) = root.frame.info.span
134            {
135                error.stash(span, StashKey::Cycle).unwrap()
136            } else {
137                error.emit()
138            };
139            query.value_from_cycle_error(qcx.tcx, cycle_error, guar)
140        }
141    }
142}
143
144impl<'tcx, K> ActiveJobGuard<'tcx, K>
145where
146    K: Eq + Hash + Copy,
147{
148    /// Completes the query by updating the query cache with the `result`,
149    /// signals the waiter, and forgets the guard so it won't poison the query.
150    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
151    where
152        C: QueryCache<Key = K>,
153    {
154        // Forget ourself so our destructor won't poison the query.
155        // (Extract fields by value first to make sure we don't leak anything.)
156        let Self { state, key, key_hash }: Self = self;
157        mem::forget(self);
158
159        // Mark as complete before we remove the job from the active state
160        // so no other thread can re-execute this query.
161        cache.complete(key, result, dep_node_index);
162
163        let job = {
164            // don't keep the lock during the `unwrap()` of the retrieved value, or we taint the
165            // underlying shard.
166            // since unwinding also wants to look at this map, this can also prevent a double
167            // panic.
168            let mut shard = state.active.lock_shard_by_hash(key_hash);
169            match shard.find_entry(key_hash, equivalent_key(&key)) {
170                Err(_) => None,
171                Ok(occupied) => Some(occupied.remove().0.1),
172            }
173        };
174        let job = expect_job(job.expect("active query job entry"));
175
176        job.signal_complete();
177    }
178}
179
180impl<'tcx, K> Drop for ActiveJobGuard<'tcx, K>
181where
182    K: Eq + Hash + Copy,
183{
184    #[inline(never)]
185    #[cold]
186    fn drop(&mut self) {
187        // Poison the query so jobs waiting on it panic.
188        let Self { state, key, key_hash } = *self;
189        let job = {
190            let mut shard = state.active.lock_shard_by_hash(key_hash);
191            match shard.find_entry(key_hash, equivalent_key(&key)) {
192                Err(_) => ::core::panicking::panic("explicit panic")panic!(),
193                Ok(occupied) => {
194                    let ((key, value), vacant) = occupied.remove();
195                    vacant.insert((key, ActiveKeyStatus::Poisoned));
196                    expect_job(value)
197                }
198            }
199        };
200        // Also signal the completion of the job, so waiters
201        // will continue execution.
202        job.signal_complete();
203    }
204}
205
206#[cold]
207#[inline(never)]
208fn cycle_error<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
209    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
210    qcx: QueryCtxt<'tcx>,
211    try_execute: QueryJobId,
212    span: Span,
213) -> (C::Value, Option<DepNodeIndex>) {
214    // Ensure there was no errors collecting all active jobs.
215    // We need the complete map to ensure we find a cycle to break.
216    let query_map = qcx
217        .collect_active_jobs_from_all_queries(false)
218        .ok()
219        .expect("failed to collect active queries");
220
221    let error = find_cycle_in_stack(try_execute, query_map, &qcx.current_query_job(), span);
222    (mk_cycle(query, qcx, error.lift()), None)
223}
224
225#[inline(always)]
226fn wait_for_query<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
227    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
228    qcx: QueryCtxt<'tcx>,
229    span: Span,
230    key: C::Key,
231    latch: QueryLatch<'tcx>,
232    current: Option<QueryJobId>,
233) -> (C::Value, Option<DepNodeIndex>) {
234    // For parallel queries, we'll block and wait until the query running
235    // in another thread has completed. Record how long we wait in the
236    // self-profiler.
237    let query_blocked_prof_timer = qcx.tcx.prof.query_blocked();
238
239    // With parallel queries we might just have to wait on some other
240    // thread.
241    let result = latch.wait_on(qcx, current, span);
242
243    match result {
244        Ok(()) => {
245            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
246                outline(|| {
247                    // We didn't find the query result in the query cache. Check if it was
248                    // poisoned due to a panic instead.
249                    let key_hash = sharded::make_hash(&key);
250                    let shard = query.query_state(qcx).active.lock_shard_by_hash(key_hash);
251                    match shard.find(key_hash, equivalent_key(&key)) {
252                        // The query we waited on panicked. Continue unwinding here.
253                        Some((_, ActiveKeyStatus::Poisoned)) => FatalError.raise(),
254                        _ => {
    ::core::panicking::panic_fmt(format_args!("query \'{0}\' result must be in the cache or the query must be poisoned after a wait",
            query.name()));
}panic!(
255                            "query '{}' result must be in the cache or the query must be poisoned after a wait",
256                            query.name()
257                        ),
258                    }
259                })
260            };
261
262            qcx.tcx.prof.query_cache_hit(index.into());
263            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
264
265            (v, Some(index))
266        }
267        Err(cycle) => (mk_cycle(query, qcx, cycle.lift()), None),
268    }
269}
270
271#[inline(never)]
272fn try_execute_query<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: bool>(
273    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
274    qcx: QueryCtxt<'tcx>,
275    span: Span,
276    key: C::Key,
277    dep_node: Option<DepNode>,
278) -> (C::Value, Option<DepNodeIndex>) {
279    let state = query.query_state(qcx);
280    let key_hash = sharded::make_hash(&key);
281    let mut state_lock = state.active.lock_shard_by_hash(key_hash);
282
283    // For the parallel compiler we need to check both the query cache and query state structures
284    // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
285    // query is not still executing. Without checking the query cache here, we can end up
286    // re-executing the query since `try_start` only checks that the query is not currently
287    // executing, but another thread may have already completed the query and stores it result
288    // in the query cache.
289    if qcx.tcx.sess.threads() > 1 {
290        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
291            qcx.tcx.prof.query_cache_hit(index.into());
292            return (value, Some(index));
293        }
294    }
295
296    let current_job_id = qcx.current_query_job();
297
298    match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
299        Entry::Vacant(entry) => {
300            // Nothing has computed or is computing the query, so we start a new job and insert it in the
301            // state map.
302            let id = qcx.next_job_id();
303            let job = QueryJob::new(id, span, current_job_id);
304            entry.insert((key, ActiveKeyStatus::Started(job)));
305
306            // Drop the lock before we start executing the query
307            drop(state_lock);
308
309            execute_job::<C, FLAGS, INCR>(query, qcx, state, key, key_hash, id, dep_node)
310        }
311        Entry::Occupied(mut entry) => {
312            match &mut entry.get_mut().1 {
313                ActiveKeyStatus::Started(job) => {
314                    if sync::is_dyn_thread_safe() {
315                        // Get the latch out
316                        let latch = job.latch();
317                        drop(state_lock);
318
319                        // Only call `wait_for_query` if we're using a Rayon thread pool
320                        // as it will attempt to mark the worker thread as blocked.
321                        return wait_for_query(query, qcx, span, key, latch, current_job_id);
322                    }
323
324                    let id = job.id;
325                    drop(state_lock);
326
327                    // If we are single-threaded we know that we have cycle error,
328                    // so we just return the error.
329                    cycle_error(query, qcx, id, span)
330                }
331                ActiveKeyStatus::Poisoned => FatalError.raise(),
332            }
333        }
334    }
335}
336
337#[inline(always)]
338fn execute_job<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: bool>(
339    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
340    qcx: QueryCtxt<'tcx>,
341    state: &'tcx QueryState<'tcx, C::Key>,
342    key: C::Key,
343    key_hash: u64,
344    id: QueryJobId,
345    dep_node: Option<DepNode>,
346) -> (C::Value, Option<DepNodeIndex>) {
347    // Set up a guard object that will automatically poison the query if a
348    // panic occurs while executing the query (or any intermediate plumbing).
349    let job_guard = ActiveJobGuard { state, key, key_hash };
350
351    if true {
    match (&qcx.tcx.dep_graph.is_fully_enabled(), &INCR) {
        (left_val, right_val) => {
            if !(*left_val == *right_val) {
                let kind = ::core::panicking::AssertKind::Eq;
                ::core::panicking::assert_failed(kind, &*left_val,
                    &*right_val, ::core::option::Option::None);
            }
        }
    };
};debug_assert_eq!(qcx.tcx.dep_graph.is_fully_enabled(), INCR);
352
353    // Delegate to another function to actually execute the query job.
354    let (result, dep_node_index) = if INCR {
355        execute_job_incr(query, qcx, qcx.tcx.dep_graph.data().unwrap(), key, dep_node, id)
356    } else {
357        execute_job_non_incr(query, qcx, key, id)
358    };
359
360    let cache = query.query_cache(qcx);
361    if query.feedable() {
362        // We should not compute queries that also got a value via feeding.
363        // This can't happen, as query feeding adds the very dependencies to the fed query
364        // as its feeding query had. So if the fed query is red, so is its feeder, which will
365        // get evaluated first, and re-feed the query.
366        if let Some((cached_result, _)) = cache.lookup(&key) {
367            let Some(hasher) = query.hash_result() else {
368                {
    ::core::panicking::panic_fmt(format_args!("no_hash fed query later has its value computed.\nRemove `no_hash` modifier to allow recomputation.\nThe already cached value: {0}",
            (query.format_value())(&cached_result)));
};panic!(
369                    "no_hash fed query later has its value computed.\n\
370                    Remove `no_hash` modifier to allow recomputation.\n\
371                    The already cached value: {}",
372                    (query.format_value())(&cached_result)
373                );
374            };
375
376            let (old_hash, new_hash) = qcx.dep_context().with_stable_hashing_context(|mut hcx| {
377                (hasher(&mut hcx, &cached_result), hasher(&mut hcx, &result))
378            });
379            let formatter = query.format_value();
380            if old_hash != new_hash {
381                // We have an inconsistency. This can happen if one of the two
382                // results is tainted by errors.
383                if !qcx.tcx.dcx().has_errors().is_some() {
    {
        ::core::panicking::panic_fmt(format_args!("Computed query value for {0:?}({1:?}) is inconsistent with fed value,\ncomputed={2:#?}\nfed={3:#?}",
                query.dep_kind(), key, formatter(&result),
                formatter(&cached_result)));
    }
};assert!(
384                    qcx.tcx.dcx().has_errors().is_some(),
385                    "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
386                        computed={:#?}\nfed={:#?}",
387                    query.dep_kind(),
388                    key,
389                    formatter(&result),
390                    formatter(&cached_result),
391                );
392            }
393        }
394    }
395
396    // Tell the guard to perform completion bookkeeping, and also to not poison the query.
397    job_guard.complete(cache, result, dep_node_index);
398
399    (result, Some(dep_node_index))
400}
401
402// Fast path for when incr. comp. is off.
403#[inline(always)]
404fn execute_job_non_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
405    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
406    qcx: QueryCtxt<'tcx>,
407    key: C::Key,
408    job_id: QueryJobId,
409) -> (C::Value, DepNodeIndex) {
410    if true {
    if !!qcx.tcx.dep_graph.is_fully_enabled() {
        ::core::panicking::panic("assertion failed: !qcx.tcx.dep_graph.is_fully_enabled()")
    };
};debug_assert!(!qcx.tcx.dep_graph.is_fully_enabled());
411
412    // Fingerprint the key, just to assert that it doesn't
413    // have anything we don't consider hashable
414    if truecfg!(debug_assertions) {
415        let _ = key.to_fingerprint(qcx.tcx);
416    }
417
418    let prof_timer = qcx.tcx.prof.query_provider();
419    // Call the query provider.
420    let result = qcx.start_query(job_id, query.depth_limit(), || query.invoke_provider(qcx, key));
421    let dep_node_index = qcx.tcx.dep_graph.next_virtual_depnode_index();
422    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
423
424    // Similarly, fingerprint the result to assert that
425    // it doesn't have anything not considered hashable.
426    if truecfg!(debug_assertions)
427        && let Some(hash_result) = query.hash_result()
428    {
429        qcx.dep_context().with_stable_hashing_context(|mut hcx| {
430            hash_result(&mut hcx, &result);
431        });
432    }
433
434    (result, dep_node_index)
435}
436
437#[inline(always)]
438fn execute_job_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
439    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
440    qcx: QueryCtxt<'tcx>,
441    dep_graph_data: &DepGraphData<DepsType>,
442    key: C::Key,
443    mut dep_node_opt: Option<DepNode>,
444    job_id: QueryJobId,
445) -> (C::Value, DepNodeIndex) {
446    if !query.anon() && !query.eval_always() {
447        // `to_dep_node` is expensive for some `DepKind`s.
448        let dep_node = dep_node_opt.get_or_insert_with(|| query.construct_dep_node(qcx.tcx, &key));
449
450        // The diagnostics for this query will be promoted to the current session during
451        // `try_mark_green()`, so we can ignore them here.
452        if let Some(ret) = qcx.start_query(job_id, false, || {
453            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, dep_node)
454        }) {
455            return ret;
456        }
457    }
458
459    let prof_timer = qcx.tcx.prof.query_provider();
460
461    let (result, dep_node_index) = qcx.start_query(job_id, query.depth_limit(), || {
462        if query.anon() {
463            // Call the query provider inside an anon task.
464            return dep_graph_data.with_anon_task_inner(qcx.tcx, query.dep_kind(), || {
465                query.invoke_provider(qcx, key)
466            });
467        }
468
469        // `to_dep_node` is expensive for some `DepKind`s.
470        let dep_node = dep_node_opt.unwrap_or_else(|| query.construct_dep_node(qcx.tcx, &key));
471
472        // Call the query provider.
473        dep_graph_data.with_task(
474            dep_node,
475            (qcx, query),
476            key,
477            |(qcx, query), key| query.invoke_provider(qcx, key),
478            query.hash_result(),
479        )
480    });
481
482    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
483
484    (result, dep_node_index)
485}
486
487#[inline(always)]
488fn try_load_from_disk_and_cache_in_memory<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
489    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
490    dep_graph_data: &DepGraphData<DepsType>,
491    qcx: QueryCtxt<'tcx>,
492    key: &C::Key,
493    dep_node: &DepNode,
494) -> Option<(C::Value, DepNodeIndex)> {
495    // Note this function can be called concurrently from the same query
496    // We must ensure that this is handled correctly.
497
498    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, dep_node)?;
499
500    if true {
    if !dep_graph_data.is_index_green(prev_dep_node_index) {
        ::core::panicking::panic("assertion failed: dep_graph_data.is_index_green(prev_dep_node_index)")
    };
};debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));
501
502    // First we try to load the result from the on-disk cache.
503    // Some things are never cached on disk.
504    if let Some(result) = query.try_load_from_disk(qcx, key, prev_dep_node_index, dep_node_index) {
505        if std::intrinsics::unlikely(qcx.tcx.sess.opts.unstable_opts.query_dep_graph) {
506            dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
507        }
508
509        let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
510        // If `-Zincremental-verify-ich` is specified, re-hash results from
511        // the cache and make sure that they have the expected fingerprint.
512        //
513        // If not, we still seek to verify a subset of fingerprints loaded
514        // from disk. Re-hashing results is fairly expensive, so we can't
515        // currently afford to verify every hash. This subset should still
516        // give us some coverage of potential bugs though.
517        let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32);
518        if std::intrinsics::unlikely(
519            try_verify || qcx.tcx.sess.opts.unstable_opts.incremental_verify_ich,
520        ) {
521            incremental_verify_ich(
522                qcx.tcx,
523                dep_graph_data,
524                &result,
525                prev_dep_node_index,
526                query.hash_result(),
527                query.format_value(),
528            );
529        }
530
531        return Some((result, dep_node_index));
532    }
533
534    // We always expect to find a cached result for things that
535    // can be forced from `DepNode`.
536    if true {
    if !(!query.will_cache_on_disk_for_key(qcx.tcx, key) ||
                !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible())
        {
        {
            ::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for {0:?}",
                    dep_node));
        }
    };
};debug_assert!(
537        !query.will_cache_on_disk_for_key(qcx.tcx, key)
538            || !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
539        "missing on-disk cache entry for {dep_node:?}"
540    );
541
542    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
543    // we should actually be able to load it.
544    if true {
    if !!query.is_loadable_from_disk(qcx, key, prev_dep_node_index) {
        {
            ::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for loadable {0:?}",
                    dep_node));
        }
    };
};debug_assert!(
545        !query.is_loadable_from_disk(qcx, key, prev_dep_node_index),
546        "missing on-disk cache entry for loadable {dep_node:?}"
547    );
548
549    // We could not load a result from the on-disk cache, so
550    // recompute.
551    let prof_timer = qcx.tcx.prof.query_provider();
552
553    // The dep-graph for this computation is already in-place.
554    // Call the query provider.
555    let result = qcx.tcx.dep_graph.with_ignore(|| query.invoke_provider(qcx, *key));
556
557    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
558
559    // Verify that re-running the query produced a result with the expected hash
560    // This catches bugs in query implementations, turning them into ICEs.
561    // For example, a query might sort its result by `DefId` - since `DefId`s are
562    // not stable across compilation sessions, the result could get up getting sorted
563    // in a different order when the query is re-run, even though all of the inputs
564    // (e.g. `DefPathHash` values) were green.
565    //
566    // See issue #82920 for an example of a miscompilation that would get turned into
567    // an ICE by this check
568    incremental_verify_ich(
569        qcx.tcx,
570        dep_graph_data,
571        &result,
572        prev_dep_node_index,
573        query.hash_result(),
574        query.format_value(),
575    );
576
577    Some((result, dep_node_index))
578}
579
580/// Ensure that either this query has all green inputs or been executed.
581/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
582/// Returns true if the query should still run.
583///
584/// This function is particularly useful when executing passes for their
585/// side-effects -- e.g., in order to report errors for erroneous programs.
586///
587/// Note: The optimization is only available during incr. comp.
588#[inline(never)]
589fn ensure_must_run<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
590    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
591    qcx: QueryCtxt<'tcx>,
592    key: &C::Key,
593    check_cache: bool,
594) -> (bool, Option<DepNode>) {
595    if query.eval_always() {
596        return (true, None);
597    }
598
599    // Ensuring an anonymous query makes no sense
600    if !!query.anon() {
    ::core::panicking::panic("assertion failed: !query.anon()")
};assert!(!query.anon());
601
602    let dep_node = query.construct_dep_node(qcx.tcx, key);
603
604    let dep_graph = &qcx.tcx.dep_graph;
605    let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
606        None => {
607            // A None return from `try_mark_green` means that this is either
608            // a new dep node or that the dep node has already been marked red.
609            // Either way, we can't call `dep_graph.read()` as we don't have the
610            // DepNodeIndex. We must invoke the query itself. The performance cost
611            // this introduces should be negligible as we'll immediately hit the
612            // in-memory cache, or another query down the line will.
613            return (true, Some(dep_node));
614        }
615        Some((serialized_dep_node_index, dep_node_index)) => {
616            dep_graph.read_index(dep_node_index);
617            qcx.tcx.prof.query_cache_hit(dep_node_index.into());
618            serialized_dep_node_index
619        }
620    };
621
622    // We do not need the value at all, so do not check the cache.
623    if !check_cache {
624        return (false, None);
625    }
626
627    let loadable = query.is_loadable_from_disk(qcx, key, serialized_dep_node_index);
628    (!loadable, Some(dep_node))
629}
630
631#[inline(always)]
632pub(super) fn get_query_non_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
633    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
634    qcx: QueryCtxt<'tcx>,
635    span: Span,
636    key: C::Key,
637) -> C::Value {
638    if true {
    if !!qcx.tcx.dep_graph.is_fully_enabled() {
        ::core::panicking::panic("assertion failed: !qcx.tcx.dep_graph.is_fully_enabled()")
    };
};debug_assert!(!qcx.tcx.dep_graph.is_fully_enabled());
639
640    ensure_sufficient_stack(|| try_execute_query::<C, FLAGS, false>(query, qcx, span, key, None).0)
641}
642
643#[inline(always)]
644pub(super) fn get_query_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
645    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
646    qcx: QueryCtxt<'tcx>,
647    span: Span,
648    key: C::Key,
649    mode: QueryMode,
650) -> Option<C::Value> {
651    if true {
    if !qcx.tcx.dep_graph.is_fully_enabled() {
        ::core::panicking::panic("assertion failed: qcx.tcx.dep_graph.is_fully_enabled()")
    };
};debug_assert!(qcx.tcx.dep_graph.is_fully_enabled());
652
653    let dep_node = if let QueryMode::Ensure { check_cache } = mode {
654        let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
655        if !must_run {
656            return None;
657        }
658        dep_node
659    } else {
660        None
661    };
662
663    let (result, dep_node_index) = ensure_sufficient_stack(|| {
664        try_execute_query::<C, FLAGS, true>(query, qcx, span, key, dep_node)
665    });
666    if let Some(dep_node_index) = dep_node_index {
667        qcx.tcx.dep_graph.read_index(dep_node_index)
668    }
669    Some(result)
670}
671
672pub(crate) fn force_query<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
673    query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
674    qcx: QueryCtxt<'tcx>,
675    key: C::Key,
676    dep_node: DepNode,
677) {
678    // We may be concurrently trying both execute and force a query.
679    // Ensure that only one of them runs the query.
680    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
681        qcx.tcx.prof.query_cache_hit(index.into());
682        return;
683    }
684
685    if true {
    if !!query.anon() {
        ::core::panicking::panic("assertion failed: !query.anon()")
    };
};debug_assert!(!query.anon());
686
687    ensure_sufficient_stack(|| {
688        try_execute_query::<C, FLAGS, true>(query, qcx, DUMMY_SP, key, Some(dep_node))
689    });
690}