//! `rustc_query_impl/execution.rs` — query execution plumbing: starting and
//! completing query jobs, cycle detection, and incremental (green-node) reuse.
use std::hash::Hash;
use std::mem;

use rustc_data_structures::hash_table::{Entry, HashTable};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::{outline, sharded, sync};
use rustc_errors::{Diag, FatalError, StashKey};
use rustc_middle::dep_graph::{DepGraphData, DepNodeKey, SerializedDepNodeIndex};
use rustc_middle::query::plumbing::QueryVTable;
use rustc_middle::query::{
    ActiveKeyStatus, CycleError, CycleErrorHandling, EnsureMode, QueryCache, QueryJob, QueryJobId,
    QueryLatch, QueryMode, QueryStackDeferred, QueryStackFrame, QueryState,
};
use rustc_middle::ty::TyCtxt;
use rustc_middle::verify_ich::incremental_verify_ich;
use rustc_span::{DUMMY_SP, Span};

use crate::dep_graph::{DepNode, DepNodeIndex};
use crate::job::{QueryJobInfo, QueryJobMap, find_cycle_in_stack, report_cycle};
use crate::plumbing::{
    collect_active_jobs_from_all_queries, current_query_job, next_job_id, start_query,
};
23
24#[inline]
25fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
26    move |x| x.0 == *k
27}
28
29/// Obtains the enclosed [`QueryJob`], or panics if this query evaluation
30/// was poisoned by a panic.
31fn expect_job<'tcx>(status: ActiveKeyStatus<'tcx>) -> QueryJob<'tcx> {
32    match status {
33        ActiveKeyStatus::Started(job) => job,
34        ActiveKeyStatus::Poisoned => {
35            {
    ::core::panicking::panic_fmt(format_args!("job for query failed to start and was poisoned"));
}panic!("job for query failed to start and was poisoned")
36        }
37    }
38}
39
40pub(crate) fn all_inactive<'tcx, K>(state: &QueryState<'tcx, K>) -> bool {
41    state.active.lock_shards().all(|shard| shard.is_empty())
42}
43
44/// Internal plumbing for collecting the set of active jobs for this query.
45///
46/// Should only be called from `gather_active_jobs`.
47pub(crate) fn gather_active_jobs_inner<'tcx, K: Copy>(
48    state: &QueryState<'tcx, K>,
49    tcx: TyCtxt<'tcx>,
50    make_frame: fn(TyCtxt<'tcx>, K) -> QueryStackFrame<QueryStackDeferred<'tcx>>,
51    require_complete: bool,
52    job_map_out: &mut QueryJobMap<'tcx>, // Out-param; job info is gathered into this map
53) -> Option<()> {
54    let mut active = Vec::new();
55
56    // Helper to gather active jobs from a single shard.
57    let mut gather_shard_jobs = |shard: &HashTable<(K, ActiveKeyStatus<'tcx>)>| {
58        for (k, v) in shard.iter() {
59            if let ActiveKeyStatus::Started(ref job) = *v {
60                active.push((*k, job.clone()));
61            }
62        }
63    };
64
65    // Lock shards and gather jobs from each shard.
66    if require_complete {
67        for shard in state.active.lock_shards() {
68            gather_shard_jobs(&shard);
69        }
70    } else {
71        // We use try_lock_shards here since we are called from the
72        // deadlock handler, and this shouldn't be locked.
73        for shard in state.active.try_lock_shards() {
74            let shard = shard?;
75            gather_shard_jobs(&shard);
76        }
77    }
78
79    // Call `make_frame` while we're not holding a `state.active` lock as `make_frame` may call
80    // queries leading to a deadlock.
81    for (key, job) in active {
82        let frame = make_frame(tcx, key);
83        job_map_out.insert(job.id, QueryJobInfo { frame, job });
84    }
85
86    Some(())
87}
88
89/// Guard object representing the responsibility to execute a query job and
90/// mark it as completed.
91///
92/// This will poison the relevant query key if it is dropped without calling
93/// [`Self::complete`].
94struct ActiveJobGuard<'tcx, K>
95where
96    K: Eq + Hash + Copy,
97{
98    state: &'tcx QueryState<'tcx, K>,
99    key: K,
100    key_hash: u64,
101}
102
103#[cold]
104#[inline(never)]
105fn mk_cycle<'tcx, C: QueryCache>(
106    query: &'tcx QueryVTable<'tcx, C>,
107    tcx: TyCtxt<'tcx>,
108    cycle_error: CycleError,
109) -> C::Value {
110    let error = report_cycle(tcx.sess, &cycle_error);
111    handle_cycle_error(query, tcx, &cycle_error, error)
112}
113
114fn handle_cycle_error<'tcx, C: QueryCache>(
115    query: &'tcx QueryVTable<'tcx, C>,
116    tcx: TyCtxt<'tcx>,
117    cycle_error: &CycleError,
118    error: Diag<'_>,
119) -> C::Value {
120    match query.cycle_error_handling {
121        CycleErrorHandling::Error => {
122            let guar = error.emit();
123            query.value_from_cycle_error(tcx, cycle_error, guar)
124        }
125        CycleErrorHandling::Fatal => {
126            error.emit();
127            tcx.dcx().abort_if_errors();
128            ::core::panicking::panic("internal error: entered unreachable code")unreachable!()
129        }
130        CycleErrorHandling::DelayBug => {
131            let guar = error.delay_as_bug();
132            query.value_from_cycle_error(tcx, cycle_error, guar)
133        }
134        CycleErrorHandling::Stash => {
135            let guar = if let Some(root) = cycle_error.cycle.first()
136                && let Some(span) = root.frame.info.span
137            {
138                error.stash(span, StashKey::Cycle).unwrap()
139            } else {
140                error.emit()
141            };
142            query.value_from_cycle_error(tcx, cycle_error, guar)
143        }
144    }
145}
146
147impl<'tcx, K> ActiveJobGuard<'tcx, K>
148where
149    K: Eq + Hash + Copy,
150{
151    /// Completes the query by updating the query cache with the `result`,
152    /// signals the waiter, and forgets the guard so it won't poison the query.
153    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
154    where
155        C: QueryCache<Key = K>,
156    {
157        // Forget ourself so our destructor won't poison the query.
158        // (Extract fields by value first to make sure we don't leak anything.)
159        let Self { state, key, key_hash }: Self = self;
160        mem::forget(self);
161
162        // Mark as complete before we remove the job from the active state
163        // so no other thread can re-execute this query.
164        cache.complete(key, result, dep_node_index);
165
166        let job = {
167            // don't keep the lock during the `unwrap()` of the retrieved value, or we taint the
168            // underlying shard.
169            // since unwinding also wants to look at this map, this can also prevent a double
170            // panic.
171            let mut shard = state.active.lock_shard_by_hash(key_hash);
172            match shard.find_entry(key_hash, equivalent_key(&key)) {
173                Err(_) => None,
174                Ok(occupied) => Some(occupied.remove().0.1),
175            }
176        };
177        let job = expect_job(job.expect("active query job entry"));
178
179        job.signal_complete();
180    }
181}
182
183impl<'tcx, K> Drop for ActiveJobGuard<'tcx, K>
184where
185    K: Eq + Hash + Copy,
186{
187    #[inline(never)]
188    #[cold]
189    fn drop(&mut self) {
190        // Poison the query so jobs waiting on it panic.
191        let Self { state, key, key_hash } = *self;
192        let job = {
193            let mut shard = state.active.lock_shard_by_hash(key_hash);
194            match shard.find_entry(key_hash, equivalent_key(&key)) {
195                Err(_) => ::core::panicking::panic("explicit panic")panic!(),
196                Ok(occupied) => {
197                    let ((key, value), vacant) = occupied.remove();
198                    vacant.insert((key, ActiveKeyStatus::Poisoned));
199                    expect_job(value)
200                }
201            }
202        };
203        // Also signal the completion of the job, so waiters
204        // will continue execution.
205        job.signal_complete();
206    }
207}
208
209#[cold]
210#[inline(never)]
211fn cycle_error<'tcx, C: QueryCache>(
212    query: &'tcx QueryVTable<'tcx, C>,
213    tcx: TyCtxt<'tcx>,
214    try_execute: QueryJobId,
215    span: Span,
216) -> (C::Value, Option<DepNodeIndex>) {
217    // Ensure there was no errors collecting all active jobs.
218    // We need the complete map to ensure we find a cycle to break.
219    let job_map = collect_active_jobs_from_all_queries(tcx, false)
220        .ok()
221        .expect("failed to collect active queries");
222
223    let error = find_cycle_in_stack(try_execute, job_map, &current_query_job(tcx), span);
224    (mk_cycle(query, tcx, error.lift()), None)
225}
226
227#[inline(always)]
228fn wait_for_query<'tcx, C: QueryCache>(
229    query: &'tcx QueryVTable<'tcx, C>,
230    tcx: TyCtxt<'tcx>,
231    span: Span,
232    key: C::Key,
233    latch: QueryLatch<'tcx>,
234    current: Option<QueryJobId>,
235) -> (C::Value, Option<DepNodeIndex>) {
236    // For parallel queries, we'll block and wait until the query running
237    // in another thread has completed. Record how long we wait in the
238    // self-profiler.
239    let query_blocked_prof_timer = tcx.prof.query_blocked();
240
241    // With parallel queries we might just have to wait on some other
242    // thread.
243    let result = latch.wait_on(tcx, current, span);
244
245    match result {
246        Ok(()) => {
247            let Some((v, index)) = query.cache.lookup(&key) else {
248                outline(|| {
249                    // We didn't find the query result in the query cache. Check if it was
250                    // poisoned due to a panic instead.
251                    let key_hash = sharded::make_hash(&key);
252                    let shard = query.state.active.lock_shard_by_hash(key_hash);
253                    match shard.find(key_hash, equivalent_key(&key)) {
254                        // The query we waited on panicked. Continue unwinding here.
255                        Some((_, ActiveKeyStatus::Poisoned)) => FatalError.raise(),
256                        _ => {
    ::core::panicking::panic_fmt(format_args!("query \'{0}\' result must be in the cache or the query must be poisoned after a wait",
            query.name));
}panic!(
257                            "query '{}' result must be in the cache or the query must be poisoned after a wait",
258                            query.name
259                        ),
260                    }
261                })
262            };
263
264            tcx.prof.query_cache_hit(index.into());
265            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
266
267            (v, Some(index))
268        }
269        Err(cycle) => (mk_cycle(query, tcx, cycle.lift()), None),
270    }
271}
272
273/// Shared main part of both [`execute_query_incr_inner`] and [`execute_query_non_incr_inner`].
274#[inline(never)]
275fn try_execute_query<'tcx, C: QueryCache, const INCR: bool>(
276    query: &'tcx QueryVTable<'tcx, C>,
277    tcx: TyCtxt<'tcx>,
278    span: Span,
279    key: C::Key,
280    // If present, some previous step has already created a `DepNode` for this
281    // query+key, which we should reuse instead of creating a new one.
282    dep_node: Option<DepNode>,
283) -> (C::Value, Option<DepNodeIndex>) {
284    let key_hash = sharded::make_hash(&key);
285    let mut state_lock = query.state.active.lock_shard_by_hash(key_hash);
286
287    // For the parallel compiler we need to check both the query cache and query state structures
288    // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
289    // query is not still executing. Without checking the query cache here, we can end up
290    // re-executing the query since `try_start` only checks that the query is not currently
291    // executing, but another thread may have already completed the query and stores it result
292    // in the query cache.
293    if tcx.sess.threads() > 1 {
294        if let Some((value, index)) = query.cache.lookup(&key) {
295            tcx.prof.query_cache_hit(index.into());
296            return (value, Some(index));
297        }
298    }
299
300    let current_job_id = current_query_job(tcx);
301
302    match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
303        Entry::Vacant(entry) => {
304            // Nothing has computed or is computing the query, so we start a new job and insert it in the
305            // state map.
306            let id = next_job_id(tcx);
307            let job = QueryJob::new(id, span, current_job_id);
308            entry.insert((key, ActiveKeyStatus::Started(job)));
309
310            // Drop the lock before we start executing the query
311            drop(state_lock);
312
313            execute_job::<C, INCR>(query, tcx, key, key_hash, id, dep_node)
314        }
315        Entry::Occupied(mut entry) => {
316            match &mut entry.get_mut().1 {
317                ActiveKeyStatus::Started(job) => {
318                    if sync::is_dyn_thread_safe() {
319                        // Get the latch out
320                        let latch = job.latch();
321                        drop(state_lock);
322
323                        // Only call `wait_for_query` if we're using a Rayon thread pool
324                        // as it will attempt to mark the worker thread as blocked.
325                        return wait_for_query(query, tcx, span, key, latch, current_job_id);
326                    }
327
328                    let id = job.id;
329                    drop(state_lock);
330
331                    // If we are single-threaded we know that we have cycle error,
332                    // so we just return the error.
333                    cycle_error(query, tcx, id, span)
334                }
335                ActiveKeyStatus::Poisoned => FatalError.raise(),
336            }
337        }
338    }
339}
340
341#[inline(always)]
342fn execute_job<'tcx, C: QueryCache, const INCR: bool>(
343    query: &'tcx QueryVTable<'tcx, C>,
344    tcx: TyCtxt<'tcx>,
345    key: C::Key,
346    key_hash: u64,
347    id: QueryJobId,
348    dep_node: Option<DepNode>,
349) -> (C::Value, Option<DepNodeIndex>) {
350    // Set up a guard object that will automatically poison the query if a
351    // panic occurs while executing the query (or any intermediate plumbing).
352    let job_guard = ActiveJobGuard { state: &query.state, key, key_hash };
353
354    if true {
    match (&tcx.dep_graph.is_fully_enabled(), &INCR) {
        (left_val, right_val) => {
            if !(*left_val == *right_val) {
                let kind = ::core::panicking::AssertKind::Eq;
                ::core::panicking::assert_failed(kind, &*left_val,
                    &*right_val, ::core::option::Option::None);
            }
        }
    };
};debug_assert_eq!(tcx.dep_graph.is_fully_enabled(), INCR);
355
356    // Delegate to another function to actually execute the query job.
357    let (result, dep_node_index) = if INCR {
358        execute_job_incr(query, tcx, key, dep_node, id)
359    } else {
360        execute_job_non_incr(query, tcx, key, id)
361    };
362
363    let cache = &query.cache;
364    if query.feedable {
365        // We should not compute queries that also got a value via feeding.
366        // This can't happen, as query feeding adds the very dependencies to the fed query
367        // as its feeding query had. So if the fed query is red, so is its feeder, which will
368        // get evaluated first, and re-feed the query.
369        if let Some((cached_result, _)) = cache.lookup(&key) {
370            let Some(hasher) = query.hash_result else {
371                {
    ::core::panicking::panic_fmt(format_args!("no_hash fed query later has its value computed.\nRemove `no_hash` modifier to allow recomputation.\nThe already cached value: {0}",
            (query.format_value)(&cached_result)));
};panic!(
372                    "no_hash fed query later has its value computed.\n\
373                    Remove `no_hash` modifier to allow recomputation.\n\
374                    The already cached value: {}",
375                    (query.format_value)(&cached_result)
376                );
377            };
378
379            let (old_hash, new_hash) = tcx.with_stable_hashing_context(|mut hcx| {
380                (hasher(&mut hcx, &cached_result), hasher(&mut hcx, &result))
381            });
382            let formatter = query.format_value;
383            if old_hash != new_hash {
384                // We have an inconsistency. This can happen if one of the two
385                // results is tainted by errors.
386                if !tcx.dcx().has_errors().is_some() {
    {
        ::core::panicking::panic_fmt(format_args!("Computed query value for {0:?}({1:?}) is inconsistent with fed value,\ncomputed={2:#?}\nfed={3:#?}",
                query.dep_kind, key, formatter(&result),
                formatter(&cached_result)));
    }
};assert!(
387                    tcx.dcx().has_errors().is_some(),
388                    "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
389                        computed={:#?}\nfed={:#?}",
390                    query.dep_kind,
391                    key,
392                    formatter(&result),
393                    formatter(&cached_result),
394                );
395            }
396        }
397    }
398
399    // Tell the guard to perform completion bookkeeping, and also to not poison the query.
400    job_guard.complete(cache, result, dep_node_index);
401
402    (result, Some(dep_node_index))
403}
404
405// Fast path for when incr. comp. is off.
406#[inline(always)]
407fn execute_job_non_incr<'tcx, C: QueryCache>(
408    query: &'tcx QueryVTable<'tcx, C>,
409    tcx: TyCtxt<'tcx>,
410    key: C::Key,
411    job_id: QueryJobId,
412) -> (C::Value, DepNodeIndex) {
413    if true {
    if !!tcx.dep_graph.is_fully_enabled() {
        ::core::panicking::panic("assertion failed: !tcx.dep_graph.is_fully_enabled()")
    };
};debug_assert!(!tcx.dep_graph.is_fully_enabled());
414
415    // Fingerprint the key, just to assert that it doesn't
416    // have anything we don't consider hashable
417    if truecfg!(debug_assertions) {
418        let _ = key.to_fingerprint(tcx);
419    }
420
421    let prof_timer = tcx.prof.query_provider();
422    // Call the query provider.
423    let result =
424        start_query(tcx, job_id, query.depth_limit, || (query.invoke_provider_fn)(tcx, key));
425    let dep_node_index = tcx.dep_graph.next_virtual_depnode_index();
426    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
427
428    // Similarly, fingerprint the result to assert that
429    // it doesn't have anything not considered hashable.
430    if truecfg!(debug_assertions)
431        && let Some(hash_result) = query.hash_result
432    {
433        tcx.with_stable_hashing_context(|mut hcx| {
434            hash_result(&mut hcx, &result);
435        });
436    }
437
438    (result, dep_node_index)
439}
440
441#[inline(always)]
442fn execute_job_incr<'tcx, C: QueryCache>(
443    query: &'tcx QueryVTable<'tcx, C>,
444    tcx: TyCtxt<'tcx>,
445    key: C::Key,
446    mut dep_node_opt: Option<DepNode>,
447    job_id: QueryJobId,
448) -> (C::Value, DepNodeIndex) {
449    let dep_graph_data =
450        tcx.dep_graph.data().expect("should always be present in incremental mode");
451
452    if !query.anon && !query.eval_always {
453        // `to_dep_node` is expensive for some `DepKind`s.
454        let dep_node = dep_node_opt.get_or_insert_with(|| query.construct_dep_node(tcx, &key));
455
456        // The diagnostics for this query will be promoted to the current session during
457        // `try_mark_green()`, so we can ignore them here.
458        if let Some(ret) = start_query(tcx, job_id, false, || try {
459            let (prev_index, dep_node_index) = dep_graph_data.try_mark_green(tcx, dep_node)?;
460            let value = load_from_disk_or_invoke_provider_green(
461                tcx,
462                dep_graph_data,
463                query,
464                &key,
465                dep_node,
466                prev_index,
467                dep_node_index,
468            );
469            (value, dep_node_index)
470        }) {
471            return ret;
472        }
473    }
474
475    let prof_timer = tcx.prof.query_provider();
476
477    let (result, dep_node_index) = start_query(tcx, job_id, query.depth_limit, || {
478        if query.anon {
479            // Call the query provider inside an anon task.
480            return dep_graph_data.with_anon_task_inner(tcx, query.dep_kind, || {
481                (query.invoke_provider_fn)(tcx, key)
482            });
483        }
484
485        // `to_dep_node` is expensive for some `DepKind`s.
486        let dep_node = dep_node_opt.unwrap_or_else(|| query.construct_dep_node(tcx, &key));
487
488        // Call the query provider.
489        dep_graph_data.with_task(
490            dep_node,
491            tcx,
492            (query, key),
493            |tcx, (query, key)| (query.invoke_provider_fn)(tcx, key),
494            query.hash_result,
495        )
496    });
497
498    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
499
500    (result, dep_node_index)
501}
502
503/// Given that the dep node for this query+key is green, obtain a value for it
504/// by loading one from disk if possible, or by invoking its query provider if
505/// necessary.
506#[inline(always)]
507fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>(
508    tcx: TyCtxt<'tcx>,
509    dep_graph_data: &DepGraphData,
510    query: &'tcx QueryVTable<'tcx, C>,
511    key: &C::Key,
512    dep_node: &DepNode,
513    prev_index: SerializedDepNodeIndex,
514    dep_node_index: DepNodeIndex,
515) -> C::Value {
516    // Note this function can be called concurrently from the same query
517    // We must ensure that this is handled correctly.
518
519    if true {
    if !dep_graph_data.is_index_green(prev_index) {
        ::core::panicking::panic("assertion failed: dep_graph_data.is_index_green(prev_index)")
    };
};debug_assert!(dep_graph_data.is_index_green(prev_index));
520
521    // First we try to load the result from the on-disk cache.
522    // Some things are never cached on disk.
523    if let Some(value) = query.try_load_from_disk(tcx, key, prev_index, dep_node_index) {
524        if std::intrinsics::unlikely(tcx.sess.opts.unstable_opts.query_dep_graph) {
525            dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
526        }
527
528        let prev_fingerprint = dep_graph_data.prev_value_fingerprint_of(prev_index);
529        // If `-Zincremental-verify-ich` is specified, re-hash results from
530        // the cache and make sure that they have the expected fingerprint.
531        //
532        // If not, we still seek to verify a subset of fingerprints loaded
533        // from disk. Re-hashing results is fairly expensive, so we can't
534        // currently afford to verify every hash. This subset should still
535        // give us some coverage of potential bugs though.
536        let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32);
537        if std::intrinsics::unlikely(
538            try_verify || tcx.sess.opts.unstable_opts.incremental_verify_ich,
539        ) {
540            incremental_verify_ich(
541                tcx,
542                dep_graph_data,
543                &value,
544                prev_index,
545                query.hash_result,
546                query.format_value,
547            );
548        }
549
550        return value;
551    }
552
553    // We always expect to find a cached result for things that
554    // can be forced from `DepNode`.
555    if true {
    if !(!query.will_cache_on_disk_for_key(tcx, key) ||
                !tcx.key_fingerprint_style(dep_node.kind).reconstructible()) {
        {
            ::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for {0:?}",
                    dep_node));
        }
    };
};debug_assert!(
556        !query.will_cache_on_disk_for_key(tcx, key)
557            || !tcx.key_fingerprint_style(dep_node.kind).reconstructible(),
558        "missing on-disk cache entry for {dep_node:?}"
559    );
560
561    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
562    // we should actually be able to load it.
563    if true {
    if !!query.is_loadable_from_disk(tcx, key, prev_index) {
        {
            ::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for loadable {0:?}",
                    dep_node));
        }
    };
};debug_assert!(
564        !query.is_loadable_from_disk(tcx, key, prev_index),
565        "missing on-disk cache entry for loadable {dep_node:?}"
566    );
567
568    // We could not load a result from the on-disk cache, so
569    // recompute.
570    let prof_timer = tcx.prof.query_provider();
571
572    // The dep-graph for this computation is already in-place.
573    // Call the query provider.
574    let value = tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, *key));
575
576    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
577
578    // Verify that re-running the query produced a result with the expected hash
579    // This catches bugs in query implementations, turning them into ICEs.
580    // For example, a query might sort its result by `DefId` - since `DefId`s are
581    // not stable across compilation sessions, the result could get up getting sorted
582    // in a different order when the query is re-run, even though all of the inputs
583    // (e.g. `DefPathHash` values) were green.
584    //
585    // See issue #82920 for an example of a miscompilation that would get turned into
586    // an ICE by this check
587    incremental_verify_ich(
588        tcx,
589        dep_graph_data,
590        &value,
591        prev_index,
592        query.hash_result,
593        query.format_value,
594    );
595
596    value
597}
598
599/// Return value struct for [`check_if_ensure_can_skip_execution`].
600struct EnsureCanSkip {
601    /// If true, the current `tcx.ensure_ok()` or `tcx.ensure_done()` query
602    /// can return early without actually trying to execute.
603    skip_execution: bool,
604    /// A dep node that was prepared while checking whether execution can be
605    /// skipped, to be reused by execution itself if _not_ skipped.
606    dep_node: Option<DepNode>,
607}
608
609/// Checks whether a `tcx.ensure_ok()` or `tcx.ensure_done()` query call can
610/// return early without actually trying to execute.
611///
612/// This only makes sense during incremental compilation, because it relies
613/// on having the dependency graph (and in some cases a disk-cached value)
614/// from the previous incr-comp session.
615#[inline(never)]
616fn check_if_ensure_can_skip_execution<'tcx, C: QueryCache>(
617    query: &'tcx QueryVTable<'tcx, C>,
618    tcx: TyCtxt<'tcx>,
619    key: &C::Key,
620    ensure_mode: EnsureMode,
621) -> EnsureCanSkip {
622    // Queries with `eval_always` should never skip execution.
623    if query.eval_always {
624        return EnsureCanSkip { skip_execution: false, dep_node: None };
625    }
626
627    // Ensuring an anonymous query makes no sense
628    if !!query.anon { ::core::panicking::panic("assertion failed: !query.anon") };assert!(!query.anon);
629
630    let dep_node = query.construct_dep_node(tcx, key);
631
632    let dep_graph = &tcx.dep_graph;
633    let serialized_dep_node_index = match dep_graph.try_mark_green(tcx, &dep_node) {
634        None => {
635            // A None return from `try_mark_green` means that this is either
636            // a new dep node or that the dep node has already been marked red.
637            // Either way, we can't call `dep_graph.read()` as we don't have the
638            // DepNodeIndex. We must invoke the query itself. The performance cost
639            // this introduces should be negligible as we'll immediately hit the
640            // in-memory cache, or another query down the line will.
641            return EnsureCanSkip { skip_execution: false, dep_node: Some(dep_node) };
642        }
643        Some((serialized_dep_node_index, dep_node_index)) => {
644            dep_graph.read_index(dep_node_index);
645            tcx.prof.query_cache_hit(dep_node_index.into());
646            serialized_dep_node_index
647        }
648    };
649
650    match ensure_mode {
651        EnsureMode::Ok => {
652            // In ensure-ok mode, we can skip execution for this key if the node
653            // is green. It must have succeeded in the previous session, and
654            // therefore would succeed in the current session if executed.
655            EnsureCanSkip { skip_execution: true, dep_node: None }
656        }
657        EnsureMode::Done => {
658            // In ensure-done mode, we can only skip execution for this key if
659            // there's a disk-cached value available to load later if needed,
660            // which guarantees the query provider will never run for this key.
661            let is_loadable = query.is_loadable_from_disk(tcx, key, serialized_dep_node_index);
662            EnsureCanSkip { skip_execution: is_loadable, dep_node: Some(dep_node) }
663        }
664    }
665}
666
667/// Called by a macro-generated impl of [`QueryVTable::execute_query_fn`],
668/// in non-incremental mode.
669#[inline(always)]
670pub(super) fn execute_query_non_incr_inner<'tcx, C: QueryCache>(
671    query: &'tcx QueryVTable<'tcx, C>,
672    tcx: TyCtxt<'tcx>,
673    span: Span,
674    key: C::Key,
675) -> C::Value {
676    if true {
    if !!tcx.dep_graph.is_fully_enabled() {
        ::core::panicking::panic("assertion failed: !tcx.dep_graph.is_fully_enabled()")
    };
};debug_assert!(!tcx.dep_graph.is_fully_enabled());
677
678    ensure_sufficient_stack(|| try_execute_query::<C, false>(query, tcx, span, key, None).0)
679}
680
681/// Called by a macro-generated impl of [`QueryVTable::execute_query_fn`],
682/// in incremental mode.
683#[inline(always)]
684pub(super) fn execute_query_incr_inner<'tcx, C: QueryCache>(
685    query: &'tcx QueryVTable<'tcx, C>,
686    tcx: TyCtxt<'tcx>,
687    span: Span,
688    key: C::Key,
689    mode: QueryMode,
690) -> Option<C::Value> {
691    if true {
    if !tcx.dep_graph.is_fully_enabled() {
        ::core::panicking::panic("assertion failed: tcx.dep_graph.is_fully_enabled()")
    };
};debug_assert!(tcx.dep_graph.is_fully_enabled());
692
693    // Check if query execution can be skipped, for `ensure_ok` or `ensure_done`.
694    // This might have the side-effect of creating a suitable DepNode, which
695    // we should reuse for execution instead of creating a new one.
696    let dep_node: Option<DepNode> = match mode {
697        QueryMode::Ensure { ensure_mode } => {
698            let EnsureCanSkip { skip_execution, dep_node } =
699                check_if_ensure_can_skip_execution(query, tcx, &key, ensure_mode);
700            if skip_execution {
701                // Return early to skip execution.
702                return None;
703            }
704            dep_node
705        }
706        QueryMode::Get => None,
707    };
708
709    let (result, dep_node_index) =
710        ensure_sufficient_stack(|| try_execute_query::<C, true>(query, tcx, span, key, dep_node));
711    if let Some(dep_node_index) = dep_node_index {
712        tcx.dep_graph.read_index(dep_node_index)
713    }
714    Some(result)
715}
716
717pub(crate) fn force_query<'tcx, C: QueryCache>(
718    query: &'tcx QueryVTable<'tcx, C>,
719    tcx: TyCtxt<'tcx>,
720    key: C::Key,
721    dep_node: DepNode,
722) {
723    // We may be concurrently trying both execute and force a query.
724    // Ensure that only one of them runs the query.
725    if let Some((_, index)) = query.cache.lookup(&key) {
726        tcx.prof.query_cache_hit(index.into());
727        return;
728    }
729
730    if true {
    if !!query.anon {
        ::core::panicking::panic("assertion failed: !query.anon")
    };
};debug_assert!(!query.anon);
731
732    ensure_sufficient_stack(|| {
733        try_execute_query::<C, true>(query, tcx, DUMMY_SP, key, Some(dep_node))
734    });
735}