use std::cell::Cell;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;

use hashbrown::HashTable;
use hashbrown::hash_table::Entry;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::LockGuard;
use rustc_data_structures::{outline, sync};
use rustc_errors::{Diag, FatalError, StashKey};
use rustc_span::{DUMMY_SP, Span};
use tracing::instrument;

use super::QueryConfig;
use crate::HandleCycleError;
use crate::dep_graph::{DepContext, DepGraphData, DepNode, DepNodeIndex, DepNodeParams};
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryLatch, report_cycle};
use crate::query::{QueryContext, QueryMap, QueryStackFrame, SerializedDepNodeIndex};

#[inline]
fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
    move |x| x.0 == *k
}

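/// Tracks the currently executing jobs for one kind of query, keyed by the query key and
/// sharded by the key's hash.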
pub struct QueryState<K> {
    active: Sharded<hashbrown::HashTable<(K, QueryResult)>>,
}

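/// The state of one entry in [`QueryState::active`].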
enum QueryResult {
    /// A query that has started executing. The contained job can be used to wait for its
    /// completion.
    Started(QueryJob),

    /// The query failed to finish (e.g. its provider panicked). Threads that find this
    /// entry raise a fatal error instead of waiting or re-executing the query.
    Poisoned,
}

impl QueryResult {
    /// Unwraps the started query job, panicking if the entry was poisoned.
    fn expect_job(self) -> QueryJob {
        match self {
            Self::Started(job) => job,
            Self::Poisoned => {
                panic!("job for query failed to start and was poisoned")
            }
        }
    }
}

impl<K> QueryState<K>
where
    K: Eq + Hash + Copy + Debug,
{
    pub fn all_inactive(&self) -> bool {
        self.active.lock_shards().all(|shard| shard.is_empty())
    }

    pub fn collect_active_jobs<Qcx: Copy>(
        &self,
        qcx: Qcx,
        make_query: fn(Qcx, K) -> QueryStackFrame,
        jobs: &mut QueryMap,
        require_complete: bool,
    ) -> Option<()> {
        let mut active = Vec::new();

        let mut collect = |iter: LockGuard<'_, HashTable<(K, QueryResult)>>| {
            for (k, v) in iter.iter() {
                if let QueryResult::Started(ref job) = *v {
                    active.push((*k, job.clone()));
                }
            }
        };

        if require_complete {
            for shard in self.active.lock_shards() {
                collect(shard);
            }
        } else {
            // Only try to lock each shard; if a shard is already locked, bail out with
            // `None` instead of risking a deadlock.
            for shard in self.active.try_lock_shards() {
                collect(shard?);
            }
        }

        // Build the `QueryJobInfo`s after the shard locks have been released, since
        // `make_query` may itself execute queries.
        for (key, job) in active {
            let query = make_query(qcx, key);
            jobs.insert(job.id, QueryJobInfo { query, job });
        }

        Some(())
    }
}

impl<K> Default for QueryState<K> {
    fn default() -> QueryState<K> {
        QueryState { active: Default::default() }
    }
}

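/// A guard that owns the responsibility to finish an active query entry: `complete` stores
/// the result and removes the entry, while dropping the guard instead poisons the entry.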
struct JobOwner<'tcx, K>
where
    K: Eq + Hash + Copy,
{
    state: &'tcx QueryState<K>,
    key: K,
}

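/// Emits a diagnostic for the given query cycle and recovers a value for the query according
/// to its cycle-handling strategy.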
#[cold]
#[inline(never)]
fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
    handle_cycle_error(query, qcx, &cycle_error, error)
}

fn handle_cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    cycle_error: &CycleError,
    error: Diag<'_>,
) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    use HandleCycleError::*;
    match query.handle_cycle_error() {
        Error => {
            let guar = error.emit();
            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
        }
        Fatal => {
            error.emit();
            qcx.dep_context().sess().dcx().abort_if_errors();
            unreachable!()
        }
        DelayBug => {
            let guar = error.delay_as_bug();
            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
        }
        Stash => {
            let guar = if let Some(root) = cycle_error.cycle.first()
                && let Some(span) = root.query.span
            {
                error.stash(span, StashKey::Cycle).unwrap()
            } else {
                error.emit()
            };
            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
        }
    }
}

impl<'tcx, K> JobOwner<'tcx, K>
where
    K: Eq + Hash + Copy,
{
    /// Completes the query by storing `result` in the query cache, removing the active
    /// entry, and signalling any waiters. Forgets `self` so its `Drop` impl does not
    /// poison the entry.
    fn complete<C>(self, cache: &C, key_hash: u64, result: C::Value, dep_node_index: DepNodeIndex)
    where
        C: QueryCache<Key = K>,
    {
        let key = self.key;
        let state = self.state;

        // Forget ourself so our destructor won't poison the query.
        mem::forget(self);

        // Store the result in the cache before removing the active entry, so that threads
        // woken up below can find the value there.
        cache.complete(key, result, dep_node_index);

        let job = {
            let mut shard = state.active.lock_shard_by_hash(key_hash);
            match shard.find_entry(key_hash, equivalent_key(&key)) {
                Err(_) => None,
                Ok(occupied) => Some(occupied.remove().0.1),
            }
        };
        let job = job.expect("active query job entry").expect_job();

        job.signal_complete();
    }
}

impl<'tcx, K> Drop for JobOwner<'tcx, K>
where
    K: Eq + Hash + Copy,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // This job failed to execute the query (e.g. the provider panicked): poison the
        // entry so other threads see the failure instead of waiting forever.
        let state = self.state;
        let job = {
            let key_hash = sharded::make_hash(&self.key);
            let mut shard = state.active.lock_shard_by_hash(key_hash);
            match shard.find_entry(key_hash, equivalent_key(&self.key)) {
                Err(_) => panic!(),
                Ok(occupied) => {
                    let ((key, value), vacant) = occupied.remove();
                    vacant.insert((key, QueryResult::Poisoned));
                    value.expect_job()
                }
            }
        };
        // Also signal completion so that waiters wake up; they will then observe the
        // poisoned entry.
        job.signal_complete();
    }
}

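/// Describes a detected query cycle.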
#[derive(Clone, Debug)]
pub struct CycleError {
    /// The query (and the span) that made use of the cycle, if known.
    pub usage: Option<(Span, QueryStackFrame)>,
    /// The stack of queries forming the cycle.
    pub cycle: Vec<QueryInfo>,
}

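/// Checks whether the query result is already in the in-memory cache; on a hit, registers a
/// read of the cached dep-node index and returns the value.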
#[inline(always)]
pub fn try_get_cached<Tcx, C>(tcx: Tcx, cache: &C, key: &C::Key) -> Option<C::Value>
where
    C: QueryCache,
    Tcx: DepContext,
{
    match cache.lookup(key) {
        Some((value, index)) => {
            tcx.profiler().query_cache_hit(index.into());
            tcx.dep_graph().read_index(index);
            Some(value)
        }
        None => None,
    }
}

#[cold]
#[inline(never)]
fn cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    try_execute: QueryJobId,
    span: Span,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Collect the currently active queries so the cycle can be reconstructed and reported.
    let query_map = qcx.collect_active_jobs(false).expect("failed to collect active queries");

    let error = try_execute.find_cycle_in_stack(query_map, &qcx.current_query_job(), span);
    (mk_cycle(query, qcx, error), None)
}

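/// Blocks until the thread currently executing this query has finished, then reads the
/// result from the cache (or reports a cycle error if the wait is detected to form a cycle).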
#[inline(always)]
fn wait_for_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    latch: QueryLatch,
    current: Option<QueryJobId>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Record how long we block on the other thread in the self-profiler.
    let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();

    // Wait for the query running on the other thread to finish, or for a cycle to be
    // detected among the waiting threads.
    let result = latch.wait_on(qcx, current, span);

    match result {
        Ok(()) => {
            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
                outline(|| {
                    // The result was not in the cache: check whether the entry was poisoned
                    // by a panic on the other thread.
                    let key_hash = sharded::make_hash(&key);
                    let shard = query.query_state(qcx).active.lock_shard_by_hash(key_hash);
                    match shard.find(key_hash, equivalent_key(&key)) {
                        // The query was poisoned; abort this thread with a fatal error too.
                        Some((_, QueryResult::Poisoned)) => FatalError.raise(),
                        _ => panic!(
                            "query '{}' result must be in the cache or the query must be poisoned after a wait",
                            query.name()
                        ),
                    }
                })
            };

            qcx.dep_context().profiler().query_cache_hit(index.into());
            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());

            (v, Some(index))
        }
        Err(cycle) => (mk_cycle(query, qcx, cycle), None),
    }
}

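/// Claims the query key in [`QueryState`] and executes the job, or, if another job already
/// holds the key, waits for it (parallel compiler) or reports a cycle.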
#[inline(never)]
fn try_execute_query<Q, Qcx, const INCR: bool>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    dep_node: Option<DepNode>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let state = query.query_state(qcx);
    let key_hash = sharded::make_hash(&key);
    let mut state_lock = state.active.lock_shard_by_hash(key_hash);

    // With the parallel compiler, re-check the query cache while holding the shard lock:
    // another thread may have completed the query between the caller's cache lookup and
    // acquiring the lock.
    if qcx.dep_context().sess().threads() > 1 {
        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
            qcx.dep_context().profiler().query_cache_hit(index.into());
            return (value, Some(index));
        }
    }

    let current_job_id = qcx.current_query_job();

    match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
        Entry::Vacant(entry) => {
            // Nobody is running this query yet: claim it and execute the job ourselves.
            let id = qcx.next_job_id();
            let job = QueryJob::new(id, span, current_job_id);
            entry.insert((key, QueryResult::Started(job)));

            // Release the shard lock before executing the query so other queries on this
            // shard can make progress.
            drop(state_lock);

            execute_job::<_, _, INCR>(query, qcx, state, key, key_hash, id, dep_node)
        }
        Entry::Occupied(mut entry) => {
            match &mut entry.get_mut().1 {
                QueryResult::Started(job) => {
                    if sync::is_dyn_thread_safe() {
                        // Grab the latch while holding the lock, then block on it.
                        let latch = job.latch();
                        drop(state_lock);

                        return wait_for_query(query, qcx, span, key, latch, current_job_id);
                    }

                    let id = job.id;
                    drop(state_lock);

                    // If we cannot wait on another thread, the query must already be on our
                    // own stack, i.e. we have a cycle.
                    cycle_error(query, qcx, id, span)
                }
                QueryResult::Poisoned => FatalError.raise(),
            }
        }
    }
}

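/// Runs the claimed query job: computes the result (incrementally if `INCR`), stores it in
/// the query cache, and completes the job so waiters are released.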
#[inline(always)]
fn execute_job<Q, Qcx, const INCR: bool>(
    query: Q,
    qcx: Qcx,
    state: &QueryState<Q::Key>,
    key: Q::Key,
    key_hash: u64,
    id: QueryJobId,
    dep_node: Option<DepNode>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Use `JobOwner` so the query entry is poisoned if executing the provider panics.
    let job_owner = JobOwner { state, key };

    debug_assert_eq!(qcx.dep_context().dep_graph().is_fully_enabled(), INCR);

    let (result, dep_node_index) = if INCR {
        execute_job_incr(
            query,
            qcx,
            qcx.dep_context().dep_graph().data().unwrap(),
            key,
            dep_node,
            id,
        )
    } else {
        execute_job_non_incr(query, qcx, key, id)
    };

    let cache = query.query_cache(qcx);
    if query.feedable() {
        if let Some((cached_result, _)) = cache.lookup(&key) {
            // A value was already fed for this key: check that the freshly computed result
            // is consistent with it.
            let Some(hasher) = query.hash_result() else {
                panic!(
                    "no_hash fed query later has its value computed.\n\
                    Remove `no_hash` modifier to allow recomputation.\n\
                    The already cached value: {}",
                    (query.format_value())(&cached_result)
                );
            };

            let (old_hash, new_hash) = qcx.dep_context().with_stable_hashing_context(|mut hcx| {
                (hasher(&mut hcx, &cached_result), hasher(&mut hcx, &result))
            });
            let formatter = query.format_value();
            if old_hash != new_hash {
                // A mismatch is only tolerated if compilation has already produced errors.
                assert!(
                    qcx.dep_context().sess().dcx().has_errors().is_some(),
                    "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
                    computed={:#?}\nfed={:#?}",
                    query.dep_kind(),
                    key,
                    formatter(&result),
                    formatter(&cached_result),
                );
            }
        }
    }
    job_owner.complete(cache, key_hash, result, dep_node_index);

    (result, Some(dep_node_index))
}

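/// Executes a query when the dep graph is disabled (non-incremental builds); the result is
/// always freshly computed and assigned a virtual dep-node index.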
#[inline(always)]
fn execute_job_non_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: Q::Key,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());

    // In debug builds, exercise the key's fingerprinting code even though the fingerprint
    // itself is unused here.
    if cfg!(debug_assertions) {
        let _ = key.to_fingerprint(*qcx.dep_context());
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();
    let result = qcx.start_query(job_id, query.depth_limit(), || query.compute(qcx, key));
    let dep_node_index = qcx.dep_context().dep_graph().next_virtual_depnode_index();
    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Likewise, exercise result hashing in debug builds, even though the hash is unused.
    if cfg!(debug_assertions)
        && let Some(hash_result) = query.hash_result()
    {
        qcx.dep_context().with_stable_hashing_context(|mut hcx| {
            hash_result(&mut hcx, &result);
        });
    }

    (result, dep_node_index)
}

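/// Executes a query with the dep graph enabled: for non-anonymous, non-`eval_always` queries
/// it first tries to mark the dep node green and reuse a cached result, and only recomputes
/// the query if that fails.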
#[inline(always)]
fn execute_job_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    dep_graph_data: &DepGraphData<Qcx::Deps>,
    key: Q::Key,
    mut dep_node_opt: Option<DepNode>,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Only non-anonymous, non-`eval_always` queries can potentially be marked green and
    // reused from the previous session.
    if !query.anon() && !query.eval_always() {
        let dep_node =
            dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));

        if let Some(ret) = qcx.start_query(job_id, false, || {
            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, dep_node)
        }) {
            return ret;
        }
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();

    let (result, dep_node_index) = qcx.start_query(job_id, query.depth_limit(), || {
        if query.anon() {
            return dep_graph_data.with_anon_task_inner(
                *qcx.dep_context(),
                query.dep_kind(),
                || query.compute(qcx, key),
            );
        }

        // Construct the dep node if the caller didn't supply one.
        let dep_node =
            dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));

        dep_graph_data.with_task(
            dep_node,
            (qcx, query),
            key,
            |(qcx, query), key| query.compute(qcx, key),
            query.hash_result(),
        )
    });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    (result, dep_node_index)
}

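/// Tries to mark the query's dep node green and load its cached result from disk, falling
/// back to recomputing the query (with dependency tracking ignored) when no on-disk result is
/// available. Returns `None` if the node could not be marked green.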
#[inline(always)]
fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
    query: Q,
    dep_graph_data: &DepGraphData<Qcx::Deps>,
    qcx: Qcx,
    key: &Q::Key,
    dep_node: &DepNode,
) -> Option<(Q::Value, DepNodeIndex)>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Try to mark the dep node green; if this fails, the caller re-executes the query from
    // scratch.
    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, dep_node)?;

    debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));

    // The node is green: first try to load the cached result from disk.
    if let Some(result) = query.try_load_from_disk(qcx, key, prev_dep_node_index, dep_node_index) {
        if std::intrinsics::unlikely(qcx.dep_context().sess().opts.unstable_opts.query_dep_graph) {
            dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
        }

        // Verify roughly one in 32 loaded results against a freshly computed hash, or all of
        // them when `-Z incremental-verify-ich` is enabled.
        let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
        let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32);
        if std::intrinsics::unlikely(
            try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
        ) {
            incremental_verify_ich(
                *qcx.dep_context(),
                dep_graph_data,
                &result,
                prev_dep_node_index,
                query.hash_result(),
                query.format_value(),
            );
        }

        return Some((result, dep_node_index));
    }

    // Sanity check that we did not miss an available on-disk cache entry.
    debug_assert!(
        !query.cache_on_disk(*qcx.dep_context(), key)
            || !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
        "missing on-disk cache entry for {dep_node:?}"
    );

    debug_assert!(
        !query.loadable_from_disk(qcx, key, prev_dep_node_index),
        "missing on-disk cache entry for loadable {dep_node:?}"
    );

    // No result could be loaded from disk, so recompute the query with dependency tracking
    // disabled (`with_ignore`), since the node was already marked green above.
    let prof_timer = qcx.dep_context().profiler().query_provider();

    let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Check that the recomputed result matches the fingerprint recorded in the previous
    // session's dep graph.
    incremental_verify_ich(
        *qcx.dep_context(),
        dep_graph_data,
        &result,
        prev_dep_node_index,
        query.hash_result(),
        query.format_value(),
    );

    Some((result, dep_node_index))
}

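/// Verifies that `result` hashes to the fingerprint recorded for `prev_index` in the previous
/// session's dep graph, reporting unstable fingerprints otherwise.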
#[inline]
#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
pub(crate) fn incremental_verify_ich<Tcx, V>(
    tcx: Tcx,
    dep_graph_data: &DepGraphData<Tcx::Deps>,
    result: &V,
    prev_index: SerializedDepNodeIndex,
    hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
    format_value: fn(&V) -> String,
) where
    Tcx: DepContext,
{
    if !dep_graph_data.is_index_green(prev_index) {
        incremental_verify_ich_not_green(tcx, prev_index)
    }

    let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
    });

    let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);

    if new_hash != old_hash {
        incremental_verify_ich_failed(tcx, prev_index, &|| format_value(result));
    }
}

#[cold]
#[inline(never)]
fn incremental_verify_ich_not_green<Tcx>(tcx: Tcx, prev_index: SerializedDepNodeIndex)
where
    Tcx: DepContext,
{
    panic!(
        "fingerprint for green query instance not loaded from cache: {:?}",
        tcx.dep_graph().data().unwrap().prev_node_of(prev_index)
    )
}

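/// Reports an "unstable fingerprints" error. This is a cold path, kept non-generic by taking
/// the formatted result as a `&dyn Fn() -> String`.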
#[cold]
#[inline(never)]
fn incremental_verify_ich_failed<Tcx>(
    tcx: Tcx,
    prev_index: SerializedDepNodeIndex,
    result: &dyn Fn() -> String,
) where
    Tcx: DepContext,
{
    // Guard against this function being re-entered while reporting the failure (formatting
    // the result or emitting the error may hit another fingerprint mismatch); in that case
    // emit a terse `Reentrant` error instead of recursing.
    thread_local! {
        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
    };

    let old_in_panic = INSIDE_VERIFY_PANIC.replace(true);

    if old_in_panic {
        tcx.sess().dcx().emit_err(crate::error::Reentrant);
    } else {
        let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
            format!("`cargo clean -p {crate_name}` or `cargo clean`")
        } else {
            "`cargo clean`".to_string()
        };

        let dep_node = tcx.dep_graph().data().unwrap().prev_node_of(prev_index);
        tcx.sess().dcx().emit_err(crate::error::IncrementCompilation {
            run_cmd,
            dep_node: format!("{dep_node:?}"),
        });
        panic!("Found unstable fingerprints for {dep_node:?}: {}", result());
    }

    INSIDE_VERIFY_PANIC.set(old_in_panic);
}

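/// Decides whether an `ensure`-mode query must actually run: returns `true` if the query has
/// to be executed (it is `eval_always`, could not be marked green, or, when `check_cache` is
/// set, its result is not loadable from disk), together with the dep node constructed for it
/// when one is available.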
#[inline(never)]
fn ensure_must_run<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: &Q::Key,
    check_cache: bool,
) -> (bool, Option<DepNode>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    if query.eval_always() {
        return (true, None);
    }

    // Ensuring an anonymous query makes no sense.
    assert!(!query.anon());

    let dep_node = query.construct_dep_node(*qcx.dep_context(), key);

    let dep_graph = qcx.dep_context().dep_graph();
    let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
        None => {
            // The dep node could not be marked green, so the query must run.
            return (true, Some(dep_node));
        }
        Some((serialized_dep_node_index, dep_node_index)) => {
            dep_graph.read_index(dep_node_index);
            qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            serialized_dep_node_index
        }
    };

    // The caller only wants the query's side effects, so we are done unless it also asked
    // us to check that the cached result is loadable.
    if !check_cache {
        return (false, None);
    }

    let loadable = query.loadable_from_disk(qcx, key, serialized_dep_node_index);
    (!loadable, Some(dep_node))
}

#[derive(Debug)]
pub enum QueryMode {
    Get,
    Ensure { check_cache: bool },
}

#[inline(always)]
pub fn get_query_non_incr<Q, Qcx>(query: Q, qcx: Qcx, span: Span, key: Q::Key) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());

    ensure_sufficient_stack(|| try_execute_query::<Q, Qcx, false>(query, qcx, span, key, None).0)
}

#[inline(always)]
pub fn get_query_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    mode: QueryMode,
) -> Option<Q::Value>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(qcx.dep_context().dep_graph().is_fully_enabled());

    let dep_node = if let QueryMode::Ensure { check_cache } = mode {
        let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
        if !must_run {
            return None;
        }
        dep_node
    } else {
        None
    };

    let (result, dep_node_index) = ensure_sufficient_stack(|| {
        try_execute_query::<_, _, true>(query, qcx, span, key, dep_node)
    });
    if let Some(dep_node_index) = dep_node_index {
        qcx.dep_context().dep_graph().read_index(dep_node_index)
    }
    Some(result)
}

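/// Executes the query for `key` with the supplied `dep_node` unless its result is already in
/// the in-memory cache.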
pub fn force_query<Q, Qcx>(query: Q, qcx: Qcx, key: Q::Key, dep_node: DepNode)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // If the result is already cached (e.g. the query was executed concurrently), there is
    // nothing left to force.
    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
        qcx.dep_context().profiler().query_cache_hit(index.into());
        return;
    }

    debug_assert!(!query.anon());

    ensure_sufficient_stack(|| {
        try_execute_query::<_, _, true>(query, qcx, DUMMY_SP, key, Some(dep_node))
    });
}