1use std::hash::Hash;
2use std::mem;
3
4use rustc_data_structures::hash_table::{Entry, HashTable};
5use rustc_data_structures::stack::ensure_sufficient_stack;
6use rustc_data_structures::sync::{DynSend, DynSync};
7use rustc_data_structures::{outline, sharded, sync};
8use rustc_errors::{FatalError, StashKey};
9use rustc_middle::dep_graph::{DepGraphData, DepNodeKey, SerializedDepNodeIndex};
10use rustc_middle::query::plumbing::QueryVTable;
11use rustc_middle::query::{
12 ActiveKeyStatus, CycleError, CycleErrorHandling, EnsureMode, QueryCache, QueryJob, QueryJobId,
13 QueryKey, QueryLatch, QueryMode, QueryState,
14};
15use rustc_middle::ty::TyCtxt;
16use rustc_middle::verify_ich::incremental_verify_ich;
17use rustc_span::{DUMMY_SP, Span};
18
19use crate::collect_active_jobs_from_all_queries;
20use crate::dep_graph::{DepNode, DepNodeIndex};
21use crate::job::{QueryJobInfo, QueryJobMap, find_cycle_in_stack, report_cycle};
22use crate::plumbing::{current_query_job, next_job_id, start_query};
23
/// Builds a predicate that matches a `(key, value)` table slot whose key
/// equals `k`; used with the raw `HashTable` lookups below.
#[inline]
fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
    move |(candidate, _)| candidate == k
}
28
29fn expect_job<'tcx>(status: ActiveKeyStatus<'tcx>) -> QueryJob<'tcx> {
32 match status {
33 ActiveKeyStatus::Started(job) => job,
34 ActiveKeyStatus::Poisoned => {
35 {
::core::panicking::panic_fmt(format_args!("job for query failed to start and was poisoned"));
}panic!("job for query failed to start and was poisoned")
36 }
37 }
38}
39
40pub(crate) fn all_inactive<'tcx, K>(state: &QueryState<'tcx, K>) -> bool {
41 state.active.lock_shards().all(|shard| shard.is_empty())
42}
43
44pub(crate) fn gather_active_jobs<'tcx, C>(
52 query: &'tcx QueryVTable<'tcx, C>,
53 tcx: TyCtxt<'tcx>,
54 require_complete: bool,
55 job_map_out: &mut QueryJobMap<'tcx>, ) -> Option<()>
57where
58 C: QueryCache<Key: QueryKey + DynSend + DynSync>,
59 QueryVTable<'tcx, C>: DynSync,
60{
61 let mut active = Vec::new();
62
63 let mut gather_shard_jobs = |shard: &HashTable<(C::Key, ActiveKeyStatus<'tcx>)>| {
65 for (k, v) in shard.iter() {
66 if let ActiveKeyStatus::Started(ref job) = *v {
67 active.push((*k, job.clone()));
68 }
69 }
70 };
71
72 if require_complete {
74 for shard in query.state.active.lock_shards() {
75 gather_shard_jobs(&shard);
76 }
77 } else {
78 for shard in query.state.active.try_lock_shards() {
81 match shard {
85 None => {
86 {
use ::tracing::__macro_support::Callsite as _;
static __CALLSITE: ::tracing::callsite::DefaultCallsite =
{
static META: ::tracing::Metadata<'static> =
{
::tracing_core::metadata::Metadata::new("event compiler/rustc_query_impl/src/execution.rs:86",
"rustc_query_impl::execution", ::tracing::Level::WARN,
::tracing_core::__macro_support::Option::Some("compiler/rustc_query_impl/src/execution.rs"),
::tracing_core::__macro_support::Option::Some(86u32),
::tracing_core::__macro_support::Option::Some("rustc_query_impl::execution"),
::tracing_core::field::FieldSet::new(&["message"],
::tracing_core::callsite::Identifier(&__CALLSITE)),
::tracing::metadata::Kind::EVENT)
};
::tracing::callsite::DefaultCallsite::new(&META)
};
let enabled =
::tracing::Level::WARN <= ::tracing::level_filters::STATIC_MAX_LEVEL
&&
::tracing::Level::WARN <=
::tracing::level_filters::LevelFilter::current() &&
{
let interest = __CALLSITE.interest();
!interest.is_never() &&
::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
interest)
};
if enabled {
(|value_set: ::tracing::field::ValueSet|
{
let meta = __CALLSITE.metadata();
::tracing::Event::dispatch(meta, &value_set);
;
})({
#[allow(unused_imports)]
use ::tracing::field::{debug, display, Value};
let mut iter = __CALLSITE.metadata().fields().iter();
__CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
::tracing::__macro_support::Option::Some(&format_args!("Failed to collect active jobs for query with name `{0}`!",
query.name) as &dyn Value))])
});
} else { ; }
};tracing::warn!(
87 "Failed to collect active jobs for query with name `{}`!",
88 query.name
89 );
90 return None;
91 }
92 Some(shard) => gather_shard_jobs(&shard),
93 }
94 }
95 }
96
97 for (key, job) in active {
100 let frame = crate::plumbing::create_deferred_query_stack_frame(tcx, query, key);
101 job_map_out.insert(job.id, QueryJobInfo { frame, job });
102 }
103
104 Some(())
105}
106
/// Guard representing this thread's claim on an in-flight query execution.
/// `complete` caches the result and removes the active entry; if the guard
/// is instead dropped (the provider panicked), `Drop` poisons the entry.
struct ActiveJobGuard<'tcx, K>
where
    K: Eq + Hash + Copy,
{
    /// Per-query state holding the sharded map of active keys.
    state: &'tcx QueryState<'tcx, K>,
    /// Key of the query instance this guard owns.
    key: K,
    /// Precomputed hash of `key`, used to select the shard.
    key_hash: u64,
}
120
/// Reports a query cycle and produces a placeholder value so compilation can
/// continue past it, according to the query's configured handling.
#[cold]
#[inline(never)]
fn mk_cycle<'tcx, C: QueryCache>(
    query: &'tcx QueryVTable<'tcx, C>,
    tcx: TyCtxt<'tcx>,
    cycle_error: CycleError,
) -> C::Value {
    let error = report_cycle(tcx.sess, &cycle_error);
    match query.cycle_error_handling {
        // Emit the diagnostic immediately.
        CycleErrorHandling::Error => {
            let guar = error.emit();
            (query.value_from_cycle_error)(tcx, cycle_error, guar)
        }
        // Cycle is expected to be reported through some other path; delay.
        CycleErrorHandling::DelayBug => {
            let guar = error.delay_as_bug();
            (query.value_from_cycle_error)(tcx, cycle_error, guar)
        }
        // Stash the diagnostic at the cycle root's span (if it has one) so a
        // later pass can cancel or emit it; otherwise emit right away.
        CycleErrorHandling::Stash => {
            let guar = if let Some(root) = cycle_error.cycle.first()
                && let Some(span) = root.frame.info.span
            {
                error.stash(span, StashKey::Cycle).unwrap()
            } else {
                error.emit()
            };
            (query.value_from_cycle_error)(tcx, cycle_error, guar)
        }
    }
}
150
151impl<'tcx, K> ActiveJobGuard<'tcx, K>
152where
153 K: Eq + Hash + Copy,
154{
155 fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
158 where
159 C: QueryCache<Key = K>,
160 {
161 let Self { state, key, key_hash }: Self = self;
164 mem::forget(self);
165
166 cache.complete(key, result, dep_node_index);
169
170 let job = {
171 let mut shard = state.active.lock_shard_by_hash(key_hash);
176 match shard.find_entry(key_hash, equivalent_key(&key)) {
177 Err(_) => None,
178 Ok(occupied) => Some(occupied.remove().0.1),
179 }
180 };
181 let job = expect_job(job.expect("active query job entry"));
182
183 job.signal_complete();
184 }
185}
186
187impl<'tcx, K> Drop for ActiveJobGuard<'tcx, K>
188where
189 K: Eq + Hash + Copy,
190{
191 #[inline(never)]
192 #[cold]
193 fn drop(&mut self) {
194 let Self { state, key, key_hash } = *self;
196 let job = {
197 let mut shard = state.active.lock_shard_by_hash(key_hash);
198 match shard.find_entry(key_hash, equivalent_key(&key)) {
199 Err(_) => ::core::panicking::panic("explicit panic")panic!(),
200 Ok(occupied) => {
201 let ((key, value), vacant) = occupied.remove();
202 vacant.insert((key, ActiveKeyStatus::Poisoned));
203 expect_job(value)
204 }
205 }
206 };
207 job.signal_complete();
210 }
211}
212
213#[cold]
214#[inline(never)]
215fn cycle_error<'tcx, C: QueryCache>(
216 query: &'tcx QueryVTable<'tcx, C>,
217 tcx: TyCtxt<'tcx>,
218 try_execute: QueryJobId,
219 span: Span,
220) -> (C::Value, Option<DepNodeIndex>) {
221 let job_map = collect_active_jobs_from_all_queries(tcx, false)
224 .ok()
225 .expect("failed to collect active queries");
226
227 let error = find_cycle_in_stack(try_execute, job_map, ¤t_query_job(tcx), span);
228 (mk_cycle(query, tcx, error.lift()), None)
229}
230
231#[inline(always)]
232fn wait_for_query<'tcx, C: QueryCache>(
233 query: &'tcx QueryVTable<'tcx, C>,
234 tcx: TyCtxt<'tcx>,
235 span: Span,
236 key: C::Key,
237 latch: QueryLatch<'tcx>,
238 current: Option<QueryJobId>,
239) -> (C::Value, Option<DepNodeIndex>) {
240 let query_blocked_prof_timer = tcx.prof.query_blocked();
244
245 let result = latch.wait_on(tcx, current, span);
248
249 match result {
250 Ok(()) => {
251 let Some((v, index)) = query.cache.lookup(&key) else {
252 outline(|| {
253 let key_hash = sharded::make_hash(&key);
256 let shard = query.state.active.lock_shard_by_hash(key_hash);
257 match shard.find(key_hash, equivalent_key(&key)) {
258 Some((_, ActiveKeyStatus::Poisoned)) => FatalError.raise(),
260 _ => {
::core::panicking::panic_fmt(format_args!("query \'{0}\' result must be in the cache or the query must be poisoned after a wait",
query.name));
}panic!(
261 "query '{}' result must be in the cache or the query must be poisoned after a wait",
262 query.name
263 ),
264 }
265 })
266 };
267
268 tcx.prof.query_cache_hit(index.into());
269 query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
270
271 (v, Some(index))
272 }
273 Err(cycle) => (mk_cycle(query, tcx, cycle.lift()), None),
274 }
275}
276
/// Looks the key up in the active map: if nobody else is executing this
/// query, claims it and runs it; otherwise either waits on the other thread
/// (parallel compiler) or reports a cycle (single-threaded, where an active
/// entry necessarily sits on our own stack).
#[inline(never)]
fn try_execute_query<'tcx, C: QueryCache, const INCR: bool>(
    query: &'tcx QueryVTable<'tcx, C>,
    tcx: TyCtxt<'tcx>,
    span: Span,
    key: C::Key,
    dep_node: Option<DepNode>,
) -> (C::Value, Option<DepNodeIndex>) {
    let key_hash = sharded::make_hash(&key);
    let mut state_lock = query.state.active.lock_shard_by_hash(key_hash);

    // With multiple threads, another thread may have completed the query
    // between the caller's cache probe and us taking the shard lock, so
    // re-check the cache while holding it.
    if tcx.sess.threads() > 1 {
        if let Some((value, index)) = query.cache.lookup(&key) {
            tcx.prof.query_cache_hit(index.into());
            return (value, Some(index));
        }
    }

    let current_job_id = current_query_job(tcx);

    match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
        // No one else is running this query: register our job and execute.
        Entry::Vacant(entry) => {
            let id = next_job_id(tcx);
            let job = QueryJob::new(id, span, current_job_id);
            entry.insert((key, ActiveKeyStatus::Started(job)));

            // Release the shard lock before the potentially long execution.
            drop(state_lock);

            execute_job::<C, INCR>(query, tcx, key, key_hash, id, dep_node)
        }
        Entry::Occupied(mut entry) => {
            match &mut entry.get_mut().1 {
                ActiveKeyStatus::Started(job) => {
                    if sync::is_dyn_thread_safe() {
                        // Parallel compiler: block on the other thread's job.
                        let latch = job.latch();
                        drop(state_lock);

                        wait_for_query(query, tcx, span, key, latch, current_job_id)
                    } else {
                        // Single-threaded: the active entry must be on our
                        // own stack, i.e. this is a query cycle.
                        let id = job.id;
                        drop(state_lock);

                        cycle_error(query, tcx, id, span)
                    }
                }
                // The thread that was executing this query panicked; abort.
                ActiveKeyStatus::Poisoned => FatalError.raise(),
            }
        }
    }
}
344
345#[inline(always)]
346fn execute_job<'tcx, C: QueryCache, const INCR: bool>(
347 query: &'tcx QueryVTable<'tcx, C>,
348 tcx: TyCtxt<'tcx>,
349 key: C::Key,
350 key_hash: u64,
351 id: QueryJobId,
352 dep_node: Option<DepNode>,
353) -> (C::Value, Option<DepNodeIndex>) {
354 let job_guard = ActiveJobGuard { state: &query.state, key, key_hash };
357
358 if true {
match (&tcx.dep_graph.is_fully_enabled(), &INCR) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val,
&*right_val, ::core::option::Option::None);
}
}
};
};debug_assert_eq!(tcx.dep_graph.is_fully_enabled(), INCR);
359
360 let (value, dep_node_index) = if INCR {
362 execute_job_incr(query, tcx, key, dep_node, id)
363 } else {
364 execute_job_non_incr(query, tcx, key, id)
365 };
366
367 let cache = &query.cache;
368 if query.feedable {
369 if let Some((cached_value, _)) = cache.lookup(&key) {
374 let Some(hash_value_fn) = query.hash_value_fn else {
375 {
::core::panicking::panic_fmt(format_args!("no_hash fed query later has its value computed.\nRemove `no_hash` modifier to allow recomputation.\nThe already cached value: {0}",
(query.format_value)(&cached_value)));
};panic!(
376 "no_hash fed query later has its value computed.\n\
377 Remove `no_hash` modifier to allow recomputation.\n\
378 The already cached value: {}",
379 (query.format_value)(&cached_value)
380 );
381 };
382
383 let (old_hash, new_hash) = tcx.with_stable_hashing_context(|mut hcx| {
384 (hash_value_fn(&mut hcx, &cached_value), hash_value_fn(&mut hcx, &value))
385 });
386 let formatter = query.format_value;
387 if old_hash != new_hash {
388 if !tcx.dcx().has_errors().is_some() {
{
::core::panicking::panic_fmt(format_args!("Computed query value for {0:?}({1:?}) is inconsistent with fed value,\ncomputed={2:#?}\nfed={3:#?}",
query.dep_kind, key, formatter(&value),
formatter(&cached_value)));
}
};assert!(
391 tcx.dcx().has_errors().is_some(),
392 "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
393 computed={:#?}\nfed={:#?}",
394 query.dep_kind,
395 key,
396 formatter(&value),
397 formatter(&cached_value),
398 );
399 }
400 }
401 }
402
403 job_guard.complete(cache, value, dep_node_index);
405
406 (value, Some(dep_node_index))
407}
408
409#[inline(always)]
411fn execute_job_non_incr<'tcx, C: QueryCache>(
412 query: &'tcx QueryVTable<'tcx, C>,
413 tcx: TyCtxt<'tcx>,
414 key: C::Key,
415 job_id: QueryJobId,
416) -> (C::Value, DepNodeIndex) {
417 if true {
if !!tcx.dep_graph.is_fully_enabled() {
::core::panicking::panic("assertion failed: !tcx.dep_graph.is_fully_enabled()")
};
};debug_assert!(!tcx.dep_graph.is_fully_enabled());
418
419 let prof_timer = tcx.prof.query_provider();
420 let value =
422 start_query(tcx, job_id, query.depth_limit, || (query.invoke_provider_fn)(tcx, key));
423 let dep_node_index = tcx.dep_graph.next_virtual_depnode_index();
424 prof_timer.finish_with_query_invocation_id(dep_node_index.into());
425
426 if truecfg!(debug_assertions) {
428 let _ = key.to_fingerprint(tcx);
429 if let Some(hash_value_fn) = query.hash_value_fn {
430 tcx.with_stable_hashing_context(|mut hcx| {
431 hash_value_fn(&mut hcx, &value);
432 });
433 }
434 }
435
436 (value, dep_node_index)
437}
438
/// Incremental-mode execution: first tries to mark the dep node green and
/// reuse the previous session's result; otherwise runs the provider inside a
/// dep-graph task so its reads are recorded.
#[inline(always)]
fn execute_job_incr<'tcx, C: QueryCache>(
    query: &'tcx QueryVTable<'tcx, C>,
    tcx: TyCtxt<'tcx>,
    key: C::Key,
    mut dep_node_opt: Option<DepNode>,
    job_id: QueryJobId,
) -> (C::Value, DepNodeIndex) {
    let dep_graph_data =
        tcx.dep_graph.data().expect("should always be present in incremental mode");

    // Anonymous and eval_always queries can never be marked green, so skip
    // the try-mark-green fast path for them.
    if !query.anon && !query.eval_always {
        let dep_node =
            dep_node_opt.get_or_insert_with(|| DepNode::construct(tcx, query.dep_kind, &key));

        if let Some(ret) = start_query(tcx, job_id, false, || try {
            let (prev_index, dep_node_index) = dep_graph_data.try_mark_green(tcx, dep_node)?;
            let value = load_from_disk_or_invoke_provider_green(
                tcx,
                dep_graph_data,
                query,
                &key,
                dep_node,
                prev_index,
                dep_node_index,
            );
            (value, dep_node_index)
        }) {
            return ret;
        }
    }

    let prof_timer = tcx.prof.query_provider();

    let (result, dep_node_index) = start_query(tcx, job_id, query.depth_limit, || {
        if query.anon {
            // Anonymous queries get a fresh node identified only by the
            // dependencies read during execution.
            return dep_graph_data.with_anon_task_inner(tcx, query.dep_kind, || {
                (query.invoke_provider_fn)(tcx, key)
            });
        }

        let dep_node =
            dep_node_opt.unwrap_or_else(|| DepNode::construct(tcx, query.dep_kind, &key));

        dep_graph_data.with_task(
            dep_node,
            tcx,
            (query, key),
            |tcx, (query, key)| (query.invoke_provider_fn)(tcx, key),
            query.hash_value_fn,
        )
    });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    (result, dep_node_index)
}
502
503#[inline(always)]
507fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>(
508 tcx: TyCtxt<'tcx>,
509 dep_graph_data: &DepGraphData,
510 query: &'tcx QueryVTable<'tcx, C>,
511 key: &C::Key,
512 dep_node: &DepNode,
513 prev_index: SerializedDepNodeIndex,
514 dep_node_index: DepNodeIndex,
515) -> C::Value {
516 if true {
if !dep_graph_data.is_index_green(prev_index) {
::core::panicking::panic("assertion failed: dep_graph_data.is_index_green(prev_index)")
};
};debug_assert!(dep_graph_data.is_index_green(prev_index));
520
521 if let Some(value) = (query.try_load_from_disk_fn)(tcx, key, prev_index, dep_node_index) {
524 if std::intrinsics::unlikely(tcx.sess.opts.unstable_opts.query_dep_graph) {
525 dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
526 }
527
528 let prev_fingerprint = dep_graph_data.prev_value_fingerprint_of(prev_index);
529 let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32);
537 if std::intrinsics::unlikely(
538 try_verify || tcx.sess.opts.unstable_opts.incremental_verify_ich,
539 ) {
540 incremental_verify_ich(
541 tcx,
542 dep_graph_data,
543 &value,
544 prev_index,
545 query.hash_value_fn,
546 query.format_value,
547 );
548 }
549
550 return value;
551 }
552
553 if true {
if !(!(query.will_cache_on_disk_for_key_fn)(tcx, key) ||
!tcx.key_fingerprint_style(dep_node.kind).is_maybe_recoverable())
{
{
::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for {0:?}",
dep_node));
}
};
};debug_assert!(
556 !(query.will_cache_on_disk_for_key_fn)(tcx, key)
557 || !tcx.key_fingerprint_style(dep_node.kind).is_maybe_recoverable(),
558 "missing on-disk cache entry for {dep_node:?}"
559 );
560
561 if true {
if !!(query.is_loadable_from_disk_fn)(tcx, key, prev_index) {
{
::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for loadable {0:?}",
dep_node));
}
};
};debug_assert!(
564 !(query.is_loadable_from_disk_fn)(tcx, key, prev_index),
565 "missing on-disk cache entry for loadable {dep_node:?}"
566 );
567
568 let prof_timer = tcx.prof.query_provider();
571
572 let value = tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, *key));
575
576 prof_timer.finish_with_query_invocation_id(dep_node_index.into());
577
578 incremental_verify_ich(
588 tcx,
589 dep_graph_data,
590 &value,
591 prev_index,
592 query.hash_value_fn,
593 query.format_value,
594 );
595
596 value
597}
598
/// Result of the `ensure` fast path: whether provider execution can be
/// skipped, and the already-built dep node to reuse when it cannot.
struct EnsureCanSkip {
    /// `true` when the green/loadable check satisfied the `ensure` caller.
    skip_execution: bool,
    /// Dep node constructed during the check, forwarded so the caller does
    /// not rebuild it.
    dep_node: Option<DepNode>,
}
608
609#[inline(never)]
616fn check_if_ensure_can_skip_execution<'tcx, C: QueryCache>(
617 query: &'tcx QueryVTable<'tcx, C>,
618 tcx: TyCtxt<'tcx>,
619 key: &C::Key,
620 ensure_mode: EnsureMode,
621) -> EnsureCanSkip {
622 if query.eval_always {
624 return EnsureCanSkip { skip_execution: false, dep_node: None };
625 }
626
627 if !!query.anon { ::core::panicking::panic("assertion failed: !query.anon") };assert!(!query.anon);
629
630 let dep_node = DepNode::construct(tcx, query.dep_kind, key);
631
632 let dep_graph = &tcx.dep_graph;
633 let serialized_dep_node_index = match dep_graph.try_mark_green(tcx, &dep_node) {
634 None => {
635 return EnsureCanSkip { skip_execution: false, dep_node: Some(dep_node) };
642 }
643 Some((serialized_dep_node_index, dep_node_index)) => {
644 dep_graph.read_index(dep_node_index);
645 tcx.prof.query_cache_hit(dep_node_index.into());
646 serialized_dep_node_index
647 }
648 };
649
650 match ensure_mode {
651 EnsureMode::Ok => {
652 EnsureCanSkip { skip_execution: true, dep_node: None }
656 }
657 EnsureMode::Done => {
658 let is_loadable = (query.is_loadable_from_disk_fn)(tcx, key, serialized_dep_node_index);
662 EnsureCanSkip { skip_execution: is_loadable, dep_node: Some(dep_node) }
663 }
664 }
665}
666
667#[inline(always)]
670pub(super) fn execute_query_non_incr_inner<'tcx, C: QueryCache>(
671 query: &'tcx QueryVTable<'tcx, C>,
672 tcx: TyCtxt<'tcx>,
673 span: Span,
674 key: C::Key,
675) -> C::Value {
676 if true {
if !!tcx.dep_graph.is_fully_enabled() {
::core::panicking::panic("assertion failed: !tcx.dep_graph.is_fully_enabled()")
};
};debug_assert!(!tcx.dep_graph.is_fully_enabled());
677
678 ensure_sufficient_stack(|| try_execute_query::<C, false>(query, tcx, span, key, None).0)
679}
680
681#[inline(always)]
684pub(super) fn execute_query_incr_inner<'tcx, C: QueryCache>(
685 query: &'tcx QueryVTable<'tcx, C>,
686 tcx: TyCtxt<'tcx>,
687 span: Span,
688 key: C::Key,
689 mode: QueryMode,
690) -> Option<C::Value> {
691 if true {
if !tcx.dep_graph.is_fully_enabled() {
::core::panicking::panic("assertion failed: tcx.dep_graph.is_fully_enabled()")
};
};debug_assert!(tcx.dep_graph.is_fully_enabled());
692
693 let dep_node: Option<DepNode> = match mode {
697 QueryMode::Ensure { ensure_mode } => {
698 let EnsureCanSkip { skip_execution, dep_node } =
699 check_if_ensure_can_skip_execution(query, tcx, &key, ensure_mode);
700 if skip_execution {
701 return None;
703 }
704 dep_node
705 }
706 QueryMode::Get => None,
707 };
708
709 let (result, dep_node_index) =
710 ensure_sufficient_stack(|| try_execute_query::<C, true>(query, tcx, span, key, dep_node));
711 if let Some(dep_node_index) = dep_node_index {
712 tcx.dep_graph.read_index(dep_node_index)
713 }
714 Some(result)
715}
716
717pub(crate) fn force_query<'tcx, C: QueryCache>(
718 query: &'tcx QueryVTable<'tcx, C>,
719 tcx: TyCtxt<'tcx>,
720 key: C::Key,
721 dep_node: DepNode,
722) {
723 if let Some((_, index)) = query.cache.lookup(&key) {
726 tcx.prof.query_cache_hit(index.into());
727 return;
728 }
729
730 if true {
if !!query.anon {
::core::panicking::panic("assertion failed: !query.anon")
};
};debug_assert!(!query.anon);
731
732 ensure_sufficient_stack(|| {
733 try_execute_query::<C, true>(query, tcx, DUMMY_SP, key, Some(dep_node))
734 });
735}