1use std::fmt::Debug;
2use std::hash::Hash;
3use std::io::Write;
4use std::iter;
5use std::num::NonZero;
6use std::sync::Arc;
78use parking_lot::{Condvar, Mutex};
9use rustc_data_structures::fx::{FxHashMap, FxHashSet};
10use rustc_errors::{Diag, DiagCtxtHandle};
11use rustc_hir::def::DefKind;
12use rustc_session::Session;
13use rustc_span::{DUMMY_SP, Span};
1415use super::{QueryStackDeferred, QueryStackFrameExtra};
16use crate::dep_graph::DepContext;
17use crate::error::CycleStack;
18use crate::query::plumbing::CycleError;
19use crate::query::{QueryContext, QueryStackFrame};
2021/// Represents a span and a query key.
22#[derive(#[automatically_derived]
impl<I: ::core::clone::Clone> ::core::clone::Clone for QueryInfo<I> {
#[inline]
fn clone(&self) -> QueryInfo<I> {
QueryInfo {
span: ::core::clone::Clone::clone(&self.span),
query: ::core::clone::Clone::clone(&self.query),
}
}
}Clone, #[automatically_derived]
impl<I: ::core::fmt::Debug> ::core::fmt::Debug for QueryInfo<I> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field2_finish(f, "QueryInfo",
"span", &self.span, "query", &&self.query)
}
}Debug)]
23pub struct QueryInfo<I> {
24/// The span corresponding to the reason for which this query was required.
25pub span: Span,
26pub query: QueryStackFrame<I>,
27}
2829impl<'tcx> QueryInfo<QueryStackDeferred<'tcx>> {
30pub(crate) fn lift<Qcx: QueryContext<'tcx>>(
31&self,
32 qcx: Qcx,
33 ) -> QueryInfo<QueryStackFrameExtra> {
34QueryInfo { span: self.span, query: self.query.lift(qcx) }
35 }
36}
3738pub type QueryMap<'tcx> = FxHashMap<QueryJobId, QueryJobInfo<'tcx>>;
/// A value uniquely identifying an active query job.
//
// `NonZero<u64>` lets `Option<QueryJobId>` use the niche and stay 8 bytes.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub struct QueryJobId(pub NonZero<u64>);
4344impl QueryJobId {
45fn query<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> QueryStackFrame<QueryStackDeferred<'tcx>> {
46map.get(&self).unwrap().query.clone()
47 }
4849fn span<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Span {
50map.get(&self).unwrap().job.span
51 }
5253fn parent<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Option<QueryJobId> {
54map.get(&self).unwrap().job.parent
55 }
5657fn latch<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Option<&'a QueryLatch<'tcx>> {
58map.get(&self).unwrap().job.latch.as_ref()
59 }
60}
6162#[derive(#[automatically_derived]
impl<'tcx> ::core::clone::Clone for QueryJobInfo<'tcx> {
#[inline]
fn clone(&self) -> QueryJobInfo<'tcx> {
QueryJobInfo {
query: ::core::clone::Clone::clone(&self.query),
job: ::core::clone::Clone::clone(&self.job),
}
}
}Clone, #[automatically_derived]
impl<'tcx> ::core::fmt::Debug for QueryJobInfo<'tcx> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field2_finish(f, "QueryJobInfo",
"query", &self.query, "job", &&self.job)
}
}Debug)]
63pub struct QueryJobInfo<'tcx> {
64pub query: QueryStackFrame<QueryStackDeferred<'tcx>>,
65pub job: QueryJob<'tcx>,
66}
6768/// Represents an active query job.
69#[derive(#[automatically_derived]
impl<'tcx> ::core::fmt::Debug for QueryJob<'tcx> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field4_finish(f, "QueryJob",
"id", &self.id, "span", &self.span, "parent", &self.parent,
"latch", &&self.latch)
}
}Debug)]
70pub struct QueryJob<'tcx> {
71pub id: QueryJobId,
7273/// The span corresponding to the reason for which this query was required.
74pub span: Span,
7576/// The parent query job which created this job and is implicitly waiting on it.
77pub parent: Option<QueryJobId>,
7879/// The latch that is used to wait on this job.
80latch: Option<QueryLatch<'tcx>>,
81}
8283impl<'tcx> Clonefor QueryJob<'tcx> {
84fn clone(&self) -> Self {
85Self { id: self.id, span: self.span, parent: self.parent, latch: self.latch.clone() }
86 }
87}
8889impl<'tcx> QueryJob<'tcx> {
90/// Creates a new query job.
91#[inline]
92pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
93QueryJob { id, span, parent, latch: None }
94 }
9596pub(super) fn latch(&mut self) -> QueryLatch<'tcx> {
97if self.latch.is_none() {
98self.latch = Some(QueryLatch::new());
99 }
100self.latch.as_ref().unwrap().clone()
101 }
102103/// Signals to waiters that the query is complete.
104 ///
105 /// This does nothing for single threaded rustc,
106 /// as there are no concurrent jobs which could be waiting on us
107#[inline]
108pub fn signal_complete(self) {
109if let Some(latch) = self.latch {
110latch.set();
111 }
112 }
113}
114115impl QueryJobId {
116pub(super) fn find_cycle_in_stack<'tcx>(
117&self,
118 query_map: QueryMap<'tcx>,
119 current_job: &Option<QueryJobId>,
120 span: Span,
121 ) -> CycleError<QueryStackDeferred<'tcx>> {
122// Find the waitee amongst `current_job` parents
123let mut cycle = Vec::new();
124let mut current_job = Option::clone(current_job);
125126while let Some(job) = current_job {
127let info = query_map.get(&job).unwrap();
128 cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });
129130if job == *self {
131 cycle.reverse();
132133// This is the end of the cycle
134 // The span entry we included was for the usage
135 // of the cycle itself, and not part of the cycle
136 // Replace it with the span which caused the cycle to form
137cycle[0].span = span;
138// Find out why the cycle itself was used
139let usage = info
140 .job
141 .parent
142 .as_ref()
143 .map(|parent| (info.job.span, parent.query(&query_map)));
144return CycleError { usage, cycle };
145 }
146147 current_job = info.job.parent;
148 }
149150{ ::core::panicking::panic_fmt(format_args!("did not find a cycle")); }panic!("did not find a cycle")151 }
152153#[cold]
154 #[inline(never)]
155pub fn find_dep_kind_root<'tcx>(
156&self,
157 query_map: QueryMap<'tcx>,
158 ) -> (QueryJobInfo<'tcx>, usize) {
159let mut depth = 1;
160let info = query_map.get(&self).unwrap();
161let dep_kind = info.query.dep_kind;
162let mut current_id = info.job.parent;
163let mut last_layout = (info.clone(), depth);
164165while let Some(id) = current_id {
166let info = query_map.get(&id).unwrap();
167if info.query.dep_kind == dep_kind {
168 depth += 1;
169 last_layout = (info.clone(), depth);
170 }
171 current_id = info.job.parent;
172 }
173last_layout174 }
175}
176177#[derive(#[automatically_derived]
impl<'tcx> ::core::fmt::Debug for QueryWaiter<'tcx> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field4_finish(f, "QueryWaiter",
"query", &self.query, "condvar", &self.condvar, "span",
&self.span, "cycle", &&self.cycle)
}
}Debug)]
178struct QueryWaiter<'tcx> {
179 query: Option<QueryJobId>,
180 condvar: Condvar,
181 span: Span,
182 cycle: Mutex<Option<CycleError<QueryStackDeferred<'tcx>>>>,
183}
184185#[derive(#[automatically_derived]
impl<'tcx> ::core::fmt::Debug for QueryLatchInfo<'tcx> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field2_finish(f,
"QueryLatchInfo", "complete", &self.complete, "waiters",
&&self.waiters)
}
}Debug)]
186struct QueryLatchInfo<'tcx> {
187 complete: bool,
188 waiters: Vec<Arc<QueryWaiter<'tcx>>>,
189}
190191#[derive(#[automatically_derived]
impl<'tcx> ::core::fmt::Debug for QueryLatch<'tcx> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field1_finish(f, "QueryLatch",
"info", &&self.info)
}
}Debug)]
192pub(super) struct QueryLatch<'tcx> {
193 info: Arc<Mutex<QueryLatchInfo<'tcx>>>,
194}
195196impl<'tcx> Clonefor QueryLatch<'tcx> {
197fn clone(&self) -> Self {
198Self { info: Arc::clone(&self.info) }
199 }
200}
201202impl<'tcx> QueryLatch<'tcx> {
203fn new() -> Self {
204QueryLatch {
205 info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
206 }
207 }
208209/// Awaits for the query job to complete.
210pub(super) fn wait_on(
211&self,
212 qcx: impl QueryContext<'tcx>,
213 query: Option<QueryJobId>,
214 span: Span,
215 ) -> Result<(), CycleError<QueryStackDeferred<'tcx>>> {
216let waiter =
217Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
218self.wait_on_inner(qcx, &waiter);
219// FIXME: Get rid of this lock. We have ownership of the QueryWaiter
220 // although another thread may still have a Arc reference so we cannot
221 // use Arc::get_mut
222let mut cycle = waiter.cycle.lock();
223match cycle.take() {
224None => Ok(()),
225Some(cycle) => Err(cycle),
226 }
227 }
228229/// Awaits the caller on this latch by blocking the current thread.
230fn wait_on_inner(&self, qcx: impl QueryContext<'tcx>, waiter: &Arc<QueryWaiter<'tcx>>) {
231let mut info = self.info.lock();
232if !info.complete {
233// We push the waiter on to the `waiters` list. It can be accessed inside
234 // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
235 // Both of these will remove it from the `waiters` list before resuming
236 // this thread.
237info.waiters.push(Arc::clone(waiter));
238239// If this detects a deadlock and the deadlock handler wants to resume this thread
240 // we have to be in the `wait` call. This is ensured by the deadlock handler
241 // getting the self.info lock.
242rustc_thread_pool::mark_blocked();
243let proxy = qcx.jobserver_proxy();
244proxy.release_thread();
245waiter.condvar.wait(&mut info);
246// Release the lock before we potentially block in `acquire_thread`
247drop(info);
248proxy.acquire_thread();
249 }
250 }
251252/// Sets the latch and resumes all waiters on it
253fn set(&self) {
254let mut info = self.info.lock();
255if true {
if !!info.complete {
::core::panicking::panic("assertion failed: !info.complete")
};
};debug_assert!(!info.complete);
256info.complete = true;
257let registry = rustc_thread_pool::Registry::current();
258for waiter in info.waiters.drain(..) {
259 rustc_thread_pool::mark_unblocked(®istry);
260 waiter.condvar.notify_one();
261 }
262 }
263264/// Removes a single waiter from the list of waiters.
265 /// This is used to break query cycles.
266fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<'tcx>> {
267let mut info = self.info.lock();
268if true {
if !!info.complete {
::core::panicking::panic("assertion failed: !info.complete")
};
};debug_assert!(!info.complete);
269// Remove the waiter from the list of waiters
270info.waiters.remove(waiter)
271 }
272}
273274/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
275type Waiter = (QueryJobId, usize);
276277/// Visits all the non-resumable and resumable waiters of a query.
278/// Only waiters in a query are visited.
279/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
280/// and a span indicating the reason the query waited on `query_ref`.
281/// If `visit` returns Some, this function returns.
282/// For visits of non-resumable waiters it returns the return value of `visit`.
283/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
284/// required information to resume the waiter.
285/// If all `visit` calls returns None, this function also returns None.
286fn visit_waiters<'tcx, F>(
287 query_map: &QueryMap<'tcx>,
288 query: QueryJobId,
289mut visit: F,
290) -> Option<Option<Waiter>>
291where
292F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
293{
294// Visit the parent query which is a non-resumable waiter since it's on the same stack
295if let Some(parent) = query.parent(query_map)
296 && let Some(cycle) = visit(query.span(query_map), parent)
297 {
298return Some(cycle);
299 }
300301// Visit the explicit waiters which use condvars and are resumable
302if let Some(latch) = query.latch(query_map) {
303for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
304if let Some(waiter_query) = waiter.query {
305if visit(waiter.span, waiter_query).is_some() {
306// Return a value which indicates that this waiter can be resumed
307return Some(Some((query, i)));
308 }
309 }
310 }
311 }
312313None314}
315316/// Look for query cycles by doing a depth first search starting at `query`.
317/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
318/// If a cycle is detected, this initial value is replaced with the span causing
319/// the cycle.
320fn cycle_check<'tcx>(
321 query_map: &QueryMap<'tcx>,
322 query: QueryJobId,
323 span: Span,
324 stack: &mut Vec<(Span, QueryJobId)>,
325 visited: &mut FxHashSet<QueryJobId>,
326) -> Option<Option<Waiter>> {
327if !visited.insert(query) {
328return if let Some(p) = stack.iter().position(|q| q.1 == query) {
329// We detected a query cycle, fix up the initial span and return Some
330331 // Remove previous stack entries
332stack.drain(0..p);
333// Replace the span for the first query with the cycle cause
334stack[0].0 = span;
335Some(None)
336 } else {
337None338 };
339 }
340341// Query marked as visited is added it to the stack
342stack.push((span, query));
343344// Visit all the waiters
345let r = visit_waiters(query_map, query, |span, successor| {
346cycle_check(query_map, successor, span, stack, visited)
347 });
348349// Remove the entry in our stack if we didn't find a cycle
350if r.is_none() {
351stack.pop();
352 }
353354r355}
356357/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
358/// from `query` without going through any of the queries in `visited`.
359/// This is achieved with a depth first search.
360fn connected_to_root<'tcx>(
361 query_map: &QueryMap<'tcx>,
362 query: QueryJobId,
363 visited: &mut FxHashSet<QueryJobId>,
364) -> bool {
365// We already visited this or we're deliberately ignoring it
366if !visited.insert(query) {
367return false;
368 }
369370// This query is connected to the root (it has no query parent), return true
371if query.parent(query_map).is_none() {
372return true;
373 }
374375visit_waiters(query_map, query, |_, successor| {
376connected_to_root(query_map, successor, visited).then_some(None)
377 })
378 .is_some()
379}
380381// Deterministically pick an query from a list
382fn pick_query<'a, 'tcx, T, F>(query_map: &QueryMap<'tcx>, queries: &'a [T], f: F) -> &'a T
383where
384F: Fn(&T) -> (Span, QueryJobId),
385{
386// Deterministically pick an entry point
387 // FIXME: Sort this instead
388queries389 .iter()
390 .min_by_key(|v| {
391let (span, query) = f(v);
392let hash = query.query(query_map).hash;
393// Prefer entry points which have valid spans for nicer error messages
394 // We add an integer to the tuple ensuring that entry points
395 // with valid spans are picked first
396let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
397 (span_cmp, hash)
398 })
399 .unwrap()
400}
401402/// Looks for query cycles starting from the last query in `jobs`.
403/// If a cycle is found, all queries in the cycle is removed from `jobs` and
404/// the function return true.
405/// If a cycle was not found, the starting query is removed from `jobs` and
406/// the function returns false.
407fn remove_cycle<'tcx>(
408 query_map: &QueryMap<'tcx>,
409 jobs: &mut Vec<QueryJobId>,
410 wakelist: &mut Vec<Arc<QueryWaiter<'tcx>>>,
411) -> bool {
412let mut visited = FxHashSet::default();
413let mut stack = Vec::new();
414// Look for a cycle starting with the last query in `jobs`
415if let Some(waiter) =
416cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
417 {
418// The stack is a vector of pairs of spans and queries; reverse it so that
419 // the earlier entries require later entries
420let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();
421422// Shift the spans so that queries are matched with the span for their waitee
423spans.rotate_right(1);
424425// Zip them back together
426let mut stack: Vec<_> = iter::zip(spans, queries).collect();
427428// Remove the queries in our cycle from the list of jobs to look at
429for r in &stack {
430if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
431 jobs.remove(pos);
432 }
433 }
434435// Find the queries in the cycle which are
436 // connected to queries outside the cycle
437let entry_points = stack438 .iter()
439 .filter_map(|&(span, query)| {
440if query.parent(query_map).is_none() {
441// This query is connected to the root (it has no query parent)
442Some((span, query, None))
443 } else {
444let mut waiters = Vec::new();
445// Find all the direct waiters who lead to the root
446visit_waiters(query_map, query, |span, waiter| {
447// Mark all the other queries in the cycle as already visited
448let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));
449450if connected_to_root(query_map, waiter, &mut visited) {
451waiters.push((span, waiter));
452 }
453454None455 });
456if waiters.is_empty() {
457None458 } else {
459// Deterministically pick one of the waiters to show to the user
460let waiter = *pick_query(query_map, &waiters, |s| *s);
461Some((span, query, Some(waiter)))
462 }
463 }
464 })
465 .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();
466467// Deterministically pick an entry point
468let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));
469470// Shift the stack so that our entry point is first
471let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
472if let Some(pos) = entry_point_pos {
473stack.rotate_left(pos);
474 }
475476let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));
477478// Create the cycle error
479let error = CycleError {
480usage,
481 cycle: stack482 .iter()
483 .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
484 .collect(),
485 };
486487// We unwrap `waiter` here since there must always be one
488 // edge which is resumable / waited using a query latch
489let (waitee_query, waiter_idx) = waiter.unwrap();
490491// Extract the waiter we want to resume
492let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);
493494// Set the cycle error so it will be picked up when resumed
495*waiter.cycle.lock() = Some(error);
496497// Put the waiter on the list of things to resume
498wakelist.push(waiter);
499500true
501} else {
502false
503}
504}
505506/// Detects query cycles by using depth first search over all active query jobs.
507/// If a query cycle is found it will break the cycle by finding an edge which
508/// uses a query latch and then resuming that waiter.
509/// There may be multiple cycles involved in a deadlock, so this searches
510/// all active queries for cycles before finally resuming all the waiters at once.
511pub fn break_query_cycles<'tcx>(query_map: QueryMap<'tcx>, registry: &rustc_thread_pool::Registry) {
512let mut wakelist = Vec::new();
513// It is OK per the comments:
514 // - https://github.com/rust-lang/rust/pull/131200#issuecomment-2798854932
515 // - https://github.com/rust-lang/rust/pull/131200#issuecomment-2798866392
516#[allow(rustc::potential_query_instability)]
517let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
518519let mut found_cycle = false;
520521while jobs.len() > 0 {
522if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
523 found_cycle = true;
524 }
525 }
526527// Check that a cycle was found. It is possible for a deadlock to occur without
528 // a query cycle if a query which can be waited on uses Rayon to do multithreading
529 // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
530 // wait using Rayon on B. Rayon may then switch to executing another query (Y)
531 // which in turn will wait on X causing a deadlock. We have a false dependency from
532 // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
533 // only considers the true dependency and won't detect a cycle.
534if !found_cycle {
535{
::core::panicking::panic_fmt(format_args!("deadlock detected as we\'re unable to find a query cycle to break\ncurrent query map:\n{0:#?}",
query_map));
};panic!(
536"deadlock detected as we're unable to find a query cycle to break\n\
537 current query map:\n{:#?}",
538 query_map
539 );
540 }
541542// Mark all the thread we're about to wake up as unblocked. This needs to be done before
543 // we wake the threads up as otherwise Rayon could detect a deadlock if a thread we
544 // resumed fell asleep and this thread had yet to mark the remaining threads as unblocked.
545for _ in 0..wakelist.len() {
546 rustc_thread_pool::mark_unblocked(registry);
547 }
548549for waiter in wakelist.into_iter() {
550 waiter.condvar.notify_one();
551 }
552}
553554#[inline(never)]
555#[cold]
556pub fn report_cycle<'a>(
557 sess: &'a Session,
558CycleError { usage, cycle: stack }: &CycleError,
559) -> Diag<'a> {
560if !!stack.is_empty() {
::core::panicking::panic("assertion failed: !stack.is_empty()")
};assert!(!stack.is_empty());
561562let span = stack[0].query.info.default_span(stack[1 % stack.len()].span);
563564let mut cycle_stack = Vec::new();
565566use crate::error::StackCount;
567let stack_count = if stack.len() == 1 { StackCount::Single } else { StackCount::Multiple };
568569for i in 1..stack.len() {
570let query = &stack[i].query;
571let span = query.info.default_span(stack[(i + 1) % stack.len()].span);
572 cycle_stack.push(CycleStack { span, desc: query.info.description.to_owned() });
573 }
574575let mut cycle_usage = None;
576if let Some((span, ref query)) = *usage {
577cycle_usage = Some(crate::error::CycleUsage {
578 span: query.info.default_span(span),
579 usage: query.info.description.to_string(),
580 });
581 }
582583let alias =
584if stack.iter().all(|entry| #[allow(non_exhaustive_omitted_patterns)] match entry.query.info.def_kind {
Some(DefKind::TyAlias) => true,
_ => false,
}matches!(entry.query.info.def_kind, Some(DefKind::TyAlias))) {
585Some(crate::error::Alias::Ty)
586 } else if stack.iter().all(|entry| entry.query.info.def_kind == Some(DefKind::TraitAlias)) {
587Some(crate::error::Alias::Trait)
588 } else {
589None590 };
591592let cycle_diag = crate::error::Cycle {
593span,
594cycle_stack,
595 stack_bottom: stack[0].query.info.description.to_owned(),
596alias,
597cycle_usage,
598stack_count,
599 note_span: (),
600 };
601602sess.dcx().create_err(cycle_diag)
603}
604605pub fn print_query_stack<'tcx, Qcx: QueryContext<'tcx>>(
606 qcx: Qcx,
607mut current_query: Option<QueryJobId>,
608 dcx: DiagCtxtHandle<'_>,
609 limit_frames: Option<usize>,
610mut file: Option<std::fs::File>,
611) -> usize {
612// Be careful relying on global state here: this code is called from
613 // a panic hook, which means that the global `DiagCtxt` may be in a weird
614 // state if it was responsible for triggering the panic.
615let mut count_printed = 0;
616let mut count_total = 0;
617618// Make use of a partial query map if we fail to take locks collecting active queries.
619let query_map = match qcx.collect_active_jobs(false) {
620Ok(query_map) => query_map,
621Err(query_map) => query_map,
622 };
623624if let Some(ref mut file) = file {
625let _ = file.write_fmt(format_args!("\n\nquery stack during panic:\n"))writeln!(file, "\n\nquery stack during panic:");
626 }
627while let Some(query) = current_query {
628let Some(query_info) = query_map.get(&query) else {
629break;
630 };
631let query_extra = qcx.lift_query_info(&query_info.query.info);
632if Some(count_printed) < limit_frames || limit_frames.is_none() {
633// Only print to stderr as many stack frames as `num_frames` when present.
634 dcx.struct_failure_note(::alloc::__export::must_use({
::alloc::fmt::format(format_args!("#{0} [{1:?}] {2}", count_printed,
query_info.query.dep_kind, query_extra.description))
})format!(
635"#{} [{:?}] {}",
636 count_printed, query_info.query.dep_kind, query_extra.description
637 ))
638 .with_span(query_info.job.span)
639 .emit();
640 count_printed += 1;
641 }
642643if let Some(ref mut file) = file {
644let _ = file.write_fmt(format_args!("#{0} [{1}] {2}\n", count_total,
qcx.dep_context().dep_kind_vtable(query_info.query.dep_kind).name,
query_extra.description))writeln!(
645 file,
646"#{} [{}] {}",
647 count_total,
648 qcx.dep_context().dep_kind_vtable(query_info.query.dep_kind).name,
649 query_extra.description
650 );
651 }
652653 current_query = query_info.job.parent;
654 count_total += 1;
655 }
656657if let Some(ref mut file) = file {
658let _ = file.write_fmt(format_args!("end of query stack\n"))writeln!(file, "end of query stack");
659 }
660count_total661}