// rustc_query_system/query/job.rs

use std::fmt::Debug;
use std::hash::Hash;
use std::io::Write;
use std::iter;
use std::num::NonZero;
use std::sync::Arc;

use parking_lot::{Condvar, Mutex};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_errors::{Diag, DiagCtxtHandle};
use rustc_hir::def::DefKind;
use rustc_session::Session;
use rustc_span::{DUMMY_SP, Span};

use super::{QueryStackDeferred, QueryStackFrameExtra};
use crate::dep_graph::DepContext;
use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};

21/// Represents a span and a query key.
22#[derive(#[automatically_derived]
impl<I: ::core::clone::Clone> ::core::clone::Clone for QueryInfo<I> {
    #[inline]
    fn clone(&self) -> QueryInfo<I> {
        QueryInfo {
            span: ::core::clone::Clone::clone(&self.span),
            query: ::core::clone::Clone::clone(&self.query),
        }
    }
}Clone, #[automatically_derived]
impl<I: ::core::fmt::Debug> ::core::fmt::Debug for QueryInfo<I> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field2_finish(f, "QueryInfo",
            "span", &self.span, "query", &&self.query)
    }
}Debug)]
23pub struct QueryInfo<I> {
24    /// The span corresponding to the reason for which this query was required.
25    pub span: Span,
26    pub query: QueryStackFrame<I>,
27}
28
29impl<'tcx> QueryInfo<QueryStackDeferred<'tcx>> {
30    pub(crate) fn lift<Qcx: QueryContext<'tcx>>(
31        &self,
32        qcx: Qcx,
33    ) -> QueryInfo<QueryStackFrameExtra> {
34        QueryInfo { span: self.span, query: self.query.lift(qcx) }
35    }
36}
37
38pub type QueryMap<'tcx> = FxHashMap<QueryJobId, QueryJobInfo<'tcx>>;
39
/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub struct QueryJobId(pub NonZero<u64>);

44impl QueryJobId {
45    fn query<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> QueryStackFrame<QueryStackDeferred<'tcx>> {
46        map.get(&self).unwrap().query.clone()
47    }
48
49    fn span<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Span {
50        map.get(&self).unwrap().job.span
51    }
52
53    fn parent<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Option<QueryJobId> {
54        map.get(&self).unwrap().job.parent
55    }
56
57    fn latch<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Option<&'a QueryLatch<'tcx>> {
58        map.get(&self).unwrap().job.latch.as_ref()
59    }
60}
61
62#[derive(#[automatically_derived]
impl<'tcx> ::core::clone::Clone for QueryJobInfo<'tcx> {
    #[inline]
    fn clone(&self) -> QueryJobInfo<'tcx> {
        QueryJobInfo {
            query: ::core::clone::Clone::clone(&self.query),
            job: ::core::clone::Clone::clone(&self.job),
        }
    }
}Clone, #[automatically_derived]
impl<'tcx> ::core::fmt::Debug for QueryJobInfo<'tcx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field2_finish(f, "QueryJobInfo",
            "query", &self.query, "job", &&self.job)
    }
}Debug)]
63pub struct QueryJobInfo<'tcx> {
64    pub query: QueryStackFrame<QueryStackDeferred<'tcx>>,
65    pub job: QueryJob<'tcx>,
66}
67
68/// Represents an active query job.
69#[derive(#[automatically_derived]
impl<'tcx> ::core::fmt::Debug for QueryJob<'tcx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(f, "QueryJob",
            "id", &self.id, "span", &self.span, "parent", &self.parent,
            "latch", &&self.latch)
    }
}Debug)]
70pub struct QueryJob<'tcx> {
71    pub id: QueryJobId,
72
73    /// The span corresponding to the reason for which this query was required.
74    pub span: Span,
75
76    /// The parent query job which created this job and is implicitly waiting on it.
77    pub parent: Option<QueryJobId>,
78
79    /// The latch that is used to wait on this job.
80    latch: Option<QueryLatch<'tcx>>,
81}
82
83impl<'tcx> Clone for QueryJob<'tcx> {
84    fn clone(&self) -> Self {
85        Self { id: self.id, span: self.span, parent: self.parent, latch: self.latch.clone() }
86    }
87}
88
89impl<'tcx> QueryJob<'tcx> {
90    /// Creates a new query job.
91    #[inline]
92    pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
93        QueryJob { id, span, parent, latch: None }
94    }
95
96    pub(super) fn latch(&mut self) -> QueryLatch<'tcx> {
97        if self.latch.is_none() {
98            self.latch = Some(QueryLatch::new());
99        }
100        self.latch.as_ref().unwrap().clone()
101    }
102
103    /// Signals to waiters that the query is complete.
104    ///
105    /// This does nothing for single threaded rustc,
106    /// as there are no concurrent jobs which could be waiting on us
107    #[inline]
108    pub fn signal_complete(self) {
109        if let Some(latch) = self.latch {
110            latch.set();
111        }
112    }
113}
114
115impl QueryJobId {
116    pub(super) fn find_cycle_in_stack<'tcx>(
117        &self,
118        query_map: QueryMap<'tcx>,
119        current_job: &Option<QueryJobId>,
120        span: Span,
121    ) -> CycleError<QueryStackDeferred<'tcx>> {
122        // Find the waitee amongst `current_job` parents
123        let mut cycle = Vec::new();
124        let mut current_job = Option::clone(current_job);
125
126        while let Some(job) = current_job {
127            let info = query_map.get(&job).unwrap();
128            cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });
129
130            if job == *self {
131                cycle.reverse();
132
133                // This is the end of the cycle
134                // The span entry we included was for the usage
135                // of the cycle itself, and not part of the cycle
136                // Replace it with the span which caused the cycle to form
137                cycle[0].span = span;
138                // Find out why the cycle itself was used
139                let usage = info
140                    .job
141                    .parent
142                    .as_ref()
143                    .map(|parent| (info.job.span, parent.query(&query_map)));
144                return CycleError { usage, cycle };
145            }
146
147            current_job = info.job.parent;
148        }
149
150        { ::core::panicking::panic_fmt(format_args!("did not find a cycle")); }panic!("did not find a cycle")
151    }
152
153    #[cold]
154    #[inline(never)]
155    pub fn find_dep_kind_root<'tcx>(
156        &self,
157        query_map: QueryMap<'tcx>,
158    ) -> (QueryJobInfo<'tcx>, usize) {
159        let mut depth = 1;
160        let info = query_map.get(&self).unwrap();
161        let dep_kind = info.query.dep_kind;
162        let mut current_id = info.job.parent;
163        let mut last_layout = (info.clone(), depth);
164
165        while let Some(id) = current_id {
166            let info = query_map.get(&id).unwrap();
167            if info.query.dep_kind == dep_kind {
168                depth += 1;
169                last_layout = (info.clone(), depth);
170            }
171            current_id = info.job.parent;
172        }
173        last_layout
174    }
175}
176
177#[derive(#[automatically_derived]
impl<'tcx> ::core::fmt::Debug for QueryWaiter<'tcx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(f, "QueryWaiter",
            "query", &self.query, "condvar", &self.condvar, "span",
            &self.span, "cycle", &&self.cycle)
    }
}Debug)]
178struct QueryWaiter<'tcx> {
179    query: Option<QueryJobId>,
180    condvar: Condvar,
181    span: Span,
182    cycle: Mutex<Option<CycleError<QueryStackDeferred<'tcx>>>>,
183}
184
185#[derive(#[automatically_derived]
impl<'tcx> ::core::fmt::Debug for QueryLatchInfo<'tcx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field2_finish(f,
            "QueryLatchInfo", "complete", &self.complete, "waiters",
            &&self.waiters)
    }
}Debug)]
186struct QueryLatchInfo<'tcx> {
187    complete: bool,
188    waiters: Vec<Arc<QueryWaiter<'tcx>>>,
189}
190
191#[derive(#[automatically_derived]
impl<'tcx> ::core::fmt::Debug for QueryLatch<'tcx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field1_finish(f, "QueryLatch",
            "info", &&self.info)
    }
}Debug)]
192pub(super) struct QueryLatch<'tcx> {
193    info: Arc<Mutex<QueryLatchInfo<'tcx>>>,
194}
195
196impl<'tcx> Clone for QueryLatch<'tcx> {
197    fn clone(&self) -> Self {
198        Self { info: Arc::clone(&self.info) }
199    }
200}
201
202impl<'tcx> QueryLatch<'tcx> {
203    fn new() -> Self {
204        QueryLatch {
205            info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
206        }
207    }
208
209    /// Awaits for the query job to complete.
210    pub(super) fn wait_on(
211        &self,
212        qcx: impl QueryContext<'tcx>,
213        query: Option<QueryJobId>,
214        span: Span,
215    ) -> Result<(), CycleError<QueryStackDeferred<'tcx>>> {
216        let waiter =
217            Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
218        self.wait_on_inner(qcx, &waiter);
219        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
220        // although another thread may still have a Arc reference so we cannot
221        // use Arc::get_mut
222        let mut cycle = waiter.cycle.lock();
223        match cycle.take() {
224            None => Ok(()),
225            Some(cycle) => Err(cycle),
226        }
227    }
228
229    /// Awaits the caller on this latch by blocking the current thread.
230    fn wait_on_inner(&self, qcx: impl QueryContext<'tcx>, waiter: &Arc<QueryWaiter<'tcx>>) {
231        let mut info = self.info.lock();
232        if !info.complete {
233            // We push the waiter on to the `waiters` list. It can be accessed inside
234            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
235            // Both of these will remove it from the `waiters` list before resuming
236            // this thread.
237            info.waiters.push(Arc::clone(waiter));
238
239            // If this detects a deadlock and the deadlock handler wants to resume this thread
240            // we have to be in the `wait` call. This is ensured by the deadlock handler
241            // getting the self.info lock.
242            rustc_thread_pool::mark_blocked();
243            let proxy = qcx.jobserver_proxy();
244            proxy.release_thread();
245            waiter.condvar.wait(&mut info);
246            // Release the lock before we potentially block in `acquire_thread`
247            drop(info);
248            proxy.acquire_thread();
249        }
250    }
251
252    /// Sets the latch and resumes all waiters on it
253    fn set(&self) {
254        let mut info = self.info.lock();
255        if true {
    if !!info.complete {
        ::core::panicking::panic("assertion failed: !info.complete")
    };
};debug_assert!(!info.complete);
256        info.complete = true;
257        let registry = rustc_thread_pool::Registry::current();
258        for waiter in info.waiters.drain(..) {
259            rustc_thread_pool::mark_unblocked(&registry);
260            waiter.condvar.notify_one();
261        }
262    }
263
264    /// Removes a single waiter from the list of waiters.
265    /// This is used to break query cycles.
266    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<'tcx>> {
267        let mut info = self.info.lock();
268        if true {
    if !!info.complete {
        ::core::panicking::panic("assertion failed: !info.complete")
    };
};debug_assert!(!info.complete);
269        // Remove the waiter from the list of waiters
270        info.waiters.remove(waiter)
271    }
272}
273
274/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
275type Waiter = (QueryJobId, usize);
276
277/// Visits all the non-resumable and resumable waiters of a query.
278/// Only waiters in a query are visited.
279/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
280/// and a span indicating the reason the query waited on `query_ref`.
281/// If `visit` returns Some, this function returns.
282/// For visits of non-resumable waiters it returns the return value of `visit`.
283/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
284/// required information to resume the waiter.
285/// If all `visit` calls returns None, this function also returns None.
286fn visit_waiters<'tcx, F>(
287    query_map: &QueryMap<'tcx>,
288    query: QueryJobId,
289    mut visit: F,
290) -> Option<Option<Waiter>>
291where
292    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
293{
294    // Visit the parent query which is a non-resumable waiter since it's on the same stack
295    if let Some(parent) = query.parent(query_map)
296        && let Some(cycle) = visit(query.span(query_map), parent)
297    {
298        return Some(cycle);
299    }
300
301    // Visit the explicit waiters which use condvars and are resumable
302    if let Some(latch) = query.latch(query_map) {
303        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
304            if let Some(waiter_query) = waiter.query {
305                if visit(waiter.span, waiter_query).is_some() {
306                    // Return a value which indicates that this waiter can be resumed
307                    return Some(Some((query, i)));
308                }
309            }
310        }
311    }
312
313    None
314}
315
316/// Look for query cycles by doing a depth first search starting at `query`.
317/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
318/// If a cycle is detected, this initial value is replaced with the span causing
319/// the cycle.
320fn cycle_check<'tcx>(
321    query_map: &QueryMap<'tcx>,
322    query: QueryJobId,
323    span: Span,
324    stack: &mut Vec<(Span, QueryJobId)>,
325    visited: &mut FxHashSet<QueryJobId>,
326) -> Option<Option<Waiter>> {
327    if !visited.insert(query) {
328        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
329            // We detected a query cycle, fix up the initial span and return Some
330
331            // Remove previous stack entries
332            stack.drain(0..p);
333            // Replace the span for the first query with the cycle cause
334            stack[0].0 = span;
335            Some(None)
336        } else {
337            None
338        };
339    }
340
341    // Query marked as visited is added it to the stack
342    stack.push((span, query));
343
344    // Visit all the waiters
345    let r = visit_waiters(query_map, query, |span, successor| {
346        cycle_check(query_map, successor, span, stack, visited)
347    });
348
349    // Remove the entry in our stack if we didn't find a cycle
350    if r.is_none() {
351        stack.pop();
352    }
353
354    r
355}
356
357/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
358/// from `query` without going through any of the queries in `visited`.
359/// This is achieved with a depth first search.
360fn connected_to_root<'tcx>(
361    query_map: &QueryMap<'tcx>,
362    query: QueryJobId,
363    visited: &mut FxHashSet<QueryJobId>,
364) -> bool {
365    // We already visited this or we're deliberately ignoring it
366    if !visited.insert(query) {
367        return false;
368    }
369
370    // This query is connected to the root (it has no query parent), return true
371    if query.parent(query_map).is_none() {
372        return true;
373    }
374
375    visit_waiters(query_map, query, |_, successor| {
376        connected_to_root(query_map, successor, visited).then_some(None)
377    })
378    .is_some()
379}
380
381// Deterministically pick an query from a list
382fn pick_query<'a, 'tcx, T, F>(query_map: &QueryMap<'tcx>, queries: &'a [T], f: F) -> &'a T
383where
384    F: Fn(&T) -> (Span, QueryJobId),
385{
386    // Deterministically pick an entry point
387    // FIXME: Sort this instead
388    queries
389        .iter()
390        .min_by_key(|v| {
391            let (span, query) = f(v);
392            let hash = query.query(query_map).hash;
393            // Prefer entry points which have valid spans for nicer error messages
394            // We add an integer to the tuple ensuring that entry points
395            // with valid spans are picked first
396            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
397            (span_cmp, hash)
398        })
399        .unwrap()
400}
401
402/// Looks for query cycles starting from the last query in `jobs`.
403/// If a cycle is found, all queries in the cycle is removed from `jobs` and
404/// the function return true.
405/// If a cycle was not found, the starting query is removed from `jobs` and
406/// the function returns false.
407fn remove_cycle<'tcx>(
408    query_map: &QueryMap<'tcx>,
409    jobs: &mut Vec<QueryJobId>,
410    wakelist: &mut Vec<Arc<QueryWaiter<'tcx>>>,
411) -> bool {
412    let mut visited = FxHashSet::default();
413    let mut stack = Vec::new();
414    // Look for a cycle starting with the last query in `jobs`
415    if let Some(waiter) =
416        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
417    {
418        // The stack is a vector of pairs of spans and queries; reverse it so that
419        // the earlier entries require later entries
420        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();
421
422        // Shift the spans so that queries are matched with the span for their waitee
423        spans.rotate_right(1);
424
425        // Zip them back together
426        let mut stack: Vec<_> = iter::zip(spans, queries).collect();
427
428        // Remove the queries in our cycle from the list of jobs to look at
429        for r in &stack {
430            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
431                jobs.remove(pos);
432            }
433        }
434
435        // Find the queries in the cycle which are
436        // connected to queries outside the cycle
437        let entry_points = stack
438            .iter()
439            .filter_map(|&(span, query)| {
440                if query.parent(query_map).is_none() {
441                    // This query is connected to the root (it has no query parent)
442                    Some((span, query, None))
443                } else {
444                    let mut waiters = Vec::new();
445                    // Find all the direct waiters who lead to the root
446                    visit_waiters(query_map, query, |span, waiter| {
447                        // Mark all the other queries in the cycle as already visited
448                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));
449
450                        if connected_to_root(query_map, waiter, &mut visited) {
451                            waiters.push((span, waiter));
452                        }
453
454                        None
455                    });
456                    if waiters.is_empty() {
457                        None
458                    } else {
459                        // Deterministically pick one of the waiters to show to the user
460                        let waiter = *pick_query(query_map, &waiters, |s| *s);
461                        Some((span, query, Some(waiter)))
462                    }
463                }
464            })
465            .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();
466
467        // Deterministically pick an entry point
468        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));
469
470        // Shift the stack so that our entry point is first
471        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
472        if let Some(pos) = entry_point_pos {
473            stack.rotate_left(pos);
474        }
475
476        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));
477
478        // Create the cycle error
479        let error = CycleError {
480            usage,
481            cycle: stack
482                .iter()
483                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
484                .collect(),
485        };
486
487        // We unwrap `waiter` here since there must always be one
488        // edge which is resumable / waited using a query latch
489        let (waitee_query, waiter_idx) = waiter.unwrap();
490
491        // Extract the waiter we want to resume
492        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);
493
494        // Set the cycle error so it will be picked up when resumed
495        *waiter.cycle.lock() = Some(error);
496
497        // Put the waiter on the list of things to resume
498        wakelist.push(waiter);
499
500        true
501    } else {
502        false
503    }
504}
505
506/// Detects query cycles by using depth first search over all active query jobs.
507/// If a query cycle is found it will break the cycle by finding an edge which
508/// uses a query latch and then resuming that waiter.
509/// There may be multiple cycles involved in a deadlock, so this searches
510/// all active queries for cycles before finally resuming all the waiters at once.
511pub fn break_query_cycles<'tcx>(query_map: QueryMap<'tcx>, registry: &rustc_thread_pool::Registry) {
512    let mut wakelist = Vec::new();
513    // It is OK per the comments:
514    // - https://github.com/rust-lang/rust/pull/131200#issuecomment-2798854932
515    // - https://github.com/rust-lang/rust/pull/131200#issuecomment-2798866392
516    #[allow(rustc::potential_query_instability)]
517    let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
518
519    let mut found_cycle = false;
520
521    while jobs.len() > 0 {
522        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
523            found_cycle = true;
524        }
525    }
526
527    // Check that a cycle was found. It is possible for a deadlock to occur without
528    // a query cycle if a query which can be waited on uses Rayon to do multithreading
529    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
530    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
531    // which in turn will wait on X causing a deadlock. We have a false dependency from
532    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
533    // only considers the true dependency and won't detect a cycle.
534    if !found_cycle {
535        {
    ::core::panicking::panic_fmt(format_args!("deadlock detected as we\'re unable to find a query cycle to break\ncurrent query map:\n{0:#?}",
            query_map));
};panic!(
536            "deadlock detected as we're unable to find a query cycle to break\n\
537            current query map:\n{:#?}",
538            query_map
539        );
540    }
541
542    // Mark all the thread we're about to wake up as unblocked. This needs to be done before
543    // we wake the threads up as otherwise Rayon could detect a deadlock if a thread we
544    // resumed fell asleep and this thread had yet to mark the remaining threads as unblocked.
545    for _ in 0..wakelist.len() {
546        rustc_thread_pool::mark_unblocked(registry);
547    }
548
549    for waiter in wakelist.into_iter() {
550        waiter.condvar.notify_one();
551    }
552}
553
554#[inline(never)]
555#[cold]
556pub fn report_cycle<'a>(
557    sess: &'a Session,
558    CycleError { usage, cycle: stack }: &CycleError,
559) -> Diag<'a> {
560    if !!stack.is_empty() {
    ::core::panicking::panic("assertion failed: !stack.is_empty()")
};assert!(!stack.is_empty());
561
562    let span = stack[0].query.info.default_span(stack[1 % stack.len()].span);
563
564    let mut cycle_stack = Vec::new();
565
566    use crate::error::StackCount;
567    let stack_count = if stack.len() == 1 { StackCount::Single } else { StackCount::Multiple };
568
569    for i in 1..stack.len() {
570        let query = &stack[i].query;
571        let span = query.info.default_span(stack[(i + 1) % stack.len()].span);
572        cycle_stack.push(CycleStack { span, desc: query.info.description.to_owned() });
573    }
574
575    let mut cycle_usage = None;
576    if let Some((span, ref query)) = *usage {
577        cycle_usage = Some(crate::error::CycleUsage {
578            span: query.info.default_span(span),
579            usage: query.info.description.to_string(),
580        });
581    }
582
583    let alias =
584        if stack.iter().all(|entry| #[allow(non_exhaustive_omitted_patterns)] match entry.query.info.def_kind {
    Some(DefKind::TyAlias) => true,
    _ => false,
}matches!(entry.query.info.def_kind, Some(DefKind::TyAlias))) {
585            Some(crate::error::Alias::Ty)
586        } else if stack.iter().all(|entry| entry.query.info.def_kind == Some(DefKind::TraitAlias)) {
587            Some(crate::error::Alias::Trait)
588        } else {
589            None
590        };
591
592    let cycle_diag = crate::error::Cycle {
593        span,
594        cycle_stack,
595        stack_bottom: stack[0].query.info.description.to_owned(),
596        alias,
597        cycle_usage,
598        stack_count,
599        note_span: (),
600    };
601
602    sess.dcx().create_err(cycle_diag)
603}
604
605pub fn print_query_stack<'tcx, Qcx: QueryContext<'tcx>>(
606    qcx: Qcx,
607    mut current_query: Option<QueryJobId>,
608    dcx: DiagCtxtHandle<'_>,
609    limit_frames: Option<usize>,
610    mut file: Option<std::fs::File>,
611) -> usize {
612    // Be careful relying on global state here: this code is called from
613    // a panic hook, which means that the global `DiagCtxt` may be in a weird
614    // state if it was responsible for triggering the panic.
615    let mut count_printed = 0;
616    let mut count_total = 0;
617
618    // Make use of a partial query map if we fail to take locks collecting active queries.
619    let query_map = match qcx.collect_active_jobs(false) {
620        Ok(query_map) => query_map,
621        Err(query_map) => query_map,
622    };
623
624    if let Some(ref mut file) = file {
625        let _ = file.write_fmt(format_args!("\n\nquery stack during panic:\n"))writeln!(file, "\n\nquery stack during panic:");
626    }
627    while let Some(query) = current_query {
628        let Some(query_info) = query_map.get(&query) else {
629            break;
630        };
631        let query_extra = qcx.lift_query_info(&query_info.query.info);
632        if Some(count_printed) < limit_frames || limit_frames.is_none() {
633            // Only print to stderr as many stack frames as `num_frames` when present.
634            dcx.struct_failure_note(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("#{0} [{1:?}] {2}", count_printed,
                query_info.query.dep_kind, query_extra.description))
    })format!(
635                "#{} [{:?}] {}",
636                count_printed, query_info.query.dep_kind, query_extra.description
637            ))
638            .with_span(query_info.job.span)
639            .emit();
640            count_printed += 1;
641        }
642
643        if let Some(ref mut file) = file {
644            let _ = file.write_fmt(format_args!("#{0} [{1}] {2}\n", count_total,
        qcx.dep_context().dep_kind_vtable(query_info.query.dep_kind).name,
        query_extra.description))writeln!(
645                file,
646                "#{} [{}] {}",
647                count_total,
648                qcx.dep_context().dep_kind_vtable(query_info.query.dep_kind).name,
649                query_extra.description
650            );
651        }
652
653        current_query = query_info.job.parent;
654        count_total += 1;
655    }
656
657    if let Some(ref mut file) = file {
658        let _ = file.write_fmt(format_args!("end of query stack\n"))writeln!(file, "end of query stack");
659    }
660    count_total
661}