rustc_query_system/query/job.rs

use std::fmt::Debug;
use std::hash::Hash;
use std::io::Write;
use std::iter;
use std::num::NonZero;
use std::sync::Arc;

use parking_lot::{Condvar, Mutex};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::jobserver;
use rustc_errors::{Diag, DiagCtxtHandle};
use rustc_hir::def::DefKind;
use rustc_session::Session;
use rustc_span::{DUMMY_SP, Span};

use super::QueryStackFrameExtra;
use crate::dep_graph::DepContext;
use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};

/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<I> {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: QueryStackFrame<I>,
}

impl<I> QueryInfo<I> {
    pub(crate) fn lift<Qcx: QueryContext<QueryInfo = I>>(
        &self,
        qcx: Qcx,
    ) -> QueryInfo<QueryStackFrameExtra> {
        QueryInfo { span: self.span, query: self.query.lift(qcx) }
    }
}

pub type QueryMap<I> = FxHashMap<QueryJobId, QueryJobInfo<I>>;

/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub struct QueryJobId(pub NonZero<u64>);

impl QueryJobId {
    fn query<I: Clone>(self, map: &QueryMap<I>) -> QueryStackFrame<I> {
        map.get(&self).unwrap().query.clone()
    }

    fn span<I>(self, map: &QueryMap<I>) -> Span {
        map.get(&self).unwrap().job.span
    }

    fn parent<I>(self, map: &QueryMap<I>) -> Option<QueryJobId> {
        map.get(&self).unwrap().job.parent
    }

    fn latch<I>(self, map: &QueryMap<I>) -> Option<&QueryLatch<I>> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}

#[derive(Clone, Debug)]
pub struct QueryJobInfo<I> {
    pub query: QueryStackFrame<I>,
    pub job: QueryJob<I>,
}

/// Represents an active query job.
#[derive(Debug)]
pub struct QueryJob<I> {
    pub id: QueryJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId>,

    /// The latch that is used to wait on this job.
    latch: Option<QueryLatch<I>>,
}

impl<I> Clone for QueryJob<I> {
    fn clone(&self) -> Self {
        Self { id: self.id, span: self.span, parent: self.parent, latch: self.latch.clone() }
    }
}

impl<I> QueryJob<I> {
    /// Creates a new query job.
    #[inline]
    pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
        QueryJob { id, span, parent, latch: None }
    }

    pub(super) fn latch(&mut self) -> QueryLatch<I> {
        if self.latch.is_none() {
            self.latch = Some(QueryLatch::new());
        }
        self.latch.as_ref().unwrap().clone()
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single-threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
    #[inline]
    pub fn signal_complete(self) {
        if let Some(latch) = self.latch {
            latch.set();
        }
    }
}
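
// How the pieces fit together, as a sketch: a thread that finds a query
// already executing obtains a `QueryLatch` via `QueryJob::latch` and blocks in
// `QueryLatch::wait_on`; the thread executing the query calls
// `QueryJob::signal_complete` (and thus `QueryLatch::set`) once it finishes,
// waking every registered waiter. With a single thread there is never anyone
// to wait, which is why `latch` stays `None` and `signal_complete` is a no-op.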

impl QueryJobId {
    pub(super) fn find_cycle_in_stack<I: Clone>(
        &self,
        query_map: QueryMap<I>,
        current_job: &Option<QueryJobId>,
        span: Span,
    ) -> CycleError<I> {
        // Find the waitee amongst `current_job`'s parents
        let mut cycle = Vec::new();
        let mut current_job = Option::clone(current_job);

        while let Some(job) = current_job {
            let info = query_map.get(&job).unwrap();
            cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });

            if job == *self {
                cycle.reverse();

                // This is the end of the cycle.
                // The span entry we included was for the usage
                // of the cycle itself, and not part of the cycle.
                // Replace it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used
                let usage = info
                    .job
                    .parent
                    .as_ref()
                    .map(|parent| (info.job.span, parent.query(&query_map)));
                return CycleError { usage, cycle };
            }

            current_job = info.job.parent;
        }

        panic!("did not find a cycle")
    }
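
    // For intuition: if the active stack is `root -> a -> b -> c` and `c` tries
    // to re-enter `a`, this walks `c`'s parent chain collecting `[c, b, a]`,
    // reverses it to `[a, b, c]`, and patches `a`'s span to the span at which
    // `c` requested `a` again. The `usage` is then `(span, root)`: the query
    // outside the cycle that first demanded `a`.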

    #[cold]
    #[inline(never)]
    pub fn find_dep_kind_root<I: Clone>(&self, query_map: QueryMap<I>) -> (QueryJobInfo<I>, usize) {
        let mut depth = 1;
        let info = query_map.get(&self).unwrap();
        let dep_kind = info.query.dep_kind;
        let mut current_id = info.job.parent;
        let mut last_layout = (info.clone(), depth);

        while let Some(id) = current_id {
            let info = query_map.get(&id).unwrap();
            if info.query.dep_kind == dep_kind {
                depth += 1;
                last_layout = (info.clone(), depth);
            }
            current_id = info.job.parent;
        }
        last_layout
    }
}
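
// `find_dep_kind_root` walks up the parent chain and returns the outermost
// ancestor job with the same `dep_kind` as `self`, plus the number of
// same-kind jobs seen along the way (at least 1, counting `self`). This lets
// depth-limit diagnostics point at the start of a deeply recursive query
// chain rather than at the frame where the limit was finally hit.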

#[derive(Debug)]
struct QueryWaiter<I> {
    query: Option<QueryJobId>,
    condvar: Condvar,
    span: Span,
    cycle: Mutex<Option<CycleError<I>>>,
}

#[derive(Debug)]
struct QueryLatchInfo<I> {
    complete: bool,
    waiters: Vec<Arc<QueryWaiter<I>>>,
}

#[derive(Debug)]
pub(super) struct QueryLatch<I> {
    info: Arc<Mutex<QueryLatchInfo<I>>>,
}

impl<I> Clone for QueryLatch<I> {
    fn clone(&self) -> Self {
        Self { info: Arc::clone(&self.info) }
    }
}

impl<I> QueryLatch<I> {
    fn new() -> Self {
        QueryLatch {
            info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }

    /// Waits for the query job to complete.
    pub(super) fn wait_on(
        &self,
        query: Option<QueryJobId>,
        span: Span,
    ) -> Result<(), CycleError<I>> {
        let waiter =
            Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
        self.wait_on_inner(&waiter);
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter,
        // although another thread may still have an Arc reference, so we
        // cannot use Arc::get_mut.
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }

    /// Blocks the current thread, waiting on this latch until it is set.
    fn wait_on_inner(&self, waiter: &Arc<QueryWaiter<I>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(Arc::clone(waiter));

            // If this detects a deadlock and the deadlock handler wants to resume this thread
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // getting the self.info lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`
            drop(info);
            jobserver::acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it.
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            rayon_core::mark_unblocked(&registry);
            waiter.condvar.notify_one();
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<I>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters
        info.waiters.remove(waiter)
    }
}

/// A resumable waiter of a query. The `usize` is the index into `waiters` in the query's latch.
type Waiter = (QueryJobId, usize);

/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query`
/// and a span indicating the reason the query waited on `query`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
fn visit_waiters<I, F>(
    query_map: &QueryMap<I>,
    query: QueryJobId,
    mut visit: F,
) -> Option<Option<Waiter>>
where
    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}
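
// Return convention, spelled out: `None` means no `visit` call found anything;
// `Some(None)` propagates a found cycle that (so far) has no resumable edge;
// `Some(Some((query, i)))` marks the i-th latch waiter of `query` as a
// resumable edge on the cycle, which `remove_cycle` can extract and wake.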

/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the span of the reason `query` is executing. This is initially `DUMMY_SP`.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
fn cycle_check<I>(
    query_map: &QueryMap<I>,
    query: QueryJobId,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId)>,
    visited: &mut FxHashSet<QueryJobId>,
) -> Option<Option<Waiter>> {
    if !visited.insert(query) {
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle, fix up the initial span and return Some

            // Remove previous stack entries
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // The query was just marked as visited; add it to the stack
    stack.push((span, query));

    // Visit all the waiters
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle
    if r.is_none() {
        stack.pop();
    }

    r
}
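
// Worked example: suppose `a` called `b`, `b` called `c` (parent edges), and
// `c` is now blocked on `a`'s latch. `cycle_check(a)` pushes `a`, follows the
// latch edge to `c`, then `c`'s parent edge to `b`, then `b`'s parent edge
// back to `a`. The revisit of `a` finds it on the stack, patches its span, and
// returns `Some(None)`; unwinding through the latch edge upgrades this to
// `Some(Some((a, i)))`, identifying a waiter that can be resumed to break the
// cycle.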

/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
fn connected_to_root<I>(
    query_map: &QueryMap<I>,
    query: QueryJobId,
    visited: &mut FxHashSet<QueryJobId>,
) -> bool {
    // We already visited this or we're deliberately ignoring it
    if !visited.insert(query) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true
    if query.parent(query_map).is_none() {
        return true;
    }

    visit_waiters(query_map, query, |_, successor| {
        connected_to_root(query_map, successor, visited).then_some(None)
    })
    .is_some()
}
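
// Note: `remove_cycle` pre-seeds `visited` with every query on the cycle, so
// "connected to the root" here really means reachable from code outside the
// cycle, not merely having some parent chain.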

// Deterministically pick a query from a list
fn pick_query<'a, I: Clone, T, F>(query_map: &QueryMap<I>, queries: &'a [T], f: F) -> &'a T
where
    F: Fn(&T) -> (Span, QueryJobId),
{
    // Deterministically pick an entry point
    // FIXME: Sort this instead
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let hash = query.query(query_map).hash;
            // Prefer entry points which have valid spans for nicer error messages
            // We add an integer to the tuple ensuring that entry points
            // with valid spans are picked first
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, hash)
        })
        .unwrap()
}

/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
fn remove_cycle<I: Clone>(
    query_map: &QueryMap<I>,
    jobs: &mut Vec<QueryJobId>,
    wakelist: &mut Vec<Arc<QueryWaiter<I>>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee
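        // (`rotate_right(1)` moves each span one slot forward, pairing every
        // query with the span of the edge to the query it is waiting on)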
        spans.rotate_right(1);

        // Zip them back together
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent)
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters who lead to the root
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();

        // Deterministically pick an entry point
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));

        // Create the cycle error
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be at least one
        // resumable edge, i.e. one waited on using a query latch
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume
        wakelist.push(waiter);

        true
    } else {
        false
    }
}
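
// End-to-end, `remove_cycle` thus: finds one cycle via `cycle_check`, orients
// it behind a deterministic entry point so diagnostics are stable across runs,
// stores the `CycleError` into one latch waiter on the cycle, and queues that
// waiter for wakeup; the woken thread then surfaces the error from `wait_on`.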

/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
pub fn break_query_cycles<I: Clone + Debug>(
    query_map: QueryMap<I>,
    registry: &rayon_core::Registry,
) {
    let mut wakelist = Vec::new();
    let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();

    let mut found_cycle = false;

    while !jobs.is_empty() {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    if !found_cycle {
        panic!(
            "deadlock detected as we're unable to find a query cycle to break\n\
            current query map:\n{:#?}",
            query_map
        );
    }

    // Mark all the threads we're about to wake up as unblocked. This needs to be done before
    // we wake the threads up as otherwise Rayon could detect a deadlock if a thread we
    // resumed fell asleep and this thread had yet to mark the remaining threads as unblocked.
    for _ in 0..wakelist.len() {
        rayon_core::mark_unblocked(registry);
    }

    for waiter in wakelist.into_iter() {
        waiter.condvar.notify_one();
    }
}
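
// This is intended to run from the Rayon deadlock handler: once every worker
// thread is blocked, the handler collects the active query map and calls
// `break_query_cycles` to turn the deadlock into reportable cycle errors.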

#[inline(never)]
#[cold]
pub fn report_cycle<'a>(
    sess: &'a Session,
    CycleError { usage, cycle: stack }: &CycleError,
) -> Diag<'a> {
    assert!(!stack.is_empty());

    let span = stack[0].query.info.default_span(stack[1 % stack.len()].span);

    let mut cycle_stack = Vec::new();

    use crate::error::StackCount;
    let stack_count = if stack.len() == 1 { StackCount::Single } else { StackCount::Multiple };

    for i in 1..stack.len() {
        let query = &stack[i].query;
        let span = query.info.default_span(stack[(i + 1) % stack.len()].span);
        cycle_stack.push(CycleStack { span, desc: query.info.description.to_owned() });
    }

    let mut cycle_usage = None;
    if let Some((span, ref query)) = *usage {
        cycle_usage = Some(crate::error::CycleUsage {
            span: query.info.default_span(span),
            usage: query.info.description.to_string(),
        });
    }

    let alias =
        if stack.iter().all(|entry| matches!(entry.query.info.def_kind, Some(DefKind::TyAlias))) {
            Some(crate::error::Alias::Ty)
        } else if stack.iter().all(|entry| entry.query.info.def_kind == Some(DefKind::TraitAlias)) {
            Some(crate::error::Alias::Trait)
        } else {
            None
        };

    let cycle_diag = crate::error::Cycle {
        span,
        cycle_stack,
        stack_bottom: stack[0].query.info.description.to_owned(),
        alias,
        cycle_usage,
        stack_count,
        note_span: (),
    };

    sess.dcx().create_err(cycle_diag)
}
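
// For reference, the diagnostic this builds renders roughly as:
//
//     error[E0391]: cycle detected when <description of stack[0]>
//     note: ...which requires <description of stack[1]>...
//     note: ...which again requires <description of stack[0]>, completing the cycle
//
// (the exact wording comes from the `crate::error::Cycle` diagnostic messages)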

pub fn print_query_stack<Qcx: QueryContext>(
    qcx: Qcx,
    mut current_query: Option<QueryJobId>,
    dcx: DiagCtxtHandle<'_>,
    limit_frames: Option<usize>,
    mut file: Option<std::fs::File>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `DiagCtxt` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut count_printed = 0;
    let mut count_total = 0;

    // Make use of a partial query map if we fail to take locks collecting active queries.
    let query_map = match qcx.collect_active_jobs() {
        Ok(query_map) => query_map,
        Err(query_map) => query_map,
    };

    if let Some(ref mut file) = file {
        let _ = writeln!(file, "\n\nquery stack during panic:");
    }
    while let Some(query) = current_query {
        let Some(query_info) = query_map.get(&query) else {
            break;
        };
        let query_extra = qcx.lift_query_info(&query_info.query.info);
        if Some(count_printed) < limit_frames || limit_frames.is_none() {
            // Only print to stderr as many stack frames as `limit_frames` when present.
            // FIXME: needs translation
            #[allow(rustc::diagnostic_outside_of_impl)]
            #[allow(rustc::untranslatable_diagnostic)]
            dcx.struct_failure_note(format!(
                "#{} [{:?}] {}",
                count_printed, query_info.query.dep_kind, query_extra.description
            ))
            .with_span(query_info.job.span)
            .emit();
            count_printed += 1;
        }

        if let Some(ref mut file) = file {
            let _ = writeln!(
                file,
                "#{} [{}] {}",
                count_total,
                qcx.dep_context().dep_kind_info(query_info.query.dep_kind).name,
                query_extra.description
            );
        }

        current_query = query_info.job.parent;
        count_total += 1;
    }

    if let Some(ref mut file) = file {
        let _ = writeln!(file, "end of query stack");
    }
    count_total
}