
rustc_query_system/dep_graph/graph.rs

use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU64, Lock};
use rustc_data_structures::unord::UnordMap;
use rustc_data_structures::{assert_matches, outline};
use rustc_errors::DiagInner;
use rustc_index::IndexVec;
use rustc_macros::{Decodable, Encodable};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_session::Session;
use tracing::{debug, instrument};
#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
use crate::dep_graph::edges::EdgesVec;
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffect};

pub struct DepGraph<D: Deps> {
    data: Option<Arc<DepGraphData<D>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Arc<AtomicU32>,
}

/// Manual clone impl that does not require `D: Clone`.
impl<D: Deps> Clone for DepGraph<D> {
    fn clone(&self) -> Self {
        let Self { data, virtual_dep_node_index } = self;
        Self {
            data: Option::<Arc<_>>::clone(data),
            virtual_dep_node_index: Arc::clone(virtual_dep_node_index),
        }
    }
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}
}

// We store a large collection of these in `prev_index_to_index` during
// non-full incremental builds, and want to ensure that the element size
// doesn't inadvertently increase.
rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);

impl DepNodeIndex {
    const SINGLETON_ZERO_DEPS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

impl From<DepNodeIndex> for QueryInvocationId {
    #[inline(always)]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

pub struct MarkFrame<'a> {
    index: SerializedDepNodeIndex,
    parent: Option<&'a MarkFrame<'a>>,
}

#[derive(Debug)]
pub(super) enum DepNodeColor {
    Green(DepNodeIndex),
    Red,
    Unknown,
}

pub(crate) struct DepGraphData<D: Deps> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph<D>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: Arc<SerializedDepGraph>,

    colors: DepNodeColorMap,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: WorkProductMap,

    dep_node_debug: Lock<FxHashMap<DepNode, String>>,

    /// Used by incremental compilation tests to assert that
    /// a particular query result was decoded from disk
    /// (not just marked green).
    debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
}

pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}
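
// Illustrative sketch (not rustc code): a minimal model of `hash_result`
// above using only the standard library. rustc's `StableHasher`/`Fingerprint`
// are stood in for by `DefaultHasher` and `u64`, which are NOT stable across
// processes; the names below are invented for the example.
mod hash_result_sketch {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    /// Hash an arbitrary (hashable) query result down to a single fingerprint
    /// value, mirroring `hash_result`'s hasher-in, fingerprint-out contract.
    pub fn hash_result_model<R: Hash>(result: &R) -> u64 {
        let mut hasher = DefaultHasher::new();
        result.hash(&mut hasher);
        hasher.finish()
    }
}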

impl<D: Deps> DepGraph<D> {
    pub fn new(
        session: &Session,
        prev_graph: Arc<SerializedDepGraph>,
        prev_work_products: WorkProductMap,
        encoder: FileEncoder,
    ) -> DepGraph<D> {
        let prev_graph_node_count = prev_graph.node_count();

        let current =
            CurrentDepGraph::new(session, prev_graph_node_count, encoder, Arc::clone(&prev_graph));

        let colors = DepNodeColorMap::new(prev_graph_node_count);

        // Instantiate a node with zero dependencies only once for anonymous queries.
        let _green_node_index = current.alloc_new_node(
            DepNode { kind: D::DEP_KIND_ANON_ZERO_DEPS, hash: current.anon_id_seed.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE);

        // Instantiate a dependency-less red node only once for anonymous queries.
        let red_node_index = current.alloc_new_node(
            DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        if prev_graph_node_count > 0 {
            colors.insert_red(SerializedDepNodeIndex::from_u32(
                DepNodeIndex::FOREVER_RED_NODE.as_u32(),
            ));
        }

        DepGraph {
            data: Some(Arc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                previous: prev_graph,
                colors,
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<D> {
        DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
    }

    #[inline]
    pub(crate) fn data(&self) -> Option<&DepGraphData<D>> {
        self.data.as_deref()
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
        if let Some(data) = &self.data {
            data.current.encoder.with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            D::read_deps(|task_deps| {
                assert_matches!(
                    task_deps,
                    TaskDepsRef::Ignore,
                    "expected no task dependency tracking"
                );
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Ignore, op)
    }

    /// Used to wrap the deserialization of a query result from disk.
    /// This method enforces that no new `DepNodes` are created during
    /// query result deserialization.
    ///
    /// Enforcing this makes the query dep graph simpler - all nodes
    /// must be created during the query execution, and should be
    /// created from inside the 'body' of a query (the implementation
    /// provided by a particular compiler crate).
    ///
    /// Consider the case of three queries `A`, `B`, and `C`, where
    /// `A` invokes `B` and `B` invokes `C`:
    ///
    /// `A -> B -> C`
    ///
    /// Suppose that decoding the result of query `B` required re-computing
    /// the query `C`. If we did not create a fresh `TaskDeps` when
    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
    /// (if we needed to re-execute `A`). This would cause us to create
    /// a new edge `A -> C`. If this edge did not previously
    /// exist in the `DepGraph`, then we could end up with a different
    /// `DepGraph` at the end of compilation, even if there were no
    /// meaningful changes to the overall program (e.g. a newline was added).
    /// In addition, this edge might cause a subsequent compilation run
    /// to try to force `C` before marking other necessary nodes green. If
    /// `C` did not exist in the new compilation session, then we could
    /// get an ICE. Normally, we would have tried (and failed) to mark
    /// some other query green (e.g. `item_children`) which was used
    /// to obtain `C`, which would prevent us from ever trying to force
    /// the nonexistent `C`.
    ///
    /// It might be possible to enforce that all `DepNode`s read during
    /// deserialization already exist in the previous `DepGraph`. In
    /// the above example, we would invoke `C` during the deserialization
    /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
    /// of `B`, this would result in an edge `B -> C`. If that edge already
    /// existed (with the same `DepPathHash`es), then it should be correct
    /// to allow the invocation of the query to proceed during deserialization
    /// of a query result. We would merely assert that the dep-graph fragment
    /// that would have been added by invoking `C` while decoding `B`
    /// is equivalent to the dep-graph fragment that we already instantiated for `B`
    /// (at the point where we successfully marked `B` as green).
    ///
    /// However, this would require additional complexity
    /// in the query infrastructure, and is not currently needed by the
    /// decoding of any query results. Should the need arise in the future,
    /// we should consider extending the query system with this functionality.
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Forbid, op)
    }

    #[inline(always)]
    pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        match self.data() {
            Some(data) => data.with_task(key, cx, arg, task, hash_result),
            None => (task(cx, arg), self.next_virtual_depnode_index()),
        }
    }

    pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        match self.data() {
            Some(data) => {
                let (result, index) = data.with_anon_task_inner(cx, dep_kind, op);
                self.read_index(index);
                (result, index)
            }
            None => (op(), self.next_virtual_depnode_index()),
        }
    }
}
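
// Illustrative sketch (not rustc code): a minimal model of the dependency-
// tracking modes used by `with_task`, `with_ignore`, and
// `with_query_deserialization` above. All names (`TrackMode`, `record_read`,
// `decode_with_forbid`) are invented for the example; the point is only that
// running deserialization under a "forbid" mode turns any stray dependency
// read into a hard error instead of silently attaching a new edge to the
// caller's task.
mod task_deps_sketch {
    pub enum TrackMode<'a> {
        /// Record reads into the current task's edge list.
        Allow(&'a mut Vec<u32>),
        /// Drop reads on the floor (e.g. eval_always tasks, ignored regions).
        Ignore,
        /// Reads indicate a bug: decoding a cached result must not create edges.
        Forbid,
    }

    pub fn record_read(mode: &mut TrackMode<'_>, dep_index: u32) {
        match mode {
            TrackMode::Allow(reads) => reads.push(dep_index),
            TrackMode::Ignore => {}
            TrackMode::Forbid => panic!("dependency read during query result decoding"),
        }
    }

    /// Mirrors `with_query_deserialization`: the decode closure runs with
    /// dependency recording forbidden.
    pub fn decode_with_forbid<R>(decode: impl FnOnce(&mut TrackMode<'_>) -> R) -> R {
        decode(&mut TrackMode::Forbid)
    }
}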

impl<D: Deps> DepGraphData<D> {
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation.html
    #[inline(always)]
    pub(crate) fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        self.assert_dep_node_not_yet_allocated_in_current_session(&key, || {
            format!(
                "forcing query with already existing `DepNode`\n\
                 - query-key: {arg:?}\n\
                 - dep-node: {key:?}"
            )
        });

        let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
        let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
            (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
        } else {
            let task_deps = Lock::new(TaskDeps::new(
                #[cfg(debug_assertions)]
                Some(key),
                0,
            ));
            (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
        };

        let dcx = cx.dep_context();
        let dep_node_index = self.hash_result_and_alloc_node(dcx, key, edges, &result, hash_result);

        (result, dep_node_index)
    }

    /// Executes something within an "anonymous" task, that is, a task whose
    /// `DepNode` is determined by the list of inputs it read from.
    ///
    /// NOTE: this does not actually count as a read of the DepNode here.
    /// Using the result of this task without reading the DepNode will result
    /// in untracked dependencies which may lead to ICEs as nodes are
    /// incorrectly marked green.
    ///
    /// FIXME: This could perhaps return a `WithDepNode` to ensure that the
    /// user of this function actually performs the read; we'll have to see
    /// how to make that work with `anon` in `execute_job_incr`, though.
    pub(crate) fn with_anon_task_inner<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        // Large numbers of reads are common enough here that pre-sizing `read_set`
        // to 128 actually helps perf on some benchmarks.
        let task_deps = Lock::new(TaskDeps::new(
            #[cfg(debug_assertions)]
            None,
            128,
        ));
        let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
        let task_deps = task_deps.into_inner();
        let reads = task_deps.reads;

        let dep_node_index = match reads.len() {
            0 => {
                // Because the dep-node id of anon nodes is computed from the set of its
                // dependencies, we already know what the ID of this dependency-less node is
                // going to be (i.e. equal to the precomputed
                // `SINGLETON_ZERO_DEPS_ANON_NODE`). As a consequence we can skip creating
                // a `StableHasher` and sending the node through interning.
                DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE
            }
            1 => {
                // When there is only one dependency, don't bother creating a node.
                reads[0]
            }
            _ => {
                // The dep node indices are hashed here instead of hashing the dep nodes of the
                // dependencies. These indices may refer to different nodes per session, but this
                // isn't a problem here because we ensure that the final dep node hash is
                // session-specific by combining it with the per-session random number
                // `anon_id_seed`. This hash only needs to map the dependencies to a single
                // value on a per-session basis.
                let mut hasher = StableHasher::new();
                reads.hash(&mut hasher);

                let target_dep_node = DepNode {
                    kind: dep_kind,
                    // Fingerprint::combine() is faster than sending Fingerprint
                    // through the StableHasher (at least as long as StableHasher
                    // is so slow).
                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                };

                // The DepNodes generated by the process above are not unique. Two queries could
                // have exactly the same dependencies. However, deserialization does not handle
                // duplicated nodes, so we do the deduplication here directly.
                //
                // As anonymous nodes are a small quantity compared to the full dep-graph, the
                // memory impact of this `anon_node_to_index` map remains tolerable, and helps
                // us avoid useless growth of the graph with almost-equivalent nodes.
                self.current.anon_node_to_index.get_or_insert_with(target_dep_node, || {
                    self.current.alloc_new_node(target_dep_node, reads, Fingerprint::ZERO)
                })
            }
        };

        (result, dep_node_index)
    }

    /// Intern the new `DepNode` with the dependencies up-to-now.
    fn hash_result_and_alloc_node<Ctxt: DepContext<Deps = D>, R>(
        &self,
        cx: &Ctxt,
        node: DepNode,
        edges: EdgesVec,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        let hashing_timer = cx.profiler().incr_result_hashing();
        let current_fingerprint = hash_result.map(|hash_result| {
            cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
        });
        let dep_node_index = self.alloc_and_color_node(node, edges, current_fingerprint);
        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
        dep_node_index
    }
}
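
// Illustrative sketch (not rustc code): how an anonymous node's identity can
// be derived purely from its list of dependencies, as in the three-way match
// in `with_anon_task_inner` above. Names are invented; rustc's
// `StableHasher`/`anon_id_seed`/`anon_node_to_index` are modeled with std's
// `DefaultHasher`, a plain `u64` seed, and a `HashMap`.
mod anon_node_sketch {
    use std::collections::HashMap;
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    pub struct AnonNodes {
        pub session_seed: u64,        // stand-in for the per-session `anon_id_seed`
        pub dedup: HashMap<u64, u32>, // hash of deps -> already-allocated node index
        pub next_index: u32,
    }

    impl AnonNodes {
        pub fn intern(&mut self, singleton_zero_deps: u32, reads: &[u32]) -> u32 {
            match reads {
                // No dependencies: the precomputed singleton node.
                [] => singleton_zero_deps,
                // Exactly one dependency: reuse that node, no new node needed.
                [only] => *only,
                // Otherwise: hash the dependency indices, mix in the session
                // seed, and deduplicate nodes with identical dependency sets.
                _ => {
                    let mut hasher = DefaultHasher::new();
                    self.session_seed.hash(&mut hasher);
                    reads.hash(&mut hasher);
                    let key = hasher.finish();

                    let next_index = &mut self.next_index;
                    *self.dedup.entry(key).or_insert_with(|| {
                        let index = *next_index;
                        *next_index += 1;
                        index
                    })
                }
            }
        }
    }
}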

impl<D: Deps> DepGraph<D> {
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::EvalAlways => {
                        // We don't need to record dependencies of eval_always
                        // queries. They are re-evaluated unconditionally anyway.
                        return;
                    }
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        // Reading is forbidden in this context. ICE with a useful error message.
                        panic_on_forbidden_read(data, dep_node_index)
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
                }

                // Has `dep_node_index` been seen before? Use either a linear scan or a hashset
                // lookup to determine this. See `TaskDeps::read_set` for details.
                let new_read = if task_deps.reads.len() <= TaskDeps::LINEAR_SCAN_MAX {
                    !task_deps.reads.contains(&dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == TaskDeps::LINEAR_SCAN_MAX + 1 {
                        // Fill `read_set` with what we have so far. Future lookups will use it.
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node
                            && let Some(ref forbidden_edge) = data.current.forbidden_edge
                        {
                            let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                            if forbidden_edge.test(&src, &target) {
                                panic!("forbidden edge {:?} -> {:?} created", src, target)
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
                }
            })
        }
    }

    /// This encodes a diagnostic by creating a node with a unique index and associating
    /// `diagnostic` with it, for use in the next session.
    #[inline]
    pub fn record_diagnostic<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        diagnostic: &DiagInner,
    ) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return,
                TaskDepsRef::Forbid | TaskDepsRef::Allow(..) => {
                    self.read_index(data.encode_diagnostic(qcx, diagnostic));
                }
            })
        }
    }

    /// This forces a diagnostic node green by running its side effect. `prev_index` would
    /// refer to a node created using `encode_diagnostic` in the previous session.
    #[inline]
    pub fn force_diagnostic_node<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        if let Some(ref data) = self.data {
            data.force_diagnostic_node(qcx, prev_index);
        }
    }

    /// Create a node when we force-feed a value into the query cache.
    /// This is used to remove cycles during type-checking const generic parameters.
    ///
    /// As usual in the query system, we consider that the current state of the calling query
    /// only depends on the list of dependencies up to now. As a consequence, the value
    /// that this query gives us can only depend on those dependencies too. Therefore,
    /// it is sound to use the current dependency set for the created node.
    ///
    /// During replay, the order of the nodes is relevant in the dependency graph.
    /// So the unchanged replay will mark the caller query before trying to mark this one.
    /// If there is a change to report, the caller query will be re-executed before this one.
    ///
    /// FIXME: If the code is changed enough for this node to be marked before requiring the
    /// caller's node, we suppose that those changes will be enough to mark this node red and
    /// force a recomputation using the "normal" way.
    pub fn with_feed_task<Ctxt: DepContext<Deps = D>, R: Debug>(
        &self,
        node: DepNode,
        cx: Ctxt,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(data) = self.data.as_ref() {
            // The caller query has more dependencies than the node we are creating. We may
            // encounter a case where this created node is marked as green, but the caller query is
            // subsequently marked as red or recomputed. In this case, we will end up feeding a
            // value to an existing node.
            //
            // For sanity, we still check that the loaded stable hash and the new one match.
            if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
                let dep_node_index = data.colors.current(prev_index);
                if let Some(dep_node_index) = dep_node_index {
                    crate::query::incremental_verify_ich(
                        cx,
                        data,
                        result,
                        prev_index,
                        hash_result,
                        |value| format!("{value:?}"),
                    );

                    #[cfg(debug_assertions)]
                    if hash_result.is_some() {
                        data.current.record_edge(
                            dep_node_index,
                            node,
                            data.prev_fingerprint_of(prev_index),
                        );
                    }

                    return dep_node_index;
                }
            }

            let mut edges = EdgesVec::new();
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::EvalAlways => {
                    edges.push(DepNodeIndex::FOREVER_RED_NODE);
                }
                TaskDepsRef::Ignore => {}
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")
                }
            });

            data.hash_result_and_alloc_node(&cx, node, edges, result, hash_result)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            self.next_virtual_depnode_index()
        }
    }
}
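
// Illustrative sketch (not rustc code): the hybrid duplicate check performed
// by `read_index` above - scan the small read list linearly until it grows
// past a threshold, then switch to a hash set for membership tests. The names
// and the concrete threshold value below are invented for the example; the
// real threshold lives in `TaskDeps::LINEAR_SCAN_MAX`.
mod read_dedup_sketch {
    use std::collections::HashSet;

    const LINEAR_SCAN_MAX: usize = 8; // example value only

    #[derive(Default)]
    pub struct Reads {
        list: Vec<u32>,
        set: HashSet<u32>, // only populated once `list` outgrows the threshold
    }

    impl Reads {
        pub fn record(&mut self, index: u32) {
            let new_read = if self.list.len() <= LINEAR_SCAN_MAX {
                !self.list.contains(&index)
            } else {
                self.set.insert(index)
            };
            if new_read {
                self.list.push(index);
                if self.list.len() == LINEAR_SCAN_MAX + 1 {
                    // Backfill the set with everything seen so far; later
                    // lookups go through the set.
                    self.set.extend(self.list.iter().copied());
                }
            }
        }
    }
}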

impl<D: Deps> DepGraphData<D> {
    fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            let current = self.colors.get(prev_index);
            assert_matches!(current, DepNodeColor::Unknown, "{}", msg())
        } else if let Some(nodes_in_current_session) = &self.current.nodes_in_current_session {
            outline(|| {
                let seen = nodes_in_current_session.lock().contains_key(dep_node);
                assert!(!seen, "{}", msg());
            });
        }
    }

    fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.colors.get(prev_index)
        } else {
            // This is a node that did not exist in the previous compilation session.
            DepNodeColor::Unknown
        }
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions
    #[inline]
    pub(crate) fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
        matches!(self.colors.get(prev_index), DepNodeColor::Green(_))
    }

    #[inline]
    pub(crate) fn prev_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
        self.previous.fingerprint_by_index(prev_index)
    }

    #[inline]
    pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> &DepNode {
        self.previous.index_to_node(prev_index)
    }

    pub(crate) fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
        self.debug_loaded_from_disk.lock().insert(dep_node);
    }

    /// This encodes a diagnostic by creating a node with a unique index and associating
    /// `diagnostic` with it, for use in the next session.
    #[inline]
    fn encode_diagnostic<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        diagnostic: &DiagInner,
    ) -> DepNodeIndex {
        // Use `send_new` so we get a unique index, even though the dep node is not unique.
        let dep_node_index = self.current.encoder.send_new(
            DepNode {
                kind: D::DEP_KIND_SIDE_EFFECT,
                hash: PackedFingerprint::from(Fingerprint::ZERO),
            },
            Fingerprint::ZERO,
            // We want the side effect node to always be red so it will be forced and emit the
            // diagnostic.
            std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
        );
        let side_effect = QuerySideEffect::Diagnostic(diagnostic.clone());
        qcx.store_side_effect(dep_node_index, side_effect);
        dep_node_index
    }

    /// This forces a diagnostic node green by running its side effect. `prev_index` would
    /// refer to a node created using `encode_diagnostic` in the previous session.
    #[inline]
    fn force_diagnostic_node<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        D::with_deps(TaskDepsRef::Ignore, || {
            let side_effect = qcx.load_side_effect(prev_index).unwrap();

            match &side_effect {
                QuerySideEffect::Diagnostic(diagnostic) => {
                    qcx.dep_context().sess().dcx().emit_diagnostic(diagnostic.clone());
                }
            }

            // Use `send_and_color` as `promote_node_and_deps_to_current` expects all
            // green dependencies. `send_and_color` will also prevent multiple nodes
            // being encoded for concurrent calls.
            let dep_node_index = self.current.encoder.send_and_color(
                prev_index,
                &self.colors,
                DepNode {
                    kind: D::DEP_KIND_SIDE_EFFECT,
                    hash: PackedFingerprint::from(Fingerprint::ZERO),
                },
                Fingerprint::ZERO,
                std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
                true,
            );
            // This will just overwrite the same value for concurrent calls.
            qcx.store_side_effect(dep_node_index, side_effect);
        })
    }

    fn alloc_and_color_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(prev_index) = self.previous.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
            let is_green = if let Some(fingerprint) = fingerprint {
                if fingerprint == self.previous.fingerprint_by_index(prev_index) {
                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    true
                } else {
                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    false
                }
            } else {
                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                false
            };

            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            let dep_node_index = self.current.encoder.send_and_color(
                prev_index,
                &self.colors,
                key,
                fingerprint,
                edges,
                is_green,
            );

            self.current.record_node(dep_node_index, key, fingerprint);

            dep_node_index
        } else {
            self.current.alloc_new_node(key, edges, fingerprint.unwrap_or(Fingerprint::ZERO))
        }
    }

    fn promote_node_and_deps_to_current(&self, prev_index: SerializedDepNodeIndex) -> DepNodeIndex {
        self.current.debug_assert_not_in_new_nodes(&self.previous, prev_index);

        let dep_node_index = self.current.encoder.send_promoted(prev_index, &self.colors);

        #[cfg(debug_assertions)]
        self.current.record_edge(
            dep_node_index,
            *self.previous.index_to_node(prev_index),
            self.previous.fingerprint_by_index(prev_index),
        );

        dep_node_index
    }
}
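
// Illustrative sketch (not rustc code): the coloring decision that
// `alloc_and_color_node` above makes for a re-executed node that also existed
// in the previous session. Names are invented for the example; fingerprints
// are modeled as plain `u64`s.
mod coloring_sketch {
    #[derive(Debug, PartialEq, Eq)]
    pub enum Color {
        Green,
        Red,
    }

    /// `new_fingerprint` is `None` for `no_hash` queries, which can never be
    /// proven unchanged and are therefore treated as red.
    pub fn color_reexecuted_node(prev_fingerprint: u64, new_fingerprint: Option<u64>) -> Color {
        match new_fingerprint {
            Some(new) if new == prev_fingerprint => Color::Green,
            _ => Color::Red,
        }
    }
}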

impl<D: Deps> DepGraph<D> {
    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &WorkProductMap {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    pub fn debug_dep_kind_was_loaded_from_disk(&self, dep_kind: DepKind) -> bool {
        // We only check if we have a dep node corresponding to the given dep kind.
        #[allow(rustc::potential_query_instability)]
        self.data
            .as_ref()
            .unwrap()
            .debug_loaded_from_disk
            .lock()
            .iter()
            .any(|node| node.kind == dep_kind)
    }

    #[cfg(debug_assertions)]
    #[inline(always)]
    pub(crate) fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = self.with_ignore(debug_str_gen);
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
        if let Some(ref data) = self.data {
            return data.node_color(dep_node);
        }

        DepNodeColor::Unknown
    }

    pub fn try_mark_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
    }
}
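
// Illustrative sketch (not rustc code): the color dispatch performed by
// `DepGraphData::try_mark_green` below - reuse the existing index if the node
// is already green, give up if it is known red, and only attempt recursive
// marking when the color is still unknown. Names are invented and the
// recursive step is passed in as a closure.
mod try_mark_green_sketch {
    pub enum Color {
        Green(u32),
        Red,
        Unknown,
    }

    pub fn try_mark_green(
        color_of: impl Fn(u32) -> Color,
        mark_deps_green: impl Fn(u32) -> Option<u32>,
        prev_index: u32,
    ) -> Option<(u32, u32)> {
        match color_of(prev_index) {
            Color::Green(current_index) => Some((prev_index, current_index)),
            Color::Red => None,
            // Unknown: the node existed in the previous session; it can only be
            // marked green if all of its dependencies can be marked green first.
            Color::Unknown => {
                mark_deps_green(prev_index).map(|current_index| (prev_index, current_index))
            }
        }
    }
}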

impl<D: Deps> DepGraphData<D> {
    /// Try to mark a node index for the node `dep_node`.
    ///
    /// A node will have an index when it has already been marked green, or when we can mark
    /// it green. This function will mark the current task as a reader of the specified node
    /// when a node index can be found for that node.
    pub(crate) fn try_mark_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        // Return None if the dep node didn't exist in the previous session.
        let prev_index = self.previous.node_to_index_opt(dep_node)?;

        debug_assert_eq!(self.previous.index_to_node(prev_index), dep_node);

        match self.colors.get(prev_index) {
            DepNodeColor::Green(dep_node_index) => Some((prev_index, dep_node_index)),
            DepNodeColor::Red => None,
            DepNodeColor::Unknown => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(qcx, prev_index, None)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
    fn try_mark_parent_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
        &self,
        qcx: Qcx,
        parent_dep_node_index: SerializedDepNodeIndex,
        frame: &MarkFrame<'_>,
    ) -> Option<()> {
        let get_dep_dep_node = || self.previous.index_to_node(parent_dep_node_index);

        match self.colors.get(parent_dep_node_index) {
            DepNodeColor::Green(_) => {
                // Already marked green in this session; nothing more to check
                // for this dependency.
                debug!("dependency {:?} was immediately green", get_dep_dep_node());
                return Some(());
            }
            DepNodeColor::Red => {
                // The dependency changed compared to the previous session, so
                // the parent node cannot be marked green.
                debug!("dependency {:?} was immediately red", get_dep_dep_node());
                return None;
            }
            DepNodeColor::Unknown => {}
        }

        let dep_dep_node = get_dep_dep_node();

        // The color of this dependency is not known yet. If it is not an
        // eval_always node, try to mark it green recursively.
        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "state of dependency {:?} ({}) is unknown, trying to mark it green",
                dep_dep_node, dep_dep_node.hash
            );

            let node_index =
                self.try_mark_previous_green(qcx, parent_dep_node_index, Some(frame));

            if node_index.is_some() {
                debug!("managed to MARK dependency {:?} as green", dep_dep_node);
                return Some(());
            }
        }
                                        ::tracing_core::callsite::Identifier(&__CALLSITE)),
                                    ::tracing::metadata::Kind::EVENT)
                            };
                        ::tracing::callsite::DefaultCallsite::new(&META)
                    };
                let enabled =
                    ::tracing::Level::DEBUG <=
                                ::tracing::level_filters::STATIC_MAX_LEVEL &&
                            ::tracing::Level::DEBUG <=
                                ::tracing::level_filters::LevelFilter::current() &&
                        {
                            let interest = __CALLSITE.interest();
                            !interest.is_never() &&
                                ::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
                                    interest)
                        };
                if enabled {
                    (|value_set: ::tracing::field::ValueSet|
                                {
                                    let meta = __CALLSITE.metadata();
                                    ::tracing::Event::dispatch(meta, &value_set);
                                    ;
                                })({
                            #[allow(unused_imports)]
                            use ::tracing::field::{debug, display, Value};
                            let mut iter = __CALLSITE.metadata().fields().iter();
                            __CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
                                                ::tracing::__macro_support::Option::Some(&format_args!("trying to force dependency {0:?}",
                                                                dep_dep_node) as &dyn Value))])
                        });
                } else { ; }
            };
            if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node,
                        parent_dep_node_index, frame) {
                {
                    use ::tracing::__macro_support::Callsite as _;
                    static __CALLSITE: ::tracing::callsite::DefaultCallsite =
                        {
                            static META: ::tracing::Metadata<'static> =
                                {
                                    ::tracing_core::metadata::Metadata::new("event compiler/rustc_query_system/src/dep_graph/graph.rs:945",
                                        "rustc_query_system::dep_graph::graph",
                                        ::tracing::Level::DEBUG,
                                        ::tracing_core::__macro_support::Option::Some("compiler/rustc_query_system/src/dep_graph/graph.rs"),
                                        ::tracing_core::__macro_support::Option::Some(945u32),
                                        ::tracing_core::__macro_support::Option::Some("rustc_query_system::dep_graph::graph"),
                                        ::tracing_core::field::FieldSet::new(&["message"],
                                            ::tracing_core::callsite::Identifier(&__CALLSITE)),
                                        ::tracing::metadata::Kind::EVENT)
                                };
                            ::tracing::callsite::DefaultCallsite::new(&META)
                        };
                    let enabled =
                        ::tracing::Level::DEBUG <=
                                    ::tracing::level_filters::STATIC_MAX_LEVEL &&
                                ::tracing::Level::DEBUG <=
                                    ::tracing::level_filters::LevelFilter::current() &&
                            {
                                let interest = __CALLSITE.interest();
                                !interest.is_never() &&
                                    ::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
                                        interest)
                            };
                    if enabled {
                        (|value_set: ::tracing::field::ValueSet|
                                    {
                                        let meta = __CALLSITE.metadata();
                                        ::tracing::Event::dispatch(meta, &value_set);
                                        ;
                                    })({
                                #[allow(unused_imports)]
                                use ::tracing::field::{debug, display, Value};
                                let mut iter = __CALLSITE.metadata().fields().iter();
                                __CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
                                                    ::tracing::__macro_support::Option::Some(&format_args!("dependency {0:?} could not be forced",
                                                                    dep_dep_node) as &dyn Value))])
                            });
                    } else { ; }
                };
                return None;
            }
            match self.colors.get(parent_dep_node_index) {
                DepNodeColor::Green(_) => {
                    {
                        use ::tracing::__macro_support::Callsite as _;
                        static __CALLSITE: ::tracing::callsite::DefaultCallsite =
                            {
                                static META: ::tracing::Metadata<'static> =
                                    {
                                        ::tracing_core::metadata::Metadata::new("event compiler/rustc_query_system/src/dep_graph/graph.rs:951",
                                            "rustc_query_system::dep_graph::graph",
                                            ::tracing::Level::DEBUG,
                                            ::tracing_core::__macro_support::Option::Some("compiler/rustc_query_system/src/dep_graph/graph.rs"),
                                            ::tracing_core::__macro_support::Option::Some(951u32),
                                            ::tracing_core::__macro_support::Option::Some("rustc_query_system::dep_graph::graph"),
                                            ::tracing_core::field::FieldSet::new(&["message"],
                                                ::tracing_core::callsite::Identifier(&__CALLSITE)),
                                            ::tracing::metadata::Kind::EVENT)
                                    };
                                ::tracing::callsite::DefaultCallsite::new(&META)
                            };
                        let enabled =
                            ::tracing::Level::DEBUG <=
                                        ::tracing::level_filters::STATIC_MAX_LEVEL &&
                                    ::tracing::Level::DEBUG <=
                                        ::tracing::level_filters::LevelFilter::current() &&
                                {
                                    let interest = __CALLSITE.interest();
                                    !interest.is_never() &&
                                        ::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
                                            interest)
                                };
                        if enabled {
                            (|value_set: ::tracing::field::ValueSet|
                                        {
                                            let meta = __CALLSITE.metadata();
                                            ::tracing::Event::dispatch(meta, &value_set);
                                            ;
                                        })({
                                    #[allow(unused_imports)]
                                    use ::tracing::field::{debug, display, Value};
                                    let mut iter = __CALLSITE.metadata().fields().iter();
                                    __CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
                                                        ::tracing::__macro_support::Option::Some(&format_args!("managed to FORCE dependency {0:?} to green",
                                                                        dep_dep_node) as &dyn Value))])
                                });
                        } else { ; }
                    };
                    return Some(());
                }
                DepNodeColor::Red => {
                    {
                        use ::tracing::__macro_support::Callsite as _;
                        static __CALLSITE: ::tracing::callsite::DefaultCallsite =
                            {
                                static META: ::tracing::Metadata<'static> =
                                    {
                                        ::tracing_core::metadata::Metadata::new("event compiler/rustc_query_system/src/dep_graph/graph.rs:955",
                                            "rustc_query_system::dep_graph::graph",
                                            ::tracing::Level::DEBUG,
                                            ::tracing_core::__macro_support::Option::Some("compiler/rustc_query_system/src/dep_graph/graph.rs"),
                                            ::tracing_core::__macro_support::Option::Some(955u32),
                                            ::tracing_core::__macro_support::Option::Some("rustc_query_system::dep_graph::graph"),
                                            ::tracing_core::field::FieldSet::new(&["message"],
                                                ::tracing_core::callsite::Identifier(&__CALLSITE)),
                                            ::tracing::metadata::Kind::EVENT)
                                    };
                                ::tracing::callsite::DefaultCallsite::new(&META)
                            };
                        let enabled =
                            ::tracing::Level::DEBUG <=
                                        ::tracing::level_filters::STATIC_MAX_LEVEL &&
                                    ::tracing::Level::DEBUG <=
                                        ::tracing::level_filters::LevelFilter::current() &&
                                {
                                    let interest = __CALLSITE.interest();
                                    !interest.is_never() &&
                                        ::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
                                            interest)
                                };
                        if enabled {
                            (|value_set: ::tracing::field::ValueSet|
                                        {
                                            let meta = __CALLSITE.metadata();
                                            ::tracing::Event::dispatch(meta, &value_set);
                                            ;
                                        })({
                                    #[allow(unused_imports)]
                                    use ::tracing::field::{debug, display, Value};
                                    let mut iter = __CALLSITE.metadata().fields().iter();
                                    __CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
                                                        ::tracing::__macro_support::Option::Some(&format_args!("dependency {0:?} was red after forcing",
                                                                        dep_dep_node) as &dyn Value))])
                                });
                        } else { ; }
                    };
                    return None;
                }
                DepNodeColor::Unknown => {}
            }
            if let None =
                    qcx.dep_context().sess().dcx().has_errors_or_delayed_bugs()
                {
                {
                    ::core::panicking::panic_fmt(format_args!("try_mark_previous_green() - Forcing the DepNode should have set its color"));
                }
            }
            {
                use ::tracing::__macro_support::Callsite as _;
                static __CALLSITE: ::tracing::callsite::DefaultCallsite =
                    {
                        static META: ::tracing::Metadata<'static> =
                            {
                                ::tracing_core::metadata::Metadata::new("event compiler/rustc_query_system/src/dep_graph/graph.rs:975",
                                    "rustc_query_system::dep_graph::graph",
                                    ::tracing::Level::DEBUG,
                                    ::tracing_core::__macro_support::Option::Some("compiler/rustc_query_system/src/dep_graph/graph.rs"),
                                    ::tracing_core::__macro_support::Option::Some(975u32),
                                    ::tracing_core::__macro_support::Option::Some("rustc_query_system::dep_graph::graph"),
                                    ::tracing_core::field::FieldSet::new(&["message"],
                                        ::tracing_core::callsite::Identifier(&__CALLSITE)),
                                    ::tracing::metadata::Kind::EVENT)
                            };
                        ::tracing::callsite::DefaultCallsite::new(&META)
                    };
                let enabled =
                    ::tracing::Level::DEBUG <=
                                ::tracing::level_filters::STATIC_MAX_LEVEL &&
                            ::tracing::Level::DEBUG <=
                                ::tracing::level_filters::LevelFilter::current() &&
                        {
                            let interest = __CALLSITE.interest();
                            !interest.is_never() &&
                                ::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
                                    interest)
                        };
                if enabled {
                    (|value_set: ::tracing::field::ValueSet|
                                {
                                    let meta = __CALLSITE.metadata();
                                    ::tracing::Event::dispatch(meta, &value_set);
                                    ;
                                })({
                            #[allow(unused_imports)]
                            use ::tracing::field::{debug, display, Value};
                            let mut iter = __CALLSITE.metadata().fields().iter();
                            __CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
                                                ::tracing::__macro_support::Option::Some(&format_args!("dependency {0:?} resulted in compilation error",
                                                                dep_dep_node) as &dyn Value))])
                        });
                } else { ; }
            };
            return None;
        }
    }
}#[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
892    fn try_mark_parent_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
893        &self,
894        qcx: Qcx,
895        parent_dep_node_index: SerializedDepNodeIndex,
896        frame: &MarkFrame<'_>,
897    ) -> Option<()> {
898        let get_dep_dep_node = || self.previous.index_to_node(parent_dep_node_index);
899
900        match self.colors.get(parent_dep_node_index) {
901            DepNodeColor::Green(_) => {
902                // This dependency has been marked as green before, we are
903                // still fine and can continue with checking the other
904                // dependencies.
905                //
906                // This path is extremely hot. We don't want to get the
907                // `dep_dep_node` unless it's necessary. Hence the
908                // `get_dep_dep_node` closure.
909                debug!("dependency {:?} was immediately green", get_dep_dep_node());
910                return Some(());
911            }
912            DepNodeColor::Red => {
913                // We found a dependency the value of which has changed
914                // compared to the previous compilation session. We cannot
915                // mark the DepNode as green and also don't need to bother
916                // with checking any of the other dependencies.
917                debug!("dependency {:?} was immediately red", get_dep_dep_node());
918                return None;
919            }
920            DepNodeColor::Unknown => {}
921        }
922
923        let dep_dep_node = get_dep_dep_node();
924
925        // We don't know the state of this dependency. If it isn't
926        // an eval_always node, let's try to mark it green recursively.
927        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
928            debug!(
929                "state of dependency {:?} ({}) is unknown, trying to mark it green",
930                dep_dep_node, dep_dep_node.hash,
931            );
932
933            let node_index = self.try_mark_previous_green(qcx, parent_dep_node_index, Some(frame));
934
935            if node_index.is_some() {
936                debug!("managed to MARK dependency {dep_dep_node:?} as green");
937                return Some(());
938            }
939        }
940
941        // We failed to mark it green, so we try to force the query.
942        debug!("trying to force dependency {dep_dep_node:?}");
943        if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, parent_dep_node_index, frame) {
944            // The DepNode could not be forced.
945            debug!("dependency {dep_dep_node:?} could not be forced");
946            return None;
947        }
948
949        match self.colors.get(parent_dep_node_index) {
950            DepNodeColor::Green(_) => {
951                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
952                return Some(());
953            }
954            DepNodeColor::Red => {
955                debug!("dependency {dep_dep_node:?} was red after forcing");
956                return None;
957            }
958            DepNodeColor::Unknown => {}
959        }
960
961        if let None = qcx.dep_context().sess().dcx().has_errors_or_delayed_bugs() {
962            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
963        }
964
965        // If the query we just forced has resulted in
966        // some kind of compilation error, we cannot rely on
967        // the dep-node color having been properly updated.
968        // This means that the query system has reached an
969        // invalid state. We let the compiler continue (by
970        // returning `None`) so it can emit error messages
971        // and wind down, but rely on the fact that this
972        // invalid state will not be persisted to the
973        // incremental compilation cache because of
974        // compilation errors being present.
975        debug!("dependency {dep_dep_node:?} resulted in compilation error");
976        return None;
977    }
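    // Recap of the ladder above with one concrete path through it: suppose the
    // dependency is `Unknown` and not `eval_always`. We first recurse via
    // `try_mark_previous_green`; if that succeeds, the dependency is green and we
    // return `Some(())`. Otherwise we force the query behind it: if forcing fails
    // we return `None`, and if it succeeds we re-read the color, returning
    // `Some(())` for green, `None` for red, and treating a still-`Unknown` color
    // as the compilation-error bailout handled at the end of the function.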
978
979    /// Try to mark a dep-node which existed in the previous compilation session as green.
980    #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
981    fn try_mark_previous_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
982        &self,
983        qcx: Qcx,
984        prev_dep_node_index: SerializedDepNodeIndex,
985        frame: Option<&MarkFrame<'_>>,
986    ) -> Option<DepNodeIndex> {
987        let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
988
989        // We never try to mark eval_always nodes as green
990        debug_assert!(
991            !qcx.dep_context()
992                .is_eval_always(self.previous.index_to_node(prev_dep_node_index).kind)
993        );
994
995        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);
996
997        for dep_dep_node_index in prev_deps {
998            self.try_mark_parent_green(qcx, dep_dep_node_index, &frame)?;
999        }
1000
1001        // If we got here without hitting a `return`, that means that all
1002        // dependencies of this DepNode could be marked as green. Therefore we
1003        // can also mark this DepNode as green.
1004
1005        // There may be multiple threads trying to mark the same dep node green concurrently
1006
1007        // We allocate an entry for the node in the current dependency graph and
1008        // add all the appropriate edges imported from the previous graph.
1009        let dep_node_index = self.promote_node_and_deps_to_current(prev_dep_node_index);
1010
1011        // ... and finally storing a "Green" entry in the color map.
1012        // Multiple threads can all write the same color here
1013
1014        debug!(
1015            "successfully marked {:?} as green",
1016            self.previous.index_to_node(prev_dep_node_index)
1017        );
1018        Some(dep_node_index)
1019    }
1020}
1021
1022impl<D: Deps> DepGraph<D> {
1023    /// Returns true if the given node has been marked as red during the
1024    /// current compilation session. Used in various assertions
1025    pub fn is_red(&self, dep_node: &DepNode) -> bool {
1026        matches!(self.node_color(dep_node), DepNodeColor::Red)
1027    }
1028
1029    /// Returns true if the given node has been marked as green during the
1030    /// current compilation session. Used in various assertions
1031    pub fn is_green(&self, dep_node: &DepNode) -> bool {
1032        matches!(self.node_color(dep_node), DepNodeColor::Green(_))
1033    }
1034
1035    pub fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
1036        &self,
1037        dep_node: &DepNode,
1038        msg: impl FnOnce() -> S,
1039    ) {
1040        if let Some(data) = &self.data {
1041            data.assert_dep_node_not_yet_allocated_in_current_session(dep_node, msg)
1042        }
1043    }
1044
1045    /// This method loads all on-disk cacheable query results into memory, so
1046    /// they can be written out to the new cache file again. Most query results
1047    /// will already be in memory but in the case where we marked something as
1048    /// green but then did not need the value, that value will never have been
1049    /// loaded from disk.
1050    ///
1051    /// This method will only load queries that will end up in the disk cache.
1052    /// Other queries will not be executed.
1053    pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
1054        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
1055
1056        let data = self.data.as_ref().unwrap();
1057        for prev_index in data.colors.values.indices() {
1058            match data.colors.get(prev_index) {
1059                DepNodeColor::Green(_) => {
1060                    let dep_node = data.previous.index_to_node(prev_index);
1061                    tcx.try_load_from_on_disk_cache(dep_node);
1062                }
1063                DepNodeColor::Unknown | DepNodeColor::Red => {
1064                    // We can skip red nodes because a node can only be marked
1065                    // as red if the query result was recomputed and thus is
1066                    // already in memory.
1067                }
1068            }
1069        }
1070    }
1071
1072    pub fn finish_encoding(&self) -> FileEncodeResult {
1073        if let Some(data) = &self.data { data.current.encoder.finish(&data.current) } else { Ok(0) }
1074    }
1075
1076    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
1077        debug_assert!(self.data.is_none());
1078        let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
1079        DepNodeIndex::from_u32(index)
1080    }
1081}
1082
1083/// A "work product" is an intermediate result that we save into the
1084/// incremental directory for later re-use. The primary example are
1085/// the object files that we save for each partition at code
1086/// generation time.
1087///
1088/// Each work product is associated with a dep-node, representing the
1089/// process that produced the work-product. If that dep-node is found
1090/// to be dirty when we load up, then we will delete the work-product
1091/// at load time. If the work-product is found to be clean, then we
1092/// will keep a record in the `previous_work_products` list.
1093///
1094/// In addition, work products have an associated hash. This hash is
1095/// an extra hash that can be used to decide if the work-product from
1096/// a previous compilation can be re-used (in addition to the dirty
1097/// edges check).
1098///
1099/// As the primary example, consider the object files we generate for
1100/// each partition. In the first run, we create partitions based on
1101/// the symbols that need to be compiled. For each partition P, we
1102/// hash the symbols in P and create a `WorkProduct` record associated
1103/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
1104/// in P.
1105///
1106/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
1107/// judged to be clean (which means none of the things we read to
1108/// generate the partition were found to be dirty), it will be loaded
1109/// into previous work products. We will then regenerate the set of
1110/// symbols in the partition P and hash them (note that new symbols
1111/// may be added -- for example, new monomorphizations -- even if
1112/// nothing in P changed!). We will compare that hash against the
1113/// previous hash. If it matches up, we can reuse the object file.
1114#[derive(Clone, Debug, Encodable, Decodable)]
1115pub struct WorkProduct {
1116    pub cgu_name: String,
1117    /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
1118    /// saved file and the key is some identifier for the type of file being saved.
1119    ///
1120    /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
1121    /// the object file's path, and "dwo" to the dwarf object file's path.
1122    pub saved_files: UnordMap<String, String>,
1123}
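// Illustrative shape of a `WorkProduct` for one codegen unit, following the
// "file extension as identifier" convention documented on `saved_files` (the
// concrete names below are hypothetical):
//
//     cgu_name:    "my_crate.abc123-cgu.0"
//     saved_files: { "o"   -> "my_crate.abc123-cgu.0.o",
//                    "dwo" -> "my_crate.abc123-cgu.0.dwo" }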
1124
1125pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;
1126
1127// Index type for `DepNodeData`'s edges.
1128rustc_index::newtype_index! {
1129    struct EdgeIndex {}
1130}
1131
1132/// `CurrentDepGraph` stores the dependency graph for the current session. It
1133/// will be populated as we run queries or tasks. We never remove nodes from the
1134/// graph: they are only added.
1135///
1136/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
1137/// in memory. This is important, because these graph structures are some of the
1138/// largest in the compiler.
1139///
1140/// For this reason, we avoid storing `DepNode`s more than once as map
1141/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
1142/// graph, and we map nodes in the previous graph to indices via a two-step
1143/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
1144/// and the `prev_index_to_index` vector (which is more compact and faster than
1145/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
1146///
1147/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
1148/// and `prev_index_to_index` fields are locked separately. Operations that take
1149/// a `DepNodeIndex` typically just access the `data` field.
1150///
1151/// We only need to manipulate at most two locks simultaneously:
1152/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
1153/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
1154/// first, and `data` second.
1155pub(super) struct CurrentDepGraph<D: Deps> {
1156    encoder: GraphEncoder<D>,
1157    anon_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,
1158
1159    /// This is used to verify that fingerprints do not change between the creation of a node
1160    /// and its recomputation.
1161    #[cfg(debug_assertions)]
1162    fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,
1163
1164    /// Used to trap when a specific edge is added to the graph.
1165    /// This is used for debug purposes and is only active with `debug_assertions`.
1166    #[cfg(debug_assertions)]
1167    forbidden_edge: Option<EdgeFilter>,
1168
1169    /// Used to verify the absence of hash collisions among DepNodes.
1170    /// This field is only `Some` if the `-Z incremental_verify_ich` option is present
1171    /// or if `debug_assertions` are enabled.
1172    ///
1173    /// The map contains all DepNodes that have been allocated in the current session so far.
1174    nodes_in_current_session: Option<Lock<FxHashMap<DepNode, DepNodeIndex>>>,
1175
1176    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
1177    /// their edges. This has the beneficial side-effect that multiple anonymous
1178    /// nodes can be coalesced into one without changing the semantics of the
1179    /// dependency graph. However, the merging of nodes can lead to a subtle
1180    /// problem during red-green marking: The color of an anonymous node from
1181    /// the current session might "shadow" the color of the node with the same
1182    /// ID from the previous session. In order to side-step this problem, we make
1183    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
1184    /// This is implemented by mixing a session-key into the ID fingerprint of
1185    /// each anon node. The session-key is a hash of the number of previous sessions.
1186    anon_id_seed: Fingerprint,
1187
1188    /// These are simple counters that are for profiling and
1189    /// debugging and only active with `debug_assertions`.
1190    pub(super) total_read_count: AtomicU64,
1191    pub(super) total_duplicate_read_count: AtomicU64,
1192}
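// Note on the lock ordering documented above: acquiring `data` last in both
// pairings is the usual consistent-lock-ordering discipline. As long as every
// thread that needs two of these locks takes them in that same relative order,
// no cycle of threads waiting on each other can form, so the two pairings
// cannot deadlock against one another.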
1193
1194impl<D: Deps> CurrentDepGraph<D> {
1195    fn new(
1196        session: &Session,
1197        prev_graph_node_count: usize,
1198        encoder: FileEncoder,
1199        previous: Arc<SerializedDepGraph>,
1200    ) -> Self {
1201        let mut stable_hasher = StableHasher::new();
1202        previous.session_count().hash(&mut stable_hasher);
1203        let anon_id_seed = stable_hasher.finish();
1204
1205        #[cfg(debug_assertions)]
1206        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
1207            Ok(s) => match EdgeFilter::new(&s) {
1208                Ok(f) => Some(f),
1209                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
1210            },
1211            Err(_) => None,
1212        };
1213
1214        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;
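        // For example, with 10_000 previous-session nodes this reserves
        // 10_000 * 102 / 100 + 200 = 10_400 entries: roughly 2% growth headroom
        // plus a small constant so tiny or fresh sessions still get some capacity.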
1215
1216        let new_node_dbg =
1217            session.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions);
1218
1219        CurrentDepGraph {
1220            encoder: GraphEncoder::new(session, encoder, prev_graph_node_count, previous),
1221            anon_node_to_index: ShardedHashMap::with_capacity(
1222                // FIXME: The count estimate is off as anon nodes are only a portion of the nodes.
1223                new_node_count_estimate / sharded::shards(),
1224            ),
1225            anon_id_seed,
1226            #[cfg(debug_assertions)]
1227            forbidden_edge,
1228            #[cfg(debug_assertions)]
1229            fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
1230            nodes_in_current_session: new_node_dbg.then(|| {
1231                Lock::new(FxHashMap::with_capacity_and_hasher(
1232                    new_node_count_estimate,
1233                    Default::default(),
1234                ))
1235            }),
1236            total_read_count: AtomicU64::new(0),
1237            total_duplicate_read_count: AtomicU64::new(0),
1238        }
1239    }
1240
1241    #[cfg(debug_assertions)]
1242    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
1243        if let Some(forbidden_edge) = &self.forbidden_edge {
1244            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
1245        }
1246        let previous = *self.fingerprints.lock().get_or_insert_with(dep_node_index, || fingerprint);
1247        assert_eq!(previous, fingerprint, "Unstable fingerprints for {:?}", key);
1248    }
1249
1250    #[inline(always)]
1251    fn record_node(
1252        &self,
1253        dep_node_index: DepNodeIndex,
1254        key: DepNode,
1255        _current_fingerprint: Fingerprint,
1256    ) {
1257        #[cfg(debug_assertions)]
1258        self.record_edge(dep_node_index, key, _current_fingerprint);
1259
1260        if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
1261            outline(|| {
1262                if nodes_in_current_session.lock().insert(key, dep_node_index).is_some() {
1263                    panic!("Found duplicate dep-node {key:?}");
1264                }
1265            });
1266        }
1267    }
1268
1269    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
1270    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
1271    #[inline(always)]
1272    fn alloc_new_node(
1273        &self,
1274        key: DepNode,
1275        edges: EdgesVec,
1276        current_fingerprint: Fingerprint,
1277    ) -> DepNodeIndex {
1278        let dep_node_index = self.encoder.send_new(key, current_fingerprint, edges);
1279
1280        self.record_node(dep_node_index, key, current_fingerprint);
1281
1282        dep_node_index
1283    }
1284
1285    #[inline]
1286    fn debug_assert_not_in_new_nodes(
1287        &self,
1288        prev_graph: &SerializedDepGraph,
1289        prev_index: SerializedDepNodeIndex,
1290    ) {
1291        if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
1292            debug_assert!(
1293                !nodes_in_current_session
1294                    .lock()
1295                    .contains_key(&prev_graph.index_to_node(prev_index)),
1296                "node from previous graph present in new node collection"
1297            );
1298        }
1299    }
1300}
1301
1302#[derive(Debug, Clone, Copy)]
1303pub enum TaskDepsRef<'a> {
1304    /// New dependencies can be added to the
1305    /// `TaskDeps`. This is used when executing a 'normal' query
1306    /// (no `eval_always` modifier)
1307    Allow(&'a Lock<TaskDeps>),
1308    /// This is used when executing an `eval_always` query. We don't
1309    /// need to track dependencies for a query that's always
1310    /// re-executed -- but we need to know that this is an `eval_always`
1311    /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
1312    /// when directly feeding other queries.
1313    EvalAlways,
1314    /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
1315    Ignore,
1316    /// Any attempt to add new dependencies will cause a panic.
1317    /// This is used when decoding a query result from disk,
1318    /// to ensure that the decoding process doesn't itself
1319    /// require the execution of any queries.
1320    Forbid,
1321}
1322
1323#[derive(Debug)]
1324pub struct TaskDeps {
1325    #[cfg(debug_assertions)]
1326    node: Option<DepNode>,
1327
1328    /// A vector of `DepNodeIndex`, basically.
1329    reads: EdgesVec,
1330
1331    /// When adding new edges to `reads` in `DepGraph::read_index` we need to determine if the edge
1332    /// has been seen before. If the number of elements in `reads` is small, we just do a linear
1333    /// scan. If the number is higher, a hashset has better perf. This field is that hashset. It's
1334    /// only used if the number of elements in `reads` exceeds `LINEAR_SCAN_MAX`.
1335    read_set: FxHashSet<DepNodeIndex>,
1336
1337    phantom_data: PhantomData<DepNode>,
1338}
1339
1340impl TaskDeps {
1341    /// See `TaskDeps::read_set` above.
1342    const LINEAR_SCAN_MAX: usize = 16;
1343
1344    #[inline]
1345    fn new(#[cfg(debug_assertions)] node: Option<DepNode>, read_set_capacity: usize) -> Self {
1346        TaskDeps {
1347            #[cfg(debug_assertions)]
1348            node,
1349            reads: EdgesVec::new(),
1350            read_set: FxHashSet::with_capacity_and_hasher(read_set_capacity, Default::default()),
1351            phantom_data: PhantomData,
1352        }
1353    }
1354}
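// Worked example of the `LINEAR_SCAN_MAX` cutoff documented on `read_set`:
// while `reads` holds at most 16 indices, duplicate-edge checks are a linear
// scan over that small vector; once it grows beyond 16 entries, lookups go
// through the `read_set` hash set instead, trading a little memory for O(1)
// duplicate detection on read-heavy tasks.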
1355
1356// A data structure that stores `DepNodeColor` values as a contiguous
1357// array, using one u32 per entry.
1358pub(super) struct DepNodeColorMap {
1359    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
1360}
1361
1362// All values below `COMPRESSED_RED` are green.
1363const COMPRESSED_RED: u32 = u32::MAX - 1;
1364const COMPRESSED_UNKNOWN: u32 = u32::MAX;
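// Concrete examples of the encoding above: a stored value of `7` decodes to
// `DepNodeColor::Green(DepNodeIndex::from_u32(7))`, `u32::MAX - 1`
// (`COMPRESSED_RED`) decodes to `DepNodeColor::Red`, and `u32::MAX`
// (`COMPRESSED_UNKNOWN`) decodes to `DepNodeColor::Unknown`; see
// `DepNodeColorMap::get` below.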
1365
1366impl DepNodeColorMap {
1367    fn new(size: usize) -> DepNodeColorMap {
1368        debug_assert!(COMPRESSED_RED > DepNodeIndex::MAX_AS_U32);
1369        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_UNKNOWN)).collect() }
1370    }
1371
1372    #[inline]
1373    pub(super) fn current(&self, index: SerializedDepNodeIndex) -> Option<DepNodeIndex> {
1374        let value = self.values[index].load(Ordering::Relaxed);
1375        if value <= DepNodeIndex::MAX_AS_U32 { Some(DepNodeIndex::from_u32(value)) } else { None }
1376    }
1377
1378    /// This tries to atomically mark a node green and assign `index` as the new
1379    /// index. This returns `Ok` if `index` gets assigned, otherwise it returns
1380    /// the already allocated index in `Err`.
1381    #[inline]
1382    pub(super) fn try_mark_green(
1383        &self,
1384        prev_index: SerializedDepNodeIndex,
1385        index: DepNodeIndex,
1386    ) -> Result<(), DepNodeIndex> {
1387        let value = &self.values[prev_index];
1388        match value.compare_exchange(
1389            COMPRESSED_UNKNOWN,
1390            index.as_u32(),
1391            Ordering::Relaxed,
1392            Ordering::Relaxed,
1393        ) {
1394            Ok(_) => Ok(()),
1395            Err(v) => Err({
1396                assert_ne!(v, COMPRESSED_RED, "tried to mark a red node as green");
1397                DepNodeIndex::from_u32(v)
1398            }),
1399        }
1400    }
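    // Example of the race `try_mark_green` resolves: if two threads finish
    // re-validating the same previous node at the same time, both attempt the
    // `compare_exchange` from `COMPRESSED_UNKNOWN`. Exactly one CAS succeeds;
    // the other thread gets the winner's already-published `DepNodeIndex` back
    // in `Err` and can use that index instead of its own.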
1401
1402    #[inline]
1403    pub(super) fn get(&self, index: SerializedDepNodeIndex) -> DepNodeColor {
1404        let value = self.values[index].load(Ordering::Acquire);
1405        // Green is by far the most common case. Check for that first so we can succeed with a
1406        // single comparison.
1407        if value < COMPRESSED_RED {
1408            DepNodeColor::Green(DepNodeIndex::from_u32(value))
1409        } else if value == COMPRESSED_RED {
1410            DepNodeColor::Red
1411        } else {
1412            debug_assert_eq!(value, COMPRESSED_UNKNOWN);
1413            DepNodeColor::Unknown
1414        }
1415    }
1416
1417    #[inline]
1418    pub(super) fn insert_red(&self, index: SerializedDepNodeIndex) {
1419        let value = self.values[index].swap(COMPRESSED_RED, Ordering::Release);
1420        // Sanity check for duplicate nodes
1421        assert_eq!(value, COMPRESSED_UNKNOWN, "trying to encode a dep node twice");
1422    }
1423}
1424
1425#[inline(never)]
1426#[cold]
1427pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: &MarkFrame<'_>) {
1428    let data = graph.data.as_ref().unwrap();
1429
1430    eprintln!("there was a panic while trying to force a dep node");
1431    eprintln!("try_mark_green dep node stack:");
1432
1433    let mut i = 0;
1434    let mut current = Some(frame);
1435    while let Some(frame) = current {
1436        let node = data.previous.index_to_node(frame.index);
1437        eprintln!("#{i} {node:?}");
1438        current = frame.parent;
1439        i += 1;
1440    }
1441
1442    eprintln!("end of try_mark_green dep node stack");
1443}
1444
1445#[cold]
1446#[inline(never)]
1447fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepNodeIndex) -> ! {
1448    // We have to do an expensive reverse-lookup of the DepNode that
1449    // corresponds to `dep_node_index`, but that's OK since we are about
1450    // to ICE anyway.
1451    let mut dep_node = None;
1452
1453    // First try to find the dep node among those that already existed in the
1454    // previous session and have been marked green.
1455    for prev_index in data.colors.values.indices() {
1456        if data.colors.current(prev_index) == Some(dep_node_index) {
1457            dep_node = Some(*data.previous.index_to_node(prev_index));
1458            break;
1459        }
1460    }
1461
1462    if dep_node.is_none()
1463        && let Some(nodes) = &data.current.nodes_in_current_session
1464    {
1465        // Try to find it among the nodes allocated so far in this session
1466        // This is OK, there's only ever one node result possible so this is deterministic.
1467        #[allow(rustc::potential_query_instability)]
1468        if let Some((node, _)) = nodes.lock().iter().find(|&(_, index)| *index == dep_node_index) {
1469            dep_node = Some(*node);
1470        }
1471    }
1472
1473    let dep_node = dep_node.map_or_else(
1474        || format!("with index {:?}", dep_node_index),
1475        |dep_node| format!("`{:?}`", dep_node),
1476    );
1477
1478    panic!(
1479        "Error: trying to record dependency on DepNode {dep_node} in a \
1480         context that does not allow it (e.g. during query deserialization). \
1481         The most common case of recording a dependency on a DepNode `foo` is \
1482         when the corresponding query `foo` is invoked. Invoking queries is not \
1483         allowed as part of loading something from the incremental on-disk cache. \
1484         See <https://github.com/rust-lang/rust/pull/91919>."
1485    )
1486}