rustc_query_system/dep_graph/graph.rs

use std::assert_matches::assert_matches;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::outline;
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU64, Lock};
use rustc_data_structures::unord::UnordMap;
use rustc_errors::DiagInner;
use rustc_index::IndexVec;
use rustc_macros::{Decodable, Encodable};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_session::Session;
use tracing::{debug, instrument};
#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
use crate::dep_graph::edges::EdgesVec;
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffect};

#[derive(Clone)]
pub struct DepGraph<D: Deps> {
    data: Option<Arc<DepGraphData<D>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Arc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}
}

// We store a large collection of these in `prev_index_to_index` during
// non-full incremental builds, and want to ensure that the element size
// doesn't inadvertently increase.
rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);

impl DepNodeIndex {
    const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

impl From<DepNodeIndex> for QueryInvocationId {
    #[inline(always)]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

pub struct MarkFrame<'a> {
    index: SerializedDepNodeIndex,
    parent: Option<&'a MarkFrame<'a>>,
}

enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    #[inline]
    fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

pub(crate) struct DepGraphData<D: Deps> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph<D>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: Arc<SerializedDepGraph>,

    colors: DepNodeColorMap,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: WorkProductMap,

    dep_node_debug: Lock<FxHashMap<DepNode, String>>,

    /// Used by incremental compilation tests to assert that
    /// a particular query result was decoded from disk
    /// (not just marked green)
    debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
}

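/// Hashes a query result via its `HashStable` impl into the `Fingerprint`
/// used for red/green change detection.
///
/// # Example
///
/// A minimal sketch of the intended use, assuming some result value whose
/// type implements `HashStable` (illustrative only):
///
/// ```ignore (illustrative)
/// let fingerprint = cx.with_stable_hashing_context(|mut hcx| {
///     hash_result(&mut hcx, &result)
/// });
/// ```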
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}

impl<D: Deps> DepGraph<D> {
    pub fn new(
        session: &Session,
        prev_graph: Arc<SerializedDepGraph>,
        prev_work_products: WorkProductMap,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> DepGraph<D> {
        let prev_graph_node_count = prev_graph.node_count();

        let current = CurrentDepGraph::new(
            session,
            prev_graph_node_count,
            encoder,
            record_graph,
            record_stats,
            Arc::clone(&prev_graph),
        );

        let colors = DepNodeColorMap::new(prev_graph_node_count);

        // Instantiate a dependency-less node only once for anonymous queries.
        let _green_node_index = current.alloc_new_node(
            DepNode { kind: D::DEP_KIND_NULL, hash: current.anon_id_seed.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);

        // Instantiate a dependency-less red node only once for anonymous queries.
        let (red_node_index, red_node_prev_index_and_color) = current.intern_node(
            &prev_graph,
            DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
            EdgesVec::new(),
            None,
        );
        assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        match red_node_prev_index_and_color {
            None => {
                // This is expected when we have no previous compilation session.
                assert!(prev_graph_node_count == 0);
            }
            Some((prev_red_node_index, DepNodeColor::Red)) => {
                assert_eq!(prev_red_node_index.as_usize(), red_node_index.as_usize());
                colors.insert(prev_red_node_index, DepNodeColor::Red);
            }
            Some((_, DepNodeColor::Green(_))) => {
                // There must be a logic error somewhere if we hit this branch.
                panic!("DepNodeIndex::FOREVER_RED_NODE evaluated to DepNodeColor::Green")
            }
        }

        DepGraph {
            data: Some(Arc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                previous: prev_graph,
                colors,
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<D> {
        DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
    }

    #[inline]
    pub(crate) fn data(&self) -> Option<&DepGraphData<D>> {
        self.data.as_deref()
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
        if let Some(data) = &self.data {
            data.current.encoder.with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            D::read_deps(|task_deps| {
                assert_matches!(
                    task_deps,
                    TaskDepsRef::Ignore,
                    "expected no task dependency tracking"
                );
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Ignore, op)
    }

    /// Used to wrap the deserialization of a query result from disk.
    /// This method enforces that no new `DepNodes` are created during
    /// query result deserialization.
    ///
    /// Enforcing this makes the query dep graph simpler - all nodes
    /// must be created during the query execution, and should be
    /// created from inside the 'body' of a query (the implementation
    /// provided by a particular compiler crate).
    ///
    /// Consider the case of three queries `A`, `B`, and `C`, where
    /// `A` invokes `B` and `B` invokes `C`:
    ///
    /// `A -> B -> C`
    ///
    /// Suppose that decoding the result of query `B` required re-computing
    /// the query `C`. If we did not create a fresh `TaskDeps` when
    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
    /// (if we needed to re-execute `A`). This would cause us to create
    /// a new edge `A -> C`. If this edge did not previously
    /// exist in the `DepGraph`, then we could end up with a different
    /// `DepGraph` at the end of compilation, even if there were no
    /// meaningful changes to the overall program (e.g. a newline was added).
    /// In addition, this edge might cause a subsequent compilation run
    /// to try to force `C` before marking other necessary nodes green. If
    /// `C` did not exist in the new compilation session, then we could
    /// get an ICE. Normally, we would have tried (and failed) to mark
    /// some other query green (e.g. `item_children`) which was used
    /// to obtain `C`, which would prevent us from ever trying to force
    /// a nonexistent `C`.
    ///
    /// It might be possible to enforce that all `DepNode`s read during
    /// deserialization already exist in the previous `DepGraph`. In
    /// the above example, we would invoke `C` during the deserialization
    /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
    /// of `B`, this would result in an edge `B -> C`. If that edge already
    /// existed (with the same `DepPathHash`es), then it should be correct
    /// to allow the invocation of the query to proceed during deserialization
    /// of a query result. We would merely assert that the dep-graph fragment
    /// that would have been added by invoking `C` while decoding `B`
    /// is equivalent to the dep-graph fragment that we already instantiated for `B`
    /// (at the point where we successfully marked `B` as green).
    ///
    /// However, this would require additional complexity
    /// in the query infrastructure, and is not currently needed by the
    /// decoding of any query results. Should the need arise in the future,
    /// we should consider extending the query system with this functionality.
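    ///
    /// # Example
    ///
    /// A sketch of the intended wrapping (illustrative only; `decode_result`
    /// is a hypothetical decoding step):
    ///
    /// ```ignore (illustrative)
    /// let value = dep_graph.with_query_deserialization(|| {
    ///     // Recording a dependency read in here would ICE.
    ///     decode_result(&mut decoder)
    /// });
    /// ```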
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Forbid, op)
    }

    #[inline(always)]
    pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        match self.data() {
            Some(data) => data.with_task(key, cx, arg, task, hash_result),
            None => (task(cx, arg), self.next_virtual_depnode_index()),
        }
    }

    pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        match self.data() {
            Some(data) => {
                let (result, index) = data.with_anon_task_inner(cx, dep_kind, op);
                self.read_index(index);
                (result, index)
            }
            None => (op(), self.next_virtual_depnode_index()),
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation.html
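    ///
    /// # Example
    ///
    /// A minimal sketch of a call site, assuming a hypothetical free function
    /// `compute_item` with matching context and argument types (illustrative
    /// only):
    ///
    /// ```ignore (illustrative)
    /// fn compute_item(cx: Ctxt, item: DefId) -> R { /* ... */ }
    ///
    /// let (result, dep_node_index) =
    ///     data.with_task(key, cx, item, compute_item, Some(hash_result));
    /// ```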
    #[inline(always)]
    pub(crate) fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        self.assert_dep_node_not_yet_allocated_in_current_session(&key, || {
            format!(
                "forcing query with already existing `DepNode`\n\
                 - query-key: {arg:?}\n\
                 - dep-node: {key:?}"
            )
        });

        let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
        let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
            (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
        } else {
            let task_deps = Lock::new(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(key),
                reads: EdgesVec::new(),
                read_set: Default::default(),
                phantom_data: PhantomData,
            });
            (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
        };

        let dcx = cx.dep_context();
        let dep_node_index =
            self.hash_result_and_intern_node(dcx, key, edges, &result, hash_result);

        (result, dep_node_index)
    }

    /// Executes something within an "anonymous" task, that is, a task whose
    /// `DepNode` is determined by the list of inputs it read from.
    ///
    /// NOTE: this does not actually count as a read of the DepNode here.
    /// Using the result of this task without reading the DepNode will result
    /// in untracked dependencies which may lead to ICEs as nodes are
    /// incorrectly marked green.
    ///
    /// FIXME: This could perhaps return a `WithDepNode` to ensure that the
    /// user of this function actually performs the read; we'll have to see
    /// how to make that work with `anon` in `execute_job_incr`, though.
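    ///
    /// # Example
    ///
    /// A sketch of the read-after-use contract described above, mirroring what
    /// `DepGraph::with_anon_task` does (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let (result, index) = data.with_anon_task_inner(cx, dep_kind, || compute());
    /// // Without this read, the dependency on the anonymous node is untracked.
    /// dep_graph.read_index(index);
    /// ```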
    pub(crate) fn with_anon_task_inner<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        let task_deps = Lock::new(TaskDeps::default());
        let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
        let task_deps = task_deps.into_inner();
        let task_deps = task_deps.reads;

        let dep_node_index = match task_deps.len() {
            0 => {
                // Because the dep-node id of anon nodes is computed from the sets of its
                // dependencies we already know what the ID of this dependency-less node is
                // going to be (i.e. equal to the precomputed
                // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
                // a `StableHasher` and sending the node through interning.
                DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
            }
            1 => {
                // When there is only one dependency, don't bother creating a node.
                task_deps[0]
            }
            _ => {
                // The dep node indices are hashed here instead of hashing the dep nodes of the
                // dependencies. These indices may refer to different nodes per session, but this
                // isn't a problem here because we ensure that the final dep node hash is unique
                // per session by combining it with the per-session random number `anon_id_seed`.
                // This hash only needs to map the dependencies to a single value on a per-session
                // basis.
                let mut hasher = StableHasher::new();
                task_deps.hash(&mut hasher);

                let target_dep_node = DepNode {
                    kind: dep_kind,
                    // Fingerprint::combine() is faster than sending Fingerprint
                    // through the StableHasher (at least as long as StableHasher
                    // is so slow).
                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                };

                // The DepNodes generated by the process above are not unique. Two queries could
                // have exactly the same dependencies. However, deserialization does not handle
                // duplicated nodes, so we do the deduplication here directly.
                //
                // As anonymous nodes are a small quantity compared to the full dep-graph, the
                // memory impact of this `anon_node_to_index` map remains tolerable, and helps
                // us avoid useless growth of the graph with almost-equivalent nodes.
                self.current.anon_node_to_index.get_or_insert_with(target_dep_node, || {
                    self.current.alloc_new_node(target_dep_node, task_deps, Fingerprint::ZERO)
                })
            }
        };

        (result, dep_node_index)
    }

    /// Intern the new `DepNode` with the dependencies up-to-now.
    fn hash_result_and_intern_node<Ctxt: DepContext<Deps = D>, R>(
        &self,
        cx: &Ctxt,
        node: DepNode,
        edges: EdgesVec,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        let hashing_timer = cx.profiler().incr_result_hashing();
        let current_fingerprint = hash_result.map(|hash_result| {
            cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
        });

        // Intern the new `DepNode` with the dependencies up-to-now.
        let (dep_node_index, prev_and_color) =
            self.current.intern_node(&self.previous, node, edges, current_fingerprint);

        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some((prev_index, color)) = prev_and_color {
            debug_assert!(
                self.colors.get(prev_index).is_none(),
                "DepGraph::with_task() - Duplicate DepNodeColor insertion for {node:?}",
            );

            self.colors.insert(prev_index, color);
        }

        dep_node_index
    }
}

impl<D: Deps> DepGraph<D> {
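    /// Records that the currently active task reads the node `dep_node_index`,
    /// adding an edge from the task's node to it.
    ///
    /// Deduplication of reads uses a hybrid strategy; a simplified sketch of
    /// what the body below does (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let new_read = if reads.len() < EdgesVec::INLINE_CAPACITY {
    ///     // Few reads so far: a linear scan is cheaper than hashing.
    ///     reads.iter().all(|other| *other != dep_node_index)
    /// } else {
    ///     // Many reads: fall back to the `read_set` hash set.
    ///     read_set.insert(dep_node_index)
    /// };
    /// ```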
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::EvalAlways => {
                        // We don't need to record dependencies of eval_always
                        // queries. They are re-evaluated unconditionally anyway.
                        return;
                    }
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        // Reading is forbidden in this context. ICE with a useful error message.
                        panic_on_forbidden_read(data, dep_node_index)
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
                }

                // As long as we only have a low number of reads we can avoid doing a hash
                // insert and potentially allocating/reallocating the hashmap
                let new_read = if task_deps.reads.len() < EdgesVec::INLINE_CAPACITY {
                    task_deps.reads.iter().all(|other| *other != dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == EdgesVec::INLINE_CAPACITY {
                        // Fill `read_set` with what we have so far so we can use the hashset
                        // next time
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                if forbidden_edge.test(&src, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", src, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
                }
            })
        }
    }

    /// This encodes a diagnostic by creating a node with a unique index and associating
    /// `diagnostic` with it, for use in the next session.
    #[inline]
    pub fn record_diagnostic<Qcx: QueryContext>(&self, qcx: Qcx, diagnostic: &DiagInner) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return,
                TaskDepsRef::Forbid | TaskDepsRef::Allow(..) => {
                    self.read_index(data.encode_diagnostic(qcx, diagnostic));
                }
            })
        }
    }

    /// This forces a diagnostic node green by running its side effect. `prev_index` would
    /// refer to a node created using `encode_diagnostic` in the previous session.
    #[inline]
    pub fn force_diagnostic_node<Qcx: QueryContext>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        if let Some(ref data) = self.data {
            data.force_diagnostic_node(qcx, prev_index);
        }
    }

    /// Create a node when we force-feed a value into the query cache.
    /// This is used to remove cycles during type-checking const generic parameters.
    ///
    /// As usual in the query system, we consider that the current state of the calling
    /// query only depends on the list of dependencies up to now. As a consequence, the
    /// value that this query gives us can only depend on those dependencies too.
    /// Therefore, it is sound to use the current dependency set for the created node.
    ///
    /// During replay, the order of the nodes is relevant in the dependency graph.
    /// So the unchanged replay will mark the caller query before trying to mark this one.
    /// If there is a change to report, the caller query will be re-executed before this one.
    ///
    /// FIXME: If the code is changed enough for this node to be marked before requiring the
    /// caller's node, we suppose that those changes will be enough to mark this node red and
    /// force a recomputation using the "normal" way.
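    ///
    /// # Example
    ///
    /// A minimal sketch of feeding a value, assuming `node` and `result` were
    /// produced by the caller (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let dep_node_index =
    ///     tcx.dep_graph().with_feed_task(node, tcx, &result, Some(hash_result));
    /// ```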
    pub fn with_feed_task<Ctxt: DepContext<Deps = D>, R: Debug>(
        &self,
        node: DepNode,
        cx: Ctxt,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(data) = self.data.as_ref() {
            // The caller query has more dependencies than the node we are creating. We may
            // encounter a case where this created node is marked as green, but the caller query is
            // subsequently marked as red or recomputed. In this case, we will end up feeding a
            // value to an existing node.
            //
            // For sanity, we still check that the loaded stable hash and the new one match.
            if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
                let dep_node_index = data.current.prev_index_to_index.lock()[prev_index];
                if let Some(dep_node_index) = dep_node_index {
                    crate::query::incremental_verify_ich(
                        cx,
                        data,
                        result,
                        prev_index,
                        hash_result,
                        |value| format!("{value:?}"),
                    );

                    #[cfg(debug_assertions)]
                    if hash_result.is_some() {
                        data.current.record_edge(
                            dep_node_index,
                            node,
                            data.prev_fingerprint_of(prev_index),
                        );
                    }

                    return dep_node_index;
                }
            }

            let mut edges = EdgesVec::new();
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::EvalAlways => {
                    edges.push(DepNodeIndex::FOREVER_RED_NODE);
                }
                TaskDepsRef::Ignore => {}
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")
                }
            });

            data.hash_result_and_intern_node(&cx, node, edges, result, hash_result)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            self.next_virtual_depnode_index()
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            let current = self.current.prev_index_to_index.lock()[prev_index];
            assert!(current.is_none(), "{}", msg())
        } else if let Some(nodes_newly_allocated_in_current_session) =
            &self.current.nodes_newly_allocated_in_current_session
        {
            outline(|| {
                let seen = nodes_newly_allocated_in_current_session.lock().contains_key(dep_node);
                assert!(!seen, "{}", msg());
            });
        }
    }

    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.colors.get(prev_index)
        } else {
            // This is a node that did not exist in the previous compilation session.
            None
        }
    }

    /// Returns `true` if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    #[inline]
    pub(crate) fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
        self.colors.get(prev_index).is_some_and(|c| c.is_green())
    }

    #[inline]
    pub(crate) fn prev_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
        self.previous.fingerprint_by_index(prev_index)
    }

    #[inline]
    pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode {
        self.previous.index_to_node(prev_index)
    }

    pub(crate) fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
        self.debug_loaded_from_disk.lock().insert(dep_node);
    }

    /// This encodes a diagnostic by creating a node with a unique index and associating
    /// `diagnostic` with it, for use in the next session.
    #[inline]
    fn encode_diagnostic<Qcx: QueryContext>(
        &self,
        qcx: Qcx,
        diagnostic: &DiagInner,
    ) -> DepNodeIndex {
        // Use `send` so we get a unique index, even though the dep node is not unique.
        let dep_node_index = self.current.encoder.send(
            DepNode {
                kind: D::DEP_KIND_SIDE_EFFECT,
                hash: PackedFingerprint::from(Fingerprint::ZERO),
            },
            Fingerprint::ZERO,
            // We want the side effect node to always be red so it will be forced and emit the
            // diagnostic.
            std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
        );
        let side_effect = QuerySideEffect::Diagnostic(diagnostic.clone());
        qcx.store_side_effect(dep_node_index, side_effect);
        dep_node_index
    }

    /// This forces a diagnostic node green by running its side effect. `prev_index` would
    /// refer to a node created using `encode_diagnostic` in the previous session.
    #[inline]
    fn force_diagnostic_node<Qcx: QueryContext>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        D::with_deps(TaskDepsRef::Ignore, || {
            let side_effect = qcx.load_side_effect(prev_index).unwrap();

            match &side_effect {
                QuerySideEffect::Diagnostic(diagnostic) => {
                    qcx.dep_context().sess().dcx().emit_diagnostic(diagnostic.clone());
                }
            }

            // Promote the previous diagnostics to the current session.
            let index = self.current.promote_node_and_deps_to_current(&self.previous, prev_index);
            // FIXME: Can this race with a parallel compiler?
            qcx.store_side_effect(index, side_effect);

            // Mark the node as green.
            self.colors.insert(prev_index, DepNodeColor::Green(index));
        })
    }
}

impl<D: Deps> DepGraph<D> {
    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &WorkProductMap {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    #[cfg(debug_assertions)]
    #[inline(always)]
    pub(crate) fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = self.with_ignore(debug_str_gen);
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            return data.node_color(dep_node);
        }

        None
    }

    pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
    }
}

impl<D: Deps> DepGraphData<D> {
    /// Try to mark a node index for the node `dep_node`.
    ///
    /// A node will have an index when it has already been marked green, or when we can mark
    /// it green. This function will mark the current task as a reader of the specified node
    /// when a node index can be found for that node.
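    ///
    /// # Example
    ///
    /// A sketch of the caller's pattern (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// if let Some((prev_index, dep_node_index)) = data.try_mark_green(qcx, &dep_node) {
    ///     // Green: the previous result is still valid and may be reused.
    /// } else {
    ///     // Red or unknown: the query has to be re-executed.
    /// }
    /// ```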
    pub(crate) fn try_mark_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        // Return None if the dep node didn't exist in the previous session
        let prev_index = self.previous.node_to_index_opt(dep_node)?;

        match self.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(qcx, prev_index, dep_node, None)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
    fn try_mark_parent_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        parent_dep_node_index: SerializedDepNodeIndex,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<()> {
        let dep_dep_node_color = self.colors.get(parent_dep_node_index);
        let dep_dep_node = &self.previous.index_to_node(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                // This dependency has been marked as green before, we are
                // still fine and can continue with checking the other
                // dependencies.
                debug!("dependency {dep_dep_node:?} was immediately green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.
                debug!("dependency {dep_dep_node:?} was immediately red");
                return None;
            }
            None => {}
        }

        // We don't know the state of this dependency. If it isn't
        // an eval_always node, let's try to mark it green recursively.
        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "state of dependency {:?} ({}) is unknown, trying to mark it green",
                dep_dep_node, dep_dep_node.hash,
            );

            let node_index =
                self.try_mark_previous_green(qcx, parent_dep_node_index, dep_dep_node, frame);

            if node_index.is_some() {
                debug!("managed to MARK dependency {dep_dep_node:?} as green");
                return Some(());
            }
        }

        // We failed to mark it green, so we try to force the query.
        debug!("trying to force dependency {dep_dep_node:?}");
        if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, parent_dep_node_index, frame) {
            // The DepNode could not be forced.
            debug!("dependency {dep_dep_node:?} could not be forced");
            return None;
        }

        let dep_dep_node_color = self.colors.get(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                debug!("dependency {dep_dep_node:?} was red after forcing");
                return None;
            }
            None => {}
        }

        if let None = qcx.dep_context().sess().dcx().has_errors_or_delayed_bugs() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
        }

        // If the query we just forced has resulted in
        // some kind of compilation error, we cannot rely on
        // the dep-node color having been properly updated.
        // This means that the query system has reached an
        // invalid state. We let the compiler continue (by
        // returning `None`) so it can emit error messages
        // and wind down, but rely on the fact that this
        // invalid state will not be persisted to the
        // incremental compilation cache because of
        // compilation errors being present.
        debug!("dependency {dep_dep_node:?} resulted in compilation error");
        return None;
    }

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
    fn try_mark_previous_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<DepNodeIndex> {
        let frame = MarkFrame { index: prev_dep_node_index, parent: frame };

        // We never try to mark eval_always nodes as green
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        debug_assert_eq!(self.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);

        for dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(qcx, dep_dep_node_index, Some(&frame))?;
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently.

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph.
        let dep_node_index =
            self.current.promote_node_and_deps_to_current(&self.previous, prev_dep_node_index);

        // ... emitting any stored diagnostic ...

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here
        self.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("successfully marked {dep_node:?} as green");
        Some(dep_node_index)
    }
}

impl<D: Deps> DepGraph<D> {
    /// Returns `true` if the given node has been marked as red during the
    /// current compilation session. Used in various assertions.
    pub fn is_red(&self, dep_node: &DepNode) -> bool {
        matches!(self.node_color(dep_node), Some(DepNodeColor::Red))
    }

    /// Returns `true` if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    pub fn is_green(&self, dep_node: &DepNode) -> bool {
        self.node_color(dep_node).is_some_and(|c| c.is_green())
    }

    pub fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(data) = &self.data {
            data.assert_dep_node_not_yet_allocated_in_current_session(dep_node, msg)
        }
    }

    /// This method loads all on-disk cacheable query results into memory, so
    /// they can be written out to the new cache file again. Most query results
    /// will already be in memory but in the case where we marked something as
    /// green but then did not need the value, that value will never have been
    /// loaded from disk.
    ///
    /// This method will only load queries that will end up in the disk cache.
    /// Other queries will not be executed.
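    ///
    /// # Example
    ///
    /// A sketch of when this runs (illustrative only): once near the end of a
    /// successful session, before the on-disk cache is serialized.
    ///
    /// ```ignore (illustrative)
    /// tcx.dep_graph().exec_cache_promotions(tcx);
    /// ```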
    pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.print_incremental_info(
                data.current.total_read_count.load(Ordering::Relaxed),
                data.current.total_duplicate_read_count.load(Ordering::Relaxed),
            )
        }
    }

    pub fn finish_encoding(&self) -> FileEncodeResult {
        if let Some(data) = &self.data { data.current.encoder.finish() } else { Ok(0) }
    }

    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        debug_assert!(self.data.is_none());
        let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
        DepNodeIndex::from_u32(index)
    }
}

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example is
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is computed from the set
/// of symbols in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
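///
/// # Example
///
/// A sketch of what a record might hold; the CGU name and path are made-up
/// illustrative values:
///
/// ```ignore (illustrative)
/// let mut saved_files = UnordMap::default();
/// saved_files.insert("o".to_string(), "regex.f10ba03e-cgu.0.rcgu.o".to_string());
/// let work_product = WorkProduct {
///     cgu_name: "regex.f10ba03e-cgu.0".to_string(),
///     saved_files,
/// };
/// ```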
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
    /// saved file and the key is some identifier for the type of file being saved.
    ///
    /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
    /// the object file's path, and "dwo" to the dwarf object file's path.
    pub saved_files: UnordMap<String, String>,
}

pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex {}
}

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
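///
/// # Example
///
/// A sketch of the two-step mapping for previous-session nodes described
/// above (illustrative only):
///
/// ```ignore (illustrative)
/// // Step 1: `DepNode` -> `SerializedDepNodeIndex`, via the previous graph.
/// let prev_index = previous.node_to_index_opt(&dep_node)?;
/// // Step 2: `SerializedDepNodeIndex` -> `Option<DepNodeIndex>`, via the
/// // compact `prev_index_to_index` vector.
/// let index = current.prev_index_to_index.lock()[prev_index];
/// ```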
pub(super) struct CurrentDepGraph<D: Deps> {
    encoder: GraphEncoder<D>,
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
    anon_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,

    /// This is used to verify that fingerprints do not change between the creation of a node
    /// and its recomputation.
    #[cfg(debug_assertions)]
    fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter>,

    /// Used to verify the absence of hash collisions among DepNodes.
    /// This field is only `Some` if the `-Z incremental_verify_ich` option is present
    /// or if `debug_assertions` are enabled.
    ///
    /// The map contains all DepNodes that have been allocated in the current session so far and
    /// for which there is no equivalent in the previous session.
    nodes_newly_allocated_in_current_session: Option<Lock<FxHashMap<DepNode, DepNodeIndex>>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}

impl<D: Deps> CurrentDepGraph<D> {
    fn new(
        session: &Session,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_nanos();
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);
        let anon_id_seed = stable_hasher.finish();

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        let new_node_dbg =
            session.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions);

        CurrentDepGraph {
            encoder: GraphEncoder::new(
                encoder,
                prev_graph_node_count,
                record_graph,
                record_stats,
                &session.prof,
                previous,
            ),
            anon_node_to_index: ShardedHashMap::with_capacity(
                // FIXME: The count estimate is off as anon nodes are only a portion of the nodes.
                new_node_count_estimate / sharded::shards(),
            ),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed,
            #[cfg(debug_assertions)]
            forbidden_edge,
            #[cfg(debug_assertions)]
            fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
            nodes_newly_allocated_in_current_session: new_node_dbg.then(|| {
                Lock::new(FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate,
                    Default::default(),
                ))
            }),
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
        let previous = *self.fingerprints.lock().get_or_insert_with(dep_node_index, || fingerprint);
        assert_eq!(previous, fingerprint, "Unstable fingerprints for {:?}", key);
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    #[inline(always)]
    fn alloc_new_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        let dep_node_index = self.encoder.send(key, current_fingerprint, edges);

        #[cfg(debug_assertions)]
        self.record_edge(dep_node_index, key, current_fingerprint);

        if let Some(ref nodes_newly_allocated_in_current_session) =
            self.nodes_newly_allocated_in_current_session
        {
            outline(|| {
                if nodes_newly_allocated_in_current_session
                    .lock()
                    .insert(key, dep_node_index)
                    .is_some()
                {
                    panic!("Found duplicate dep-node {key:?}");
                }
            });
        }

        dep_node_index
    }

    fn intern_node(
        &self,
        prev_graph: &SerializedDepGraph,
        key: DepNode,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            let get_dep_node_index = |fingerprint| {
                // Reuse the `DepNodeIndex` if this previous node was already
                // interned in the current session; otherwise encode it now.
                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                    None => {
                        let dep_node_index = self.encoder.send(key, fingerprint, edges);
                        prev_index_to_index[prev_index] = Some(dep_node_index);
                        dep_node_index
                    }
                };

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key, fingerprint);

                dep_node_index
            };

            // Determine the color and index of the new `DepNode`.
            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    let dep_node_index = get_dep_node_index(fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                } else {
                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    let dep_node_index = get_dep_node_index(fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                }
            } else {
                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                let dep_node_index = get_dep_node_index(Fingerprint::ZERO);
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
            }
        } else {
            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            // This is a new node: it didn't exist in the previous compilation session.
            let dep_node_index = self.alloc_new_node(key, edges, fingerprint);

            (dep_node_index, None)
        }
    }

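    /// Copies a node from the previous graph (together with its edges) into
    /// the current graph without re-executing the corresponding query; this is
    /// used when a node can be reused wholesale, e.g. after it has been marked
    /// green. Idempotent: a second call for the same `prev_index` returns the
    /// `DepNodeIndex` allocated by the first.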
    fn promote_node_and_deps_to_current(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let dep_node_index = self.encoder.send_promoted(prev_index, &*prev_index_to_index);
                prev_index_to_index[prev_index] = Some(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(
                    dep_node_index,
                    prev_graph.index_to_node(prev_index),
                    prev_graph.fingerprint_by_index(prev_index),
                );
                dep_node_index
            }
        }
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self
                .nodes_newly_allocated_in_current_session
                .as_ref()
                .is_some_and(|set| set.lock().contains_key(node)),
            "node from previous graph present in new node collection"
        );
    }
}


#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a> {
    /// New dependencies can be added to the `TaskDeps`. This is used when
    /// executing a 'normal' query (no `eval_always` modifier).
    Allow(&'a Lock<TaskDeps>),
    /// This is used when executing an `eval_always` query. We don't
    /// need to track dependencies for a query that's always
    /// re-executed -- but we need to know that this is an `eval_always`
    /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
    /// when directly feeding other queries.
    EvalAlways,
    /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
    Ignore,
    /// Any attempt to add new dependencies will cause a panic.
    /// This is used when decoding a query result from disk,
    /// to ensure that the decoding process doesn't itself
    /// require the execution of any queries.
    Forbid,
}
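
// A sketch (hypothetical consumer code; the actual read path lives earlier in
// this file) of how a dependency read dispatches on `TaskDepsRef`:
//
//     match task_deps {
//         TaskDepsRef::Allow(deps) => deps.lock().reads.push(dep_node_index),
//         TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => {}
//         TaskDepsRef::Forbid => panic_on_forbidden_read(data, dep_node_index),
//     }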

/// The dependencies recorded while a single task (e.g. one query invocation)
/// executes. `reads` keeps the edges in the order they were recorded, while
/// `read_set` deduplicates repeated reads of the same `DepNodeIndex`.
#[derive(Debug)]
pub struct TaskDeps {
    #[cfg(debug_assertions)]
    node: Option<DepNode>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode>,
}

impl Default for TaskDeps {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::with_capacity_and_hasher(128, Default::default()),
            phantom_data: PhantomData,
        }
    }
}

/// A data structure that stores `Option<DepNodeColor>` values as a contiguous
/// array, using one `u32` per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;

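// Worked example of the packed encoding used below: `None` is stored as 0
// (`COMPRESSED_NONE`), `Red` as 1 (`COMPRESSED_RED`), and
// `Green(DepNodeIndex::from_u32(5))` as 5 + `COMPRESSED_FIRST_GREEN` = 7;
// `get` reverses this by subtracting `COMPRESSED_FIRST_GREEN` again.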
impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    #[inline]
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

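    /// Stores the color with `Release` ordering, pairing with the `Acquire`
    /// load in `get`: a thread that observes the new color also observes all
    /// writes that happened before the color was published.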
    #[inline]
    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}

#[inline(never)]
#[cold]
pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: Option<&MarkFrame<'_>>) {
    let data = graph.data.as_ref().unwrap();

    eprintln!("there was a panic while trying to force a dep node");
    eprintln!("try_mark_green dep node stack:");

    let mut i = 0;
    let mut current = frame;
    while let Some(frame) = current {
        let node = data.previous.index_to_node(frame.index);
        eprintln!("#{i} {node:?}");
        current = frame.parent;
        i += 1;
    }

    eprintln!("end of try_mark_green dep node stack");
}
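
// Shape of the emitted trace (the node names here are illustrative, not real
// output):
//
//     there was a panic while trying to force a dep node
//     try_mark_green dep node stack:
//     #0 DepNode { kind: typeck, hash: ... }
//     #1 DepNode { kind: mir_built, hash: ... }
//     end of try_mark_green dep node stack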

#[cold]
#[inline(never)]
fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepNodeIndex) -> ! {
    // We have to do an expensive reverse-lookup of the DepNode that
    // corresponds to `dep_node_index`, but that's OK since we are about
    // to ICE anyway.
    let mut dep_node = None;

    // First try to find the dep node among those that already existed in the
    // previous session.
    for (prev_index, index) in data.current.prev_index_to_index.lock().iter_enumerated() {
        if index == &Some(dep_node_index) {
            dep_node = Some(data.previous.index_to_node(prev_index));
            break;
        }
    }

    if dep_node.is_none()
        && let Some(nodes) = &data.current.nodes_newly_allocated_in_current_session
    {
        // Try to find it among the nodes allocated so far in this session.
        if let Some((node, _)) = nodes.lock().iter().find(|&(_, index)| *index == dep_node_index) {
            dep_node = Some(*node);
        }
    }

    let dep_node = dep_node.map_or_else(
        || format!("with index {:?}", dep_node_index),
        |dep_node| format!("`{:?}`", dep_node),
    );

    panic!(
        "Error: trying to record dependency on DepNode {dep_node} in a \
         context that does not allow it (e.g. during query deserialization). \
         The most common case of recording a dependency on a DepNode `foo` is \
         when the corresponding query `foo` is invoked. Invoking queries is not \
         allowed as part of loading something from the incremental on-disk cache. \
         See <https://github.com/rust-lang/rust/pull/91919>."
    )
}