use std::assert_matches::assert_matches;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::outline;
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU64, Lock};
use rustc_data_structures::unord::UnordMap;
use rustc_errors::DiagInner;
use rustc_index::IndexVec;
use rustc_macros::{Decodable, Encodable};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_session::Session;
use tracing::{debug, instrument};
#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
use crate::dep_graph::edges::EdgesVec;
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffect};

#[derive(Clone)]
pub struct DepGraph<D: Deps> {
    data: Option<Arc<DepGraphData<D>>>,

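    /// Used when the dep-graph is disabled (non-incremental mode) to still
    /// hand out unique `DepNodeIndex`es: each task gets an index that
    /// uniquely identifies it, e.g. for self-profiling.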
    virtual_dep_node_index: Arc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}
}

rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);

impl DepNodeIndex {
    const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

impl From<DepNodeIndex> for QueryInvocationId {
    #[inline(always)]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

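/// A frame in the stack of dep-nodes currently being marked green. The stack
/// is only used to print a readable trace of the nodes involved when forcing
/// a dependency panics (see `print_markframe_trace`).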
pub struct MarkFrame<'a> {
    index: SerializedDepNodeIndex,
    parent: Option<&'a MarkFrame<'a>>,
}

enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    #[inline]
    fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

pub(crate) struct DepGraphData<D: Deps> {
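    /// The dep-graph being built for the current compilation session. We do
    /// not merge the previous dep-graph into this one; instead, nodes from
    /// the previous session are promoted into it on demand (see
    /// `promote_node_and_deps_to_current`).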
    current: CurrentDepGraph<D>,

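    /// The dep-graph from the previous compilation session: all nodes and
    /// edges, plus the fingerprints of nodes that have them.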
    previous: Arc<SerializedDepGraph>,

    colors: DepNodeColorMap,

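    /// Work-products (e.g. object files) saved by the previous session. If
    /// we find that they are not dirty, we can reuse them instead of
    /// recomputing.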
    previous_work_products: WorkProductMap,

    dep_node_debug: Lock<FxHashMap<DepNode, String>>,

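    /// Used by incremental-compilation tests to assert that a particular
    /// query result was decoded from disk (not just marked green).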
    debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
}

pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}

impl<D: Deps> DepGraph<D> {
    pub fn new(
        session: &Session,
        prev_graph: Arc<SerializedDepGraph>,
        prev_work_products: WorkProductMap,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> DepGraph<D> {
        let prev_graph_node_count = prev_graph.node_count();

        let current = CurrentDepGraph::new(
            session,
            prev_graph_node_count,
            encoder,
            record_graph,
            record_stats,
            Arc::clone(&prev_graph),
        );

        let colors = DepNodeColorMap::new(prev_graph_node_count);

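        // Instantiate a dependency-less node only once for anonymous queries.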
        let _green_node_index = current.alloc_new_node(
            DepNode { kind: D::DEP_KIND_NULL, hash: current.anon_id_seed.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);

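        // Instantiate a dependency-less red node only once for anonymous queries.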
        let (red_node_index, red_node_prev_index_and_color) = current.intern_node(
            &prev_graph,
            DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
            EdgesVec::new(),
            None,
        );
        assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        match red_node_prev_index_and_color {
            None => {
                assert!(prev_graph_node_count == 0);
            }
            Some((prev_red_node_index, DepNodeColor::Red)) => {
                assert_eq!(prev_red_node_index.as_usize(), red_node_index.as_usize());
                colors.insert(prev_red_node_index, DepNodeColor::Red);
            }
            Some((_, DepNodeColor::Green(_))) => {
                panic!("DepNodeIndex::FOREVER_RED_NODE evaluated to DepNodeColor::Green")
            }
        }

        DepGraph {
            data: Some(Arc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                previous: prev_graph,
                colors,
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<D> {
        DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
    }

    #[inline]
    pub(crate) fn data(&self) -> Option<&DepGraphData<D>> {
        self.data.as_deref()
    }

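    /// Returns `true` if we are actually building the full dep-graph, and
    /// not just reading it from disk.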
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
        if let Some(data) = &self.data {
            data.current.encoder.with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            D::read_deps(|task_deps| {
                assert_matches!(
                    task_deps,
                    TaskDepsRef::Ignore,
                    "expected no task dependency tracking"
                );
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Ignore, op)
    }

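    /// Used to wrap the deserialization of a query result from disk. This
    /// method enforces that no new `DepNode`s are created during query result
    /// deserialization: decoding must not itself execute queries, since any
    /// dependency edges recorded while decoding would be attributed to the
    /// wrong task and corrupt the graph.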
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Forbid, op)
    }

    #[inline(always)]
    pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        match self.data() {
            Some(data) => data.with_task(key, cx, arg, task, hash_result),
            None => (task(cx, arg), self.next_virtual_depnode_index()),
        }
    }

    pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        match self.data() {
            Some(data) => {
                let (result, index) = data.with_anon_task_inner(cx, dep_kind, op);
                self.read_index(index);
                (result, index)
            }
            None => (op(), self.next_virtual_depnode_index()),
        }
    }
}

impl<D: Deps> DepGraphData<D> {
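    /// Starts a new dep-graph task. Dep-graph tasks are specified using a
    /// free function (`task`) and **not** a closure. This is intentional so
    /// that we have tight control over the state the task can access: it
    /// gets exactly two pieces of state, the context `cx` and the argument
    /// `arg`, which prevents tracked state from implicitly "leaking" into
    /// the task and being read without a corresponding dep-graph edge.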
    #[inline(always)]
    pub(crate) fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
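        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with `DepNode` creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`.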
        self.assert_dep_node_not_yet_allocated_in_current_session(&key, || {
            format!(
                "forcing query with already existing `DepNode`\n\
                 - query-key: {arg:?}\n\
                 - dep-node: {key:?}"
            )
        });

        let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
        let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
            (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
        } else {
            let task_deps = Lock::new(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(key),
                reads: EdgesVec::new(),
                read_set: Default::default(),
                phantom_data: PhantomData,
            });
            (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
        };

        let dcx = cx.dep_context();
        let dep_node_index =
            self.hash_result_and_intern_node(dcx, key, edges, &result, hash_result);

        (result, dep_node_index)
    }

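    /// Executes something within an "anonymous" task: a task whose `DepNode`
    /// is determined by the set of inputs it read.
    ///
    /// NOTE: this does not count as a read of the resulting `DepNode` by
    /// itself; callers that use the result without reading the node risk
    /// untracked dependencies (`DepGraph::with_anon_task` performs the read).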
    pub(crate) fn with_anon_task_inner<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        let task_deps = Lock::new(TaskDeps::default());
        let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
        let task_deps = task_deps.into_inner();
        let task_deps = task_deps.reads;

        let dep_node_index = match task_deps.len() {
            0 => {
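                // Because the dep-node id of anonymous nodes is computed from
                // the set of their dependencies, we already know what the id
                // of this dependency-less node must be (namely the
                // precomputed `SINGLETON_DEPENDENCYLESS_ANON_NODE`), so we
                // can skip hashing and interning entirely.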
                DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
            }
            1 => {
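                // When there is only one dependency, don't bother creating a node.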
                task_deps[0]
            }
            _ => {
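                // The dep-node *indices* (rather than the dep-nodes
                // themselves) are hashed here. These indices may refer to
                // different nodes per session, but that is fine: the final
                // hash only needs to be unique per session, which is ensured
                // by combining it with the per-session `anon_id_seed` below.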
                let mut hasher = StableHasher::new();
                task_deps.hash(&mut hasher);

                let target_dep_node = DepNode {
                    kind: dep_kind,
                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                };

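                // Two anonymous tasks that read the same dependencies in the
                // same order produce the same `DepNode`, so deduplicate
                // through `anon_node_to_index` instead of allocating a new
                // node each time.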
                self.current.anon_node_to_index.get_or_insert_with(target_dep_node, || {
                    self.current.alloc_new_node(target_dep_node, task_deps, Fingerprint::ZERO)
                })
            }
        };

        (result, dep_node_index)
    }

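    /// Hashes the result (if a `hash_result` function was provided) and
    /// interns the new `DepNode` with the dependencies recorded up to now.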
    fn hash_result_and_intern_node<Ctxt: DepContext<Deps = D>, R>(
        &self,
        cx: &Ctxt,
        node: DepNode,
        edges: EdgesVec,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        let hashing_timer = cx.profiler().incr_result_hashing();
        let current_fingerprint = hash_result.map(|hash_result| {
            cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
        });

        let (dep_node_index, prev_and_color) =
            self.current.intern_node(&self.previous, node, edges, current_fingerprint);

        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some((prev_index, color)) = prev_and_color {
            debug_assert!(
                self.colors.get(prev_index).is_none(),
                "DepGraph::with_task() - Duplicate DepNodeColor insertion for {node:?}",
            );

            self.colors.insert(prev_index, color);
        }

        dep_node_index
    }
}

impl<D: Deps> DepGraph<D> {
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::EvalAlways => {
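                        // We don't need to record dependencies of eval_always
                        // queries: they are re-evaluated unconditionally anyway.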
                        return;
                    }
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        panic_on_forbidden_read(data, dep_node_index)
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
                }

                let new_read = if task_deps.reads.len() < EdgesVec::INLINE_CAPACITY {
                    task_deps.reads.iter().all(|other| *other != dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == EdgesVec::INLINE_CAPACITY {
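                        // We just reached the inline capacity: fill `read_set`
                        // with everything so far so we can use it from now on.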
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                if forbidden_edge.test(&src, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", src, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
                }
            })
        }
    }

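    /// Records a diagnostic emitted during the current task by creating a
    /// side-effect node for it and reading that node, so the diagnostic is
    /// re-emitted if the task is marked green in a later session.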
    #[inline]
    pub fn record_diagnostic<Qcx: QueryContext>(&self, qcx: Qcx, diagnostic: &DiagInner) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return,
                TaskDepsRef::Forbid | TaskDepsRef::Allow(..) => {
                    self.read_index(data.encode_diagnostic(qcx, diagnostic));
                }
            })
        }
    }
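
    /// Forces the side-effect node at `prev_index` (created by
    /// `encode_diagnostic` in a previous session), re-emitting the stored
    /// diagnostic.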
    #[inline]
    pub fn force_diagnostic_node<Qcx: QueryContext>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        if let Some(ref data) = self.data {
            data.force_diagnostic_node(qcx, prev_index);
        }
    }

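    /// Creates a node when we force-feed a value into the query cache. As
    /// elsewhere in the query system, the current state of the calling query
    /// only depends on the dependencies recorded up to now, so the fed value
    /// can only depend on those dependencies too; it is therefore sound to
    /// reuse the current dependency set for the created node.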
    pub fn with_feed_task<Ctxt: DepContext<Deps = D>, R: Debug>(
        &self,
        node: DepNode,
        cx: Ctxt,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(data) = self.data.as_ref() {
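            // The node may already have been interned: the caller query has
            // more dependencies than the node we are creating here, so the
            // caller may be re-executed (and feed this value again) even
            // though this node was marked green. For sanity, verify that the
            // previously loaded stable hash and the new one match in that case.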
            if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
                let dep_node_index = data.current.prev_index_to_index.lock()[prev_index];
                if let Some(dep_node_index) = dep_node_index {
                    crate::query::incremental_verify_ich(
                        cx,
                        data,
                        result,
                        prev_index,
                        hash_result,
                        |value| format!("{value:?}"),
                    );

                    #[cfg(debug_assertions)]
                    if hash_result.is_some() {
                        data.current.record_edge(
                            dep_node_index,
                            node,
                            data.prev_fingerprint_of(prev_index),
                        );
                    }

                    return dep_node_index;
                }
            }

            let mut edges = EdgesVec::new();
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::EvalAlways => {
                    edges.push(DepNodeIndex::FOREVER_RED_NODE);
                }
                TaskDepsRef::Ignore => {}
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")
                }
            });

            data.hash_result_and_intern_node(&cx, node, edges, result, hash_result)
        } else {
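            // Incremental compilation is turned off. We just execute the task
            // without tracking, but still hand out a dep-node index that
            // uniquely identifies the query for self-profiling.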
            self.next_virtual_depnode_index()
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            let current = self.current.prev_index_to_index.lock()[prev_index];
            assert!(current.is_none(), "{}", msg())
        } else if let Some(nodes_newly_allocated_in_current_session) =
            &self.current.nodes_newly_allocated_in_current_session
        {
            outline(|| {
                let seen = nodes_newly_allocated_in_current_session.lock().contains_key(dep_node);
                assert!(!seen, "{}", msg());
            });
        }
    }

    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.colors.get(prev_index)
        } else {
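            // This is a node that did not exist in the previous compilation session.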
            None
        }
    }

    #[inline]
    pub(crate) fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
        self.colors.get(prev_index).is_some_and(|c| c.is_green())
    }

    #[inline]
    pub(crate) fn prev_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
        self.previous.fingerprint_by_index(prev_index)
    }

    #[inline]
    pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode {
        self.previous.index_to_node(prev_index)
    }

    pub(crate) fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
        self.debug_loaded_from_disk.lock().insert(dep_node);
    }

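    /// Encodes a diagnostic by creating a side-effect node with a unique
    /// index and associating `diagnostic` with it, for replay in the next
    /// session.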
    #[inline]
    fn encode_diagnostic<Qcx: QueryContext>(
        &self,
        qcx: Qcx,
        diagnostic: &DiagInner,
    ) -> DepNodeIndex {
        let dep_node_index = self.current.encoder.send(
            DepNode {
                kind: D::DEP_KIND_SIDE_EFFECT,
                hash: PackedFingerprint::from(Fingerprint::ZERO),
            },
            Fingerprint::ZERO,
            std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
        );
        let side_effect = QuerySideEffect::Diagnostic(diagnostic.clone());
        qcx.store_side_effect(dep_node_index, side_effect);
        dep_node_index
    }

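    /// Forces a side-effect node green by emitting the diagnostic it stores
    /// and promoting the node into the current session. `prev_index` refers
    /// to a node created by `encode_diagnostic` in the previous session.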
    #[inline]
    fn force_diagnostic_node<Qcx: QueryContext>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        D::with_deps(TaskDepsRef::Ignore, || {
            let side_effect = qcx.load_side_effect(prev_index).unwrap();

            match &side_effect {
                QuerySideEffect::Diagnostic(diagnostic) => {
                    qcx.dep_context().sess().dcx().emit_diagnostic(diagnostic.clone());
                }
            }

            let index = self.current.promote_node_and_deps_to_current(&self.previous, prev_index);
            qcx.store_side_effect(index, side_effect);

            self.colors.insert(prev_index, DepNodeColor::Green(index));
        })
    }
}

impl<D: Deps> DepGraph<D> {
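    /// Checks whether a previous work product exists for `v` and, if so,
    /// returns it.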
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

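    /// Accesses the map of work-products created during the previous session.
    /// Only used while saving the dep-graph.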
    pub fn previous_work_products(&self) -> &WorkProductMap {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    #[cfg(debug_assertions)]
    #[inline(always)]
    pub(crate) fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = self.with_ignore(debug_str_gen);
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            return data.node_color(dep_node);
        }

        None
    }

    pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
    }
}

impl<D: Deps> DepGraphData<D> {
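    /// Tries to mark `dep_node` green. On success, returns the node's index
    /// in the previous session together with its index in the current
    /// session. Returns `None` if the node is new, red, or cannot be marked
    /// green.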
    pub(crate) fn try_mark_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

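        // Return None if the dep-node didn't exist in the previous session.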
        let prev_index = self.previous.node_to_index_opt(dep_node)?;

        match self.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
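                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.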
                self.try_mark_previous_green(qcx, prev_index, dep_node, None)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
    fn try_mark_parent_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        parent_dep_node_index: SerializedDepNodeIndex,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<()> {
        let dep_dep_node_color = self.colors.get(parent_dep_node_index);
        let dep_dep_node = &self.previous.index_to_node(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
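                // This dependency has been marked as green before; we are
                // still fine and can continue with checking the other
                // dependencies.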
                debug!("dependency {dep_dep_node:?} was immediately green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
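                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.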
                debug!("dependency {dep_dep_node:?} was immediately red");
                return None;
            }
            None => {}
        }

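        // We don't know the state of this dependency. If it isn't an
        // eval_always node, let's try to mark it green recursively.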
        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "state of dependency {:?} ({}) is unknown, trying to mark it green",
                dep_dep_node, dep_dep_node.hash,
            );

            let node_index =
                self.try_mark_previous_green(qcx, parent_dep_node_index, dep_dep_node, frame);

            if node_index.is_some() {
                debug!("managed to MARK dependency {dep_dep_node:?} as green",);
                return Some(());
            }
        }

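        // We failed to mark it green, so we try to force the query.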
        debug!("trying to force dependency {dep_dep_node:?}");
        if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, parent_dep_node_index, frame) {
            debug!("dependency {dep_dep_node:?} could not be forced");
            return None;
        }

        let dep_dep_node_color = self.colors.get(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                debug!("dependency {dep_dep_node:?} was red after forcing",);
                return None;
            }
            None => {}
        }

        if let None = qcx.dep_context().sess().dcx().has_errors_or_delayed_bugs() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
        }

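        // If the query we just forced has resulted in some kind of
        // compilation error, we cannot rely on the dep-node color having been
        // properly updated. This means the query system has reached an
        // invalid state. We let the compiler continue (by returning `None`)
        // so it can emit error messages and wind down, relying on the fact
        // that this invalid state will not be persisted to the incremental
        // compilation cache because of the compilation errors being present.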
        debug!("dependency {dep_dep_node:?} resulted in compilation error",);
        return None;
    }

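    /// Tries to mark a dep-node that existed in the previous compilation
    /// session as green, by recursively checking (and, if necessary, forcing)
    /// all of its dependencies.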
    #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
    fn try_mark_previous_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<DepNodeIndex> {
        let frame = MarkFrame { index: prev_dep_node_index, parent: frame };

        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        debug_assert_eq!(self.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);

        for dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(qcx, dep_dep_node_index, Some(&frame))?;
        }

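        // If we got here without hitting a `return` in the loop above, then
        // all of the dependencies of this dep-node have been marked as green,
        // so we can mark it as green too: allocate an entry for it in the
        // current dependency graph, importing its edges from the previous
        // graph.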
        let dep_node_index =
            self.current.promote_node_and_deps_to_current(&self.previous, prev_dep_node_index);

        self.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("successfully marked {dep_node:?} as green");
        Some(dep_node_index)
    }
}

impl<D: Deps> DepGraph<D> {
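    /// Returns `true` if the given node has been marked as red during the
    /// current compilation session.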
    pub fn is_red(&self, dep_node: &DepNode) -> bool {
        matches!(self.node_color(dep_node), Some(DepNodeColor::Red))
    }

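    /// Returns `true` if the given node has been marked as green during the
    /// current compilation session.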
    pub fn is_green(&self, dep_node: &DepNode) -> bool {
        self.node_color(dep_node).is_some_and(|c| c.is_green())
    }

    pub fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(data) = &self.data {
            data.assert_dep_node_not_yet_allocated_in_current_session(dep_node, msg)
        }
    }

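    /// Loads all on-disk cacheable query results into memory, so they can be
    /// written out to the new cache file again. Most query results will
    /// already be in memory, but in the case where we marked something as
    /// green but then never needed the value, that value will never have
    /// been loaded from disk.
    ///
    /// This method will only load queries that will end up in the disk
    /// cache. Other queries will not be executed.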
    pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(dep_node);
                }
                None | Some(DepNodeColor::Red) => {
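                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.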
                }
            }
        }
    }

    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.print_incremental_info(
                data.current.total_read_count.load(Ordering::Relaxed),
                data.current.total_duplicate_read_count.load(Ordering::Relaxed),
            )
        }
    }

    pub fn finish_encoding(&self) -> FileEncodeResult {
        if let Some(data) = &self.data { data.current.encoder.finish() } else { Ok(0) }
    }

    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        debug_assert!(self.data.is_none());
        let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
        DepNodeIndex::from_u32(index)
    }
}

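/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example are the
/// object files that we save for each codegen unit.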
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
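    /// Saved files associated with this CGU. In each key/value pair, the
    /// value is the path to the saved file and the key is some identifier
    /// for the type of file being saved (by convention, currently the file
    /// extension, e.g. "o" for object files and "dwo" for dwarf object files).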
    pub saved_files: UnordMap<String, String>,
}

pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;

rustc_index::newtype_index! {
    struct EdgeIndex {}
}

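/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. Nodes are never removed
/// from the graph: they are only added.
///
/// The nodes are identified by a `DepNodeIndex`. To avoid keeping every
/// `DepNode` in memory more than once, nodes from the previous graph are
/// mapped to indices in two steps: `SerializedDepGraph` maps a `DepNode` to
/// a `SerializedDepNodeIndex`, and the compact `prev_index_to_index` vector
/// maps that to a `DepNodeIndex`. Only the anonymous nodes allocated in the
/// current session live in `anon_node_to_index`.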
pub(super) struct CurrentDepGraph<D: Deps> {
    encoder: GraphEncoder<D>,
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
    anon_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,

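    /// Used to verify that fingerprints do not change between the creation
    /// of a node and its recomputation. Only present when `debug_assertions`
    /// are enabled.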
    #[cfg(debug_assertions)]
    fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,

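    /// Used to trap when a specific edge is added to the graph. This is only
    /// active when `debug_assertions` are enabled and the
    /// `RUST_FORBID_DEP_GRAPH_EDGE` environment variable is set.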
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter>,

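    /// Used to verify the absence of hash collisions among `DepNode`s: the
    /// map contains all `DepNode`s allocated in the current session so far,
    /// with their indices. This field is only `Some` if `debug_assertions`
    /// are enabled or `-Z incremental-verify-ich` is present.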
    nodes_newly_allocated_in_current_session: Option<Lock<FxHashMap<DepNode, DepNodeIndex>>>,

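    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple
    /// anonymous nodes can be coalesced into one without changing the
    /// semantics of the dependency graph. However, merging nodes can lead to
    /// a subtle problem during red-green marking: the color of an anonymous
    /// node from the current session could "shadow" the color of the node
    /// with the same ID from the previous session. To side-step this problem
    /// we make sure that anonymous node IDs allocated in different sessions
    /// don't overlap, by mixing this session-specific seed into the ID
    /// fingerprint of each anonymous node.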
    anon_id_seed: Fingerprint,

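    /// Simple read counters for profiling and debugging; only active when
    /// `debug_assertions` are enabled.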
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}

impl<D: Deps> CurrentDepGraph<D> {
    fn new(
        session: &Session,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_nanos();
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);
        let anon_id_seed = stable_hasher.finish();

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        let new_node_dbg =
            session.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions);

        CurrentDepGraph {
            encoder: GraphEncoder::new(
                encoder,
                prev_graph_node_count,
                record_graph,
                record_stats,
                &session.prof,
                previous,
            ),
            anon_node_to_index: ShardedHashMap::with_capacity(
                new_node_count_estimate / sharded::shards(),
            ),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed,
            #[cfg(debug_assertions)]
            forbidden_edge,
            #[cfg(debug_assertions)]
            fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
            nodes_newly_allocated_in_current_session: new_node_dbg.then(|| {
                Lock::new(FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate,
                    Default::default(),
                ))
            }),
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
        let previous = *self.fingerprints.lock().get_or_insert_with(dep_node_index, || fingerprint);
        assert_eq!(previous, fingerprint, "Unstable fingerprints for {:?}", key);
    }

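    /// Writes the node to the current dep-graph and allocates a
    /// `DepNodeIndex` for it. Assumes that the `DepNode` does not exist in
    /// the previous compilation session.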
    #[inline(always)]
    fn alloc_new_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        let dep_node_index = self.encoder.send(key, current_fingerprint, edges);

        #[cfg(debug_assertions)]
        self.record_edge(dep_node_index, key, current_fingerprint);

        if let Some(ref nodes_newly_allocated_in_current_session) =
            self.nodes_newly_allocated_in_current_session
        {
            outline(|| {
                if nodes_newly_allocated_in_current_session
                    .lock()
                    .insert(key, dep_node_index)
                    .is_some()
                {
                    panic!("Found duplicate dep-node {key:?}");
                }
            });
        }

        dep_node_index
    }

    fn intern_node(
        &self,
        prev_graph: &SerializedDepGraph,
        key: DepNode,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            let get_dep_node_index = |fingerprint| {
                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                    None => {
                        let dep_node_index = self.encoder.send(key, fingerprint, edges);
                        prev_index_to_index[prev_index] = Some(dep_node_index);
                        dep_node_index
                    }
                };

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key, fingerprint);

                dep_node_index
            };

            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
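                    // This is a green node: it existed in the previous
                    // compilation, its query was re-executed, and it has the
                    // same result as before.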
                    let dep_node_index = get_dep_node_index(fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                } else {
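                    // This is a red node: it existed in the previous
                    // compilation, its query was re-executed, but it has a
                    // different result from before.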
                    let dep_node_index = get_dep_node_index(fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                }
            } else {
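                // This is a red node, effectively: it existed in the previous
                // compilation session, its query was re-executed, but it
                // doesn't compute a result hash (i.e. it represents a
                // `no_hash` query), so we have no way of determining whether
                // or not the result was the same as before.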
                let dep_node_index = get_dep_node_index(Fingerprint::ZERO);
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
            }
        } else {
            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

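            // This is a new node: it didn't exist in the previous compilation session.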
            let dep_node_index = self.alloc_new_node(key, edges, fingerprint);

            (dep_node_index, None)
        }
    }

    fn promote_node_and_deps_to_current(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let dep_node_index = self.encoder.send_promoted(prev_index, &*prev_index_to_index);
                prev_index_to_index[prev_index] = Some(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(
                    dep_node_index,
                    prev_graph.index_to_node(prev_index),
                    prev_graph.fingerprint_by_index(prev_index),
                );
                dep_node_index
            }
        }
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self
                .nodes_newly_allocated_in_current_session
                .as_ref()
                .map_or(false, |set| set.lock().contains_key(node)),
            "node from previous graph present in new node collection"
        );
    }
}

#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a> {
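    /// New dependencies can be added to the `TaskDeps`. This is used when
    /// executing a "normal" query (no `eval_always` modifier).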
    Allow(&'a Lock<TaskDeps>),
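    /// This is used when executing an `eval_always` query. We don't need to
    /// track dependencies for a query that is always re-executed, but we
    /// still need to know that it is `eval_always` so that edges to
    /// `DepNodeIndex::FOREVER_RED_NODE` can be emitted when it directly
    /// feeds other queries.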
    EvalAlways,
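    /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.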
    Ignore,
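    /// Any attempt to add new dependencies will cause a panic. This is used
    /// when decoding a query result from disk, to ensure that the decoding
    /// process doesn't itself require the execution of any queries.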
    Forbid,
}

#[derive(Debug)]
pub struct TaskDeps {
    #[cfg(debug_assertions)]
    node: Option<DepNode>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode>,
}

impl Default for TaskDeps {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::with_capacity_and_hasher(128, Default::default()),
            phantom_data: PhantomData,
        }
    }
}
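
/// A data structure that stores `Option<DepNodeColor>` values as a vector of
/// `AtomicU32`, so colors can be read and written concurrently without
/// locking.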
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    #[inline]
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    #[inline]
    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}

#[inline(never)]
#[cold]
pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: Option<&MarkFrame<'_>>) {
    let data = graph.data.as_ref().unwrap();

    eprintln!("there was a panic while trying to force a dep node");
    eprintln!("try_mark_green dep node stack:");

    let mut i = 0;
    let mut current = frame;
    while let Some(frame) = current {
        let node = data.previous.index_to_node(frame.index);
        eprintln!("#{i} {node:?}");
        current = frame.parent;
        i += 1;
    }

    eprintln!("end of try_mark_green dep node stack");
}

#[cold]
#[inline(never)]
fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepNodeIndex) -> ! {
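    // We have to do an expensive reverse-lookup of the `DepNode` that
    // corresponds to `dep_node_index`, but that's OK since we are about to
    // ICE anyway.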
    let mut dep_node = None;

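    // First, try to find the dep-node among those that already existed in
    // the previous session.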
    for (prev_index, index) in data.current.prev_index_to_index.lock().iter_enumerated() {
        if index == &Some(dep_node_index) {
            dep_node = Some(data.previous.index_to_node(prev_index));
            break;
        }
    }

    if dep_node.is_none()
        && let Some(nodes) = &data.current.nodes_newly_allocated_in_current_session
    {
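        // Otherwise, try to find it among the nodes allocated so far in this
        // session.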
        if let Some((node, _)) = nodes.lock().iter().find(|&(_, index)| *index == dep_node_index) {
            dep_node = Some(*node);
        }
    }

    let dep_node = dep_node.map_or_else(
        || format!("with index {:?}", dep_node_index),
        |dep_node| format!("`{:?}`", dep_node),
    );

    panic!(
        "Error: trying to record dependency on DepNode {dep_node} in a \
         context that does not allow it (e.g. during query deserialization). \
         The most common case of recording a dependency on a DepNode `foo` is \
         when the corresponding query `foo` is invoked. Invoking queries is not \
         allowed as part of loading something from the incremental on-disk cache. \
         See <https://github.com/rust-lang/rust/pull/91919>."
    )
}