use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU64, Lock};
use rustc_data_structures::unord::UnordMap;
use rustc_data_structures::{assert_matches, outline};
use rustc_errors::DiagInner;
use rustc_index::IndexVec;
use rustc_macros::{Decodable, Encodable};
use rustc_query_system::ich::StableHashingContext;
use rustc_query_system::query::QuerySideEffect;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_session::Session;
use tracing::{debug, instrument};
#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
use crate::dep_graph::edges::EdgesVec;
use crate::query::QueryContext;
use crate::verify_ich::incremental_verify_ich;

pub struct DepGraph<D: Deps> {
    data: Option<Arc<DepGraphData<D>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Arc<AtomicU32>,
}

/// Manual clone impl that does not require `D: Clone`.
impl<D: Deps> Clone for DepGraph<D> {
    fn clone(&self) -> Self {
        let Self { data, virtual_dep_node_index } = self;
        Self {
            data: Option::<Arc<_>>::clone(data),
            virtual_dep_node_index: Arc::clone(virtual_dep_node_index),
        }
    }
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}
}

// We store a large collection of these in `prev_index_to_index` during
// non-full incremental builds, and want to ensure that the element size
// doesn't inadvertently increase.
rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);

impl DepNodeIndex {
    const SINGLETON_ZERO_DEPS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

impl From<DepNodeIndex> for QueryInvocationId {
    #[inline(always)]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

pub struct MarkFrame<'a> {
    index: SerializedDepNodeIndex,
    parent: Option<&'a MarkFrame<'a>>,
}

#[derive(Debug)]
pub(super) enum DepNodeColor {
    Green(DepNodeIndex),
    Red,
    Unknown,
}

pub struct DepGraphData<D: Deps> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph<D>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: Arc<SerializedDepGraph>,

    colors: DepNodeColorMap,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: WorkProductMap,

    dep_node_debug: Lock<FxHashMap<DepNode, String>>,

    /// Used by incremental compilation tests to assert that
    /// a particular query result was decoded from disk
    /// (not just marked green).
    debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
}

pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}
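
// Illustrative sketch (comment only, not used by the implementation):
// `hash_result` above is the fingerprinting callback that the `with_task`-style
// methods below accept. Given an `hcx: &mut StableHashingContext<'_>` and a
// query result implementing `HashStable`, a caller would compute:
//
//     let fingerprint = hash_result(&mut hcx, &result);
//
// If this fingerprint equals the previous session's fingerprint for the same
// node, the re-executed node is colored green (see `alloc_and_color_node`).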

impl<D: Deps> DepGraph<D> {
    pub fn new(
        session: &Session,
        prev_graph: Arc<SerializedDepGraph>,
        prev_work_products: WorkProductMap,
        encoder: FileEncoder,
    ) -> DepGraph<D> {
        let prev_graph_node_count = prev_graph.node_count();

        let current =
            CurrentDepGraph::new(session, prev_graph_node_count, encoder, Arc::clone(&prev_graph));

        let colors = DepNodeColorMap::new(prev_graph_node_count);

        // Instantiate a node with zero dependencies only once for anonymous queries.
        let _green_node_index = current.alloc_new_node(
            DepNode { kind: D::DEP_KIND_ANON_ZERO_DEPS, hash: current.anon_id_seed.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE);

        // Instantiate a dependency-less red node only once for anonymous queries.
        let red_node_index = current.alloc_new_node(
            DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        if prev_graph_node_count > 0 {
            colors.insert_red(SerializedDepNodeIndex::from_u32(
                DepNodeIndex::FOREVER_RED_NODE.as_u32(),
            ));
        }

        DepGraph {
            data: Some(Arc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                previous: prev_graph,
                colors,
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<D> {
        DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
    }

    #[inline]
    pub fn data(&self) -> Option<&DepGraphData<D>> {
        self.data.as_deref()
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
        if let Some(data) = &self.data {
            data.current.encoder.with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            D::read_deps(|task_deps| {
                assert_matches!(
                    task_deps,
                    TaskDepsRef::Ignore,
                    "expected no task dependency tracking"
                );
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Ignore, op)
    }

    /// Used to wrap the deserialization of a query result from disk.
    /// This method enforces that no new `DepNodes` are created during
    /// query result deserialization.
    ///
    /// Enforcing this makes the query dep graph simpler - all nodes
    /// must be created during the query execution, and should be
    /// created from inside the 'body' of a query (the implementation
    /// provided by a particular compiler crate).
    ///
    /// Consider the case of three queries `A`, `B`, and `C`, where
    /// `A` invokes `B` and `B` invokes `C`:
    ///
    /// `A -> B -> C`
    ///
    /// Suppose that decoding the result of query `B` required re-computing
    /// the query `C`. If we did not create a fresh `TaskDeps` when
    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
    /// (if we needed to re-execute `A`). This would cause us to create
    /// a new edge `A -> C`. If this edge did not previously
    /// exist in the `DepGraph`, then we could end up with a different
    /// `DepGraph` at the end of compilation, even if there were no
    /// meaningful changes to the overall program (e.g. a newline was added).
    /// In addition, this edge might cause a subsequent compilation run
    /// to try to force `C` before marking other necessary nodes green. If
    /// `C` did not exist in the new compilation session, then we could
    /// get an ICE. Normally, we would have tried (and failed) to mark
    /// some other query green (e.g. `item_children`) which was used
    /// to obtain `C`, which would prevent us from ever trying to force
    /// a nonexistent `C`.
    ///
    /// It might be possible to enforce that all `DepNode`s read during
    /// deserialization already exist in the previous `DepGraph`. In
    /// the above example, we would invoke `C` during the deserialization
    /// of `B`. Since we correctly create a new `TaskDeps` for the decoding
    /// of `B`, this would result in an edge `B -> C`. If that edge already
    /// existed (with the same `DepPathHash`es), then it should be correct
    /// to allow the invocation of the query to proceed during deserialization
    /// of a query result. We would merely assert that the dep-graph fragment
    /// that would have been added by invoking `C` while decoding `B`
    /// is equivalent to the dep-graph fragment that we already instantiated for `B`
    /// (at the point where we successfully marked `B` as green).
    ///
    /// However, this would require additional complexity
    /// in the query infrastructure, and is not currently needed by the
    /// decoding of any query results. Should the need arise in the future,
    /// we should consider extending the query system with this functionality.
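    ///
    /// # Example (illustrative)
    ///
    /// A minimal sketch of how a caller wraps decoding; `decode_query_result`
    /// and `decoder` are hypothetical stand-ins for the on-disk cache machinery:
    ///
    /// ```ignore (illustrative sketch)
    /// let value = tcx.dep_graph.with_query_deserialization(|| {
    ///     // Any `DepNode` creation in here is forbidden and will ICE.
    ///     decode_query_result(&mut decoder)
    /// });
    /// ```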
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Forbid, op)
    }

    #[inline(always)]
    pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        match self.data() {
            Some(data) => data.with_task(key, cx, arg, task, hash_result),
            None => (task(cx, arg), self.next_virtual_depnode_index()),
        }
    }

    pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        match self.data() {
            Some(data) => {
                let (result, index) = data.with_anon_task_inner(cx, dep_kind, op);
                self.read_index(index);
                (result, index)
            }
            None => (op(), self.next_virtual_depnode_index()),
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation.html
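    ///
    /// # Example (illustrative)
    ///
    /// A minimal sketch of a call site; `make_dep_node` and `type_check` are
    /// hypothetical placeholders, while `hash_result` is the fingerprinting
    /// helper defined earlier in this module:
    ///
    /// ```ignore (illustrative sketch)
    /// let (result, dep_node_index) = data.with_task(
    ///     make_dep_node(dep_kinds::type_check, def_id), // hypothetical helper
    ///     tcx,
    ///     def_id,
    ///     |tcx, def_id| type_check(tcx, def_id),
    ///     Some(hash_result),
    /// );
    /// ```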
    #[inline(always)]
    pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        self.assert_dep_node_not_yet_allocated_in_current_session(
            cx.dep_context().sess(),
            &key,
            || {
                format!(
                    "forcing query with already existing `DepNode`\n\
                     - query-key: {arg:?}\n\
                     - dep-node: {key:?}"
                )
            },
        );

        let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
        let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
            (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
        } else {
            let task_deps = Lock::new(TaskDeps::new(
                #[cfg(debug_assertions)]
                Some(key),
                0,
            ));
            (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
        };

        let dcx = cx.dep_context();
        let dep_node_index = self.hash_result_and_alloc_node(dcx, key, edges, &result, hash_result);

        (result, dep_node_index)
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
    ///
    /// NOTE: this does not actually count as a read of the DepNode here.
    /// Using the result of this task without reading the DepNode will result
    /// in untracked dependencies which may lead to ICEs as nodes are
    /// incorrectly marked green.
    ///
    /// FIXME: This could perhaps return a `WithDepNode` to ensure that the
    /// user of this function actually performs the read; we'll have to see
    /// how to make that work with `anon` in `execute_job_incr`, though.
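    ///
    /// # Example (illustrative)
    ///
    /// A minimal sketch; `compute_something` is a hypothetical computation
    /// whose identity is defined purely by what it reads:
    ///
    /// ```ignore (illustrative sketch)
    /// let (result, dep_node_index) = data.with_anon_task_inner(tcx, dep_kind, || {
    ///     compute_something(tcx) // reads recorded here become the node's edges
    /// });
    /// // Per the NOTE above, the caller must still record a read of
    /// // `dep_node_index`, as the public `with_anon_task` wrapper does.
    /// ```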
    pub fn with_anon_task_inner<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        // Large numbers of reads are common enough here that pre-sizing `read_set`
        // to 128 actually helps perf on some benchmarks.
        let task_deps = Lock::new(TaskDeps::new(
            #[cfg(debug_assertions)]
            None,
            128,
        ));
        let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
        let task_deps = task_deps.into_inner();
        let reads = task_deps.reads;

        let dep_node_index = match reads.len() {
            0 => {
                // Because the dep-node id of anon nodes is computed from the set of its
                // dependencies, we already know what the ID of this dependency-less node is
                // going to be (i.e. equal to the precomputed
                // `SINGLETON_ZERO_DEPS_ANON_NODE`). As a consequence we can skip creating
                // a `StableHasher` and sending the node through interning.
                DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE
            }
            1 => {
                // When there is only one dependency, don't bother creating a node.
                reads[0]
            }
            _ => {
                // The dep node indices are hashed here instead of hashing the dep nodes of the
                // dependencies. These indices may refer to different nodes per session, but this
                // isn't a problem here because we ensure that the final dep node hash is
                // per-session only, by combining it with the per-session random number
                // `anon_id_seed`. This hash only needs to map the dependencies to a single
                // value on a per-session basis.
                let mut hasher = StableHasher::new();
                reads.hash(&mut hasher);

                let target_dep_node = DepNode {
                    kind: dep_kind,
                    // Fingerprint::combine() is faster than sending Fingerprint
                    // through the StableHasher (at least as long as StableHasher
                    // is so slow).
                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                };

                // The DepNodes generated by the process above are not unique. Two queries could
                // have exactly the same dependencies. However, deserialization does not handle
                // duplicated nodes, so we do the deduplication here directly.
                //
                // As anonymous nodes are a small quantity compared to the full dep-graph, the
                // memory impact of this `anon_node_to_index` map remains tolerable, and helps
                // us avoid useless growth of the graph with almost-equivalent nodes.
                self.current.anon_node_to_index.get_or_insert_with(target_dep_node, || {
                    self.current.alloc_new_node(target_dep_node, reads, Fingerprint::ZERO)
                })
            }
        };

        (result, dep_node_index)
    }

    /// Intern the new `DepNode` with the dependencies up-to-now.
    fn hash_result_and_alloc_node<Ctxt: DepContext<Deps = D>, R>(
        &self,
        cx: &Ctxt,
        node: DepNode,
        edges: EdgesVec,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        let hashing_timer = cx.profiler().incr_result_hashing();
        let current_fingerprint = hash_result.map(|hash_result| {
            cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
        });
        let dep_node_index = self.alloc_and_color_node(node, edges, current_fingerprint);
        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
        dep_node_index
    }
}

impl<D: Deps> DepGraph<D> {
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::EvalAlways => {
                        // We don't need to record dependencies of eval_always
                        // queries. They are re-evaluated unconditionally anyway.
                        return;
                    }
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        // Reading is forbidden in this context. ICE with a useful error message.
                        panic_on_forbidden_read(data, dep_node_index)
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
                }

                // Has `dep_node_index` been seen before? Use either a linear scan or a hashset
                // lookup to determine this. See `TaskDeps::read_set` for details.
                let new_read = if task_deps.reads.len() <= TaskDeps::LINEAR_SCAN_MAX {
                    !task_deps.reads.contains(&dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == TaskDeps::LINEAR_SCAN_MAX + 1 {
                        // Fill `read_set` with what we have so far. Future lookups will use it.
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node
                            && let Some(ref forbidden_edge) = data.current.forbidden_edge
                        {
                            let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                            if forbidden_edge.test(&src, &target) {
                                panic!("forbidden edge {:?} -> {:?} created", src, target)
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
                }
            })
        }
    }
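
    // Illustrative sketch (comment only): the dedup strategy in `read_index`
    // above stays allocation-light for the common case of few reads, then
    // switches to a hash set once the read list outgrows the linear-scan
    // threshold. In isolation the pattern looks like:
    //
    //     let new_read = if reads.len() <= LINEAR_SCAN_MAX {
    //         !reads.contains(&idx) // O(n), but n is tiny and cache-friendly
    //     } else {
    //         read_set.insert(idx) // O(1) expected; built lazily on first overflow
    //     };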

    /// This encodes a diagnostic by creating a node with a unique index and associating
    /// `diagnostic` with it, for use in the next session.
    #[inline]
    pub fn record_diagnostic<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        diagnostic: &DiagInner,
    ) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return,
                TaskDepsRef::Forbid | TaskDepsRef::Allow(..) => {
                    self.read_index(data.encode_diagnostic(qcx, diagnostic));
                }
            })
        }
    }

    /// This forces a diagnostic node green by running its side effect. `prev_index` would
    /// refer to a node created by `encode_diagnostic` in the previous session.
    #[inline]
    pub fn force_diagnostic_node<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        if let Some(ref data) = self.data {
            data.force_diagnostic_node(qcx, prev_index);
        }
    }

    /// Create a node when we force-feed a value into the query cache.
    /// This is used to remove cycles during type-checking const generic parameters.
    ///
    /// As usual in the query system, we consider that the current state of the calling
    /// query depends only on the list of dependencies up to now. As a consequence, the
    /// value that this query gives us can only depend on those dependencies too.
    /// Therefore, it is sound to use the current dependency set for the created node.
    ///
    /// During replay, the order of the nodes is relevant in the dependency graph.
    /// So the unchanged replay will mark the caller query before trying to mark this one.
    /// If there is a change to report, the caller query will be re-executed before this one.
    ///
    /// FIXME: If the code is changed enough for this node to be marked before requiring the
    /// caller's node, we suppose that those changes will be enough to mark this node red and
    /// force a recomputation using the "normal" way.
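    ///
    /// # Example (illustrative)
    ///
    /// A minimal sketch of feeding a value; `make_dep_node` is a hypothetical
    /// helper, and the formatting closure is just for diagnostics:
    ///
    /// ```ignore (illustrative sketch)
    /// let dep_node_index = tcx.dep_graph.with_feed_task(
    ///     make_dep_node(dep_kinds::type_of, def_id), // hypothetical helper
    ///     tcx,
    ///     &fed_value,
    ///     Some(hash_result),
    ///     |value| format!("{value:?}"),
    /// );
    /// ```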
    pub fn with_feed_task<Ctxt: DepContext<Deps = D>, R>(
        &self,
        node: DepNode,
        cx: Ctxt,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
        format_value_fn: fn(&R) -> String,
    ) -> DepNodeIndex {
        if let Some(data) = self.data.as_ref() {
            // The caller query has more dependencies than the node we are creating. We may
            // encounter a case where this created node is marked as green, but the caller query is
            // subsequently marked as red or recomputed. In this case, we will end up feeding a
            // value to an existing node.
            //
            // For sanity, we still check that the loaded stable hash and the new one match.
            if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
                let dep_node_index = data.colors.current(prev_index);
                if let Some(dep_node_index) = dep_node_index {
                    incremental_verify_ich(
                        cx,
                        data,
                        result,
                        prev_index,
                        hash_result,
                        format_value_fn,
                    );

                    #[cfg(debug_assertions)]
                    if hash_result.is_some() {
                        data.current.record_edge(
                            dep_node_index,
                            node,
                            data.prev_fingerprint_of(prev_index),
                        );
                    }

                    return dep_node_index;
                }
            }

            let mut edges = EdgesVec::new();
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::EvalAlways => {
                    edges.push(DepNodeIndex::FOREVER_RED_NODE);
                }
                TaskDepsRef::Ignore => {}
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")
                }
            });

            data.hash_result_and_alloc_node(&cx, node, edges, result, hash_result)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            self.next_virtual_depnode_index()
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        sess: &Session,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            let color = self.colors.get(prev_index);
            let ok = match color {
                DepNodeColor::Unknown => true,
                DepNodeColor::Red => false,
                DepNodeColor::Green(..) => sess.threads() > 1, // Other threads may mark this green
            };
            if !ok {
                panic!("{}", msg())
            }
        } else if let Some(nodes_in_current_session) = &self.current.nodes_in_current_session {
            outline(|| {
                let seen = nodes_in_current_session.lock().contains_key(dep_node);
                assert!(!seen, "{}", msg());
            });
        }
    }

    fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.colors.get(prev_index)
        } else {
            // This is a node that did not exist in the previous compilation session.
            DepNodeColor::Unknown
        }
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    #[inline]
    pub fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
        matches!(self.colors.get(prev_index), DepNodeColor::Green(_))
    }

    #[inline]
    pub fn prev_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
        self.previous.fingerprint_by_index(prev_index)
    }

    #[inline]
    pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> &DepNode {
        self.previous.index_to_node(prev_index)
    }

    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
        self.debug_loaded_from_disk.lock().insert(dep_node);
    }

    /// This encodes a diagnostic by creating a node with a unique index and associating
    /// `diagnostic` with it, for use in the next session.
    #[inline]
    fn encode_diagnostic<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        diagnostic: &DiagInner,
    ) -> DepNodeIndex {
        // Use `send_new` so we get a unique index, even though the dep node is not unique.
        let dep_node_index = self.current.encoder.send_new(
            DepNode {
                kind: D::DEP_KIND_SIDE_EFFECT,
                hash: PackedFingerprint::from(Fingerprint::ZERO),
            },
            Fingerprint::ZERO,
            // We want the side effect node to always be red so it will be forced and emit the
            // diagnostic.
            std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
        );
        let side_effect = QuerySideEffect::Diagnostic(diagnostic.clone());
        qcx.store_side_effect(dep_node_index, side_effect);
        dep_node_index
    }

    /// This forces a diagnostic node green by running its side effect. `prev_index` would
    /// refer to a node created by `encode_diagnostic` in the previous session.
    #[inline]
    fn force_diagnostic_node<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        D::with_deps(TaskDepsRef::Ignore, || {
            let side_effect = qcx.load_side_effect(prev_index).unwrap();

            match &side_effect {
                QuerySideEffect::Diagnostic(diagnostic) => {
                    qcx.dep_context().sess().dcx().emit_diagnostic(diagnostic.clone());
                }
            }

            // Use `send_and_color` as `promote_node_and_deps_to_current` expects all
            // green dependencies. `send_and_color` will also prevent multiple nodes
            // being encoded for concurrent calls.
            let dep_node_index = self.current.encoder.send_and_color(
                prev_index,
                &self.colors,
                DepNode {
                    kind: D::DEP_KIND_SIDE_EFFECT,
                    hash: PackedFingerprint::from(Fingerprint::ZERO),
                },
                Fingerprint::ZERO,
                std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
                true,
            );
            // This will just overwrite the same value for concurrent calls.
            qcx.store_side_effect(dep_node_index, side_effect);
        })
    }

    fn alloc_and_color_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(prev_index) = self.previous.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
            let is_green = if let Some(fingerprint) = fingerprint {
                if fingerprint == self.previous.fingerprint_by_index(prev_index) {
                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    true
                } else {
                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    false
                }
            } else {
                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                false
            };

            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            let dep_node_index = self.current.encoder.send_and_color(
                prev_index,
                &self.colors,
                key,
                fingerprint,
                edges,
                is_green,
            );

            self.current.record_node(dep_node_index, key, fingerprint);

            dep_node_index
        } else {
            self.current.alloc_new_node(key, edges, fingerprint.unwrap_or(Fingerprint::ZERO))
        }
    }

    fn promote_node_and_deps_to_current(
        &self,
        prev_index: SerializedDepNodeIndex,
    ) -> Option<DepNodeIndex> {
        self.current.debug_assert_not_in_new_nodes(&self.previous, prev_index);

        let dep_node_index = self.current.encoder.send_promoted(prev_index, &self.colors);

        #[cfg(debug_assertions)]
        if let Some(dep_node_index) = dep_node_index {
            self.current.record_edge(
                dep_node_index,
                *self.previous.index_to_node(prev_index),
                self.previous.fingerprint_by_index(prev_index),
            );
        }

        dep_node_index
    }
}

impl<D: Deps> DepGraph<D> {
    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &WorkProductMap {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    pub fn debug_dep_kind_was_loaded_from_disk(&self, dep_kind: DepKind) -> bool {
        // We only check if we have a dep node corresponding to the given dep kind.
        #[allow(rustc::potential_query_instability)]
        self.data
            .as_ref()
            .unwrap()
            .debug_loaded_from_disk
            .lock()
            .iter()
            .any(|node| node.kind == dep_kind)
    }
842843#[cfg(debug_assertions)]
844 #[inline(always)]
845pub(crate) fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
846where
847F: FnOnce() -> String,
848 {
849let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;
850851if dep_node_debug.borrow().contains_key(&dep_node) {
852return;
853 }
854let debug_str = self.with_ignore(debug_str_gen);
855dep_node_debug.borrow_mut().insert(dep_node, debug_str);
856 }
857858pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
859self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
860 }
861862fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
863if let Some(ref data) = self.data {
864return data.node_color(dep_node);
865 }
866867 DepNodeColor::Unknown868 }

    pub fn try_mark_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
    }
}

impl<D: Deps> DepGraphData<D> {
    /// Try to mark a node index for the node `dep_node`.
    ///
    /// A node will have an index when it has already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node when
    /// a node index can be found for that node.
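    ///
    /// # Example (illustrative)
    ///
    /// A minimal sketch of the caller's decision:
    ///
    /// ```ignore (illustrative sketch)
    /// if let Some((prev_index, dep_node_index)) = data.try_mark_green(qcx, &dep_node) {
    ///     // The previous result is still valid: load it from the on-disk
    ///     // cache instead of re-executing the query.
    /// } else {
    ///     // Red or unknowable: the query must be re-executed.
    /// }
    /// ```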
    pub fn try_mark_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        // Return None if the dep node didn't exist in the previous session
        let prev_index = self.previous.node_to_index_opt(dep_node)?;

        debug_assert_eq!(self.previous.index_to_node(prev_index), dep_node);

        match self.colors.get(prev_index) {
            DepNodeColor::Green(dep_node_index) => Some((prev_index, dep_node_index)),
            DepNodeColor::Red => None,
            DepNodeColor::Unknown => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(qcx, prev_index, None)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
    fn try_mark_parent_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
        &self,
        qcx: Qcx,
        parent_dep_node_index: SerializedDepNodeIndex,
        frame: &MarkFrame<'_>,
    ) -> Option<()> {
        let get_dep_dep_node = || self.previous.index_to_node(parent_dep_node_index);

        match self.colors.get(parent_dep_node_index) {
            DepNodeColor::Green(_) => {
                // This dependency has been marked as green before, we are
                // still fine and can continue with checking the other
                // dependencies.
                //
                // This path is extremely hot. We don't want to get the
                // `dep_dep_node` unless it's necessary. Hence the
                // `get_dep_dep_node` closure.
                debug!("dependency {:?} was immediately green", get_dep_dep_node());
                return Some(());
            }
            DepNodeColor::Red => {
                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.
                debug!("dependency {:?} was immediately red", get_dep_dep_node());
                return None;
            }
            DepNodeColor::Unknown => {}
        }

        let dep_dep_node = get_dep_dep_node();

        // We don't know the state of this dependency. If it isn't
        // an eval_always node, let's try to mark it green recursively.
        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "state of dependency {:?} ({}) is unknown, trying to mark it green",
                dep_dep_node, dep_dep_node.hash,
            );

            let node_index = self.try_mark_previous_green(qcx, parent_dep_node_index, Some(frame));

            if node_index.is_some() {
                debug!("managed to MARK dependency {dep_dep_node:?} as green");
                return Some(());
            }
        }

        // We failed to mark it green, so we try to force the query.
        debug!("trying to force dependency {dep_dep_node:?}");
        if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, parent_dep_node_index, frame) {
            // The DepNode could not be forced.
            debug!("dependency {dep_dep_node:?} could not be forced");
            return None;
        }

        match self.colors.get(parent_dep_node_index) {
            DepNodeColor::Green(_) => {
                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
                return Some(());
            }
            DepNodeColor::Red => {
                debug!("dependency {dep_dep_node:?} was red after forcing");
                return None;
            }
            DepNodeColor::Unknown => {}
        }

        if let None = qcx.dep_context().sess().dcx().has_errors_or_delayed_bugs() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
        }

        // If the query we just forced has resulted in
        // some kind of compilation error, we cannot rely on
        // the dep-node color having been properly updated.
        // This means that the query system has reached an
        // invalid state. We let the compiler continue (by
        // returning `None`) so it can emit error messages
        // and wind down, but rely on the fact that this
        // invalid state will not be persisted to the
        // incremental compilation cache because of
        // compilation errors being present.
        debug!("dependency {dep_dep_node:?} resulted in compilation error");
        return None;
    }

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
    fn try_mark_previous_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
        &self,
        qcx: Qcx,
        prev_dep_node_index: SerializedDepNodeIndex,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<DepNodeIndex> {
        let frame = MarkFrame { index: prev_dep_node_index, parent: frame };

        // We never try to mark eval_always nodes as green
        debug_assert!(
            !qcx.dep_context()
                .is_eval_always(self.previous.index_to_node(prev_dep_node_index).kind)
        );

        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);

        for dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(qcx, dep_dep_node_index, &frame)?;
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently.

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph.
        //
        // `no_hash` nodes may fail this promotion due to already being conservatively colored red.
        let dep_node_index = self.promote_node_and_deps_to_current(prev_dep_node_index)?;

        // ... and finally store a "Green" entry in the color map.
        // Multiple threads can all write the same color here.

        debug!(
            "successfully marked {:?} as green",
            self.previous.index_to_node(prev_dep_node_index)
        );
        Some(dep_node_index)
    }
}
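
// A minimal, self-contained sketch (test-only, not compiler code) of the
// red/green marking discipline implemented above: a previous-session node may
// be promoted to green only if every one of its dependencies can be proven
// green first, a single red dependency poisons the parent, and unknown
// dependencies are resolved recursively. The forcing fallback and all
// concurrency are deliberately omitted; `usize` indices and a plain `Vec`
// graph stand in for the real `SerializedDepNodeIndex` machinery.
#[cfg(test)]
mod try_mark_green_sketch {
    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Color {
        Unknown,
        Red,
        Green,
    }

    struct PrevGraph {
        // Edges of each previous-session node, by index.
        deps: Vec<Vec<usize>>,
        // Colors discovered so far in the current session.
        colors: Vec<Color>,
    }

    impl PrevGraph {
        // Returns `Some(())` if `node` could be marked green, `None` if a red
        // dependency was found (mirroring the `Option` protocol above).
        fn try_mark_green(&mut self, node: usize) -> Option<()> {
            for i in 0..self.deps[node].len() {
                let dep = self.deps[node][i];
                match self.colors[dep] {
                    Color::Green => continue,                    // proven unchanged
                    Color::Red => return None,                   // changed: poison the parent
                    Color::Unknown => self.try_mark_green(dep)?, // resolve recursively
                }
            }
            self.colors[node] = Color::Green;
            Some(())
        }
    }

    #[test]
    fn red_poisons_green_promotes() {
        let mut g = PrevGraph {
            deps: vec![vec![1, 2], vec![], vec![]],
            colors: vec![Color::Unknown, Color::Green, Color::Red],
        };
        assert_eq!(g.try_mark_green(0), None);

        let mut g2 = PrevGraph {
            deps: vec![vec![1], vec![]],
            colors: vec![Color::Unknown, Color::Unknown],
        };
        // Unknown dependencies are resolved recursively before the parent is promoted.
        assert_eq!(g2.try_mark_green(0), Some(()));
        assert_eq!(g2.colors[1], Color::Green);
    }
}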

impl<D: Deps> DepGraph<D> {
    /// Returns true if the given node has been marked as red during the
    /// current compilation session. Used in various assertions
    pub fn is_red(&self, dep_node: &DepNode) -> bool {
        matches!(self.node_color(dep_node), DepNodeColor::Red)
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions
    pub fn is_green(&self, dep_node: &DepNode) -> bool {
        matches!(self.node_color(dep_node), DepNodeColor::Green(_))
    }

    pub fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        sess: &Session,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(data) = &self.data {
            data.assert_dep_node_not_yet_allocated_in_current_session(sess, dep_node, msg)
        }
    }

    /// This method loads all on-disk cacheable query results into memory, so
    /// they can be written out to the new cache file again. Most query results
    /// will already be in memory but in the case where we marked something as
    /// green but then did not need the value, that value will never have been
    /// loaded from disk.
    ///
    /// This method will only load queries that will end up in the disk cache.
    /// Other queries will not be executed.
    pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                DepNodeColor::Green(_) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(dep_node);
                }
                DepNodeColor::Unknown | DepNodeColor::Red => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    pub fn finish_encoding(&self) -> FileEncodeResult {
        if let Some(data) = &self.data { data.current.encoder.finish(&data.current) } else { Ok(0) }
    }

    pub fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        debug_assert!(self.data.is_none());
        let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
        DepNodeIndex::from_u32(index)
    }
}
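
// A std-only sketch of the pattern behind `next_virtual_depnode_index` above:
// a relaxed `fetch_add` on an atomic counter hands out unique, lock-free IDs,
// which is all that self-profiling needs in non-incremental mode.
#[cfg(test)]
mod virtual_index_sketch {
    use std::sync::Arc;
    use std::sync::atomic::{AtomicU32, Ordering};

    #[test]
    fn ids_are_unique_across_threads() {
        let counter = Arc::new(AtomicU32::new(0));
        let mut handles = Vec::new();
        for _ in 0..4 {
            let counter = Arc::clone(&counter);
            handles.push(std::thread::spawn(move || {
                // Relaxed is enough: we only need uniqueness, not ordering
                // with respect to other memory operations.
                (0..100).map(|_| counter.fetch_add(1, Ordering::Relaxed)).collect::<Vec<_>>()
            }));
        }
        let mut all: Vec<u32> = handles.into_iter().flat_map(|h| h.join().unwrap()).collect();
        all.sort_unstable();
        all.dedup();
        assert_eq!(all.len(), 400); // every handed-out ID was distinct
    }
}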

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary examples are
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
    /// saved file and the key is some identifier for the type of file being saved.
    ///
    /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
    /// the object file's path, and "dwo" to the dwarf object file's path.
    pub saved_files: UnordMap<String, String>,
}
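
// An illustrative sketch of the `saved_files` convention documented above:
// keys identify the kind of saved file (by convention the file extension,
// e.g. "o" and "dwo"), values are the saved paths. A std `HashMap` stands in
// for `UnordMap`, and the file names are made up for the example.
#[cfg(test)]
mod work_product_sketch {
    use std::collections::HashMap;

    #[test]
    fn saved_files_keyed_by_extension() {
        let mut saved_files: HashMap<String, String> = HashMap::new();
        saved_files.insert("o".to_string(), "cgu0.o".to_string());
        saved_files.insert("dwo".to_string(), "cgu0.dwo".to_string());
        // At reuse time, a consumer asks for a saved file by its kind:
        assert_eq!(saved_files.get("o").map(String::as_str), Some("cgu0.o"));
    }
}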

pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex {}
}

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
pub(super) struct CurrentDepGraph<D: Deps> {
    encoder: GraphEncoder<D>,
    anon_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,

    /// This is used to verify that fingerprints do not change between the creation of a node
    /// and its recomputation.
    #[cfg(debug_assertions)]
    fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter>,

    /// Used to verify the absence of hash collisions among DepNodes.
    /// This field is only `Some` if the `-Z incremental_verify_ich` option is present
    /// or if `debug_assertions` are enabled.
    ///
    /// The map contains all DepNodes that have been allocated in the current session so far.
    nodes_in_current_session: Option<Lock<FxHashMap<DepNode, DepNodeIndex>>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: the color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is a hash of the number of previous sessions.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    pub(super) total_read_count: AtomicU64,
    pub(super) total_duplicate_read_count: AtomicU64,
}
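
// A sketch of the `anon_id_seed` idea documented above, with std's
// `DefaultHasher` standing in for `StableHasher`: mixing a session count into
// every anon-node fingerprint keeps IDs from different sessions disjoint, so
// a current-session anon node cannot "shadow" the color of a same-ID node
// from the previous session.
#[cfg(test)]
mod anon_seed_sketch {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn anon_fingerprint(session_count: u64, edges: &[u64]) -> u64 {
        let mut hasher = DefaultHasher::new();
        // Mix the session key first, then the edge list the node is computed from.
        session_count.hash(&mut hasher);
        edges.hash(&mut hasher);
        hasher.finish()
    }

    #[test]
    fn same_edges_different_sessions_differ() {
        let edges = [1u64, 2, 3];
        assert_ne!(anon_fingerprint(1, &edges), anon_fingerprint(2, &edges));
    }
}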

impl<D: Deps> CurrentDepGraph<D> {
    fn new(
        session: &Session,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        let mut stable_hasher = StableHasher::new();
        previous.session_count().hash(&mut stable_hasher);
        let anon_id_seed = stable_hasher.finish();

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        let new_node_dbg =
            session.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions);

        CurrentDepGraph {
            encoder: GraphEncoder::new(session, encoder, prev_graph_node_count, previous),
            anon_node_to_index: ShardedHashMap::with_capacity(
                // FIXME: The count estimate is off as anon nodes are only a portion of the nodes.
                new_node_count_estimate / sharded::shards(),
            ),
            anon_id_seed,
            #[cfg(debug_assertions)]
            forbidden_edge,
            #[cfg(debug_assertions)]
            fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
            nodes_in_current_session: new_node_dbg.then(|| {
                Lock::new(FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate,
                    Default::default(),
                ))
            }),
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
        let previous =
            *self.fingerprints.lock().get_or_insert_with(dep_node_index, || fingerprint);
        assert_eq!(previous, fingerprint, "Unstable fingerprints for {:?}", key);
    }

    #[inline(always)]
    fn record_node(
        &self,
        dep_node_index: DepNodeIndex,
        key: DepNode,
        _current_fingerprint: Fingerprint,
    ) {
        #[cfg(debug_assertions)]
        self.record_edge(dep_node_index, key, _current_fingerprint);

        if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
            outline(|| {
                if nodes_in_current_session.lock().insert(key, dep_node_index).is_some() {
                    panic!("Found duplicate dep-node {key:?}");
                }
            });
        }
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    #[inline(always)]
    fn alloc_new_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        let dep_node_index = self.encoder.send_new(key, current_fingerprint, edges);

        self.record_node(dep_node_index, key, current_fingerprint);

        dep_node_index
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) {
        if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
            debug_assert!(
                !nodes_in_current_session
                    .lock()
                    .contains_key(&prev_graph.index_to_node(prev_index)),
                "node from previous graph present in new node collection"
            );
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a> {
    /// New dependencies can be added to the
    /// `TaskDeps`. This is used when executing a 'normal' query
    /// (no `eval_always` modifier)
    Allow(&'a Lock<TaskDeps>),
    /// This is used when executing an `eval_always` query. We don't
    /// need to track dependencies for a query that's always
    /// re-executed -- but we need to know that this is an `eval_always`
    /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
    /// when directly feeding other queries.
    EvalAlways,
    /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
    Ignore,
    /// Any attempt to add new dependencies will cause a panic.
    /// This is used when decoding a query result from disk,
    /// to ensure that the decoding process doesn't itself
    /// require the execution of any queries.
    Forbid,
}
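
// A simplified sketch of how a tracking mode like `TaskDepsRef` gates
// dependency recording: `Allow` appends to the read list, `Ignore` drops the
// read, and `Forbid` panics (compare `panic_on_forbidden_read` at the bottom
// of this file). Locks and the `EvalAlways` case are omitted for brevity.
#[cfg(test)]
mod task_deps_mode_sketch {
    enum Mode<'a> {
        Allow(&'a mut Vec<u32>),
        Ignore,
        Forbid,
    }

    fn record_read(mode: Mode<'_>, dep: u32) {
        match mode {
            Mode::Allow(reads) => reads.push(dep),
            Mode::Ignore => {} // e.g. inside `with_ignore`
            Mode::Forbid => panic!("dependency recorded while decoding a cached query result"),
        }
    }

    #[test]
    fn allow_records_ignore_drops() {
        let mut reads = Vec::new();
        record_read(Mode::Allow(&mut reads), 7);
        record_read(Mode::Ignore, 8);
        assert_eq!(reads, vec![7]);
    }
}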

#[derive(Debug)]
pub struct TaskDeps {
    #[cfg(debug_assertions)]
    node: Option<DepNode>,

    /// A vector of `DepNodeIndex`, basically.
    reads: EdgesVec,

    /// When adding new edges to `reads` in `DepGraph::read_index` we need to determine if the edge
    /// has been seen before. If the number of elements in `reads` is small, we just do a linear
    /// scan. If the number is higher, a hashset has better perf. This field is that hashset. It's
    /// only used if the number of elements in `reads` exceeds `LINEAR_SCAN_MAX`.
    read_set: FxHashSet<DepNodeIndex>,

    phantom_data: PhantomData<DepNode>,
}

impl TaskDeps {
    /// See `TaskDeps::read_set` above.
    const LINEAR_SCAN_MAX: usize = 16;

    #[inline]
    fn new(#[cfg(debug_assertions)] node: Option<DepNode>, read_set_capacity: usize) -> Self {
        TaskDeps {
            #[cfg(debug_assertions)]
            node,
            reads: EdgesVec::new(),
            read_set: FxHashSet::with_capacity_and_hasher(read_set_capacity, Default::default()),
            phantom_data: PhantomData,
        }
    }
}
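
// A sketch of the `LINEAR_SCAN_MAX` strategy described on `read_set`: small
// read lists are deduplicated with a linear scan of the vector, and once the
// list outgrows the cutoff, membership checks switch to a hash set that is
// backfilled at the crossing point. The exact hand-off differs from the real
// `DepGraph::read_index`; this only demonstrates the hybrid idea.
#[cfg(test)]
mod read_dedup_sketch {
    use std::collections::HashSet;

    const LINEAR_SCAN_MAX: usize = 16;

    struct Reads {
        reads: Vec<u32>,
        read_set: HashSet<u32>,
    }

    impl Reads {
        fn push_if_new(&mut self, dep: u32) -> bool {
            if self.reads.len() < LINEAR_SCAN_MAX {
                // Small list: a linear scan beats hashing.
                if self.reads.contains(&dep) {
                    return false;
                }
            } else {
                if self.reads.len() == LINEAR_SCAN_MAX {
                    // Crossing the threshold: backfill the set once.
                    self.read_set.extend(self.reads.iter().copied());
                }
                if !self.read_set.insert(dep) {
                    return false;
                }
            }
            self.reads.push(dep);
            true
        }
    }

    #[test]
    fn duplicates_are_dropped_in_both_regimes() {
        let mut r = Reads { reads: Vec::new(), read_set: HashSet::new() };
        for i in 0..40 {
            assert!(r.push_if_new(i));
            assert!(!r.push_if_new(i));
        }
        assert_eq!(r.reads.len(), 40);
    }
}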

// A data structure that stores `DepNodeColor` values as a contiguous
// array, using one u32 per entry.
pub(super) struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

// All values below `COMPRESSED_RED` are green.
const COMPRESSED_RED: u32 = u32::MAX - 1;
const COMPRESSED_UNKNOWN: u32 = u32::MAX;

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        debug_assert!(COMPRESSED_RED > DepNodeIndex::MAX_AS_U32);
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_UNKNOWN)).collect() }
    }

    #[inline]
    pub(super) fn current(&self, index: SerializedDepNodeIndex) -> Option<DepNodeIndex> {
        let value = self.values[index].load(Ordering::Relaxed);
        if value <= DepNodeIndex::MAX_AS_U32 { Some(DepNodeIndex::from_u32(value)) } else { None }
    }

    /// This tries to atomically mark a node green and assign `index` as the new
    /// index if `green` is true; otherwise it will try to atomically mark it red.
    ///
    /// This returns `Ok` if the node was still unknown and the color was assigned.
    /// If the node was already green, the previously allocated index is returned in
    /// `Err(Some(index))`; if it was already red, `Err(None)` is returned.
    #[inline(always)]
    pub(super) fn try_mark(
        &self,
        prev_index: SerializedDepNodeIndex,
        index: DepNodeIndex,
        green: bool,
    ) -> Result<(), Option<DepNodeIndex>> {
        let value = &self.values[prev_index];
        match value.compare_exchange(
            COMPRESSED_UNKNOWN,
            if green { index.as_u32() } else { COMPRESSED_RED },
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            Ok(_) => Ok(()),
            Err(v) => Err(if v == COMPRESSED_RED { None } else { Some(DepNodeIndex::from_u32(v)) }),
        }
    }

    #[inline]
    pub(super) fn get(&self, index: SerializedDepNodeIndex) -> DepNodeColor {
        let value = self.values[index].load(Ordering::Acquire);
        // Green is by far the most common case. Check for that first so we can succeed with a
        // single comparison.
        if value < COMPRESSED_RED {
            DepNodeColor::Green(DepNodeIndex::from_u32(value))
        } else if value == COMPRESSED_RED {
            DepNodeColor::Red
        } else {
            debug_assert_eq!(value, COMPRESSED_UNKNOWN);
            DepNodeColor::Unknown
        }
    }

    #[inline]
    pub(super) fn insert_red(&self, index: SerializedDepNodeIndex) {
        let value = self.values[index].swap(COMPRESSED_RED, Ordering::Release);
        // Sanity check for duplicate nodes.
        assert_eq!(value, COMPRESSED_UNKNOWN, "tried to color an already colored node as red");
    }
}
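
// A sketch of the packed `u32` encoding used by `DepNodeColorMap`: any value
// strictly below `COMPRESSED_RED` is green and doubles as the node's current
// `DepNodeIndex`, while the two largest values encode red and unknown. The
// compare-exchange mirrors `try_mark`: only the first thread to color an
// unknown slot wins, and losers observe what it wrote.
#[cfg(test)]
mod packed_color_sketch {
    use std::sync::atomic::{AtomicU32, Ordering};

    const COMPRESSED_RED: u32 = u32::MAX - 1;
    const COMPRESSED_UNKNOWN: u32 = u32::MAX;

    fn decode(value: u32) -> &'static str {
        if value < COMPRESSED_RED {
            "green" // the value itself is the current-session index
        } else if value == COMPRESSED_RED {
            "red"
        } else {
            "unknown"
        }
    }

    #[test]
    fn first_cas_wins() {
        let slot = AtomicU32::new(COMPRESSED_UNKNOWN);
        assert_eq!(decode(slot.load(Ordering::Relaxed)), "unknown");
        // Mark green with index 5: succeeds because the slot was unknown.
        assert!(slot
            .compare_exchange(COMPRESSED_UNKNOWN, 5, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok());
        // A racing attempt to mark red fails and observes the green index.
        assert_eq!(
            slot.compare_exchange(
                COMPRESSED_UNKNOWN,
                COMPRESSED_RED,
                Ordering::Relaxed,
                Ordering::Relaxed
            ),
            Err(5)
        );
        assert_eq!(decode(slot.load(Ordering::Relaxed)), "green");
    }
}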

#[inline(never)]
#[cold]
pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: &MarkFrame<'_>) {
    let data = graph.data.as_ref().unwrap();

    eprintln!("there was a panic while trying to force a dep node");
    eprintln!("try_mark_green dep node stack:");

    let mut i = 0;
    let mut current = Some(frame);
    while let Some(frame) = current {
        let node = data.previous.index_to_node(frame.index);
        eprintln!("#{i} {node:?}");
        current = frame.parent;
        i += 1;
    }

    eprintln!("end of try_mark_green dep node stack");
}
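
// A sketch of the borrowed parent-link pattern that `MarkFrame` uses: each
// recursion level stacks a frame pointing at its caller's frame, so a
// panic-time handler can walk the whole chain without heap allocation.
#[cfg(test)]
mod mark_frame_sketch {
    struct Frame<'a> {
        index: u32,
        parent: Option<&'a Frame<'a>>,
    }

    fn collect_trace(frame: &Frame<'_>) -> Vec<u32> {
        let mut out = Vec::new();
        let mut current = Some(frame);
        while let Some(f) = current {
            out.push(f.index);
            current = f.parent;
        }
        out
    }

    #[test]
    fn walks_from_innermost_to_outermost() {
        let root = Frame { index: 0, parent: None };
        let mid = Frame { index: 1, parent: Some(&root) };
        let leaf = Frame { index: 2, parent: Some(&mid) };
        assert_eq!(collect_trace(&leaf), vec![2, 1, 0]);
    }
}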

#[cold]
#[inline(never)]
fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepNodeIndex) -> ! {
    // We have to do an expensive reverse-lookup of the DepNode that
    // corresponds to `dep_node_index`, but that's OK since we are about
    // to ICE anyway.
    let mut dep_node = None;

    // First try to find the dep node among those that already existed in the
    // previous session and have been marked green
    for prev_index in data.colors.values.indices() {
        if data.colors.current(prev_index) == Some(dep_node_index) {
            dep_node = Some(*data.previous.index_to_node(prev_index));
            break;
        }
    }

    if dep_node.is_none()
        && let Some(nodes) = &data.current.nodes_in_current_session
    {
        // Try to find it among the nodes allocated so far in this session.
        // This is OK, there's only ever one node result possible so this is deterministic.
        #[allow(rustc::potential_query_instability)]
        if let Some((node, _)) = nodes.lock().iter().find(|&(_, index)| *index == dep_node_index) {
            dep_node = Some(*node);
        }
    }

    let dep_node = dep_node.map_or_else(
        || format!("with index {:?}", dep_node_index),
        |dep_node| format!("`{:?}`", dep_node),
    );

    panic!(
        "Error: trying to record dependency on DepNode {dep_node} in a \
         context that does not allow it (e.g. during query deserialization). \
         The most common case of recording a dependency on a DepNode `foo` is \
         when the corresponding query `foo` is invoked. Invoking queries is not \
         allowed as part of loading something from the incremental on-disk cache. \
         See <https://github.com/rust-lang/rust/pull/91919>."
    )
}