use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU64, Lock};
use rustc_data_structures::unord::UnordMap;
use rustc_data_structures::{assert_matches, outline};
use rustc_errors::DiagInner;
use rustc_index::IndexVec;
use rustc_macros::{Decodable, Encodable};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_session::Session;
use tracing::{debug, instrument};
#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
use crate::dep_graph::edges::EdgesVec;
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffect};

pub struct DepGraph<D: Deps> {
    data: Option<Arc<DepGraphData<D>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Arc<AtomicU32>,
}

/// Manual clone impl that does not require `D: Clone`.
impl<D: Deps> Clone for DepGraph<D> {
    fn clone(&self) -> Self {
        let Self { data, virtual_dep_node_index } = self;
        Self {
            data: Option::<Arc<_>>::clone(data),
            virtual_dep_node_index: Arc::clone(virtual_dep_node_index),
        }
    }
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}
}

// We store a large collection of these in `prev_index_to_index` during
// non-full incremental builds, and want to ensure that the element size
// doesn't inadvertently increase.
rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);

impl DepNodeIndex {
    const SINGLETON_ZERO_DEPS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

impl From<DepNodeIndex> for QueryInvocationId {
    #[inline(always)]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}
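
/// A frame in the recursive green-marking stack: it records which
/// previous-session node is currently being processed and links to the frame
/// of the node that required it. (Description inferred from usage below:
/// `try_mark_previous_green` builds a `MarkFrame { index, parent }` per node,
/// so error paths can walk the chain of nodes being marked.)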
pub struct MarkFrame<'a> {
    index: SerializedDepNodeIndex,
    parent: Option<&'a MarkFrame<'a>>,
}

#[derive(Debug)]
pub(super) enum DepNodeColor {
    Green(DepNodeIndex),
    Red,
    Unknown,
}

pub(crate) struct DepGraphData<D: Deps> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph<D>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: Arc<SerializedDepGraph>,

    colors: DepNodeColorMap,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: WorkProductMap,

    dep_node_debug: Lock<FxHashMap<DepNode, String>>,

    /// Used by incremental compilation tests to assert that
    /// a particular query result was decoded from disk
    /// (not just marked green).
    debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
}
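
/// Hashes a query result to a [`Fingerprint`] that can be compared against
/// the fingerprint recorded for the node in a previous session.
///
/// A minimal sketch of the call shape (illustrative only; the same pattern
/// appears in `hash_result_and_alloc_node` below):
///
/// ```ignore (illustrative)
/// let fingerprint =
///     cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, &result));
/// ```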
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}

impl<D: Deps> DepGraph<D> {
    pub fn new(
        session: &Session,
        prev_graph: Arc<SerializedDepGraph>,
        prev_work_products: WorkProductMap,
        encoder: FileEncoder,
    ) -> DepGraph<D> {
        let prev_graph_node_count = prev_graph.node_count();

        let current =
            CurrentDepGraph::new(session, prev_graph_node_count, encoder, Arc::clone(&prev_graph));

        let colors = DepNodeColorMap::new(prev_graph_node_count);

        // Instantiate a node with zero dependencies only once for anonymous queries.
        let _green_node_index = current.alloc_new_node(
            DepNode { kind: D::DEP_KIND_ANON_ZERO_DEPS, hash: current.anon_id_seed.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE);

        // Instantiate a dependency-less red node only once for anonymous queries.
        let red_node_index = current.alloc_new_node(
            DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        if prev_graph_node_count > 0 {
            colors.insert_red(SerializedDepNodeIndex::from_u32(
                DepNodeIndex::FOREVER_RED_NODE.as_u32(),
            ));
        }

        DepGraph {
            data: Some(Arc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                previous: prev_graph,
                colors,
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<D> {
        DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
    }

    #[inline]
    pub(crate) fn data(&self) -> Option<&DepGraphData<D>> {
        self.data.as_deref()
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
        if let Some(data) = &self.data {
            data.current.encoder.with_query(f)
        }
    }
191if let Some(..) = self.data {
192 D::read_deps(|task_deps| {
193match task_deps {
TaskDepsRef::Ignore => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"TaskDepsRef::Ignore",
::core::option::Option::Some(format_args!("expected no task dependency tracking")));
}
};assert_matches!(
194 task_deps,
195 TaskDepsRef::Ignore,
196"expected no task dependency tracking"
197);
198 })
199 }
200 }
201202pub fn with_ignore<OP, R>(&self, op: OP) -> R
203where
204OP: FnOnce() -> R,
205 {
206 D::with_deps(TaskDepsRef::Ignore, op)
207 }

    /// Used to wrap the deserialization of a query result from disk.
    /// This method enforces that no new `DepNodes` are created during
    /// query result deserialization.
    ///
    /// Enforcing this makes the query dep graph simpler - all nodes
    /// must be created during the query execution, and should be
    /// created from inside the 'body' of a query (the implementation
    /// provided by a particular compiler crate).
    ///
    /// Consider the case of three queries `A`, `B`, and `C`, where
    /// `A` invokes `B` and `B` invokes `C`:
    ///
    /// `A -> B -> C`
    ///
    /// Suppose that decoding the result of query `B` required re-computing
    /// the query `C`. If we did not create a fresh `TaskDeps` when
    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
    /// (if we needed to re-execute `A`). This would cause us to create
    /// a new edge `A -> C`. If this edge did not previously
    /// exist in the `DepGraph`, then we could end up with a different
    /// `DepGraph` at the end of compilation, even if there were no
    /// meaningful changes to the overall program (e.g. a newline was added).
    /// In addition, this edge might cause a subsequent compilation run
    /// to try to force `C` before marking other necessary nodes green. If
    /// `C` did not exist in the new compilation session, then we could
    /// get an ICE. Normally, we would have tried (and failed) to mark
    /// some other query green (e.g. `item_children`) which was used
    /// to obtain `C`, which would prevent us from ever trying to force
    /// a nonexistent `C`.
    ///
    /// It might be possible to enforce that all `DepNode`s read during
    /// deserialization already exist in the previous `DepGraph`. In
    /// the above example, we would invoke `C` during the deserialization
    /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
    /// of `B`, this would result in an edge `B -> C`. If that edge already
    /// existed (with the same `DepPathHash`es), then it should be correct
    /// to allow the invocation of the query to proceed during deserialization
    /// of a query result. We would merely assert that the dep-graph fragment
    /// that would have been added by invoking `C` while decoding `B`
    /// is equivalent to the dep-graph fragment that we already instantiated for `B`
    /// (at the point where we successfully marked `B` as green).
    ///
    /// However, this would require additional complexity
    /// in the query infrastructure, and is not currently needed by the
    /// decoding of any query results. Should the need arise in the future,
    /// we should consider extending the query system with this functionality.
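    ///
    /// A sketch of the intended use (illustrative only; `decode_result` is a
    /// hypothetical stand-in for the caller's decoding logic):
    ///
    /// ```ignore (illustrative)
    /// let value = dep_graph.with_query_deserialization(|| decode_result(bytes));
    /// // Dep-node reads inside the closure run under `TaskDepsRef::Forbid`
    /// // and will ICE via `panic_on_forbidden_read`.
    /// ```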
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Forbid, op)
    }

    #[inline(always)]
    pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        match self.data() {
            Some(data) => data.with_task(key, cx, arg, task, hash_result),
            None => (task(cx, arg), self.next_virtual_depnode_index()),
        }
    }

    pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        match self.data() {
            Some(data) => {
                let (result, index) = data.with_anon_task_inner(cx, dep_kind, op);
                self.read_index(index);
                (result, index)
            }
            None => (op(), self.next_virtual_depnode_index()),
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation.html
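    ///
    /// A sketch of the call shape (illustrative only; `compute_item` stands in
    /// for a free function of type `fn(Ctxt, DefId) -> R`):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     data.with_task(dep_node, tcx, def_id, compute_item, Some(hash_result));
    /// ```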
    #[inline(always)]
    pub(crate) fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        self.assert_dep_node_not_yet_allocated_in_current_session(&key, || {
            format!(
                "forcing query with already existing `DepNode`\n\
                 - query-key: {arg:?}\n\
                 - dep-node: {key:?}"
            )
        });

        let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
        let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
            (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
        } else {
            let task_deps = Lock::new(TaskDeps::new(
                #[cfg(debug_assertions)]
                Some(key),
                0,
            ));
            (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
        };

        let dcx = cx.dep_context();
        let dep_node_index = self.hash_result_and_alloc_node(dcx, key, edges, &result, hash_result);

        (result, dep_node_index)
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
    ///
    /// NOTE: this does not actually count as a read of the DepNode here.
    /// Using the result of this task without reading the DepNode will result
    /// in untracked dependencies which may lead to ICEs as nodes are
    /// incorrectly marked green.
    ///
    /// FIXME: This could perhaps return a `WithDepNode` to ensure that the
    /// user of this function actually performs the read; we'll have to see
    /// how to make that work with `anon` in `execute_job_incr`, though.
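    ///
    /// A sketch of correct usage (illustrative only), mirroring what
    /// `DepGraph::with_anon_task` does:
    ///
    /// ```ignore (illustrative)
    /// let (result, index) = data.with_anon_task_inner(cx, dep_kind, op);
    /// self.read_index(index); // without this read, the dependency is untracked
    /// ```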
    pub(crate) fn with_anon_task_inner<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        // Large numbers of reads are common enough here that pre-sizing `read_set`
        // to 128 actually helps perf on some benchmarks.
        let task_deps = Lock::new(TaskDeps::new(
            #[cfg(debug_assertions)]
            None,
            128,
        ));
        let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
        let task_deps = task_deps.into_inner();
        let reads = task_deps.reads;

        let dep_node_index = match reads.len() {
            0 => {
                // Because the dep-node id of anon nodes is computed from the sets of its
                // dependencies, we already know what the ID of this dependency-less node is
                // going to be (i.e. equal to the precomputed
                // `SINGLETON_ZERO_DEPS_ANON_NODE`). As a consequence we can skip creating
                // a `StableHasher` and sending the node through interning.
                DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE
            }
            1 => {
                // When there is only one dependency, don't bother creating a node.
                reads[0]
            }
            _ => {
                // The dep node indices are hashed here instead of hashing the dep nodes of the
                // dependencies. These indices may refer to different nodes per session, but this
                // isn't a problem here because we ensure that the final dep node hash is per
                // session only, by combining it with the per-session random number
                // `anon_id_seed`. This hash only needs to map the dependencies to a single
                // value on a per-session basis.
                let mut hasher = StableHasher::new();
                reads.hash(&mut hasher);

                let target_dep_node = DepNode {
                    kind: dep_kind,
                    // Fingerprint::combine() is faster than sending Fingerprint
                    // through the StableHasher (at least as long as StableHasher
                    // is so slow).
                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                };

                // The DepNodes generated by the process above are not unique: two queries could
                // have exactly the same dependencies. However, deserialization does not handle
                // duplicated nodes, so we do the deduplication here directly.
                //
                // As anonymous nodes are a small quantity compared to the full dep-graph, the
                // memory impact of this `anon_node_to_index` map remains tolerable, and helps
                // us avoid useless growth of the graph with almost-equivalent nodes.
                self.current.anon_node_to_index.get_or_insert_with(target_dep_node, || {
                    self.current.alloc_new_node(target_dep_node, reads, Fingerprint::ZERO)
                })
            }
        };

        (result, dep_node_index)
    }

    /// Intern the new `DepNode` with the dependencies up-to-now.
    fn hash_result_and_alloc_node<Ctxt: DepContext<Deps = D>, R>(
        &self,
        cx: &Ctxt,
        node: DepNode,
        edges: EdgesVec,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        let hashing_timer = cx.profiler().incr_result_hashing();
        let current_fingerprint = hash_result.map(|hash_result| {
            cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
        });
        let dep_node_index = self.alloc_and_color_node(node, edges, current_fingerprint);
        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
        dep_node_index
    }
}

impl<D: Deps> DepGraph<D> {
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::EvalAlways => {
                        // We don't need to record dependencies of eval_always
                        // queries. They are re-evaluated unconditionally anyway.
                        return;
                    }
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        // Reading is forbidden in this context. ICE with a useful error message.
                        panic_on_forbidden_read(data, dep_node_index)
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
                }

                // Has `dep_node_index` been seen before? Use either a linear scan or a hashset
                // lookup to determine this. See `TaskDeps::read_set` for details.
                let new_read = if task_deps.reads.len() <= TaskDeps::LINEAR_SCAN_MAX {
                    !task_deps.reads.contains(&dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == TaskDeps::LINEAR_SCAN_MAX + 1 {
                        // Fill `read_set` with what we have so far. Future lookups will use it.
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node
                            && let Some(ref forbidden_edge) = data.current.forbidden_edge
                        {
                            let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                            if forbidden_edge.test(&src, &target) {
                                panic!("forbidden edge {:?} -> {:?} created", src, target)
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
                }
            })
        }
    }

    /// This encodes a diagnostic by creating a node with a unique index and associating
    /// `diagnostic` with it, for use in the next session.
    #[inline]
    pub fn record_diagnostic<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        diagnostic: &DiagInner,
    ) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return,
                TaskDepsRef::Forbid | TaskDepsRef::Allow(..) => {
                    self.read_index(data.encode_diagnostic(qcx, diagnostic));
                }
            })
        }
    }

    /// This forces a diagnostic node green by running its side effect. `prev_index` would
    /// refer to a node created using `encode_diagnostic` in the previous session.
    #[inline]
    pub fn force_diagnostic_node<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        if let Some(ref data) = self.data {
            data.force_diagnostic_node(qcx, prev_index);
        }
    }

    /// Create a node when we force-feed a value into the query cache.
    /// This is used to remove cycles during type-checking const generic parameters.
    ///
    /// As usual in the query system, we consider that the current state of the calling query
    /// only depends on the list of dependencies up to now. As a consequence, the value
    /// that this query gives us can only depend on those dependencies too. Therefore,
    /// it is sound to use the current dependency set for the created node.
    ///
    /// During replay, the order of the nodes is relevant in the dependency graph.
    /// So the unchanged replay will mark the caller query before trying to mark this one.
    /// If there is a change to report, the caller query will be re-executed before this one.
    ///
    /// FIXME: If the code is changed enough for this node to be marked before requiring the
    /// caller's node, we suppose that those changes will be enough to mark this node red and
    /// force a recomputation using the "normal" way.
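    ///
    /// A sketch of the call shape (illustrative only; `node` is the fed
    /// `DepNode` and `result` the value being force-fed):
    ///
    /// ```ignore (illustrative)
    /// let dep_node_index =
    ///     tcx.dep_graph.with_feed_task(node, tcx, &result, Some(hash_result));
    /// ```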
    pub fn with_feed_task<Ctxt: DepContext<Deps = D>, R: Debug>(
        &self,
        node: DepNode,
        cx: Ctxt,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(data) = self.data.as_ref() {
            // The caller query has more dependencies than the node we are creating. We may
            // encounter a case where this created node is marked as green, but the caller query is
            // subsequently marked as red or recomputed. In this case, we will end up feeding a
            // value to an existing node.
            //
            // For sanity, we still check that the loaded stable hash and the new one match.
            if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
                let dep_node_index = data.colors.current(prev_index);
                if let Some(dep_node_index) = dep_node_index {
                    crate::query::incremental_verify_ich(
                        cx,
                        data,
                        result,
                        prev_index,
                        hash_result,
                        |value| format!("{value:?}"),
                    );

                    #[cfg(debug_assertions)]
                    if hash_result.is_some() {
                        data.current.record_edge(
                            dep_node_index,
                            node,
                            data.prev_fingerprint_of(prev_index),
                        );
                    }

                    return dep_node_index;
                }
            }

            let mut edges = EdgesVec::new();
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::EvalAlways => {
                    edges.push(DepNodeIndex::FOREVER_RED_NODE);
                }
                TaskDepsRef::Ignore => {}
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")
                }
            });

            data.hash_result_and_alloc_node(&cx, node, edges, result, hash_result)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            self.next_virtual_depnode_index()
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            let current = self.colors.get(prev_index);
            assert_matches!(current, DepNodeColor::Unknown, "{}", msg())
        } else if let Some(nodes_in_current_session) = &self.current.nodes_in_current_session {
            outline(|| {
                let seen = nodes_in_current_session.lock().contains_key(dep_node);
                assert!(!seen, "{}", msg());
            });
        }
    }

    fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.colors.get(prev_index)
        } else {
            // This is a node that did not exist in the previous compilation session.
            DepNodeColor::Unknown
        }
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    #[inline]
    pub(crate) fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
        matches!(self.colors.get(prev_index), DepNodeColor::Green(_))
    }

    #[inline]
    pub(crate) fn prev_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
        self.previous.fingerprint_by_index(prev_index)
    }

    #[inline]
    pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> &DepNode {
        self.previous.index_to_node(prev_index)
    }

    pub(crate) fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
        self.debug_loaded_from_disk.lock().insert(dep_node);
    }

    /// This encodes a diagnostic by creating a node with a unique index and associating
    /// `diagnostic` with it, for use in the next session.
    #[inline]
    fn encode_diagnostic<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        diagnostic: &DiagInner,
    ) -> DepNodeIndex {
        // Use `send_new` so we get a unique index, even though the dep node is not.
        let dep_node_index = self.current.encoder.send_new(
            DepNode {
                kind: D::DEP_KIND_SIDE_EFFECT,
                hash: PackedFingerprint::from(Fingerprint::ZERO),
            },
            Fingerprint::ZERO,
            // We want the side effect node to always be red so it will be forced and emit the
            // diagnostic.
            std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
        );
        let side_effect = QuerySideEffect::Diagnostic(diagnostic.clone());
        qcx.store_side_effect(dep_node_index, side_effect);
        dep_node_index
    }

    /// This forces a diagnostic node green by running its side effect. `prev_index` would
    /// refer to a node created using `encode_diagnostic` in the previous session.
    #[inline]
    fn force_diagnostic_node<'tcx, Qcx: QueryContext<'tcx>>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        D::with_deps(TaskDepsRef::Ignore, || {
            let side_effect = qcx.load_side_effect(prev_index).unwrap();

            match &side_effect {
                QuerySideEffect::Diagnostic(diagnostic) => {
                    qcx.dep_context().sess().dcx().emit_diagnostic(diagnostic.clone());
                }
            }

            // Use `send_and_color` as `promote_node_and_deps_to_current` expects all
            // green dependencies. `send_and_color` will also prevent multiple nodes
            // being encoded for concurrent calls.
            let dep_node_index = self.current.encoder.send_and_color(
                prev_index,
                &self.colors,
                DepNode {
                    kind: D::DEP_KIND_SIDE_EFFECT,
                    hash: PackedFingerprint::from(Fingerprint::ZERO),
                },
                Fingerprint::ZERO,
                std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
                true,
            );
            // This will just overwrite the same value for concurrent calls.
            qcx.store_side_effect(dep_node_index, side_effect);
        })
    }

    fn alloc_and_color_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(prev_index) = self.previous.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
            let is_green = if let Some(fingerprint) = fingerprint {
                if fingerprint == self.previous.fingerprint_by_index(prev_index) {
                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    true
                } else {
                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    false
                }
            } else {
                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                false
            };

            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            let dep_node_index = self.current.encoder.send_and_color(
                prev_index,
                &self.colors,
                key,
                fingerprint,
                edges,
                is_green,
            );

            self.current.record_node(dep_node_index, key, fingerprint);

            dep_node_index
        } else {
            self.current.alloc_new_node(key, edges, fingerprint.unwrap_or(Fingerprint::ZERO))
        }
    }

    fn promote_node_and_deps_to_current(&self, prev_index: SerializedDepNodeIndex) -> DepNodeIndex {
        self.current.debug_assert_not_in_new_nodes(&self.previous, prev_index);

        let dep_node_index = self.current.encoder.send_promoted(prev_index, &self.colors);

        #[cfg(debug_assertions)]
        self.current.record_edge(
            dep_node_index,
            *self.previous.index_to_node(prev_index),
            self.previous.fingerprint_by_index(prev_index),
        );

        dep_node_index
    }
}

impl<D: Deps> DepGraph<D> {
    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
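    ///
    /// A sketch of the call shape (illustrative only; `wp_id` is a
    /// hypothetical `WorkProductId`):
    ///
    /// ```ignore (illustrative)
    /// if let Some(work_product) = dep_graph.previous_work_product(&wp_id) {
    ///     // Reuse the saved files instead of redoing the work.
    /// }
    /// ```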
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &WorkProductMap {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    pub fn debug_dep_kind_was_loaded_from_disk(&self, dep_kind: DepKind) -> bool {
        // We only check if we have a dep node corresponding to the given dep kind.
        #[allow(rustc::potential_query_instability)]
        self.data
            .as_ref()
            .unwrap()
            .debug_loaded_from_disk
            .lock()
            .iter()
            .any(|node| node.kind == dep_kind)
    }

    #[cfg(debug_assertions)]
    #[inline(always)]
    pub(crate) fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = self.with_ignore(debug_str_gen);
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
        if let Some(ref data) = self.data {
            return data.node_color(dep_node);
        }

        DepNodeColor::Unknown
    }

    pub fn try_mark_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
    }
}

impl<D: Deps> DepGraphData<D> {
    /// Try to mark a node index for the node `dep_node`.
    ///
    /// A node will have an index when it's already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node, when
    /// a node index can be found for that node.
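    ///
    /// A sketch of the call shape (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// if let Some((prev_index, dep_node_index)) = data.try_mark_green(qcx, &dep_node) {
    ///     // The node was green: its cached result can be reused.
    /// }
    /// ```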
    pub(crate) fn try_mark_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        // Return None if the dep node didn't exist in the previous session
        let prev_index = self.previous.node_to_index_opt(dep_node)?;

        debug_assert_eq!(self.previous.index_to_node(prev_index), dep_node);

        match self.colors.get(prev_index) {
            DepNodeColor::Green(dep_node_index) => Some((prev_index, dep_node_index)),
            DepNodeColor::Red => None,
            DepNodeColor::Unknown => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(qcx, prev_index, None)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
    fn try_mark_parent_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
        &self,
        qcx: Qcx,
        parent_dep_node_index: SerializedDepNodeIndex,
        frame: &MarkFrame<'_>,
    ) -> Option<()> {
        let get_dep_dep_node = || self.previous.index_to_node(parent_dep_node_index);

        match self.colors.get(parent_dep_node_index) {
            DepNodeColor::Green(_) => {
                // This dependency has been marked as green before, we are
                // still fine and can continue with checking the other
                // dependencies.
                //
                // This path is extremely hot. We don't want to get the
                // `dep_dep_node` unless it's necessary. Hence the
                // `get_dep_dep_node` closure.
                debug!("dependency {:?} was immediately green", get_dep_dep_node());
                return Some(());
            }
            DepNodeColor::Red => {
                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.
                debug!("dependency {:?} was immediately red", get_dep_dep_node());
                return None;
            }
            DepNodeColor::Unknown => {}
        }

        let dep_dep_node = get_dep_dep_node();

        // We don't know the state of this dependency. If it isn't
        // an eval_always node, let's try to mark it green recursively.
        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "state of dependency {:?} ({}) is unknown, trying to mark it green",
                dep_dep_node, dep_dep_node.hash,
            );

            let node_index = self.try_mark_previous_green(qcx, parent_dep_node_index, Some(frame));

            if node_index.is_some() {
                debug!("managed to MARK dependency {dep_dep_node:?} as green");
                return Some(());
            }
        }

        // We failed to mark it green, so we try to force the query.
        debug!("trying to force dependency {dep_dep_node:?}");
        if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, parent_dep_node_index, frame) {
            // The DepNode could not be forced.
            debug!("dependency {dep_dep_node:?} could not be forced");
            return None;
        }

        match self.colors.get(parent_dep_node_index) {
            DepNodeColor::Green(_) => {
                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
                return Some(());
            }
            DepNodeColor::Red => {
                debug!("dependency {dep_dep_node:?} was red after forcing");
                return None;
            }
            DepNodeColor::Unknown => {}
        }

        if let None = qcx.dep_context().sess().dcx().has_errors_or_delayed_bugs() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
        }

        // If the query we just forced has resulted in
        // some kind of compilation error, we cannot rely on
        // the dep-node color having been properly updated.
        // This means that the query system has reached an
        // invalid state. We let the compiler continue (by
        // returning `None`) so it can emit error messages
        // and wind down, but rely on the fact that this
        // invalid state will not be persisted to the
        // incremental compilation cache because of
        // compilation errors being present.
        debug!("dependency {dep_dep_node:?} resulted in compilation error");
        return None;
    }

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
    fn try_mark_previous_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>(
        &self,
        qcx: Qcx,
        prev_dep_node_index: SerializedDepNodeIndex,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<DepNodeIndex> {
        let frame = MarkFrame { index: prev_dep_node_index, parent: frame };

        // We never try to mark eval_always nodes as green
        debug_assert!(
            !qcx.dep_context()
                .is_eval_always(self.previous.index_to_node(prev_dep_node_index).kind)
        );

        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);

        for dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(qcx, dep_dep_node_index, &frame)?;
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently.

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph ...
        let dep_node_index = self.promote_node_and_deps_to_current(prev_dep_node_index);

        // ... and finally store a "Green" entry in the color map.
        // Multiple threads can all write the same color here.

        debug!(
            "successfully marked {:?} as green",
            self.previous.index_to_node(prev_dep_node_index)
        );
        Some(dep_node_index)
    }
}

impl<D: Deps> DepGraph<D> {
    /// Returns true if the given node has been marked as red during the
    /// current compilation session. Used in various assertions.
    pub fn is_red(&self, dep_node: &DepNode) -> bool {
        matches!(self.node_color(dep_node), DepNodeColor::Red)
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    pub fn is_green(&self, dep_node: &DepNode) -> bool {
        matches!(self.node_color(dep_node), DepNodeColor::Green(_))
    }

    pub fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(data) = &self.data {
            data.assert_dep_node_not_yet_allocated_in_current_session(dep_node, msg)
        }
    }

    /// This method loads all on-disk cacheable query results into memory, so
    /// they can be written out to the new cache file again. Most query results
    /// will already be in memory but in the case where we marked something as
    /// green but then did not need the value, that value will never have been
    /// loaded from disk.
    ///
    /// This method will only load queries that will end up in the disk cache.
    /// Other queries will not be executed.
    pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                DepNodeColor::Green(_) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(dep_node);
                }
                DepNodeColor::Unknown | DepNodeColor::Red => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    pub fn finish_encoding(&self) -> FileEncodeResult {
        if let Some(data) = &self.data { data.current.encoder.finish(&data.current) } else { Ok(0) }
    }

    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        debug_assert!(self.data.is_none());
        let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
        DepNodeIndex::from_u32(index)
    }
}
10821083/// A "work product" is an intermediate result that we save into the
1084/// incremental directory for later re-use. The primary example are
1085/// the object files that we save for each partition at code
1086/// generation time.
1087///
1088/// Each work product is associated with a dep-node, representing the
1089/// process that produced the work-product. If that dep-node is found
1090/// to be dirty when we load up, then we will delete the work-product
1091/// at load time. If the work-product is found to be clean, then we
1092/// will keep a record in the `previous_work_products` list.
1093///
1094/// In addition, work products have an associated hash. This hash is
1095/// an extra hash that can be used to decide if the work-product from
1096/// a previous compilation can be re-used (in addition to the dirty
1097/// edges check).
1098///
1099/// As the primary example, consider the object files we generate for
1100/// each partition. In the first run, we create partitions based on
1101/// the symbols that need to be compiled. For each partition P, we
1102/// hash the symbols in P and create a `WorkProduct` record associated
1103/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
1104/// in P.
1105///
1106/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
1107/// judged to be clean (which means none of the things we read to
1108/// generate the partition were found to be dirty), it will be loaded
1109/// into previous work products. We will then regenerate the set of
1110/// symbols in the partition P and hash them (note that new symbols
1111/// may be added -- for example, new monomorphizations -- even if
1112/// nothing in P changed!). We will compare that hash against the
1113/// previous hash. If it matches up, we can reuse the object file.
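///
/// # Example
///
/// A sketch of such a record for a CGU named `P` (illustrative values; real
/// records are produced by the codegen backend, not by this module):
///
/// ```ignore (illustrative)
/// let mut saved_files = UnordMap::default();
/// saved_files.insert("o".to_string(), "P.o".to_string());
/// saved_files.insert("dwo".to_string(), "P.dwo".to_string());
/// let work_product = WorkProduct { cgu_name: "P".to_string(), saved_files };
/// ```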
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
    /// saved file and the key is some identifier for the type of file being saved.
    ///
    /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
    /// the object file's path, and "dwo" to the dwarf object file's path.
    pub saved_files: UnordMap<String, String>,
}

pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex {}
}

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
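///
/// A sketch of the two-step mapping described above (illustrative; the names
/// follow the prose rather than concrete fields of this type):
///
/// ```ignore (illustrative)
/// let prev_index: SerializedDepNodeIndex = serialized_graph.node_to_index(&dep_node);
/// let current_index: Option<DepNodeIndex> = prev_index_to_index[prev_index];
/// ```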
pub(super) struct CurrentDepGraph<D: Deps> {
    encoder: GraphEncoder<D>,
    anon_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,

    /// This is used to verify that fingerprints do not change between the creation of a node
    /// and its recomputation.
    #[cfg(debug_assertions)]
    fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter>,

    /// Used to verify the absence of hash collisions among DepNodes.
    /// This field is only `Some` if the `-Z incremental_verify_ich` option is present
    /// or if `debug_assertions` are enabled.
    ///
    /// The map contains all DepNodes that have been allocated in the current session so far.
    nodes_in_current_session: Option<Lock<FxHashMap<DepNode, DepNodeIndex>>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is a hash of the number of previous sessions.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    pub(super) total_read_count: AtomicU64,
    pub(super) total_duplicate_read_count: AtomicU64,
}

impl<D: Deps> CurrentDepGraph<D> {
    fn new(
        session: &Session,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        let mut stable_hasher = StableHasher::new();
        previous.session_count().hash(&mut stable_hasher);
        let anon_id_seed = stable_hasher.finish();

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        let new_node_dbg =
            session.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions);

        CurrentDepGraph {
            encoder: GraphEncoder::new(session, encoder, prev_graph_node_count, previous),
            anon_node_to_index: ShardedHashMap::with_capacity(
                // FIXME: The count estimate is off as anon nodes are only a portion of the nodes.
                new_node_count_estimate / sharded::shards(),
            ),
            anon_id_seed,
            #[cfg(debug_assertions)]
            forbidden_edge,
            #[cfg(debug_assertions)]
            fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
            nodes_in_current_session: new_node_dbg.then(|| {
                Lock::new(FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate,
                    Default::default(),
                ))
            }),
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
        let previous = *self.fingerprints.lock().get_or_insert_with(dep_node_index, || fingerprint);
        assert_eq!(previous, fingerprint, "Unstable fingerprints for {:?}", key);
    }

    #[inline(always)]
    fn record_node(
        &self,
        dep_node_index: DepNodeIndex,
        key: DepNode,
        _current_fingerprint: Fingerprint,
    ) {
        #[cfg(debug_assertions)]
        self.record_edge(dep_node_index, key, _current_fingerprint);

        if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
            outline(|| {
                if nodes_in_current_session.lock().insert(key, dep_node_index).is_some() {
                    panic!("Found duplicate dep-node {key:?}");
                }
            });
        }
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    #[inline(always)]
    fn alloc_new_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        let dep_node_index = self.encoder.send_new(key, current_fingerprint, edges);

        self.record_node(dep_node_index, key, current_fingerprint);

        dep_node_index
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) {
        if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
            debug_assert!(
                !nodes_in_current_session
                    .lock()
                    .contains_key(&prev_graph.index_to_node(prev_index)),
                "node from previous graph present in new node collection"
            );
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a> {
    /// New dependencies can be added to the
    /// `TaskDeps`. This is used when executing a 'normal' query
    /// (no `eval_always` modifier).
    Allow(&'a Lock<TaskDeps>),
    /// This is used when executing an `eval_always` query. We don't
    /// need to track dependencies for a query that's always
    /// re-executed -- but we need to know that this is an `eval_always`
    /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
    /// when directly feeding other queries.
    EvalAlways,
    /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
    Ignore,
    /// Any attempt to add new dependencies will cause a panic.
    /// This is used when decoding a query result from disk,
    /// to ensure that the decoding process doesn't itself
    /// require the execution of any queries.
    Forbid,
}

#[derive(Debug)]
pub struct TaskDeps {
    #[cfg(debug_assertions)]
    node: Option<DepNode>,

    /// A vector of `DepNodeIndex`, basically.
    reads: EdgesVec,

    /// When adding new edges to `reads` in `DepGraph::read_index` we need to determine if the edge
    /// has been seen before. If the number of elements in `reads` is small, we just do a linear
    /// scan. If the number is higher, a hashset has better perf. This field is that hashset. It's
    /// only used if the number of elements in `reads` exceeds `LINEAR_SCAN_MAX`.
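    ///
    /// A sketch of the dedup check this enables (illustrative; the real logic
    /// lives in `DepGraph::read_index`):
    ///
    /// ```ignore (illustrative)
    /// let is_new_read = if reads.len() <= TaskDeps::LINEAR_SCAN_MAX {
    ///     !reads.contains(&dep_node_index)
    /// } else {
    ///     read_set.insert(dep_node_index)
    /// };
    /// ```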
    read_set: FxHashSet<DepNodeIndex>,

    phantom_data: PhantomData<DepNode>,
}

impl TaskDeps {
    /// See `TaskDeps::read_set` above.
    const LINEAR_SCAN_MAX: usize = 16;

    #[inline]
    fn new(#[cfg(debug_assertions)] node: Option<DepNode>, read_set_capacity: usize) -> Self {
        TaskDeps {
            #[cfg(debug_assertions)]
            node,
            reads: EdgesVec::new(),
            read_set: FxHashSet::with_capacity_and_hasher(read_set_capacity, Default::default()),
            phantom_data: PhantomData,
        }
    }
}

// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
pub(super) struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

// All values below `COMPRESSED_RED` are green.
const COMPRESSED_RED: u32 = u32::MAX - 1;
const COMPRESSED_UNKNOWN: u32 = u32::MAX;
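// For example (illustrative): a stored value of `7` decodes to
// `DepNodeColor::Green(DepNodeIndex::from_u32(7))`, a value of `u32::MAX - 1`
// decodes to `Red`, and `u32::MAX` decodes to `Unknown`.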

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        debug_assert!(COMPRESSED_RED > DepNodeIndex::MAX_AS_U32);
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_UNKNOWN)).collect() }
    }
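
    /// Returns the node's current-session `DepNodeIndex` if it has been marked
    /// green, i.e. if its compressed color encodes such an index.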
    #[inline]
    pub(super) fn current(&self, index: SerializedDepNodeIndex) -> Option<DepNodeIndex> {
        let value = self.values[index].load(Ordering::Relaxed);
        if value <= DepNodeIndex::MAX_AS_U32 { Some(DepNodeIndex::from_u32(value)) } else { None }
    }

    /// This tries to atomically mark a node green and assign `index` as the new
    /// index. This returns `Ok` if `index` gets assigned, otherwise it returns
    /// the already allocated index in `Err`.
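    ///
    /// A sketch of a racing caller (illustrative):
    ///
    /// ```ignore (illustrative)
    /// match colors.try_mark_green(prev_index, new_index) {
    ///     Ok(()) => { /* we won the race: `new_index` is now the green index */ }
    ///     Err(existing) => { /* another thread won: reuse `existing` */ }
    /// }
    /// ```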
    #[inline]
    pub(super) fn try_mark_green(
        &self,
        prev_index: SerializedDepNodeIndex,
        index: DepNodeIndex,
    ) -> Result<(), DepNodeIndex> {
        let value = &self.values[prev_index];
        match value.compare_exchange(
            COMPRESSED_UNKNOWN,
            index.as_u32(),
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            Ok(_) => Ok(()),
            Err(v) => Err({
                assert_ne!(v, COMPRESSED_RED, "tried to mark a red node as green");
                DepNodeIndex::from_u32(v)
            }),
        }
    }

    #[inline]
    pub(super) fn get(&self, index: SerializedDepNodeIndex) -> DepNodeColor {
        let value = self.values[index].load(Ordering::Acquire);
        // Green is by far the most common case. Check for that first so we can succeed with a
        // single comparison.
        if value < COMPRESSED_RED {
            DepNodeColor::Green(DepNodeIndex::from_u32(value))
        } else if value == COMPRESSED_RED {
            DepNodeColor::Red
        } else {
            debug_assert_eq!(value, COMPRESSED_UNKNOWN);
            DepNodeColor::Unknown
        }
    }

    #[inline]
    pub(super) fn insert_red(&self, index: SerializedDepNodeIndex) {
        let value = self.values[index].swap(COMPRESSED_RED, Ordering::Release);
        // Sanity check for duplicate nodes.
        assert_eq!(value, COMPRESSED_UNKNOWN, "trying to encode a dep node twice");
    }
}

#[inline(never)]
#[cold]
pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: &MarkFrame<'_>) {
    let data = graph.data.as_ref().unwrap();

    eprintln!("there was a panic while trying to force a dep node");
    eprintln!("try_mark_green dep node stack:");

    let mut i = 0;
    let mut current = Some(frame);
    while let Some(frame) = current {
        let node = data.previous.index_to_node(frame.index);
        eprintln!("#{i} {node:?}");
        current = frame.parent;
        i += 1;
    }

    eprintln!("end of try_mark_green dep node stack");
}

#[cold]
#[inline(never)]
fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepNodeIndex) -> ! {
    // We have to do an expensive reverse-lookup of the DepNode that
    // corresponds to `dep_node_index`, but that's OK since we are about
    // to ICE anyway.
    let mut dep_node = None;

    // First try to find the dep node among those that already existed in the
    // previous session and have been marked green.
    for prev_index in data.colors.values.indices() {
        if data.colors.current(prev_index) == Some(dep_node_index) {
            dep_node = Some(*data.previous.index_to_node(prev_index));
            break;
        }
    }

    if dep_node.is_none()
        && let Some(nodes) = &data.current.nodes_in_current_session
    {
        // Try to find it among the nodes allocated so far in this session.
        // This is OK, there's only ever one node result possible so this is deterministic.
        #[allow(rustc::potential_query_instability)]
        if let Some((node, _)) = nodes.lock().iter().find(|&(_, index)| *index == dep_node_index) {
            dep_node = Some(*node);
        }
    }

    let dep_node = dep_node.map_or_else(
        || format!("with index {:?}", dep_node_index),
        |dep_node| format!("`{:?}`", dep_node),
    );

    panic!(
        "Error: trying to record dependency on DepNode {dep_node} in a \
         context that does not allow it (e.g. during query deserialization). \
         The most common case of recording a dependency on a DepNode `foo` is \
         when the corresponding query `foo` is invoked. Invoking queries is not \
         allowed as part of loading something from the incremental on-disk cache. \
         See <https://github.com/rust-lang/rust/pull/91919>."
    )
}