use std::assert_matches::assert_matches;
use std::fmt::Debug;
use std::hash::Hash;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::outline;
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU64, Lock, is_dyn_thread_safe};
use rustc_data_structures::unord::UnordMap;
use rustc_errors::DiagInner;
use rustc_index::IndexVec;
use rustc_macros::{Decodable, Encodable};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_session::Session;
use rustc_span::Symbol;
use tracing::{debug, instrument};
#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

use super::retained::RetainedDepGraph;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepKind, DepNode, WorkProductId, read_deps, with_deps};
use crate::dep_graph::edges::EdgesVec;
use crate::ich::StableHashingContext;
use crate::ty::TyCtxt;
use crate::verify_ich::incremental_verify_ich;

/// Tracks 'side effects' for a particular query.
/// This value is saved to disk along with the query result,
/// and loaded from disk if we mark the query as green.
/// This allows us to 'replay' changes to global state
/// that would otherwise only occur if we actually
/// executed the query method.
///
/// Each side effect gets a unique dep node index which is added
/// as a dependency of the query which had the effect.
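///
/// For example, a diagnostic emitted while a query runs is saved as a
/// `QuerySideEffect::Diagnostic`; if the query is marked green in a later
/// session, the diagnostic is re-emitted without re-running the query.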
#[derive(Debug, Encodable, Decodable)]
pub enum QuerySideEffect {
    /// Stores a diagnostic emitted during query execution.
    /// This diagnostic will be re-emitted if we mark
    /// the query as green, as that query will have the side
    /// effect dep node as a dependency.
    Diagnostic(DiagInner),
    /// Records the feature used during query execution.
    /// This feature will be inserted into `sess.used_features`
    /// if we mark the query as green, as that query will have
    /// the side effect dep node as a dependency.
    CheckFeature { symbol: Symbol },
}
#[derive(Clone)]
pub struct DepGraph {
    data: Option<Arc<DepGraphData>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Arc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}
}

// We store a large collection of these in `prev_index_to_index` during
// non-full incremental builds, and want to ensure that the element size
// doesn't inadvertently increase.
rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);

impl DepNodeIndex {
    const SINGLETON_ZERO_DEPS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

impl From<DepNodeIndex> for QueryInvocationId {
    #[inline(always)]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

pub(crate) struct MarkFrame<'a> {
    index: SerializedDepNodeIndex,
    parent: Option<&'a MarkFrame<'a>>,
}

#[derive(Debug)]
pub(super) enum DepNodeColor {
    Green(DepNodeIndex),
    Red,
    Unknown,
}

pub struct DepGraphData {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: We don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: Arc<SerializedDepGraph>,

    colors: DepNodeColorMap,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: WorkProductMap,

    dep_node_debug: Lock<FxHashMap<DepNode, String>>,

    /// Used by incremental compilation tests to assert that
    /// a particular query result was decoded from disk
    /// (not just marked green).
    debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
}
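
/// Hashes a query result with the given `StableHashingContext`, producing the
/// `Fingerprint` that is stored in the dep-graph node for the query and
/// compared across sessions to decide whether the node is green or red.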
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}

impl DepGraph {
    pub fn new(
        session: &Session,
        prev_graph: Arc<SerializedDepGraph>,
        prev_work_products: WorkProductMap,
        encoder: FileEncoder,
    ) -> DepGraph {
        let prev_graph_node_count = prev_graph.node_count();

        let current =
            CurrentDepGraph::new(session, prev_graph_node_count, encoder, Arc::clone(&prev_graph));

        let colors = DepNodeColorMap::new(prev_graph_node_count);

        // Instantiate a node with zero dependencies only once for anonymous queries.
        let _green_node_index = current.alloc_new_node(
            DepNode { kind: DepKind::AnonZeroDeps, key_fingerprint: current.anon_id_seed.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE);

        // Create a single always-red node, with no dependencies of its own.
        // Other nodes can use the always-red node as a fake dependency, to
        // ensure that their dependency list will never be all-green.
        let red_node_index = current.alloc_new_node(
            DepNode { kind: DepKind::Red, key_fingerprint: Fingerprint::ZERO.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        if prev_graph_node_count > 0 {
            colors.insert_red(SerializedDepNodeIndex::from_u32(
                DepNodeIndex::FOREVER_RED_NODE.as_u32(),
            ));
        }

        DepGraph {
            data: Some(Arc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                previous: prev_graph,
                colors,
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph {
        DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
    }

    #[inline]
    pub fn data(&self) -> Option<&DepGraphData> {
        self.data.as_deref()
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_retained_dep_graph(&self, f: impl Fn(&RetainedDepGraph)) {
        if let Some(data) = &self.data {
            data.current.encoder.with_retained_dep_graph(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            read_deps(|task_deps| {
                assert_matches!(
                    task_deps,
                    TaskDepsRef::Ignore,
                    "expected no task dependency tracking"
                );
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        with_deps(TaskDepsRef::Ignore, op)
    }

    /// Used to wrap the deserialization of a query result from disk.
    /// This method enforces that no new `DepNodes` are created during
    /// query result deserialization.
    ///
    /// Enforcing this makes the query dep graph simpler - all nodes
    /// must be created during the query execution, and should be
    /// created from inside the 'body' of a query (the implementation
    /// provided by a particular compiler crate).
    ///
    /// Consider the case of three queries `A`, `B`, and `C`, where
    /// `A` invokes `B` and `B` invokes `C`:
    ///
    /// `A -> B -> C`
    ///
    /// Suppose that decoding the result of query `B` required re-computing
    /// the query `C`. If we did not create a fresh `TaskDeps` when
    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
    /// (if we needed to re-execute `A`). This would cause us to create
    /// a new edge `A -> C`. If this edge did not previously
    /// exist in the `DepGraph`, then we could end up with a different
    /// `DepGraph` at the end of compilation, even if there were no
    /// meaningful changes to the overall program (e.g. a newline was added).
    /// In addition, this edge might cause a subsequent compilation run
    /// to try to force `C` before marking other necessary nodes green. If
    /// `C` did not exist in the new compilation session, then we could
    /// get an ICE. Normally, we would have tried (and failed) to mark
    /// some other query green (e.g. `item_children`) which was used
    /// to obtain `C`, which would prevent us from ever trying to force
    /// a nonexistent `C`.
    ///
    /// It might be possible to enforce that all `DepNode`s read during
    /// deserialization already exist in the previous `DepGraph`. In
    /// the above example, we would invoke `C` during the deserialization
    /// of `B`. Since we correctly create a new `TaskDeps` for the decoding
    /// of `B`, this would result in an edge `B -> C`. If that edge already
    /// existed (with the same `DepPathHash`es), then it should be correct
    /// to allow the invocation of the query to proceed during deserialization
    /// of a query result. We would merely assert that the dep-graph fragment
    /// that would have been added by invoking `C` while decoding `B`
    /// is equivalent to the dep-graph fragment that we already instantiated for `B`
    /// (at the point where we successfully marked `B` as green).
    ///
    /// However, this would require additional complexity
    /// in the query infrastructure, and is not currently needed by the
    /// decoding of any query results. Should the need arise in the future,
    /// we should consider extending the query system with this functionality.
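    ///
    /// # Example
    ///
    /// A minimal sketch of the intended call pattern; `decode_cached_result`
    /// is a hypothetical helper, not part of this module:
    ///
    /// ```ignore (illustrative)
    /// // Creating new dep nodes while decoding a cached result is forbidden:
    /// let value = tcx.dep_graph.with_query_deserialization(|| decode_cached_result(tcx));
    /// ```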
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        with_deps(TaskDepsRef::Forbid, op)
    }

    #[inline(always)]
    pub fn with_task<'tcx, A: Debug, R>(
        &self,
        dep_node: DepNode,
        tcx: TyCtxt<'tcx>,
        task_arg: A,
        task_fn: fn(tcx: TyCtxt<'tcx>, task_arg: A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        match self.data() {
            Some(data) => data.with_task(dep_node, tcx, task_arg, task_fn, hash_result),
            None => (task_fn(tcx, task_arg), self.next_virtual_depnode_index()),
        }
    }
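
    /// Like `with_task`, but for "anonymous" tasks whose `DepNode` is derived
    /// from the set of reads they perform (see
    /// `DepGraphData::with_anon_task_inner`). Unlike the inner method, this
    /// also records the resulting node as a read of the current task.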
    pub fn with_anon_task<'tcx, OP, R>(
        &self,
        cx: TyCtxt<'tcx>,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        match self.data() {
            Some(data) => {
                let (result, index) = data.with_anon_task_inner(cx, dep_kind, op);
                self.read_index(index);
                (result, index)
            }
            None => (op(), self.next_virtual_depnode_index()),
        }
    }
}

impl DepGraphData {
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task_fn`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph).
    ///
    /// Therefore, the task function takes a `TyCtxt`, plus exactly one
    /// additional argument, `task_arg`. The additional argument type can be
    /// `()` if no argument is needed, or a tuple if multiple arguments are
    /// needed.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation.html
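    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a `dep_node` and `key` are already in scope;
    /// `check_item` is a hypothetical task function:
    ///
    /// ```ignore (illustrative)
    /// fn check_item(tcx: TyCtxt<'_>, key: DefId) -> bool {
    ///     // Only tracked state reachable through `tcx` and `key` may be read here.
    ///     true
    /// }
    ///
    /// let (result, dep_node_index) =
    ///     data.with_task(dep_node, tcx, key, check_item, Some(hash_result));
    /// ```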
    #[inline(always)]
    pub fn with_task<'tcx, A: Debug, R>(
        &self,
        dep_node: DepNode,
        tcx: TyCtxt<'tcx>,
        task_arg: A,
        task_fn: fn(tcx: TyCtxt<'tcx>, task_arg: A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        self.assert_dep_node_not_yet_allocated_in_current_session(tcx.sess, &dep_node, || {
            format!(
                "forcing query with already existing `DepNode`\n\
                 - query-key: {task_arg:?}\n\
                 - dep-node: {dep_node:?}"
            )
        });

        let with_deps = |task_deps| with_deps(task_deps, || task_fn(tcx, task_arg));
        let (result, edges) = if tcx.is_eval_always(dep_node.kind) {
            (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
        } else {
            let task_deps = Lock::new(TaskDeps::new(
                #[cfg(debug_assertions)]
                Some(dep_node),
                0,
            ));
            (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
        };

        let dep_node_index =
            self.hash_result_and_alloc_node(tcx, dep_node, edges, &result, hash_result);

        (result, dep_node_index)
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
    ///
    /// NOTE: this does not actually count as a read of the DepNode here.
    /// Using the result of this task without reading the DepNode will result
    /// in untracked dependencies which may lead to ICEs as nodes are
    /// incorrectly marked green.
    ///
    /// FIXME: This could perhaps return a `WithDepNode` to ensure that the
    /// user of this function actually performs the read; we'll have to see
    /// how to make that work with `anon` in `execute_job_incr`, though.
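    ///
    /// A minimal sketch of the intended call pattern (note that the caller,
    /// not this method, must record the read; `compute` is hypothetical):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) = data.with_anon_task_inner(tcx, dep_kind, || compute(tcx));
    /// // Without this read, `result` would carry an untracked dependency:
    /// tcx.dep_graph.read_index(dep_node_index);
    /// ```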
    pub fn with_anon_task_inner<'tcx, OP, R>(
        &self,
        cx: TyCtxt<'tcx>,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        // Large numbers of reads are common enough here that pre-sizing `read_set`
        // to 128 actually helps perf on some benchmarks.
        let task_deps = Lock::new(TaskDeps::new(
            #[cfg(debug_assertions)]
            None,
            128,
        ));
        let result = with_deps(TaskDepsRef::Allow(&task_deps), op);
        let task_deps = task_deps.into_inner();
        let reads = task_deps.reads;

        let dep_node_index = match reads.len() {
            0 => {
                // Because the dep-node id of anon nodes is computed from the sets of its
                // dependencies we already know what the ID of this dependency-less node is
                // going to be (i.e. equal to the precomputed
                // `SINGLETON_ZERO_DEPS_ANON_NODE`). As a consequence we can skip creating
                // a `StableHasher` and sending the node through interning.
                DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE
            }
            1 => {
                // When there is only one dependency, don't bother creating a node.
                reads[0]
            }
            _ => {
                // The dep node indices are hashed here instead of hashing the dep nodes of the
                // dependencies. These indices may refer to different nodes per session, but this
                // isn't a problem here because we ensure that the final dep node hash is
                // per-session only, by combining it with the per-session random number
                // `anon_id_seed`. This hash only needs to map the dependencies to a single
                // value on a per-session basis.
                let mut hasher = StableHasher::new();
                reads.hash(&mut hasher);

                let target_dep_node = DepNode {
                    kind: dep_kind,
                    // Fingerprint::combine() is faster than sending Fingerprint
                    // through the StableHasher (at least as long as StableHasher
                    // is so slow).
                    key_fingerprint: self.current.anon_id_seed.combine(hasher.finish()).into(),
                };

                // The DepNodes generated by the process above are not unique: two queries could
                // have exactly the same dependencies. However, deserialization does not handle
                // duplicated nodes, so we do the deduplication here directly.
                //
                // As anonymous nodes are a small quantity compared to the full dep-graph, the
                // memory impact of this `anon_node_to_index` map remains tolerable, and helps
                // us avoid useless growth of the graph with almost-equivalent nodes.
                self.current.anon_node_to_index.get_or_insert_with(target_dep_node, || {
                    self.current.alloc_new_node(target_dep_node, reads, Fingerprint::ZERO)
                })
            }
        };

        (result, dep_node_index)
    }

    /// Intern the new `DepNode` with the dependencies up-to-now.
    fn hash_result_and_alloc_node<'tcx, R>(
        &self,
        tcx: TyCtxt<'tcx>,
        node: DepNode,
        edges: EdgesVec,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        let hashing_timer = tcx.prof.incr_result_hashing();
        let current_fingerprint = hash_result.map(|hash_result| {
            tcx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
        });
        let dep_node_index = self.alloc_and_color_node(node, edges, current_fingerprint);
        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
        dep_node_index
    }
}

impl DepGraph {
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::EvalAlways => {
                        // We don't need to record dependencies of eval_always
                        // queries. They are re-evaluated unconditionally anyway.
                        return;
                    }
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        // Reading is forbidden in this context. ICE with a useful error message.
                        panic_on_forbidden_read(data, dep_node_index)
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
                }

                // Has `dep_node_index` been seen before? Use either a linear scan or a hashset
                // lookup to determine this. See `TaskDeps::read_set` for details.
                let new_read = if task_deps.reads.len() <= TaskDeps::LINEAR_SCAN_MAX {
                    !task_deps.reads.contains(&dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == TaskDeps::LINEAR_SCAN_MAX + 1 {
                        // Fill `read_set` with what we have so far. Future lookups will use it.
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node
                            && let Some(ref forbidden_edge) = data.current.forbidden_edge
                        {
                            let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                            if forbidden_edge.test(&src, &target) {
                                panic!("forbidden edge {:?} -> {:?} created", src, target)
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
                }
            })
        }
    }

    /// This encodes a side effect by creating a node with a unique index and associating
    /// the side effect with that node, for use in the next session.
    #[inline]
    pub fn record_diagnostic<'tcx>(&self, tcx: TyCtxt<'tcx>, diagnostic: &DiagInner) {
        if let Some(ref data) = self.data {
            read_deps(|task_deps| match task_deps {
                TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return,
                TaskDepsRef::Forbid | TaskDepsRef::Allow(..) => {
                    let dep_node_index = data
                        .encode_side_effect(tcx, QuerySideEffect::Diagnostic(diagnostic.clone()));
                    self.read_index(dep_node_index);
                }
            })
        }
    }

    /// This forces a side effect node green by running its side effect. `prev_index` would
    /// refer to a node created using `encode_side_effect` in the previous session.
    #[inline]
    pub fn force_side_effect<'tcx>(&self, tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex) {
        if let Some(ref data) = self.data {
            data.force_side_effect(tcx, prev_index);
        }
    }

    #[inline]
    pub fn encode_side_effect<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        side_effect: QuerySideEffect,
    ) -> DepNodeIndex {
        if let Some(ref data) = self.data {
            data.encode_side_effect(tcx, side_effect)
        } else {
            self.next_virtual_depnode_index()
        }
    }

    /// Create a node when we force-feed a value into the query cache.
    /// This is used to remove cycles during type-checking const generic parameters.
    ///
    /// As usual in the query system, we consider that the current state of the calling
    /// query only depends on the list of dependencies up to now. As a consequence, the
    /// value that this query gives us can only depend on those dependencies too.
    /// Therefore, it is sound to use the current dependency set for the created node.
    ///
    /// During replay, the order of the nodes is relevant in the dependency graph.
    /// So the unchanged replay will mark the caller query before trying to mark this one.
    /// If there is a change to report, the caller query will be re-executed before this one.
    ///
    /// FIXME: If the code is changed enough for this node to be marked before requiring the
    /// caller's node, we suppose that those changes will be enough to mark this node red and
    /// force a recomputation using the "normal" way.
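    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `node` and a precomputed `value` are in scope:
    ///
    /// ```ignore (illustrative)
    /// let dep_node_index = tcx.dep_graph.with_feed_task(
    ///     node,
    ///     tcx,
    ///     &value,
    ///     Some(hash_result),
    ///     |v| format!("{v:?}"),
    /// );
    /// ```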
    pub fn with_feed_task<'tcx, R>(
        &self,
        node: DepNode,
        tcx: TyCtxt<'tcx>,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
        format_value_fn: fn(&R) -> String,
    ) -> DepNodeIndex {
        if let Some(data) = self.data.as_ref() {
            // The caller query has more dependencies than the node we are creating. We may
            // encounter a case where this created node is marked as green, but the caller query is
            // subsequently marked as red or recomputed. In this case, we will end up feeding a
            // value to an existing node.
            //
            // For sanity, we still check that the loaded stable hash and the new one match.
            if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
                let dep_node_index = data.colors.current(prev_index);
                if let Some(dep_node_index) = dep_node_index {
                    incremental_verify_ich(
                        tcx,
                        data,
                        result,
                        prev_index,
                        hash_result,
                        format_value_fn,
                    );

                    #[cfg(debug_assertions)]
                    if hash_result.is_some() {
                        data.current.record_edge(
                            dep_node_index,
                            node,
                            data.prev_value_fingerprint_of(prev_index),
                        );
                    }

                    return dep_node_index;
                }
            }

            let mut edges = EdgesVec::new();
            read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::EvalAlways => {
                    edges.push(DepNodeIndex::FOREVER_RED_NODE);
                }
                TaskDepsRef::Ignore => {}
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")
                }
            });

            data.hash_result_and_alloc_node(tcx, node, edges, result, hash_result)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            self.next_virtual_depnode_index()
        }
    }
}

impl DepGraphData {
    fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        sess: &Session,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            let color = self.colors.get(prev_index);
            let ok = match color {
                DepNodeColor::Unknown => true,
                DepNodeColor::Red => false,
                DepNodeColor::Green(..) => sess.threads() > 1, // Other threads may mark this green
            };
            if !ok {
                panic!("{}", msg())
            }
        } else if let Some(nodes_in_current_session) = &self.current.nodes_in_current_session {
            outline(|| {
                let seen = nodes_in_current_session.lock().contains_key(dep_node);
                assert!(!seen, "{}", msg());
            });
        }
    }

    fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.colors.get(prev_index)
        } else {
            // This is a node that did not exist in the previous compilation session.
            DepNodeColor::Unknown
        }
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    #[inline]
    pub fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
        matches!(self.colors.get(prev_index), DepNodeColor::Green(_))
    }

    #[inline]
    pub fn prev_value_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
        self.previous.value_fingerprint_for_index(prev_index)
    }

    #[inline]
    pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> &DepNode {
        self.previous.index_to_node(prev_index)
    }

    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
        self.debug_loaded_from_disk.lock().insert(dep_node);
    }

    /// This encodes a side effect by creating a node with a unique index and associating
    /// the side effect with that node, for use in the next session.
    #[inline]
    fn encode_side_effect<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        side_effect: QuerySideEffect,
    ) -> DepNodeIndex {
        // Use `send_new` so we get a unique index, even though the dep node is not unique.
        let dep_node_index = self.current.encoder.send_new(
            DepNode {
                kind: DepKind::SideEffect,
                key_fingerprint: PackedFingerprint::from(Fingerprint::ZERO),
            },
            Fingerprint::ZERO,
            // We want the side effect node to always be red so it will be forced and run the
            // side effect.
            std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
        );
        tcx.store_side_effect(dep_node_index, side_effect);
        dep_node_index
    }

    /// This forces a side effect node green by running its side effect. `prev_index` would
    /// refer to a node created using `encode_side_effect` in the previous session.
    #[inline]
    fn force_side_effect<'tcx>(&self, tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex) {
        with_deps(TaskDepsRef::Ignore, || {
            let side_effect = tcx.load_side_effect(prev_index).unwrap();

            // Use `send_and_color` as `promote_node_and_deps_to_current` expects all
            // green dependencies. `send_and_color` will also prevent multiple nodes
            // being encoded for concurrent calls.
            let dep_node_index = self.current.encoder.send_and_color(
                prev_index,
                &self.colors,
                DepNode {
                    kind: DepKind::SideEffect,
                    key_fingerprint: PackedFingerprint::from(Fingerprint::ZERO),
                },
                Fingerprint::ZERO,
                std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
                true,
            );

            match &side_effect {
                QuerySideEffect::Diagnostic(diagnostic) => {
                    tcx.dcx().emit_diagnostic(diagnostic.clone());
                }
                QuerySideEffect::CheckFeature { symbol } => {
                    tcx.sess.used_features.lock().insert(*symbol, dep_node_index.as_u32());
                }
            }

            // This will just overwrite the same value for concurrent calls.
            tcx.store_side_effect(dep_node_index, side_effect);
        })
    }

    fn alloc_and_color_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        value_fingerprint: Option<Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(prev_index) = self.previous.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
            let is_green = if let Some(value_fingerprint) = value_fingerprint {
                if value_fingerprint == self.previous.value_fingerprint_for_index(prev_index) {
                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    true
                } else {
                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    false
                }
            } else {
                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                false
            };

            let value_fingerprint = value_fingerprint.unwrap_or(Fingerprint::ZERO);

            let dep_node_index = self.current.encoder.send_and_color(
                prev_index,
                &self.colors,
                key,
                value_fingerprint,
                edges,
                is_green,
            );

            self.current.record_node(dep_node_index, key, value_fingerprint);

            dep_node_index
        } else {
            self.current.alloc_new_node(key, edges, value_fingerprint.unwrap_or(Fingerprint::ZERO))
        }
    }

    fn promote_node_and_deps_to_current(
        &self,
        prev_index: SerializedDepNodeIndex,
    ) -> Option<DepNodeIndex> {
        self.current.debug_assert_not_in_new_nodes(&self.previous, prev_index);

        let dep_node_index = self.current.encoder.send_promoted(prev_index, &self.colors);

        #[cfg(debug_assertions)]
        if let Some(dep_node_index) = dep_node_index {
            self.current.record_edge(
                dep_node_index,
                *self.previous.index_to_node(prev_index),
                self.previous.value_fingerprint_for_index(prev_index),
            );
        }

        dep_node_index
    }
}

impl DepGraph {
    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &WorkProductMap {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    pub fn debug_dep_kind_was_loaded_from_disk(&self, dep_kind: DepKind) -> bool {
        // We only check if we have a dep node corresponding to the given dep kind.
        #[allow(rustc::potential_query_instability)]
        self.data
            .as_ref()
            .unwrap()
            .debug_loaded_from_disk
            .lock()
            .iter()
            .any(|node| node.kind == dep_kind)
    }

    #[cfg(debug_assertions)]
    #[inline(always)]
    pub(crate) fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        // Early queries (e.g., `-Z query-dep-graph` on empty crates) can reach here
        // before the graph is initialized. Return early to prevent an ICE.
        let data = match &self.data {
            Some(d) => d,
            None => return,
        };
        let dep_node_debug = &data.dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = self.with_ignore(debug_str_gen);
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
        if let Some(ref data) = self.data {
            return data.node_color(dep_node);
        }

        DepNodeColor::Unknown
    }

    pub fn try_mark_green<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.data()?.try_mark_green(tcx, dep_node)
    }
}

impl DepGraphData {
    /// Try to mark a node index for the node `dep_node`.
    ///
    /// A node will have an index when it's already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node, when
    /// a node index can be found for that node.
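    ///
    /// A minimal sketch of how the result is typically consumed:
    ///
    /// ```ignore (illustrative)
    /// if let Some((prev_index, dep_node_index)) = data.try_mark_green(tcx, &dep_node) {
    ///     // Green: a cached result can be loaded using `prev_index`.
    /// } else {
    ///     // Red, or could not be marked green: the query must be re-executed.
    /// }
    /// ```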
    pub fn try_mark_green<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!tcx.is_eval_always(dep_node.kind));

        // Return None if the dep node didn't exist in the previous session
        let prev_index = self.previous.node_to_index_opt(dep_node)?;

        debug_assert_eq!(self.previous.index_to_node(prev_index), dep_node);

        match self.colors.get(prev_index) {
            DepNodeColor::Green(dep_node_index) => Some((prev_index, dep_node_index)),
            DepNodeColor::Red => None,
            DepNodeColor::Unknown => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, prev_index, None)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    #[instrument(skip(self, tcx, parent_dep_node_index, frame), level = "debug")]
    fn try_mark_parent_green<'tcx>(
923&self,
924 tcx: TyCtxt<'tcx>,
925 parent_dep_node_index: SerializedDepNodeIndex,
926 frame: &MarkFrame<'_>,
927 ) -> Option<()> {
928let get_dep_dep_node = || self.previous.index_to_node(parent_dep_node_index);
929930match self.colors.get(parent_dep_node_index) {
931 DepNodeColor::Green(_) => {
932// This dependency has been marked as green before, we are
933 // still fine and can continue with checking the other
934 // dependencies.
935 //
936 // This path is extremely hot. We don't want to get the
937 // `dep_dep_node` unless it's necessary. Hence the
938 // `get_dep_dep_node` closure.
939debug!("dependency {:?} was immediately green", get_dep_dep_node());
940return Some(());
941 }
942 DepNodeColor::Red => {
943// We found a dependency the value of which has changed
944 // compared to the previous compilation session. We cannot
945 // mark the DepNode as green and also don't need to bother
946 // with checking any of the other dependencies.
947debug!("dependency {:?} was immediately red", get_dep_dep_node());
948return None;
949 }
950 DepNodeColor::Unknown => {}
951 }
952953let dep_dep_node = get_dep_dep_node();
954955// We don't know the state of this dependency. If it isn't
956 // an eval_always node, let's try to mark it green recursively.
957if !tcx.is_eval_always(dep_dep_node.kind) {
958debug!(
959"state of dependency {:?} ({}) is unknown, trying to mark it green",
960 dep_dep_node, dep_dep_node.key_fingerprint,
961 );
962963let node_index = self.try_mark_previous_green(tcx, parent_dep_node_index, Some(frame));
964965if node_index.is_some() {
966debug!("managed to MARK dependency {dep_dep_node:?} as green");
967return Some(());
968 }
969 }
970971// We failed to mark it green, so we try to force the query.
972debug!("trying to force dependency {dep_dep_node:?}");
973if !tcx.try_force_from_dep_node(*dep_dep_node, parent_dep_node_index, frame) {
974// The DepNode could not be forced.
975debug!("dependency {dep_dep_node:?} could not be forced");
976return None;
977 }
978979match self.colors.get(parent_dep_node_index) {
980 DepNodeColor::Green(_) => {
981debug!("managed to FORCE dependency {dep_dep_node:?} to green");
982return Some(());
983 }
984 DepNodeColor::Red => {
985debug!("dependency {dep_dep_node:?} was red after forcing");
986return None;
987 }
988 DepNodeColor::Unknown => {}
989 }
990991if let None = tcx.dcx().has_errors_or_delayed_bugs() {
992panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
993 }
994995// If the query we just forced has resulted in
996 // some kind of compilation error, we cannot rely on
997 // the dep-node color having been properly updated.
998 // This means that the query system has reached an
999 // invalid state. We let the compiler continue (by
1000 // returning `None`) so it can emit error messages
1001 // and wind down, but rely on the fact that this
1002 // invalid state will not be persisted to the
1003 // incremental compilation cache because of
1004 // compilation errors being present.
1005debug!("dependency {dep_dep_node:?} resulted in compilation error");
1006return None;
1007 }
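
    // Illustrative sketch (not part of the compiler): stripped of forcing,
    // caching, and concurrency, the red-green recursion that
    // `try_mark_parent_green` participates in amounts to the following, where
    // `color_of`, `deps_of`, and `force_and_recheck` are hypothetical helpers:
    //
    //     fn mark_green(node: NodeId) -> bool {
    //         deps_of(node).all(|dep| match color_of(dep) {
    //             Color::Green => true,
    //             Color::Red => false,
    //             Color::Unknown => mark_green(dep) || force_and_recheck(dep),
    //         })
    //     }
    //
    // `try_mark_parent_green` handles a single `dep` arm of this loop;
    // `try_mark_previous_green` below is the outer `all` over a node's
    // previous edges.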
    /// Try to mark a dep-node which existed in the previous compilation session as green.
    #[instrument(skip(self, tcx, prev_dep_node_index, frame), level = "debug")]
    fn try_mark_previous_green<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        prev_dep_node_index: SerializedDepNodeIndex,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<DepNodeIndex> {
        let frame = MarkFrame { index: prev_dep_node_index, parent: frame };

        // We never try to mark eval_always nodes as green.
        debug_assert!(!tcx.is_eval_always(self.previous.index_to_node(prev_dep_node_index).kind));

        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);

        for dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(tcx, dep_dep_node_index, &frame)?;
        }

        // If we got here without hitting a `return`, that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.
        //
        // There may be multiple threads trying to mark the same dep node
        // green concurrently. We allocate an entry for the node in the
        // current dependency graph, adding all the appropriate edges
        // imported from the previous graph.
        //
        // `no_hash` nodes may fail this promotion due to already being
        // conservatively colored red.
        let dep_node_index = self.promote_node_and_deps_to_current(prev_dep_node_index)?;

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here.
        debug!(
            "successfully marked {:?} as green",
            self.previous.index_to_node(prev_dep_node_index)
        );
        Some(dep_node_index)
    }
}

impl DepGraph {
    /// Returns true if the given node has been marked as red during the
    /// current compilation session. Used in various assertions.
    pub fn is_red(&self, dep_node: &DepNode) -> bool {
        matches!(self.node_color(dep_node), DepNodeColor::Red)
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    pub fn is_green(&self, dep_node: &DepNode) -> bool {
        matches!(self.node_color(dep_node), DepNodeColor::Green(_))
    }

    pub fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        sess: &Session,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(data) = &self.data {
            data.assert_dep_node_not_yet_allocated_in_current_session(sess, dep_node, msg)
        }
    }

    /// This method loads all on-disk cacheable query results into memory, so
    /// they can be written out to the new cache file again. Most query results
    /// will already be in memory, but in the case where we marked something as
    /// green and then never needed the value, that value will never have been
    /// loaded from disk.
    ///
    /// This method will only load queries that will end up in the disk cache.
    /// Other queries will not be executed.
    pub fn exec_cache_promotions<'tcx>(&self, tcx: TyCtxt<'tcx>) {
        let _prof_timer = tcx.prof.generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                DepNodeColor::Green(_) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    if let Some(promote_fn) =
                        tcx.dep_kind_vtable(dep_node.kind).promote_from_disk_fn
                    {
                        promote_fn(tcx, *dep_node)
                    }
                }
                DepNodeColor::Unknown | DepNodeColor::Red => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    pub fn finish_encoding(&self) -> FileEncodeResult {
        if let Some(data) = &self.data { data.current.encoder.finish(&data.current) } else { Ok(0) }
    }

    pub fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        debug_assert!(self.data.is_none());
        let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
        DepNodeIndex::from_u32(index)
    }
}
11161117/// A "work product" is an intermediate result that we save into the
1118/// incremental directory for later re-use. The primary example are
1119/// the object files that we save for each partition at code
1120/// generation time.
1121///
1122/// Each work product is associated with a dep-node, representing the
1123/// process that produced the work-product. If that dep-node is found
1124/// to be dirty when we load up, then we will delete the work-product
1125/// at load time. If the work-product is found to be clean, then we
1126/// will keep a record in the `previous_work_products` list.
1127///
1128/// In addition, work products have an associated hash. This hash is
1129/// an extra hash that can be used to decide if the work-product from
1130/// a previous compilation can be re-used (in addition to the dirty
1131/// edges check).
1132///
1133/// As the primary example, consider the object files we generate for
1134/// each partition. In the first run, we create partitions based on
1135/// the symbols that need to be compiled. For each partition P, we
1136/// hash the symbols in P and create a `WorkProduct` record associated
1137/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
1138/// in P.
1139///
1140/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
1141/// judged to be clean (which means none of the things we read to
1142/// generate the partition were found to be dirty), it will be loaded
1143/// into previous work products. We will then regenerate the set of
1144/// symbols in the partition P and hash them (note that new symbols
1145/// may be added -- for example, new monomorphizations -- even if
1146/// nothing in P changed!). We will compare that hash against the
1147/// previous hash. If it matches up, we can reuse the object file.
1148#[derive(#[automatically_derived]
impl ::core::clone::Clone for WorkProduct {
#[inline]
fn clone(&self) -> WorkProduct {
WorkProduct {
cgu_name: ::core::clone::Clone::clone(&self.cgu_name),
saved_files: ::core::clone::Clone::clone(&self.saved_files),
}
}
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for WorkProduct {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field2_finish(f, "WorkProduct",
"cgu_name", &self.cgu_name, "saved_files", &&self.saved_files)
}
}Debug, const _: () =
{
impl<__E: ::rustc_span::SpanEncoder> ::rustc_serialize::Encodable<__E>
for WorkProduct {
fn encode(&self, __encoder: &mut __E) {
match *self {
WorkProduct {
cgu_name: ref __binding_0, saved_files: ref __binding_1 } =>
{
::rustc_serialize::Encodable::<__E>::encode(__binding_0,
__encoder);
::rustc_serialize::Encodable::<__E>::encode(__binding_1,
__encoder);
}
}
}
}
};Encodable, const _: () =
{
impl<__D: ::rustc_span::SpanDecoder> ::rustc_serialize::Decodable<__D>
for WorkProduct {
fn decode(__decoder: &mut __D) -> Self {
WorkProduct {
cgu_name: ::rustc_serialize::Decodable::decode(__decoder),
saved_files: ::rustc_serialize::Decodable::decode(__decoder),
}
}
}
};Decodable)]
1149pub struct WorkProduct {
1150pub cgu_name: String,
1151/// Saved files associated with this CGU. In each key/value pair, the value is the path to the
1152 /// saved file and the key is some identifier for the type of file being saved.
1153 ///
1154 /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
1155 /// the object file's path, and "dwo" to the dwarf object file's path.
1156pub saved_files: UnordMap<String, String>,
1157}
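
// Illustrative example (hypothetical values): a codegen-unit work product
// that saved an object file and a split DWARF file might look like:
//
//     let wp = WorkProduct {
//         cgu_name: "regex.f10ba03eb5ec7975-cgu.0".to_string(),
//         saved_files: UnordMap::from_iter([
//             ("o".to_string(), "regex.f10ba03eb5ec7975-cgu.0.o".to_string()),
//             ("dwo".to_string(), "regex.f10ba03eb5ec7975-cgu.0.dwo".to_string()),
//         ]),
//     };
//
// The keys follow the file-extension convention documented on `saved_files`.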

pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex {}
}

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
pub(super) struct CurrentDepGraph {
    encoder: GraphEncoder,
    anon_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,

    /// This is used to verify that value fingerprints do not change between the
    /// creation of a node and its recomputation.
    #[cfg(debug_assertions)]
    value_fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter>,

    /// Used to verify the absence of hash collisions among DepNodes.
    /// This field is only `Some` if the `-Z incremental_verify_ich` option is present
    /// or if `debug_assertions` are enabled.
    ///
    /// The map contains all DepNodes that have been allocated in the current session so far.
    nodes_in_current_session: Option<Lock<FxHashMap<DepNode, DepNodeIndex>>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: the color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is a hash of the number of previous sessions.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    pub(super) total_read_count: AtomicU64,
    pub(super) total_duplicate_read_count: AtomicU64,
}

impl CurrentDepGraph {
    fn new(
        session: &Session,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        let mut stable_hasher = StableHasher::new();
        previous.session_count().hash(&mut stable_hasher);
        let anon_id_seed = stable_hasher.finish();

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        let new_node_dbg =
            session.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions);

        CurrentDepGraph {
            encoder: GraphEncoder::new(session, encoder, prev_graph_node_count, previous),
            anon_node_to_index: ShardedHashMap::with_capacity(
                // FIXME: The count estimate is off as anon nodes are only a portion of the nodes.
                new_node_count_estimate / sharded::shards(),
            ),
            anon_id_seed,
            #[cfg(debug_assertions)]
            forbidden_edge,
            #[cfg(debug_assertions)]
            value_fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
            nodes_in_current_session: new_node_dbg.then(|| {
                Lock::new(FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate,
                    Default::default(),
                ))
            }),
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }
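
    // Illustrative sketch (not the compiler's exact code): per the
    // documentation on `anon_id_seed`, an anonymous node's ID is derived
    // from the seed plus its edge list, roughly along these lines:
    //
    //     let mut hasher = StableHasher::new();
    //     anon_id_seed.hash(&mut hasher); // session key keeps sessions disjoint
    //     edges.hash(&mut hasher);        // edge list lets equal anon nodes coalesce
    //     let anon_fingerprint: Fingerprint = hasher.finish();
    //
    // The actual mixing happens where anon nodes are allocated; this only
    // spells out the shape implied by the field's documentation.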

    #[cfg(debug_assertions)]
    fn record_edge(
        &self,
        dep_node_index: DepNodeIndex,
        key: DepNode,
        value_fingerprint: Fingerprint,
    ) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
        let prior_value_fingerprint = *self
            .value_fingerprints
            .lock()
            .get_or_insert_with(dep_node_index, || value_fingerprint);
        assert_eq!(prior_value_fingerprint, value_fingerprint, "Unstable fingerprints for {key:?}");
    }

    #[inline(always)]
    fn record_node(
        &self,
        dep_node_index: DepNodeIndex,
        key: DepNode,
        _value_fingerprint: Fingerprint,
    ) {
        #[cfg(debug_assertions)]
        self.record_edge(dep_node_index, key, _value_fingerprint);

        if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
            outline(|| {
                if nodes_in_current_session.lock().insert(key, dep_node_index).is_some() {
                    panic!("Found duplicate dep-node {key:?}");
                }
            });
        }
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    #[inline(always)]
    fn alloc_new_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        value_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        let dep_node_index = self.encoder.send_new(key, value_fingerprint, edges);

        self.record_node(dep_node_index, key, value_fingerprint);

        dep_node_index
    }
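
    // Design note: `send_new` hands the node straight to the streaming
    // `GraphEncoder` rather than retaining it, in line with the goal stated
    // on `CurrentDepGraph` of not keeping the graph's nodes in memory.
    // Callers only hold on to the returned `DepNodeIndex`.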

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) {
        if !is_dyn_thread_safe()
            && let Some(ref nodes_in_current_session) = self.nodes_in_current_session
        {
            debug_assert!(
                !nodes_in_current_session
                    .lock()
                    .contains_key(&prev_graph.index_to_node(prev_index)),
                "node from previous graph present in new node collection"
            );
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a> {
    /// New dependencies can be added to the
    /// `TaskDeps`. This is used when executing a 'normal' query
    /// (no `eval_always` modifier).
    Allow(&'a Lock<TaskDeps>),
    /// This is used when executing an `eval_always` query. We don't
    /// need to track dependencies for a query that's always
    /// re-executed -- but we need to know that this is an `eval_always`
    /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
    /// when directly feeding other queries.
    EvalAlways,
    /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
    Ignore,
    /// Any attempt to add new dependencies will cause a panic.
    /// This is used when decoding a query result from disk,
    /// to ensure that the decoding process doesn't itself
    /// require the execution of any queries.
    Forbid,
}

#[derive(Debug)]
pub struct TaskDeps {
    #[cfg(debug_assertions)]
    node: Option<DepNode>,

    /// A vector of `DepNodeIndex`, basically.
    reads: EdgesVec,

    /// When adding new edges to `reads` in `DepGraph::read_index` we need to determine if the edge
    /// has been seen before. If the number of elements in `reads` is small, we just do a linear
    /// scan. If the number is higher, a hashset has better perf. This field is that hashset. It's
    /// only used if the number of elements in `reads` exceeds `LINEAR_SCAN_MAX`.
    read_set: FxHashSet<DepNodeIndex>,
}

impl TaskDeps {
    /// See `TaskDeps::read_set` above.
    const LINEAR_SCAN_MAX: usize = 16;

    #[inline]
    fn new(#[cfg(debug_assertions)] node: Option<DepNode>, read_set_capacity: usize) -> Self {
        TaskDeps {
            #[cfg(debug_assertions)]
            node,
            reads: EdgesVec::new(),
            read_set: FxHashSet::with_capacity_and_hasher(read_set_capacity, Default::default()),
        }
    }
}
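
// Illustrative sketch (hypothetical helper, not the compiler's code): the
// dedup strategy described on `read_set` amounts to the following check
// inside `DepGraph::read_index`:
//
//     fn already_seen(deps: &TaskDeps, source: DepNodeIndex) -> bool {
//         if deps.reads.len() <= TaskDeps::LINEAR_SCAN_MAX {
//             deps.reads.contains(&source) // few reads: linear scan is cheapest
//         } else {
//             deps.read_set.contains(&source) // many reads: use the hashset
//         }
//     }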

// A data structure that stores `Option<DepNodeColor>` values as a contiguous
// array, using one u32 per entry.
pub(super) struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

// All values below `COMPRESSED_RED` are green.
const COMPRESSED_RED: u32 = u32::MAX - 1;
const COMPRESSED_UNKNOWN: u32 = u32::MAX;
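
// Summary of the packing: each `AtomicU32` entry encodes an
// `Option<DepNodeColor>` as follows (restating the constants above and the
// accessors below):
//
//     0..=DepNodeIndex::MAX_AS_U32   => Green(DepNodeIndex::from_u32(value))
//     COMPRESSED_RED (u32::MAX - 1)  => Red
//     COMPRESSED_UNKNOWN (u32::MAX)  => Unknown (i.e. `None`)
//
// Storing the green node's new `DepNodeIndex` directly in the color value is
// what lets `current` answer "what is this node's index in the current
// graph?" with a single atomic load.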

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        debug_assert!(COMPRESSED_RED > DepNodeIndex::MAX_AS_U32);
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_UNKNOWN)).collect() }
    }

    #[inline]
    pub(super) fn current(&self, index: SerializedDepNodeIndex) -> Option<DepNodeIndex> {
        let value = self.values[index].load(Ordering::Relaxed);
        if value <= DepNodeIndex::MAX_AS_U32 { Some(DepNodeIndex::from_u32(value)) } else { None }
    }

    /// This tries to atomically mark a node green and assign `index` as the new
    /// index if `green` is true, otherwise it will try to atomically mark it red.
    ///
    /// This returns `Ok` if `index` gets assigned or the node is marked red.
    /// Otherwise it returns the already allocated index in `Err(Some(_))` if
    /// the node is already green, or `Err(None)` if it was already red.
    #[inline(always)]
    pub(super) fn try_mark(
        &self,
        prev_index: SerializedDepNodeIndex,
        index: DepNodeIndex,
        green: bool,
    ) -> Result<(), Option<DepNodeIndex>> {
        let value = &self.values[prev_index];
        match value.compare_exchange(
            COMPRESSED_UNKNOWN,
            if green { index.as_u32() } else { COMPRESSED_RED },
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            Ok(_) => Ok(()),
            Err(v) => Err(if v == COMPRESSED_RED { None } else { Some(DepNodeIndex::from_u32(v)) }),
        }
    }
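
    // Usage note: the single `compare_exchange` above is what makes
    // concurrent marking safe. If two threads race to color the same node,
    // exactly one CAS succeeds and the loser learns the established color
    // from the `Err` value (`colors`, `prev_index`, and `index` here are
    // stand-ins for caller state):
    //
    //     match colors.try_mark(prev_index, index, true) {
    //         Ok(()) => { /* `index` is now the node's green index */ }
    //         Err(Some(existing)) => { /* already green; reuse `existing` */ }
    //         Err(None) => { /* already red; marking as green failed */ }
    //     }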

    #[inline]
    pub(super) fn get(&self, index: SerializedDepNodeIndex) -> DepNodeColor {
        let value = self.values[index].load(Ordering::Acquire);
        // Green is by far the most common case. Check for that first so we can succeed with a
        // single comparison.
        if value < COMPRESSED_RED {
            DepNodeColor::Green(DepNodeIndex::from_u32(value))
        } else if value == COMPRESSED_RED {
            DepNodeColor::Red
        } else {
            debug_assert_eq!(value, COMPRESSED_UNKNOWN);
            DepNodeColor::Unknown
        }
    }

    #[inline]
    pub(super) fn insert_red(&self, index: SerializedDepNodeIndex) {
        let value = self.values[index].swap(COMPRESSED_RED, Ordering::Release);
        // Sanity check for duplicate nodes.
        assert_eq!(value, COMPRESSED_UNKNOWN, "tried to color an already colored node as red");
    }
}
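
// Design note on orderings: `insert_red` publishes with `Ordering::Release`
// and `get` loads with `Ordering::Acquire`, presumably so a thread that
// observes a color also observes the writes that happened before the
// coloring, while `current` and `try_mark` use `Relaxed` where only the
// value itself matters. This reading is inferred from the code above, not
// from separate documentation.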

#[inline(never)]
#[cold]
pub(crate) fn print_markframe_trace(graph: &DepGraph, frame: &MarkFrame<'_>) {
    let data = graph.data.as_ref().unwrap();

    eprintln!("there was a panic while trying to force a dep node");
    eprintln!("try_mark_green dep node stack:");

    let mut i = 0;
    let mut current = Some(frame);
    while let Some(frame) = current {
        let node = data.previous.index_to_node(frame.index);
        eprintln!("#{i} {node:?}");
        current = frame.parent;
        i += 1;
    }

    eprintln!("end of try_mark_green dep node stack");
}

#[cold]
#[inline(never)]
fn panic_on_forbidden_read(data: &DepGraphData, dep_node_index: DepNodeIndex) -> ! {
    // We have to do an expensive reverse-lookup of the DepNode that
    // corresponds to `dep_node_index`, but that's OK since we are about
    // to ICE anyway.
    let mut dep_node = None;

    // First try to find the dep node among those that already existed in the
    // previous session and were marked green.
    for prev_index in data.colors.values.indices() {
        if data.colors.current(prev_index) == Some(dep_node_index) {
            dep_node = Some(*data.previous.index_to_node(prev_index));
            break;
        }
    }

    if dep_node.is_none()
        && let Some(nodes) = &data.current.nodes_in_current_session
    {
        // Try to find it among the nodes allocated so far in this session.
        // This is OK, there's only ever one node result possible so this is deterministic.
        #[allow(rustc::potential_query_instability)]
        if let Some((node, _)) = nodes.lock().iter().find(|&(_, index)| *index == dep_node_index) {
            dep_node = Some(*node);
        }
    }

    let dep_node = dep_node.map_or_else(
        || format!("with index {:?}", dep_node_index),
        |dep_node| format!("`{:?}`", dep_node),
    );

    panic!(
        "Error: trying to record dependency on DepNode {dep_node} in a \
         context that does not allow it (e.g. during query deserialization). \
         The most common case of recording a dependency on a DepNode `foo` is \
         when the corresponding query `foo` is invoked. Invoking queries is not \
         allowed as part of loading something from the incremental on-disk cache. \
         See <https://github.com/rust-lang/rust/pull/91919>."
    )
}

impl<'tcx> TyCtxt<'tcx> {
    /// Returns whether this kind always requires evaluation.
    #[inline(always)]
    fn is_eval_always(self, kind: DepKind) -> bool {
        self.dep_kind_vtable(kind).is_eval_always
    }

    // Interactions with the on-disk cache.
    fn load_side_effect(
        self,
        prev_dep_node_index: SerializedDepNodeIndex,
    ) -> Option<QuerySideEffect> {
        self.query_system
            .on_disk_cache
            .as_ref()
            .and_then(|c| c.load_side_effect(self, prev_dep_node_index))
    }

    #[inline(never)]
    #[cold]
    fn store_side_effect(self, dep_node_index: DepNodeIndex, side_effect: QuerySideEffect) {
        if let Some(c) = self.query_system.on_disk_cache.as_ref() {
            c.store_side_effect(dep_node_index, side_effect)
        }
    }
}