1use std::assert_matches;
2use std::fmt::Debug;
3use std::hash::Hash;
4use std::sync::Arc;
5use std::sync::atomic::{AtomicU32, Ordering};
67use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
8use rustc_data_structures::fx::{FxHashMap, FxHashSet};
9use rustc_data_structures::outline;
10use rustc_data_structures::profiling::QueryInvocationId;
11use rustc_data_structures::sharded::{self, ShardedHashMap};
12use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
13use rustc_data_structures::sync::{AtomicU64, Lock, is_dyn_thread_safe};
14use rustc_data_structures::unord::UnordMap;
15use rustc_errors::DiagInner;
16use rustc_index::IndexVec;
17use rustc_macros::{Decodable, Encodable};
18use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
19use rustc_session::Session;
20use rustc_span::Symbol;
21use tracing::instrument;
22#[cfg(debug_assertions)]
23use {super::debug::EdgeFilter, std::env};
2425use super::retained::RetainedDepGraph;
26use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
27use super::{DepKind, DepNode, WorkProductId, read_deps, with_deps};
28use crate::dep_graph::edges::EdgesVec;
29use crate::ich::StableHashingContext;
30use crate::ty::TyCtxt;
31use crate::verify_ich::incremental_verify_ich;
3233/// Tracks 'side effects' for a particular query.
34/// This struct is saved to disk along with the query result,
35/// and loaded from disk if we mark the query as green.
36/// This allows us to 'replay' changes to global state
37/// that would otherwise only occur if we actually
38/// executed the query method.
39///
40/// Each side effect gets an unique dep node index which is added
41/// as a dependency of the query which had the effect.
42#[derive(#[automatically_derived]
impl ::core::fmt::Debug for QuerySideEffect {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
QuerySideEffect::Diagnostic(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f,
"Diagnostic", &__self_0),
QuerySideEffect::CheckFeature { symbol: __self_0 } =>
::core::fmt::Formatter::debug_struct_field1_finish(f,
"CheckFeature", "symbol", &__self_0),
}
}
}Debug, const _: () =
{
impl<__E: ::rustc_span::SpanEncoder> ::rustc_serialize::Encodable<__E>
for QuerySideEffect {
fn encode(&self, __encoder: &mut __E) {
let disc =
match *self {
QuerySideEffect::Diagnostic(ref __binding_0) => { 0usize }
QuerySideEffect::CheckFeature { symbol: ref __binding_0 } =>
{
1usize
}
};
::rustc_serialize::Encoder::emit_u8(__encoder, disc as u8);
match *self {
QuerySideEffect::Diagnostic(ref __binding_0) => {
::rustc_serialize::Encodable::<__E>::encode(__binding_0,
__encoder);
}
QuerySideEffect::CheckFeature { symbol: ref __binding_0 } =>
{
::rustc_serialize::Encodable::<__E>::encode(__binding_0,
__encoder);
}
}
}
}
};Encodable, const _: () =
{
impl<__D: ::rustc_span::SpanDecoder> ::rustc_serialize::Decodable<__D>
for QuerySideEffect {
fn decode(__decoder: &mut __D) -> Self {
match ::rustc_serialize::Decoder::read_u8(__decoder) as usize
{
0usize => {
QuerySideEffect::Diagnostic(::rustc_serialize::Decodable::decode(__decoder))
}
1usize => {
QuerySideEffect::CheckFeature {
symbol: ::rustc_serialize::Decodable::decode(__decoder),
}
}
n => {
::core::panicking::panic_fmt(format_args!("invalid enum variant tag while decoding `QuerySideEffect`, expected 0..2, actual {0}",
n));
}
}
}
}
};Decodable)]
43pub enum QuerySideEffect {
44/// Stores a diagnostic emitted during query execution.
45 /// This diagnostic will be re-emitted if we mark
46 /// the query as green, as that query will have the side
47 /// effect dep node as a dependency.
48Diagnostic(DiagInner),
49/// Records the feature used during query execution.
50 /// This feature will be inserted into `sess.used_features`
51 /// if we mark the query as green, as that query will have
52 /// the side effect dep node as a dependency.
53CheckFeature { symbol: Symbol },
54}
5556#[derive(#[automatically_derived]
impl ::core::clone::Clone for DepGraph {
#[inline]
fn clone(&self) -> DepGraph {
DepGraph {
data: ::core::clone::Clone::clone(&self.data),
virtual_dep_node_index: ::core::clone::Clone::clone(&self.virtual_dep_node_index),
}
}
}Clone)]
57pub struct DepGraph {
58 data: Option<Arc<DepGraphData>>,
5960/// This field is used for assigning DepNodeIndices when running in
61 /// non-incremental mode. Even in non-incremental mode we make sure that
62 /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
63 /// ID is used for self-profiling.
64virtual_dep_node_index: Arc<AtomicU32>,
65}
6667impl ::std::fmt::Debug for DepNodeIndex {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
fmt.write_fmt(format_args!("{0}", self.as_u32()))
}
}rustc_index::newtype_index! {
68pub struct DepNodeIndex {}
69}7071// We store a large collection of these in `prev_index_to_index` during
72// non-full incremental builds, and want to ensure that the element size
73// doesn't inadvertently increase.
74const _: [(); 4] = [(); ::std::mem::size_of::<Option<DepNodeIndex>>()];rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);
7576impl DepNodeIndex {
77const SINGLETON_ZERO_DEPS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
78pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
79}
8081impl From<DepNodeIndex> for QueryInvocationId {
82#[inline(always)]
83fn from(dep_node_index: DepNodeIndex) -> Self {
84QueryInvocationId(dep_node_index.as_u32())
85 }
86}
8788pub(crate) struct MarkFrame<'a> {
89 index: SerializedDepNodeIndex,
90 parent: Option<&'a MarkFrame<'a>>,
91}
9293#[derive(#[automatically_derived]
impl ::core::fmt::Debug for DepNodeColor {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
DepNodeColor::Green(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f, "Green",
&__self_0),
DepNodeColor::Red => ::core::fmt::Formatter::write_str(f, "Red"),
DepNodeColor::Unknown =>
::core::fmt::Formatter::write_str(f, "Unknown"),
}
}
}Debug)]
94pub(super) enum DepNodeColor {
95 Green(DepNodeIndex),
96 Red,
97 Unknown,
98}
99100pub struct DepGraphData {
101/// The new encoding of the dependency graph, optimized for red/green
102 /// tracking. The `current` field is the dependency graph of only the
103 /// current compilation session: We don't merge the previous dep-graph into
104 /// current one anymore, but we do reference shared data to save space.
105current: CurrentDepGraph,
106107/// The dep-graph from the previous compilation session. It contains all
108 /// nodes and edges as well as all fingerprints of nodes that have them.
109previous: Arc<SerializedDepGraph>,
110111 colors: DepNodeColorMap,
112113/// When we load, there may be `.o` files, cached MIR, or other such
114 /// things available to us. If we find that they are not dirty, we
115 /// load the path to the file storing those work-products here into
116 /// this map. We can later look for and extract that data.
117previous_work_products: WorkProductMap,
118119/// Used by incremental compilation tests to assert that
120 /// a particular query result was decoded from disk
121 /// (not just marked green)
122debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
123}
124125pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint126where
127R: for<'a> HashStable<StableHashingContext<'a>>,
128{
129let mut stable_hasher = StableHasher::new();
130result.hash_stable(hcx, &mut stable_hasher);
131stable_hasher.finish()
132}
133134impl DepGraph {
135pub fn new(
136 session: &Session,
137 prev_graph: Arc<SerializedDepGraph>,
138 prev_work_products: WorkProductMap,
139 encoder: FileEncoder,
140 ) -> DepGraph {
141let prev_graph_node_count = prev_graph.node_count();
142143let current =
144CurrentDepGraph::new(session, prev_graph_node_count, encoder, Arc::clone(&prev_graph));
145146let colors = DepNodeColorMap::new(prev_graph_node_count);
147148// Instantiate a node with zero dependencies only once for anonymous queries.
149let _green_node_index = current.alloc_new_node(
150DepNode { kind: DepKind::AnonZeroDeps, key_fingerprint: current.anon_id_seed.into() },
151EdgesVec::new(),
152Fingerprint::ZERO,
153 );
154match (&_green_node_index, &DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE);
155156// Create a single always-red node, with no dependencies of its own.
157 // Other nodes can use the always-red node as a fake dependency, to
158 // ensure that their dependency list will never be all-green.
159let red_node_index = current.alloc_new_node(
160DepNode { kind: DepKind::Red, key_fingerprint: Fingerprint::ZERO.into() },
161EdgesVec::new(),
162Fingerprint::ZERO,
163 );
164match (&red_node_index, &DepNodeIndex::FOREVER_RED_NODE) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
165if prev_graph_node_count > 0 {
166let prev_index =
167const { SerializedDepNodeIndex::from_u32(DepNodeIndex::FOREVER_RED_NODE.as_u32()) };
168let result = colors.try_set_color(prev_index, DesiredColor::Red);
169match result {
TrySetColorResult::Success => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"TrySetColorResult::Success", ::core::option::Option::None);
}
};assert_matches!(result, TrySetColorResult::Success);
170 }
171172DepGraph {
173 data: Some(Arc::new(DepGraphData {
174 previous_work_products: prev_work_products,
175current,
176 previous: prev_graph,
177colors,
178 debug_loaded_from_disk: Default::default(),
179 })),
180 virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
181 }
182 }
183184pub fn new_disabled() -> DepGraph {
185DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
186 }
187188#[inline]
189pub fn data(&self) -> Option<&DepGraphData> {
190self.data.as_deref()
191 }
192193/// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
194#[inline]
195pub fn is_fully_enabled(&self) -> bool {
196self.data.is_some()
197 }
198199pub fn with_retained_dep_graph(&self, f: impl Fn(&RetainedDepGraph)) {
200if let Some(data) = &self.data {
201data.current.encoder.with_retained_dep_graph(f)
202 }
203 }
204205pub fn assert_ignored(&self) {
206if let Some(..) = self.data {
207read_deps(|task_deps| {
208match task_deps {
TaskDepsRef::Ignore => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"TaskDepsRef::Ignore",
::core::option::Option::Some(format_args!("expected no task dependency tracking")));
}
};assert_matches!(
209 task_deps,
210 TaskDepsRef::Ignore,
211"expected no task dependency tracking"
212);
213 })
214 }
215 }
216217pub fn with_ignore<OP, R>(&self, op: OP) -> R
218where
219OP: FnOnce() -> R,
220 {
221with_deps(TaskDepsRef::Ignore, op)
222 }
223224/// Used to wrap the deserialization of a query result from disk,
225 /// This method enforces that no new `DepNodes` are created during
226 /// query result deserialization.
227 ///
228 /// Enforcing this makes the query dep graph simpler - all nodes
229 /// must be created during the query execution, and should be
230 /// created from inside the 'body' of a query (the implementation
231 /// provided by a particular compiler crate).
232 ///
233 /// Consider the case of three queries `A`, `B`, and `C`, where
234 /// `A` invokes `B` and `B` invokes `C`:
235 ///
236 /// `A -> B -> C`
237 ///
238 /// Suppose that decoding the result of query `B` required re-computing
239 /// the query `C`. If we did not create a fresh `TaskDeps` when
240 /// decoding `B`, we would still be using the `TaskDeps` for query `A`
241 /// (if we needed to re-execute `A`). This would cause us to create
242 /// a new edge `A -> C`. If this edge did not previously
243 /// exist in the `DepGraph`, then we could end up with a different
244 /// `DepGraph` at the end of compilation, even if there were no
245 /// meaningful changes to the overall program (e.g. a newline was added).
246 /// In addition, this edge might cause a subsequent compilation run
247 /// to try to force `C` before marking other necessary nodes green. If
248 /// `C` did not exist in the new compilation session, then we could
249 /// get an ICE. Normally, we would have tried (and failed) to mark
250 /// some other query green (e.g. `item_children`) which was used
251 /// to obtain `C`, which would prevent us from ever trying to force
252 /// a nonexistent `D`.
253 ///
254 /// It might be possible to enforce that all `DepNode`s read during
255 /// deserialization already exist in the previous `DepGraph`. In
256 /// the above example, we would invoke `D` during the deserialization
257 /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
258 /// of `B`, this would result in an edge `B -> D`. If that edge already
259 /// existed (with the same `DepPathHash`es), then it should be correct
260 /// to allow the invocation of the query to proceed during deserialization
261 /// of a query result. We would merely assert that the dep-graph fragment
262 /// that would have been added by invoking `C` while decoding `B`
263 /// is equivalent to the dep-graph fragment that we already instantiated for B
264 /// (at the point where we successfully marked B as green).
265 ///
266 /// However, this would require additional complexity
267 /// in the query infrastructure, and is not currently needed by the
268 /// decoding of any query results. Should the need arise in the future,
269 /// we should consider extending the query system with this functionality.
270pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
271where
272OP: FnOnce() -> R,
273 {
274with_deps(TaskDepsRef::Forbid, op)
275 }
276277#[inline(always)]
278pub fn with_task<'tcx, OP, R>(
279&self,
280 dep_node: DepNode,
281 tcx: TyCtxt<'tcx>,
282 op: OP,
283 hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
284 ) -> (R, DepNodeIndex)
285where
286OP: FnOnce() -> R,
287 {
288match self.data() {
289Some(data) => data.with_task(dep_node, tcx, op, hash_result),
290None => (op(), self.next_virtual_depnode_index()),
291 }
292 }
293294pub fn with_anon_task<'tcx, OP, R>(
295&self,
296 tcx: TyCtxt<'tcx>,
297 dep_kind: DepKind,
298 op: OP,
299 ) -> (R, DepNodeIndex)
300where
301OP: FnOnce() -> R,
302 {
303match self.data() {
304Some(data) => {
305let (result, index) = data.with_anon_task_inner(tcx, dep_kind, op);
306self.read_index(index);
307 (result, index)
308 }
309None => (op(), self.next_virtual_depnode_index()),
310 }
311 }
312}
313314impl DepGraphData {
315#[inline(always)]
316pub fn with_task<'tcx, OP, R>(
317&self,
318 dep_node: DepNode,
319 tcx: TyCtxt<'tcx>,
320 op: OP,
321 hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
322 ) -> (R, DepNodeIndex)
323where
324OP: FnOnce() -> R,
325 {
326// If the following assertion triggers, it can have two reasons:
327 // 1. Something is wrong with DepNode creation, either here or
328 // in `DepGraph::try_mark_green()`.
329 // 2. Two distinct query keys get mapped to the same `DepNode`
330 // (see for example #48923).
331self.assert_dep_node_not_yet_allocated_in_current_session(tcx.sess, &dep_node, || {
332::alloc::__export::must_use({
::alloc::fmt::format(format_args!("forcing query with already existing `DepNode`: {0:?}",
dep_node))
})format!("forcing query with already existing `DepNode`: {dep_node:?}")333 });
334335let with_deps = |task_deps| with_deps(task_deps, op);
336let (result, edges) = if tcx.is_eval_always(dep_node.kind) {
337 (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
338 } else {
339let task_deps = Lock::new(TaskDeps::new(
340#[cfg(debug_assertions)]
341Some(dep_node),
3420,
343 ));
344 (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
345 };
346347let dep_node_index =
348self.hash_result_and_alloc_node(tcx, dep_node, edges, &result, hash_result);
349350 (result, dep_node_index)
351 }
352353/// Executes something within an "anonymous" task, that is, a task the
354 /// `DepNode` of which is determined by the list of inputs it read from.
355 ///
356 /// NOTE: this does not actually count as a read of the DepNode here.
357 /// Using the result of this task without reading the DepNode will result
358 /// in untracked dependencies which may lead to ICEs as nodes are
359 /// incorrectly marked green.
360 ///
361 /// FIXME: This could perhaps return a `WithDepNode` to ensure that the
362 /// user of this function actually performs the read.
363fn with_anon_task_inner<'tcx, OP, R>(
364&self,
365 tcx: TyCtxt<'tcx>,
366 dep_kind: DepKind,
367 op: OP,
368 ) -> (R, DepNodeIndex)
369where
370OP: FnOnce() -> R,
371 {
372if true {
if !!tcx.is_eval_always(dep_kind) {
::core::panicking::panic("assertion failed: !tcx.is_eval_always(dep_kind)")
};
};debug_assert!(!tcx.is_eval_always(dep_kind));
373374// Large numbers of reads are common enough here that pre-sizing `read_set`
375 // to 128 actually helps perf on some benchmarks.
376let task_deps = Lock::new(TaskDeps::new(
377#[cfg(debug_assertions)]
378None,
379128,
380 ));
381let result = with_deps(TaskDepsRef::Allow(&task_deps), op);
382let task_deps = task_deps.into_inner();
383let reads = task_deps.reads;
384385let dep_node_index = match reads.len() {
3860 => {
387// Because the dep-node id of anon nodes is computed from the sets of its
388 // dependencies we already know what the ID of this dependency-less node is
389 // going to be (i.e. equal to the precomputed
390 // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
391 // a `StableHasher` and sending the node through interning.
392DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE393 }
3941 => {
395// When there is only one dependency, don't bother creating a node.
396reads[0]
397 }
398_ => {
399// The dep node indices are hashed here instead of hashing the dep nodes of the
400 // dependencies. These indices may refer to different nodes per session, but this isn't
401 // a problem here because we that ensure the final dep node hash is per session only by
402 // combining it with the per session random number `anon_id_seed`. This hash only need
403 // to map the dependencies to a single value on a per session basis.
404let mut hasher = StableHasher::new();
405reads.hash(&mut hasher);
406407let target_dep_node = DepNode {
408 kind: dep_kind,
409// Fingerprint::combine() is faster than sending Fingerprint
410 // through the StableHasher (at least as long as StableHasher
411 // is so slow).
412key_fingerprint: self.current.anon_id_seed.combine(hasher.finish()).into(),
413 };
414415// The DepNodes generated by the process above are not unique. 2 queries could
416 // have exactly the same dependencies. However, deserialization does not handle
417 // duplicated nodes, so we do the deduplication here directly.
418 //
419 // As anonymous nodes are a small quantity compared to the full dep-graph, the
420 // memory impact of this `anon_node_to_index` map remains tolerable, and helps
421 // us avoid useless growth of the graph with almost-equivalent nodes.
422self.current.anon_node_to_index.get_or_insert_with(target_dep_node, || {
423self.current.alloc_new_node(target_dep_node, reads, Fingerprint::ZERO)
424 })
425 }
426 };
427428 (result, dep_node_index)
429 }
430431/// Intern the new `DepNode` with the dependencies up-to-now.
432fn hash_result_and_alloc_node<'tcx, R>(
433&self,
434 tcx: TyCtxt<'tcx>,
435 node: DepNode,
436 edges: EdgesVec,
437 result: &R,
438 hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
439 ) -> DepNodeIndex {
440let hashing_timer = tcx.prof.incr_result_hashing();
441let current_fingerprint = hash_result.map(|hash_result| {
442tcx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
443 });
444let dep_node_index = self.alloc_and_color_node(node, edges, current_fingerprint);
445hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
446dep_node_index447 }
448}
449450impl DepGraph {
451#[inline]
452pub fn read_index(&self, dep_node_index: DepNodeIndex) {
453if let Some(ref data) = self.data {
454read_deps(|task_deps| {
455let mut task_deps = match task_deps {
456 TaskDepsRef::Allow(deps) => deps.lock(),
457 TaskDepsRef::EvalAlways => {
458// We don't need to record dependencies of eval_always
459 // queries. They are re-evaluated unconditionally anyway.
460return;
461 }
462 TaskDepsRef::Ignore => return,
463 TaskDepsRef::Forbid => {
464// Reading is forbidden in this context. ICE with a useful error message.
465panic_on_forbidden_read(data, dep_node_index)
466 }
467 };
468let task_deps = &mut *task_deps;
469470if truecfg!(debug_assertions) {
471data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
472 }
473474// Has `dep_node_index` been seen before? Use either a linear scan or a hashset
475 // lookup to determine this. See `TaskDeps::read_set` for details.
476let new_read = if task_deps.reads.len() <= TaskDeps::LINEAR_SCAN_MAX {
477 !task_deps.reads.contains(&dep_node_index)
478 } else {
479task_deps.read_set.insert(dep_node_index)
480 };
481if new_read {
482task_deps.reads.push(dep_node_index);
483if task_deps.reads.len() == TaskDeps::LINEAR_SCAN_MAX + 1 {
484// Fill `read_set` with what we have so far. Future lookups will use it.
485task_deps.read_set.extend(task_deps.reads.iter().copied());
486 }
487488#[cfg(debug_assertions)]
489{
490if let Some(target) = task_deps.node
491 && let Some(ref forbidden_edge) = data.current.forbidden_edge
492 {
493let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
494if forbidden_edge.test(&src, &target) {
495{
::core::panicking::panic_fmt(format_args!("forbidden edge {0:?} -> {1:?} created",
src, target));
}panic!("forbidden edge {:?} -> {:?} created", src, target)496 }
497 }
498 }
499 } else if truecfg!(debug_assertions) {
500data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
501 }
502 })
503 }
504 }
505506/// This encodes a side effect by creating a node with an unique index and associating
507 /// it with the node, for use in the next session.
508#[inline]
509pub fn record_diagnostic<'tcx>(&self, tcx: TyCtxt<'tcx>, diagnostic: &DiagInner) {
510if let Some(ref data) = self.data {
511read_deps(|task_deps| match task_deps {
512 TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return,
513 TaskDepsRef::Forbid | TaskDepsRef::Allow(..) => {
514let dep_node_index = data515 .encode_side_effect(tcx, QuerySideEffect::Diagnostic(diagnostic.clone()));
516self.read_index(dep_node_index);
517 }
518 })
519 }
520 }
521/// This forces a side effect node green by running its side effect. `prev_index` would
522 /// refer to a node created used `encode_side_effect` in the previous session.
523#[inline]
524pub fn force_side_effect<'tcx>(&self, tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex) {
525if let Some(ref data) = self.data {
526data.force_side_effect(tcx, prev_index);
527 }
528 }
529530#[inline]
531pub fn encode_side_effect<'tcx>(
532&self,
533 tcx: TyCtxt<'tcx>,
534 side_effect: QuerySideEffect,
535 ) -> DepNodeIndex {
536if let Some(ref data) = self.data {
537data.encode_side_effect(tcx, side_effect)
538 } else {
539self.next_virtual_depnode_index()
540 }
541 }
542543/// Create a node when we force-feed a value into the query cache.
544 /// This is used to remove cycles during type-checking const generic parameters.
545 ///
546 /// As usual in the query system, we consider the current state of the calling query
547 /// only depends on the list of dependencies up to now. As a consequence, the value
548 /// that this query gives us can only depend on those dependencies too. Therefore,
549 /// it is sound to use the current dependency set for the created node.
550 ///
551 /// During replay, the order of the nodes is relevant in the dependency graph.
552 /// So the unchanged replay will mark the caller query before trying to mark this one.
553 /// If there is a change to report, the caller query will be re-executed before this one.
554 ///
555 /// FIXME: If the code is changed enough for this node to be marked before requiring the
556 /// caller's node, we suppose that those changes will be enough to mark this node red and
557 /// force a recomputation using the "normal" way.
558pub fn with_feed_task<'tcx, R>(
559&self,
560 node: DepNode,
561 tcx: TyCtxt<'tcx>,
562 result: &R,
563 hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
564 format_value_fn: fn(&R) -> String,
565 ) -> DepNodeIndex {
566if let Some(data) = self.data.as_ref() {
567// The caller query has more dependencies than the node we are creating. We may
568 // encounter a case where this created node is marked as green, but the caller query is
569 // subsequently marked as red or recomputed. In this case, we will end up feeding a
570 // value to an existing node.
571 //
572 // For sanity, we still check that the loaded stable hash and the new one match.
573if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
574let dep_node_index = data.colors.current(prev_index);
575if let Some(dep_node_index) = dep_node_index {
576incremental_verify_ich(
577tcx,
578data,
579result,
580prev_index,
581hash_result,
582format_value_fn,
583 );
584585#[cfg(debug_assertions)]
586if hash_result.is_some() {
587data.current.record_edge(
588dep_node_index,
589node,
590data.prev_value_fingerprint_of(prev_index),
591 );
592 }
593594return dep_node_index;
595 }
596 }
597598let mut edges = EdgesVec::new();
599read_deps(|task_deps| match task_deps {
600 TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
601 TaskDepsRef::EvalAlways => {
602edges.push(DepNodeIndex::FOREVER_RED_NODE);
603 }
604 TaskDepsRef::Ignore => {}
605 TaskDepsRef::Forbid => {
606{
::core::panicking::panic_fmt(format_args!("Cannot summarize when dependencies are not recorded."));
}panic!("Cannot summarize when dependencies are not recorded.")607 }
608 });
609610data.hash_result_and_alloc_node(tcx, node, edges, result, hash_result)
611 } else {
612// Incremental compilation is turned off. We just execute the task
613 // without tracking. We still provide a dep-node index that uniquely
614 // identifies the task so that we have a cheap way of referring to
615 // the query for self-profiling.
616self.next_virtual_depnode_index()
617 }
618 }
619}
620621impl DepGraphData {
622fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
623&self,
624 sess: &Session,
625 dep_node: &DepNode,
626 msg: impl FnOnce() -> S,
627 ) {
628if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
629let color = self.colors.get(prev_index);
630let ok = match color {
631 DepNodeColor::Unknown => true,
632 DepNodeColor::Red => false,
633 DepNodeColor::Green(..) => sess.threads() > 1, // Other threads may mark this green
634};
635if !ok {
636{ ::core::panicking::panic_display(&msg()); }panic!("{}", msg())637 }
638 } else if let Some(nodes_in_current_session) = &self.current.nodes_in_current_session {
639outline(|| {
640let seen = nodes_in_current_session.lock().contains_key(dep_node);
641if !!seen { { ::core::panicking::panic_display(&msg()); } };assert!(!seen, "{}", msg());
642 });
643 }
644 }
645646fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
647if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
648self.colors.get(prev_index)
649 } else {
650// This is a node that did not exist in the previous compilation session.
651DepNodeColor::Unknown652 }
653 }
654655/// Returns true if the given node has been marked as green during the
656 /// current compilation session. Used in various assertions
657#[inline]
658pub fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
659#[allow(non_exhaustive_omitted_patterns)] match self.colors.get(prev_index) {
DepNodeColor::Green(_) => true,
_ => false,
}matches!(self.colors.get(prev_index), DepNodeColor::Green(_))660 }
661662#[inline]
663pub fn prev_value_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
664self.previous.value_fingerprint_for_index(prev_index)
665 }
666667#[inline]
668pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> &DepNode {
669self.previous.index_to_node(prev_index)
670 }
671672pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
673self.debug_loaded_from_disk.lock().insert(dep_node);
674 }
675676/// This encodes a side effect by creating a node with an unique index and associating
677 /// it with the node, for use in the next session.
678#[inline]
679fn encode_side_effect<'tcx>(
680&self,
681 tcx: TyCtxt<'tcx>,
682 side_effect: QuerySideEffect,
683 ) -> DepNodeIndex {
684// Use `send_new` so we get an unique index, even though the dep node is not.
685let dep_node_index = self.current.encoder.send_new(
686DepNode {
687 kind: DepKind::SideEffect,
688 key_fingerprint: PackedFingerprint::from(Fingerprint::ZERO),
689 },
690Fingerprint::ZERO,
691// We want the side effect node to always be red so it will be forced and run the
692 // side effect.
693std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
694 );
695tcx.query_system.side_effects.borrow_mut().insert(dep_node_index, side_effect);
696dep_node_index697 }
698699/// This forces a side effect node green by running its side effect. `prev_index` would
700 /// refer to a node created used `encode_side_effect` in the previous session.
701#[inline]
702fn force_side_effect<'tcx>(&self, tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex) {
703with_deps(TaskDepsRef::Ignore, || {
704let side_effect = tcx705 .query_system
706 .on_disk_cache
707 .as_ref()
708 .unwrap()
709 .load_side_effect(tcx, prev_index)
710 .unwrap();
711712// Use `send_and_color` as `promote_node_and_deps_to_current` expects all
713 // green dependencies. `send_and_color` will also prevent multiple nodes
714 // being encoded for concurrent calls.
715let dep_node_index = self.current.encoder.send_and_color(
716prev_index,
717&self.colors,
718DepNode {
719 kind: DepKind::SideEffect,
720 key_fingerprint: PackedFingerprint::from(Fingerprint::ZERO),
721 },
722Fingerprint::ZERO,
723 std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
724true,
725 );
726727match &side_effect {
728 QuerySideEffect::Diagnostic(diagnostic) => {
729tcx.dcx().emit_diagnostic(diagnostic.clone());
730 }
731 QuerySideEffect::CheckFeature { symbol } => {
732tcx.sess.used_features.lock().insert(*symbol, dep_node_index.as_u32());
733 }
734 }
735736// This will just overwrite the same value for concurrent calls.
737tcx.query_system.side_effects.borrow_mut().insert(dep_node_index, side_effect);
738 })
739 }
740741fn alloc_and_color_node(
742&self,
743 key: DepNode,
744 edges: EdgesVec,
745 value_fingerprint: Option<Fingerprint>,
746 ) -> DepNodeIndex {
747if let Some(prev_index) = self.previous.node_to_index_opt(&key) {
748// Determine the color and index of the new `DepNode`.
749let is_green = if let Some(value_fingerprint) = value_fingerprint {
750if value_fingerprint == self.previous.value_fingerprint_for_index(prev_index) {
751// This is a green node: it existed in the previous compilation,
752 // its query was re-executed, and it has the same result as before.
753true
754} else {
755// This is a red node: it existed in the previous compilation, its query
756 // was re-executed, but it has a different result from before.
757false
758}
759 } else {
760// This is a red node, effectively: it existed in the previous compilation
761 // session, its query was re-executed, but it doesn't compute a result hash
762 // (i.e. it represents a `no_hash` query), so we have no way of determining
763 // whether or not the result was the same as before.
764false
765};
766767let value_fingerprint = value_fingerprint.unwrap_or(Fingerprint::ZERO);
768769let dep_node_index = self.current.encoder.send_and_color(
770prev_index,
771&self.colors,
772key,
773value_fingerprint,
774edges,
775is_green,
776 );
777778self.current.record_node(dep_node_index, key, value_fingerprint);
779780dep_node_index781 } else {
782self.current.alloc_new_node(key, edges, value_fingerprint.unwrap_or(Fingerprint::ZERO))
783 }
784 }
785786fn promote_node_and_deps_to_current(
787&self,
788 prev_index: SerializedDepNodeIndex,
789 ) -> Option<DepNodeIndex> {
790self.current.debug_assert_not_in_new_nodes(&self.previous, prev_index);
791792let dep_node_index = self.current.encoder.send_promoted(prev_index, &self.colors);
793794#[cfg(debug_assertions)]
795if let Some(dep_node_index) = dep_node_index {
796self.current.record_edge(
797dep_node_index,
798*self.previous.index_to_node(prev_index),
799self.previous.value_fingerprint_for_index(prev_index),
800 );
801 }
802803dep_node_index804 }
805}
806807impl DepGraph {
808/// Checks whether a previous work product exists for `v` and, if
809 /// so, return the path that leads to it. Used to skip doing work.
810pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
811self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
812 }
813814/// Access the map of work-products created during the cached run. Only
815 /// used during saving of the dep-graph.
816pub fn previous_work_products(&self) -> &WorkProductMap {
817&self.data.as_ref().unwrap().previous_work_products
818 }
819820pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
821self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
822 }
823824pub fn debug_dep_kind_was_loaded_from_disk(&self, dep_kind: DepKind) -> bool {
825// We only check if we have a dep node corresponding to the given dep kind.
826#[allow(rustc::potential_query_instability)]
827self.data
828 .as_ref()
829 .unwrap()
830 .debug_loaded_from_disk
831 .lock()
832 .iter()
833 .any(|node| node.kind == dep_kind)
834 }
835836fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
837if let Some(ref data) = self.data {
838return data.node_color(dep_node);
839 }
840841 DepNodeColor::Unknown842 }
843844pub fn try_mark_green<'tcx>(
845&self,
846 tcx: TyCtxt<'tcx>,
847 dep_node: &DepNode,
848 ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
849self.data()?.try_mark_green(tcx, dep_node)
850 }
851}
impl DepGraphData {
    /// Try to mark a node index for the node dep_node.
    ///
    /// A node will have an index, when it's already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node, when
    /// a node index can be found for that node.
    pub fn try_mark_green<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!tcx.is_eval_always(dep_node.kind));

        // Return None if the dep node didn't exist in the previous session
        let prev_index = self.previous.node_to_index_opt(dep_node)?;

        debug_assert_eq!(self.previous.index_to_node(prev_index), dep_node);

        match self.colors.get(prev_index) {
            DepNodeColor::Green(dep_node_index) => Some((prev_index, dep_node_index)),
            DepNodeColor::Red => None,
            DepNodeColor::Unknown => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, prev_index, None)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }
884885/// Try to mark a dep-node which existed in the previous compilation session as green.
886#[allow(clippy :: suspicious_else_formatting)]
{
let __tracing_attr_span;
let __tracing_attr_guard;
if ::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
&&
::tracing::Level::DEBUG <=
::tracing::level_filters::LevelFilter::current() ||
{ false } {
__tracing_attr_span =
{
use ::tracing::__macro_support::Callsite as _;
static __CALLSITE: ::tracing::callsite::DefaultCallsite =
{
static META: ::tracing::Metadata<'static> =
{
::tracing_core::metadata::Metadata::new("try_mark_previous_green",
"rustc_middle::dep_graph::graph", ::tracing::Level::DEBUG,
::tracing_core::__macro_support::Option::Some("compiler/rustc_middle/src/dep_graph/graph.rs"),
::tracing_core::__macro_support::Option::Some(886u32),
::tracing_core::__macro_support::Option::Some("rustc_middle::dep_graph::graph"),
::tracing_core::field::FieldSet::new(&[],
::tracing_core::callsite::Identifier(&__CALLSITE)),
::tracing::metadata::Kind::SPAN)
};
::tracing::callsite::DefaultCallsite::new(&META)
};
let mut interest = ::tracing::subscriber::Interest::never();
if ::tracing::Level::DEBUG <=
::tracing::level_filters::STATIC_MAX_LEVEL &&
::tracing::Level::DEBUG <=
::tracing::level_filters::LevelFilter::current() &&
{ interest = __CALLSITE.interest(); !interest.is_never() }
&&
::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
interest) {
let meta = __CALLSITE.metadata();
::tracing::Span::new(meta,
&{ meta.fields().value_set(&[]) })
} else {
let span =
::tracing::__macro_support::__disabled_span(__CALLSITE.metadata());
{};
span
}
};
__tracing_attr_guard = __tracing_attr_span.enter();
}
#[warn(clippy :: suspicious_else_formatting)]
{
#[allow(unknown_lints, unreachable_code, clippy ::
diverging_sub_expression, clippy :: empty_loop, clippy ::
let_unit_value, clippy :: let_with_type_underscore, clippy ::
needless_return, clippy :: unreachable)]
if false {
let __tracing_attr_fake_return: Option<DepNodeIndex> = loop {};
return __tracing_attr_fake_return;
}
{
let frame =
MarkFrame { index: prev_dep_node_index, parent: frame };
if true {
if !!tcx.is_eval_always(self.previous.index_to_node(prev_dep_node_index).kind)
{
::core::panicking::panic("assertion failed: !tcx.is_eval_always(self.previous.index_to_node(prev_dep_node_index).kind)")
};
};
for parent_dep_node_index in
self.previous.edge_targets_from(prev_dep_node_index) {
match self.colors.get(parent_dep_node_index) {
DepNodeColor::Green(_) => continue,
DepNodeColor::Red => return None,
DepNodeColor::Unknown => {}
}
let parent_dep_node =
self.previous.index_to_node(parent_dep_node_index);
if !tcx.is_eval_always(parent_dep_node.kind) &&
self.try_mark_previous_green(tcx, parent_dep_node_index,
Some(&frame)).is_some() {
continue;
}
if !tcx.try_force_from_dep_node(*parent_dep_node,
parent_dep_node_index, &frame) {
return None;
}
match self.colors.get(parent_dep_node_index) {
DepNodeColor::Green(_) => continue,
DepNodeColor::Red => return None,
DepNodeColor::Unknown => {}
}
if tcx.dcx().has_errors_or_delayed_bugs().is_none() {
{
::core::panicking::panic_fmt(format_args!("try_mark_previous_green() - forcing failed to set a color"));
};
}
return None;
}
let dep_node_index =
self.promote_node_and_deps_to_current(prev_dep_node_index)?;
Some(dep_node_index)
}
}
}#[instrument(skip(self, tcx, prev_dep_node_index, frame), level = "debug")]887fn try_mark_previous_green<'tcx>(
888&self,
889 tcx: TyCtxt<'tcx>,
890 prev_dep_node_index: SerializedDepNodeIndex,
891 frame: Option<&MarkFrame<'_>>,
892 ) -> Option<DepNodeIndex> {
893let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
894895// We never try to mark eval_always nodes as green
896debug_assert!(!tcx.is_eval_always(self.previous.index_to_node(prev_dep_node_index).kind));
897898for parent_dep_node_index in self.previous.edge_targets_from(prev_dep_node_index) {
899match self.colors.get(parent_dep_node_index) {
900// This dependency has been marked as green before, we are still ok and can
901 // continue checking the remaining dependencies.
902DepNodeColor::Green(_) => continue,
903904// This dependency's result is different to the previous compilation session. We
905 // cannot mark this dep_node as green, so stop checking.
906DepNodeColor::Red => return None,
907908// We still need to determine this dependency's colour.
909DepNodeColor::Unknown => {}
910 }
911912let parent_dep_node = self.previous.index_to_node(parent_dep_node_index);
913914// If this dependency isn't eval_always, try to mark it green recursively.
915if !tcx.is_eval_always(parent_dep_node.kind)
916 && self.try_mark_previous_green(tcx, parent_dep_node_index, Some(&frame)).is_some()
917 {
918continue;
919 }
920921// We failed to mark it green, so we try to force the query.
922if !tcx.try_force_from_dep_node(*parent_dep_node, parent_dep_node_index, &frame) {
923return None;
924 }
925926match self.colors.get(parent_dep_node_index) {
927 DepNodeColor::Green(_) => continue,
928 DepNodeColor::Red => return None,
929 DepNodeColor::Unknown => {}
930 }
931932if tcx.dcx().has_errors_or_delayed_bugs().is_none() {
933panic!("try_mark_previous_green() - forcing failed to set a color");
934 }
935936// If the query we just forced has resulted in some kind of compilation error, we
937 // cannot rely on the dep-node color having been properly updated. This means that the
938 // query system has reached an invalid state. We let the compiler continue (by
939 // returning `None`) so it can emit error messages and wind down, but rely on the fact
940 // that this invalid state will not be persisted to the incremental compilation cache
941 // because of compilation errors being present.
942return None;
943 }
944945// If we got here without hitting a `return` that means that all
946 // dependencies of this DepNode could be marked as green. Therefore we
947 // can also mark this DepNode as green.
948949 // There may be multiple threads trying to mark the same dep node green concurrently.
950951 // We allocating an entry for the node in the current dependency graph and
952 // adding all the appropriate edges imported from the previous graph.
953 //
954 // `no_hash` nodes may fail this promotion due to already being conservatively colored red.
955let dep_node_index = self.promote_node_and_deps_to_current(prev_dep_node_index)?;
956957// ... and finally storing a "Green" entry in the color map.
958 // Multiple threads can all write the same color here.
959960Some(dep_node_index)
961 }
962}
963964impl DepGraph {
965/// Returns true if the given node has been marked as red during the
966 /// current compilation session. Used in various assertions
967pub fn is_red(&self, dep_node: &DepNode) -> bool {
968#[allow(non_exhaustive_omitted_patterns)] match self.node_color(dep_node) {
DepNodeColor::Red => true,
_ => false,
}matches!(self.node_color(dep_node), DepNodeColor::Red)969 }
970971/// Returns true if the given node has been marked as green during the
972 /// current compilation session. Used in various assertions
973pub fn is_green(&self, dep_node: &DepNode) -> bool {
974#[allow(non_exhaustive_omitted_patterns)] match self.node_color(dep_node) {
DepNodeColor::Green(_) => true,
_ => false,
}matches!(self.node_color(dep_node), DepNodeColor::Green(_))975 }
976977pub fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
978&self,
979 sess: &Session,
980 dep_node: &DepNode,
981 msg: impl FnOnce() -> S,
982 ) {
983if let Some(data) = &self.data {
984data.assert_dep_node_not_yet_allocated_in_current_session(sess, dep_node, msg)
985 }
986 }
987988/// This method loads all on-disk cacheable query results into memory, so
989 /// they can be written out to the new cache file again. Most query results
990 /// will already be in memory but in the case where we marked something as
991 /// green but then did not need the value, that value will never have been
992 /// loaded from disk.
993 ///
994 /// This method will only load queries that will end up in the disk cache.
995 /// Other queries will not be executed.
996pub fn exec_cache_promotions<'tcx>(&self, tcx: TyCtxt<'tcx>) {
997let _prof_timer = tcx.prof.generic_activity("incr_comp_query_cache_promotion");
998999let data = self.data.as_ref().unwrap();
1000for prev_index in data.colors.values.indices() {
1001match data.colors.get(prev_index) {
1002 DepNodeColor::Green(_) => {
1003let dep_node = data.previous.index_to_node(prev_index);
1004if let Some(promote_fn) =
1005 tcx.dep_kind_vtable(dep_node.kind).promote_from_disk_fn
1006 {
1007 promote_fn(tcx, *dep_node)
1008 };
1009 }
1010 DepNodeColor::Unknown | DepNodeColor::Red => {
1011// We can skip red nodes because a node can only be marked
1012 // as red if the query result was recomputed and thus is
1013 // already in memory.
1014}
1015 }
1016 }
1017 }
10181019pub(crate) fn finish_encoding(&self) -> FileEncodeResult {
1020if let Some(data) = &self.data { data.current.encoder.finish(&data.current) } else { Ok(0) }
1021 }
10221023pub fn next_virtual_depnode_index(&self) -> DepNodeIndex {
1024if true {
if !self.data.is_none() {
::core::panicking::panic("assertion failed: self.data.is_none()")
};
};debug_assert!(self.data.is_none());
1025let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
1026DepNodeIndex::from_u32(index)
1027 }
1028}
10291030/// A "work product" is an intermediate result that we save into the
1031/// incremental directory for later re-use. The primary example are
1032/// the object files that we save for each partition at code
1033/// generation time.
1034///
1035/// Each work product is associated with a dep-node, representing the
1036/// process that produced the work-product. If that dep-node is found
1037/// to be dirty when we load up, then we will delete the work-product
1038/// at load time. If the work-product is found to be clean, then we
1039/// will keep a record in the `previous_work_products` list.
1040///
1041/// In addition, work products have an associated hash. This hash is
1042/// an extra hash that can be used to decide if the work-product from
1043/// a previous compilation can be re-used (in addition to the dirty
1044/// edges check).
1045///
1046/// As the primary example, consider the object files we generate for
1047/// each partition. In the first run, we create partitions based on
1048/// the symbols that need to be compiled. For each partition P, we
1049/// hash the symbols in P and create a `WorkProduct` record associated
1050/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
1051/// in P.
1052///
1053/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
1054/// judged to be clean (which means none of the things we read to
1055/// generate the partition were found to be dirty), it will be loaded
1056/// into previous work products. We will then regenerate the set of
1057/// symbols in the partition P and hash them (note that new symbols
1058/// may be added -- for example, new monomorphizations -- even if
1059/// nothing in P changed!). We will compare that hash against the
1060/// previous hash. If it matches up, we can reuse the object file.
1061#[derive(#[automatically_derived]
impl ::core::clone::Clone for WorkProduct {
#[inline]
fn clone(&self) -> WorkProduct {
WorkProduct {
cgu_name: ::core::clone::Clone::clone(&self.cgu_name),
saved_files: ::core::clone::Clone::clone(&self.saved_files),
}
}
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for WorkProduct {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field2_finish(f, "WorkProduct",
"cgu_name", &self.cgu_name, "saved_files", &&self.saved_files)
}
}Debug, const _: () =
{
impl<__E: ::rustc_span::SpanEncoder> ::rustc_serialize::Encodable<__E>
for WorkProduct {
fn encode(&self, __encoder: &mut __E) {
match *self {
WorkProduct {
cgu_name: ref __binding_0, saved_files: ref __binding_1 } =>
{
::rustc_serialize::Encodable::<__E>::encode(__binding_0,
__encoder);
::rustc_serialize::Encodable::<__E>::encode(__binding_1,
__encoder);
}
}
}
}
};Encodable, const _: () =
{
impl<__D: ::rustc_span::SpanDecoder> ::rustc_serialize::Decodable<__D>
for WorkProduct {
fn decode(__decoder: &mut __D) -> Self {
WorkProduct {
cgu_name: ::rustc_serialize::Decodable::decode(__decoder),
saved_files: ::rustc_serialize::Decodable::decode(__decoder),
}
}
}
};Decodable)]
1062pub struct WorkProduct {
1063pub cgu_name: String,
1064/// Saved files associated with this CGU. In each key/value pair, the value is the path to the
1065 /// saved file and the key is some identifier for the type of file being saved.
1066 ///
1067 /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
1068 /// the object file's path, and "dwo" to the dwarf object file's path.
1069pub saved_files: UnordMap<String, String>,
1070}
// Maps each `WorkProductId` to the metadata of the corresponding saved work product.
pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;
10731074// Index type for `DepNodeData`'s edges.
1075impl ::std::fmt::Debug for EdgeIndex {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
fmt.write_fmt(format_args!("{0}", self.as_u32()))
}
}rustc_index::newtype_index! {
1076struct EdgeIndex {}
1077}10781079/// `CurrentDepGraph` stores the dependency graph for the current session. It
1080/// will be populated as we run queries or tasks. We never remove nodes from the
1081/// graph: they are only added.
1082///
1083/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
1084/// in memory. This is important, because these graph structures are some of the
1085/// largest in the compiler.
1086///
1087/// For this reason, we avoid storing `DepNode`s more than once as map
1088/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
1089/// graph, and we map nodes in the previous graph to indices via a two-step
1090/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
1091/// and the `prev_index_to_index` vector (which is more compact and faster than
1092/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
1093///
1094/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
1095/// and `prev_index_to_index` fields are locked separately. Operations that take
1096/// a `DepNodeIndex` typically just access the `data` field.
1097///
1098/// We only need to manipulate at most two locks simultaneously:
1099/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
1100/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
1101/// first, and `data` second.
1102pub(super) struct CurrentDepGraph {
1103 encoder: GraphEncoder,
1104 anon_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,
11051106/// This is used to verify that value fingerprints do not change between the
1107 /// creation of a node and its recomputation.
1108#[cfg(debug_assertions)]
1109value_fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,
11101111/// Used to trap when a specific edge is added to the graph.
1112 /// This is used for debug purposes and is only active with `debug_assertions`.
1113#[cfg(debug_assertions)]
1114forbidden_edge: Option<EdgeFilter>,
11151116/// Used to verify the absence of hash collisions among DepNodes.
1117 /// This field is only `Some` if the `-Z incremental_verify_ich` option is present
1118 /// or if `debug_assertions` are enabled.
1119 ///
1120 /// The map contains all DepNodes that have been allocated in the current session so far.
1121nodes_in_current_session: Option<Lock<FxHashMap<DepNode, DepNodeIndex>>>,
11221123/// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
1124 /// their edges. This has the beneficial side-effect that multiple anonymous
1125 /// nodes can be coalesced into one without changing the semantics of the
1126 /// dependency graph. However, the merging of nodes can lead to a subtle
1127 /// problem during red-green marking: The color of an anonymous node from
1128 /// the current session might "shadow" the color of the node with the same
1129 /// ID from the previous session. In order to side-step this problem, we make
1130 /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
1131 /// This is implemented by mixing a session-key into the ID fingerprint of
1132 /// each anon node. The session-key is a hash of the number of previous sessions.
1133anon_id_seed: Fingerprint,
11341135/// These are simple counters that are for profiling and
1136 /// debugging and only active with `debug_assertions`.
1137pub(super) total_read_count: AtomicU64,
1138pub(super) total_duplicate_read_count: AtomicU64,
1139}
11401141impl CurrentDepGraph {
1142fn new(
1143 session: &Session,
1144 prev_graph_node_count: usize,
1145 encoder: FileEncoder,
1146 previous: Arc<SerializedDepGraph>,
1147 ) -> Self {
1148let mut stable_hasher = StableHasher::new();
1149previous.session_count().hash(&mut stable_hasher);
1150let anon_id_seed = stable_hasher.finish();
11511152#[cfg(debug_assertions)]
1153let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
1154Ok(s) => match EdgeFilter::new(&s) {
1155Ok(f) => Some(f),
1156Err(err) => {
::core::panicking::panic_fmt(format_args!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {0}",
err));
}panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
1157 },
1158Err(_) => None,
1159 };
11601161let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;
11621163let new_node_dbg =
1164session.opts.unstable_opts.incremental_verify_ich || truecfg!(debug_assertions);
11651166CurrentDepGraph {
1167 encoder: GraphEncoder::new(session, encoder, prev_graph_node_count, previous),
1168 anon_node_to_index: ShardedHashMap::with_capacity(
1169// FIXME: The count estimate is off as anon nodes are only a portion of the nodes.
1170new_node_count_estimate / sharded::shards(),
1171 ),
1172anon_id_seed,
1173#[cfg(debug_assertions)]
1174forbidden_edge,
1175#[cfg(debug_assertions)]
1176value_fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
1177 nodes_in_current_session: new_node_dbg.then(|| {
1178Lock::new(FxHashMap::with_capacity_and_hasher(
1179new_node_count_estimate,
1180 Default::default(),
1181 ))
1182 }),
1183 total_read_count: AtomicU64::new(0),
1184 total_duplicate_read_count: AtomicU64::new(0),
1185 }
1186 }
11871188#[cfg(debug_assertions)]
1189fn record_edge(
1190&self,
1191 dep_node_index: DepNodeIndex,
1192 key: DepNode,
1193 value_fingerprint: Fingerprint,
1194 ) {
1195if let Some(forbidden_edge) = &self.forbidden_edge {
1196forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
1197 }
1198let prior_value_fingerprint = *self1199 .value_fingerprints
1200 .lock()
1201 .get_or_insert_with(dep_node_index, || value_fingerprint);
1202match (&prior_value_fingerprint, &value_fingerprint) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::Some(format_args!("Unstable fingerprints for {0:?}",
key)));
}
}
};assert_eq!(prior_value_fingerprint, value_fingerprint, "Unstable fingerprints for {key:?}");
1203 }
12041205#[inline(always)]
1206fn record_node(
1207&self,
1208 dep_node_index: DepNodeIndex,
1209 key: DepNode,
1210 _value_fingerprint: Fingerprint,
1211 ) {
1212#[cfg(debug_assertions)]
1213self.record_edge(dep_node_index, key, _value_fingerprint);
12141215if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
1216outline(|| {
1217if nodes_in_current_session.lock().insert(key, dep_node_index).is_some() {
1218{
::core::panicking::panic_fmt(format_args!("Found duplicate dep-node {0:?}",
key));
};panic!("Found duplicate dep-node {key:?}");
1219 }
1220 });
1221 }
1222 }
12231224/// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
1225 /// Assumes that this is a node that has no equivalent in the previous dep-graph.
1226#[inline(always)]
1227fn alloc_new_node(
1228&self,
1229 key: DepNode,
1230 edges: EdgesVec,
1231 value_fingerprint: Fingerprint,
1232 ) -> DepNodeIndex {
1233let dep_node_index = self.encoder.send_new(key, value_fingerprint, edges);
12341235self.record_node(dep_node_index, key, value_fingerprint);
12361237dep_node_index1238 }
12391240#[inline]
1241fn debug_assert_not_in_new_nodes(
1242&self,
1243 prev_graph: &SerializedDepGraph,
1244 prev_index: SerializedDepNodeIndex,
1245 ) {
1246if !is_dyn_thread_safe()
1247 && let Some(ref nodes_in_current_session) = self.nodes_in_current_session
1248 {
1249if true {
if !!nodes_in_current_session.lock().contains_key(&prev_graph.index_to_node(prev_index))
{
{
::core::panicking::panic_fmt(format_args!("node from previous graph present in new node collection"));
}
};
};debug_assert!(
1250 !nodes_in_current_session
1251 .lock()
1252 .contains_key(&prev_graph.index_to_node(prev_index)),
1253"node from previous graph present in new node collection"
1254);
1255 }
1256 }
1257}
12581259#[derive(#[automatically_derived]
impl<'a> ::core::fmt::Debug for TaskDepsRef<'a> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
TaskDepsRef::Allow(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f, "Allow",
&__self_0),
TaskDepsRef::EvalAlways =>
::core::fmt::Formatter::write_str(f, "EvalAlways"),
TaskDepsRef::Ignore =>
::core::fmt::Formatter::write_str(f, "Ignore"),
TaskDepsRef::Forbid =>
::core::fmt::Formatter::write_str(f, "Forbid"),
}
}
}Debug, #[automatically_derived]
impl<'a> ::core::clone::Clone for TaskDepsRef<'a> {
#[inline]
fn clone(&self) -> TaskDepsRef<'a> {
let _: ::core::clone::AssertParamIsClone<&'a Lock<TaskDeps>>;
*self
}
}Clone, #[automatically_derived]
impl<'a> ::core::marker::Copy for TaskDepsRef<'a> { }Copy)]
1260pub enum TaskDepsRef<'a> {
1261/// New dependencies can be added to the
1262 /// `TaskDeps`. This is used when executing a 'normal' query
1263 /// (no `eval_always` modifier)
1264Allow(&'a Lock<TaskDeps>),
1265/// This is used when executing an `eval_always` query. We don't
1266 /// need to track dependencies for a query that's always
1267 /// re-executed -- but we need to know that this is an `eval_always`
1268 /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
1269 /// when directly feeding other queries.
1270EvalAlways,
1271/// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
1272Ignore,
1273/// Any attempt to add new dependencies will cause a panic.
1274 /// This is used when decoding a query result from disk,
1275 /// to ensure that the decoding process doesn't itself
1276 /// require the execution of any queries.
1277Forbid,
1278}
12791280#[derive(#[automatically_derived]
impl ::core::fmt::Debug for TaskDeps {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field3_finish(f, "TaskDeps",
"node", &self.node, "reads", &self.reads, "read_set",
&&self.read_set)
}
}Debug)]
1281pub struct TaskDeps {
1282#[cfg(debug_assertions)]
1283node: Option<DepNode>,
12841285/// A vector of `DepNodeIndex`, basically.
1286reads: EdgesVec,
12871288/// When adding new edges to `reads` in `DepGraph::read_index` we need to determine if the edge
1289 /// has been seen before. If the number of elements in `reads` is small, we just do a linear
1290 /// scan. If the number is higher, a hashset has better perf. This field is that hashset. It's
1291 /// only used if the number of elements in `reads` exceeds `LINEAR_SCAN_MAX`.
1292read_set: FxHashSet<DepNodeIndex>,
1293}
impl TaskDeps {
    /// See `TaskDeps::read_set` above.
    const LINEAR_SCAN_MAX: usize = 16;

    // Fresh, empty dependency-tracking state for one task. `read_set_capacity`
    // pre-sizes the dedup hashset; the `node` parameter only exists in
    // debug builds (cfg'd parameter).
    #[inline]
    fn new(#[cfg(debug_assertions)] node: Option<DepNode>, read_set_capacity: usize) -> Self {
        TaskDeps {
            #[cfg(debug_assertions)]
            node,
            reads: EdgesVec::new(),
            read_set: FxHashSet::with_capacity_and_hasher(read_set_capacity, Default::default()),
        }
    }
}
// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
pub(super) struct DepNodeColorMap {
    // One atomic u32 per previous-session node; see COMPRESSED_* constants
    // for the encoding.
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}
// All values below `COMPRESSED_RED` are green.
const COMPRESSED_RED: u32 = u32::MAX - 1;
// Sentinel for "not yet colored"; every entry starts out with this value
// (see `DepNodeColorMap::new`).
const COMPRESSED_UNKNOWN: u32 = u32::MAX;
13191320impl DepNodeColorMap {
1321fn new(size: usize) -> DepNodeColorMap {
1322if true {
if !(COMPRESSED_RED > DepNodeIndex::MAX_AS_U32) {
::core::panicking::panic("assertion failed: COMPRESSED_RED > DepNodeIndex::MAX_AS_U32")
};
};debug_assert!(COMPRESSED_RED > DepNodeIndex::MAX_AS_U32);
1323DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_UNKNOWN)).collect() }
1324 }
13251326#[inline]
1327pub(super) fn current(&self, index: SerializedDepNodeIndex) -> Option<DepNodeIndex> {
1328let value = self.values[index].load(Ordering::Relaxed);
1329if value <= DepNodeIndex::MAX_AS_U32 { Some(DepNodeIndex::from_u32(value)) } else { None }
1330 }
13311332/// Atomically sets the color of a previous-session dep node to either green
1333 /// or red, if it has not already been colored.
1334 ///
1335 /// If the node already has a color, the new color is ignored, and the
1336 /// return value indicates the existing color.
1337#[inline(always)]
1338pub(super) fn try_set_color(
1339&self,
1340 prev_index: SerializedDepNodeIndex,
1341 color: DesiredColor,
1342 ) -> TrySetColorResult {
1343match self.values[prev_index].compare_exchange(
1344COMPRESSED_UNKNOWN,
1345match color {
1346 DesiredColor::Red => COMPRESSED_RED,
1347 DesiredColor::Green { index } => index.as_u32(),
1348 },
1349 Ordering::Relaxed,
1350 Ordering::Relaxed,
1351 ) {
1352Ok(_) => TrySetColorResult::Success,
1353Err(COMPRESSED_RED) => TrySetColorResult::AlreadyRed,
1354Err(index) => TrySetColorResult::AlreadyGreen { index: DepNodeIndex::from_u32(index) },
1355 }
1356 }
13571358#[inline]
1359pub(super) fn get(&self, index: SerializedDepNodeIndex) -> DepNodeColor {
1360let value = self.values[index].load(Ordering::Acquire);
1361// Green is by far the most common case. Check for that first so we can succeed with a
1362 // single comparison.
1363if value < COMPRESSED_RED {
1364 DepNodeColor::Green(DepNodeIndex::from_u32(value))
1365 } else if value == COMPRESSED_RED {
1366 DepNodeColor::Red1367 } else {
1368if true {
match (&value, &COMPRESSED_UNKNOWN) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val,
&*right_val, ::core::option::Option::None);
}
}
};
};debug_assert_eq!(value, COMPRESSED_UNKNOWN);
1369 DepNodeColor::Unknown1370 }
1371 }
1372}
13731374/// The color that [`DepNodeColorMap::try_set_color`] should try to apply to a node.
1375#[derive(#[automatically_derived]
impl ::core::clone::Clone for DesiredColor {
#[inline]
fn clone(&self) -> DesiredColor {
let _: ::core::clone::AssertParamIsClone<DepNodeIndex>;
*self
}
}Clone, #[automatically_derived]
impl ::core::marker::Copy for DesiredColor { }Copy, #[automatically_derived]
impl ::core::fmt::Debug for DesiredColor {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
DesiredColor::Red => ::core::fmt::Formatter::write_str(f, "Red"),
DesiredColor::Green { index: __self_0 } =>
::core::fmt::Formatter::debug_struct_field1_finish(f, "Green",
"index", &__self_0),
}
}
}Debug)]
1376pub(super) enum DesiredColor {
1377/// Try to mark the node red.
1378Red,
1379/// Try to mark the node green, associating it with a current-session node index.
1380Green { index: DepNodeIndex },
1381}
13821383/// Return value of [`DepNodeColorMap::try_set_color`], indicating success or failure,
1384/// and (on failure) what the existing color is.
1385#[derive(#[automatically_derived]
impl ::core::clone::Clone for TrySetColorResult {
#[inline]
fn clone(&self) -> TrySetColorResult {
let _: ::core::clone::AssertParamIsClone<DepNodeIndex>;
*self
}
}Clone, #[automatically_derived]
impl ::core::marker::Copy for TrySetColorResult { }Copy, #[automatically_derived]
impl ::core::fmt::Debug for TrySetColorResult {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
TrySetColorResult::Success =>
::core::fmt::Formatter::write_str(f, "Success"),
TrySetColorResult::AlreadyRed =>
::core::fmt::Formatter::write_str(f, "AlreadyRed"),
TrySetColorResult::AlreadyGreen { index: __self_0 } =>
::core::fmt::Formatter::debug_struct_field1_finish(f,
"AlreadyGreen", "index", &__self_0),
}
}
}Debug)]
1386pub(super) enum TrySetColorResult {
1387/// The [`DesiredColor`] was freshly applied to the node.
1388Success,
1389/// Coloring failed because the node was already marked red.
1390AlreadyRed,
1391/// Coloring failed because the node was already marked green,
1392 /// and corresponds to node `index` in the current-session dep graph.
1393AlreadyGreen { index: DepNodeIndex },
1394}
13951396#[inline(never)]
1397#[cold]
1398pub(crate) fn print_markframe_trace(graph: &DepGraph, frame: &MarkFrame<'_>) {
1399let data = graph.data.as_ref().unwrap();
14001401{
::std::io::_eprint(format_args!("there was a panic while trying to force a dep node\n"));
};eprintln!("there was a panic while trying to force a dep node");
1402{ ::std::io::_eprint(format_args!("try_mark_green dep node stack:\n")); };eprintln!("try_mark_green dep node stack:");
14031404let mut i = 0;
1405let mut current = Some(frame);
1406while let Some(frame) = current {
1407let node = data.previous.index_to_node(frame.index);
1408{ ::std::io::_eprint(format_args!("#{0} {1:?}\n", i, node)); };eprintln!("#{i} {node:?}");
1409 current = frame.parent;
1410 i += 1;
1411 }
14121413{
::std::io::_eprint(format_args!("end of try_mark_green dep node stack\n"));
};eprintln!("end of try_mark_green dep node stack");
1414}
14151416#[cold]
1417#[inline(never)]
1418fn panic_on_forbidden_read(data: &DepGraphData, dep_node_index: DepNodeIndex) -> ! {
1419// We have to do an expensive reverse-lookup of the DepNode that
1420 // corresponds to `dep_node_index`, but that's OK since we are about
1421 // to ICE anyway.
1422let mut dep_node = None;
14231424// First try to find the dep node among those that already existed in the
1425 // previous session and has been marked green
1426for prev_index in data.colors.values.indices() {
1427if data.colors.current(prev_index) == Some(dep_node_index) {
1428 dep_node = Some(*data.previous.index_to_node(prev_index));
1429break;
1430 }
1431 }
14321433if dep_node.is_none()
1434 && let Some(nodes) = &data.current.nodes_in_current_session
1435 {
1436// Try to find it among the nodes allocated so far in this session
1437 // This is OK, there's only ever one node result possible so this is deterministic.
1438#[allow(rustc::potential_query_instability)]
1439if let Some((node, _)) = nodes.lock().iter().find(|&(_, index)| *index == dep_node_index) {
1440dep_node = Some(*node);
1441 }
1442 }
14431444let dep_node = dep_node.map_or_else(
1445 || ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("with index {0:?}", dep_node_index))
})format!("with index {:?}", dep_node_index),
1446 |dep_node| ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("`{0:?}`", dep_node))
})format!("`{:?}`", dep_node),
1447 );
14481449{
::core::panicking::panic_fmt(format_args!("Error: trying to record dependency on DepNode {0} in a context that does not allow it (e.g. during query deserialization). The most common case of recording a dependency on a DepNode `foo` is when the corresponding query `foo` is invoked. Invoking queries is not allowed as part of loading something from the incremental on-disk cache. See <https://github.com/rust-lang/rust/pull/91919>.",
dep_node));
}panic!(
1450"Error: trying to record dependency on DepNode {dep_node} in a \
1451 context that does not allow it (e.g. during query deserialization). \
1452 The most common case of recording a dependency on a DepNode `foo` is \
1453 when the corresponding query `foo` is invoked. Invoking queries is not \
1454 allowed as part of loading something from the incremental on-disk cache. \
1455 See <https://github.com/rust-lang/rust/pull/91919>."
1456)1457}
14581459impl<'tcx> TyCtxt<'tcx> {
1460/// Return whether this kind always require evaluation.
1461#[inline(always)]
1462fn is_eval_always(self, kind: DepKind) -> bool {
1463self.dep_kind_vtable(kind).is_eval_always
1464 }
1465}