rustc_type_ir/search_graph/mod.rs
/// The search graph is responsible for caching and cycle detection in the trait
/// solver. Making sure that caching doesn't result in soundness bugs or unstable
/// query results is very challenging and makes this one of the most-involved
/// self-contained components of the compiler.
///
/// We added fuzzing support to test its correctness. The fuzzers used to verify
/// the current implementation can be found in https://github.com/lcnr/search_graph_fuzz.
///
/// This is just a quick overview of the general design; please check out the relevant
/// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for
/// more details. Caching is split between a global cache and the per-cycle `provisional_cache`.
/// The global cache has to be completely unobservable, while the per-cycle cache may impact
/// behavior as long as the resulting behavior is still correct.
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;

use derive_where::derive_where;
use rustc_index::{Idx, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_NoContext};
use tracing::debug;

use crate::data_structures::HashMap;

mod global_cache;
use global_cache::CacheData;
pub use global_cache::GlobalCache;

/// The search graph does not use `Interner` directly; this enables fuzzing it
/// without having to stub the rest of the interner. We don't make this a
/// supertrait of `Interner` as users of the shared type library shouldn't have
/// to care about `Input` and `Result`: they are implementation details of the
/// search graph.
pub trait Cx: Copy {
    type Input: Debug + Eq + Hash + Copy;
    type Result: Debug + Eq + Hash + Copy;

    type DepNodeIndex;
    type Tracked<T: Debug + Clone>: Debug;
    fn mk_tracked<T: Debug + Clone>(
        self,
        data: T,
        dep_node_index: Self::DepNodeIndex,
    ) -> Self::Tracked<T>;
    fn get_tracked<T: Debug + Clone>(self, tracked: &Self::Tracked<T>) -> T;
    fn with_cached_task<T>(self, task: impl FnOnce() -> T) -> (T, Self::DepNodeIndex);

    fn with_global_cache<R>(self, f: impl FnOnce(&mut GlobalCache<Self>) -> R) -> R;

    fn evaluation_is_concurrent(&self) -> bool;
}
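
// Illustrative sketch (hypothetical, not used by the compiler): a minimal `Cx`
// stub of the kind a fuzzer or unit test could use, as hinted at by the module
// docs. All names (`StubCx`, the `u64` input/result encoding) are made up.
// Dependency tracking is a no-op and the global cache simply lives in a `RefCell`.
#[cfg(test)]
#[allow(dead_code)]
mod stub_cx_example {
    use std::cell::RefCell;

    use super::*;

    #[derive(Clone, Copy)]
    pub(super) struct StubCx<'a> {
        pub(super) global_cache: &'a RefCell<GlobalCache<StubCx<'a>>>,
    }

    impl<'a> Cx for StubCx<'a> {
        // Goals and results are just opaque numbers in this sketch.
        type Input = u64;
        type Result = u64;

        // There is no incremental compilation here, so dependency tracking
        // carries no information.
        type DepNodeIndex = ();
        type Tracked<T: Debug + Clone> = T;
        fn mk_tracked<T: Debug + Clone>(self, data: T, _: ()) -> T {
            data
        }
        fn get_tracked<T: Debug + Clone>(self, tracked: &T) -> T {
            tracked.clone()
        }
        fn with_cached_task<T>(self, task: impl FnOnce() -> T) -> (T, ()) {
            (task(), ())
        }

        fn with_global_cache<R>(self, f: impl FnOnce(&mut GlobalCache<Self>) -> R) -> R {
            f(&mut self.global_cache.borrow_mut())
        }

        fn evaluation_is_concurrent(&self) -> bool {
            false
        }
    }
}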

pub trait Delegate {
    type Cx: Cx;
    /// Whether to use the provisional cache. Set to `false` by a fuzzer when
    /// validating the search graph.
    const ENABLE_PROVISIONAL_CACHE: bool;
    type ValidationScope;
    /// Returning `Some` disables the global cache for the current goal.
    ///
    /// The `ValidationScope` is used when fuzzing the search graph to track
    /// for which goals the global cache has been disabled. This is necessary
    /// as we may otherwise ignore the global cache entry for some goal `G`
    /// only to later use it, failing to detect a cycle with the goal `G` and
    /// potentially changing the result.
    fn enter_validation_scope(
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
    ) -> Option<Self::ValidationScope>;

    const FIXPOINT_STEP_LIMIT: usize;

    type ProofTreeBuilder;
    fn inspect_is_noop(inspect: &mut Self::ProofTreeBuilder) -> bool;

    const DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW: usize;

    fn initial_provisional_result(
        cx: Self::Cx,
        kind: PathKind,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;
    fn is_initial_provisional_result(
        cx: Self::Cx,
        kind: PathKind,
        input: <Self::Cx as Cx>::Input,
        result: <Self::Cx as Cx>::Result,
    ) -> bool;
    fn on_stack_overflow(
        cx: Self::Cx,
        inspect: &mut Self::ProofTreeBuilder,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;
    fn on_fixpoint_overflow(
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;

    fn is_ambiguous_result(result: <Self::Cx as Cx>::Result) -> bool;
    fn propagate_ambiguity(
        cx: Self::Cx,
        for_input: <Self::Cx as Cx>::Input,
        from_result: <Self::Cx as Cx>::Result,
    ) -> <Self::Cx as Cx>::Result;
}
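
// Illustrative sketch (hypothetical, not used by the compiler): a `Delegate`
// stub matching the `StubCx` sketch above, roughly in the spirit of the fuzzers
// mentioned in the module docs. All names, constants, and the encoding of
// results as small integers are made up.
#[cfg(test)]
#[allow(dead_code)]
mod stub_delegate_example {
    use super::stub_cx_example::StubCx;
    use super::*;

    struct StubDelegate;

    impl Delegate for StubDelegate {
        type Cx = StubCx<'static>;

        // A fuzzer validating the search graph would set this to `false`.
        const ENABLE_PROVISIONAL_CACHE: bool = true;
        type ValidationScope = ();
        fn enter_validation_scope(_: Self::Cx, _: u64) -> Option<()> {
            // Never disable the global cache in this sketch.
            None
        }

        const FIXPOINT_STEP_LIMIT: usize = 8;

        // No proof tree support.
        type ProofTreeBuilder = ();
        fn inspect_is_noop(_: &mut ()) -> bool {
            true
        }

        const DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW: usize = 4;

        // Encode results as integers: 0 success, 1 ambiguity, 2 error, 3 overflow.
        fn initial_provisional_result(_: Self::Cx, kind: PathKind, _: u64) -> u64 {
            match kind {
                PathKind::Coinductive => 0,
                PathKind::Unknown => 1,
                PathKind::Inductive => 2,
            }
        }
        fn is_initial_provisional_result(
            cx: Self::Cx,
            kind: PathKind,
            input: u64,
            result: u64,
        ) -> bool {
            Self::initial_provisional_result(cx, kind, input) == result
        }
        fn on_stack_overflow(_: Self::Cx, _: &mut (), _: u64) -> u64 {
            3
        }
        fn on_fixpoint_overflow(_: Self::Cx, _: u64) -> u64 {
            3
        }

        fn is_ambiguous_result(result: u64) -> bool {
            result == 1
        }
        fn propagate_ambiguity(_: Self::Cx, _: u64, _: u64) -> u64 {
            1
        }
    }
}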

/// In the initial iteration of a cycle, we do not yet have a provisional
/// result. In this case we return an initial provisional result, depending
/// on the kind of cycle.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Decodable_NoContext, Encodable_NoContext, HashStable_NoContext)
)]
pub enum PathKind {
    /// A path consisting of only inductive/unproductive steps. Their initial
    /// provisional result is `Err(NoSolution)`. We currently treat them as
    /// `PathKind::Unknown` during coherence until we're fully confident in
    /// our approach.
    Inductive,
    /// A path which is not coinductive right now, but which we may want to
    /// change to be coinductive in the future. We return an ambiguous
    /// result in this case to prevent people from relying on this.
    Unknown,
    /// A path with at least one coinductive step. Such cycles hold.
    Coinductive,
}

impl PathKind {
    /// Returns the path kind when merging `self` with `rest`.
    ///
    /// Given an inductive path `self` and a coinductive path `rest`,
    /// the path `self -> rest` would be coinductive.
    ///
    /// This operation represents an ordering and would be equivalent
    /// to `max(self, rest)`.
    fn extend(self, rest: PathKind) -> PathKind {
        match (self, rest) {
            (PathKind::Coinductive, _) | (_, PathKind::Coinductive) => PathKind::Coinductive,
            (PathKind::Unknown, _) | (_, PathKind::Unknown) => PathKind::Unknown,
            (PathKind::Inductive, PathKind::Inductive) => PathKind::Inductive,
        }
    }
}
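
// Illustrative example (hypothetical, module and test names are made up): a
// small sanity test showing that `PathKind::extend` acts like taking the
// "larger" of the two kinds under the ordering Inductive < Unknown < Coinductive,
// as described in its doc comment.
#[cfg(test)]
mod path_kind_extend_example {
    use super::PathKind;

    #[test]
    fn extend_behaves_like_max() {
        use PathKind::*;
        assert_eq!(Inductive.extend(Inductive), Inductive);
        assert_eq!(Inductive.extend(Unknown), Unknown);
        assert_eq!(Unknown.extend(Inductive), Unknown);
        assert_eq!(Inductive.extend(Coinductive), Coinductive);
        assert_eq!(Coinductive.extend(Unknown), Coinductive);
    }
}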

/// The kinds of cycles a cycle head was involved in.
///
/// This is used to avoid rerunning a cycle if there's
/// just a single usage kind and the final result matches
/// its provisional result.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UsageKind {
    Single(PathKind),
    Mixed,
}
impl From<PathKind> for UsageKind {
    fn from(path: PathKind) -> UsageKind {
        UsageKind::Single(path)
    }
}
impl UsageKind {
    #[must_use]
    fn merge(self, other: impl Into<Self>) -> Self {
        match (self, other.into()) {
            (UsageKind::Mixed, _) | (_, UsageKind::Mixed) => UsageKind::Mixed,
            (UsageKind::Single(lhs), UsageKind::Single(rhs)) => {
                if lhs == rhs {
                    UsageKind::Single(lhs)
                } else {
                    UsageKind::Mixed
                }
            }
        }
    }
}
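
// Illustrative example (hypothetical, names are made up): `merge` collapses
// differing path kinds into `UsageKind::Mixed`, which prevents the
// "result matches the initial provisional result" shortcut when checking
// whether a cycle head has reached a fixpoint.
#[cfg(test)]
mod usage_kind_merge_example {
    use super::{PathKind, UsageKind};

    #[test]
    fn merging_different_kinds_is_mixed() {
        let coinductive = UsageKind::Single(PathKind::Coinductive);
        assert_eq!(coinductive.merge(PathKind::Coinductive), coinductive);
        assert_eq!(coinductive.merge(PathKind::Inductive), UsageKind::Mixed);
        assert_eq!(UsageKind::Mixed.merge(PathKind::Coinductive), UsageKind::Mixed);
    }
}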

/// For each goal we track whether the paths from this goal
/// to its cycle heads are coinductive.
///
/// This is a necessary condition to rebase provisional cache
/// entries.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllPathsToHeadCoinductive {
    Yes,
    No,
}
impl From<PathKind> for AllPathsToHeadCoinductive {
    fn from(path: PathKind) -> AllPathsToHeadCoinductive {
        match path {
            PathKind::Coinductive => AllPathsToHeadCoinductive::Yes,
            _ => AllPathsToHeadCoinductive::No,
        }
    }
}
impl AllPathsToHeadCoinductive {
    #[must_use]
    fn merge(self, other: impl Into<Self>) -> Self {
        match (self, other.into()) {
            (AllPathsToHeadCoinductive::Yes, AllPathsToHeadCoinductive::Yes) => {
                AllPathsToHeadCoinductive::Yes
            }
            (AllPathsToHeadCoinductive::No, _) | (_, AllPathsToHeadCoinductive::No) => {
                AllPathsToHeadCoinductive::No
            }
        }
    }
    fn and_merge(&mut self, other: impl Into<Self>) {
        *self = self.merge(other);
    }
}
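
// Illustrative example (hypothetical, names are made up): `merge` is a logical
// AND over "are all paths to the head coinductive", so a single non-coinductive
// path is enough to flip the answer to `No` for good.
#[cfg(test)]
mod all_paths_coinductive_example {
    use super::{AllPathsToHeadCoinductive, PathKind};

    #[test]
    fn merge_is_logical_and() {
        let mut paths = AllPathsToHeadCoinductive::from(PathKind::Coinductive);
        assert_eq!(paths, AllPathsToHeadCoinductive::Yes);
        paths.and_merge(PathKind::Inductive);
        assert_eq!(paths, AllPathsToHeadCoinductive::No);
        paths.and_merge(PathKind::Coinductive);
        assert_eq!(paths, AllPathsToHeadCoinductive::No);
    }
}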

#[derive(Debug, Clone, Copy)]
struct AvailableDepth(usize);
impl AvailableDepth {
    /// Returns the remaining depth allowed for nested goals.
    ///
    /// This is generally simply one less than the current depth.
    /// However, if we encountered overflow, we significantly reduce
    /// the remaining depth of all nested goals to prevent hangs
    /// in case there is exponential blowup.
    fn allowed_depth_for_nested<D: Delegate>(
        root_depth: AvailableDepth,
        stack: &IndexVec<StackDepth, StackEntry<D::Cx>>,
    ) -> Option<AvailableDepth> {
        if let Some(last) = stack.raw.last() {
            if last.available_depth.0 == 0 {
                return None;
            }

            Some(if last.encountered_overflow {
                AvailableDepth(last.available_depth.0 / D::DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW)
            } else {
                AvailableDepth(last.available_depth.0 - 1)
            })
        } else {
            Some(root_depth)
        }
    }

    /// Whether we're allowed to use a global cache entry which required
    /// the given depth.
    fn cache_entry_is_applicable(self, additional_depth: usize) -> bool {
        self.0 >= additional_depth
    }
}
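
// Illustrative example (hypothetical, the divisor and numbers are made up): a
// sketch of the depth budget described above. Without overflow the budget
// shrinks by one per nested goal; after overflow it is divided, so subtrees
// below an overflowing goal stay shallow. Whether a global cache entry may be
// reused then depends on the remaining budget.
#[cfg(test)]
mod available_depth_example {
    use super::AvailableDepth;

    #[test]
    fn depth_budget_sketch() {
        // One step without overflow: 16 -> 15.
        let without_overflow = AvailableDepth(16 - 1);
        // One step below a goal which encountered overflow, with a
        // hypothetical `DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW` of 4: 16 -> 4.
        let with_overflow = AvailableDepth(16 / 4);

        // A cached result which required 10 additional levels of depth is
        // only applicable with the larger remaining budget.
        assert!(without_overflow.cache_entry_is_applicable(10));
        assert!(!with_overflow.cache_entry_is_applicable(10));
    }
}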

/// All cycle heads a given goal depends on, ordered by their stack depth.
///
/// We also track all paths from this goal to each head. This is necessary
/// when rebasing provisional cache results.
#[derive(Clone, Debug, PartialEq, Eq, Default)]
struct CycleHeads {
    heads: BTreeMap<StackDepth, AllPathsToHeadCoinductive>,
}

impl CycleHeads {
    fn is_empty(&self) -> bool {
        self.heads.is_empty()
    }

    fn highest_cycle_head(&self) -> StackDepth {
        self.opt_highest_cycle_head().unwrap()
    }

    fn opt_highest_cycle_head(&self) -> Option<StackDepth> {
        self.heads.last_key_value().map(|(k, _)| *k)
    }

    fn opt_lowest_cycle_head(&self) -> Option<StackDepth> {
        self.heads.first_key_value().map(|(k, _)| *k)
    }

    fn remove_highest_cycle_head(&mut self) {
        let last = self.heads.pop_last();
        debug_assert_ne!(last, None);
    }

    fn insert(
        &mut self,
        head: StackDepth,
        path_from_entry: impl Into<AllPathsToHeadCoinductive> + Copy,
    ) {
        self.heads.entry(head).or_insert(path_from_entry.into()).and_merge(path_from_entry);
    }

    fn merge(&mut self, heads: &CycleHeads) {
        for (&head, &path_from_entry) in heads.heads.iter() {
            self.insert(head, path_from_entry);
            debug_assert!(matches!(self.heads[&head], AllPathsToHeadCoinductive::Yes));
        }
    }

    fn iter(&self) -> impl Iterator<Item = (StackDepth, AllPathsToHeadCoinductive)> + '_ {
        self.heads.iter().map(|(k, v)| (*k, *v))
    }

    /// Update the cycle heads of a goal at depth `this` given the cycle heads
    /// of a nested goal. This merges the heads after filtering the parent goal
    /// itself.
    fn extend_from_child(&mut self, this: StackDepth, step_kind: PathKind, child: &CycleHeads) {
        for (&head, &path_from_entry) in child.heads.iter() {
            match head.cmp(&this) {
                Ordering::Less => {}
                Ordering::Equal => continue,
                Ordering::Greater => unreachable!(),
            }

            let path_from_entry = match step_kind {
                PathKind::Coinductive => AllPathsToHeadCoinductive::Yes,
                PathKind::Unknown | PathKind::Inductive => path_from_entry,
            };

            self.insert(head, path_from_entry);
        }
    }
}
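
// Illustrative example (hypothetical, names are made up): cycle heads remember
// whether *all* paths to a head are coinductive, so inserting a second,
// non-coinductive path to the same head downgrades the entry.
// `extend_from_child` additionally drops the parent goal itself and upgrades
// paths reached via a coinductive step.
#[cfg(test)]
mod cycle_heads_example {
    use super::*;

    #[test]
    fn insert_merges_all_paths() {
        let head = StackDepth::from_usize(0);
        let mut heads = CycleHeads::default();
        heads.insert(head, PathKind::Coinductive);
        assert!(matches!(heads.heads[&head], AllPathsToHeadCoinductive::Yes));
        // A second, inductive path to the same head means that not all paths
        // to it are coinductive anymore.
        heads.insert(head, PathKind::Inductive);
        assert!(matches!(heads.heads[&head], AllPathsToHeadCoinductive::No));
    }

    #[test]
    fn extend_from_child_filters_parent() {
        let head = StackDepth::from_usize(0);
        let this = StackDepth::from_usize(2);

        let mut child = CycleHeads::default();
        child.insert(head, PathKind::Inductive);
        child.insert(this, PathKind::Inductive);

        // The parent at depth 2 skips itself and, as the step to the child is
        // coinductive, records a coinductive path to the head at depth 0.
        let mut parent = CycleHeads::default();
        parent.extend_from_child(this, PathKind::Coinductive, &child);
        assert_eq!(parent.opt_highest_cycle_head(), Some(head));
        assert!(matches!(parent.heads[&head], AllPathsToHeadCoinductive::Yes));
    }
}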

bitflags::bitflags! {
    /// Tracks how nested goals have been accessed. This is necessary to disable
    /// global cache entries if computing them would otherwise result in a cycle or
    /// access a provisional cache entry.
    #[derive(Debug, Clone, Copy)]
    pub struct PathsToNested: u8 {
        /// The initial value when adding a goal to its own nested goals.
        const EMPTY = 1 << 0;
        const INDUCTIVE = 1 << 1;
        const UNKNOWN = 1 << 2;
        const COINDUCTIVE = 1 << 3;
    }
}
impl From<PathKind> for PathsToNested {
    fn from(path: PathKind) -> PathsToNested {
        match path {
            PathKind::Inductive => PathsToNested::INDUCTIVE,
            PathKind::Unknown => PathsToNested::UNKNOWN,
            PathKind::Coinductive => PathsToNested::COINDUCTIVE,
        }
    }
}
impl PathsToNested {
    /// The implementation of this function is kind of ugly. We check whether
    /// there currently exist 'weaker' paths in the set; if so, we upgrade these
    /// paths to at least `path`.
    #[must_use]
    fn extend_with(mut self, path: PathKind) -> Self {
        match path {
            PathKind::Inductive => {
                if self.intersects(PathsToNested::EMPTY) {
                    self.remove(PathsToNested::EMPTY);
                    self.insert(PathsToNested::INDUCTIVE);
                }
            }
            PathKind::Unknown => {
                if self.intersects(PathsToNested::EMPTY | PathsToNested::INDUCTIVE) {
                    self.remove(PathsToNested::EMPTY | PathsToNested::INDUCTIVE);
                    self.insert(PathsToNested::UNKNOWN);
                }
            }
            PathKind::Coinductive => {
                if self.intersects(
                    PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN,
                ) {
                    self.remove(
                        PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN,
                    );
                    self.insert(PathsToNested::COINDUCTIVE);
                }
            }
        }

        self
    }
}
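
// Illustrative example (hypothetical, names are made up): extending the set of
// paths with an additional step upgrades all "weaker" paths to at least that
// step's kind while leaving stronger paths untouched.
#[cfg(test)]
mod paths_to_nested_example {
    use super::{PathKind, PathsToNested};

    #[test]
    fn extend_with_upgrades_weaker_paths() {
        let paths = PathsToNested::EMPTY | PathsToNested::COINDUCTIVE;
        let extended = paths.extend_with(PathKind::Unknown);
        // The empty path got upgraded to `UNKNOWN`, ...
        assert!(extended.contains(PathsToNested::UNKNOWN));
        assert!(!extended.contains(PathsToNested::EMPTY));
        // ... while the existing coinductive path is unchanged.
        assert!(extended.contains(PathsToNested::COINDUCTIVE));
    }
}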

/// The nested goals of each stack entry and the path from the
/// stack entry to that nested goal.
///
/// We only start tracking nested goals once we've either encountered
/// overflow or a solver cycle. This is a performance optimization to
/// avoid tracking nested goals on the happy path.
///
/// We use nested goals for two reasons:
/// - when rebasing provisional cache entries
/// - when checking whether we have to ignore a global cache entry as reevaluating
///   it would encounter a cycle or use a provisional cache entry.
///
/// We need to disable the global cache if using it would hide a cycle, as
/// cycles can impact behavior. The cycle ABA may have different final
/// results from the cycle BAB depending on the cycle root.
#[derive_where(Debug, Default, Clone; X: Cx)]
struct NestedGoals<X: Cx> {
    nested_goals: HashMap<X::Input, PathsToNested>,
}
impl<X: Cx> NestedGoals<X> {
    fn is_empty(&self) -> bool {
        self.nested_goals.is_empty()
    }

    fn insert(&mut self, input: X::Input, paths_to_nested: PathsToNested) {
        match self.nested_goals.entry(input) {
            Entry::Occupied(mut entry) => *entry.get_mut() |= paths_to_nested,
            Entry::Vacant(entry) => drop(entry.insert(paths_to_nested)),
        }
    }

    /// Adds the nested goals of a nested goal, given the path `step_kind` from this
    /// goal to that nested goal.
    ///
    /// The paths to the child's nested goals are extended with `step_kind`: paths
    /// 'weaker' than `step_kind` are upgraded to at least `step_kind`, while stronger
    /// paths stay unchanged. See `PathsToNested::extend_with`.
    fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) {
        #[allow(rustc::potential_query_instability)]
        for (input, paths_to_nested) in nested_goals.iter() {
            let paths_to_nested = paths_to_nested.extend_with(step_kind);
            self.insert(input, paths_to_nested);
        }
    }

    #[cfg_attr(feature = "nightly", rustc_lint_query_instability)]
    #[allow(rustc::potential_query_instability)]
    fn iter(&self) -> impl Iterator<Item = (X::Input, PathsToNested)> + '_ {
        self.nested_goals.iter().map(|(i, p)| (*i, *p))
    }

    fn contains(&self, input: X::Input) -> bool {
        self.nested_goals.contains_key(&input)
    }
}

rustc_index::newtype_index! {
    #[orderable]
    #[gate_rustc_only]
    pub struct StackDepth {}
}

/// Stack entries of the evaluation stack. Its fields tend to be lazily updated
/// when popping a child goal, or are completely immutable.
#[derive_where(Debug; X: Cx)]
struct StackEntry<X: Cx> {
    input: X::Input,

    /// The kind of step from the parent goal to this goal.
    ///
    /// This is used when encountering a trait solver cycle to
    /// decide on the initial provisional result of the cycle.
    step_kind_from_parent: PathKind,

    /// The available depth of a given goal, immutable.
    available_depth: AvailableDepth,

    /// The maximum depth reached by this stack entry, only up to date
    /// for the top of the stack and lazily updated for the rest.
    reached_depth: StackDepth,

    /// All cycle heads this goal depends on. Lazily updated and only
    /// up to date for the top of the stack.
    heads: CycleHeads,

    /// Whether evaluating this goal encountered overflow. Lazily updated.
    encountered_overflow: bool,

    /// Whether this goal has been used as the root of a cycle. This gets
    /// eagerly updated when encountering a cycle.
    has_been_used: Option<UsageKind>,

    /// The nested goals of this goal, see the doc comment of the type.
    nested_goals: NestedGoals<X>,

    /// Starts out as `None` and gets set when rerunning this
    /// goal in case we encounter a cycle.
    provisional_result: Option<X::Result>,
}

/// A provisional result of an already computed goal which depends on other
/// goals still on the stack.
#[derive_where(Debug; X: Cx)]
struct ProvisionalCacheEntry<X: Cx> {
    /// Whether evaluating the goal encountered overflow. This is used to
    /// disable the cache entry unless the last goal on the stack is
    /// already involved in this cycle.
    encountered_overflow: bool,
    /// All cycle heads this cache entry depends on.
    heads: CycleHeads,
    /// The path from the highest cycle head to this goal. This differs from
    /// `heads`, which tracks the path to the cycle head *from* this goal.
    path_from_head: PathKind,
    result: X::Result,
}

pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
    root_depth: AvailableDepth,
    /// The stack of goals currently being computed.
    ///
    /// An element is *deeper* in the stack if its index is *lower*.
    stack: IndexVec<StackDepth, StackEntry<X>>,
    /// The provisional cache contains entries for already computed goals which
    /// still depend on goals higher-up in the stack. We don't move them to the
    /// global cache and track them locally instead. A provisional cache entry
    /// is only valid until the result of one of its cycle heads changes.
    provisional_cache: HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,

    _marker: PhantomData<D>,
}

/// While [`SearchGraph::update_parent_goal`] can be mostly shared between
/// ordinary nested goals/global cache hits and provisional cache hits,
/// using the provisional cache should not add any nested goals.
///
/// `nested_goals` are only used when checking whether global cache entries
/// are applicable. This only cares about whether a goal is actually accessed.
/// Given that the usage of the provisional cache is fully deterministic, we
/// don't need to track the nested goals used while computing a provisional
/// cache entry.
enum UpdateParentGoalCtxt<'a, X: Cx> {
    Ordinary(&'a NestedGoals<X>),
    ProvisionalCacheHit,
}

impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
    pub fn new(root_depth: usize) -> SearchGraph<D> {
        Self {
            root_depth: AvailableDepth(root_depth),
            stack: Default::default(),
            provisional_cache: Default::default(),
            _marker: PhantomData,
        }
    }

    /// Lazily update the stack entry for the parent goal.
    /// This behavior is shared between actually evaluating goals
    /// and using existing global cache entries to make sure they
    /// have the same impact on the remaining evaluation.
    fn update_parent_goal(
        stack: &mut IndexVec<StackDepth, StackEntry<X>>,
        step_kind_from_parent: PathKind,
        reached_depth: StackDepth,
        heads: &CycleHeads,
        encountered_overflow: bool,
        context: UpdateParentGoalCtxt<'_, X>,
    ) {
        if let Some(parent_index) = stack.last_index() {
            let parent = &mut stack[parent_index];
            parent.reached_depth = parent.reached_depth.max(reached_depth);
            parent.encountered_overflow |= encountered_overflow;

            parent.heads.extend_from_child(parent_index, step_kind_from_parent, heads);
            let parent_depends_on_cycle = match context {
                UpdateParentGoalCtxt::Ordinary(nested_goals) => {
                    parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals);
                    !nested_goals.is_empty()
                }
                UpdateParentGoalCtxt::ProvisionalCacheHit => true,
            };
            // Once we've got goals which encountered overflow or a cycle,
            // we track all goals whose behavior may depend on these goals,
            // as this change may cause them to now depend on additional
            // goals, resulting in new cycles. See the dev-guide for examples.
            if parent_depends_on_cycle {
                parent.nested_goals.insert(parent.input, PathsToNested::EMPTY);
            }
        }
    }

    pub fn is_empty(&self) -> bool {
        if self.stack.is_empty() {
            debug_assert!(self.provisional_cache.is_empty());
            true
        } else {
            false
        }
    }

    /// The number of goals currently in the search graph. This should only be
    /// used for debugging purposes.
    pub fn debug_current_depth(&self) -> usize {
        self.stack.len()
    }

    /// Whether the path from `head` to the current stack entry is inductive or coinductive.
    ///
    /// The `step_kind_to_head` is used to add a single additional path segment to the path on
    /// the stack which completes the cycle. Given an inductive step `A -> B` which then cycles
    /// coinductively with `A`, we need to treat this cycle as coinductive.
    fn cycle_path_kind(
        stack: &IndexVec<StackDepth, StackEntry<X>>,
        step_kind_to_head: PathKind,
        head: StackDepth,
    ) -> PathKind {
        stack.raw[head.index() + 1..]
            .iter()
            .fold(step_kind_to_head, |curr, entry| curr.extend(entry.step_kind_from_parent))
    }

    /// Probably the most involved method of the whole solver.
    ///
    /// Given some goal which is proven via the `evaluate_goal` closure, this
    /// handles caching, overflow, and coinductive cycles.
    pub fn with_new_goal(
        &mut self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
        inspect: &mut D::ProofTreeBuilder,
        mut evaluate_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result,
    ) -> X::Result {
        let Some(available_depth) =
            AvailableDepth::allowed_depth_for_nested::<D>(self.root_depth, &self.stack)
        else {
            return self.handle_overflow(cx, input, inspect);
        };

        // We check the provisional cache before checking the global cache. This simplifies
        // the implementation as we can avoid worrying about cases where both the global and
        // provisional cache may apply, e.g. consider the following example
        //
        // - xxBA overflow
        // - A
        //   - BA cycle
        //   - CB :x:
        if let Some(result) = self.lookup_provisional_cache(input, step_kind_from_parent) {
            return result;
        }

        // Lookup the global cache unless we're building proof trees or are currently
        // fuzzing.
        let validate_cache = if !D::inspect_is_noop(inspect) {
            None
        } else if let Some(scope) = D::enter_validation_scope(cx, input) {
            // When validating the global cache we need to track the goals for which the
            // global cache has been disabled as it may otherwise change the result for
            // cyclic goals. We don't care about goals which are not on the current stack
            // so it's fine to drop their scope eagerly.
            self.lookup_global_cache_untracked(cx, input, step_kind_from_parent, available_depth)
                .inspect(|expected| debug!(?expected, "validate cache entry"))
                .map(|r| (scope, r))
        } else if let Some(result) =
            self.lookup_global_cache(cx, input, step_kind_from_parent, available_depth)
        {
            return result;
        } else {
            None
        };

        // Detect cycles on the stack. We do this after the global cache lookup to
        // avoid iterating over the stack in case a goal has already been computed.
        // This may not have an actual performance impact and we could reorder them
        // as it may reduce the number of `nested_goals` we need to track.
        if let Some(result) = self.check_cycle_on_stack(cx, input, step_kind_from_parent) {
            debug_assert!(validate_cache.is_none(), "global cache and cycle on stack: {input:?}");
            return result;
        }

        // Unfortunately, it looks like we actually have to compute this goal.
        let depth = self.stack.next_index();
        let entry = StackEntry {
            input,
            step_kind_from_parent,
            available_depth,
            reached_depth: depth,
            heads: Default::default(),
            encountered_overflow: false,
            has_been_used: None,
            nested_goals: Default::default(),
            provisional_result: None,
        };
        assert_eq!(self.stack.push(entry), depth);

        // This is for global caching, so we properly track query dependencies.
        // Everything that affects the `result` should be performed within this
        // `with_cached_task` closure. If computing this goal depends on something
        // not tracked by the cache key and from outside of this anon task, it
        // must not be added to the global cache. Notably, this is the case for
        // trait solver cycle participants.
        let ((final_entry, result), dep_node) = cx.with_cached_task(|| {
            self.evaluate_goal_in_task(cx, input, inspect, &mut evaluate_goal)
        });

        // We've finished computing the goal and have popped it from the stack;
        // lazily update its parent goal.
        Self::update_parent_goal(
            &mut self.stack,
            final_entry.step_kind_from_parent,
            final_entry.reached_depth,
            &final_entry.heads,
            final_entry.encountered_overflow,
            UpdateParentGoalCtxt::Ordinary(&final_entry.nested_goals),
        );

        // We're now done with this goal. We only add the root of cycles to the global cache.
        // In case this goal is involved in a larger cycle, add it to the provisional cache
        // instead.
        if final_entry.heads.is_empty() {
            if let Some((_scope, expected)) = validate_cache {
                // Do not try to move a goal into the cache again if we're testing
                // the global cache.
                assert_eq!(result, expected, "input={input:?}");
            } else if D::inspect_is_noop(inspect) {
                self.insert_global_cache(cx, input, final_entry, result, dep_node)
            }
        } else if D::ENABLE_PROVISIONAL_CACHE {
            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
            let entry = self.provisional_cache.entry(input).or_default();
            let StackEntry { heads, encountered_overflow, .. } = final_entry;
            let path_from_head = Self::cycle_path_kind(
                &self.stack,
                step_kind_from_parent,
                heads.highest_cycle_head(),
            );
            let provisional_cache_entry =
                ProvisionalCacheEntry { encountered_overflow, heads, path_from_head, result };
            debug!(?provisional_cache_entry);
            entry.push(provisional_cache_entry);
        } else {
            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
        }

        result
    }

    fn handle_overflow(
        &mut self,
        cx: X,
        input: X::Input,
        inspect: &mut D::ProofTreeBuilder,
    ) -> X::Result {
        if let Some(last) = self.stack.raw.last_mut() {
            last.encountered_overflow = true;
            // If computing a goal `B` depends on another goal `A` and
            // `A` has a nested goal which overflows, then computing `B`
            // at the same depth, but with `A` already on the stack,
            // would encounter a solver cycle instead, potentially
            // changing the result.
            //
            // We must therefore not use the global cache entry for `B` in that case.
            // See tests/ui/traits/next-solver/cycles/hidden-by-overflow.rs
            last.nested_goals.insert(last.input, PathsToNested::EMPTY);
        }

        debug!("encountered stack overflow");
        D::on_stack_overflow(cx, inspect, input)
    }

    /// When reevaluating a goal with a changed provisional result, all provisional
    /// cache entries which depend on this goal get invalidated.
    fn clear_dependent_provisional_results(&mut self) {
        let head = self.stack.next_index();
        #[allow(rustc::potential_query_instability)]
        self.provisional_cache.retain(|_, entries| {
            entries.retain(|entry| entry.heads.highest_cycle_head() != head);
            !entries.is_empty()
        });
    }

    /// A necessary optimization to handle complex solver cycles. A provisional cache entry
    /// relies on a set of cycle heads and the path towards these heads. When popping a cycle
    /// head from the stack after we've finished computing it, we can't be sure that the
    /// provisional cache entry is still applicable. We need to keep the cache entries to
    /// prevent hangs.
    ///
    /// What we therefore do is check whether the cycle kind of all cycles the goal of a
    /// provisional cache entry is involved in would stay the same when computing the
    /// goal without its cycle head on the stack. For more details, see the relevant
    /// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
    ///
    /// This can be thought of as rotating the sub-tree of this provisional result and changing
    /// its entry point while making sure that all paths through this sub-tree stay the same.
    ///
    /// In case the popped cycle head failed to reach a fixpoint, anything which depends on
    /// its provisional result is invalid. Actually discarding provisional cache entries in
    /// this case would cause hangs, so we instead change the result of dependent provisional
    /// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested
    /// goals whose result doesn't actually depend on this cycle head, but that's acceptable
    /// to me.
    fn rebase_provisional_cache_entries(
        &mut self,
        stack_entry: &StackEntry<X>,
        mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result,
    ) {
        let head = self.stack.next_index();
        #[allow(rustc::potential_query_instability)]
        self.provisional_cache.retain(|&input, entries| {
            entries.retain_mut(|entry| {
                let ProvisionalCacheEntry {
                    encountered_overflow: _,
                    heads,
                    path_from_head,
                    result,
                } = entry;
                if heads.highest_cycle_head() == head {
                    heads.remove_highest_cycle_head()
                } else {
                    return true;
                }

                // We only try to rebase if all paths from the cache entry
                // to its heads are coinductive. In this case these cycle
                // kinds won't change, no matter the goals between these
                // heads and the provisional cache entry.
                if heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No)) {
                    return false;
                }

                // The same for nested goals of the cycle head.
                if stack_entry.heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No))
                {
                    return false;
                }

                // Merge the cycle heads of the provisional cache entry and the
                // popped head. If the popped cycle head was a root, discard all
                // provisional cache entries which depend on it.
                heads.merge(&stack_entry.heads);
                let Some(head) = heads.opt_highest_cycle_head() else {
                    return false;
                };

                // We now care about the path from the next highest cycle head to the
                // provisional cache entry.
                *path_from_head = path_from_head.extend(Self::cycle_path_kind(
                    &self.stack,
                    stack_entry.step_kind_from_parent,
                    head,
                ));
                // Mutate the result of the provisional cache entry in case we did
                // not reach a fixpoint.
                *result = mutate_result(input, *result);
                true
            });
            !entries.is_empty()
        });
    }

    fn lookup_provisional_cache(
        &mut self,
        input: X::Input,
        step_kind_from_parent: PathKind,
    ) -> Option<X::Result> {
        if !D::ENABLE_PROVISIONAL_CACHE {
            return None;
        }

        let entries = self.provisional_cache.get(&input)?;
        for &ProvisionalCacheEntry { encountered_overflow, ref heads, path_from_head, result } in
            entries
        {
            let head = heads.highest_cycle_head();
            if encountered_overflow {
                // This check is overly strict and very subtle. We need to make sure that if
                // a global cache entry depends on some goal without adding it to its
                // `nested_goals`, that goal must never have an applicable provisional
                // cache entry to avoid incorrectly applying the cache entry.
                //
                // As we'd have to otherwise track literally all nested goals, we only
                // apply provisional cache entries which encountered overflow once the
                // current goal is already part of the same cycle. This check could be
                // improved but seems to be good enough for now.
                let last = self.stack.raw.last().unwrap();
                if last.heads.opt_lowest_cycle_head().is_none_or(|lowest| lowest > head) {
                    continue;
                }
            }

            // A provisional cache entry is only valid if the current path from its
            // highest cycle head to the goal is the same.
            if path_from_head == Self::cycle_path_kind(&self.stack, step_kind_from_parent, head) {
                // While we don't have to track the full depth of the provisional cache entry,
                // we do have to increment the required depth by one as we'd have already failed
                // with overflow otherwise.
                let next_index = self.stack.next_index();
                Self::update_parent_goal(
                    &mut self.stack,
                    step_kind_from_parent,
                    next_index,
                    heads,
                    encountered_overflow,
                    UpdateParentGoalCtxt::ProvisionalCacheHit,
                );
                debug_assert!(self.stack[head].has_been_used.is_some());
                debug!(?head, ?path_from_head, "provisional cache hit");
                return Some(result);
            }
        }

        None
    }

    /// Even if there is a global cache entry for a given goal, we need to make sure
    /// evaluating this entry would not have ended up depending on either a goal
    /// already on the stack or a provisional cache entry.
    fn candidate_is_applicable(
        stack: &IndexVec<StackDepth, StackEntry<X>>,
        step_kind_from_parent: PathKind,
        provisional_cache: &HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,
        nested_goals: &NestedGoals<X>,
    ) -> bool {
        // If the global cache entry didn't depend on any nested goals, it always
        // applies.
        if nested_goals.is_empty() {
            return true;
        }

        // If a nested goal of the global cache entry is on the stack, we would
        // definitely encounter a cycle.
        if stack.iter().any(|e| nested_goals.contains(e.input)) {
            debug!("cache entry not applicable due to stack");
            return false;
        }

        // The global cache entry is also invalid if there's a provisional cache entry
        // which would apply to any of its nested goals.
        #[allow(rustc::potential_query_instability)]
        for (input, path_from_global_entry) in nested_goals.iter() {
            let Some(entries) = provisional_cache.get(&input) else {
                continue;
            };

            debug!(?input, ?path_from_global_entry, ?entries, "candidate_is_applicable");
            // A provisional cache entry is applicable if the path to
            // its highest cycle head is equal to the expected path.
            for &ProvisionalCacheEntry {
                encountered_overflow,
                ref heads,
                path_from_head: head_to_provisional,
                result: _,
            } in entries.iter()
            {
                // We don't have to worry about provisional cache entries which encountered
                // overflow, see the relevant comment in `lookup_provisional_cache`.
                if encountered_overflow {
                    continue;
                }

                // A provisional cache entry only applies if the path from its highest head
                // matches the path when encountering the goal.
                //
                // We check if any of the paths taken while computing the global goal
                // would end up with an applicable provisional cache entry.
                let head = heads.highest_cycle_head();
                let head_to_curr = Self::cycle_path_kind(stack, step_kind_from_parent, head);
                let full_paths = path_from_global_entry.extend_with(head_to_curr);
                if full_paths.contains(head_to_provisional.into()) {
                    debug!(
                        ?full_paths,
                        ?head_to_provisional,
                        "cache entry not applicable due to matching paths"
                    );
                    return false;
                }
            }
        }

        true
    }

    /// Used when fuzzing the global cache. Accesses the global cache without
    /// updating the state of the search graph.
    fn lookup_global_cache_untracked(
        &self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
        available_depth: AvailableDepth,
    ) -> Option<X::Result> {
        cx.with_global_cache(|cache| {
            cache
                .get(cx, input, available_depth, |nested_goals| {
                    Self::candidate_is_applicable(
                        &self.stack,
                        step_kind_from_parent,
                        &self.provisional_cache,
                        nested_goals,
                    )
                })
                .map(|c| c.result)
        })
    }

    /// Try to fetch a previously computed result from the global cache,
    /// making sure to only do so if it would match the result of reevaluating
    /// this goal.
    fn lookup_global_cache(
        &mut self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
        available_depth: AvailableDepth,
    ) -> Option<X::Result> {
        cx.with_global_cache(|cache| {
            let CacheData { result, additional_depth, encountered_overflow, nested_goals } = cache
                .get(cx, input, available_depth, |nested_goals| {
                    Self::candidate_is_applicable(
                        &self.stack,
                        step_kind_from_parent,
                        &self.provisional_cache,
                        nested_goals,
                    )
                })?;

            // Update the reached depth of the current goal to make sure
            // its state is the same regardless of whether we've used the
            // global cache or not.
            let reached_depth = self.stack.next_index().plus(additional_depth);
            // We don't move cycle participants to the global cache, so the
            // cycle heads are always empty.
            let heads = Default::default();
            Self::update_parent_goal(
                &mut self.stack,
                step_kind_from_parent,
                reached_depth,
                &heads,
                encountered_overflow,
                UpdateParentGoalCtxt::Ordinary(nested_goals),
            );

            debug!(?additional_depth, "global cache hit");
            Some(result)
        })
    }

    fn check_cycle_on_stack(
        &mut self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
    ) -> Option<X::Result> {
        let (head, _stack_entry) = self.stack.iter_enumerated().find(|(_, e)| e.input == input)?;
        // We have a nested goal which directly relies on a goal deeper in the stack.
        //
        // We start by tagging all cycle participants, as that's necessary for caching.
        //
        // Finally we can return either the provisional response or the initial response
        // in case we're in the first fixpoint iteration for this goal.
        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head);
        debug!(?path_kind, "encountered cycle with depth {head:?}");
        let usage_kind = UsageKind::Single(path_kind);
        self.stack[head].has_been_used =
            Some(self.stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind)));

        // Subtle: when encountering a cyclic goal, we still first checked for overflow,
        // so we have to update the reached depth.
        let next_index = self.stack.next_index();
        let last_index = self.stack.last_index().unwrap();
        let last = &mut self.stack[last_index];
        last.reached_depth = last.reached_depth.max(next_index);

        last.nested_goals.insert(input, step_kind_from_parent.into());
        last.nested_goals.insert(last.input, PathsToNested::EMPTY);
        if last_index != head {
            last.heads.insert(head, step_kind_from_parent);
        }

        // Return the provisional result or, if we're in the first iteration,
        // start with no constraints.
        if let Some(result) = self.stack[head].provisional_result {
            Some(result)
        } else {
            Some(D::initial_provisional_result(cx, path_kind, input))
        }
    }

    /// Whether we've reached a fixpoint when evaluating a cycle head.
    fn reached_fixpoint(
        &mut self,
        cx: X,
        stack_entry: &StackEntry<X>,
        usage_kind: UsageKind,
        result: X::Result,
    ) -> bool {
        if let Some(prev) = stack_entry.provisional_result {
            prev == result
        } else if let UsageKind::Single(kind) = usage_kind {
            D::is_initial_provisional_result(cx, kind, stack_entry.input, result)
        } else {
            false
        }
    }

    /// When we encounter a coinductive cycle, we have to fetch the
    /// result of that cycle while we are still computing it. Because
    /// of this we continuously recompute the cycle until the result
    /// of the previous iteration is equal to the final result, at which
    /// point we are done.
    fn evaluate_goal_in_task(
        &mut self,
        cx: X,
        input: X::Input,
        inspect: &mut D::ProofTreeBuilder,
        mut evaluate_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result,
    ) -> (StackEntry<X>, X::Result) {
        let mut i = 0;
        loop {
            let result = evaluate_goal(self, inspect);
            let stack_entry = self.stack.pop().unwrap();
            debug_assert_eq!(stack_entry.input, input);

            // If the current goal is not the root of a cycle, we are done.
            //
            // There are no provisional cache entries which depend on this goal.
            let Some(usage_kind) = stack_entry.has_been_used else {
                return (stack_entry, result);
            };

            // If it is a cycle head, we have to keep trying to prove it until
            // we reach a fixpoint. We need to do so for all cycle heads,
            // not only for the root.
            //
            // See tests/ui/traits/next-solver/cycles/fixpoint-rerun-all-cycle-heads.rs
            // for an example.
            //
            // Check whether we reached a fixpoint, either because the final result
            // is equal to the provisional result of the previous iteration, or because
            // this was only the root of either coinductive or inductive cycles, and the
            // final result is equal to the initial response for that case.
            if self.reached_fixpoint(cx, &stack_entry, usage_kind, result) {
                self.rebase_provisional_cache_entries(&stack_entry, |_, result| result);
                return (stack_entry, result);
            }

            // If computing this goal results in ambiguity with no constraints,
            // we do not rerun it. It's incredibly difficult to get a different
            // response in the next iteration in this case. These changes would
            // likely either be caused by incompleteness or can change the maybe
            // cause from ambiguity to overflow. Returning ambiguity always
            // preserves soundness and completeness even if the goal is known
            // to succeed or fail.
            //
            // This prevents exponential blowup affecting multiple major crates.
            // As we only get to this branch if we haven't yet reached a fixpoint,
            // we also taint all provisional cache entries which depend on the
            // current goal.
            if D::is_ambiguous_result(result) {
                self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
                    D::propagate_ambiguity(cx, input, result)
                });
                return (stack_entry, result);
            };

            // If we've reached the fixpoint step limit, we bail with overflow and taint all
            // provisional cache entries which depend on the current goal.
            i += 1;
            if i >= D::FIXPOINT_STEP_LIMIT {
                debug!("canonical cycle overflow");
                let result = D::on_fixpoint_overflow(cx, input);
                self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
                    D::on_fixpoint_overflow(cx, input)
                });
                return (stack_entry, result);
            }

            // Clear all provisional cache entries which depend on a previous provisional
            // result of this goal and rerun.
            self.clear_dependent_provisional_results();

            debug!(?result, "fixpoint changed provisional results");
            self.stack.push(StackEntry {
                has_been_used: None,
                provisional_result: Some(result),
                ..stack_entry
            });
        }
    }

    /// When encountering a cycle, both inductive and coinductive, we only
    /// move the root into the global cache. We also store all other cycle
    /// participants involved.
    ///
    /// We must not use the global cache entry of a root goal if a cycle
    /// participant is on the stack. This is necessary to prevent unstable
    /// results. See the comment of `StackEntry::nested_goals` for
    /// more details.
    fn insert_global_cache(
        &mut self,
        cx: X,
        input: X::Input,
        final_entry: StackEntry<X>,
        result: X::Result,
        dep_node: X::DepNodeIndex,
    ) {
        let additional_depth = final_entry.reached_depth.as_usize() - self.stack.len();
        debug!(?final_entry, ?result, "insert global cache");
        cx.with_global_cache(|cache| {
            cache.insert(
                cx,
                input,
                result,
                dep_node,
                additional_depth,
                final_entry.encountered_overflow,
                final_entry.nested_goals,
            )
        })
    }
}