rustc_type_ir/search_graph/mod.rs

1//! The search graph is responsible for caching and cycle detection in the trait
2//! solver. Making sure that caching doesn't result in soundness bugs or unstable
3//! query results is very challenging and makes this one of the most-involved
4//! self-contained components of the compiler.
5//!
6//! We added fuzzing support to test its correctness. The fuzzers used to verify
7//! the current implementation can be found in <https://github.com/lcnr/search_graph_fuzz>.
8//!
9//! This is just a quick overview of the general design; please check out the relevant
10//! [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for
11//! more details. Caching is split between a global cache and the per-cycle `provisional_cache`.
12//! The global cache has to be completely unobservable, while the per-cycle cache may impact
13//! behavior as long as the resulting behavior is still correct.
14use std::cmp::Ordering;
15use std::collections::hash_map::Entry;
16use std::collections::{BTreeMap, btree_map};
17use std::fmt::Debug;
18use std::hash::Hash;
19use std::iter;
20use std::marker::PhantomData;
21
22use derive_where::derive_where;
23#[cfg(feature = "nightly")]
24use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_NoContext};
25use rustc_type_ir::data_structures::HashMap;
26use tracing::{debug, instrument};
27
28mod stack;
29use stack::{Stack, StackDepth, StackEntry};
30mod global_cache;
31use global_cache::CacheData;
32pub use global_cache::GlobalCache;
33
34/// The search graph does not use `Interner` directly; this
35/// enables fuzzing it without having to stub the rest of
36/// the interner. We don't make this a supertrait of `Interner`,
37/// as users of the shared type library shouldn't have to care
38/// about `Input` and `Result`: they are implementation details
39/// of the search graph.
40pub trait Cx: Copy {
41    type Input: Debug + Eq + Hash + Copy;
42    type Result: Debug + Eq + Hash + Copy;
43    type AmbiguityInfo: Debug + Eq + Hash + Copy;
44
45    type DepNodeIndex;
46    type Tracked<T: Debug + Clone>: Debug;
47    fn mk_tracked<T: Debug + Clone>(
48        self,
49        data: T,
50        dep_node_index: Self::DepNodeIndex,
51    ) -> Self::Tracked<T>;
52    fn get_tracked<T: Debug + Clone>(self, tracked: &Self::Tracked<T>) -> T;
53    fn with_cached_task<T>(self, task: impl FnOnce() -> T) -> (T, Self::DepNodeIndex);
54
55    fn with_global_cache<R>(self, f: impl FnOnce(&mut GlobalCache<Self>) -> R) -> R;
56
57    fn assert_evaluation_is_concurrent(&self);
58}
59
60pub trait Delegate: Sized {
61    type Cx: Cx;
62    /// Whether to use the provisional cache. Set to `false` by a fuzzer when
63    /// validating the search graph.
64    const ENABLE_PROVISIONAL_CACHE: bool;
65    type ValidationScope;
66    /// Returning `Some` disables the global cache for the current goal.
67    ///
68    /// The `ValidationScope` is used when fuzzing the search graph to track
69    /// for which goals the global cache has been disabled. This is necessary
70    /// as we may otherwise ignore the global cache entry for some goal `G`
71/// only to later use it, failing to detect a cycle with that goal and
72/// potentially changing the result.
73    fn enter_validation_scope(
74        cx: Self::Cx,
75        input: <Self::Cx as Cx>::Input,
76    ) -> Option<Self::ValidationScope>;
77
78    const FIXPOINT_STEP_LIMIT: usize;
79
80    type ProofTreeBuilder;
81    fn inspect_is_noop(inspect: &mut Self::ProofTreeBuilder) -> bool;
82
83    const DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW: usize;
84
85    fn initial_provisional_result(
86        cx: Self::Cx,
87        kind: PathKind,
88        input: <Self::Cx as Cx>::Input,
89    ) -> <Self::Cx as Cx>::Result;
90    fn is_initial_provisional_result(result: <Self::Cx as Cx>::Result) -> Option<PathKind>;
91    fn stack_overflow_result(
92        cx: Self::Cx,
93        input: <Self::Cx as Cx>::Input,
94    ) -> <Self::Cx as Cx>::Result;
95    fn fixpoint_overflow_result(
96        cx: Self::Cx,
97        input: <Self::Cx as Cx>::Input,
98    ) -> <Self::Cx as Cx>::Result;
99
100    fn is_ambiguous_result(
101        result: <Self::Cx as Cx>::Result,
102    ) -> Option<<Self::Cx as Cx>::AmbiguityInfo>;
103    fn propagate_ambiguity(
104        cx: Self::Cx,
105        for_input: <Self::Cx as Cx>::Input,
106        ambiguity_info: <Self::Cx as Cx>::AmbiguityInfo,
107    ) -> <Self::Cx as Cx>::Result;
108
109    fn compute_goal(
110        search_graph: &mut SearchGraph<Self>,
111        cx: Self::Cx,
112        input: <Self::Cx as Cx>::Input,
113        inspect: &mut Self::ProofTreeBuilder,
114    ) -> <Self::Cx as Cx>::Result;
115}
116
117/// In the initial iteration of a cycle, we do not yet have a provisional
118/// result. In this case we return an initial provisional result, which depends
119/// on the kind of cycle.
120#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
121#[cfg_attr(
122    feature = "nightly",
123    derive(Decodable_NoContext, Encodable_NoContext, HashStable_NoContext)
124)]
125pub enum PathKind {
126    /// A path consisting of only inductive/unproductive steps. Its initial
127    /// provisional result is `Err(NoSolution)`. We currently treat such cycles as
128    /// `PathKind::Unknown` during coherence until we're fully confident in
129    /// our approach.
130    Inductive,
131    /// A path which is not coinductive right now, but which we may want
132    /// to make coinductive in the future. We return an ambiguous
133    /// result in this case to prevent people from relying on the current behavior.
134    Unknown,
135    /// A path with at least one coinductive step. Such cycles hold.
136    Coinductive,
137    /// A path which is treated as ambiguous. Once a path has this path kind,
138    /// adding further segments does not change its kind.
139    ///
140    /// This is currently only used when fuzzing to support negative reasoning.
141    /// For more details, see #143054.
142    ForcedAmbiguity,
143}
144
145impl PathKind {
146    /// Returns the path kind when merging `self` with `rest`.
147    ///
148    /// Given an inductive path `self` and a coinductive path `rest`,
149    /// the path `self -> rest` would be coinductive.
150    ///
151    /// This operation is commutative and equivalent to `max(self, rest)`
152    /// for the ordering `Inductive < Unknown < Coinductive < ForcedAmbiguity`.
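    ///
    /// A quick illustration of this ordering; illustrative only, not an actual doc test:
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(PathKind::Inductive.extend(PathKind::Coinductive), PathKind::Coinductive);
    /// assert_eq!(PathKind::Coinductive.extend(PathKind::Inductive), PathKind::Coinductive);
    /// assert_eq!(PathKind::Unknown.extend(PathKind::Inductive), PathKind::Unknown);
    /// ```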
153    fn extend(self, rest: PathKind) -> PathKind {
154        match (self, rest) {
155            (PathKind::ForcedAmbiguity, _) | (_, PathKind::ForcedAmbiguity) => {
156                PathKind::ForcedAmbiguity
157            }
158            (PathKind::Coinductive, _) | (_, PathKind::Coinductive) => PathKind::Coinductive,
159            (PathKind::Unknown, _) | (_, PathKind::Unknown) => PathKind::Unknown,
160            (PathKind::Inductive, PathKind::Inductive) => PathKind::Inductive,
161        }
162    }
163}
164
165/// The kinds of cycles a cycle head was involved in.
166///
167/// This is used to avoid rerunning a cycle if there's
168/// just a single usage kind and the final result matches
169/// its provisional result.
170///
171/// While it tracks the number of usages using a `u32`, we only ever
172/// care whether there are any. We only count them to be able to ignore
173/// usages from irrelevant candidates while evaluating a goal.
174///
175/// This cares about how nested goals relied on a cycle head. It does
176/// not care about how frequently the nested goal relied on it.
177#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
178struct HeadUsages {
179    inductive: u32,
180    unknown: u32,
181    coinductive: u32,
182    forced_ambiguity: u32,
183}
184
185impl HeadUsages {
186    fn add_usage(&mut self, path: PathKind) {
187        match path {
188            PathKind::Inductive => self.inductive += 1,
189            PathKind::Unknown => self.unknown += 1,
190            PathKind::Coinductive => self.coinductive += 1,
191            PathKind::ForcedAmbiguity => self.forced_ambiguity += 1,
192        }
193    }
194
195    /// This adds the usages which occurred while computing a nested goal.
196    ///
197    /// We don't actually care about how frequently the nested goal relied
198    /// on its cycle heads, only whether it did.
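    ///
    /// A sketch of this behavior; illustrative only, not an actual doc test:
    ///
    /// ```ignore (illustrative)
    /// let mut usages = HeadUsages::default();
    /// usages.add_usage(PathKind::Inductive);
    /// // A nested goal which relied on this head three times still only
    /// // contributes a single inductive usage.
    /// usages.add_usages_from_nested(HeadUsages { inductive: 3, ..Default::default() });
    /// assert_eq!(usages, HeadUsages { inductive: 2, ..Default::default() });
    /// ```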
199    fn add_usages_from_nested(&mut self, usages: HeadUsages) {
200        let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = usages;
201        self.inductive += if inductive == 0 { 0 } else { 1 };
202        self.unknown += if unknown == 0 { 0 } else { 1 };
203        self.coinductive += if coinductive == 0 { 0 } else { 1 };
204        self.forced_ambiguity += if forced_ambiguity == 0 { 0 } else { 1 };
205    }
206
207    fn ignore_usages(&mut self, usages: HeadUsages) {
208        let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = usages;
209        self.inductive = self.inductive.checked_sub(inductive).unwrap();
210        self.unknown = self.unknown.checked_sub(unknown).unwrap();
211        self.coinductive = self.coinductive.checked_sub(coinductive).unwrap();
212        self.forced_ambiguity = self.forced_ambiguity.checked_sub(forced_ambiguity).unwrap();
213    }
214
215    fn is_empty(self) -> bool {
216        let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = self;
217        inductive == 0 && unknown == 0 && coinductive == 0 && forced_ambiguity == 0
218    }
219
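    /// Whether this cycle head has only ever been used via paths of the
    /// given `path_kind`.
    ///
    /// Note that this also holds if there are no usages at all.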
220    fn is_single(self, path_kind: PathKind) -> bool {
221        match path_kind {
222            PathKind::Inductive => matches!(
223                self,
224                HeadUsages { inductive: _, unknown: 0, coinductive: 0, forced_ambiguity: 0 },
225            ),
226            PathKind::Unknown => matches!(
227                self,
228                HeadUsages { inductive: 0, unknown: _, coinductive: 0, forced_ambiguity: 0 },
229            ),
230            PathKind::Coinductive => matches!(
231                self,
232                HeadUsages { inductive: 0, unknown: 0, coinductive: _, forced_ambiguity: 0 },
233            ),
234            PathKind::ForcedAmbiguity => matches!(
235                self,
236                HeadUsages { inductive: 0, unknown: 0, coinductive: 0, forced_ambiguity: _ },
237            ),
238        }
239    }
240}
241
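/// The cycle head usages encountered while evaluating a single candidate.
///
/// These can later be discarded again via [`SearchGraph::ignore_candidate_head_usages`]
/// in case the candidate turns out to be irrelevant for the final result.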
242#[derive(Debug, Default)]
243pub struct CandidateHeadUsages {
244    usages: Option<Box<HashMap<StackDepth, HeadUsages>>>,
245}
246impl CandidateHeadUsages {
247    pub fn merge_usages(&mut self, other: CandidateHeadUsages) {
248        if let Some(other_usages) = other.usages {
249            if let Some(ref mut self_usages) = self.usages {
250                #[allow(rustc::potential_query_instability)]
251                for (head_index, head) in other_usages.into_iter() {
252                    let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = head;
253                    let self_usages = self_usages.entry(head_index).or_default();
254                    self_usages.inductive += inductive;
255                    self_usages.unknown += unknown;
256                    self_usages.coinductive += coinductive;
257                    self_usages.forced_ambiguity += forced_ambiguity;
258                }
259            } else {
260                self.usages = Some(other_usages);
261            }
262        }
263    }
264}
265
266#[derive(Debug, Clone, Copy)]
267struct AvailableDepth(usize);
268impl AvailableDepth {
269    /// Returns the remaining depth allowed for nested goals.
270    ///
271    /// This is generally simply one less than the current depth.
272    /// However, if we encountered overflow, we significantly reduce
273    /// the remaining depth of all nested goals to prevent hangs
274    /// in case there is exponential blowup.
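    ///
    /// A rough sketch, assuming `D::DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW` were e.g. `4`:
    ///
    /// ```ignore (illustrative)
    /// // parent with available_depth 16, no overflow -> Some(AvailableDepth(15))
    /// // parent with available_depth 16, overflow    -> Some(AvailableDepth(4))
    /// // parent with available_depth 0               -> None, i.e. overflow
    /// // empty stack                                 -> Some(root_depth)
    /// ```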
275    fn allowed_depth_for_nested<D: Delegate>(
276        root_depth: AvailableDepth,
277        stack: &Stack<D::Cx>,
278    ) -> Option<AvailableDepth> {
279        if let Some(last) = stack.last() {
280            if last.available_depth.0 == 0 {
281                return None;
282            }
283
284            Some(if last.encountered_overflow {
285                AvailableDepth(last.available_depth.0 / D::DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW)
286            } else {
287                AvailableDepth(last.available_depth.0 - 1)
288            })
289        } else {
290            Some(root_depth)
291        }
292    }
293
294    /// Whether we're allowed to use a global cache entry which required
295    /// the given depth.
296    fn cache_entry_is_applicable(self, additional_depth: usize) -> bool {
297        self.0 >= additional_depth
298    }
299}
300
301#[derive(Clone, Copy, Debug)]
302struct CycleHead {
303    paths_to_head: PathsToNested,
304    /// If the `usages` are empty, the result of that head does not matter
305    /// for the current goal. However, we still don't completely drop this
306    /// cycle head as whether or not it exists impacts which queries we
307    /// access, so ignoring it would cause incremental compilation verification
308    /// failures or hide query cycles.
309    usages: HeadUsages,
310}
311
312/// All cycle heads a given goal depends on, ordered by their stack depth.
313///
314/// We also track all paths from this goal to that head. This is necessary
315/// when rebasing provisional cache results.
316#[derive(Clone, Debug, Default)]
317struct CycleHeads {
318    heads: BTreeMap<StackDepth, CycleHead>,
319}
320
321impl CycleHeads {
322    fn is_empty(&self) -> bool {
323        self.heads.is_empty()
324    }
325
326    fn highest_cycle_head(&self) -> (StackDepth, CycleHead) {
327        self.heads.last_key_value().map(|(k, v)| (*k, *v)).unwrap()
328    }
329
330    fn highest_cycle_head_index(&self) -> StackDepth {
331        self.opt_highest_cycle_head_index().unwrap()
332    }
333
334    fn opt_highest_cycle_head_index(&self) -> Option<StackDepth> {
335        self.heads.last_key_value().map(|(k, _)| *k)
336    }
337
338    fn opt_lowest_cycle_head_index(&self) -> Option<StackDepth> {
339        self.heads.first_key_value().map(|(k, _)| *k)
340    }
341
342    fn remove_highest_cycle_head(&mut self) -> CycleHead {
343        let last = self.heads.pop_last();
344        last.unwrap().1
345    }
346
347    fn insert(
348        &mut self,
349        head_index: StackDepth,
350        path_from_entry: impl Into<PathsToNested> + Copy,
351        usages: HeadUsages,
352    ) {
353        match self.heads.entry(head_index) {
354            btree_map::Entry::Vacant(entry) => {
355                entry.insert(CycleHead { paths_to_head: path_from_entry.into(), usages });
356            }
357            btree_map::Entry::Occupied(entry) => {
358                let head = entry.into_mut();
359                head.paths_to_head |= path_from_entry.into();
360                head.usages.add_usages_from_nested(usages);
361            }
362        }
363    }
364
365    fn ignore_usages(&mut self, head_index: StackDepth, usages: HeadUsages) {
366        self.heads.get_mut(&head_index).unwrap().usages.ignore_usages(usages)
367    }
368
369    fn iter(&self) -> impl Iterator<Item = (StackDepth, CycleHead)> + '_ {
370        self.heads.iter().map(|(k, v)| (*k, *v))
371    }
372}
373
374bitflags::bitflags! {
375    /// Tracks how nested goals have been accessed. This is necessary to disable
376    /// global cache entries if computing them would otherwise result in a cycle or
377    /// access a provisional cache entry.
378    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
379    pub struct PathsToNested: u8 {
380        /// The initial value when adding a goal to its own nested goals.
381        const EMPTY                      = 1 << 0;
382        const INDUCTIVE                  = 1 << 1;
383        const UNKNOWN                    = 1 << 2;
384        const COINDUCTIVE                = 1 << 3;
385        const FORCED_AMBIGUITY           = 1 << 4;
386    }
387}
388impl From<PathKind> for PathsToNested {
389    fn from(path: PathKind) -> PathsToNested {
390        match path {
391            PathKind::Inductive => PathsToNested::INDUCTIVE,
392            PathKind::Unknown => PathsToNested::UNKNOWN,
393            PathKind::Coinductive => PathsToNested::COINDUCTIVE,
394            PathKind::ForcedAmbiguity => PathsToNested::FORCED_AMBIGUITY,
395        }
396    }
397}
398impl PathsToNested {
399    /// The implementation of this function is kind of ugly. We check whether
400    /// there currently exist 'weaker' paths in the set; if so, we upgrade these
401    /// paths to at least `path`.
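    ///
    /// For example; illustrative only, not an actual doc test:
    ///
    /// ```ignore (illustrative)
    /// let paths = PathsToNested::EMPTY | PathsToNested::COINDUCTIVE;
    /// // `EMPTY` is weaker than `UNKNOWN` and gets upgraded, while the
    /// // stronger `COINDUCTIVE` path is kept as is.
    /// assert_eq!(
    ///     paths.extend_with(PathKind::Unknown),
    ///     PathsToNested::UNKNOWN | PathsToNested::COINDUCTIVE,
    /// );
    /// ```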
402    #[must_use]
403    fn extend_with(mut self, path: PathKind) -> Self {
404        match path {
405            PathKind::Inductive => {
406                if self.intersects(PathsToNested::EMPTY) {
407                    self.remove(PathsToNested::EMPTY);
408                    self.insert(PathsToNested::INDUCTIVE);
409                }
410            }
411            PathKind::Unknown => {
412                if self.intersects(PathsToNested::EMPTY | PathsToNested::INDUCTIVE) {
413                    self.remove(PathsToNested::EMPTY | PathsToNested::INDUCTIVE);
414                    self.insert(PathsToNested::UNKNOWN);
415                }
416            }
417            PathKind::Coinductive => {
418                if self.intersects(
419                    PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN,
420                ) {
421                    self.remove(
422                        PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN,
423                    );
424                    self.insert(PathsToNested::COINDUCTIVE);
425                }
426            }
427            PathKind::ForcedAmbiguity => {
428                if self.intersects(
429                    PathsToNested::EMPTY
430                        | PathsToNested::INDUCTIVE
431                        | PathsToNested::UNKNOWN
432                        | PathsToNested::COINDUCTIVE,
433                ) {
434                    self.remove(
435                        PathsToNested::EMPTY
436                            | PathsToNested::INDUCTIVE
437                            | PathsToNested::UNKNOWN
438                            | PathsToNested::COINDUCTIVE,
439                    );
440                    self.insert(PathsToNested::FORCED_AMBIGUITY);
441                }
442            }
443        }
444
445        self
446    }
447
448    #[must_use]
449    fn extend_with_paths(self, path: PathsToNested) -> Self {
450        let mut new = PathsToNested::empty();
451        for p in path.iter_paths() {
452            new |= self.extend_with(p);
453        }
454        new
455    }
456
457    fn iter_paths(self) -> impl Iterator<Item = PathKind> {
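        // This `let` acts as an exhaustiveness guard: adding a new `PathKind`
        // variant makes the or-pattern refutable and therefore fails to compile,
        // reminding us to also update the array below.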
458        let (PathKind::Inductive
459        | PathKind::Unknown
460        | PathKind::Coinductive
461        | PathKind::ForcedAmbiguity);
462        [PathKind::Inductive, PathKind::Unknown, PathKind::Coinductive, PathKind::ForcedAmbiguity]
463            .into_iter()
464            .filter(move |&p| self.contains(p.into()))
465    }
466}
467
468/// The nested goals of each stack entry and the path from the
469/// stack entry to that nested goal.
470///
471/// They are used when checking whether reevaluating a global cache entry
472/// would encounter a cycle or use a provisional cache entry given the
473/// current search graph state. We need to disable the global cache
474/// in this case as it could otherwise result in behavioral differences.
475/// Cycles can impact behavior. The cycle ABA may have different final
476/// results from the cycle BAB depending on the cycle root.
477///
478/// We only start tracking nested goals once we've either encountered
479/// overflow or a solver cycle. This is a performance optimization to
480/// avoid tracking nested goals on the happy path.
481#[derive_where(Debug, Default, Clone; X: Cx)]
482struct NestedGoals<X: Cx> {
483    nested_goals: HashMap<X::Input, PathsToNested>,
484}
485impl<X: Cx> NestedGoals<X> {
486    fn is_empty(&self) -> bool {
487        self.nested_goals.is_empty()
488    }
489
490    fn insert(&mut self, input: X::Input, paths_to_nested: PathsToNested) {
491        match self.nested_goals.entry(input) {
492            Entry::Occupied(mut entry) => *entry.get_mut() |= paths_to_nested,
493            Entry::Vacant(entry) => drop(entry.insert(paths_to_nested)),
494        }
495    }
496
497    /// Adds the nested goals of a nested goal, given the path `step_kind` from this goal
498    /// to that nested goal.
499    ///
500    /// The paths to the nested goals of the child get extended with `step_kind`: paths
501    /// weaker than `step_kind` are upgraded to it, while stronger paths stay the same
502    /// as for the child; see `PathsToNested::extend_with`.
503    fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) {
504        #[allow(rustc::potential_query_instability)]
505        for (input, paths_to_nested) in nested_goals.iter() {
506            let paths_to_nested = paths_to_nested.extend_with(step_kind);
507            self.insert(input, paths_to_nested);
508        }
509    }
510
511    #[cfg_attr(feature = "nightly", rustc_lint_query_instability)]
512    #[allow(rustc::potential_query_instability)]
513    fn iter(&self) -> impl Iterator<Item = (X::Input, PathsToNested)> + '_ {
514        self.nested_goals.iter().map(|(i, p)| (*i, *p))
515    }
516
517    fn contains(&self, input: X::Input) -> bool {
518        self.nested_goals.contains_key(&input)
519    }
520}
521
522/// A provisional result of an already computed goal which depends on other
523/// goals still on the stack.
524#[derive_where(Debug; X: Cx)]
525struct ProvisionalCacheEntry<X: Cx> {
526    /// Whether evaluating the goal encountered overflow. This is used to
527    /// disable the cache entry except if the last goal on the stack is
528    /// already involved in this cycle.
529    encountered_overflow: bool,
530    /// All cycle heads this cache entry depends on.
531    heads: CycleHeads,
532    /// The path from the highest cycle head to this goal. This differs from
533    /// `heads` which tracks the path to the cycle head *from* this goal.
534    path_from_head: PathKind,
535    result: X::Result,
536}
537
538/// The final result of evaluating a goal.
539///
540/// We reset `encountered_overflow` when reevaluating a goal,
541/// but need to track whether we've hit the recursion limit at
542/// all for correctness.
543///
544/// We previously simply returned the final `StackEntry`, but this
545/// made it easy to accidentally drop information from the previous
546/// evaluation.
547#[derive_where(Debug; X: Cx)]
548struct EvaluationResult<X: Cx> {
549    encountered_overflow: bool,
550    required_depth: usize,
551    heads: CycleHeads,
552    nested_goals: NestedGoals<X>,
553    result: X::Result,
554}
555
556impl<X: Cx> EvaluationResult<X> {
557    fn finalize(
558        final_entry: StackEntry<X>,
559        encountered_overflow: bool,
560        result: X::Result,
561    ) -> EvaluationResult<X> {
562        EvaluationResult {
563            encountered_overflow,
564            // Unlike `encountered_overflow`, we share `heads`, `required_depth`,
565            // and `nested_goals` between evaluations.
566            required_depth: final_entry.required_depth,
567            heads: final_entry.heads,
568            nested_goals: final_entry.nested_goals,
569            // We only care about the final result.
570            result,
571        }
572    }
573}
574
575pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
576    root_depth: AvailableDepth,
577    stack: Stack<X>,
578    /// The provisional cache contains entries for already computed goals which
579    /// still depend on goals higher-up in the stack. We don't move them to the
580    /// global cache but instead track them locally. A provisional cache entry
581    /// is only valid until the result of one of its cycle heads changes.
582    provisional_cache: HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,
583
584    _marker: PhantomData<D>,
585}
586
587/// While [`SearchGraph::update_parent_goal`] can be mostly shared between
588/// ordinary nested goals/global cache hits and provisional cache hits,
589/// using the provisional cache should not add any nested goals.
590///
591/// `nested_goals` are only used when checking whether global cache entries
592/// are applicable. This only cares about whether a goal is actually accessed.
593/// Given that the usage of the provisional cache is fully deterministic, we
594/// don't need to track the nested goals used while computing a provisional
595/// cache entry.
596enum UpdateParentGoalCtxt<'a, X: Cx> {
597    Ordinary(&'a NestedGoals<X>),
598    CycleOnStack(X::Input),
599    ProvisionalCacheHit,
600}
601
602impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
603    pub fn new(root_depth: usize) -> SearchGraph<D> {
604        Self {
605            root_depth: AvailableDepth(root_depth),
606            stack: Default::default(),
607            provisional_cache: Default::default(),
608            _marker: PhantomData,
609        }
610    }
611
612    /// Lazily update the stack entry for the parent goal.
613    /// This behavior is shared between actually evaluating goals
614    /// and using existing global cache entries to make sure they
615    /// have the same impact on the remaining evaluation.
616    fn update_parent_goal(
617        stack: &mut Stack<X>,
618        step_kind_from_parent: PathKind,
619        required_depth_for_nested: usize,
620        heads: impl Iterator<Item = (StackDepth, CycleHead)>,
621        encountered_overflow: bool,
622        context: UpdateParentGoalCtxt<'_, X>,
623    ) {
624        if let Some((parent_index, parent)) = stack.last_mut_with_index() {
625            parent.required_depth = parent.required_depth.max(required_depth_for_nested + 1);
626            parent.encountered_overflow |= encountered_overflow;
627
628            for (head_index, head) in heads {
629                if let Some(candidate_usages) = &mut parent.candidate_usages {
630                    candidate_usages
631                        .usages
632                        .get_or_insert_default()
633                        .entry(head_index)
634                        .or_default()
635                        .add_usages_from_nested(head.usages);
636                }
637                match head_index.cmp(&parent_index) {
638                    Ordering::Less => parent.heads.insert(
639                        head_index,
640                        head.paths_to_head.extend_with(step_kind_from_parent),
641                        head.usages,
642                    ),
643                    Ordering::Equal => {
644                        parent.usages.get_or_insert_default().add_usages_from_nested(head.usages);
645                    }
646                    Ordering::Greater => unreachable!(),
647                }
648            }
649            let parent_depends_on_cycle = match context {
650                UpdateParentGoalCtxt::Ordinary(nested_goals) => {
651                    parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals);
652                    !nested_goals.is_empty()
653                }
654                UpdateParentGoalCtxt::CycleOnStack(head) => {
655                    // We lookup provisional cache entries before detecting cycles.
656                    // We therefore can't use a global cache entry if it contains a cycle
657                    // whose head is in the provisional cache.
658                    parent.nested_goals.insert(head, step_kind_from_parent.into());
659                    true
660                }
661                UpdateParentGoalCtxt::ProvisionalCacheHit => true,
662            };
663            // Once we've got goals which encountered overflow or a cycle,
664            // we track all goals whose behavior may depend on these
665            // goals as this change may cause them to now depend on additional
666            // goals, resulting in new cycles. See the dev-guide for examples.
667            if parent_depends_on_cycle {
668                parent.nested_goals.insert(parent.input, PathsToNested::EMPTY);
669            }
670        }
671    }
672
673    pub fn is_empty(&self) -> bool {
674        if self.stack.is_empty() {
675            debug_assert!(self.provisional_cache.is_empty());
676            true
677        } else {
678            false
679        }
680    }
681
682    /// The number of goals currently in the search graph. This should only be
683    /// used for debugging purposes.
684    pub fn debug_current_depth(&self) -> usize {
685        self.stack.len()
686    }
687
688    /// The kind of the path from `head` to the current stack entry.
689    ///
690    /// The `step_kind_to_head` is used to add a single additional path segment to the path on
691    /// the stack which completes the cycle. Thus, given an inductive step AB which then cycles
692    /// coinductively with A, we need to treat this cycle as coinductive.
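    ///
    /// Conceptually this folds `PathKind::extend` over the final step and all step
    /// kinds on the stack between `head` and the current goal; illustrative only:
    ///
    /// ```ignore (illustrative)
    /// // stack steps from `head` towards the current goal: inductive, unknown;
    /// // final step back to `head`: coinductive. The resulting cycle is coinductive.
    /// [PathKind::Inductive, PathKind::Unknown]
    ///     .into_iter()
    ///     .fold(PathKind::Coinductive, |curr, step| curr.extend(step))
    ///     == PathKind::Coinductive
    /// ```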
693    fn cycle_path_kind(
694        stack: &Stack<X>,
695        step_kind_to_head: PathKind,
696        head: StackDepth,
697    ) -> PathKind {
698        stack.cycle_step_kinds(head).fold(step_kind_to_head, |curr, step| curr.extend(step))
699    }
700
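    /// Start tracking the cycle head usages of a single candidate. Tracking is
    /// finished by a later call to [`SearchGraph::finish_single_candidate`], whose
    /// returned [`CandidateHeadUsages`] can be discarded again via
    /// [`SearchGraph::ignore_candidate_head_usages`] in case the candidate turns
    /// out to be irrelevant for the final result.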
701    pub fn enter_single_candidate(&mut self) {
702        let prev = self.stack.last_mut().unwrap().candidate_usages.replace(Default::default());
703        debug_assert!(prev.is_none(), "existing candidate_usages: {prev:?}");
704    }
705
706    pub fn finish_single_candidate(&mut self) -> CandidateHeadUsages {
707        self.stack.last_mut().unwrap().candidate_usages.take().unwrap()
708    }
709
710    pub fn ignore_candidate_head_usages(&mut self, usages: CandidateHeadUsages) {
711        if let Some(usages) = usages.usages {
712            let (entry_index, entry) = self.stack.last_mut_with_index().unwrap();
713            #[allow(rustc::potential_query_instability)]
714            for (head_index, usages) in usages.into_iter() {
715                if head_index == entry_index {
716                    entry.usages.unwrap().ignore_usages(usages);
717                } else {
718                    entry.heads.ignore_usages(head_index, usages);
719                }
720            }
721        }
722    }
723
724    pub fn evaluate_root_goal_for_proof_tree(
725        cx: X,
726        root_depth: usize,
727        input: X::Input,
728        inspect: &mut D::ProofTreeBuilder,
729    ) -> X::Result {
730        let mut this = SearchGraph::<D>::new(root_depth);
731        let available_depth = AvailableDepth(root_depth);
732        let step_kind_from_parent = PathKind::Inductive; // is never used
733        this.stack.push(StackEntry {
734            input,
735            step_kind_from_parent,
736            available_depth,
737            provisional_result: None,
738            required_depth: 0,
739            heads: Default::default(),
740            encountered_overflow: false,
741            usages: None,
742            candidate_usages: None,
743            nested_goals: Default::default(),
744        });
745        let evaluation_result = this.evaluate_goal_in_task(cx, input, inspect);
746        evaluation_result.result
747    }
748
749    /// Probably the most involved method of the whole solver.
750    ///
751    /// While goals get computed via `D::compute_goal`, this function handles
752    /// caching, overflow, and cycles.
753    #[instrument(level = "debug", skip(self, cx, inspect), ret)]
754    pub fn evaluate_goal(
755        &mut self,
756        cx: X,
757        input: X::Input,
758        step_kind_from_parent: PathKind,
759        inspect: &mut D::ProofTreeBuilder,
760    ) -> X::Result {
761        let Some(available_depth) =
762            AvailableDepth::allowed_depth_for_nested::<D>(self.root_depth, &self.stack)
763        else {
764            return self.handle_overflow(cx, input);
765        };
766
767        // We check the provisional cache before checking the global cache. This simplifies
768        // the implementation as we can avoid worrying about cases where both the global and
769        // provisional cache may apply, e.g. consider the following example
770        //
771        // - xxBA overflow
772        // - A
773        //     - BA cycle
774        //     - CB :x:
775        if let Some(result) = self.lookup_provisional_cache(input, step_kind_from_parent) {
776            return result;
777        }
778
779        // Lookup the global cache unless we're building proof trees or are currently
780        // fuzzing.
781        let validate_cache = if !D::inspect_is_noop(inspect) {
782            None
783        } else if let Some(scope) = D::enter_validation_scope(cx, input) {
784            // When validating the global cache we need to track the goals for which the
785            // global cache has been disabled as it may otherwise change the result for
786            // cyclic goals. We don't care about goals which are not on the current stack
787            // so it's fine to drop their scope eagerly.
788            self.lookup_global_cache_untracked(cx, input, step_kind_from_parent, available_depth)
789                .inspect(|expected| debug!(?expected, "validate cache entry"))
790                .map(|r| (scope, r))
791        } else if let Some(result) =
792            self.lookup_global_cache(cx, input, step_kind_from_parent, available_depth)
793        {
794            return result;
795        } else {
796            None
797        };
798
799        // Detect cycles on the stack. We do this after the global cache lookup to
800        // avoid iterating over the stack in case a goal has already been computed.
801        // This may not have an actual performance impact and we could reorder them
802        // as it may reduce the number of `nested_goals` we need to track.
803        if let Some(result) = self.check_cycle_on_stack(cx, input, step_kind_from_parent) {
804            debug_assert!(validate_cache.is_none(), "global cache and cycle on stack: {input:?}");
805            return result;
806        }
807
808        // Unfortunately, it looks like we actually have to compute this goal.
809        self.stack.push(StackEntry {
810            input,
811            step_kind_from_parent,
812            available_depth,
813            provisional_result: None,
814            required_depth: 0,
815            heads: Default::default(),
816            encountered_overflow: false,
817            usages: None,
818            candidate_usages: None,
819            nested_goals: Default::default(),
820        });
821
822        // This is for global caching, so we properly track query dependencies.
823        // Everything that affects the `result` should be performed within this
824        // `with_cached_task` closure. If computing this goal depends on something
825        // not tracked by the cache key and from outside of this anon task, it
826        // must not be added to the global cache. Notably, this is the case for
827        // trait solver cycle participants.
828        let (evaluation_result, dep_node) =
829            cx.with_cached_task(|| self.evaluate_goal_in_task(cx, input, inspect));
830
831        // We've finished computing the goal and have popped it from the stack,
832        // lazily update its parent goal.
833        Self::update_parent_goal(
834            &mut self.stack,
835            step_kind_from_parent,
836            evaluation_result.required_depth,
837            evaluation_result.heads.iter(),
838            evaluation_result.encountered_overflow,
839            UpdateParentGoalCtxt::Ordinary(&evaluation_result.nested_goals),
840        );
841        let result = evaluation_result.result;
842
843        // We're now done with this goal. We only add the root of cycles to the global cache.
844        // In case this goal is involved in a larger cycle, add it to the provisional cache.
845        if evaluation_result.heads.is_empty() {
846            if let Some((_scope, expected)) = validate_cache {
847                // Do not try to move a goal into the cache again if we're testing
848                // the global cache.
849                assert_eq!(expected, evaluation_result.result, "input={input:?}");
850            } else if D::inspect_is_noop(inspect) {
851                self.insert_global_cache(cx, input, evaluation_result, dep_node)
852            }
853        } else if D::ENABLE_PROVISIONAL_CACHE {
854            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
855            let entry = self.provisional_cache.entry(input).or_default();
856            let EvaluationResult {
857                encountered_overflow,
858                required_depth: _,
859                heads,
860                nested_goals: _,
861                result,
862            } = evaluation_result;
863            let path_from_head = Self::cycle_path_kind(
864                &self.stack,
865                step_kind_from_parent,
866                heads.highest_cycle_head_index(),
867            );
868            let provisional_cache_entry =
869                ProvisionalCacheEntry { encountered_overflow, heads, path_from_head, result };
870            debug!(?provisional_cache_entry);
871            entry.push(provisional_cache_entry);
872        } else {
873            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
874        }
875
876        result
877    }
878
879    fn handle_overflow(&mut self, cx: X, input: X::Input) -> X::Result {
880        if let Some(last) = self.stack.last_mut() {
881            last.encountered_overflow = true;
882            // If computing a goal `B` depends on another goal `A` and
883            // `A` has a nested goal which overflows, then computing `B`
884            // at the same depth, but with `A` already on the stack,
885            // would encounter a solver cycle instead, potentially
886            // changing the result.
887            //
888            // We must therefore not use the global cache entry for `B` in that case.
889            // See tests/ui/traits/next-solver/cycles/hidden-by-overflow.rs
890            last.nested_goals.insert(last.input, PathsToNested::EMPTY);
891        }
892
893        debug!("encountered stack overflow");
894        D::stack_overflow_result(cx, input)
895    }
896
897    /// When reevaluating a goal with a changed provisional result, all provisional cache entries
898    /// which depend on this goal get invalidated.
899    ///
900    /// Note that we keep provisional cache entries which accessed this goal as a cycle head, but
901    /// don't depend on its value.
902    fn clear_dependent_provisional_results_for_rerun(&mut self) {
903        let rerun_index = self.stack.next_index();
904        #[allow(rustc::potential_query_instability)]
905        self.provisional_cache.retain(|_, entries| {
906            entries.retain(|entry| {
907                let (head_index, head) = entry.heads.highest_cycle_head();
908                head_index != rerun_index || head.usages.is_empty()
909            });
910            !entries.is_empty()
911        });
912    }
913}
914
915/// We need to rebase provisional cache entries when popping one of their cycle
916/// heads from the stack. This does not necessarily mean that we've actually
917/// reached a fixpoint for that cycle head, which impacts the way we rebase
918/// provisional cache entries.
919enum RebaseReason<X: Cx> {
920    NoCycleUsages,
921    Ambiguity(X::AmbiguityInfo),
922    Overflow,
923    /// We've actually reached a fixpoint.
924    ///
925    /// This either happens in the first evaluation step for the cycle head.
926    /// In this case the used provisional result depends on the cycle `PathKind`.
927    /// We store this path kind to check whether the provisional cache entry
928    /// we're rebasing relied on the same cycles.
929    ///
930    /// In later iterations cycles always return `stack_entry.provisional_result`
931    /// so we no longer depend on the `PathKind`. We store `None` in that case.
932    ReachedFixpoint(Option<PathKind>),
933}
934
935impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
936    /// A necessary optimization to handle complex solver cycles. A provisional cache entry
937    /// relies on a set of cycle heads and the path towards these heads. When popping a cycle
938    /// head from the stack after we've finished computing it, we can't be sure that the
939    /// provisional cache entry is still applicable. We need to keep the cache entries to
940    /// prevent hangs.
941    ///
942    /// This can be thought of as pretending to reevaluate the popped head as nested goals
943    /// of this provisional result. For this to be correct, all cycles encountered while
944    /// we'd reevaluate the cycle head as a nested goal must keep the same cycle kind.
945    /// For more details, see the [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
946    ///
947    /// In case the popped cycle head failed to reach a fixpoint anything which depends on
948    /// its provisional result is invalid. Actually discarding provisional cache entries in
949    /// this case would cause hangs, so we instead change the result of dependent provisional
950    /// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested
951    /// goals whose result doesn't actually depend on this cycle head, but that's acceptable
952    /// to me.
953    fn rebase_provisional_cache_entries(
954        &mut self,
955        cx: X,
956        stack_entry: &StackEntry<X>,
957        rebase_reason: RebaseReason<X>,
958    ) {
959        let popped_head_index = self.stack.next_index();
960        #[allow(rustc::potential_query_instability)]
961        self.provisional_cache.retain(|&input, entries| {
962            entries.retain_mut(|entry| {
963                let ProvisionalCacheEntry {
964                    encountered_overflow: _,
965                    heads,
966                    path_from_head,
967                    result,
968                } = entry;
969                let popped_head = if heads.highest_cycle_head_index() == popped_head_index {
970                    heads.remove_highest_cycle_head()
971                } else {
972                    return true;
973                };
974
975                // We're rebasing an entry `e` over a head `p`. This head
976                // has a number of own heads `h` it depends on.
977                //
978                // This causes our provisional result to depend on the heads
979                // of `p` to avoid moving any goal which uses this cache entry to
980                // the global cache.
981                if popped_head.usages.is_empty() {
982                    // The result of `e` does not depend on the value of `p`. Thus we can
983                    // keep using the result of this provisional cache entry even if evaluating
984                    // `p` as a nested goal of `e` would have a different result.
985                    for (head_index, _) in stack_entry.heads.iter() {
986                        heads.insert(head_index, PathsToNested::EMPTY, HeadUsages::default());
987                    }
988                } else {
989                    // The entry `e` actually depends on the value of `p`. We need
990                    // to make sure that the value of `p` wouldn't change even if we
991                    // were to reevaluate it as a nested goal of `e` instead. For this
992                    // we check that the path kind of all paths `hph` remains the
993                    // same after rebasing.
994                    //
995                    // After rebasing, the cycles `hph` will go through `e`. We need to make
996                    // sure that for all possible paths `hep`, `heph` is equal to `hph`.
997                    let ep = popped_head.paths_to_head;
998                    for (head_index, head) in stack_entry.heads.iter() {
999                        let ph = head.paths_to_head;
1000                        let hp = Self::cycle_path_kind(
1001                            &self.stack,
1002                            stack_entry.step_kind_from_parent,
1003                            head_index,
1004                        );
1005                        // We first validate that all cycles while computing `p` would stay
1006                        // the same if we were to recompute it as a nested goal of `e`.
1007                        let he = hp.extend(*path_from_head);
1008                        for ph in ph.iter_paths() {
1009                            let hph = hp.extend(ph);
1010                            for ep in ep.iter_paths() {
1011                                let hep = ep.extend(he);
1012                                let heph = hep.extend(ph);
1013                                if hph != heph {
1014                                    return false;
1015                                }
1016                            }
1017                        }
1018
1019                        // If so, all paths reached while computing `p` have to get added
1020                        // to the heads of `e` to make sure that rebasing `e` again also considers
1021                        // them.
1022                        let eph = ep.extend_with_paths(ph);
1023                        heads.insert(head_index, eph, head.usages);
1024                    }
1025
1026                    // The provisional cache entry does depend on the provisional result
1027                    // of the popped cycle head. We need to mutate the result of our
1028                    // provisional cache entry in case we did not reach a fixpoint.
1029                    match rebase_reason {
1030                        // If the cycle head does not actually depend on itself, then
1031                        // the provisional result used by the provisional cache entry
1032                        // is not actually equal to the final provisional result. We
1033                        // need to discard the provisional cache entry in this case.
1034                        RebaseReason::NoCycleUsages => return false,
1035                        RebaseReason::Ambiguity(info) => {
1036                            *result = D::propagate_ambiguity(cx, input, info);
1037                        }
1038                        RebaseReason::Overflow => *result = D::fixpoint_overflow_result(cx, input),
1039                        RebaseReason::ReachedFixpoint(None) => {}
1040                        RebaseReason::ReachedFixpoint(Some(path_kind)) => {
1041                            if !popped_head.usages.is_single(path_kind) {
1042                                return false;
1043                            }
1044                        }
1045                    };
1046                }
1047
1048                let Some(new_highest_head_index) = heads.opt_highest_cycle_head_index() else {
1049                    return false;
1050                };
1051
1052                // We now care about the path from the next highest cycle head to the
1053                // provisional cache entry.
1054                *path_from_head = path_from_head.extend(Self::cycle_path_kind(
1055                    &self.stack,
1056                    stack_entry.step_kind_from_parent,
1057                    new_highest_head_index,
1058                ));
1059
1060                true
1061            });
1062            !entries.is_empty()
1063        });
1064    }
1065
1066    fn lookup_provisional_cache(
1067        &mut self,
1068        input: X::Input,
1069        step_kind_from_parent: PathKind,
1070    ) -> Option<X::Result> {
1071        if !D::ENABLE_PROVISIONAL_CACHE {
1072            return None;
1073        }
1074
1075        let entries = self.provisional_cache.get(&input)?;
1076        for &ProvisionalCacheEntry { encountered_overflow, ref heads, path_from_head, result } in
1077            entries
1078        {
1079            let head_index = heads.highest_cycle_head_index();
1080            if encountered_overflow {
1081                // This check is overly strict and very subtle. We need to make sure that if
1082                // a global cache entry depends on some goal without adding it to its
1083                // `nested_goals`, that goal must never have an applicable provisional
1084                // cache entry to avoid incorrectly applying the cache entry.
1085                //
1086                // As we'd have to otherwise track literally all nested goals, we only
1087                // apply provisional cache entries which encountered overflow once the
1088                // current goal is already part of the same cycle. This check could be
1089                // improved but seems to be good enough for now.
1090                let last = self.stack.last().unwrap();
1091                if last.heads.opt_lowest_cycle_head_index().is_none_or(|lowest| lowest > head_index)
1092                {
1093                    continue;
1094                }
1095            }
1096
1097            // A provisional cache entry is only valid if the current path from its
1098            // highest cycle head to the goal is the same.
1099            if path_from_head
1100                == Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index)
1101            {
1102                Self::update_parent_goal(
1103                    &mut self.stack,
1104                    step_kind_from_parent,
1105                    0,
1106                    heads.iter(),
1107                    encountered_overflow,
1108                    UpdateParentGoalCtxt::ProvisionalCacheHit,
1109                );
1110                debug!(?head_index, ?path_from_head, "provisional cache hit");
1111                return Some(result);
1112            }
1113        }
1114
1115        None
1116    }
1117
1118    /// Even if there is a global cache entry for a given goal, we need to make sure
1119    /// evaluating this entry would not have ended up depending on either a goal
1120    /// already on the stack or a provisional cache entry.
1121    fn candidate_is_applicable(
1122        &self,
1123        step_kind_from_parent: PathKind,
1124        nested_goals: &NestedGoals<X>,
1125    ) -> bool {
1126        // If the global cache entry didn't depend on any nested goals, it always
1127        // applies.
1128        if nested_goals.is_empty() {
1129            return true;
1130        }
1131
1132        // If a nested goal of the global cache entry is on the stack, we would
1133        // definitely encounter a cycle.
1134        if self.stack.iter().any(|e| nested_goals.contains(e.input)) {
1135            debug!("cache entry not applicable due to stack");
1136            return false;
1137        }
1138
1139        // The global cache entry is also invalid if there's a provisional cache entry
1140        // which would apply for any of its nested goals.
1141        #[allow(rustc::potential_query_instability)]
1142        for (input, path_from_global_entry) in nested_goals.iter() {
1143            let Some(entries) = self.provisional_cache.get(&input) else {
1144                continue;
1145            };
1146
1147            debug!(?input, ?path_from_global_entry, ?entries, "candidate_is_applicable");
1148            // A provisional cache entry is applicable if the path to
1149            // its highest cycle head is equal to the expected path.
1150            for &ProvisionalCacheEntry {
1151                encountered_overflow,
1152                ref heads,
1153                path_from_head: head_to_provisional,
1154                result: _,
1155            } in entries.iter()
1156            {
1157                // We don't have to worry about provisional cache entries which encountered
1158                // overflow, see the relevant comment in `lookup_provisional_cache`.
1159                if encountered_overflow {
1160                    continue;
1161                }
1162
1163                // A provisional cache entry only applies if the path from its highest head
1164                // matches the path when encountering the goal.
1165                //
1166                // We check if any of the paths taken while computing the global goal
1167                // would end up with an applicable provisional cache entry.
1168                let head_index = heads.highest_cycle_head_index();
1169                let head_to_curr =
1170                    Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
1171                let full_paths = path_from_global_entry.extend_with(head_to_curr);
1172                if full_paths.contains(head_to_provisional.into()) {
1173                    debug!(
1174                        ?full_paths,
1175                        ?head_to_provisional,
1176                        "cache entry not applicable due to matching paths"
1177                    );
1178                    return false;
1179                }
1180            }
1181        }
1182
1183        true
1184    }
1185
1186    /// Used when fuzzing the global cache. Accesses the global cache without
1187    /// updating the state of the search graph.
1188    fn lookup_global_cache_untracked(
1189        &self,
1190        cx: X,
1191        input: X::Input,
1192        step_kind_from_parent: PathKind,
1193        available_depth: AvailableDepth,
1194    ) -> Option<X::Result> {
1195        cx.with_global_cache(|cache| {
1196            cache
1197                .get(cx, input, available_depth, |nested_goals| {
1198                    self.candidate_is_applicable(step_kind_from_parent, nested_goals)
1199                })
1200                .map(|c| c.result)
1201        })
1202    }
1203
1204    /// Try to fetch a previously computed result from the global cache,
1205    /// making sure to only do so if it would match the result of reevaluating
1206    /// this goal.
1207    fn lookup_global_cache(
1208        &mut self,
1209        cx: X,
1210        input: X::Input,
1211        step_kind_from_parent: PathKind,
1212        available_depth: AvailableDepth,
1213    ) -> Option<X::Result> {
1214        cx.with_global_cache(|cache| {
1215            let CacheData { result, required_depth, encountered_overflow, nested_goals } = cache
1216                .get(cx, input, available_depth, |nested_goals| {
1217                    self.candidate_is_applicable(step_kind_from_parent, nested_goals)
1218                })?;
1219
1220            // We don't move cycle participants to the global cache, so the
1221            // cycle heads are always empty.
1222            let heads = iter::empty();
1223            Self::update_parent_goal(
1224                &mut self.stack,
1225                step_kind_from_parent,
1226                required_depth,
1227                heads,
1228                encountered_overflow,
1229                UpdateParentGoalCtxt::Ordinary(nested_goals),
1230            );
1231
1232            debug!(?required_depth, "global cache hit");
1233            Some(result)
1234        })
1235    }
1236
1237    fn check_cycle_on_stack(
1238        &mut self,
1239        cx: X,
1240        input: X::Input,
1241        step_kind_from_parent: PathKind,
1242    ) -> Option<X::Result> {
1243        let head_index = self.stack.find(input)?;
1244        // We have a nested goal which directly relies on a goal deeper in the stack.
1245        //
1246        // We start by tagging all cycle participants, as that's necessary for caching.
1247        //
1248        // Finally we can return either the provisional response or the initial response
1249        // in case we're in the first fixpoint iteration for this goal.
1250        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
1251        debug!(?path_kind, "encountered cycle with depth {head_index:?}");
1252        let mut usages = HeadUsages::default();
1253        usages.add_usage(path_kind);
1254        let head = CycleHead { paths_to_head: step_kind_from_parent.into(), usages };
1255        Self::update_parent_goal(
1256            &mut self.stack,
1257            step_kind_from_parent,
1258            0,
1259            iter::once((head_index, head)),
1260            false,
1261            UpdateParentGoalCtxt::CycleOnStack(input),
1262        );
1263
1264        // Return the provisional result or, if we're in the first iteration,
1265        // start with no constraints.
1266        if let Some(result) = self.stack[head_index].provisional_result {
1267            Some(result)
1268        } else {
1269            Some(D::initial_provisional_result(cx, path_kind, input))
1270        }
1271    }
1272
1273    /// Whether we've reached a fixpoint when evaluating a cycle head.
1274    #[instrument(level = "trace", skip(self, stack_entry), ret)]
1275    fn reached_fixpoint(
1276        &mut self,
1277        stack_entry: &StackEntry<X>,
1278        usages: HeadUsages,
1279        result: X::Result,
1280    ) -> Result<Option<PathKind>, ()> {
1281        let provisional_result = stack_entry.provisional_result;
1282        if let Some(provisional_result) = provisional_result {
1283            if provisional_result == result { Ok(None) } else { Err(()) }
1284        } else if let Some(path_kind) = D::is_initial_provisional_result(result)
1285            .filter(|&path_kind| usages.is_single(path_kind))
1286        {
1287            Ok(Some(path_kind))
1288        } else {
1289            Err(())
1290        }
1291    }
1292
1293    /// When this goal is the head of a cycle, whether inductive or coinductive,
1294    /// nested goals fetch its result while we are still computing it. Because of
1295    /// this we keep reevaluating the goal until the result of the previous
1296    /// iteration is equal to the final result, at which point we've reached a
1297    /// fixpoint and are done.
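    ///
    /// A minimal sketch of this fixpoint loop, ignoring the ambiguity and overflow
    /// fast paths as well as provisional and global cache maintenance; all names
    /// here are hypothetical:
    ///
    /// ```ignore (illustrative sketch)
    /// let mut provisional_result = None;
    /// let final_result = loop {
    ///     // Cycles with this goal as their head use `provisional_result`, falling
    ///     // back to an initial provisional result in the first iteration.
    ///     let result = compute_goal(provisional_result);
    ///     if !used_as_cycle_head() || Some(result) == provisional_result {
    ///         break result;
    ///     }
    ///     provisional_result = Some(result);
    /// };
    /// ```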
1298    fn evaluate_goal_in_task(
1299        &mut self,
1300        cx: X,
1301        input: X::Input,
1302        inspect: &mut D::ProofTreeBuilder,
1303    ) -> EvaluationResult<X> {
1304        // We reset `encountered_overflow` each time we rerun this goal
1305        // but need to make sure we correctly propagate it to the global
1306        // cache even if only some of the evaluations actually reach the
1307        // recursion limit.
1308        let mut encountered_overflow = false;
1309        let mut i = 0;
1310        loop {
1311            let result = D::compute_goal(self, cx, input, inspect);
1312            let stack_entry = self.stack.pop();
1313            encountered_overflow |= stack_entry.encountered_overflow;
1314            debug_assert_eq!(stack_entry.input, input);
1315
1316            // If the current goal is not a cycle head, we are done.
1317            //
1318            // There are no provisional cache entries which depend on this goal.
1319            let Some(usages) = stack_entry.usages else {
1320                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
1321            };
1322
1323            // If it is a cycle head, we have to keep trying to prove it until
1324            // we reach a fixpoint. We need to do so for all cycle heads,
1325            // not only for the root.
1326            //
1327            // See tests/ui/traits/next-solver/cycles/fixpoint-rerun-all-cycle-heads.rs
1328            // for an example.
1329            //
1330            // Check whether we reached a fixpoint, either because the final result
1331            // is equal to the provisional result of the previous iteration, or because
1332            // this goal was only used as the head of cycles of a single kind and the
1333            // final result is equal to the initial provisional result for that kind.
1334            if let Ok(fixpoint) = self.reached_fixpoint(&stack_entry, usages, result) {
1335                self.rebase_provisional_cache_entries(
1336                    cx,
1337                    &stack_entry,
1338                    RebaseReason::ReachedFixpoint(fixpoint),
1339                );
1340                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
1341            } else if usages.is_empty() {
1342                self.rebase_provisional_cache_entries(
1343                    cx,
1344                    &stack_entry,
1345                    RebaseReason::NoCycleUsages,
1346                );
1347                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
1348            }
1349
1350            // If computing this goal results in ambiguity with no constraints,
1351            // we do not rerun it. It's incredibly difficult to get a different
1352            // response in the next iteration in this case. Any such change would
1353            // likely either be caused by incompleteness or would merely change the
1354            // maybe cause from ambiguity to overflow. Returning ambiguity always
1355            // preserves soundness and completeness even if the goal is otherwise
1356            // known to succeed or fail.
1357            //
1358            // This prevents exponential blowup affecting multiple major crates.
1359            // As we only get to this branch if we haven't yet reached a fixpoint,
1360            // we also taint all provisional cache entries which depend on the
1361            // current goal.
1362            if let Some(info) = D::is_ambiguous_result(result) {
1363                self.rebase_provisional_cache_entries(
1364                    cx,
1365                    &stack_entry,
1366                    RebaseReason::Ambiguity(info),
1367                );
1368                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
1369            };
1370
1371            // If we've reached the fixpoint step limit, we bail with overflow and taint all
1372            // provisional cache entries which depend on the current goal.
1373            i += 1;
1374            if i >= D::FIXPOINT_STEP_LIMIT {
1375                debug!("canonical cycle overflow");
1376                let result = D::fixpoint_overflow_result(cx, input);
1377                self.rebase_provisional_cache_entries(cx, &stack_entry, RebaseReason::Overflow);
1378                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
1379            }
1380
1381            // Clear all provisional cache entries which depend on a previous provisional
1382            // result of this goal and rerun.
1383            self.clear_dependent_provisional_results_for_rerun();
1384
1385            debug!(?result, "fixpoint changed provisional results");
1386            self.stack.push(StackEntry {
1387                input,
1388                step_kind_from_parent: stack_entry.step_kind_from_parent,
1389                available_depth: stack_entry.available_depth,
1390                provisional_result: Some(result),
1391                // We can keep these goals from previous iterations as they are only
1392                // ever read after finalizing this evaluation.
1393                required_depth: stack_entry.required_depth,
1394                heads: stack_entry.heads,
1395                nested_goals: stack_entry.nested_goals,
1396                // We reset these two fields when rerunning this goal. We could
1397                // keep `encountered_overflow` as it's only used as a performance
1398                // optimization. However, given that the proof tree will likely look
1399                // similar to the previous iterations when reevaluating, it's better
1400                // for caching if the reevaluation also starts out with `false`.
1401                encountered_overflow: false,
1402                usages: None,
1403                candidate_usages: None,
1404            });
1405        }
1406    }
1407
1408    /// When encountering a cycle, whether inductive or coinductive, we only
1409    /// move the root into the global cache. The cache entry also records all
1410    /// other cycle participants involved.
1411    ///
1412    /// We must not use the global cache entry of a root goal if a cycle
1413    /// participant is on the stack. This is necessary to prevent unstable
1414    /// results. See the comment of `StackEntry::nested_goals` for
1415    /// more details.
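    ///
    /// As an example, consider caching the root `A` of a cycle `A -> B -> A`: if we
    /// later used that entry while `B` is already on the stack, we would fail to
    /// detect the cycle from `A` back to `B`, potentially changing the result
    /// compared to reevaluating `A` from scratch.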
1416    fn insert_global_cache(
1417        &mut self,
1418        cx: X,
1419        input: X::Input,
1420        evaluation_result: EvaluationResult<X>,
1421        dep_node: X::DepNodeIndex,
1422    ) {
1423        debug!(?evaluation_result, "insert global cache");
1424        cx.with_global_cache(|cache| cache.insert(cx, input, evaluation_result, dep_node))
1425    }
1426}