rustc_type_ir/search_graph/mod.rs
1//! The search graph is responsible for caching and cycle detection in the trait
2//! solver. Making sure that caching doesn't result in soundness bugs or unstable
3//! query results is very challenging and makes this one of the most-involved
4//! self-contained components of the compiler.
5//!
6//! We added fuzzing support to test its correctness. The fuzzers used to verify
7//! the current implementation can be found in <https://github.com/lcnr/search_graph_fuzz>.
8//!
9//! This is just a quick overview of the general design, please check out the relevant
10//! [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for
11//! more details. Caching is split between a global cache and the per-cycle `provisional_cache`.
12//! The global cache has to be completely unobservable, while the per-cycle cache may impact
13//! behavior as long as the resulting behavior is still correct.
14use std::cmp::Ordering;
15use std::collections::hash_map::Entry;
16use std::collections::{BTreeMap, btree_map};
17use std::fmt::Debug;
18use std::hash::Hash;
19use std::iter;
20use std::marker::PhantomData;
21
22use derive_where::derive_where;
23#[cfg(feature = "nightly")]
24use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_NoContext};
25use rustc_type_ir::data_structures::HashMap;
26use tracing::{debug, instrument, trace};
27
28mod stack;
29use stack::{Stack, StackDepth, StackEntry};
30mod global_cache;
31use global_cache::CacheData;
32pub use global_cache::GlobalCache;
33
/// The search graph does not simply use `Interner` directly
/// to enable its fuzzing without having to stub the rest of
/// the interner. We don't make this a super trait of `Interner`
/// as users of the shared type library shouldn't have to care
/// about `Input` and `Result` as they are implementation details
/// of the search graph.
pub trait Cx: Copy {
    /// The goals cached by the search graph.
    type Input: Debug + Eq + Hash + Copy;
    /// The result of evaluating a goal.
    type Result: Debug + Eq + Hash + Copy;
    /// Extra information extracted from ambiguous results, see
    /// [`Delegate::is_ambiguous_result`] and [`Delegate::propagate_ambiguity`].
    type AmbiguityInfo: Debug + Eq + Hash + Copy;

    /// Handle identifying a node of the dependency graph.
    type DepNodeIndex;
    /// A value stored together with a `DepNodeIndex`, created via `mk_tracked`
    /// and read back via `get_tracked`.
    type Tracked<T: Debug + Clone>: Debug;
    /// Wrap `data` together with the dependency node it was computed in.
    /// NOTE(review): presumably used to replay query dependencies when
    /// reading global cache entries — confirm against the rustc implementor.
    fn mk_tracked<T: Debug + Clone>(
        self,
        data: T,
        dep_node_index: Self::DepNodeIndex,
    ) -> Self::Tracked<T>;
    /// Read a tracked value back out, see `mk_tracked`.
    fn get_tracked<T: Debug + Clone>(self, tracked: &Self::Tracked<T>) -> T;
    /// Run `task`, returning its result together with the dependency node
    /// created for it. Used when computing a goal which may get moved to
    /// the global cache, see [`SearchGraph::evaluate_goal`].
    fn with_cached_task<T>(self, task: impl FnOnce() -> T) -> (T, Self::DepNodeIndex);

    /// Provide access to the global cache shared between evaluations.
    fn with_global_cache<R>(self, f: impl FnOnce(&mut GlobalCache<Self>) -> R) -> R;

    /// NOTE(review): inferred from the name — asserts that concurrently
    /// evaluating the same goal is expected; confirm against implementors.
    fn assert_evaluation_is_concurrent(&self);
}
59
pub trait Delegate: Sized {
    type Cx: Cx;
    /// Whether to use the provisional cache. Set to `false` by a fuzzer when
    /// validating the search graph.
    const ENABLE_PROVISIONAL_CACHE: bool;
    /// Scope guard returned by [`Delegate::enter_validation_scope`], kept
    /// alive while validating a global cache entry.
    type ValidationScope;
    /// Returning `Some` disables the global cache for the current goal.
    ///
    /// The `ValidationScope` is used when fuzzing the search graph to track
    /// for which goals the global cache has been disabled. This is necessary
    /// as we may otherwise ignore the global cache entry for some goal `G`
    /// only to later use it, failing to detect a cycle goal and potentially
    /// changing the result.
    fn enter_validation_scope(
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
    ) -> Option<Self::ValidationScope>;

    /// The maximum number of fixpoint steps for a cycle. NOTE(review):
    /// inferred from the name and `fixpoint_overflow_result` — confirm
    /// against the fixpoint loop in `evaluate_goal_in_task`.
    const FIXPOINT_STEP_LIMIT: usize;

    /// Builder for proof trees. The global cache is not used while
    /// building a proof tree, see [`SearchGraph::evaluate_goal`].
    type ProofTreeBuilder;
    /// Whether the proof tree builder discards all data, i.e. whether we
    /// are *not* currently building a proof tree.
    fn inspect_is_noop(inspect: &mut Self::ProofTreeBuilder) -> bool;

    /// The divisor applied to the available depth of nested goals after
    /// encountering overflow, see [`AvailableDepth::allowed_depth_for_nested`].
    const DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW: usize;

    /// The provisional result used for a cycle head before its first
    /// evaluation has finished, depending on the kind of cycle.
    fn initial_provisional_result(
        cx: Self::Cx,
        kind: PathKind,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;
    /// If `result` is an initial provisional result, the `PathKind` it
    /// corresponds to.
    fn is_initial_provisional_result(result: <Self::Cx as Cx>::Result) -> Option<PathKind>;
    /// The result returned when hitting the recursion depth limit,
    /// see [`SearchGraph::handle_overflow`].
    fn stack_overflow_result(
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;
    /// The result returned when a cycle fails to reach a fixpoint.
    fn fixpoint_overflow_result(
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;

    /// If `result` is ambiguous, its ambiguity info.
    fn is_ambiguous_result(
        result: <Self::Cx as Cx>::Result,
    ) -> Option<<Self::Cx as Cx>::AmbiguityInfo>;
    /// Build an ambiguous result for `for_input` carrying the given
    /// ambiguity info.
    fn propagate_ambiguity(
        cx: Self::Cx,
        for_input: <Self::Cx as Cx>::Input,
        ambiguity_info: <Self::Cx as Cx>::AmbiguityInfo,
    ) -> <Self::Cx as Cx>::Result;

    /// Actually evaluate the goal `input`; the search graph only handles
    /// caching, overflow, and cycles around this.
    fn compute_goal(
        search_graph: &mut SearchGraph<Self>,
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
        inspect: &mut Self::ProofTreeBuilder,
    ) -> <Self::Cx as Cx>::Result;
}
116
/// In the initial iteration of a cycle, we do not yet have a provisional
/// result. In this case we return an initial provisional result depending
/// on the kind of cycle.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Decodable_NoContext, Encodable_NoContext, HashStable_NoContext)
)]
pub enum PathKind {
    /// A path consisting of only inductive/unproductive steps. Their initial
    /// provisional result is `Err(NoSolution)`. We currently treat them as
    /// `PathKind::Unknown` during coherence until we're fully confident in
    /// our approach.
    Inductive,
    /// A path which is not coinductive right now, but which we may want
    /// to change to be coinductive in the future. We return an ambiguous
    /// result in this case to prevent people from relying on this.
    Unknown,
    /// A path with at least one coinductive step. Such cycles hold.
    Coinductive,
    /// A path which is treated as ambiguous. Once a path has this path kind
    /// any other segment does not change its kind.
    ///
    /// This is currently only used when fuzzing to support negative reasoning.
    /// For more details, see #143054.
    ForcedAmbiguity,
}
144
145impl PathKind {
146 /// Returns the path kind when merging `self` with `rest`.
147 ///
148 /// Given an inductive path `self` and a coinductive path `rest`,
149 /// the path `self -> rest` would be coinductive.
150 ///
151 /// This operation represents an ordering and would be equivalent
152 /// to `max(self, rest)`.
153 fn extend(self, rest: PathKind) -> PathKind {
154 match (self, rest) {
155 (PathKind::ForcedAmbiguity, _) | (_, PathKind::ForcedAmbiguity) => {
156 PathKind::ForcedAmbiguity
157 }
158 (PathKind::Coinductive, _) | (_, PathKind::Coinductive) => PathKind::Coinductive,
159 (PathKind::Unknown, _) | (_, PathKind::Unknown) => PathKind::Unknown,
160 (PathKind::Inductive, PathKind::Inductive) => PathKind::Inductive,
161 }
162 }
163}
164
/// The kinds of cycles a cycle head was involved in.
///
/// This is used to avoid rerunning a cycle if there's
/// just a single usage kind and the final result matches
/// its provisional result.
///
/// While it tracks the amount of usages using `u32`, we only ever
/// care whether there are any. We only count them to be able to ignore
/// usages from irrelevant candidates while evaluating a goal.
///
/// This cares about how nested goals relied on a cycle head. It does
/// not care about how frequently the nested goal relied on it.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
struct HeadUsages {
    // The number of usages via a `PathKind::Inductive` path.
    inductive: u32,
    // The number of usages via a `PathKind::Unknown` path.
    unknown: u32,
    // The number of usages via a `PathKind::Coinductive` path.
    coinductive: u32,
    // The number of usages via a `PathKind::ForcedAmbiguity` path.
    forced_ambiguity: u32,
}
184
185impl HeadUsages {
186 fn add_usage(&mut self, path: PathKind) {
187 match path {
188 PathKind::Inductive => self.inductive += 1,
189 PathKind::Unknown => self.unknown += 1,
190 PathKind::Coinductive => self.coinductive += 1,
191 PathKind::ForcedAmbiguity => self.forced_ambiguity += 1,
192 }
193 }
194
195 /// This adds the usages which occurred while computing a nested goal.
196 ///
197 /// We don't actually care about how frequently the nested goal relied
198 /// on its cycle heads, only whether it did.
199 fn add_usages_from_nested(&mut self, usages: HeadUsages) {
200 let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = usages;
201 self.inductive += if inductive == 0 { 0 } else { 1 };
202 self.unknown += if unknown == 0 { 0 } else { 1 };
203 self.coinductive += if coinductive == 0 { 0 } else { 1 };
204 self.forced_ambiguity += if forced_ambiguity == 0 { 0 } else { 1 };
205 }
206
207 fn ignore_usages(&mut self, usages: HeadUsages) {
208 let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = usages;
209 self.inductive = self.inductive.checked_sub(inductive).unwrap();
210 self.unknown = self.unknown.checked_sub(unknown).unwrap();
211 self.coinductive = self.coinductive.checked_sub(coinductive).unwrap();
212 self.forced_ambiguity = self.forced_ambiguity.checked_sub(forced_ambiguity).unwrap();
213 }
214
215 fn is_empty(self) -> bool {
216 let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = self;
217 inductive == 0 && unknown == 0 && coinductive == 0 && forced_ambiguity == 0
218 }
219
220 fn is_single(self, path_kind: PathKind) -> bool {
221 match path_kind {
222 PathKind::Inductive => matches!(
223 self,
224 HeadUsages { inductive: _, unknown: 0, coinductive: 0, forced_ambiguity: 0 },
225 ),
226 PathKind::Unknown => matches!(
227 self,
228 HeadUsages { inductive: 0, unknown: _, coinductive: 0, forced_ambiguity: 0 },
229 ),
230 PathKind::Coinductive => matches!(
231 self,
232 HeadUsages { inductive: 0, unknown: 0, coinductive: _, forced_ambiguity: 0 },
233 ),
234 PathKind::ForcedAmbiguity => matches!(
235 self,
236 HeadUsages { inductive: 0, unknown: 0, coinductive: 0, forced_ambiguity: _ },
237 ),
238 }
239 }
240}
241
/// The cycle head usages which occurred while evaluating a single candidate,
/// tracked between [`SearchGraph::enter_single_candidate`] and
/// [`SearchGraph::finish_single_candidate`].
#[derive(Debug, Default)]
pub struct CandidateHeadUsages {
    // Lazily allocated so candidates which never use a cycle head
    // stay allocation-free.
    usages: Option<Box<HashMap<StackDepth, HeadUsages>>>,
}
246impl CandidateHeadUsages {
247 pub fn merge_usages(&mut self, other: CandidateHeadUsages) {
248 if let Some(other_usages) = other.usages {
249 if let Some(ref mut self_usages) = self.usages {
250 #[allow(rustc::potential_query_instability)]
251 for (head_index, head) in other_usages.into_iter() {
252 let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = head;
253 let self_usages = self_usages.entry(head_index).or_default();
254 self_usages.inductive += inductive;
255 self_usages.unknown += unknown;
256 self_usages.coinductive += coinductive;
257 self_usages.forced_ambiguity += forced_ambiguity;
258 }
259 } else {
260 self.usages = Some(other_usages);
261 }
262 }
263 }
264}
265
266#[derive(Debug, Clone, Copy)]
267struct AvailableDepth(usize);
268impl AvailableDepth {
269 /// Returns the remaining depth allowed for nested goals.
270 ///
271 /// This is generally simply one less than the current depth.
272 /// However, if we encountered overflow, we significantly reduce
273 /// the remaining depth of all nested goals to prevent hangs
274 /// in case there is exponential blowup.
275 fn allowed_depth_for_nested<D: Delegate>(
276 root_depth: AvailableDepth,
277 stack: &Stack<D::Cx>,
278 ) -> Option<AvailableDepth> {
279 if let Some(last) = stack.last() {
280 if last.available_depth.0 == 0 {
281 return None;
282 }
283
284 Some(if last.encountered_overflow {
285 AvailableDepth(last.available_depth.0 / D::DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW)
286 } else {
287 AvailableDepth(last.available_depth.0 - 1)
288 })
289 } else {
290 Some(root_depth)
291 }
292 }
293
294 /// Whether we're allowed to use a global cache entry which required
295 /// the given depth.
296 fn cache_entry_is_applicable(self, additional_depth: usize) -> bool {
297 self.0 >= additional_depth
298 }
299}
300
/// A single cycle head a goal depends on, tracking both the paths to
/// that head and how its provisional result was used.
#[derive(Clone, Copy, Debug)]
struct CycleHead {
    /// All kinds of paths from the current goal to this cycle head.
    paths_to_head: PathsToNested,
    /// If the `usages` are empty, the result of that head does not matter
    /// for the current goal. However, we still don't completely drop this
    /// cycle head as whether or not it exists impacts which queries we
    /// access, so ignoring it would cause incremental compilation verification
    /// failures or hide query cycles.
    usages: HeadUsages,
}
311
/// All cycle heads a given goal depends on, ordered by their stack depth.
///
/// We also track all paths from this goal to that head. This is necessary
/// when rebasing provisional cache results.
#[derive(Clone, Debug, Default)]
struct CycleHeads {
    // A `BTreeMap` keeps the heads ordered by their stack depth, which
    // `highest_cycle_head` and friends rely on.
    heads: BTreeMap<StackDepth, CycleHead>,
}
320
321impl CycleHeads {
322 fn is_empty(&self) -> bool {
323 self.heads.is_empty()
324 }
325
326 fn highest_cycle_head(&self) -> (StackDepth, CycleHead) {
327 self.heads.last_key_value().map(|(k, v)| (*k, *v)).unwrap()
328 }
329
330 fn highest_cycle_head_index(&self) -> StackDepth {
331 self.opt_highest_cycle_head_index().unwrap()
332 }
333
334 fn opt_highest_cycle_head_index(&self) -> Option<StackDepth> {
335 self.heads.last_key_value().map(|(k, _)| *k)
336 }
337
338 fn opt_lowest_cycle_head_index(&self) -> Option<StackDepth> {
339 self.heads.first_key_value().map(|(k, _)| *k)
340 }
341
342 fn remove_highest_cycle_head(&mut self) -> CycleHead {
343 let last = self.heads.pop_last();
344 last.unwrap().1
345 }
346
347 fn insert(
348 &mut self,
349 head_index: StackDepth,
350 path_from_entry: impl Into<PathsToNested> + Copy,
351 usages: HeadUsages,
352 ) {
353 match self.heads.entry(head_index) {
354 btree_map::Entry::Vacant(entry) => {
355 entry.insert(CycleHead { paths_to_head: path_from_entry.into(), usages });
356 }
357 btree_map::Entry::Occupied(entry) => {
358 let head = entry.into_mut();
359 head.paths_to_head |= path_from_entry.into();
360 head.usages.add_usages_from_nested(usages);
361 }
362 }
363 }
364
365 fn ignore_usages(&mut self, head_index: StackDepth, usages: HeadUsages) {
366 self.heads.get_mut(&head_index).unwrap().usages.ignore_usages(usages)
367 }
368
369 fn iter(&self) -> impl Iterator<Item = (StackDepth, CycleHead)> + '_ {
370 self.heads.iter().map(|(k, v)| (*k, *v))
371 }
372}
373
bitflags::bitflags! {
    /// Tracks how nested goals have been accessed. This is necessary to disable
    /// global cache entries if computing them would otherwise result in a cycle or
    /// access a provisional cache entry.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub struct PathsToNested: u8 {
        /// The initial value when adding a goal to its own nested goals.
        const EMPTY = 1 << 0;
        /// There is a `PathKind::Inductive` path to this goal.
        const INDUCTIVE = 1 << 1;
        /// There is a `PathKind::Unknown` path to this goal.
        const UNKNOWN = 1 << 2;
        /// There is a `PathKind::Coinductive` path to this goal.
        const COINDUCTIVE = 1 << 3;
        /// There is a `PathKind::ForcedAmbiguity` path to this goal.
        const FORCED_AMBIGUITY = 1 << 4;
    }
}
388impl From<PathKind> for PathsToNested {
389 fn from(path: PathKind) -> PathsToNested {
390 match path {
391 PathKind::Inductive => PathsToNested::INDUCTIVE,
392 PathKind::Unknown => PathsToNested::UNKNOWN,
393 PathKind::Coinductive => PathsToNested::COINDUCTIVE,
394 PathKind::ForcedAmbiguity => PathsToNested::FORCED_AMBIGUITY,
395 }
396 }
397}
398impl PathsToNested {
399 /// The implementation of this function is kind of ugly. We check whether
400 /// there currently exist 'weaker' paths in the set, if so we upgrade these
401 /// paths to at least `path`.
402 #[must_use]
403 fn extend_with(mut self, path: PathKind) -> Self {
404 match path {
405 PathKind::Inductive => {
406 if self.intersects(PathsToNested::EMPTY) {
407 self.remove(PathsToNested::EMPTY);
408 self.insert(PathsToNested::INDUCTIVE);
409 }
410 }
411 PathKind::Unknown => {
412 if self.intersects(PathsToNested::EMPTY | PathsToNested::INDUCTIVE) {
413 self.remove(PathsToNested::EMPTY | PathsToNested::INDUCTIVE);
414 self.insert(PathsToNested::UNKNOWN);
415 }
416 }
417 PathKind::Coinductive => {
418 if self.intersects(
419 PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN,
420 ) {
421 self.remove(
422 PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN,
423 );
424 self.insert(PathsToNested::COINDUCTIVE);
425 }
426 }
427 PathKind::ForcedAmbiguity => {
428 if self.intersects(
429 PathsToNested::EMPTY
430 | PathsToNested::INDUCTIVE
431 | PathsToNested::UNKNOWN
432 | PathsToNested::COINDUCTIVE,
433 ) {
434 self.remove(
435 PathsToNested::EMPTY
436 | PathsToNested::INDUCTIVE
437 | PathsToNested::UNKNOWN
438 | PathsToNested::COINDUCTIVE,
439 );
440 self.insert(PathsToNested::FORCED_AMBIGUITY);
441 }
442 }
443 }
444
445 self
446 }
447
448 #[must_use]
449 fn extend_with_paths(self, path: PathsToNested) -> Self {
450 let mut new = PathsToNested::empty();
451 for p in path.iter_paths() {
452 new |= self.extend_with(p);
453 }
454 new
455 }
456
457 fn iter_paths(self) -> impl Iterator<Item = PathKind> {
458 let (PathKind::Inductive
459 | PathKind::Unknown
460 | PathKind::Coinductive
461 | PathKind::ForcedAmbiguity);
462 [PathKind::Inductive, PathKind::Unknown, PathKind::Coinductive, PathKind::ForcedAmbiguity]
463 .into_iter()
464 .filter(move |&p| self.contains(p.into()))
465 }
466}
467
/// The nested goals of each stack entry and the path from the
/// stack entry to that nested goal.
///
/// They are used when checking whether reevaluating a global cache
/// would encounter a cycle or use a provisional cache entry given the
/// current search graph state. We need to disable the global cache
/// in this case as it could otherwise result in behavioral differences.
/// Cycles can impact behavior. The cycle ABA may have different final
/// results from the cycle BAB depending on the cycle root.
///
/// We only start tracking nested goals once we've either encountered
/// overflow or a solver cycle. This is a performance optimization to
/// avoid tracking nested goals on the happy path.
#[derive_where(Debug, Default, Clone; X: Cx)]
struct NestedGoals<X: Cx> {
    // Maps each nested goal to the kinds of paths used to reach it.
    nested_goals: HashMap<X::Input, PathsToNested>,
}
485impl<X: Cx> NestedGoals<X> {
486 fn is_empty(&self) -> bool {
487 self.nested_goals.is_empty()
488 }
489
490 fn insert(&mut self, input: X::Input, paths_to_nested: PathsToNested) {
491 match self.nested_goals.entry(input) {
492 Entry::Occupied(mut entry) => *entry.get_mut() |= paths_to_nested,
493 Entry::Vacant(entry) => drop(entry.insert(paths_to_nested)),
494 }
495 }
496
497 /// Adds the nested goals of a nested goal, given that the path `step_kind` from this goal
498 /// to the parent goal.
499 ///
500 /// If the path from this goal to the nested goal is inductive, the paths from this goal
501 /// to all nested goals of that nested goal are also inductive. Otherwise the paths are
502 /// the same as for the child.
503 fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) {
504 #[allow(rustc::potential_query_instability)]
505 for (input, paths_to_nested) in nested_goals.iter() {
506 let paths_to_nested = paths_to_nested.extend_with(step_kind);
507 self.insert(input, paths_to_nested);
508 }
509 }
510
511 #[cfg_attr(feature = "nightly", rustc_lint_query_instability)]
512 #[allow(rustc::potential_query_instability)]
513 fn iter(&self) -> impl Iterator<Item = (X::Input, PathsToNested)> + '_ {
514 self.nested_goals.iter().map(|(i, p)| (*i, *p))
515 }
516
517 fn contains(&self, input: X::Input) -> bool {
518 self.nested_goals.contains_key(&input)
519 }
520}
521
/// A provisional result of an already computed goal which depends on other
/// goals still on the stack.
#[derive_where(Debug; X: Cx)]
struct ProvisionalCacheEntry<X: Cx> {
    /// Whether evaluating the goal encountered overflow. This is used to
    /// disable the cache entry except if the last goal on the stack is
    /// already involved in this cycle.
    encountered_overflow: bool,
    /// All cycle heads this cache entry depends on.
    heads: CycleHeads,
    /// The path from the highest cycle head to this goal. This differs from
    /// `heads` which tracks the path to the cycle head *from* this goal.
    path_from_head: PathKind,
    /// The provisional result of this goal.
    result: X::Result,
}
537
/// The final result of evaluating a goal.
///
/// We reset `encountered_overflow` when reevaluating a goal,
/// but need to track whether we've hit the recursion limit at
/// all for correctness.
///
/// We've previously simply returned the final `StackEntry` but this
/// made it easy to accidentally drop information from the previous
/// evaluation.
#[derive_where(Debug; X: Cx)]
struct EvaluationResult<X: Cx> {
    // Whether any evaluation of this goal hit the recursion limit.
    encountered_overflow: bool,
    // The additional stack depth required to compute this goal.
    required_depth: usize,
    // The cycle heads this goal depends on.
    heads: CycleHeads,
    // The nested goals accessed while computing this goal.
    nested_goals: NestedGoals<X>,
    result: X::Result,
}
555
556impl<X: Cx> EvaluationResult<X> {
557 fn finalize(
558 final_entry: StackEntry<X>,
559 encountered_overflow: bool,
560 result: X::Result,
561 ) -> EvaluationResult<X> {
562 EvaluationResult {
563 encountered_overflow,
564 // Unlike `encountered_overflow`, we share `heads`, `required_depth`,
565 // and `nested_goals` between evaluations.
566 required_depth: final_entry.required_depth,
567 heads: final_entry.heads,
568 nested_goals: final_entry.nested_goals,
569 // We only care about the final result.
570 result,
571 }
572 }
573}
574
pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
    // The recursion depth available for root goals.
    root_depth: AvailableDepth,
    // The stack of goals currently being evaluated.
    stack: Stack<X>,
    /// The provisional cache contains entries for already computed goals which
    /// still depend on goals higher-up in the stack. We don't move them to the
    /// global cache and track them locally instead. A provisional cache entry
    /// is only valid until the result of one of its cycle heads changes.
    provisional_cache: HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,

    _marker: PhantomData<D>,
}
586
/// While [`SearchGraph::update_parent_goal`] can be mostly shared between
/// ordinary nested goals/global cache hits and provisional cache hits,
/// using the provisional cache should not add any nested goals.
///
/// `nested_goals` are only used when checking whether global cache entries
/// are applicable. This only cares about whether a goal is actually accessed.
/// Given that the usage of the provisional cache is fully deterministic, we
/// don't need to track the nested goals used while computing a provisional
/// cache entry.
enum UpdateParentGoalCtxt<'a, X: Cx> {
    /// An ordinary nested goal or global cache hit, carrying the nested
    /// goals accessed while computing it.
    Ordinary(&'a NestedGoals<X>),
    /// A cycle was detected on the stack with the given goal as its head.
    CycleOnStack(X::Input),
    /// A provisional cache entry was used.
    ProvisionalCacheHit,
}
601
602impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
603 pub fn new(root_depth: usize) -> SearchGraph<D> {
604 Self {
605 root_depth: AvailableDepth(root_depth),
606 stack: Default::default(),
607 provisional_cache: Default::default(),
608 _marker: PhantomData,
609 }
610 }
611
    /// Lazily update the stack entry for the parent goal.
    /// This behavior is shared between actually evaluating goals
    /// and using existing global cache entries to make sure they
    /// have the same impact on the remaining evaluation.
    fn update_parent_goal(
        stack: &mut Stack<X>,
        step_kind_from_parent: PathKind,
        required_depth_for_nested: usize,
        heads: impl Iterator<Item = (StackDepth, CycleHead)>,
        encountered_overflow: bool,
        context: UpdateParentGoalCtxt<'_, X>,
    ) {
        if let Some((parent_index, parent)) = stack.last_mut_with_index() {
            parent.required_depth = parent.required_depth.max(required_depth_for_nested + 1);
            parent.encountered_overflow |= encountered_overflow;

            for (head_index, head) in heads {
                // While the usages of a single candidate are tracked, also
                // record all cycle head usages of its nested goals, see
                // `enter_single_candidate`.
                if let Some(candidate_usages) = &mut parent.candidate_usages {
                    candidate_usages
                        .usages
                        .get_or_insert_default()
                        .entry(head_index)
                        .or_default()
                        .add_usages_from_nested(head.usages);
                }
                match head_index.cmp(&parent_index) {
                    // The cycle head is deeper in the stack than the parent,
                    // so the parent is itself part of that cycle.
                    Ordering::Less => parent.heads.insert(
                        head_index,
                        head.paths_to_head.extend_with(step_kind_from_parent),
                        head.usages,
                    ),
                    // The parent is the cycle head itself.
                    Ordering::Equal => {
                        parent.usages.get_or_insert_default().add_usages_from_nested(head.usages);
                    }
                    // A nested goal cannot have a cycle head above its parent.
                    Ordering::Greater => unreachable!(),
                }
            }
            let parent_depends_on_cycle = match context {
                UpdateParentGoalCtxt::Ordinary(nested_goals) => {
                    parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals);
                    !nested_goals.is_empty()
                }
                UpdateParentGoalCtxt::CycleOnStack(head) => {
                    // We lookup provisional cache entries before detecting cycles.
                    // We therefore can't use a global cache entry if it contains a cycle
                    // whose head is in the provisional cache.
                    parent.nested_goals.insert(head, step_kind_from_parent.into());
                    true
                }
                UpdateParentGoalCtxt::ProvisionalCacheHit => true,
            };
            // Once we've got goals which encountered overflow or a cycle,
            // we track all goals whose behavior may depend on these
            // goals as this change may cause them to now depend on additional
            // goals, resulting in new cycles. See the dev-guide for examples.
            if parent_depends_on_cycle {
                parent.nested_goals.insert(parent.input, PathsToNested::EMPTY);
            }
        }
    }
672
673 pub fn is_empty(&self) -> bool {
674 if self.stack.is_empty() {
675 debug_assert!(self.provisional_cache.is_empty());
676 true
677 } else {
678 false
679 }
680 }
681
    /// The number of goals currently in the search graph, i.e. the length
    /// of the evaluation stack. This should only be used for debugging
    /// purposes.
    pub fn debug_current_depth(&self) -> usize {
        self.stack.len()
    }
687
688 /// Whether the path from `head` to the current stack entry is inductive or coinductive.
689 ///
690 /// The `step_kind_to_head` is used to add a single additional path segment to the path on
691 /// the stack which completes the cycle. This given an inductive step AB which then cycles
692 /// coinductively with A, we need to treat this cycle as coinductive.
693 fn cycle_path_kind(
694 stack: &Stack<X>,
695 step_kind_to_head: PathKind,
696 head: StackDepth,
697 ) -> PathKind {
698 stack.cycle_step_kinds(head).fold(step_kind_to_head, |curr, step| curr.extend(step))
699 }
700
701 pub fn enter_single_candidate(&mut self) {
702 let prev = self.stack.last_mut().unwrap().candidate_usages.replace(Default::default());
703 debug_assert!(prev.is_none(), "existing candidate_usages: {prev:?}");
704 }
705
706 pub fn finish_single_candidate(&mut self) -> CandidateHeadUsages {
707 self.stack.last_mut().unwrap().candidate_usages.take().unwrap()
708 }
709
710 pub fn ignore_candidate_head_usages(&mut self, usages: CandidateHeadUsages) {
711 if let Some(usages) = usages.usages {
712 let (entry_index, entry) = self.stack.last_mut_with_index().unwrap();
713 #[allow(rustc::potential_query_instability)]
714 for (head_index, usages) in usages.into_iter() {
715 if head_index == entry_index {
716 entry.usages.unwrap().ignore_usages(usages);
717 } else {
718 entry.heads.ignore_usages(head_index, usages);
719 }
720 }
721 }
722 }
723
    /// Evaluate `input` as the root goal of a fresh search graph while
    /// building a proof tree via `inspect`.
    pub fn evaluate_root_goal_for_proof_tree(
        cx: X,
        root_depth: usize,
        input: X::Input,
        inspect: &mut D::ProofTreeBuilder,
    ) -> X::Result {
        let mut this = SearchGraph::<D>::new(root_depth);
        let available_depth = AvailableDepth(root_depth);
        let step_kind_from_parent = PathKind::Inductive; // is never used
        // Manually push the root goal instead of going through
        // `evaluate_goal`, which would also consult the caches.
        this.stack.push(StackEntry {
            input,
            step_kind_from_parent,
            available_depth,
            provisional_result: None,
            required_depth: 0,
            heads: Default::default(),
            encountered_overflow: false,
            usages: None,
            candidate_usages: None,
            nested_goals: Default::default(),
        });
        let evaluation_result = this.evaluate_goal_in_task(cx, input, inspect);
        evaluation_result.result
    }
748
    /// Probably the most involved method of the whole solver.
    ///
    /// While goals get computed via `D::compute_goal`, this function handles
    /// caching, overflow, and cycles.
    #[instrument(level = "debug", skip(self, cx, inspect), ret)]
    pub fn evaluate_goal(
        &mut self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
        inspect: &mut D::ProofTreeBuilder,
    ) -> X::Result {
        let Some(available_depth) =
            AvailableDepth::allowed_depth_for_nested::<D>(self.root_depth, &self.stack)
        else {
            return self.handle_overflow(cx, input);
        };

        // We check the provisional cache before checking the global cache. This simplifies
        // the implementation as we can avoid worrying about cases where both the global and
        // provisional cache may apply, e.g. consider the following example
        //
        // - xxBA overflow
        // - A
        //   - BA cycle
        //   - CB :x:
        if let Some(result) = self.lookup_provisional_cache(input, step_kind_from_parent) {
            return result;
        }

        // Lookup the global cache unless we're building proof trees or are currently
        // fuzzing.
        let validate_cache = if !D::inspect_is_noop(inspect) {
            // We don't use the global cache while building proof trees.
            None
        } else if let Some(scope) = D::enter_validation_scope(cx, input) {
            // When validating the global cache we need to track the goals for which the
            // global cache has been disabled as it may otherwise change the result for
            // cyclic goals. We don't care about goals which are not on the current stack
            // so it's fine to drop their scope eagerly.
            self.lookup_global_cache_untracked(cx, input, step_kind_from_parent, available_depth)
                .inspect(|expected| debug!(?expected, "validate cache entry"))
                .map(|r| (scope, r))
        } else if let Some(result) =
            self.lookup_global_cache(cx, input, step_kind_from_parent, available_depth)
        {
            return result;
        } else {
            None
        };

        // Detect cycles on the stack. We do this after the global cache lookup to
        // avoid iterating over the stack in case a goal has already been computed.
        // This may not have an actual performance impact and we could reorder them
        // as it may reduce the number of `nested_goals` we need to track.
        if let Some(result) = self.check_cycle_on_stack(cx, input, step_kind_from_parent) {
            debug_assert!(validate_cache.is_none(), "global cache and cycle on stack: {input:?}");
            return result;
        }

        // Unfortunate, it looks like we actually have to compute this goal.
        self.stack.push(StackEntry {
            input,
            step_kind_from_parent,
            available_depth,
            provisional_result: None,
            required_depth: 0,
            heads: Default::default(),
            encountered_overflow: false,
            usages: None,
            candidate_usages: None,
            nested_goals: Default::default(),
        });

        // This is for global caching, so we properly track query dependencies.
        // Everything that affects the `result` should be performed within this
        // `with_cached_task` closure. If computing this goal depends on something
        // not tracked by the cache key and from outside of this anon task, it
        // must not be added to the global cache. Notably, this is the case for
        // trait solver cycles participants.
        let (evaluation_result, dep_node) =
            cx.with_cached_task(|| self.evaluate_goal_in_task(cx, input, inspect));

        // We've finished computing the goal and have popped it from the stack,
        // lazily update its parent goal.
        Self::update_parent_goal(
            &mut self.stack,
            step_kind_from_parent,
            evaluation_result.required_depth,
            evaluation_result.heads.iter(),
            evaluation_result.encountered_overflow,
            UpdateParentGoalCtxt::Ordinary(&evaluation_result.nested_goals),
        );
        let result = evaluation_result.result;

        // We're now done with this goal. We only add the root of cycles to the global cache.
        // In case this goal is involved in a larger cycle add it to the provisional cache.
        if evaluation_result.heads.is_empty() {
            if let Some((_scope, expected)) = validate_cache {
                // Do not try to move a goal into the cache again if we're testing
                // the global cache.
                assert_eq!(expected, evaluation_result.result, "input={input:?}");
            } else if D::inspect_is_noop(inspect) {
                self.insert_global_cache(cx, input, evaluation_result, dep_node)
            }
        } else if D::ENABLE_PROVISIONAL_CACHE {
            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
            let entry = self.provisional_cache.entry(input).or_default();
            let EvaluationResult {
                encountered_overflow,
                required_depth: _,
                heads,
                nested_goals: _,
                result,
            } = evaluation_result;
            // The provisional result depends on how this goal was reached,
            // so also store the path from the highest cycle head to it.
            let path_from_head = Self::cycle_path_kind(
                &self.stack,
                step_kind_from_parent,
                heads.highest_cycle_head_index(),
            );
            let provisional_cache_entry =
                ProvisionalCacheEntry { encountered_overflow, heads, path_from_head, result };
            debug!(?provisional_cache_entry);
            entry.push(provisional_cache_entry);
        } else {
            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
        }

        result
    }
878
879 fn handle_overflow(&mut self, cx: X, input: X::Input) -> X::Result {
880 if let Some(last) = self.stack.last_mut() {
881 last.encountered_overflow = true;
882 // If computing a goal `B` depends on another goal `A` and
883 // `A` has a nested goal which overflows, then computing `B`
884 // at the same depth, but with `A` already on the stack,
885 // would encounter a solver cycle instead, potentially
886 // changing the result.
887 //
888 // We must therefore not use the global cache entry for `B` in that case.
889 // See tests/ui/traits/next-solver/cycles/hidden-by-overflow.rs
890 last.nested_goals.insert(last.input, PathsToNested::EMPTY);
891 }
892
893 debug!("encountered stack overflow");
894 D::stack_overflow_result(cx, input)
895 }
896
897 /// When reevaluating a goal with a changed provisional result, all provisional cache entry
898 /// which depend on this goal get invalidated.
899 ///
900 /// Note that we keep provisional cache entries which accessed this goal as a cycle head, but
901 /// don't depend on its value.
902 fn clear_dependent_provisional_results_for_rerun(&mut self) {
903 let rerun_index = self.stack.next_index();
904 #[allow(rustc::potential_query_instability)]
905 self.provisional_cache.retain(|_, entries| {
906 entries.retain(|entry| {
907 let (head_index, head) = entry.heads.highest_cycle_head();
908 head_index != rerun_index || head.usages.is_empty()
909 });
910 !entries.is_empty()
911 });
912 }
913}
914
/// We need to rebase provisional cache entries when popping one of their cycle
/// heads from the stack. This may not necessarily mean that we've actually
/// reached a fixpoint for that cycle head, which impacts the way we rebase
/// provisional cache entries.
#[derive_where(Debug; X: Cx)]
enum RebaseReason<X: Cx> {
    /// The popped cycle head was never actually used as a cycle head. Entries
    /// which depend on its provisional result get discarded when rebasing.
    NoCycleUsages,
    /// The popped cycle head returned ambiguity without reaching a fixpoint.
    /// Dependent entries have their result replaced via `D::propagate_ambiguity`.
    Ambiguity(X::AmbiguityInfo),
    /// The popped cycle head hit `D::FIXPOINT_STEP_LIMIT`. Dependent entries
    /// have their result replaced via `D::fixpoint_overflow_result`.
    Overflow,
    /// We've actually reached a fixpoint.
    ///
    /// This can happen in the first evaluation step for the cycle head.
    /// In this case the used provisional result depends on the cycle `PathKind`.
    /// We store this path kind to check whether the provisional cache entry
    /// we're rebasing relied on the same cycles.
    ///
    /// In later iterations cycles always return `stack_entry.provisional_result`
    /// so we no longer depend on the `PathKind`. We store `None` in that case.
    ReachedFixpoint(Option<PathKind>),
}
935
936impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
    /// A necessary optimization to handle complex solver cycles. A provisional cache entry
    /// relies on a set of cycle heads and the path towards these heads. When popping a cycle
    /// head from the stack after we've finished computing it, we can't be sure that the
    /// provisional cache entry is still applicable. We need to keep the cache entries to
    /// prevent hangs.
    ///
    /// This can be thought of as pretending to reevaluate the popped head as nested goals
    /// of this provisional result. For this to be correct, all cycles encountered while
    /// we'd reevaluate the cycle head as a nested goal must keep the same cycle kind.
    /// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
    ///
    /// In case the popped cycle head failed to reach a fixpoint anything which depends on
    /// its provisional result is invalid. Actually discarding provisional cache entries in
    /// this case would cause hangs, so we instead change the result of dependent provisional
    /// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested
    /// goals whose result doesn't actually depend on this cycle head, but that's acceptable
    /// to me.
    #[instrument(level = "trace", skip(self, cx))]
    fn rebase_provisional_cache_entries(
        &mut self,
        cx: X,
        stack_entry: &StackEntry<X>,
        rebase_reason: RebaseReason<X>,
    ) {
        // The popped head `stack_entry` has already been removed from the
        // stack, so its depth equals `next_index`.
        let popped_head_index = self.stack.next_index();
        #[allow(rustc::potential_query_instability)]
        self.provisional_cache.retain(|&input, entries| {
            entries.retain_mut(|entry| {
                let ProvisionalCacheEntry {
                    encountered_overflow: _,
                    heads,
                    path_from_head,
                    result,
                } = entry;
                // Entries whose highest head is still on the stack are unaffected.
                let popped_head = if heads.highest_cycle_head_index() == popped_head_index {
                    heads.remove_highest_cycle_head()
                } else {
                    debug_assert!(heads.highest_cycle_head_index() < popped_head_index);
                    return true;
                };

                // We're rebasing an entry `e` over a head `p`. This head
                // has a number of own heads `h` it depends on.
                //
                // This causes our provisional result to depend on the heads
                // of `p` to avoid moving any goal which uses this cache entry to
                // the global cache.
                if popped_head.usages.is_empty() {
                    // The result of `e` does not depend on the value of `p`. Thus we can
                    // keep using the result of this provisional cache entry even if evaluating
                    // `p` as a nested goal of `e` would have a different result.
                    for (head_index, _) in stack_entry.heads.iter() {
                        heads.insert(head_index, PathsToNested::EMPTY, HeadUsages::default());
                    }
                } else {
                    // The entry `e` actually depends on the value of `p`. We need
                    // to make sure that the value of `p` wouldn't change even if we
                    // were to reevaluate it as a nested goal of `e` instead. For this
                    // we check that the path kind of all paths `hph` remains the
                    // same after rebasing.
                    //
                    // After rebasing the cycles `hph` will go through `e`. We need to make
                    // sure that forall possible paths `hep`, `heph` is equal to `hph.`
                    let ep = popped_head.paths_to_head;
                    for (head_index, head) in stack_entry.heads.iter() {
                        let ph = head.paths_to_head;
                        let hp = Self::cycle_path_kind(
                            &self.stack,
                            stack_entry.step_kind_from_parent,
                            head_index,
                        );
                        // We first validate that all cycles while computing `p` would stay
                        // the same if we were to recompute it as a nested goal of `e`.
                        let he = hp.extend(*path_from_head);
                        for ph in ph.iter_paths() {
                            let hph = hp.extend(ph);
                            for ep in ep.iter_paths() {
                                let hep = ep.extend(he);
                                let heph = hep.extend(ph);
                                if hph != heph {
                                    // Rebasing would change a cycle kind; the entry
                                    // cannot be kept.
                                    return false;
                                }
                            }
                        }

                        // If so, all paths reached while computing `p` have to get added
                        // to the heads of `e` to make sure that rebasing `e` again also
                        // considers them.
                        let eph = ep.extend_with_paths(ph);
                        heads.insert(head_index, eph, head.usages);
                    }

                    // The provisional cache entry does depend on the provisional result
                    // of the popped cycle head. We need to mutate the result of our
                    // provisional cache entry in case we did not reach a fixpoint.
                    match rebase_reason {
                        // If the cycle head does not actually depend on itself, then
                        // the provisional result used by the provisional cache entry
                        // is not actually equal to the final provisional result. We
                        // need to discard the provisional cache entry in this case.
                        RebaseReason::NoCycleUsages => return false,
                        RebaseReason::Ambiguity(info) => {
                            *result = D::propagate_ambiguity(cx, input, info);
                        }
                        RebaseReason::Overflow => *result = D::fixpoint_overflow_result(cx, input),
                        RebaseReason::ReachedFixpoint(None) => {}
                        RebaseReason::ReachedFixpoint(Some(path_kind)) => {
                            // Only keep the entry if it relied on the same kind of
                            // cycle as the one used to pick the initial provisional
                            // result.
                            if !popped_head.usages.is_single(path_kind) {
                                return false;
                            }
                        }
                    };
                }

                // If the entry has no remaining heads, it would no longer be a
                // cycle participant, so drop it.
                let Some(new_highest_head_index) = heads.opt_highest_cycle_head_index() else {
                    return false;
                };

                // We now care about the path from the next highest cycle head to the
                // provisional cache entry.
                *path_from_head = path_from_head.extend(Self::cycle_path_kind(
                    &self.stack,
                    stack_entry.step_kind_from_parent,
                    new_highest_head_index,
                ));

                trace!(?input, ?entry, "rebased provisional cache entry");

                true
            });
            !entries.is_empty()
        });
    }
1070
    /// Try to fetch a result for `input` from the provisional cache.
    ///
    /// A cache hit behaves like encountering the cached entry's cycles ourselves:
    /// we add the entry's cycle heads to the parent goal via `update_parent_goal`.
    fn lookup_provisional_cache(
        &mut self,
        input: X::Input,
        step_kind_from_parent: PathKind,
    ) -> Option<X::Result> {
        if !D::ENABLE_PROVISIONAL_CACHE {
            return None;
        }

        let entries = self.provisional_cache.get(&input)?;
        for &ProvisionalCacheEntry { encountered_overflow, ref heads, path_from_head, result } in
            entries
        {
            let head_index = heads.highest_cycle_head_index();
            if encountered_overflow {
                // This check is overly strict and very subtle. We need to make sure that if
                // a global cache entry depends on some goal without adding it to its
                // `nested_goals`, that goal must never have an applicable provisional
                // cache entry to avoid incorrectly applying the cache entry.
                //
                // As we'd have to otherwise track literally all nested goals, we only
                // apply provisional cache entries which encountered overflow once the
                // current goal is already part of the same cycle. This check could be
                // improved but seems to be good enough for now.
                let last = self.stack.last().unwrap();
                if last.heads.opt_lowest_cycle_head_index().is_none_or(|lowest| lowest > head_index)
                {
                    continue;
                }
            }

            // A provisional cache entry is only valid if the current path from its
            // highest cycle head to the goal is the same.
            if path_from_head
                == Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index)
            {
                // A provisional cache hit adds no depth of its own, hence the
                // `required_depth` of 0.
                Self::update_parent_goal(
                    &mut self.stack,
                    step_kind_from_parent,
                    0,
                    heads.iter(),
                    encountered_overflow,
                    UpdateParentGoalCtxt::ProvisionalCacheHit,
                );
                debug!(?head_index, ?path_from_head, "provisional cache hit");
                return Some(result);
            }
        }

        None
    }
1122
    /// Even if there is a global cache entry for a given goal, we need to make sure
    /// evaluating this entry would not have ended up depending on either a goal
    /// already on the stack or a provisional cache entry.
    ///
    /// Returns `true` if using the cache entry cannot be observably different
    /// from reevaluating the goal right now.
    fn candidate_is_applicable(
        &self,
        step_kind_from_parent: PathKind,
        nested_goals: &NestedGoals<X>,
    ) -> bool {
        // If the global cache entry didn't depend on any nested goals, it always
        // applies.
        if nested_goals.is_empty() {
            return true;
        }

        // If a nested goal of the global cache entry is on the stack, we would
        // definitely encounter a cycle.
        if self.stack.iter().any(|e| nested_goals.contains(e.input)) {
            debug!("cache entry not applicable due to stack");
            return false;
        }

        // The global cache entry is also invalid if there's a provisional cache entry
        // which would apply for any of its nested goals.
        #[allow(rustc::potential_query_instability)]
        for (input, path_from_global_entry) in nested_goals.iter() {
            let Some(entries) = self.provisional_cache.get(&input) else {
                continue;
            };

            debug!(?input, ?path_from_global_entry, ?entries, "candidate_is_applicable");
            // A provisional cache entry is applicable if the path to
            // its highest cycle head is equal to the expected path.
            for &ProvisionalCacheEntry {
                encountered_overflow,
                ref heads,
                path_from_head: head_to_provisional,
                result: _,
            } in entries.iter()
            {
                // We don't have to worry about provisional cache entries which encountered
                // overflow, see the relevant comment in `lookup_provisional_cache`.
                if encountered_overflow {
                    continue;
                }

                // A provisional cache entry only applies if the path from its highest head
                // matches the path when encountering the goal.
                //
                // We check if any of the paths taken while computing the global goal
                // would end up with an applicable provisional cache entry.
                let head_index = heads.highest_cycle_head_index();
                let head_to_curr =
                    Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
                let full_paths = path_from_global_entry.extend_with(head_to_curr);
                if full_paths.contains(head_to_provisional.into()) {
                    debug!(
                        ?full_paths,
                        ?head_to_provisional,
                        "cache entry not applicable due to matching paths"
                    );
                    return false;
                }
            }
        }

        true
    }
1190
1191 /// Used when fuzzing the global cache. Accesses the global cache without
1192 /// updating the state of the search graph.
1193 fn lookup_global_cache_untracked(
1194 &self,
1195 cx: X,
1196 input: X::Input,
1197 step_kind_from_parent: PathKind,
1198 available_depth: AvailableDepth,
1199 ) -> Option<X::Result> {
1200 cx.with_global_cache(|cache| {
1201 cache
1202 .get(cx, input, available_depth, |nested_goals| {
1203 self.candidate_is_applicable(step_kind_from_parent, nested_goals)
1204 })
1205 .map(|c| c.result)
1206 })
1207 }
1208
    /// Try to fetch a previously computed result from the global cache,
    /// making sure to only do so if it would match the result of reevaluating
    /// this goal.
    fn lookup_global_cache(
        &mut self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
        available_depth: AvailableDepth,
    ) -> Option<X::Result> {
        cx.with_global_cache(|cache| {
            // `candidate_is_applicable` rejects entries which would have
            // depended on a goal on the stack or on a provisional cache entry.
            let CacheData { result, required_depth, encountered_overflow, nested_goals } = cache
                .get(cx, input, available_depth, |nested_goals| {
                    self.candidate_is_applicable(step_kind_from_parent, nested_goals)
                })?;

            // We don't move cycle participants to the global cache, so the
            // cycle heads are always empty.
            let heads = iter::empty();
            Self::update_parent_goal(
                &mut self.stack,
                step_kind_from_parent,
                required_depth,
                heads,
                encountered_overflow,
                UpdateParentGoalCtxt::Ordinary(nested_goals),
            );

            debug!(?required_depth, "global cache hit");
            Some(result)
        })
    }
1241
1242 fn check_cycle_on_stack(
1243 &mut self,
1244 cx: X,
1245 input: X::Input,
1246 step_kind_from_parent: PathKind,
1247 ) -> Option<X::Result> {
1248 let head_index = self.stack.find(input)?;
1249 // We have a nested goal which directly relies on a goal deeper in the stack.
1250 //
1251 // We start by tagging all cycle participants, as that's necessary for caching.
1252 //
1253 // Finally we can return either the provisional response or the initial response
1254 // in case we're in the first fixpoint iteration for this goal.
1255 let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
1256 debug!(?path_kind, "encountered cycle with depth {head_index:?}");
1257 let mut usages = HeadUsages::default();
1258 usages.add_usage(path_kind);
1259 let head = CycleHead { paths_to_head: step_kind_from_parent.into(), usages };
1260 Self::update_parent_goal(
1261 &mut self.stack,
1262 step_kind_from_parent,
1263 0,
1264 iter::once((head_index, head)),
1265 false,
1266 UpdateParentGoalCtxt::CycleOnStack(input),
1267 );
1268
1269 // Return the provisional result or, if we're in the first iteration,
1270 // start with no constraints.
1271 if let Some(result) = self.stack[head_index].provisional_result {
1272 Some(result)
1273 } else {
1274 Some(D::initial_provisional_result(cx, path_kind, input))
1275 }
1276 }
1277
1278 /// Whether we've reached a fixpoint when evaluating a cycle head.
1279 #[instrument(level = "trace", skip(self, stack_entry), ret)]
1280 fn reached_fixpoint(
1281 &mut self,
1282 stack_entry: &StackEntry<X>,
1283 usages: HeadUsages,
1284 result: X::Result,
1285 ) -> Result<Option<PathKind>, ()> {
1286 let provisional_result = stack_entry.provisional_result;
1287 if let Some(provisional_result) = provisional_result {
1288 if provisional_result == result { Ok(None) } else { Err(()) }
1289 } else if let Some(path_kind) = D::is_initial_provisional_result(result)
1290 .filter(|&path_kind| usages.is_single(path_kind))
1291 {
1292 Ok(Some(path_kind))
1293 } else {
1294 Err(())
1295 }
1296 }
1297
    /// When we encounter a coinductive cycle, we have to fetch the
    /// result of that cycle while we are still computing it. Because
    /// of this we continuously recompute the cycle until the result
    /// of the previous iteration is equal to the final result, at which
    /// point we are done.
    fn evaluate_goal_in_task(
        &mut self,
        cx: X,
        input: X::Input,
        inspect: &mut D::ProofTreeBuilder,
    ) -> EvaluationResult<X> {
        // We reset `encountered_overflow` each time we rerun this goal
        // but need to make sure we correctly propagate it to the global
        // cache even if only some of the evaluations actually reach the
        // recursion limit.
        let mut encountered_overflow = false;
        // The number of completed fixpoint iterations, bounded by
        // `D::FIXPOINT_STEP_LIMIT`.
        let mut i = 0;
        loop {
            let result = D::compute_goal(self, cx, input, inspect);
            let stack_entry = self.stack.pop();
            encountered_overflow |= stack_entry.encountered_overflow;
            debug_assert_eq!(stack_entry.input, input);

            // If the current goal is not a cycle head, we are done.
            //
            // There are no provisional cache entries which depend on this goal.
            let Some(usages) = stack_entry.usages else {
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            };

            // If it is a cycle head, we have to keep trying to prove it until
            // we reach a fixpoint. We need to do so for all cycle heads,
            // not only for the root.
            //
            // See tests/ui/traits/next-solver/cycles/fixpoint-rerun-all-cycle-heads.rs
            // for an example.
            //
            // Check whether we reached a fixpoint, either because the final result
            // is equal to the provisional result of the previous iteration, or because
            // this was only the head of either coinductive or inductive cycles, and the
            // final result is equal to the initial response for that case.
            if let Ok(fixpoint) = self.reached_fixpoint(&stack_entry, usages, result) {
                self.rebase_provisional_cache_entries(
                    cx,
                    &stack_entry,
                    RebaseReason::ReachedFixpoint(fixpoint),
                );
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            } else if usages.is_empty() {
                // The goal was a cycle head, but nothing ended up using its
                // provisional result, so there's no need to rerun.
                self.rebase_provisional_cache_entries(
                    cx,
                    &stack_entry,
                    RebaseReason::NoCycleUsages,
                );
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            }

            // If computing this goal results in ambiguity with no constraints,
            // we do not rerun it. It's incredibly difficult to get a different
            // response in the next iteration in this case. These changes would
            // likely either be caused by incompleteness or can change the maybe
            // cause from ambiguity to overflow. Returning ambiguity always
            // preserves soundness and completeness even if the goal is known
            // to succeed or fail.
            //
            // This prevents exponential blowup affecting multiple major crates.
            // As we only get to this branch if we haven't yet reached a fixpoint,
            // we also taint all provisional cache entries which depend on the
            // current goal.
            if let Some(info) = D::is_ambiguous_result(result) {
                self.rebase_provisional_cache_entries(
                    cx,
                    &stack_entry,
                    RebaseReason::Ambiguity(info),
                );
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            };

            // If we've reached the fixpoint step limit, we bail with overflow and taint all
            // provisional cache entries which depend on the current goal.
            i += 1;
            if i >= D::FIXPOINT_STEP_LIMIT {
                debug!("canonical cycle overflow");
                let result = D::fixpoint_overflow_result(cx, input);
                self.rebase_provisional_cache_entries(cx, &stack_entry, RebaseReason::Overflow);
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            }

            // Clear all provisional cache entries which depend on a previous provisional
            // result of this goal and rerun. This does not remove goals which accessed this
            // goal without depending on its result.
            self.clear_dependent_provisional_results_for_rerun();

            debug!(?result, "fixpoint changed provisional results");
            self.stack.push(StackEntry {
                input,
                step_kind_from_parent: stack_entry.step_kind_from_parent,
                available_depth: stack_entry.available_depth,
                provisional_result: Some(result),
                // We can keep these goals from previous iterations as they are only
                // ever read after finalizing this evaluation.
                required_depth: stack_entry.required_depth,
                heads: stack_entry.heads,
                nested_goals: stack_entry.nested_goals,
                // We reset these two fields when rerunning this goal. We could
                // keep `encountered_overflow` as it's only used as a performance
                // optimization. However, given that the proof tree will likely look
                // similar to the previous iterations when reevaluating, it's better
                // for caching if the reevaluation also starts out with `false`.
                encountered_overflow: false,
                // We keep provisional cache entries around if they used this goal
                // without depending on its result.
                //
                // We still need to drop or rebase these cache entries once we've
                // finished evaluating this goal.
                usages: Some(HeadUsages::default()),
                candidate_usages: None,
            });
        }
    }
1418
    /// When encountering a cycle, both inductive and coinductive, we only
    /// move the root into the global cache. We also store all other cycle
    /// participants involved.
    ///
    /// We must not use the global cache entry of a root goal if a cycle
    /// participant is on the stack. This is necessary to prevent unstable
    /// results. See the comment of `StackEntry::nested_goals` for
    /// more details. That check happens at lookup time via
    /// `candidate_is_applicable`.
    fn insert_global_cache(
        &mut self,
        cx: X,
        input: X::Input,
        evaluation_result: EvaluationResult<X>,
        dep_node: X::DepNodeIndex,
    ) {
        debug!(?evaluation_result, "insert global cache");
        cx.with_global_cache(|cache| cache.insert(cx, input, evaluation_result, dep_node))
    }