rustc_trait_selection/traits/select/mod.rs
1//! Candidate selection. See the [rustc dev guide] for more information on how this works.
2//!
3//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection
4
5use std::assert_matches::assert_matches;
6use std::cell::{Cell, RefCell};
7use std::cmp;
8use std::fmt::{self, Display};
9use std::ops::ControlFlow;
10
11use hir::def::DefKind;
12use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
13use rustc_data_structures::stack::ensure_sufficient_stack;
14use rustc_errors::{Diag, EmissionGuarantee};
15use rustc_hir as hir;
16use rustc_hir::LangItem;
17use rustc_hir::def_id::DefId;
18use rustc_infer::infer::BoundRegionConversionTime::{self, HigherRankedType};
19use rustc_infer::infer::DefineOpaqueTypes;
20use rustc_infer::infer::at::ToTrace;
21use rustc_infer::infer::relate::TypeRelation;
22use rustc_infer::traits::{PredicateObligations, TraitObligation};
23use rustc_macros::{TypeFoldable, TypeVisitable};
24use rustc_middle::bug;
25use rustc_middle::dep_graph::{DepNodeIndex, dep_kinds};
26pub use rustc_middle::traits::select::*;
27use rustc_middle::ty::abstract_const::NotConstEvaluatable;
28use rustc_middle::ty::error::TypeErrorToStringExt;
29use rustc_middle::ty::print::{PrintTraitRefExt as _, with_no_trimmed_paths};
30use rustc_middle::ty::{
31 self, CandidatePreferenceMode, DeepRejectCtxt, GenericArgsRef, PolyProjectionPredicate,
32 SizedTraitKind, Ty, TyCtxt, TypeFoldable, TypeVisitableExt, TypingMode, Upcast, elaborate,
33 may_use_unstable_feature,
34};
35use rustc_next_trait_solver::solve::AliasBoundKind;
36use rustc_span::{Symbol, sym};
37use tracing::{debug, instrument, trace};
38
39use self::EvaluationResult::*;
40use self::SelectionCandidate::*;
41use super::coherence::{self, Conflict};
42use super::project::ProjectionTermObligation;
43use super::util::closure_trait_ref_and_return_type;
44use super::{
45 ImplDerivedCause, Normalized, Obligation, ObligationCause, ObligationCauseCode,
46 PolyTraitObligation, PredicateObligation, Selection, SelectionError, SelectionResult,
47 TraitQueryMode, const_evaluatable, project, util, wf,
48};
49use crate::error_reporting::InferCtxtErrorExt;
50use crate::infer::{InferCtxt, InferOk, TypeFreshener};
51use crate::solve::InferCtxtSelectExt as _;
52use crate::traits::normalize::{normalize_with_depth, normalize_with_depth_to};
53use crate::traits::project::{ProjectAndUnifyResult, ProjectionCacheKeyExt};
54use crate::traits::{EvaluateConstErr, ProjectionCacheKey, effects, sizedness_fast_path};
55
56mod _match;
57mod candidate_assembly;
58mod confirmation;
59
60#[derive(Clone, Debug, Eq, PartialEq, Hash)]
61pub enum IntercrateAmbiguityCause<'tcx> {
62 DownstreamCrate { trait_ref: ty::TraitRef<'tcx>, self_ty: Option<Ty<'tcx>> },
63 UpstreamCrateUpdate { trait_ref: ty::TraitRef<'tcx>, self_ty: Option<Ty<'tcx>> },
64 ReservationImpl { message: Symbol },
65}
66
67impl<'tcx> IntercrateAmbiguityCause<'tcx> {
68 /// Emits notes when the overlap is caused by complex intercrate ambiguities.
69 /// See #23980 for details.
70 pub fn add_intercrate_ambiguity_hint<G: EmissionGuarantee>(&self, err: &mut Diag<'_, G>) {
71 err.note(self.intercrate_ambiguity_hint());
72 }
73
74 pub fn intercrate_ambiguity_hint(&self) -> String {
75 with_no_trimmed_paths!(match self {
76 IntercrateAmbiguityCause::DownstreamCrate { trait_ref, self_ty } => {
77 format!(
78 "downstream crates may implement trait `{trait_desc}`{self_desc}",
79 trait_desc = trait_ref.print_trait_sugared(),
80 self_desc = if let Some(self_ty) = self_ty {
81 format!(" for type `{self_ty}`")
82 } else {
83 String::new()
84 }
85 )
86 }
87 IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_ref, self_ty } => {
88 format!(
89 "upstream crates may add a new impl of trait `{trait_desc}`{self_desc} \
90 in future versions",
91 trait_desc = trait_ref.print_trait_sugared(),
92 self_desc = if let Some(self_ty) = self_ty {
93 format!(" for type `{self_ty}`")
94 } else {
95 String::new()
96 }
97 )
98 }
99 IntercrateAmbiguityCause::ReservationImpl { message } => message.to_string(),
100 })
101 }
102}
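// Illustrative only (hypothetical trait and type names): with the format
// strings above, a `DownstreamCrate` cause for a trait `Remote` and self
// type `Vec<LocalType>` renders as
//
//     downstream crates may implement trait `Remote` for type `Vec<LocalType>`
//
// while `ReservationImpl` simply forwards the message attached to the impl
// via `#[rustc_reservation_impl = "..."]`.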
103
104pub struct SelectionContext<'cx, 'tcx> {
105 pub infcx: &'cx InferCtxt<'tcx>,
106
107 /// Freshener used specifically for entries on the obligation
108 /// stack. This ensures that all entries on the stack at one time
109 /// will have the same set of placeholder entries, which is
110 /// important for checking for trait bounds that recursively
111 /// require themselves.
112 freshener: TypeFreshener<'cx, 'tcx>,
113
114 /// If `intercrate` is set, we remember predicates which were
115 /// considered ambiguous because of impls potentially added in other crates.
116 /// This is used in coherence to give improved diagnostics.
117 /// We don't do this until we detect a coherence error because it can
118 /// lead to false overflow results (#47139) and because always
119 /// computing it may negatively impact performance.
120 intercrate_ambiguity_causes: Option<FxIndexSet<IntercrateAmbiguityCause<'tcx>>>,
121
122 /// The mode that trait queries run in, which informs our error handling
123 /// policy. In essence, canonicalized queries need their errors propagated
124 /// rather than immediately reported because we do not have accurate spans.
125 query_mode: TraitQueryMode,
126}
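// A minimal sketch of how a `SelectionContext` is typically driven, assuming
// an already-constructed `InferCtxt` and a trait `Obligation` (illustrative
// only, not a prescribed entry point):
//
//     let mut selcx = SelectionContext::new(&infcx);
//     match selcx.select(&obligation) {
//         Ok(Some(source)) => { /* selection succeeded and was confirmed */ }
//         Ok(None) => { /* not enough type information yet: ambiguous */ }
//         Err(_err) => { /* selection error, e.g. `Unimplemented` */ }
//     }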
127
128// One frame of the trait obligation stack; each frame links back up to its parent frame.
129struct TraitObligationStack<'prev, 'tcx> {
130 obligation: &'prev PolyTraitObligation<'tcx>,
131
132 /// The trait predicate from `obligation` but "freshened" with the
133 /// selection-context's freshener. Used to check for recursion.
134 fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
135
136 /// Starts out equal to `depth` -- if, during evaluation, we
137 /// encounter a cycle, then we will set this flag to the minimum
138 /// depth of that cycle for all participants in the cycle. These
139 /// participants will then forego caching their results. This is
140 /// not the most efficient solution, but it addresses #60010. The
141 /// problem we are trying to prevent:
142 ///
143 /// - Suppose `A: AutoTrait` requires `B: AutoTrait` and `C: NonAutoTrait`
144 /// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok)
145 /// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok)
146 ///
147 /// you don't want to cache that `B: AutoTrait` or `A: AutoTrait`
148 /// is `EvaluatedToOk`; this is because they were only considered
149 /// ok on the premise that `A: AutoTrait` held, and we later
150 /// encountered a problem with `A: AutoTrait`. So we
151 /// currently set a flag on the stack node for `B: AutoTrait` (as
152 /// well as the second instance of `A: AutoTrait`) to suppress
153 /// caching.
154 ///
155 /// This is a simple, targeted fix. A more-performant fix requires
156 /// deeper changes, but would permit more caching: we could
157 /// basically defer caching until we have fully evaluated the
158 /// tree, and then cache the entire tree at once. In any case, the
159 /// performance impact here shouldn't be so horrible: every time
160 /// this is hit, we do cache at least one trait, so we only
161 /// evaluate each member of a cycle up to N times, where N is the
162 /// length of the cycle. This means the performance impact is
163 /// bounded and we shouldn't have any terrible worst-cases.
164 reached_depth: Cell<usize>,
165
166 previous: TraitObligationStackList<'prev, 'tcx>,
167
168 /// The number of parent frames plus one (thus, the topmost frame has depth 1).
169 depth: usize,
170
171 /// The depth-first number of this node in the search graph -- a
172 /// pre-order index. Basically, a freshly incremented counter.
173 dfn: usize,
174}
175
176struct SelectionCandidateSet<'tcx> {
177 /// A list of candidates that definitely apply to the current
178 /// obligation (meaning: types unify).
179 vec: Vec<SelectionCandidate<'tcx>>,
180
181 /// If `true`, then there were candidates that might or might
182 /// not have applied, but we couldn't tell. This occurs when some
183 /// of the input types are type variables, in which case there are
184 /// various "builtin" rules that might or might not trigger.
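    ///
    /// A user-level sketch of such a goal (illustrative):
    ///
    /// ```rust,ignore (illustrative)
    /// fn needs_copy<T: Copy>(_: &T) {}
    /// fn f() {
    ///     let x = Default::default(); // the type of `x` is still an inference variable
    ///     needs_copy(&x);             // builtin `Copy` rules may or may not apply yet
    ///     let _: String = x;          // ...until `x` is constrained later on
    /// }
    /// ```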
185 ambiguous: bool,
186}
187
188#[derive(PartialEq, Eq, Debug, Clone)]
189struct EvaluatedCandidate<'tcx> {
190 candidate: SelectionCandidate<'tcx>,
191 evaluation: EvaluationResult,
192}
193
194impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
195 pub fn new(infcx: &'cx InferCtxt<'tcx>) -> SelectionContext<'cx, 'tcx> {
196 SelectionContext {
197 infcx,
198 freshener: infcx.freshener(),
199 intercrate_ambiguity_causes: None,
200 query_mode: TraitQueryMode::Standard,
201 }
202 }
203
204 pub fn with_query_mode(
205 infcx: &'cx InferCtxt<'tcx>,
206 query_mode: TraitQueryMode,
207 ) -> SelectionContext<'cx, 'tcx> {
208 debug!(?query_mode, "with_query_mode");
209 SelectionContext { query_mode, ..SelectionContext::new(infcx) }
210 }
211
212 /// Enables tracking of intercrate ambiguity causes. See
213 /// the documentation of [`Self::intercrate_ambiguity_causes`] for more.
214 pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
215 assert_matches!(self.infcx.typing_mode(), TypingMode::Coherence);
216 assert!(self.intercrate_ambiguity_causes.is_none());
217 self.intercrate_ambiguity_causes = Some(FxIndexSet::default());
218 debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
219 }
220
221 /// Gets the intercrate ambiguity causes collected since tracking
222 /// was enabled and disables tracking at the same time. If
223 /// tracking is not enabled, just returns an empty set.
224 pub fn take_intercrate_ambiguity_causes(
225 &mut self,
226 ) -> FxIndexSet<IntercrateAmbiguityCause<'tcx>> {
227 assert_matches!(self.infcx.typing_mode(), TypingMode::Coherence);
228 self.intercrate_ambiguity_causes.take().unwrap_or_default()
229 }
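    // Illustrative usage during coherence overlap checking (a sketch; the
    // real call sites live in the coherence code, and `infcx`/`err` are
    // assumed to exist in the surrounding context):
    //
    //     let mut selcx = SelectionContext::new(&infcx); // TypingMode::Coherence
    //     selcx.enable_tracking_intercrate_ambiguity_causes();
    //     // ... evaluate the obligations of the two overlapping impls ...
    //     for cause in selcx.take_intercrate_ambiguity_causes() {
    //         cause.add_intercrate_ambiguity_hint(&mut err);
    //     }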
230
231 pub fn tcx(&self) -> TyCtxt<'tcx> {
232 self.infcx.tcx
233 }
234
235 ///////////////////////////////////////////////////////////////////////////
236 // Selection
237 //
238 // The selection phase tries to identify *how* an obligation will
239 // be resolved. For example, it will identify which impl or
240 // parameter bound is to be used. The process can be inconclusive
241 // if the self type in the obligation is not fully inferred. Selection
242 // can result in an error in one of two ways:
243 //
244 // 1. If no applicable impl or parameter bound can be found.
245 // 2. If the output type parameters in the obligation do not match
246 // those specified by the impl/bound. For example, if the obligation
247 // is `Vec<Foo>: Iterable<Bar>`, but the impl specifies
248 // `impl<T> Iterable<T> for Vec<T>`, then an error would result.
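    //
    // A sketch of case 2 in user-level code (illustrative):
    //
    //     trait Iterable<A> {}
    //     struct Foo;
    //     struct Bar;
    //     impl<T> Iterable<T> for Vec<T> {}
    //     // The obligation `Vec<Foo>: Iterable<Bar>` matches the impl above
    //     // (giving `T = Foo`), but unifying the output parameter `Foo` with
    //     // `Bar` then fails.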
249
250 /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
251 /// type environment by performing unification.
252 #[instrument(level = "debug", skip(self), ret)]
253 pub fn poly_select(
254 &mut self,
255 obligation: &PolyTraitObligation<'tcx>,
256 ) -> SelectionResult<'tcx, Selection<'tcx>> {
257 assert!(!self.infcx.next_trait_solver());
258
259 let candidate = match self.select_from_obligation(obligation) {
260 Err(SelectionError::Overflow(OverflowError::Canonical)) => {
261 // In standard mode, overflow must have been caught and reported
262 // earlier.
263 assert!(self.query_mode == TraitQueryMode::Canonical);
264 return Err(SelectionError::Overflow(OverflowError::Canonical));
265 }
266 Err(e) => {
267 return Err(e);
268 }
269 Ok(None) => {
270 return Ok(None);
271 }
272 Ok(Some(candidate)) => candidate,
273 };
274
275 match self.confirm_candidate(obligation, candidate) {
276 Err(SelectionError::Overflow(OverflowError::Canonical)) => {
277 assert!(self.query_mode == TraitQueryMode::Canonical);
278 Err(SelectionError::Overflow(OverflowError::Canonical))
279 }
280 Err(e) => Err(e),
281 Ok(candidate) => Ok(Some(candidate)),
282 }
283 }
284
285 pub fn select(
286 &mut self,
287 obligation: &TraitObligation<'tcx>,
288 ) -> SelectionResult<'tcx, Selection<'tcx>> {
289 if self.infcx.next_trait_solver() {
290 return self.infcx.select_in_new_trait_solver(obligation);
291 }
292
293 self.poly_select(&Obligation {
294 cause: obligation.cause.clone(),
295 param_env: obligation.param_env,
296 predicate: ty::Binder::dummy(obligation.predicate),
297 recursion_depth: obligation.recursion_depth,
298 })
299 }
300
301 fn select_from_obligation(
302 &mut self,
303 obligation: &PolyTraitObligation<'tcx>,
304 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
305 debug_assert!(!obligation.predicate.has_escaping_bound_vars());
306
307 let pec = &ProvisionalEvaluationCache::default();
308 let stack = self.push_stack(TraitObligationStackList::empty(pec), obligation);
309
310 self.candidate_from_obligation(&stack)
311 }
312
313 #[instrument(level = "debug", skip(self), ret)]
314 fn candidate_from_obligation<'o>(
315 &mut self,
316 stack: &TraitObligationStack<'o, 'tcx>,
317 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
318 debug_assert!(!self.infcx.next_trait_solver());
319 // Watch out for overflow. This intentionally bypasses (and does
320 // not update) the cache.
321 self.check_recursion_limit(stack.obligation, stack.obligation)?;
322
323 // Check the cache. Note that we freshen the trait-ref
324 // separately rather than using `stack.fresh_trait_pred` --
325 // this is because we want the unbound variables to be
326 // replaced with fresh types starting from index 0.
327 let cache_fresh_trait_pred = self.infcx.freshen(stack.obligation.predicate);
328 debug!(?cache_fresh_trait_pred);
329 debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars());
330
331 if let Some(c) =
332 self.check_candidate_cache(stack.obligation.param_env, cache_fresh_trait_pred)
333 {
334 debug!("CACHE HIT");
335 return c;
336 }
337
338 // If no match, compute result and insert into cache.
339 //
340 // FIXME(nikomatsakis) -- this cache is not taking into
341 // account cycles that may have occurred in forming the
342 // candidate. I don't know of any specific problems that
343 // result but it seems awfully suspicious.
344 let (candidate, dep_node) =
345 self.in_task(|this| this.candidate_from_obligation_no_cache(stack));
346
347 debug!("CACHE MISS");
348 self.insert_candidate_cache(
349 stack.obligation.param_env,
350 cache_fresh_trait_pred,
351 dep_node,
352 candidate.clone(),
353 );
354 candidate
355 }
356
357 fn candidate_from_obligation_no_cache<'o>(
358 &mut self,
359 stack: &TraitObligationStack<'o, 'tcx>,
360 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
361 if let Err(conflict) = self.is_knowable(stack) {
362 debug!("coherence stage: not knowable");
363 if self.intercrate_ambiguity_causes.is_some() {
364 debug!("evaluate_stack: intercrate_ambiguity_causes is some");
365 // Heuristic: show the diagnostics when there are no candidates in the crate.
366 if let Ok(candidate_set) = self.assemble_candidates(stack) {
367 let mut no_candidates_apply = true;
368
369 for c in candidate_set.vec.iter() {
370 if self.evaluate_candidate(stack, c)?.may_apply() {
371 no_candidates_apply = false;
372 break;
373 }
374 }
375
376 if !candidate_set.ambiguous && no_candidates_apply {
377 let trait_ref = self.infcx.resolve_vars_if_possible(
378 stack.obligation.predicate.skip_binder().trait_ref,
379 );
380 if !trait_ref.references_error() {
381 let self_ty = trait_ref.self_ty();
382 let self_ty = self_ty.has_concrete_skeleton().then(|| self_ty);
383 let cause = if let Conflict::Upstream = conflict {
384 IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_ref, self_ty }
385 } else {
386 IntercrateAmbiguityCause::DownstreamCrate { trait_ref, self_ty }
387 };
388 debug!(?cause, "evaluate_stack: pushing cause");
389 self.intercrate_ambiguity_causes.as_mut().unwrap().insert(cause);
390 }
391 }
392 }
393 }
394 return Ok(None);
395 }
396
397 let candidate_set = self.assemble_candidates(stack)?;
398
399 if candidate_set.ambiguous {
400 debug!("candidate set contains ambig");
401 return Ok(None);
402 }
403
404 let candidates = candidate_set.vec;
405
406 debug!(?stack, ?candidates, "assembled {} candidates", candidates.len());
407
408 // At this point, we know that each of the entries in the
409 // candidate set is *individually* applicable. Now we have to
410 // figure out if they contain mutual incompatibilities. This
411 // frequently arises if we have an unconstrained input type --
412 // for example, we are looking for `$0: Eq` where `$0` is some
413 // unconstrained type variable. In that case, we'll get a
414 // candidate which assumes `$0 == i32`, one that assumes `$0 ==
415 // usize`, etc. This spells an ambiguity.
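        //
        // A user-level sketch of that situation (illustrative):
        //
        //     let x = Default::default();   // `x` has some inference type `$0`
        //     let _ = x == x;               // roughly `$0: PartialEq<$0>`; the impls for
        //                                   // `i32`, `usize`, ... each individually apply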
416
417 let mut candidates = self.filter_impls(candidates, stack.obligation);
418
419 // If there is more than one candidate, first winnow them down
420 // by considering extra conditions (nested obligations and so
421 // forth). We don't winnow if there is exactly one
422 // candidate. This is a relatively minor distinction but it
423 // can lead to better inference and error-reporting. An
424 // example would be if there was an impl:
425 //
426 // impl<T:Clone> Vec<T> { fn push_clone(...) { ... } }
427 //
428 // and we were to see some code `foo.push_clone()` where `foo`
429 // is a `Vec<Bar>` and `Bar` does not implement `Clone`. If
430 // we were to winnow, we'd wind up with zero candidates.
431 // Instead, we select the right impl now but report "`Bar` does
432 // not implement `Clone`".
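        //
        // A slightly fuller sketch of that example, using a hypothetical trait
        // (inherent methods are not selected here; illustrative only):
        //
        //     struct Bar; // does not implement `Clone`
        //     trait PushClone<T> { fn push_clone(&mut self, t: T); }
        //     impl<T: Clone> PushClone<T> for Vec<T> {
        //         fn push_clone(&mut self, t: T) { self.push(t.clone()); }
        //     }
        //     fn f(foo: &mut Vec<Bar>, b: Bar) { foo.push_clone(b); }
        //     // Exactly one candidate exists, so we select it and then report
        //     // "`Bar: Clone` is not satisfied" instead of finding no method.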
433 if candidates.len() == 1 {
434 return self.filter_reservation_impls(candidates.pop().unwrap());
435 }
436
437 // Winnow, but record the exact outcome of evaluation, which
438 // is needed for specialization. Propagate overflow if it occurs.
439 let candidates = candidates
440 .into_iter()
441 .map(|c| match self.evaluate_candidate(stack, &c) {
442 Ok(eval) if eval.may_apply() => {
443 Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval }))
444 }
445 Ok(_) => Ok(None),
446 Err(OverflowError::Canonical) => {
447 Err(SelectionError::Overflow(OverflowError::Canonical))
448 }
449 Err(OverflowError::Error(e)) => {
450 Err(SelectionError::Overflow(OverflowError::Error(e)))
451 }
452 })
453 .flat_map(Result::transpose)
454 .collect::<Result<Vec<_>, _>>()?;
455
456 debug!(?stack, ?candidates, "{} potentially applicable candidates", candidates.len());
457 // If there are *NO* candidates, then there are no impls --
458 // that we know of, anyway. Note that in the case where there
459 // are unbound type variables within the obligation, it might
460 // be the case that you could still satisfy the obligation
461 // from another crate by instantiating the type variables with
462 // a type from another crate that does have an impl. This case
463 // is checked for in `evaluate_stack` (and hence users
464 // who might care about this case, like coherence, should use
465 // that function).
466 if candidates.is_empty() {
467 // If there's an error type, 'downgrade' our result from
468 // `Err(Unimplemented)` to `Ok(None)`. This helps us avoid
469 // emitting additional spurious errors, since we're guaranteed
470 // to have emitted at least one.
471 if stack.obligation.predicate.references_error() {
472 debug!(?stack.obligation.predicate, "found error type in predicate, treating as ambiguous");
473 Ok(None)
474 } else {
475 Err(SelectionError::Unimplemented)
476 }
477 } else {
478 let has_non_region_infer = stack.obligation.predicate.has_non_region_infer();
479 let candidate_preference_mode =
480 CandidatePreferenceMode::compute(self.tcx(), stack.obligation.predicate.def_id());
481 if let Some(candidate) =
482 self.winnow_candidates(has_non_region_infer, candidate_preference_mode, candidates)
483 {
484 self.filter_reservation_impls(candidate)
485 } else {
486 Ok(None)
487 }
488 }
489 }
490
491 ///////////////////////////////////////////////////////////////////////////
492 // EVALUATION
493 //
494 // Tests whether an obligation can be selected or whether an impl
495 // can be applied to particular types. It skips the "confirmation"
496 // step and hence completely ignores output type parameters.
497 //
498 // The result is "true" if the obligation *may* hold and "false" if
499 // we can be sure it does not.
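    //
    // An illustrative contrast with selection (a sketch; external callers
    // should go through `infcx.evaluate_obligation` rather than calling the
    // method below directly):
    //
    //     let mut selcx = SelectionContext::new(&infcx);
    //     let result = selcx.evaluate_root_obligation(&obligation)?;
    //     if result.may_apply() {
    //         // the obligation may hold; no impl was committed to and no
    //         // output parameters were unified
    //     }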
500
501 /// Evaluates whether the obligation `obligation` can be satisfied
502 /// and returns an `EvaluationResult`. This is meant for the
503 /// *initial* call.
504 ///
505 /// Do not use this directly, use `infcx.evaluate_obligation` instead.
506 pub fn evaluate_root_obligation(
507 &mut self,
508 obligation: &PredicateObligation<'tcx>,
509 ) -> Result<EvaluationResult, OverflowError> {
510 debug_assert!(!self.infcx.next_trait_solver());
511 self.evaluation_probe(|this| {
512 let goal =
513 this.infcx.resolve_vars_if_possible((obligation.predicate, obligation.param_env));
514 let mut result = this.evaluate_predicate_recursively(
515 TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
516 obligation.clone(),
517 )?;
518 // If the predicate has done any inference, then downgrade the
519 // result to ambiguous.
520 if this.infcx.resolve_vars_if_possible(goal) != goal {
521 result = result.max(EvaluatedToAmbig);
522 }
523 Ok(result)
524 })
525 }
526
527 /// Computes the evaluation result of `op`, discarding any constraints.
528 ///
529 /// This also runs the leak check, allowing higher-ranked region errors to impact
530 /// selection. By default it checks for leaks from all universes created inside of
531 /// `op`, but this can be overridden if necessary.
532 fn evaluation_probe(
533 &mut self,
534 op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>,
535 ) -> Result<EvaluationResult, OverflowError> {
536 self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> {
537 let outer_universe = self.infcx.universe();
538 let result = op(self)?;
539
540 match self.infcx.leak_check(outer_universe, Some(snapshot)) {
541 Ok(()) => {}
542 Err(_) => return Ok(EvaluatedToErr),
543 }
544
545 if self.infcx.opaque_types_added_in_snapshot(snapshot) {
546 return Ok(result.max(EvaluatedToOkModuloOpaqueTypes));
547 }
548
549 if self.infcx.region_constraints_added_in_snapshot(snapshot) {
550 Ok(result.max(EvaluatedToOkModuloRegions))
551 } else {
552 Ok(result)
553 }
554 })
555 }
556
557 /// Evaluates the predicates in `predicates` recursively. This may
558 /// guide inference. If this is not desired, run it inside of an
559 /// inference probe so that any resulting constraints are
560 /// discarded.
561 #[instrument(skip(self, stack), level = "debug")]
562 fn evaluate_predicates_recursively<'o, I>(
563 &mut self,
564 stack: TraitObligationStackList<'o, 'tcx>,
565 predicates: I,
566 ) -> Result<EvaluationResult, OverflowError>
567 where
568 I: IntoIterator<Item = PredicateObligation<'tcx>> + std::fmt::Debug,
569 {
570 let mut result = EvaluatedToOk;
571 for mut obligation in predicates {
572 obligation.set_depth_from_parent(stack.depth());
573 let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?;
574 if let EvaluatedToErr = eval {
575 // fast-path - EvaluatedToErr is the top of the lattice,
576 // so we don't need to look at the other predicates.
577 return Ok(EvaluatedToErr);
578 } else {
579 result = cmp::max(result, eval);
580 }
581 }
582 Ok(result)
583 }
584
585 #[instrument(
586 level = "debug",
587 skip(self, previous_stack),
588 fields(previous_stack = ?previous_stack.head())
589 ret,
590 )]
591 fn evaluate_predicate_recursively<'o>(
592 &mut self,
593 previous_stack: TraitObligationStackList<'o, 'tcx>,
594 obligation: PredicateObligation<'tcx>,
595 ) -> Result<EvaluationResult, OverflowError> {
596 debug_assert!(!self.infcx.next_trait_solver());
597 // `previous_stack` stores a `PolyTraitObligation`, while `obligation` is
598 // a `PredicateObligation`. These are distinct types, so we can't
599 // use any `Option` combinator method that would force them to be
600 // the same.
601 match previous_stack.head() {
602 Some(h) => self.check_recursion_limit(&obligation, h.obligation)?,
603 None => self.check_recursion_limit(&obligation, &obligation)?,
604 }
605
606 if sizedness_fast_path(self.tcx(), obligation.predicate, obligation.param_env) {
607 return Ok(EvaluatedToOk);
608 }
609
610 ensure_sufficient_stack(|| {
611 let bound_predicate = obligation.predicate.kind();
612 match bound_predicate.skip_binder() {
613 ty::PredicateKind::Clause(ty::ClauseKind::Trait(t)) => {
614 let t = bound_predicate.rebind(t);
615 debug_assert!(!t.has_escaping_bound_vars());
616 let obligation = obligation.with(self.tcx(), t);
617 self.evaluate_trait_predicate_recursively(previous_stack, obligation)
618 }
619
620 ty::PredicateKind::Clause(ty::ClauseKind::HostEffect(data)) => {
621 self.infcx.enter_forall(bound_predicate.rebind(data), |data| {
622 match effects::evaluate_host_effect_obligation(
623 self,
624 &obligation.with(self.tcx(), data),
625 ) {
626 Ok(nested) => {
627 self.evaluate_predicates_recursively(previous_stack, nested)
628 }
629 Err(effects::EvaluationFailure::Ambiguous) => Ok(EvaluatedToAmbig),
630 Err(effects::EvaluationFailure::NoSolution) => Ok(EvaluatedToErr),
631 }
632 })
633 }
634
635 ty::PredicateKind::Subtype(p) => {
636 let p = bound_predicate.rebind(p);
637 // Does this code ever run?
638 match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) {
639 Ok(Ok(InferOk { obligations, .. })) => {
640 self.evaluate_predicates_recursively(previous_stack, obligations)
641 }
642 Ok(Err(_)) => Ok(EvaluatedToErr),
643 Err(..) => Ok(EvaluatedToAmbig),
644 }
645 }
646
647 ty::PredicateKind::Coerce(p) => {
648 let p = bound_predicate.rebind(p);
649 // Does this code ever run?
650 match self.infcx.coerce_predicate(&obligation.cause, obligation.param_env, p) {
651 Ok(Ok(InferOk { obligations, .. })) => {
652 self.evaluate_predicates_recursively(previous_stack, obligations)
653 }
654 Ok(Err(_)) => Ok(EvaluatedToErr),
655 Err(..) => Ok(EvaluatedToAmbig),
656 }
657 }
658
659 ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(term)) => {
660 if term.is_trivially_wf(self.tcx()) {
661 return Ok(EvaluatedToOk);
662 }
663
664 // So, there is a bit going on here. First, `WellFormed` predicates
665 // are coinductive, like trait predicates with auto traits.
666 // This means that we need to detect if we have recursively
667 // evaluated `WellFormed(X)`. Otherwise, we would run into
668 // a "natural" overflow error.
669 //
670 // Now, the next question is whether we need to do anything
671 // special with caching. Considering the following tree:
672 // - `WF(Foo<T>)`
673 // - `Bar<T>: Send`
674 // - `WF(Foo<T>)`
675 // - `Foo<T>: Trait`
676 // In this case, the innermost `WF(Foo<T>)` should return
677 // `EvaluatedToOk`, since it's coinductive. Then if
678 // `Bar<T>: Send` is resolved to `EvaluatedToOk`, it can be
679 // inserted into a cache (because without thinking about `WF`
680 // goals, it isn't in a cycle). If `Foo<T>: Trait` later doesn't
681 // hold, then `Bar<T>: Send` shouldn't hold. Therefore, we
682 // *do* need to keep track of coinductive cycles.
683
684 let cache = previous_stack.cache;
685 let dfn = cache.next_dfn();
686
687 for stack_term in previous_stack.cache.wf_args.borrow().iter().rev() {
688 if stack_term.0 != term {
689 continue;
690 }
691 debug!("WellFormed({:?}) on stack", term);
692 if let Some(stack) = previous_stack.head {
693 // Okay, let's imagine we have two different stacks:
694 // `T: NonAutoTrait -> WF(T) -> T: NonAutoTrait`
695 // `WF(T) -> T: NonAutoTrait -> WF(T)`
696 // Because of this, we need to check that all
697 // predicates between the WF goals are coinductive.
698 // Otherwise, we can say that `T: NonAutoTrait` is
699 // true.
700 // Let's imagine we have a predicate stack like
701 // `Foo: Bar -> WF(T) -> T: NonAutoTrait -> T: Auto`
702 // depth ^1 ^2 ^3
703 // and the current predicate is `WF(T)`. `wf_args`
704 // would contain `(T, 1)`. We want to check all
705 // trait predicates greater than `1`. The previous
706 // stack would be `T: Auto`.
707 let cycle = stack.iter().take_while(|s| s.depth > stack_term.1);
708 let tcx = self.tcx();
709 let cycle = cycle.map(|stack| stack.obligation.predicate.upcast(tcx));
710 if self.coinductive_match(cycle) {
711 stack.update_reached_depth(stack_term.1);
712 return Ok(EvaluatedToOk);
713 } else {
714 return Ok(EvaluatedToAmbigStackDependent);
715 }
716 }
717 return Ok(EvaluatedToOk);
718 }
719
720 match wf::obligations(
721 self.infcx,
722 obligation.param_env,
723 obligation.cause.body_id,
724 obligation.recursion_depth + 1,
725 term,
726 obligation.cause.span,
727 ) {
728 Some(obligations) => {
729 cache.wf_args.borrow_mut().push((term, previous_stack.depth()));
730 let result =
731 self.evaluate_predicates_recursively(previous_stack, obligations);
732 cache.wf_args.borrow_mut().pop();
733
734 let result = result?;
735
736 if !result.must_apply_modulo_regions() {
737 cache.on_failure(dfn);
738 }
739
740 cache.on_completion(dfn);
741
742 Ok(result)
743 }
744 None => Ok(EvaluatedToAmbig),
745 }
746 }
747
748 ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(pred)) => {
749 // A global type with no free lifetimes or generic parameters
750 // outlives anything.
751 if pred.0.has_free_regions()
752 || pred.0.has_bound_regions()
753 || pred.0.has_non_region_infer()
755 {
756 Ok(EvaluatedToOkModuloRegions)
757 } else {
758 Ok(EvaluatedToOk)
759 }
760 }
761
762 ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives(..)) => {
763 // We do not consider region relationships when evaluating trait matches.
764 Ok(EvaluatedToOkModuloRegions)
765 }
766
767 ty::PredicateKind::DynCompatible(trait_def_id) => {
768 if self.tcx().is_dyn_compatible(trait_def_id) {
769 Ok(EvaluatedToOk)
770 } else {
771 Ok(EvaluatedToErr)
772 }
773 }
774
775 ty::PredicateKind::Clause(ty::ClauseKind::Projection(data)) => {
776 let data = bound_predicate.rebind(data);
777 let project_obligation = obligation.with(self.tcx(), data);
778 match project::poly_project_and_unify_term(self, &project_obligation) {
779 ProjectAndUnifyResult::Holds(mut subobligations) => {
780 'compute_res: {
781 // If we've previously marked this projection as 'complete', then
782 // use the final cached result (either `EvaluatedToOk` or
783 // `EvaluatedToOkModuloRegions`), and skip re-evaluating the
784 // sub-obligations.
785 if let Some(key) =
786 ProjectionCacheKey::from_poly_projection_obligation(
787 self,
788 &project_obligation,
789 )
790 && let Some(cached_res) = self
791 .infcx
792 .inner
793 .borrow_mut()
794 .projection_cache()
795 .is_complete(key)
796 {
797 break 'compute_res Ok(cached_res);
798 }
799
800 // Need to explicitly set the depth of nested goals here as
801 // projection obligations can cycle by themselves and in
802 // `evaluate_predicates_recursively` we only add the depth
803 // for parent trait goals because only these get added to the
804 // `TraitObligationStackList`.
805 for subobligation in subobligations.iter_mut() {
806 subobligation.set_depth_from_parent(obligation.recursion_depth);
807 }
808 let res = self.evaluate_predicates_recursively(
809 previous_stack,
810 subobligations,
811 );
812 if let Ok(eval_rslt) = res
813 && (eval_rslt == EvaluatedToOk
814 || eval_rslt == EvaluatedToOkModuloRegions)
815 && let Some(key) =
816 ProjectionCacheKey::from_poly_projection_obligation(
817 self,
818 &project_obligation,
819 )
820 {
821 // If the result is something that we can cache, then mark this
822 // entry as 'complete'. This will allow us to skip evaluating the
823 // subobligations at all the next time we evaluate the projection
824 // predicate.
825 self.infcx
826 .inner
827 .borrow_mut()
828 .projection_cache()
829 .complete(key, eval_rslt);
830 }
831 res
832 }
833 }
834 ProjectAndUnifyResult::FailedNormalization => Ok(EvaluatedToAmbig),
835 ProjectAndUnifyResult::Recursive => Ok(EvaluatedToAmbigStackDependent),
836 ProjectAndUnifyResult::MismatchedProjectionTypes(_) => Ok(EvaluatedToErr),
837 }
838 }
839
840 ty::PredicateKind::Clause(ty::ClauseKind::UnstableFeature(symbol)) => {
841 if may_use_unstable_feature(self.infcx, obligation.param_env, symbol) {
842 Ok(EvaluatedToOk)
843 } else {
844 Ok(EvaluatedToAmbig)
845 }
846 }
847
848 ty::PredicateKind::Clause(ty::ClauseKind::ConstEvaluatable(uv)) => {
849 match const_evaluatable::is_const_evaluatable(
850 self.infcx,
851 uv,
852 obligation.param_env,
853 obligation.cause.span,
854 ) {
855 Ok(()) => Ok(EvaluatedToOk),
856 Err(NotConstEvaluatable::MentionsInfer) => Ok(EvaluatedToAmbig),
857 Err(NotConstEvaluatable::MentionsParam) => Ok(EvaluatedToErr),
858 Err(_) => Ok(EvaluatedToErr),
859 }
860 }
861
862 ty::PredicateKind::ConstEquate(c1, c2) => {
863 let tcx = self.tcx();
864 assert!(
865 tcx.features().generic_const_exprs(),
866 "`ConstEquate` without a feature gate: {c1:?} {c2:?}",
867 );
868
869 {
870 let c1 = tcx.expand_abstract_consts(c1);
871 let c2 = tcx.expand_abstract_consts(c2);
872 debug!(
873 "evaluate_predicate_recursively: equating consts:\nc1= {:?}\nc2= {:?}",
874 c1, c2
875 );
876
877 use rustc_hir::def::DefKind;
878 match (c1.kind(), c2.kind()) {
879 (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b))
880 if a.def == b.def && tcx.def_kind(a.def) == DefKind::AssocConst =>
881 {
882 if let Ok(InferOk { obligations, value: () }) = self
883 .infcx
884 .at(&obligation.cause, obligation.param_env)
885 // Can define opaque types as this is only reachable with
886 // `generic_const_exprs`
887 .eq(
888 DefineOpaqueTypes::Yes,
889 ty::AliasTerm::from(a),
890 ty::AliasTerm::from(b),
891 )
892 {
893 return self.evaluate_predicates_recursively(
894 previous_stack,
895 obligations,
896 );
897 }
898 }
899 (_, ty::ConstKind::Unevaluated(_))
900 | (ty::ConstKind::Unevaluated(_), _) => (),
901 (_, _) => {
902 if let Ok(InferOk { obligations, value: () }) = self
903 .infcx
904 .at(&obligation.cause, obligation.param_env)
905 // Can define opaque types as this is only reachable with
906 // `generic_const_exprs`
907 .eq(DefineOpaqueTypes::Yes, c1, c2)
908 {
909 return self.evaluate_predicates_recursively(
910 previous_stack,
911 obligations,
912 );
913 }
914 }
915 }
916 }
917
918 let evaluate = |c: ty::Const<'tcx>| {
919 if let ty::ConstKind::Unevaluated(_) = c.kind() {
920 match crate::traits::try_evaluate_const(
921 self.infcx,
922 c,
923 obligation.param_env,
924 ) {
925 Ok(val) => Ok(val),
926 Err(e) => Err(e),
927 }
928 } else {
929 Ok(c)
930 }
931 };
932
933 match (evaluate(c1), evaluate(c2)) {
934 (Ok(c1), Ok(c2)) => {
935 match self.infcx.at(&obligation.cause, obligation.param_env).eq(
936 // Can define opaque types as this is only reachable with
937 // `generic_const_exprs`
938 DefineOpaqueTypes::Yes,
939 c1,
940 c2,
941 ) {
942 Ok(inf_ok) => self.evaluate_predicates_recursively(
943 previous_stack,
944 inf_ok.into_obligations(),
945 ),
946 Err(_) => Ok(EvaluatedToErr),
947 }
948 }
949 (Err(EvaluateConstErr::InvalidConstParamTy(..)), _)
950 | (_, Err(EvaluateConstErr::InvalidConstParamTy(..))) => Ok(EvaluatedToErr),
951 (Err(EvaluateConstErr::EvaluationFailure(..)), _)
952 | (_, Err(EvaluateConstErr::EvaluationFailure(..))) => Ok(EvaluatedToErr),
953 (Err(EvaluateConstErr::HasGenericsOrInfers), _)
954 | (_, Err(EvaluateConstErr::HasGenericsOrInfers)) => {
955 if c1.has_non_region_infer() || c2.has_non_region_infer() {
956 Ok(EvaluatedToAmbig)
957 } else {
958 // Two different constants using generic parameters ~> error.
959 Ok(EvaluatedToErr)
960 }
961 }
962 }
963 }
964 ty::PredicateKind::NormalizesTo(..) => {
965 bug!("NormalizesTo is only used by the new solver")
966 }
967 ty::PredicateKind::AliasRelate(..) => {
968 bug!("AliasRelate is only used by the new solver")
969 }
970 ty::PredicateKind::Ambiguous => Ok(EvaluatedToAmbig),
971 ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(ct, ty)) => {
972 let ct = self.infcx.shallow_resolve_const(ct);
973 let ct_ty = match ct.kind() {
974 ty::ConstKind::Infer(_) => {
975 return Ok(EvaluatedToAmbig);
976 }
977 ty::ConstKind::Error(_) => return Ok(EvaluatedToOk),
978 ty::ConstKind::Value(cv) => cv.ty,
979 ty::ConstKind::Unevaluated(uv) => {
980 self.tcx().type_of(uv.def).instantiate(self.tcx(), uv.args)
981 }
982 // FIXME(generic_const_exprs): See comment in `fulfill.rs`
983 ty::ConstKind::Expr(_) => return Ok(EvaluatedToOk),
984 ty::ConstKind::Placeholder(_) => {
985 bug!("placeholder const {:?} in old solver", ct)
986 }
987 ty::ConstKind::Bound(_, _) => bug!("escaping bound vars in {:?}", ct),
988 ty::ConstKind::Param(param_ct) => {
989 param_ct.find_const_ty_from_env(obligation.param_env)
990 }
991 };
992
993 match self.infcx.at(&obligation.cause, obligation.param_env).eq(
994 // Only really exercised by generic_const_exprs
995 DefineOpaqueTypes::Yes,
996 ct_ty,
997 ty,
998 ) {
999 Ok(inf_ok) => self.evaluate_predicates_recursively(
1000 previous_stack,
1001 inf_ok.into_obligations(),
1002 ),
1003 Err(_) => Ok(EvaluatedToErr),
1004 }
1005 }
1006 }
1007 })
1008 }
1009
1010 #[instrument(skip(self, previous_stack), level = "debug", ret)]
1011 fn evaluate_trait_predicate_recursively<'o>(
1012 &mut self,
1013 previous_stack: TraitObligationStackList<'o, 'tcx>,
1014 mut obligation: PolyTraitObligation<'tcx>,
1015 ) -> Result<EvaluationResult, OverflowError> {
1016 if !matches!(self.infcx.typing_mode(), TypingMode::Coherence)
1017 && obligation.is_global()
1018 && obligation.param_env.caller_bounds().iter().all(|bound| bound.has_param())
1019 {
1020 // If a param env has no global bounds, global obligations do not
1021 // depend on its particular value in order to work, so we can clear
1022 // out the param env and get better caching.
1023 debug!("in global");
1024 obligation.param_env = ty::ParamEnv::empty();
1025 }
1026
1027 let stack = self.push_stack(previous_stack, &obligation);
1028 let fresh_trait_pred = stack.fresh_trait_pred;
1029 let param_env = obligation.param_env;
1030
1031 debug!(?fresh_trait_pred);
1032
1033 // If a trait predicate is in the (local or global) evaluation cache,
1034 // then we know it holds without cycles.
1035 if let Some(result) = self.check_evaluation_cache(param_env, fresh_trait_pred) {
1036 debug!("CACHE HIT");
1037 return Ok(result);
1038 }
1039
1040 if let Some(result) = stack.cache().get_provisional(fresh_trait_pred) {
1041 debug!("PROVISIONAL CACHE HIT");
1042 stack.update_reached_depth(result.reached_depth);
1043 return Ok(result.result);
1044 }
1045
1046 // Check if this is a match for something already on the
1047 // stack. If so, we don't want to insert the result into the
1048 // main cache (it is cycle dependent) nor the provisional
1049 // cache (which is meant for things that have completed but
1050 // for a "backedge" -- this result *is* the backedge).
1051 if let Some(cycle_result) = self.check_evaluation_cycle(&stack) {
1052 return Ok(cycle_result);
1053 }
1054
1055 let (result, dep_node) = self.in_task(|this| {
1056 let mut result = this.evaluate_stack(&stack)?;
1057
1058 // Fix for issue #103563: we don't normalize
1059 // nested obligations produced by a `TraitDef` candidate
1060 // (i.e. using bounds on assoc items as assumptions),
1061 // because we don't have enough information to
1062 // normalize these obligations before evaluating them.
1063 // So we try to normalize the obligation and evaluate it again.
1064 // This will be replaced by the new solver in the future.
1065 if EvaluationResult::EvaluatedToErr == result
1066 && fresh_trait_pred.has_aliases()
1067 && fresh_trait_pred.is_global()
1068 {
1069 let mut nested_obligations = PredicateObligations::new();
1070 let predicate = normalize_with_depth_to(
1071 this,
1072 param_env,
1073 obligation.cause.clone(),
1074 obligation.recursion_depth + 1,
1075 obligation.predicate,
1076 &mut nested_obligations,
1077 );
1078 if predicate != obligation.predicate {
1079 let mut nested_result = EvaluationResult::EvaluatedToOk;
1080 for obligation in nested_obligations {
1081 nested_result = cmp::max(
1082 this.evaluate_predicate_recursively(previous_stack, obligation)?,
1083 nested_result,
1084 );
1085 }
1086
1087 if nested_result.must_apply_modulo_regions() {
1088 let obligation = obligation.with(this.tcx(), predicate);
1089 result = cmp::max(
1090 nested_result,
1091 this.evaluate_trait_predicate_recursively(previous_stack, obligation)?,
1092 );
1093 }
1094 }
1095 }
1096
1097 Ok::<_, OverflowError>(result)
1098 });
1099
1100 let result = result?;
1101
1102 if !result.must_apply_modulo_regions() {
1103 stack.cache().on_failure(stack.dfn);
1104 }
1105
1106 let reached_depth = stack.reached_depth.get();
1107 if reached_depth >= stack.depth {
1108 debug!("CACHE MISS");
1109 self.insert_evaluation_cache(param_env, fresh_trait_pred, dep_node, result);
1110 stack.cache().on_completion(stack.dfn);
1111 } else {
1112 debug!("PROVISIONAL");
1113 debug!(
1114 "caching provisionally because {:?} \
1115 is a cycle participant (at depth {}, reached depth {})",
1116 fresh_trait_pred, stack.depth, reached_depth,
1117 );
1118
1119 stack.cache().insert_provisional(stack.dfn, reached_depth, fresh_trait_pred, result);
1120 }
1121
1122 Ok(result)
1123 }
1124
1125 /// If there is any previous entry on the stack that precisely
1126 /// matches this obligation, then we can assume that the
1127 /// obligation is satisfied for now (still all other conditions
1128 /// must be met of course). One obvious case this comes up is
1129 /// marker traits like `Send`. Think of a linked list:
1130 ///
1131 /// struct List<T> { data: T, next: Option<Box<List<T>>> }
1132 ///
1133 /// `Box<List<T>>` will be `Send` if `T` is `Send` and
1134 /// `Option<Box<List<T>>>` is `Send`, and in turn
1135 /// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
1136 /// `Send`.
1137 ///
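    /// A compilable sketch of the same idea (illustrative):
    ///
    /// ```rust,ignore (illustrative)
    /// struct List<T> { data: T, next: Option<Box<List<T>>> }
    /// fn assert_send<T: Send>() {}
    /// // Proving `List<i32>: Send` re-encounters `List<i32>: Send` through
    /// // the `next` field; the repeated stack entry is assumed to hold
    /// // rather than recursing forever.
    /// fn check() { assert_send::<List<i32>>(); }
    /// ```
    ///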
1138 /// Note that we do this comparison using the `fresh_trait_pred`
1139 /// fields. Because these have all been freshened using
1140 /// `self.freshener`, we can be sure that (a) this will not
1141 /// affect the inferencer state and (b) that if we see two
1142 /// fresh regions with the same index, they refer to the same
1143 /// unbound type variable.
1144 fn check_evaluation_cycle(
1145 &mut self,
1146 stack: &TraitObligationStack<'_, 'tcx>,
1147 ) -> Option<EvaluationResult> {
1148 if let Some(cycle_depth) = stack
1149 .iter()
1150 .skip(1) // Skip top-most frame.
1151 .find(|prev| {
1152 stack.obligation.param_env == prev.obligation.param_env
1153 && stack.fresh_trait_pred == prev.fresh_trait_pred
1154 })
1155 .map(|stack| stack.depth)
1156 {
1157 debug!("evaluate_stack --> recursive at depth {}", cycle_depth);
1158
1159 // If we have a stack like `A B C D E A`, where the top of
1160 // the stack is the final `A`, then this will iterate over
1161 // `A, E, D, C, B` -- i.e., all the participants apart
1162 // from the cycle head. We mark them as participating in a
1163 // cycle. This suppresses caching for those nodes. See
1164 // `reached_depth` field for more details.
1165 stack.update_reached_depth(cycle_depth);
1166
1167 // Subtle: when checking for a coinductive cycle, we do
1168 // not compare using the "freshened trait refs" (which
1169 // have erased regions) but rather the fully explicit
1170 // trait refs. This is important because it's only a cycle
1171 // if the regions match exactly.
1172 let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth);
1173 let tcx = self.tcx();
1174 let cycle = cycle.map(|stack| stack.obligation.predicate.upcast(tcx));
1175 if self.coinductive_match(cycle) {
1176 debug!("evaluate_stack --> recursive, coinductive");
1177 Some(EvaluatedToOk)
1178 } else {
1179 debug!("evaluate_stack --> recursive, inductive");
1180 Some(EvaluatedToAmbigStackDependent)
1181 }
1182 } else {
1183 None
1184 }
1185 }
1186
1187 fn evaluate_stack<'o>(
1188 &mut self,
1189 stack: &TraitObligationStack<'o, 'tcx>,
1190 ) -> Result<EvaluationResult, OverflowError> {
1191 debug_assert!(!self.infcx.next_trait_solver());
1192 // In intercrate mode, whenever any of the generics are unbound,
1193 // there can always be an impl. Even if there are no impls in
1194 // this crate, perhaps the type would be unified with
1195 // something from another crate that does provide an impl.
1196 //
1197 // In intra mode, we must still be conservative. The reason is
1198 // that we want to avoid cycles. Imagine an impl like:
1199 //
1200 // impl<T:Eq> Eq for Vec<T>
1201 //
1202 // and a trait reference like `$0 : Eq` where `$0` is an
1203 // unbound variable. When we evaluate this trait-reference, we
1204 // will unify `$0` with `Vec<$1>` (for some fresh variable
1205 // `$1`), on the condition that `$1 : Eq`. We will then wind
1206 // up with many candidates (since there are other `Eq` impls
1207 // that apply) and try to winnow things down. This results in
1208 // a recursive evaluation of `$1 : Eq` -- as you can
1209 // imagine, this is just where we started. To avoid that, we
1210 // check for unbound variables and return an ambiguous (hence possible)
1211 // match if we've seen this trait before.
1212 //
1213 // This suffices to allow chains like `FnMut` implemented in
1214 // terms of `Fn` etc, but we could probably make this more
1215 // precise still.
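        //
        // A sketch of the guarded pattern (illustrative, mirroring the
        // `Vec<T>` example above):
        //
        //     // goal: `$0: Eq` with `$0` unconstrained; matching
        //     // `impl<T: Eq> Eq for Vec<T>` unifies `$0 == Vec<$1>` and asks
        //     // `$1: Eq`, which has the same freshened shape as the original
        //     // goal, so we answer "ambiguous" instead of recursing.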
1216 let unbound_input_types =
1217 stack.fresh_trait_pred.skip_binder().trait_ref.args.types().any(|ty| ty.is_fresh());
1218
1219 if unbound_input_types
1220 && stack.iter().skip(1).any(|prev| {
1221 stack.obligation.param_env == prev.obligation.param_env
1222 && self.match_fresh_trait_refs(stack.fresh_trait_pred, prev.fresh_trait_pred)
1223 })
1224 {
1225 debug!("evaluate_stack --> unbound argument, recursive --> giving up",);
1226 return Ok(EvaluatedToAmbigStackDependent);
1227 }
1228
1229 match self.candidate_from_obligation(stack) {
1230 Ok(Some(c)) => self.evaluate_candidate(stack, &c),
1231 Ok(None) => Ok(EvaluatedToAmbig),
1232 Err(SelectionError::Overflow(OverflowError::Canonical)) => {
1233 Err(OverflowError::Canonical)
1234 }
1235 Err(..) => Ok(EvaluatedToErr),
1236 }
1237 }
1238
1239 /// For coinductive traits (auto traits and `Sized`), we use a co-inductive strategy to solve, so
1240 /// that recursion is ok. This routine returns `true` if the top of the
1241 /// stack (`cycle[0]`):
1242 ///
1243 /// - is a coinductive trait: an auto-trait or `Sized`,
1244 /// - it also appears in the backtrace at some position `X`,
1245 /// - all the predicates at positions `X..` between `X` and the top are
1246 /// also coinductive traits.
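    ///
    /// A compilable sketch (illustrative):
    ///
    /// ```rust,ignore (illustrative)
    /// struct Tree { children: Vec<Tree> }
    /// fn assert_send<T: Send>() {}
    /// // `Tree: Send` -> `Vec<Tree>: Send` -> `Tree: Send`: every predicate
    /// // in the cycle is an auto-trait goal, so the cycle is coinductive and
    /// // evaluates to OK.
    /// fn check() { assert_send::<Tree>(); }
    /// ```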
1247 pub(crate) fn coinductive_match<I>(&mut self, mut cycle: I) -> bool
1248 where
1249 I: Iterator<Item = ty::Predicate<'tcx>>,
1250 {
1251 cycle.all(|p| match p.kind().skip_binder() {
1252 ty::PredicateKind::Clause(ty::ClauseKind::Trait(data)) => {
1253 self.infcx.tcx.trait_is_coinductive(data.def_id())
1254 }
1255 ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(_)) => {
1256 // FIXME(generic_const_exprs): GCE needs well-formedness predicates to be
1257 // coinductive, but GCE is on the way out anyways, so this should eventually
1258 // be replaced with `false`.
1259 self.infcx.tcx.features().generic_const_exprs()
1260 }
1261 _ => false,
1262 })
1263 }
1264
1265 /// Further evaluates `candidate` to decide whether all type parameters match and whether nested
1266 /// obligations are met. Returns whether `candidate` remains viable after this further
1267 /// scrutiny.
1268 #[instrument(
1269 level = "debug",
1270 skip(self, stack),
1271 fields(depth = stack.obligation.recursion_depth),
1272 ret
1273 )]
1274 fn evaluate_candidate<'o>(
1275 &mut self,
1276 stack: &TraitObligationStack<'o, 'tcx>,
1277 candidate: &SelectionCandidate<'tcx>,
1278 ) -> Result<EvaluationResult, OverflowError> {
1279 let mut result = self.evaluation_probe(|this| {
1280 match this.confirm_candidate(stack.obligation, candidate.clone()) {
1281 Ok(selection) => {
1282 debug!(?selection);
1283 this.evaluate_predicates_recursively(
1284 stack.list(),
1285 selection.nested_obligations().into_iter(),
1286 )
1287 }
1288 Err(..) => Ok(EvaluatedToErr),
1289 }
1290 })?;
1291
1292 // If we erased any lifetimes, then we want to use
1293 // `EvaluatedToOkModuloRegions` instead of `EvaluatedToOk`
1294 // as our final result. The result will be cached using
1295 // the freshened trait predicate as a key, so we need
1296 // our result to be correct by *any* choice of original lifetimes,
1297 // not just the lifetime choice for this particular (non-erased)
1298 // predicate.
1299 // See issue #80691
1300 if stack.fresh_trait_pred.has_erased_regions() {
1301 result = result.max(EvaluatedToOkModuloRegions);
1302 }
1303
1304 Ok(result)
1305 }
1306
1307 fn check_evaluation_cache(
1308 &self,
1309 param_env: ty::ParamEnv<'tcx>,
1310 trait_pred: ty::PolyTraitPredicate<'tcx>,
1311 ) -> Option<EvaluationResult> {
1312 let infcx = self.infcx;
1313 let tcx = infcx.tcx;
1314 if self.can_use_global_caches(param_env, trait_pred) {
1315 let key = (infcx.typing_env(param_env), trait_pred);
1316 if let Some(res) = tcx.evaluation_cache.get(&key, tcx) {
1317 Some(res)
1318 } else {
1319 debug_assert_eq!(infcx.evaluation_cache.get(&(param_env, trait_pred), tcx), None);
1320 None
1321 }
1322 } else {
1323 self.infcx.evaluation_cache.get(&(param_env, trait_pred), tcx)
1324 }
1325 }
1326
1327 fn insert_evaluation_cache(
1328 &mut self,
1329 param_env: ty::ParamEnv<'tcx>,
1330 trait_pred: ty::PolyTraitPredicate<'tcx>,
1331 dep_node: DepNodeIndex,
1332 result: EvaluationResult,
1333 ) {
1334 // Avoid caching results that depend on more than just the trait-ref
1335 // - the stack can create recursion.
1336 if result.is_stack_dependent() {
1337 return;
1338 }
1339
1340 let infcx = self.infcx;
1341 let tcx = infcx.tcx;
1342 if self.can_use_global_caches(param_env, trait_pred) {
1343 debug!(?trait_pred, ?result, "insert_evaluation_cache global");
1344 // This may overwrite the cache with the same value
1345 tcx.evaluation_cache.insert(
1346 (infcx.typing_env(param_env), trait_pred),
1347 dep_node,
1348 result,
1349 );
1350 return;
1351 } else {
1352 debug!(?trait_pred, ?result, "insert_evaluation_cache local");
1353 self.infcx.evaluation_cache.insert((param_env, trait_pred), dep_node, result);
1354 }
1355 }
1356
1357 fn check_recursion_depth<T>(
1358 &self,
1359 depth: usize,
1360 error_obligation: &Obligation<'tcx, T>,
1361 ) -> Result<(), OverflowError>
1362 where
1363 T: Upcast<TyCtxt<'tcx>, ty::Predicate<'tcx>> + Clone,
1364 {
1365 if !self.infcx.tcx.recursion_limit().value_within_limit(depth) {
1366 match self.query_mode {
1367 TraitQueryMode::Standard => {
1368 if let Some(e) = self.infcx.tainted_by_errors() {
1369 return Err(OverflowError::Error(e));
1370 }
1371 self.infcx.err_ctxt().report_overflow_obligation(error_obligation, true);
1372 }
1373 TraitQueryMode::Canonical => {
1374 return Err(OverflowError::Canonical);
1375 }
1376 }
1377 }
1378 Ok(())
1379 }
1380
1381 /// Checks that the recursion limit has not been exceeded.
1382 ///
1383 /// The weird return type of this function allows it to be used with the `try` (`?`)
1384 /// operator within certain functions.
1385 #[inline(always)]
1386 fn check_recursion_limit<T: Display + TypeFoldable<TyCtxt<'tcx>>, V>(
1387 &self,
1388 obligation: &Obligation<'tcx, T>,
1389 error_obligation: &Obligation<'tcx, V>,
1390 ) -> Result<(), OverflowError>
1391 where
1392 V: Upcast<TyCtxt<'tcx>, ty::Predicate<'tcx>> + Clone,
1393 {
1394 self.check_recursion_depth(obligation.recursion_depth, error_obligation)
1395 }
1396
1397 fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
1398 where
1399 OP: FnOnce(&mut Self) -> R,
1400 {
1401 self.tcx().dep_graph.with_anon_task(self.tcx(), dep_kinds::TraitSelect, || op(self))
1402 }
1403
1404 /// `filter_impls` removes candidates whose polarity does not match the goal: a positive
1405 /// impl for a negative goal, or a negative impl for a positive goal (reservation impls are kept).
1406 #[instrument(level = "debug", skip(self, candidates))]
1407 fn filter_impls(
1408 &mut self,
1409 candidates: Vec<SelectionCandidate<'tcx>>,
1410 obligation: &PolyTraitObligation<'tcx>,
1411 ) -> Vec<SelectionCandidate<'tcx>> {
1412 trace!("{candidates:#?}");
1413 let tcx = self.tcx();
1414 let mut result = Vec::with_capacity(candidates.len());
1415
1416 for candidate in candidates {
1417 if let ImplCandidate(def_id) = candidate {
1418 match (tcx.impl_polarity(def_id), obligation.polarity()) {
1419 (ty::ImplPolarity::Reservation, _)
1420 | (ty::ImplPolarity::Positive, ty::PredicatePolarity::Positive)
1421 | (ty::ImplPolarity::Negative, ty::PredicatePolarity::Negative) => {
1422 result.push(candidate);
1423 }
1424 _ => {}
1425 }
1426 } else {
1427 result.push(candidate);
1428 }
1429 }
1430
1431 trace!("{result:#?}");
1432 result
1433 }
1434
1435 /// `filter_reservation_impls` treats a reservation impl candidate as ambiguous for any goal.
1436 #[instrument(level = "debug", skip(self))]
1437 fn filter_reservation_impls(
1438 &mut self,
1439 candidate: SelectionCandidate<'tcx>,
1440 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
1441 let tcx = self.tcx();
1442 // Treat reservation impls as ambiguity.
1443 if let ImplCandidate(def_id) = candidate
1444 && let ty::ImplPolarity::Reservation = tcx.impl_polarity(def_id)
1445 {
1446 if let Some(intercrate_ambiguity_clauses) = &mut self.intercrate_ambiguity_causes {
1447 let message =
1448 tcx.get_attr(def_id, sym::rustc_reservation_impl).and_then(|a| a.value_str());
1449 if let Some(message) = message {
1450 debug!(
1451 "filter_reservation_impls: \
1452 reservation impl ambiguity on {:?}",
1453 def_id
1454 );
1455 intercrate_ambiguity_clauses
1456 .insert(IntercrateAmbiguityCause::ReservationImpl { message });
1457 }
1458 }
1459 return Ok(None);
1460 }
1461 Ok(Some(candidate))
1462 }
1463
1464 fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Result<(), Conflict> {
1465 let obligation = &stack.obligation;
1466 match self.infcx.typing_mode() {
1467 TypingMode::Coherence => {}
1468 TypingMode::Analysis { .. }
1469 | TypingMode::Borrowck { .. }
1470 | TypingMode::PostBorrowckAnalysis { .. }
1471 | TypingMode::PostAnalysis => return Ok(()),
1472 }
1473
1474 debug!("is_knowable()");
1475
1476 let predicate = self.infcx.resolve_vars_if_possible(obligation.predicate);
1477
1478 // Okay to skip binder because of the nature of the
1479 // trait-ref-is-knowable check, which does not care about
1480 // bound regions.
1481 let trait_ref = predicate.skip_binder().trait_ref;
1482
1483 coherence::trait_ref_is_knowable(self.infcx, trait_ref, |ty| Ok::<_, !>(ty)).into_ok()
1484 }
1485
1486 /// Returns `true` if the global caches can be used.
1487 fn can_use_global_caches(
1488 &self,
1489 param_env: ty::ParamEnv<'tcx>,
1490 pred: ty::PolyTraitPredicate<'tcx>,
1491 ) -> bool {
1492 // If there are any inference variables in the `ParamEnv`, then we
1493 // always use a cache local to this particular scope. Otherwise, we
1494 // switch to a global cache.
1495 if param_env.has_infer() || pred.has_infer() {
1496 return false;
1497 }
1498
1499 match self.infcx.typing_mode() {
1500 // Avoid using the global cache during coherence and just rely
1501 // on the local cache. It is really just a simplification to
1502 // avoid us having to fear that coherence results "pollute"
1503 // the master cache. Since coherence executes pretty quickly,
1504 // it's not worth going to more trouble to increase the
1505 // hit-rate, I don't think.
1506 TypingMode::Coherence => false,
1507 // Avoid using the global cache when we're defining opaque types
1508 // as their hidden type may impact the result of candidate selection.
1509 //
1510 // HACK: This is still theoretically unsound. Goals can indirectly rely
1511 // on opaques in the defining scope, and it's easier to do so with TAIT.
1512 // However, if we disqualify *all* goals from being cached, perf suffers.
1513 // This is likely fixed by better caching in general in the new solver.
1514 // See: <https://github.com/rust-lang/rust/issues/132064>.
1515 TypingMode::Analysis {
1516 defining_opaque_types_and_generators: defining_opaque_types,
1517 }
1518 | TypingMode::Borrowck { defining_opaque_types } => {
1519 defining_opaque_types.is_empty()
1520 || (!pred.has_opaque_types() && !pred.has_coroutines())
1521 }
1522 // The hidden types of `defined_opaque_types` are not local to the current
1523 // inference context, so we can freely move this to the global cache.
1524 TypingMode::PostBorrowckAnalysis { .. } => true,
1525 // The global cache is only used if there are no opaque types in
1526 // the defining scope or we're outside of analysis.
1527 //
1528 // FIXME(#132279): This is still incorrect as we treat opaque types
1529 // and default associated items differently between these two modes.
1530 TypingMode::PostAnalysis => true,
1531 }
1532 }
1533
1534 fn check_candidate_cache(
1535 &mut self,
1536 param_env: ty::ParamEnv<'tcx>,
1537 cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
1538 ) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
1539 let infcx = self.infcx;
1540 let tcx = infcx.tcx;
1541 let pred = cache_fresh_trait_pred.skip_binder();
1542
1543 if self.can_use_global_caches(param_env, cache_fresh_trait_pred) {
1544 if let Some(res) = tcx.selection_cache.get(&(infcx.typing_env(param_env), pred), tcx) {
1545 return Some(res);
1546 } else if cfg!(debug_assertions) {
1547 match infcx.selection_cache.get(&(param_env, pred), tcx) {
1548 None | Some(Err(SelectionError::Overflow(OverflowError::Canonical))) => {}
1549 res => bug!("unexpected local cache result: {res:?}"),
1550 }
1551 }
1552 }
1553
1554 // Subtle: we need to check the local cache even if we're able to use the
1555        // global cache, as we don't cache overflow in the global cache but do need to
1556        // cache it locally, as otherwise rustdoc hangs when compiling diesel.
1557 infcx.selection_cache.get(&(param_env, pred), tcx)
1558 }
1559
1560    /// Determines whether we can safely cache the result
1561 /// of selecting an obligation. This is almost always `true`,
1562 /// except when dealing with certain `ParamCandidate`s.
1563 ///
1564 /// Ordinarily, a `ParamCandidate` will contain no inference variables,
1565    /// since it is usually produced directly from a `DefId`. However, in
1566    /// certain cases (currently only librustdoc's blanket impl finder),
1567 /// a `ParamEnv` may be explicitly constructed with inference types.
1568 /// When this is the case, we do *not* want to cache the resulting selection
1569 /// candidate. This is due to the fact that it might not always be possible
1570 /// to equate the obligation's trait ref and the candidate's trait ref,
1571 /// if more constraints end up getting added to an inference variable.
1572 ///
1573 /// Because of this, we always want to re-run the full selection
1574 /// process for our obligation the next time we see it, since
1575 /// we might end up picking a different `SelectionCandidate` (or none at all).
1576 fn can_cache_candidate(
1577 &self,
1578 result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>,
1579 ) -> bool {
1580 match result {
1581 Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => !trait_ref.has_infer(),
1582 _ => true,
1583 }
1584 }
1585
1586 #[instrument(skip(self, param_env, cache_fresh_trait_pred, dep_node), level = "debug")]
1587 fn insert_candidate_cache(
1588 &mut self,
1589 param_env: ty::ParamEnv<'tcx>,
1590 cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
1591 dep_node: DepNodeIndex,
1592 candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
1593 ) {
1594 let infcx = self.infcx;
1595 let tcx = infcx.tcx;
1596 let pred = cache_fresh_trait_pred.skip_binder();
1597
1598 if !self.can_cache_candidate(&candidate) {
1599 debug!(?pred, ?candidate, "insert_candidate_cache - candidate is not cacheable");
1600 return;
1601 }
1602
1603 if self.can_use_global_caches(param_env, cache_fresh_trait_pred) {
1604 if let Err(SelectionError::Overflow(OverflowError::Canonical)) = candidate {
1605 // Don't cache overflow globally; we only produce this in certain modes.
1606 } else {
1607 debug!(?pred, ?candidate, "insert_candidate_cache global");
1608 debug_assert!(!candidate.has_infer());
1609
1610 // This may overwrite the cache with the same value.
1611 tcx.selection_cache.insert(
1612 (infcx.typing_env(param_env), pred),
1613 dep_node,
1614 candidate,
1615 );
1616 return;
1617 }
1618 }
1619
1620 debug!(?pred, ?candidate, "insert_candidate_cache local");
1621 self.infcx.selection_cache.insert((param_env, pred), dep_node, candidate);
1622 }
1623
1624 /// Looks at the item bounds of the projection or opaque type.
1625 /// If this is a nested rigid projection, such as
1626 /// `<<T as Tr1>::Assoc as Tr2>::Assoc`, consider the item bounds
1627 /// on both `Tr1::Assoc` and `Tr2::Assoc`, since we may encounter
1628    /// relevant bounds on both via the `associated_type_bounds` feature.
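    ///
    /// A hedged illustration (hypothetical traits, using bounds in the style of
    /// the `associated_type_bounds` feature):
    ///
    /// ```ignore (illustrative)
    /// trait Tr2 { type Assoc; }
    /// trait Tr1 { type Assoc: Tr2<Assoc: Clone>; }
    /// // When proving `<<T as Tr1>::Assoc as Tr2>::Assoc: Clone`, we first
    /// // look at the item bounds of `Tr2::Assoc` (the outer alias) and then
    /// // walk to the inner alias `<T as Tr1>::Assoc`, whose item bounds
    /// // include the `Assoc: Clone` bound written on `Tr1::Assoc`.
    /// ```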
1629 pub(super) fn for_each_item_bound<T>(
1630 &mut self,
1631 mut self_ty: Ty<'tcx>,
1632 mut for_each: impl FnMut(
1633 &mut Self,
1634 ty::Clause<'tcx>,
1635 usize,
1636 AliasBoundKind,
1637 ) -> ControlFlow<T, ()>,
1638 on_ambiguity: impl FnOnce(),
1639 ) -> ControlFlow<T, ()> {
1640 let mut idx = 0;
1641 let mut alias_bound_kind = AliasBoundKind::SelfBounds;
1642
1643 loop {
1644 let (kind, alias_ty) = match *self_ty.kind() {
1645 ty::Alias(kind @ (ty::Projection | ty::Opaque), alias_ty) => (kind, alias_ty),
1646 ty::Infer(ty::TyVar(_)) => {
1647 on_ambiguity();
1648 return ControlFlow::Continue(());
1649 }
1650 _ => return ControlFlow::Continue(()),
1651 };
1652
1653 // HACK: On subsequent recursions, we only care about bounds that don't
1654 // share the same type as `self_ty`. This is because for truly rigid
1655 // projections, we will never be able to equate, e.g. `<T as Tr>::A`
1656 // with `<<T as Tr>::A as Tr>::A`.
1657 let relevant_bounds = if matches!(alias_bound_kind, AliasBoundKind::NonSelfBounds) {
1658 self.tcx().item_non_self_bounds(alias_ty.def_id)
1659 } else {
1660 self.tcx().item_self_bounds(alias_ty.def_id)
1661 };
1662
1663 for bound in relevant_bounds.instantiate(self.tcx(), alias_ty.args) {
1664 for_each(self, bound, idx, alias_bound_kind)?;
1665 idx += 1;
1666 }
1667
1668 if kind == ty::Projection {
1669 self_ty = alias_ty.self_ty();
1670 } else {
1671 return ControlFlow::Continue(());
1672 }
1673
1674 alias_bound_kind = AliasBoundKind::NonSelfBounds;
1675 }
1676 }
1677
1678    /// Equates the trait in `obligation` with the trait bound. If the two traits
1679 /// can be equated and the normalized trait bound doesn't contain inference
1680 /// variables or placeholders, the normalized bound is returned.
1681 fn match_normalize_trait_ref(
1682 &mut self,
1683 obligation: &PolyTraitObligation<'tcx>,
1684 placeholder_trait_ref: ty::TraitRef<'tcx>,
1685 trait_bound: ty::PolyTraitRef<'tcx>,
1686 ) -> Result<Option<ty::TraitRef<'tcx>>, ()> {
1687 debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
1688 if placeholder_trait_ref.def_id != trait_bound.def_id() {
1689 // Avoid unnecessary normalization
1690 return Err(());
1691 }
1692
1693 let drcx = DeepRejectCtxt::relate_rigid_rigid(self.infcx.tcx);
1694 let obligation_args = obligation.predicate.skip_binder().trait_ref.args;
1695 if !drcx.args_may_unify(obligation_args, trait_bound.skip_binder().args) {
1696 return Err(());
1697 }
1698
1699 let trait_bound = self.infcx.instantiate_binder_with_fresh_vars(
1700 obligation.cause.span,
1701 HigherRankedType,
1702 trait_bound,
1703 );
1704 let Normalized { value: trait_bound, obligations: _ } = ensure_sufficient_stack(|| {
1705 normalize_with_depth(
1706 self,
1707 obligation.param_env,
1708 obligation.cause.clone(),
1709 obligation.recursion_depth + 1,
1710 trait_bound,
1711 )
1712 });
1713 self.infcx
1714 .at(&obligation.cause, obligation.param_env)
1715 .eq(DefineOpaqueTypes::No, placeholder_trait_ref, trait_bound)
1716 .map(|InferOk { obligations: _, value: () }| {
1717 // This method is called within a probe, so we can't have
1718 // inference variables and placeholders escape.
1719 if !trait_bound.has_infer() && !trait_bound.has_placeholders() {
1720 Some(trait_bound)
1721 } else {
1722 None
1723 }
1724 })
1725 .map_err(|_| ())
1726 }
1727
1728 fn where_clause_may_apply<'o>(
1729 &mut self,
1730 stack: &TraitObligationStack<'o, 'tcx>,
1731 where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
1732 ) -> Result<EvaluationResult, OverflowError> {
1733 self.evaluation_probe(|this| {
1734 match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
1735 Ok(obligations) => this.evaluate_predicates_recursively(stack.list(), obligations),
1736 Err(()) => Ok(EvaluatedToErr),
1737 }
1738 })
1739 }
1740
1741 /// Return `Yes` if the obligation's predicate type applies to the env_predicate, and
1742 /// `No` if it does not. Return `Ambiguous` in the case that the projection type is a GAT,
1743 /// and applying this env_predicate constrains any of the obligation's GAT parameters.
1744 ///
1745    /// This behavior is somewhat of a hack to prevent over-constraining inference variables
1746 /// in cases like #91762.
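    ///
    /// A hedged sketch of the GAT case (hypothetical trait and types):
    ///
    /// ```ignore (illustrative)
    /// trait Provider { type Out<T>; }
    /// // Given an env bound `S: Provider<Out<u8> = String>`, matching the goal
    /// // `<S as Provider>::Out<?x> == ?y` would newly constrain the GAT
    /// // parameter `?x` to `u8`, so we return `Ambiguous` rather than `Yes`.
    /// ```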
1747 pub(super) fn match_projection_projections(
1748 &mut self,
1749 obligation: &ProjectionTermObligation<'tcx>,
1750 env_predicate: PolyProjectionPredicate<'tcx>,
1751 potentially_unnormalized_candidates: bool,
1752 ) -> ProjectionMatchesProjection {
1753 debug_assert_eq!(obligation.predicate.def_id, env_predicate.item_def_id());
1754
1755 let mut nested_obligations = PredicateObligations::new();
1756 let infer_predicate = self.infcx.instantiate_binder_with_fresh_vars(
1757 obligation.cause.span,
1758 BoundRegionConversionTime::HigherRankedType,
1759 env_predicate,
1760 );
1761 let infer_projection = if potentially_unnormalized_candidates {
1762 ensure_sufficient_stack(|| {
1763 normalize_with_depth_to(
1764 self,
1765 obligation.param_env,
1766 obligation.cause.clone(),
1767 obligation.recursion_depth + 1,
1768 infer_predicate.projection_term,
1769 &mut nested_obligations,
1770 )
1771 })
1772 } else {
1773 infer_predicate.projection_term
1774 };
1775
1776 let is_match = self
1777 .infcx
1778 .at(&obligation.cause, obligation.param_env)
1779 .eq(DefineOpaqueTypes::No, obligation.predicate, infer_projection)
1780 .is_ok_and(|InferOk { obligations, value: () }| {
1781 self.evaluate_predicates_recursively(
1782 TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
1783 nested_obligations.into_iter().chain(obligations),
1784 )
1785 .is_ok_and(|res| res.may_apply())
1786 });
1787
1788 if is_match {
1789 let generics = self.tcx().generics_of(obligation.predicate.def_id);
1790 // FIXME(generic_associated_types): Addresses aggressive inference in #92917.
1791            // If this type is a GAT, and any of the GAT args resolve to something new,
1792 // that means that we must have newly inferred something about the GAT.
1793 // We should give up in that case.
1794 //
1795 // This only detects one layer of inference, which is probably not what we actually
1796 // want, but fixing it causes some ambiguity:
1797 // <https://github.com/rust-lang/rust/issues/125196>.
1798 if !generics.is_own_empty()
1799 && obligation.predicate.args[generics.parent_count..].iter().any(|&p| {
1800 p.has_non_region_infer()
1801 && match p.kind() {
1802 ty::GenericArgKind::Const(ct) => {
1803 self.infcx.shallow_resolve_const(ct) != ct
1804 }
1805 ty::GenericArgKind::Type(ty) => self.infcx.shallow_resolve(ty) != ty,
1806 ty::GenericArgKind::Lifetime(_) => false,
1807 }
1808 })
1809 {
1810 ProjectionMatchesProjection::Ambiguous
1811 } else {
1812 ProjectionMatchesProjection::Yes
1813 }
1814 } else {
1815 ProjectionMatchesProjection::No
1816 }
1817 }
1818}
1819
1820/// ## Winnowing
1821///
1822/// Winnowing is the process of attempting to resolve ambiguity by
1823/// probing further. During the winnowing process, we unify all
1824/// type variables and then we also attempt to evaluate recursive
1825/// bounds to see if they are satisfied.
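///
/// As a hedged illustration (hypothetical trait and impl), both the blanket
/// impl and the where-bound below apply to the goal `Vec<T>: Foo`; winnowing
/// resolves the ambiguity by preferring the non-global where-bound:
///
/// ```ignore (illustrative)
/// trait Foo {}
/// impl<T> Foo for Vec<T> {}
///
/// fn f<T>(v: Vec<T>)
/// where
///     Vec<T>: Foo, // `ParamCandidate`, preferred over the `ImplCandidate`
/// {
/// }
/// ```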
1826impl<'tcx> SelectionContext<'_, 'tcx> {
1827 /// If there are multiple ways to prove a trait goal, we make some
1828 /// *fairly arbitrary* choices about which candidate is actually used.
1829 ///
1830 /// For more details, look at the implementation of this method :)
1831 #[instrument(level = "debug", skip(self), ret)]
1832 fn winnow_candidates(
1833 &mut self,
1834 has_non_region_infer: bool,
1835 candidate_preference_mode: CandidatePreferenceMode,
1836 mut candidates: Vec<EvaluatedCandidate<'tcx>>,
1837 ) -> Option<SelectionCandidate<'tcx>> {
1838 if candidates.len() == 1 {
1839 return Some(candidates.pop().unwrap().candidate);
1840 }
1841
1842 // We prefer `Sized` candidates over everything.
1843 let mut sized_candidates =
1844 candidates.iter().filter(|c| matches!(c.candidate, SizedCandidate));
1845 if let Some(sized_candidate) = sized_candidates.next() {
1846 // There should only ever be a single sized candidate
1847 // as they would otherwise overlap.
1848 debug_assert_eq!(sized_candidates.next(), None);
1849 // Only prefer the built-in `Sized` candidate if its nested goals are certain.
1850 // Otherwise, we may encounter failure later on if inference causes this candidate
1851 // to not hold, but a where clause would've applied instead.
1852 if sized_candidate.evaluation.must_apply_modulo_regions() {
1853 return Some(sized_candidate.candidate.clone());
1854 } else {
1855 return None;
1856 }
1857 }
1858
1859 // Before we consider where-bounds, we have to deduplicate them here and also
1860 // drop where-bounds in case the same where-bound exists without bound vars.
1861 // This is necessary as elaborating super-trait bounds may result in duplicates.
1862 'search_victim: loop {
1863 for (i, this) in candidates.iter().enumerate() {
1864 let ParamCandidate(this) = this.candidate else { continue };
1865 for (j, other) in candidates.iter().enumerate() {
1866 if i == j {
1867 continue;
1868 }
1869
1870 let ParamCandidate(other) = other.candidate else { continue };
1871 if this == other {
1872 candidates.remove(j);
1873 continue 'search_victim;
1874 }
1875
1876 if this.skip_binder().trait_ref == other.skip_binder().trait_ref
1877 && this.skip_binder().polarity == other.skip_binder().polarity
1878 && !this.skip_binder().trait_ref.has_escaping_bound_vars()
1879 {
1880 candidates.remove(j);
1881 continue 'search_victim;
1882 }
1883 }
1884 }
1885
1886 break;
1887 }
1888
1889 let mut alias_bounds = candidates.iter().filter_map(|c| {
1890 if let ProjectionCandidate { idx, kind } = c.candidate {
1891 Some((idx, kind))
1892 } else {
1893 None
1894 }
1895 });
1896        // Extract non-nested alias bound candidates; these are preferred over where-bounds if
1897        // we're proving an auto trait, sizedness trait, or default trait.
1898 if matches!(candidate_preference_mode, CandidatePreferenceMode::Marker) {
1899 match alias_bounds
1900 .clone()
1901 .filter_map(|(idx, kind)| (kind == AliasBoundKind::SelfBounds).then_some(idx))
1902 .try_reduce(|c1, c2| if has_non_region_infer { None } else { Some(c1.min(c2)) })
1903 {
1904 Some(Some(idx)) => {
1905 return Some(ProjectionCandidate { idx, kind: AliasBoundKind::SelfBounds });
1906 }
1907 Some(None) => {}
1908 None => return None,
1909 }
1910 }
1911
1912 // The next highest priority is for non-global where-bounds. However, while we don't
1913 // prefer global where-clauses here, we do bail with ambiguity when encountering both
1914 // a global and a non-global where-clause.
1915 //
1916 // Our handling of where-bounds is generally fairly messy but necessary for backwards
1917 // compatibility, see #50825 for why we need to handle global where-bounds like this.
1918 let is_global = |c: ty::PolyTraitPredicate<'tcx>| c.is_global() && !c.has_bound_vars();
1919 let param_candidates = candidates
1920 .iter()
1921 .filter_map(|c| if let ParamCandidate(p) = c.candidate { Some(p) } else { None });
1922 let mut has_global_bounds = false;
1923 let mut param_candidate = None;
1924 for c in param_candidates {
1925 if is_global(c) {
1926 has_global_bounds = true;
1927 } else if param_candidate.replace(c).is_some() {
1928 // Ambiguity, two potentially different where-clauses
1929 return None;
1930 }
1931 }
1932 if let Some(predicate) = param_candidate {
1933 // Ambiguity, a global and a non-global where-bound.
1934 if has_global_bounds {
1935 return None;
1936 } else {
1937 return Some(ParamCandidate(predicate));
1938 }
1939 }
1940
1941 // Prefer alias-bounds over blanket impls for rigid associated types. This is
1942 // fairly arbitrary but once again necessary for backwards compatibility.
1943 // If there are multiple applicable candidates which don't affect type inference,
1944 // choose the one with the lowest index.
1945 match alias_bounds.try_reduce(|(c1, k1), (c2, k2)| {
1946 if has_non_region_infer {
1947 None
1948 } else if c1 < c2 {
1949 Some((c1, k1))
1950 } else {
1951 Some((c2, k2))
1952 }
1953 }) {
1954 Some(Some((idx, kind))) => return Some(ProjectionCandidate { idx, kind }),
1955 Some(None) => {}
1956 None => return None,
1957 }
1958
1959 // Need to prioritize builtin trait object impls as `<dyn Any as Any>::type_id`
1960 // should use the vtable method and not the method provided by the user-defined
1961 // impl `impl<T: ?Sized> Any for T { .. }`. This really shouldn't exist but is
1962 // necessary due to #57893. We again arbitrarily prefer the applicable candidate
1963 // with the lowest index.
1964 //
1965 // We do not want to use these impls to guide inference in case a user-written impl
1966 // may also apply.
1967 let object_bound = candidates
1968 .iter()
1969 .filter_map(|c| if let ObjectCandidate(i) = c.candidate { Some(i) } else { None })
1970 .try_reduce(|c1, c2| if has_non_region_infer { None } else { Some(c1.min(c2)) });
1971 match object_bound {
1972 Some(Some(index)) => {
1973 return if has_non_region_infer
1974 && candidates.iter().any(|c| matches!(c.candidate, ImplCandidate(_)))
1975 {
1976 None
1977 } else {
1978 Some(ObjectCandidate(index))
1979 };
1980 }
1981 Some(None) => {}
1982 None => return None,
1983 }
1984 // Same for upcasting.
1985 let upcast_bound = candidates
1986 .iter()
1987 .filter_map(|c| {
1988 if let TraitUpcastingUnsizeCandidate(i) = c.candidate { Some(i) } else { None }
1989 })
1990 .try_reduce(|c1, c2| if has_non_region_infer { None } else { Some(c1.min(c2)) });
1991 match upcast_bound {
1992 Some(Some(index)) => return Some(TraitUpcastingUnsizeCandidate(index)),
1993 Some(None) => {}
1994 None => return None,
1995 }
1996
1997 // Finally, handle overlapping user-written impls.
1998 let impls = candidates.iter().filter_map(|c| {
1999 if let ImplCandidate(def_id) = c.candidate {
2000 Some((def_id, c.evaluation))
2001 } else {
2002 None
2003 }
2004 });
2005 let mut impl_candidate = None;
2006 for c in impls {
2007 if let Some(prev) = impl_candidate.replace(c) {
2008 if self.prefer_lhs_over_victim(has_non_region_infer, c, prev.0) {
2009 // Ok, prefer `c` over the previous entry
2010 } else if self.prefer_lhs_over_victim(has_non_region_infer, prev, c.0) {
2011 // Ok, keep `prev` instead of the new entry
2012 impl_candidate = Some(prev);
2013 } else {
2014                // Ambiguity, two impls where neither is preferred over the other
2015 return None;
2016 }
2017 }
2018 }
2019 if let Some((def_id, _evaluation)) = impl_candidate {
2020 // Don't use impl candidates which overlap with other candidates.
2021 // This should pretty much only ever happen with malformed impls.
2022 if candidates.iter().all(|c| match c.candidate {
2023 SizedCandidate
2024 | BuiltinCandidate
2025 | TransmutabilityCandidate
2026 | AutoImplCandidate
2027 | ClosureCandidate { .. }
2028 | AsyncClosureCandidate
2029 | AsyncFnKindHelperCandidate
2030 | CoroutineCandidate
2031 | FutureCandidate
2032 | IteratorCandidate
2033 | AsyncIteratorCandidate
2034 | FnPointerCandidate
2035 | TraitAliasCandidate
2036 | TraitUpcastingUnsizeCandidate(_)
2037 | BuiltinObjectCandidate
2038 | BuiltinUnsizeCandidate
2039 | PointerLikeCandidate
2040 | BikeshedGuaranteedNoDropCandidate => false,
2041 // Non-global param candidates have already been handled, global
2042 // where-bounds get ignored.
2043 ParamCandidate(_) | ImplCandidate(_) => true,
2044 ProjectionCandidate { .. } | ObjectCandidate(_) => unreachable!(),
2045 }) {
2046 return Some(ImplCandidate(def_id));
2047 } else {
2048 return None;
2049 }
2050 }
2051
2052 if candidates.len() == 1 {
2053 Some(candidates.pop().unwrap().candidate)
2054 } else {
2055 // Also try ignoring all global where-bounds and check whether we end
2056 // with a unique candidate in this case.
2057 let mut not_a_global_where_bound = candidates
2058 .into_iter()
2059 .filter(|c| !matches!(c.candidate, ParamCandidate(p) if is_global(p)));
2060 not_a_global_where_bound
2061 .next()
2062 .map(|c| c.candidate)
2063 .filter(|_| not_a_global_where_bound.next().is_none())
2064 }
2065 }
2066
2067 fn prefer_lhs_over_victim(
2068 &self,
2069 has_non_region_infer: bool,
2070 (lhs, lhs_evaluation): (DefId, EvaluationResult),
2071 victim: DefId,
2072 ) -> bool {
2073 let tcx = self.tcx();
2074 // See if we can toss out `victim` based on specialization.
2075 //
2076 // While this requires us to know *for sure* that the `lhs` impl applies
2077        // While this requires us to know *for sure* that the `lhs` impl applies,
2078        // we still use modulo regions here. This is fine as specialization currently
2079        // assumes that specializing impls always have to be applicable, meaning that
2080 if lhs_evaluation.must_apply_modulo_regions() {
2081 if tcx.specializes((lhs, victim)) {
2082 return true;
2083 }
2084 }
2085
2086 match tcx.impls_are_allowed_to_overlap(lhs, victim) {
2087 // For candidates which already reference errors it doesn't really
2088 // matter what we do 🤷
2089 Some(ty::ImplOverlapKind::Permitted { marker: false }) => {
2090 lhs_evaluation.must_apply_considering_regions()
2091 }
2092 Some(ty::ImplOverlapKind::Permitted { marker: true }) => {
2093 // Subtle: If the predicate we are evaluating has inference
2094 // variables, do *not* allow discarding candidates due to
2095 // marker trait impls.
2096 //
2097 // Without this restriction, we could end up accidentally
2098 // constraining inference variables based on an arbitrarily
2099 // chosen trait impl.
2100 //
2101 // Imagine we have the following code:
2102 //
2103 // ```rust
2104 // #[marker] trait MyTrait {}
2105 // impl MyTrait for u8 {}
2106 // impl MyTrait for bool {}
2107 // ```
2108 //
2109 // And we are evaluating the predicate `<_#0t as MyTrait>`.
2110 //
2111 // During selection, we will end up with one candidate for each
2112 // impl of `MyTrait`. If we were to discard one impl in favor
2113 // of the other, we would be left with one candidate, causing
2114 // us to "successfully" select the predicate, unifying
2115 // _#0t with (for example) `u8`.
2116 //
2117 // However, we have no reason to believe that this unification
2118 // is correct - we've essentially just picked an arbitrary
2119 // *possibility* for _#0t, and required that this be the *only*
2120 // possibility.
2121 //
2122 // Eventually, we will either:
2123 // 1) Unify all inference variables in the predicate through
2124 // some other means (e.g. type-checking of a function). We will
2125 // then be in a position to drop marker trait candidates
2126 // without constraining inference variables (since there are
2127 // none left to constrain)
2128 // 2) Be left with some unconstrained inference variables. We
2129 // will then correctly report an inference error, since the
2130 // existence of multiple marker trait impls tells us nothing
2131 // about which one should actually apply.
2132 !has_non_region_infer && lhs_evaluation.must_apply_considering_regions()
2133 }
2134 None => false,
2135 }
2136 }
2137}
2138
2139impl<'tcx> SelectionContext<'_, 'tcx> {
2140 fn sizedness_conditions(
2141 &mut self,
2142 self_ty: Ty<'tcx>,
2143 sizedness: SizedTraitKind,
2144 ) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
2145 match self_ty.kind() {
2146 ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
2147 | ty::Uint(_)
2148 | ty::Int(_)
2149 | ty::Bool
2150 | ty::Float(_)
2151 | ty::FnDef(..)
2152 | ty::FnPtr(..)
2153 | ty::RawPtr(..)
2154 | ty::Char
2155 | ty::Ref(..)
2156 | ty::Coroutine(..)
2157 | ty::CoroutineWitness(..)
2158 | ty::Array(..)
2159 | ty::Closure(..)
2160 | ty::CoroutineClosure(..)
2161 | ty::Never
2162 | ty::Error(_) => ty::Binder::dummy(vec![]),
2163
2164 ty::Str | ty::Slice(_) | ty::Dynamic(..) => match sizedness {
2165 SizedTraitKind::Sized => unreachable!("tried to assemble `Sized` for unsized type"),
2166 SizedTraitKind::MetaSized => ty::Binder::dummy(vec![]),
2167 },
2168
2169 ty::Foreign(..) => unreachable!("tried to assemble `Sized` for unsized type"),
2170
2171 ty::Tuple(tys) => {
2172 ty::Binder::dummy(tys.last().map_or_else(Vec::new, |&last| vec![last]))
2173 }
2174
2175 ty::Pat(ty, _) => ty::Binder::dummy(vec![*ty]),
2176
2177 ty::Adt(def, args) => {
2178 if let Some(crit) = def.sizedness_constraint(self.tcx(), sizedness) {
2179 ty::Binder::dummy(vec![crit.instantiate(self.tcx(), args)])
2180 } else {
2181 ty::Binder::dummy(vec![])
2182 }
2183 }
2184
2185 ty::UnsafeBinder(binder_ty) => binder_ty.map_bound(|ty| vec![ty]),
2186
2187 ty::Alias(..)
2188 | ty::Param(_)
2189 | ty::Placeholder(..)
2190 | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_))
2191 | ty::Bound(..) => {
2192 bug!("asked to assemble `Sized` of unexpected type: {:?}", self_ty);
2193 }
2194 }
2195 }
2196
2197 fn copy_clone_conditions(&mut self, self_ty: Ty<'tcx>) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
2198 match *self_ty.kind() {
2199 ty::FnDef(..) | ty::FnPtr(..) | ty::Error(_) => ty::Binder::dummy(vec![]),
2200
2201 ty::Uint(_)
2202 | ty::Int(_)
2203 | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
2204 | ty::Bool
2205 | ty::Float(_)
2206 | ty::Char
2207 | ty::RawPtr(..)
2208 | ty::Never
2209 | ty::Ref(_, _, hir::Mutability::Not)
2210 | ty::Array(..) => {
2211                unreachable!("tried to assemble `Copy`/`Clone` for type with libcore-provided impl")
2212 }
2213
2214            // FIXME(unsafe_binder): Should we unconditionally
2215 // (i.e. universally) implement copy/clone?
2216            ty::UnsafeBinder(_) => unreachable!("tried to assemble `Copy`/`Clone` for unsafe binder"),
2217
2218 ty::Tuple(tys) => {
2219 // (*) binder moved here
2220 ty::Binder::dummy(tys.iter().collect())
2221 }
2222
2223 ty::Pat(ty, _) => {
2224 // (*) binder moved here
2225 ty::Binder::dummy(vec![ty])
2226 }
2227
2228 ty::Coroutine(def_id, args) => match self.tcx().coroutine_movability(def_id) {
2229 hir::Movability::Static => {
2230 unreachable!("tried to assemble `Clone` for static coroutine")
2231 }
2232 hir::Movability::Movable => {
2233 if self.tcx().features().coroutine_clone() {
2234 ty::Binder::dummy(vec![
2235 args.as_coroutine().tupled_upvars_ty(),
2236 Ty::new_coroutine_witness_for_coroutine(self.tcx(), def_id, args),
2237 ])
2238 } else {
2239 unreachable!(
2240 "tried to assemble `Clone` for coroutine without enabled feature"
2241 )
2242 }
2243 }
2244 },
2245
2246 ty::CoroutineWitness(def_id, args) => self
2247 .infcx
2248 .tcx
2249 .coroutine_hidden_types(def_id)
2250 .instantiate(self.infcx.tcx, args)
2251 .map_bound(|witness| witness.types.to_vec()),
2252
2253 ty::Closure(_, args) => ty::Binder::dummy(args.as_closure().upvar_tys().to_vec()),
2254
2255 ty::CoroutineClosure(_, args) => {
2256 ty::Binder::dummy(args.as_coroutine_closure().upvar_tys().to_vec())
2257 }
2258
2259 ty::Foreign(..)
2260 | ty::Str
2261 | ty::Slice(_)
2262 | ty::Dynamic(..)
2263 | ty::Adt(..)
2264 | ty::Alias(..)
2265 | ty::Param(..)
2266 | ty::Placeholder(..)
2267 | ty::Bound(..)
2268 | ty::Ref(_, _, ty::Mutability::Mut)
2269 | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
2270 bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
2271 }
2272 }
2273 }
2274
2275 fn coroutine_is_gen(&mut self, self_ty: Ty<'tcx>) -> bool {
2276 matches!(*self_ty.kind(), ty::Coroutine(did, ..)
2277 if self.tcx().coroutine_is_gen(did))
2278 }
2279
2280 /// For default impls, we need to break apart a type into its
2281 /// "constituent types" -- meaning, the types that it contains.
2282 ///
2283 /// Here are some (simple) examples:
2284 ///
2285 /// ```ignore (illustrative)
2286 /// (i32, u32) -> [i32, u32]
2287 /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
2288 /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
2289 /// Zed<i32> where enum Zed { A(T), B(u32) } -> [i32, u32]
2290 /// ```
2291 #[instrument(level = "debug", skip(self), ret)]
2292 fn constituent_types_for_auto_trait(
2293 &self,
2294 t: Ty<'tcx>,
2295 ) -> Result<ty::Binder<'tcx, AutoImplConstituents<'tcx>>, SelectionError<'tcx>> {
2296 Ok(match *t.kind() {
2297 ty::Uint(_)
2298 | ty::Int(_)
2299 | ty::Bool
2300 | ty::Float(_)
2301 | ty::FnDef(..)
2302 | ty::FnPtr(..)
2303 | ty::Error(_)
2304 | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
2305 | ty::Never
2306 | ty::Char => {
2307 ty::Binder::dummy(AutoImplConstituents { types: vec![], assumptions: vec![] })
2308 }
2309
2310 // This branch is only for `experimental_default_bounds`.
2311 // Other foreign types were rejected earlier in
2312 // `assemble_candidates_from_auto_impls`.
2313 ty::Foreign(..) => {
2314 ty::Binder::dummy(AutoImplConstituents { types: vec![], assumptions: vec![] })
2315 }
2316
2317 ty::UnsafeBinder(ty) => {
2318 ty.map_bound(|ty| AutoImplConstituents { types: vec![ty], assumptions: vec![] })
2319 }
2320
2321 // Treat this like `struct str([u8]);`
2322 ty::Str => ty::Binder::dummy(AutoImplConstituents {
2323 types: vec![Ty::new_slice(self.tcx(), self.tcx().types.u8)],
2324 assumptions: vec![],
2325 }),
2326
2327 ty::Placeholder(..)
2328 | ty::Dynamic(..)
2329 | ty::Param(..)
2330 | ty::Alias(ty::Projection | ty::Inherent | ty::Free, ..)
2331 | ty::Bound(..)
2332 | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
2333 bug!("asked to assemble constituent types of unexpected type: {:?}", t);
2334 }
2335
2336 ty::RawPtr(element_ty, _) | ty::Ref(_, element_ty, _) => {
2337 ty::Binder::dummy(AutoImplConstituents {
2338 types: vec![element_ty],
2339 assumptions: vec![],
2340 })
2341 }
2342
2343 ty::Pat(ty, _) | ty::Array(ty, _) | ty::Slice(ty) => {
2344 ty::Binder::dummy(AutoImplConstituents { types: vec![ty], assumptions: vec![] })
2345 }
2346
2347 ty::Tuple(tys) => {
2348 // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
2349 ty::Binder::dummy(AutoImplConstituents {
2350 types: tys.iter().collect(),
2351 assumptions: vec![],
2352 })
2353 }
2354
2355 ty::Closure(_, args) => {
2356 let ty = self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
2357 ty::Binder::dummy(AutoImplConstituents { types: vec![ty], assumptions: vec![] })
2358 }
2359
2360 ty::CoroutineClosure(_, args) => {
2361 let ty = self.infcx.shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty());
2362 ty::Binder::dummy(AutoImplConstituents { types: vec![ty], assumptions: vec![] })
2363 }
2364
2365 ty::Coroutine(def_id, args) => {
2366 let ty = self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty());
2367 let tcx = self.tcx();
2368 let witness = Ty::new_coroutine_witness_for_coroutine(tcx, def_id, args);
2369 ty::Binder::dummy(AutoImplConstituents {
2370 types: vec![ty, witness],
2371 assumptions: vec![],
2372 })
2373 }
2374
2375 ty::CoroutineWitness(def_id, args) => self
2376 .infcx
2377 .tcx
2378 .coroutine_hidden_types(def_id)
2379 .instantiate(self.infcx.tcx, args)
2380 .map_bound(|witness| AutoImplConstituents {
2381 types: witness.types.to_vec(),
2382 assumptions: witness.assumptions.to_vec(),
2383 }),
2384
2385 // For `PhantomData<T>`, we pass `T`.
2386 ty::Adt(def, args) if def.is_phantom_data() => {
2387 ty::Binder::dummy(AutoImplConstituents {
2388 types: args.types().collect(),
2389 assumptions: vec![],
2390 })
2391 }
2392
2393 ty::Adt(def, args) => ty::Binder::dummy(AutoImplConstituents {
2394 types: def.all_fields().map(|f| f.ty(self.tcx(), args)).collect(),
2395 assumptions: vec![],
2396 }),
2397
2398 ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
2399 if self.infcx.can_define_opaque_ty(def_id) {
2400 unreachable!()
2401 } else {
2402 // We can resolve the opaque type to its hidden type,
2403 // which enforces a DAG between the functions requiring
2404 // the auto trait bounds in question.
2405 match self.tcx().type_of_opaque(def_id) {
2406 Ok(ty) => ty::Binder::dummy(AutoImplConstituents {
2407 types: vec![ty.instantiate(self.tcx(), args)],
2408 assumptions: vec![],
2409 }),
2410 Err(_) => {
2411 return Err(SelectionError::OpaqueTypeAutoTraitLeakageUnknown(def_id));
2412 }
2413 }
2414 }
2415 }
2416 })
2417 }
2418
2419 fn collect_predicates_for_types(
2420 &mut self,
2421 param_env: ty::ParamEnv<'tcx>,
2422 cause: ObligationCause<'tcx>,
2423 recursion_depth: usize,
2424 trait_def_id: DefId,
2425 types: Vec<Ty<'tcx>>,
2426 ) -> PredicateObligations<'tcx> {
2427 // Because the types were potentially derived from
2428 // higher-ranked obligations they may reference late-bound
2429 // regions. For example, `for<'a> Foo<&'a i32> : Copy` would
2430 // yield a type like `for<'a> &'a i32`. In general, we
2431 // maintain the invariant that we never manipulate bound
2432 // regions, so we have to process these bound regions somehow.
2433 //
2434 // The strategy is to:
2435 //
2436 // 1. Instantiate those regions to placeholder regions (e.g.,
2437        //    `for<'a> &'a i32` becomes `&'0 i32`).
2438 // 2. Produce something like `&'0 i32 : Copy`
2439 // 3. Re-bind the regions back to `for<'a> &'a i32 : Copy`
2440
2441 types
2442 .into_iter()
2443 .flat_map(|placeholder_ty| {
2444 let Normalized { value: normalized_ty, mut obligations } =
2445 ensure_sufficient_stack(|| {
2446 normalize_with_depth(
2447 self,
2448 param_env,
2449 cause.clone(),
2450 recursion_depth,
2451 placeholder_ty,
2452 )
2453 });
2454
2455 let tcx = self.tcx();
2456 let trait_ref = if tcx.generics_of(trait_def_id).own_params.len() == 1 {
2457 ty::TraitRef::new(tcx, trait_def_id, [normalized_ty])
2458 } else {
2459 // If this is an ill-formed auto/built-in trait, then synthesize
2460 // new error args for the missing generics.
2461 let err_args = ty::GenericArgs::extend_with_error(
2462 tcx,
2463 trait_def_id,
2464 &[normalized_ty.into()],
2465 );
2466 ty::TraitRef::new_from_args(tcx, trait_def_id, err_args)
2467 };
2468
2469 let obligation = Obligation::new(self.tcx(), cause.clone(), param_env, trait_ref);
2470 obligations.push(obligation);
2471 obligations
2472 })
2473 .collect()
2474 }
2475
2476 ///////////////////////////////////////////////////////////////////////////
2477 // Matching
2478 //
2479 // Matching is a common path used for both evaluation and
2480 // confirmation. It basically unifies types that appear in impls
2481 // and traits. This does affect the surrounding environment;
2482 // therefore, when used during evaluation, match routines must be
2483 // run inside of a `probe()` so that their side-effects are
2484 // contained.
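    //
    // As a hedged sketch of that pattern (not a verbatim call site), an
    // evaluation-time check that an impl could apply is wrapped in a probe so
    // that any unification it performs is rolled back afterwards:
    //
    //     let impl_may_apply = self.infcx.probe(|_| {
    //         self.match_impl(impl_def_id, impl_trait_header, obligation).is_ok()
    //     });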
2485
2486 fn rematch_impl(
2487 &mut self,
2488 impl_def_id: DefId,
2489 obligation: &PolyTraitObligation<'tcx>,
2490 ) -> Normalized<'tcx, GenericArgsRef<'tcx>> {
2491 let impl_trait_header = self.tcx().impl_trait_header(impl_def_id);
2492 match self.match_impl(impl_def_id, impl_trait_header, obligation) {
2493 Ok(args) => args,
2494 Err(()) => {
2495 let predicate = self.infcx.resolve_vars_if_possible(obligation.predicate);
2496 bug!("impl {impl_def_id:?} was matchable against {predicate:?} but now is not")
2497 }
2498 }
2499 }
2500
2501 #[instrument(level = "debug", skip(self), ret)]
2502 fn match_impl(
2503 &mut self,
2504 impl_def_id: DefId,
2505 impl_trait_header: ty::ImplTraitHeader<'tcx>,
2506 obligation: &PolyTraitObligation<'tcx>,
2507 ) -> Result<Normalized<'tcx, GenericArgsRef<'tcx>>, ()> {
2508 let placeholder_obligation =
2509 self.infcx.enter_forall_and_leak_universe(obligation.predicate);
2510 let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref;
2511
2512 let impl_args = self.infcx.fresh_args_for_item(obligation.cause.span, impl_def_id);
2513
2514 let trait_ref = impl_trait_header.trait_ref.instantiate(self.tcx(), impl_args);
2515 debug!(?impl_trait_header);
2516
2517 let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } =
2518 ensure_sufficient_stack(|| {
2519 normalize_with_depth(
2520 self,
2521 obligation.param_env,
2522 obligation.cause.clone(),
2523 obligation.recursion_depth + 1,
2524 trait_ref,
2525 )
2526 });
2527
2528 debug!(?impl_trait_ref, ?placeholder_obligation_trait_ref);
2529
2530 let cause = ObligationCause::new(
2531 obligation.cause.span,
2532 obligation.cause.body_id,
2533 ObligationCauseCode::MatchImpl(obligation.cause.clone(), impl_def_id),
2534 );
2535
2536 let InferOk { obligations, .. } = self
2537 .infcx
2538 .at(&cause, obligation.param_env)
2539 .eq(DefineOpaqueTypes::No, placeholder_obligation_trait_ref, impl_trait_ref)
2540 .map_err(|e| {
2541 debug!("match_impl: failed eq_trait_refs due to `{}`", e.to_string(self.tcx()))
2542 })?;
2543 nested_obligations.extend(obligations);
2544
2545 if impl_trait_header.polarity == ty::ImplPolarity::Reservation
2546 && !matches!(self.infcx.typing_mode(), TypingMode::Coherence)
2547 {
2548 debug!("reservation impls only apply in intercrate mode");
2549 return Err(());
2550 }
2551
2552 Ok(Normalized { value: impl_args, obligations: nested_obligations })
2553 }
2554
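    /// Matches the principal, projection, and auto-trait bounds when upcasting
    /// one trait object to another. As a hedged illustration (hypothetical
    /// traits):
    ///
    /// ```ignore (illustrative)
    /// trait B { type Out; }
    /// trait A: B<Out = u32> {}
    /// fn upcast(x: &(dyn A + Send)) -> &(dyn B<Out = u32> + Send) { x }
    /// ```
    ///
    /// Here the upcast principal `B<Out = u32>` is equated with the target's
    /// principal, the target's `Out = u32` projection must be satisfied by one
    /// of the source's projections, and `Send` must already be implied by the
    /// source object type.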
2555 fn match_upcast_principal(
2556 &mut self,
2557 obligation: &PolyTraitObligation<'tcx>,
2558 unnormalized_upcast_principal: ty::PolyTraitRef<'tcx>,
2559 a_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
2560 b_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
2561 a_region: ty::Region<'tcx>,
2562 b_region: ty::Region<'tcx>,
2563 ) -> SelectionResult<'tcx, PredicateObligations<'tcx>> {
2564 let tcx = self.tcx();
2565 let mut nested = PredicateObligations::new();
2566
2567 // We may upcast to auto traits that are either explicitly listed in
2568 // the object type's bounds, or implied by the principal trait ref's
2569 // supertraits.
2570 let a_auto_traits: FxIndexSet<DefId> = a_data
2571 .auto_traits()
2572 .chain(a_data.principal_def_id().into_iter().flat_map(|principal_def_id| {
2573 elaborate::supertrait_def_ids(tcx, principal_def_id)
2574 .filter(|def_id| tcx.trait_is_auto(*def_id))
2575 }))
2576 .collect();
2577
2578 let upcast_principal = normalize_with_depth_to(
2579 self,
2580 obligation.param_env,
2581 obligation.cause.clone(),
2582 obligation.recursion_depth + 1,
2583 unnormalized_upcast_principal,
2584 &mut nested,
2585 );
2586
2587 for bound in b_data {
2588 match bound.skip_binder() {
2589 // Check that a_ty's supertrait (upcast_principal) is compatible
2590 // with the target (b_ty).
2591 ty::ExistentialPredicate::Trait(target_principal) => {
2592 let hr_source_principal = upcast_principal.map_bound(|trait_ref| {
2593 ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)
2594 });
2595 let hr_target_principal = bound.rebind(target_principal);
2596
2597 nested.extend(
2598 self.infcx
2599 .enter_forall(hr_target_principal, |target_principal| {
2600 let source_principal =
2601 self.infcx.instantiate_binder_with_fresh_vars(
2602 obligation.cause.span,
2603 HigherRankedType,
2604 hr_source_principal,
2605 );
2606 self.infcx.at(&obligation.cause, obligation.param_env).eq_trace(
2607 DefineOpaqueTypes::Yes,
2608 ToTrace::to_trace(
2609 &obligation.cause,
2610 hr_target_principal,
2611 hr_source_principal,
2612 ),
2613 target_principal,
2614 source_principal,
2615 )
2616 })
2617 .map_err(|_| SelectionError::Unimplemented)?
2618 .into_obligations(),
2619 );
2620 }
2621 // Check that b_ty's projection is satisfied by exactly one of
2622 // a_ty's projections. First, we look through the list to see if
2623 // any match. If not, error. Then, if *more* than one matches, we
2624 // return ambiguity. Otherwise, if exactly one matches, equate
2625 // it with b_ty's projection.
2626 ty::ExistentialPredicate::Projection(target_projection) => {
2627 let hr_target_projection = bound.rebind(target_projection);
2628
2629 let mut matching_projections =
2630 a_data.projection_bounds().filter(|&hr_source_projection| {
2631 // Eager normalization means that we can just use can_eq
2632 // here instead of equating and processing obligations.
2633 hr_source_projection.item_def_id() == hr_target_projection.item_def_id()
2634 && self.infcx.probe(|_| {
2635 self.infcx
2636 .enter_forall(hr_target_projection, |target_projection| {
2637 let source_projection =
2638 self.infcx.instantiate_binder_with_fresh_vars(
2639 obligation.cause.span,
2640 HigherRankedType,
2641 hr_source_projection,
2642 );
2643 self.infcx
2644 .at(&obligation.cause, obligation.param_env)
2645 .eq_trace(
2646 DefineOpaqueTypes::Yes,
2647 ToTrace::to_trace(
2648 &obligation.cause,
2649 hr_target_projection,
2650 hr_source_projection,
2651 ),
2652 target_projection,
2653 source_projection,
2654 )
2655 })
2656 .is_ok()
2657 })
2658 });
2659
2660 let Some(hr_source_projection) = matching_projections.next() else {
2661 return Err(SelectionError::Unimplemented);
2662 };
2663 if matching_projections.next().is_some() {
2664 return Ok(None);
2665 }
2666 nested.extend(
2667 self.infcx
2668 .enter_forall(hr_target_projection, |target_projection| {
2669 let source_projection =
2670 self.infcx.instantiate_binder_with_fresh_vars(
2671 obligation.cause.span,
2672 HigherRankedType,
2673 hr_source_projection,
2674 );
2675 self.infcx.at(&obligation.cause, obligation.param_env).eq_trace(
2676 DefineOpaqueTypes::Yes,
2677 ToTrace::to_trace(
2678 &obligation.cause,
2679 hr_target_projection,
2680 hr_source_projection,
2681 ),
2682 target_projection,
2683 source_projection,
2684 )
2685 })
2686 .map_err(|_| SelectionError::Unimplemented)?
2687 .into_obligations(),
2688 );
2689 }
2690 // Check that b_ty's auto traits are present in a_ty's bounds.
2691 ty::ExistentialPredicate::AutoTrait(def_id) => {
2692 if !a_auto_traits.contains(&def_id) {
2693 return Err(SelectionError::Unimplemented);
2694 }
2695 }
2696 }
2697 }
2698
2699 nested.push(Obligation::with_depth(
2700 tcx,
2701 obligation.cause.clone(),
2702 obligation.recursion_depth + 1,
2703 obligation.param_env,
2704 ty::Binder::dummy(ty::OutlivesPredicate(a_region, b_region)),
2705 ));
2706
2707 Ok(Some(nested))
2708 }
2709
2710 /// Normalize `where_clause_trait_ref` and try to match it against
2711 /// `obligation`. If successful, return any predicates that
2712 /// result from the normalization.
2713 fn match_where_clause_trait_ref(
2714 &mut self,
2715 obligation: &PolyTraitObligation<'tcx>,
2716 where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
2717 ) -> Result<PredicateObligations<'tcx>, ()> {
2718 self.match_poly_trait_ref(obligation, where_clause_trait_ref)
2719 }
2720
2721 /// Returns `Ok` if `poly_trait_ref` being true implies that the
2722 /// obligation is satisfied.
2723 #[instrument(skip(self), level = "debug")]
2724 fn match_poly_trait_ref(
2725 &mut self,
2726 obligation: &PolyTraitObligation<'tcx>,
2727 poly_trait_ref: ty::PolyTraitRef<'tcx>,
2728 ) -> Result<PredicateObligations<'tcx>, ()> {
2729 let predicate = self.infcx.enter_forall_and_leak_universe(obligation.predicate);
2730 let trait_ref = self.infcx.instantiate_binder_with_fresh_vars(
2731 obligation.cause.span,
2732 HigherRankedType,
2733 poly_trait_ref,
2734 );
2735 self.infcx
2736 .at(&obligation.cause, obligation.param_env)
2737 .eq(DefineOpaqueTypes::No, predicate.trait_ref, trait_ref)
2738 .map(|InferOk { obligations, .. }| obligations)
2739 .map_err(|_| ())
2740 }
2741
2742 ///////////////////////////////////////////////////////////////////////////
2743 // Miscellany
2744
2745 fn match_fresh_trait_refs(
2746 &self,
2747 previous: ty::PolyTraitPredicate<'tcx>,
2748 current: ty::PolyTraitPredicate<'tcx>,
2749 ) -> bool {
2750 let mut matcher = _match::MatchAgainstFreshVars::new(self.tcx());
2751 matcher.relate(previous, current).is_ok()
2752 }
2753
2754 fn push_stack<'o>(
2755 &mut self,
2756 previous_stack: TraitObligationStackList<'o, 'tcx>,
2757 obligation: &'o PolyTraitObligation<'tcx>,
2758 ) -> TraitObligationStack<'o, 'tcx> {
2759 let fresh_trait_pred = obligation.predicate.fold_with(&mut self.freshener);
2760
2761 let dfn = previous_stack.cache.next_dfn();
2762 let depth = previous_stack.depth() + 1;
2763 TraitObligationStack {
2764 obligation,
2765 fresh_trait_pred,
2766 reached_depth: Cell::new(depth),
2767 previous: previous_stack,
2768 dfn,
2769 depth,
2770 }
2771 }
2772
2773 #[instrument(skip(self), level = "debug")]
2774 fn closure_trait_ref_unnormalized(
2775 &mut self,
2776 self_ty: Ty<'tcx>,
2777 fn_trait_def_id: DefId,
2778 ) -> ty::PolyTraitRef<'tcx> {
2779 let ty::Closure(_, args) = *self_ty.kind() else {
2780 bug!("expected closure, found {self_ty}");
2781 };
2782 let closure_sig = args.as_closure().sig();
2783
2784 closure_trait_ref_and_return_type(
2785 self.tcx(),
2786 fn_trait_def_id,
2787 self_ty,
2788 closure_sig,
2789 util::TupleArgumentsFlag::No,
2790 )
2791 .map_bound(|(trait_ref, _)| trait_ref)
2792 }
2793
2794 /// Returns the obligations that are implied by instantiating an
2795 /// impl or trait. The obligations are instantiated and fully
2796 /// normalized. This is used when confirming an impl or default
2797 /// impl.
2798 #[instrument(level = "debug", skip(self, cause, param_env))]
2799 fn impl_or_trait_obligations(
2800 &mut self,
2801 cause: &ObligationCause<'tcx>,
2802 recursion_depth: usize,
2803 param_env: ty::ParamEnv<'tcx>,
2804 def_id: DefId, // of impl or trait
2805 args: GenericArgsRef<'tcx>, // for impl or trait
2806 parent_trait_pred: ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
2807 ) -> PredicateObligations<'tcx> {
2808 let tcx = self.tcx();
2809
2810 // To allow for one-pass evaluation of the nested obligation,
2811 // each predicate must be preceded by the obligations required
2812 // to normalize it.
2813        // For example, if we have:
2814 // impl<U: Iterator<Item: Copy>, V: Iterator<Item = U>> Foo for V
2815 // the impl will have the following predicates:
2816 // <V as Iterator>::Item = U,
2817 // U: Iterator, U: Sized,
2818 // V: Iterator, V: Sized,
2819 // <U as Iterator>::Item: Copy
2820 // When we instantiate, say, `V => IntoIter<u32>, U => $0`, the last
2821 // obligation will normalize to `<$0 as Iterator>::Item = $1` and
2822 // `$1: Copy`, so we must ensure the obligations are emitted in
2823 // that order.
2824 let predicates = tcx.predicates_of(def_id);
2825 assert_eq!(predicates.parent, None);
2826 let predicates = predicates.instantiate_own(tcx, args);
2827 let mut obligations = PredicateObligations::with_capacity(predicates.len());
2828 for (index, (predicate, span)) in predicates.into_iter().enumerate() {
2829 let cause = if tcx.is_lang_item(parent_trait_pred.def_id(), LangItem::CoerceUnsized) {
2830 cause.clone()
2831 } else {
2832 cause.clone().derived_cause(parent_trait_pred, |derived| {
2833 ObligationCauseCode::ImplDerived(Box::new(ImplDerivedCause {
2834 derived,
2835 impl_or_alias_def_id: def_id,
2836 impl_def_predicate_index: Some(index),
2837 span,
2838 }))
2839 })
2840 };
2841 let clause = normalize_with_depth_to(
2842 self,
2843 param_env,
2844 cause.clone(),
2845 recursion_depth,
2846 predicate,
2847 &mut obligations,
2848 );
2849 obligations.push(Obligation {
2850 cause,
2851 recursion_depth,
2852 param_env,
2853 predicate: clause.as_predicate(),
2854 });
2855 }
2856
2857 // Register any outlives obligations from the trait here, cc #124336.
2858 if matches!(tcx.def_kind(def_id), DefKind::Impl { of_trait: true }) {
2859 for clause in tcx.impl_super_outlives(def_id).iter_instantiated(tcx, args) {
2860 let clause = normalize_with_depth_to(
2861 self,
2862 param_env,
2863 cause.clone(),
2864 recursion_depth,
2865 clause,
2866 &mut obligations,
2867 );
2868 obligations.push(Obligation {
2869 cause: cause.clone(),
2870 recursion_depth,
2871 param_env,
2872 predicate: clause.as_predicate(),
2873 });
2874 }
2875 }
2876
2877 obligations
2878 }
2879
2880 pub(super) fn should_stall_coroutine(&self, def_id: DefId) -> bool {
2881 match self.infcx.typing_mode() {
2882 TypingMode::Analysis { defining_opaque_types_and_generators: stalled_generators } => {
2883 def_id.as_local().is_some_and(|def_id| stalled_generators.contains(&def_id))
2884 }
2885 TypingMode::Coherence
2886 | TypingMode::PostAnalysis
2887 | TypingMode::Borrowck { defining_opaque_types: _ }
2888 | TypingMode::PostBorrowckAnalysis { defined_opaque_types: _ } => false,
2889 }
2890 }
2891}
2892
2893impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
2894 fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2895 TraitObligationStackList::with(self)
2896 }
2897
2898 fn cache(&self) -> &'o ProvisionalEvaluationCache<'tcx> {
2899 self.previous.cache
2900 }
2901
2902 fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2903 self.list()
2904 }
2905
2906 /// Indicates that attempting to evaluate this stack entry
2907 /// required accessing something from the stack at depth `reached_depth`.
2908 fn update_reached_depth(&self, reached_depth: usize) {
2909 assert!(
2910 self.depth >= reached_depth,
2911 "invoked `update_reached_depth` with something under this stack: \
2912 self.depth={} reached_depth={}",
2913 self.depth,
2914 reached_depth,
2915 );
2916 debug!(reached_depth, "update_reached_depth");
2917 let mut p = self;
2918 while reached_depth < p.depth {
2919 debug!(?p.fresh_trait_pred, "update_reached_depth: marking as cycle participant");
2920 p.reached_depth.set(p.reached_depth.get().min(reached_depth));
2921 p = p.previous.head.unwrap();
2922 }
2923 }
2924}
2925
2926/// The "provisional evaluation cache" is used to store intermediate cache results
2927/// when solving auto traits. Auto traits are unusual in that they can support
2928/// cycles. So, for example, a "proof tree" like this would be ok:
2929///
2930/// - `Foo<T>: Send` :-
2931/// - `Bar<T>: Send` :-
2932/// - `Foo<T>: Send` -- cycle, but ok
2933/// - `Baz<T>: Send`
2934///
2935/// Here, to prove `Foo<T>: Send`, we have to prove `Bar<T>: Send` and
2936/// `Baz<T>: Send`. Proving `Bar<T>: Send` in turn required `Foo<T>: Send`.
2937/// For non-auto traits, this cycle would be an error, but for auto traits (because
2938/// they are coinductive) it is considered ok.
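///
/// A minimal sketch of types producing a proof tree of this shape
/// (hypothetical, for illustration only):
///
/// ```ignore (illustrative)
/// struct Foo<T>(Box<Bar<T>>);
/// struct Bar<T>(Box<Foo<T>>, Baz<T>);
/// struct Baz<T>(T);
/// // Proving `Foo<T>: Send` requires `Bar<T>: Send`, which in turn requires
/// // `Foo<T>: Send` again -- the cycle -- as well as `Baz<T>: Send`.
/// ```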
2939///
2940/// However, there is a complication: at the point where we have
2941/// "proven" `Bar<T>: Send`, we have in fact only proven it
2942/// *provisionally*. In particular, we proved that `Bar<T>: Send`
2943/// *under the assumption* that `Foo<T>: Send`. But what if we later
2944/// find out this assumption is wrong? Specifically, we could
2945/// encounter some kind of error proving `Baz<T>: Send`. In that case,
2946/// `Bar<T>: Send` didn't turn out to be true.
2947///
2948/// In Issue #60010, we found a bug in rustc where it would cache
2949/// these intermediate results. This was fixed in #60444 by disabling
2950/// *all* caching for things involved in a cycle -- in our example,
2951/// that would mean we don't cache that `Bar<T>: Send`. But this led
2952/// to large slowdowns.
2953///
2954/// Specifically, imagine this scenario, where proving `Baz<T>: Send`
2955/// first requires proving `Bar<T>: Send` (which is true):
2956///
2957/// - `Foo<T>: Send` :-
2958/// - `Bar<T>: Send` :-
2959/// - `Foo<T>: Send` -- cycle, but ok
2960/// - `Baz<T>: Send`
2961/// - `Bar<T>: Send` -- would be nice for this to be a cache hit!
2962/// - `*const T: Send` -- but what if we later encounter an error?
2963///
2964/// The *provisional evaluation cache* resolves this issue. It stores
2965/// cache results that we've proven but which were involved in a cycle
2966/// in some way. We track the minimal stack depth (i.e., the
2967/// farthest from the top of the stack) that we are dependent on.
2968/// The idea is that the cache results within are all valid -- so long as
2969/// none of the nodes in between the current node and the node at that minimum
2970/// depth result in an error (in which case the cached results are just thrown away).
2971///
2972/// During evaluation, we consult this provisional cache and rely on
2973/// it. Accessing a cached value is considered equivalent to accessing
2974/// a result at `reached_depth`, so it marks the *current* solution as
2975/// provisional as well. If an error is encountered, we toss out any
2976/// provisional results added from the subtree that encountered the
2977/// error. When we pop the node at `reached_depth` from the stack, we
2978/// can commit all the things that remain in the provisional cache.
2979struct ProvisionalEvaluationCache<'tcx> {
2980 /// next "depth first number" to issue -- just a counter
2981 dfn: Cell<usize>,
2982
2983 /// Map from cache key to the provisionally evaluated thing.
2984 /// The cache entries contain the result but also the DFN in which they
2985 /// were added. The DFN is used to clear out values on failure.
2986 ///
2987 /// Imagine we have a stack like:
2988 ///
2989 /// - `A B C` and we add a cache for the result of C (DFN 2)
2990 /// - Then we have a stack `A B D` where `D` has DFN 3
2991 /// - We try to solve D by evaluating E: `A B D E` (DFN 4)
2992 /// - `E` generates various cache entries which have cyclic dependencies on `B`
2993 /// - `A B D E F` and so forth
2994 /// - the DFN of `F` for example would be 5
2995 /// - then we determine that `E` is in error -- we will then clear
2996 /// all cache values whose DFN is >= 4 -- in this case, that
2997 /// means the cached value for `F`.
2998 map: RefCell<FxIndexMap<ty::PolyTraitPredicate<'tcx>, ProvisionalEvaluation>>,
2999
3000 /// The stack of terms that we assume to be well-formed because a `WF(term)` predicate
3001 /// is on the stack above (and because of wellformedness is coinductive).
3002 /// In an "ideal" world, this would share a stack with trait predicates in
3003 /// `TraitObligationStack`. However, trait predicates are *much* hotter than
3004 /// `WellFormed` predicates, and it's very likely that the additional matches
3005 /// will have a perf effect. The value here is the well-formed `GenericArg`
3006 /// and the depth of the trait predicate *above* that well-formed predicate.
3007 wf_args: RefCell<Vec<(ty::Term<'tcx>, usize)>>,
3008}
3009
3010/// A cache value for the provisional cache: contains the depth-first
3011/// number (DFN) and result.
3012#[derive(Copy, Clone, Debug)]
3013struct ProvisionalEvaluation {
3014 from_dfn: usize,
3015 reached_depth: usize,
3016 result: EvaluationResult,
3017}
3018
3019impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> {
3020 fn default() -> Self {
3021 Self { dfn: Cell::new(0), map: Default::default(), wf_args: Default::default() }
3022 }
3023}
3024
3025impl<'tcx> ProvisionalEvaluationCache<'tcx> {
3026 /// Get the next DFN in sequence (basically a counter).
3027 fn next_dfn(&self) -> usize {
3028 let result = self.dfn.get();
3029 self.dfn.set(result + 1);
3030 result
3031 }
3032
3033 /// Check the provisional cache for any result for
3034    /// `fresh_trait_pred`. If there is a hit, then you must consider
3035 /// it an access to the stack slots at depth
3036 /// `reached_depth` (from the returned value).
3037 fn get_provisional(
3038 &self,
3039 fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
3040 ) -> Option<ProvisionalEvaluation> {
3041 debug!(
3042 ?fresh_trait_pred,
3043 "get_provisional = {:#?}",
3044 self.map.borrow().get(&fresh_trait_pred),
3045 );
3046 Some(*self.map.borrow().get(&fresh_trait_pred)?)
3047 }
3048
3049 /// Insert a provisional result into the cache. The result came
3050 /// from the node with the given DFN. It accessed a minimum depth
3051 /// of `reached_depth` to compute. It evaluated `fresh_trait_pred`
3052 /// and resulted in `result`.
3053 fn insert_provisional(
3054 &self,
3055 from_dfn: usize,
3056 reached_depth: usize,
3057 fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
3058 result: EvaluationResult,
3059 ) {
3060 debug!(?from_dfn, ?fresh_trait_pred, ?result, "insert_provisional");
3061
3062 let mut map = self.map.borrow_mut();
3063
3064 // Subtle: when we complete working on the DFN `from_dfn`, anything
3065 // that remains in the provisional cache must be dependent on some older
3066 // stack entry than `from_dfn`. We have to update their depth with our transitive
3067        // depth in that case or else it would be referring to some popped node.
3068 //
3069 // Example:
3070 // A (reached depth 0)
3071 // ...
3072 // B // depth 1 -- reached depth = 0
3073 // C // depth 2 -- reached depth = 1 (should be 0)
3074 // B
3075 // A // depth 0
3076 // D (reached depth 1)
3077 // C (cache -- reached depth = 2)
3078 for (_k, v) in &mut *map {
3079 if v.from_dfn >= from_dfn {
3080 v.reached_depth = reached_depth.min(v.reached_depth);
3081 }
3082 }
3083
3084 map.insert(fresh_trait_pred, ProvisionalEvaluation { from_dfn, reached_depth, result });
3085 }
3086
3087 /// Invoked when the node with dfn `dfn` does not get a successful
3088 /// result. This will clear out any provisional cache entries
3089 /// that were added since `dfn` was created. This is because the
3090 /// provisional entries are things which must assume that the
3091 /// things on the stack at the time of their creation succeeded --
3092 /// since the failing node is presently at the top of the stack,
3093 /// these provisional entries must either depend on it or some
3094 /// ancestor of it.
3095 fn on_failure(&self, dfn: usize) {
3096 debug!(?dfn, "on_failure");
3097 self.map.borrow_mut().retain(|key, eval| {
3098            if eval.from_dfn >= dfn {
3099 debug!("on_failure: removing {:?}", key);
3100 false
3101 } else {
3102 true
3103 }
3104 });
3105 }
3106
3107 /// Invoked when the node at depth `depth` completed without
3108 /// depending on anything higher in the stack (if that completion
3109 /// was a failure, then `on_failure` should have been invoked
3110 /// already).
3111 ///
3112 /// Note that we may still have provisional cache items remaining
3113 /// in the cache when this is done. For example, if there is a
3114 /// cycle:
3115 ///
3116 /// * A depends on...
3117 /// * B depends on A
3118 /// * C depends on...
3119 /// * D depends on C
3120 /// * ...
3121 ///
3122 /// Then as we complete the C node we will have a provisional cache
3123 /// with results for A, B, C, and D. This method would clear out
3124 /// the C and D results, but leave A and B provisional.
3125 ///
3126 /// This is determined based on the DFN: we remove any provisional
3127 /// results created since `dfn` started (e.g., in our example, dfn
3128 /// would be 2, representing the C node, and hence we would
3129 /// remove the result for D, which has DFN 3, but not the results for
3130 /// A and B, which have DFNs 0 and 1 respectively).
3131 ///
3132 /// Note that we *do not* attempt to cache these cycle participants
3133 /// in the evaluation cache. Doing so would require carefully computing
3134 /// the correct `DepNode` to store in the cache entry:
3135 /// cycle participants may implicitly depend on query results
3136 /// related to other participants in the cycle, due to our logic
3137 /// which examines the evaluation stack.
3138 ///
3139 /// We used to try to perform this caching,
3140 /// but it lead to multiple incremental compilation ICEs
3141 /// (see #92987 and #96319), and was very hard to understand.
3142 /// Fortunately, removing the caching didn't seem to
3143 /// have a performance impact in practice.
3144 fn on_completion(&self, dfn: usize) {
3145 debug!(?dfn, "on_completion");
3146 self.map.borrow_mut().retain(|fresh_trait_pred, eval| {
3147 if eval.from_dfn >= dfn {
3148 debug!(?fresh_trait_pred, ?eval, "on_completion");
3149 return false;
3150 }
3151 true
3152 });
3153 }
3154}
3155
3156#[derive(Copy, Clone)]
3157struct TraitObligationStackList<'o, 'tcx> {
3158 cache: &'o ProvisionalEvaluationCache<'tcx>,
3159 head: Option<&'o TraitObligationStack<'o, 'tcx>>,
3160}
3161
3162impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> {
3163 fn empty(cache: &'o ProvisionalEvaluationCache<'tcx>) -> TraitObligationStackList<'o, 'tcx> {
3164 TraitObligationStackList { cache, head: None }
3165 }
3166
3167 fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> {
3168 TraitObligationStackList { cache: r.cache(), head: Some(r) }
3169 }
3170
3171 fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
3172 self.head
3173 }
3174
3175 fn depth(&self) -> usize {
3176 if let Some(head) = self.head { head.depth } else { 0 }
3177 }
3178}
3179
3180impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> {
3181 type Item = &'o TraitObligationStack<'o, 'tcx>;
3182
3183 fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
3184 let o = self.head?;
3185 *self = o.previous;
3186 Some(o)
3187 }
3188}
3189
3190impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> {
3191 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3192 write!(f, "TraitObligationStack({:?})", self.obligation)
3193 }
3194}
3195
3196pub(crate) enum ProjectionMatchesProjection {
3197 Yes,
3198 Ambiguous,
3199 No,
3200}
3201
3202#[derive(Clone, Debug, TypeFoldable, TypeVisitable)]
3203pub(crate) struct AutoImplConstituents<'tcx> {
3204 pub types: Vec<Ty<'tcx>>,
3205 pub assumptions: Vec<ty::ArgOutlivesPredicate<'tcx>>,
3206}