//! Miscellaneous type-system utilities that are too small to deserve their own modules.
use std::{fmt, iter};

use rustc_abi::{Float, Integer, IntegerType, Size};
use rustc_apfloat::Float as _;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_errors::ErrorGuaranteed;
use rustc_hashes::Hash128;
use rustc_hir::def::{CtorOf, DefKind, Res};
use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
use rustc_hir::limit::Limit;
use rustc_hir::{self as hir, find_attr};
use rustc_index::bit_set::GrowableBitSet;
use rustc_macros::{HashStable, TyDecodable, TyEncodable, extension};
use rustc_span::sym;
use rustc_type_ir::solve::SizedTraitKind;
use smallvec::{SmallVec, smallvec};
use tracing::{debug, instrument};

use super::TypingEnv;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir;
use crate::query::Providers;
use crate::traits::ObligationCause;
use crate::ty::layout::{FloatExt, IntegerExt};
use crate::ty::{
    self, Asyncness, FallibleTypeFolder, GenericArgKind, GenericArgsRef, Ty, TyCtxt, TypeFoldable,
    TypeFolder, TypeSuperFoldable, TypeVisitableExt, Upcast,
};
3334#[derive(#[automatically_derived]
impl<'tcx> ::core::marker::Copy for Discr<'tcx> { }Copy, #[automatically_derived]
impl<'tcx> ::core::clone::Clone for Discr<'tcx> {
#[inline]
fn clone(&self) -> Discr<'tcx> {
let _: ::core::clone::AssertParamIsClone<u128>;
let _: ::core::clone::AssertParamIsClone<Ty<'tcx>>;
*self
}
}Clone, #[automatically_derived]
impl<'tcx> ::core::fmt::Debug for Discr<'tcx> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field2_finish(f, "Discr", "val",
&self.val, "ty", &&self.ty)
}
}Debug)]
35pub struct Discr<'tcx> {
36/// Bit representation of the discriminant (e.g., `-1i8` is `0xFF_u128`).
37pub val: u128,
38pub ty: Ty<'tcx>,
39}
/// Used as an input to [`TyCtxt::uses_unique_generic_params`].
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CheckRegions {
    /// Do not check region arguments at all.
    No,
    /// Only permit parameter regions. This should be used
    /// for everything apart from functions, which may use
    /// `ReBound` to represent late-bound regions.
    OnlyParam,
    /// Check region parameters from a function definition.
    /// Allows `ReEarlyParam` and `ReBound` to handle early
    /// and late-bound region parameters.
    FromFunction,
}
5455#[derive(#[automatically_derived]
impl<'tcx> ::core::marker::Copy for NotUniqueParam<'tcx> { }Copy, #[automatically_derived]
impl<'tcx> ::core::clone::Clone for NotUniqueParam<'tcx> {
#[inline]
fn clone(&self) -> NotUniqueParam<'tcx> {
let _: ::core::clone::AssertParamIsClone<ty::GenericArg<'tcx>>;
let _: ::core::clone::AssertParamIsClone<ty::GenericArg<'tcx>>;
*self
}
}Clone, #[automatically_derived]
impl<'tcx> ::core::fmt::Debug for NotUniqueParam<'tcx> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
NotUniqueParam::DuplicateParam(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f,
"DuplicateParam", &__self_0),
NotUniqueParam::NotParam(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f,
"NotParam", &__self_0),
}
}
}Debug)]
56pub enum NotUniqueParam<'tcx> {
57 DuplicateParam(ty::GenericArg<'tcx>),
58 NotParam(ty::GenericArg<'tcx>),
59}
6061impl<'tcx> fmt::Displayfor Discr<'tcx> {
62fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
63match *self.ty.kind() {
64 ty::Int(ity) => {
65let size = ty::tls::with(|tcx| Integer::from_int_ty(&tcx, ity).size());
66let x = self.val;
67// sign extend the raw representation to be an i128
68let x = size.sign_extend(x) as i128;
69fmt.write_fmt(format_args!("{0}", x))write!(fmt, "{x}")70 }
71_ => fmt.write_fmt(format_args!("{0}", self.val))write!(fmt, "{}", self.val),
72 }
73 }
74}
7576impl<'tcx> Discr<'tcx> {
77/// Adds `1` to the value and wraps around if the maximum for the type is reached.
78pub fn wrap_incr(self, tcx: TyCtxt<'tcx>) -> Self {
79self.checked_add(tcx, 1).0
80}
81pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
82let (size, signed) = self.ty.int_size_and_signed(tcx);
83let (val, oflo) = if signed {
84let min = size.signed_int_min();
85let max = size.signed_int_max();
86let val = size.sign_extend(self.val);
87if !(n < (i128::MAX as u128)) {
::core::panicking::panic("assertion failed: n < (i128::MAX as u128)")
};assert!(n < (i128::MAX as u128));
88let n = nas i128;
89let oflo = val > max - n;
90let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
91// zero the upper bits
92let val = valas u128;
93let val = size.truncate(val);
94 (val, oflo)
95 } else {
96let max = size.unsigned_int_max();
97let val = self.val;
98let oflo = val > max - n;
99let val = if oflo { n - (max - val) - 1 } else { val + n };
100 (val, oflo)
101 };
102 (Self { val, ty: self.ty }, oflo)
103 }
104}
105106impl IntTypeExt for IntegerType {
fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
match self {
IntegerType::Pointer(true) => tcx.types.isize,
IntegerType::Pointer(false) => tcx.types.usize,
IntegerType::Fixed(i, s) => i.to_ty(tcx, *s),
}
}
fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
Discr { val: 0, ty: self.to_ty(tcx) }
}
fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>)
-> Option<Discr<'tcx>> {
if let Some(val) = val {
match (&self.to_ty(tcx), &val.ty) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val,
&*right_val, ::core::option::Option::None);
}
}
};
let (new, oflo) = val.checked_add(tcx, 1);
if oflo { None } else { Some(new) }
} else { Some(self.initial_discriminant(tcx)) }
}
}#[extension(pub trait IntTypeExt)]107impl IntegerType {
108fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
109match self {
110 IntegerType::Pointer(true) => tcx.types.isize,
111 IntegerType::Pointer(false) => tcx.types.usize,
112 IntegerType::Fixed(i, s) => i.to_ty(tcx, *s),
113 }
114 }
115116fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
117Discr { val: 0, ty: self.to_ty(tcx) }
118 }
119120fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>> {
121if let Some(val) = val {
122assert_eq!(self.to_ty(tcx), val.ty);
123let (new, oflo) = val.checked_add(tcx, 1);
124if oflo { None } else { Some(new) }
125 } else {
126Some(self.initial_discriminant(tcx))
127 }
128 }
129}
impl<'tcx> TyCtxt<'tcx> {
    /// Creates a hash of the type `Ty` which will be the same no matter what crate
    /// context it's calculated within. This is used by the `type_id` intrinsic.
    pub fn type_id_hash(self, ty: Ty<'tcx>) -> Hash128 {
        // We don't have region information, so we erase all free regions. Equal types
        // must have the same `TypeId`, so we must anonymize all bound regions as well.
        let ty = self.erase_and_anonymize_regions(ty);

        self.with_stable_hashing_context(|mut hcx| {
            let mut hasher = StableHasher::new();
            // Spans must not influence the hash: the same type written in
            // different places has to produce the same `TypeId`.
            hcx.while_hashing_spans(false, |hcx| ty.hash_stable(hcx, &mut hasher));
            hasher.finish()
        })
    }
145146pub fn res_generics_def_id(self, res: Res) -> Option<DefId> {
147match res {
148 Res::Def(DefKind::Ctor(CtorOf::Variant, _), def_id) => {
149Some(self.parent(self.parent(def_id)))
150 }
151 Res::Def(DefKind::Variant | DefKind::Ctor(CtorOf::Struct, _), def_id) => {
152Some(self.parent(def_id))
153 }
154// Other `DefKind`s don't have generics and would ICE when calling
155 // `generics_of`.
156Res::Def(
157 DefKind::Struct158 | DefKind::Union159 | DefKind::Enum160 | DefKind::Trait161 | DefKind::OpaqueTy162 | DefKind::TyAlias163 | DefKind::ForeignTy164 | DefKind::TraitAlias165 | DefKind::AssocTy166 | DefKind::Fn167 | DefKind::AssocFn168 | DefKind::AssocConst169 | DefKind::Impl { .. },
170 def_id,
171 ) => Some(def_id),
172 Res::Err => None,
173_ => None,
174 }
175 }
176177/// Checks whether `ty: Copy` holds while ignoring region constraints.
178 ///
179 /// This impacts whether values of `ty` are *moved* or *copied*
180 /// when referenced. This means that we may generate MIR which
181 /// does copies even when the type actually doesn't satisfy the
182 /// full requirements for the `Copy` trait (cc #29149) -- this
183 /// winds up being reported as an error during NLL borrow check.
184 ///
185 /// This function should not be used if there is an `InferCtxt` available.
186 /// Use `InferCtxt::type_is_copy_modulo_regions` instead.
187pub fn type_is_copy_modulo_regions(
188self,
189 typing_env: ty::TypingEnv<'tcx>,
190 ty: Ty<'tcx>,
191 ) -> bool {
192ty.is_trivially_pure_clone_copy() || self.is_copy_raw(typing_env.as_query_input(ty))
193 }
194195/// Checks whether `ty: UseCloned` holds while ignoring region constraints.
196 ///
197 /// This function should not be used if there is an `InferCtxt` available.
198 /// Use `InferCtxt::type_is_copy_modulo_regions` instead.
199pub fn type_is_use_cloned_modulo_regions(
200self,
201 typing_env: ty::TypingEnv<'tcx>,
202 ty: Ty<'tcx>,
203 ) -> bool {
204ty.is_trivially_pure_clone_copy() || self.is_use_cloned_raw(typing_env.as_query_input(ty))
205 }
206207/// Returns the deeply last field of nested structures, or the same type if
208 /// not a structure at all. Corresponds to the only possible unsized field,
209 /// and its type can be used to determine unsizing strategy.
210 ///
211 /// Should only be called if `ty` has no inference variables and does not
212 /// need its lifetimes preserved (e.g. as part of codegen); otherwise
213 /// normalization attempt may cause compiler bugs.
214pub fn struct_tail_for_codegen(
215self,
216 ty: Ty<'tcx>,
217 typing_env: ty::TypingEnv<'tcx>,
218 ) -> Ty<'tcx> {
219let tcx = self;
220tcx.struct_tail_raw(
221ty,
222&ObligationCause::dummy(),
223 |ty| tcx.normalize_erasing_regions(typing_env, ty),
224 || {},
225 )
226 }
227228/// Returns true if a type has metadata.
229pub fn type_has_metadata(self, ty: Ty<'tcx>, typing_env: ty::TypingEnv<'tcx>) -> bool {
230if ty.is_sized(self, typing_env) {
231return false;
232 }
233234let tail = self.struct_tail_for_codegen(ty, typing_env);
235match tail.kind() {
236 ty::Foreign(..) => false,
237 ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
238_ => crate::util::bug::bug_fmt(format_args!("unexpected unsized tail: {0:?}",
tail))bug!("unexpected unsized tail: {:?}", tail),
239 }
240 }
241242/// Returns the deeply last field of nested structures, or the same type if
243 /// not a structure at all. Corresponds to the only possible unsized field,
244 /// and its type can be used to determine unsizing strategy.
245 ///
246 /// This is parameterized over the normalization strategy (i.e. how to
247 /// handle `<T as Trait>::Assoc` and `impl Trait`). You almost certainly do
248 /// **NOT** want to pass the identity function here, unless you know what
249 /// you're doing, or you're within normalization code itself and will handle
250 /// an unnormalized tail recursively.
251 ///
252 /// See also `struct_tail_for_codegen`, which is suitable for use
253 /// during codegen.
254pub fn struct_tail_raw(
255self,
256mut ty: Ty<'tcx>,
257 cause: &ObligationCause<'tcx>,
258mut normalize: impl FnMut(Ty<'tcx>) -> Ty<'tcx>,
259// This is currently used to allow us to walk a ValTree
260 // in lockstep with the type in order to get the ValTree branch that
261 // corresponds to an unsized field.
262mut f: impl FnMut() -> (),
263 ) -> Ty<'tcx> {
264let recursion_limit = self.recursion_limit();
265for iteration in 0.. {
266if !recursion_limit.value_within_limit(iteration) {
267let suggested_limit = match recursion_limit {
268 Limit(0) => Limit(2),
269 limit => limit * 2,
270 };
271let reported = self.dcx().emit_err(crate::error::RecursionLimitReached {
272 span: cause.span,
273 ty,
274 suggested_limit,
275 });
276return Ty::new_error(self, reported);
277 }
278match *ty.kind() {
279 ty::Adt(def, args) => {
280if !def.is_struct() {
281break;
282 }
283match def.non_enum_variant().tail_opt() {
284Some(field) => {
285 f();
286 ty = field.ty(self, args);
287 }
288None => break,
289 }
290 }
291292 ty::Tuple(tys) if let Some((&last_ty, _)) = tys.split_last() => {
293 f();
294 ty = last_ty;
295 }
296297 ty::Tuple(_) => break,
298299 ty::Pat(inner, _) => {
300 f();
301 ty = inner;
302 }
303304 ty::Alias(..) => {
305let normalized = normalize(ty);
306if ty == normalized {
307return ty;
308 } else {
309 ty = normalized;
310 }
311 }
312313_ => {
314break;
315 }
316 }
317 }
318ty319 }
320321/// Same as applying `struct_tail` on `source` and `target`, but only
322 /// keeps going as long as the two types are instances of the same
323 /// structure definitions.
324 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, dyn Trait)`,
325 /// whereas struct_tail produces `T`, and `Trait`, respectively.
326 ///
327 /// Should only be called if the types have no inference variables and do
328 /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
329 /// normalization attempt may cause compiler bugs.
330pub fn struct_lockstep_tails_for_codegen(
331self,
332 source: Ty<'tcx>,
333 target: Ty<'tcx>,
334 typing_env: ty::TypingEnv<'tcx>,
335 ) -> (Ty<'tcx>, Ty<'tcx>) {
336let tcx = self;
337tcx.struct_lockstep_tails_raw(source, target, |ty| {
338tcx.normalize_erasing_regions(typing_env, ty)
339 })
340 }
341342/// Same as applying `struct_tail` on `source` and `target`, but only
343 /// keeps going as long as the two types are instances of the same
344 /// structure definitions.
345 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
346 /// whereas struct_tail produces `T`, and `Trait`, respectively.
347 ///
348 /// See also `struct_lockstep_tails_for_codegen`, which is suitable for use
349 /// during codegen.
350pub fn struct_lockstep_tails_raw(
351self,
352 source: Ty<'tcx>,
353 target: Ty<'tcx>,
354 normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
355 ) -> (Ty<'tcx>, Ty<'tcx>) {
356let (mut a, mut b) = (source, target);
357loop {
358match (a.kind(), b.kind()) {
359 (&ty::Adt(a_def, a_args), &ty::Adt(b_def, b_args))
360if a_def == b_def && a_def.is_struct() =>
361 {
362if let Some(f) = a_def.non_enum_variant().tail_opt() {
363a = f.ty(self, a_args);
364b = f.ty(self, b_args);
365 } else {
366break;
367 }
368 }
369 (&ty::Tuple(a_tys), &ty::Tuple(b_tys)) if a_tys.len() == b_tys.len() => {
370if let Some(&a_last) = a_tys.last() {
371a = a_last;
372b = *b_tys.last().unwrap();
373 } else {
374break;
375 }
376 }
377 (ty::Alias(..), _) | (_, ty::Alias(..)) => {
378// If either side is a projection, attempt to
379 // progress via normalization. (Should be safe to
380 // apply to both sides as normalization is
381 // idempotent.)
382let a_norm = normalize(a);
383let b_norm = normalize(b);
384if a == a_norm && b == b_norm {
385break;
386 } else {
387a = a_norm;
388b = b_norm;
389 }
390 }
391392_ => break,
393 }
394 }
395 (a, b)
396 }
397398/// Calculate the destructor of a given type.
399pub fn calculate_dtor(
400self,
401 adt_did: LocalDefId,
402 validate: impl Fn(Self, LocalDefId) -> Result<(), ErrorGuaranteed>,
403 ) -> Option<ty::Destructor> {
404let drop_trait = self.lang_items().drop_trait()?;
405self.ensure_ok().coherent_trait(drop_trait).ok()?;
406407let mut dtor_candidate = None;
408// `Drop` impls can only be written in the same crate as the adt, and cannot be blanket impls
409for &impl_did in self.local_trait_impls(drop_trait) {
410let Some(adt_def) = self.type_of(impl_did).skip_binder().ty_adt_def() else { continue };
411if adt_def.did() != adt_did.to_def_id() {
412continue;
413 }
414415if validate(self, impl_did).is_err() {
416// Already `ErrorGuaranteed`, no need to delay a span bug here.
417continue;
418 }
419420let Some(&item_id) = self.associated_item_def_ids(impl_did).first() else {
421self.dcx()
422 .span_delayed_bug(self.def_span(impl_did), "Drop impl without drop function");
423continue;
424 };
425426if self.def_kind(item_id) != DefKind::AssocFn {
427self.dcx().span_delayed_bug(self.def_span(item_id), "drop is not a function");
428continue;
429 }
430431if let Some(old_item_id) = dtor_candidate {
432self.dcx()
433 .struct_span_err(self.def_span(item_id), "multiple drop impls found")
434 .with_span_note(self.def_span(old_item_id), "other impl here")
435 .delay_as_bug();
436 }
437438 dtor_candidate = Some(item_id);
439 }
440441let did = dtor_candidate?;
442Some(ty::Destructor { did })
443 }
444445/// Calculate the async destructor of a given type.
446pub fn calculate_async_dtor(
447self,
448 adt_did: LocalDefId,
449 validate: impl Fn(Self, LocalDefId) -> Result<(), ErrorGuaranteed>,
450 ) -> Option<ty::AsyncDestructor> {
451let async_drop_trait = self.lang_items().async_drop_trait()?;
452self.ensure_ok().coherent_trait(async_drop_trait).ok()?;
453454let mut dtor_candidate = None;
455// `AsyncDrop` impls can only be written in the same crate as the adt, and cannot be blanket impls
456for &impl_did in self.local_trait_impls(async_drop_trait) {
457let Some(adt_def) = self.type_of(impl_did).skip_binder().ty_adt_def() else { continue };
458if adt_def.did() != adt_did.to_def_id() {
459continue;
460 }
461462if validate(self, impl_did).is_err() {
463// Already `ErrorGuaranteed`, no need to delay a span bug here.
464continue;
465 }
466467if let Some(old_impl_did) = dtor_candidate {
468self.dcx()
469 .struct_span_err(self.def_span(impl_did), "multiple async drop impls found")
470 .with_span_note(self.def_span(old_impl_did), "other impl here")
471 .delay_as_bug();
472 }
473474 dtor_candidate = Some(impl_did);
475 }
476477Some(ty::AsyncDestructor { impl_did: dtor_candidate?.into() })
478 }
479480/// Returns the set of types that are required to be alive in
481 /// order to run the destructor of `def` (see RFCs 769 and
482 /// 1238).
483 ///
484 /// Note that this returns only the constraints for the
485 /// destructor of `def` itself. For the destructors of the
486 /// contents, you need `adt_dtorck_constraint`.
487pub fn destructor_constraints(self, def: ty::AdtDef<'tcx>) -> Vec<ty::GenericArg<'tcx>> {
488let dtor = match def.destructor(self) {
489None => {
490{
use ::tracing::__macro_support::Callsite as _;
static __CALLSITE: ::tracing::callsite::DefaultCallsite =
{
static META: ::tracing::Metadata<'static> =
{
::tracing_core::metadata::Metadata::new("event compiler/rustc_middle/src/ty/util.rs:490",
"rustc_middle::ty::util", ::tracing::Level::DEBUG,
::tracing_core::__macro_support::Option::Some("compiler/rustc_middle/src/ty/util.rs"),
::tracing_core::__macro_support::Option::Some(490u32),
::tracing_core::__macro_support::Option::Some("rustc_middle::ty::util"),
::tracing_core::field::FieldSet::new(&["message"],
::tracing_core::callsite::Identifier(&__CALLSITE)),
::tracing::metadata::Kind::EVENT)
};
::tracing::callsite::DefaultCallsite::new(&META)
};
let enabled =
::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
&&
::tracing::Level::DEBUG <=
::tracing::level_filters::LevelFilter::current() &&
{
let interest = __CALLSITE.interest();
!interest.is_never() &&
::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
interest)
};
if enabled {
(|value_set: ::tracing::field::ValueSet|
{
let meta = __CALLSITE.metadata();
::tracing::Event::dispatch(meta, &value_set);
;
})({
#[allow(unused_imports)]
use ::tracing::field::{debug, display, Value};
let mut iter = __CALLSITE.metadata().fields().iter();
__CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
::tracing::__macro_support::Option::Some(&format_args!("destructor_constraints({0:?}) - no dtor",
def.did()) as &dyn Value))])
});
} else { ; }
};debug!("destructor_constraints({:?}) - no dtor", def.did());
491return ::alloc::vec::Vec::new()vec![];
492 }
493Some(dtor) => dtor.did,
494 };
495496let impl_def_id = self.parent(dtor);
497let impl_generics = self.generics_of(impl_def_id);
498499// We have a destructor - all the parameters that are not
500 // pure_wrt_drop (i.e, don't have a #[may_dangle] attribute)
501 // must be live.
502503 // We need to return the list of parameters from the ADTs
504 // generics/args that correspond to impure parameters on the
505 // impl's generics. This is a bit ugly, but conceptually simple:
506 //
507 // Suppose our ADT looks like the following
508 //
509 // struct S<X, Y, Z>(X, Y, Z);
510 //
511 // and the impl is
512 //
513 // impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
514 //
515 // We want to return the parameters (X, Y). For that, we match
516 // up the item-args <X, Y, Z> with the args on the impl ADT,
517 // <P1, P2, P0>, and then look up which of the impl args refer to
518 // parameters marked as pure.
519520let impl_args = match *self.type_of(impl_def_id).instantiate_identity().kind() {
521 ty::Adt(def_, args) if def_ == def => args,
522_ => crate::util::bug::span_bug_fmt(self.def_span(impl_def_id),
format_args!("expected ADT for self type of `Drop` impl"))span_bug!(self.def_span(impl_def_id), "expected ADT for self type of `Drop` impl"),
523 };
524525let item_args = ty::GenericArgs::identity_for_item(self, def.did());
526527let result = iter::zip(item_args, impl_args)
528 .filter(|&(_, arg)| {
529match arg.kind() {
530GenericArgKind::Lifetime(region) => match region.kind() {
531 ty::ReEarlyParam(ebr) => {
532 !impl_generics.region_param(ebr, self).pure_wrt_drop
533 }
534// Error: not a region param
535_ => false,
536 },
537GenericArgKind::Type(ty) => match *ty.kind() {
538 ty::Param(pt) => !impl_generics.type_param(pt, self).pure_wrt_drop,
539// Error: not a type param
540_ => false,
541 },
542GenericArgKind::Const(ct) => match ct.kind() {
543 ty::ConstKind::Param(pc) => {
544 !impl_generics.const_param(pc, self).pure_wrt_drop
545 }
546// Error: not a const param
547_ => false,
548 },
549 }
550 })
551 .map(|(item_param, _)| item_param)
552 .collect();
553{
use ::tracing::__macro_support::Callsite as _;
static __CALLSITE: ::tracing::callsite::DefaultCallsite =
{
static META: ::tracing::Metadata<'static> =
{
::tracing_core::metadata::Metadata::new("event compiler/rustc_middle/src/ty/util.rs:553",
"rustc_middle::ty::util", ::tracing::Level::DEBUG,
::tracing_core::__macro_support::Option::Some("compiler/rustc_middle/src/ty/util.rs"),
::tracing_core::__macro_support::Option::Some(553u32),
::tracing_core::__macro_support::Option::Some("rustc_middle::ty::util"),
::tracing_core::field::FieldSet::new(&["message"],
::tracing_core::callsite::Identifier(&__CALLSITE)),
::tracing::metadata::Kind::EVENT)
};
::tracing::callsite::DefaultCallsite::new(&META)
};
let enabled =
::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
&&
::tracing::Level::DEBUG <=
::tracing::level_filters::LevelFilter::current() &&
{
let interest = __CALLSITE.interest();
!interest.is_never() &&
::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
interest)
};
if enabled {
(|value_set: ::tracing::field::ValueSet|
{
let meta = __CALLSITE.metadata();
::tracing::Event::dispatch(meta, &value_set);
;
})({
#[allow(unused_imports)]
use ::tracing::field::{debug, display, Value};
let mut iter = __CALLSITE.metadata().fields().iter();
__CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
::tracing::__macro_support::Option::Some(&format_args!("destructor_constraint({0:?}) = {1:?}",
def.did(), result) as &dyn Value))])
});
} else { ; }
};debug!("destructor_constraint({:?}) = {:?}", def.did(), result);
554result555 }
556557/// Checks whether each generic argument is simply a unique generic parameter.
558pub fn uses_unique_generic_params(
559self,
560 args: &[ty::GenericArg<'tcx>],
561 ignore_regions: CheckRegions,
562 ) -> Result<(), NotUniqueParam<'tcx>> {
563let mut seen = GrowableBitSet::default();
564let mut seen_late = FxHashSet::default();
565for arg in args {
566match arg.kind() {
567 GenericArgKind::Lifetime(lt) => match (ignore_regions, lt.kind()) {
568 (CheckRegions::FromFunction, ty::ReBound(di, reg)) => {
569if !seen_late.insert((di, reg)) {
570return Err(NotUniqueParam::DuplicateParam(lt.into()));
571 }
572 }
573 (CheckRegions::OnlyParam | CheckRegions::FromFunction, ty::ReEarlyParam(p)) => {
574if !seen.insert(p.index) {
575return Err(NotUniqueParam::DuplicateParam(lt.into()));
576 }
577 }
578 (CheckRegions::OnlyParam | CheckRegions::FromFunction, _) => {
579return Err(NotUniqueParam::NotParam(lt.into()));
580 }
581 (CheckRegions::No, _) => {}
582 },
583 GenericArgKind::Type(t) => match t.kind() {
584 ty::Param(p) => {
585if !seen.insert(p.index) {
586return Err(NotUniqueParam::DuplicateParam(t.into()));
587 }
588 }
589_ => return Err(NotUniqueParam::NotParam(t.into())),
590 },
591 GenericArgKind::Const(c) => match c.kind() {
592 ty::ConstKind::Param(p) => {
593if !seen.insert(p.index) {
594return Err(NotUniqueParam::DuplicateParam(c.into()));
595 }
596 }
597_ => return Err(NotUniqueParam::NotParam(c.into())),
598 },
599 }
600 }
601602Ok(())
603 }
604605/// Returns `true` if `def_id` refers to a closure, coroutine, or coroutine-closure
606 /// (i.e. an async closure). These are all represented by `hir::Closure`, and all
607 /// have the same `DefKind`.
608 ///
609 /// Note that closures have a `DefId`, but the closure *expression* also has a
610 /// `HirId` that is located within the context where the closure appears. The
611 /// parent of the closure's `DefId` will also be the context where it appears.
612pub fn is_closure_like(self, def_id: DefId) -> bool {
613#[allow(non_exhaustive_omitted_patterns)] match self.def_kind(def_id) {
DefKind::Closure => true,
_ => false,
}matches!(self.def_kind(def_id), DefKind::Closure)614 }
615616/// Returns `true` if `def_id` refers to a definition that does not have its own
617 /// type-checking context, i.e. closure, coroutine or inline const.
618pub fn is_typeck_child(self, def_id: DefId) -> bool {
619self.def_kind(def_id).is_typeck_child()
620 }
621622/// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
623pub fn is_trait(self, def_id: DefId) -> bool {
624self.def_kind(def_id) == DefKind::Trait625 }
626627/// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
628 /// and `false` otherwise.
629pub fn is_trait_alias(self, def_id: DefId) -> bool {
630self.def_kind(def_id) == DefKind::TraitAlias631 }
632633/// Returns `true` if this `DefId` refers to the implicit constructor for
634 /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
635pub fn is_constructor(self, def_id: DefId) -> bool {
636#[allow(non_exhaustive_omitted_patterns)] match self.def_kind(def_id) {
DefKind::Ctor(..) => true,
_ => false,
}matches!(self.def_kind(def_id), DefKind::Ctor(..))637 }
638639/// Given the `DefId`, returns the `DefId` of the innermost item that
640 /// has its own type-checking context or "inference environment".
641 ///
642 /// For example, a closure has its own `DefId`, but it is type-checked
643 /// with the containing item. Therefore, when we fetch the `typeck` of the closure,
644 /// for example, we really wind up fetching the `typeck` of the enclosing fn item.
645pub fn typeck_root_def_id(self, def_id: DefId) -> DefId {
646let mut def_id = def_id;
647while self.is_typeck_child(def_id) {
648 def_id = self.parent(def_id);
649 }
650def_id651 }
652653/// Given the `DefId` and args a closure, creates the type of
654 /// `self` argument that the closure expects. For example, for a
655 /// `Fn` closure, this would return a reference type `&T` where
656 /// `T = closure_ty`.
657 ///
658 /// Returns `None` if this closure's kind has not yet been inferred.
659 /// This should only be possible during type checking.
660 ///
661 /// Note that the return value is a late-bound region and hence
662 /// wrapped in a binder.
663pub fn closure_env_ty(
664self,
665 closure_ty: Ty<'tcx>,
666 closure_kind: ty::ClosureKind,
667 env_region: ty::Region<'tcx>,
668 ) -> Ty<'tcx> {
669match closure_kind {
670 ty::ClosureKind::Fn => Ty::new_imm_ref(self, env_region, closure_ty),
671 ty::ClosureKind::FnMut => Ty::new_mut_ref(self, env_region, closure_ty),
672 ty::ClosureKind::FnOnce => closure_ty,
673 }
674 }
675676/// Returns `true` if the node pointed to by `def_id` is a `static` item.
677#[inline]
678pub fn is_static(self, def_id: DefId) -> bool {
679#[allow(non_exhaustive_omitted_patterns)] match self.def_kind(def_id) {
DefKind::Static { .. } => true,
_ => false,
}matches!(self.def_kind(def_id), DefKind::Static { .. })680 }
681682#[inline]
683pub fn static_mutability(self, def_id: DefId) -> Option<hir::Mutability> {
684if let DefKind::Static { mutability, .. } = self.def_kind(def_id) {
685Some(mutability)
686 } else {
687None688 }
689 }
690691/// Returns `true` if this is a `static` item with the `#[thread_local]` attribute.
692pub fn is_thread_local_static(self, def_id: DefId) -> bool {
693self.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
694 }
695696/// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
697#[inline]
698pub fn is_mutable_static(self, def_id: DefId) -> bool {
699self.static_mutability(def_id) == Some(hir::Mutability::Mut)
700 }
701702/// Returns `true` if the item pointed to by `def_id` is a thread local which needs a
703 /// thread local shim generated.
704#[inline]
705pub fn needs_thread_local_shim(self, def_id: DefId) -> bool {
706 !self.sess.target.dll_tls_export
707 && self.is_thread_local_static(def_id)
708 && !self.is_foreign_item(def_id)
709 }
710711/// Returns the type a reference to the thread local takes in MIR.
712pub fn thread_local_ptr_ty(self, def_id: DefId) -> Ty<'tcx> {
713let static_ty = self.type_of(def_id).instantiate_identity();
714if self.is_mutable_static(def_id) {
715Ty::new_mut_ptr(self, static_ty)
716 } else if self.is_foreign_item(def_id) {
717Ty::new_imm_ptr(self, static_ty)
718 } else {
719// FIXME: These things don't *really* have 'static lifetime.
720Ty::new_imm_ref(self, self.lifetimes.re_static, static_ty)
721 }
722 }
723724/// Get the type of the pointer to the static that we use in MIR.
725pub fn static_ptr_ty(self, def_id: DefId, typing_env: ty::TypingEnv<'tcx>) -> Ty<'tcx> {
726// Make sure that any constants in the static's type are evaluated.
727let static_ty =
728self.normalize_erasing_regions(typing_env, self.type_of(def_id).instantiate_identity());
729730// Make sure that accesses to unsafe statics end up using raw pointers.
731 // For thread-locals, this needs to be kept in sync with `Rvalue::ty`.
732if self.is_mutable_static(def_id) {
733Ty::new_mut_ptr(self, static_ty)
734 } else if self.is_foreign_item(def_id) {
735Ty::new_imm_ptr(self, static_ty)
736 } else {
737Ty::new_imm_ref(self, self.lifetimes.re_erased, static_ty)
738 }
739 }
    /// Expands the given impl trait type, stopping if the type is recursive.
    ///
    /// Returns `Ok(expanded)` on success and `Err(expanded)` if expansion hit
    /// the opaque type `def_id` itself again (i.e. the opaque is recursive).
    #[instrument(skip(self), level = "debug", ret)]
    pub fn try_expand_impl_trait_type(
        self,
        def_id: DefId,
        args: GenericArgsRef<'tcx>,
    ) -> Result<Ty<'tcx>, Ty<'tcx>> {
        let mut visitor = OpaqueTypeExpander {
            seen_opaque_tys: FxHashSet::default(),
            expanded_cache: FxHashMap::default(),
            // Recursion is only reported via `found_recursion` when it involves
            // this primary opaque type.
            primary_def_id: Some(def_id),
            found_recursion: false,
            found_any_recursion: false,
            check_recursion: true,
            tcx: self,
        };

        // `expand_opaque_ty` only returns `None` after recursion was already
        // detected; for the very first call that cannot have happened yet, so
        // the `unwrap` is expected to succeed. TODO confirm this invariant.
        let expanded_type = visitor.expand_opaque_ty(def_id, args).unwrap();
        if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
    }
761762/// Query and get an English description for the item's kind.
763pub fn def_descr(self, def_id: DefId) -> &'static str {
764self.def_kind_descr(self.def_kind(def_id), def_id)
765 }
766767/// Get an English description for the item's kind.
768pub fn def_kind_descr(self, def_kind: DefKind, def_id: DefId) -> &'static str {
769match def_kind {
770 DefKind::AssocFnif self.associated_item(def_id).is_method() => "method",
771 DefKind::AssocTyif self.opt_rpitit_info(def_id).is_some() => "opaque type",
772 DefKind::Closureif let Some(coroutine_kind) = self.coroutine_kind(def_id) => {
773match coroutine_kind {
774 hir::CoroutineKind::Desugared(
775 hir::CoroutineDesugaring::Async,
776 hir::CoroutineSource::Fn,
777 ) => "async fn",
778 hir::CoroutineKind::Desugared(
779 hir::CoroutineDesugaring::Async,
780 hir::CoroutineSource::Block,
781 ) => "async block",
782 hir::CoroutineKind::Desugared(
783 hir::CoroutineDesugaring::Async,
784 hir::CoroutineSource::Closure,
785 ) => "async closure",
786 hir::CoroutineKind::Desugared(
787 hir::CoroutineDesugaring::AsyncGen,
788 hir::CoroutineSource::Fn,
789 ) => "async gen fn",
790 hir::CoroutineKind::Desugared(
791 hir::CoroutineDesugaring::AsyncGen,
792 hir::CoroutineSource::Block,
793 ) => "async gen block",
794 hir::CoroutineKind::Desugared(
795 hir::CoroutineDesugaring::AsyncGen,
796 hir::CoroutineSource::Closure,
797 ) => "async gen closure",
798 hir::CoroutineKind::Desugared(
799 hir::CoroutineDesugaring::Gen,
800 hir::CoroutineSource::Fn,
801 ) => "gen fn",
802 hir::CoroutineKind::Desugared(
803 hir::CoroutineDesugaring::Gen,
804 hir::CoroutineSource::Block,
805 ) => "gen block",
806 hir::CoroutineKind::Desugared(
807 hir::CoroutineDesugaring::Gen,
808 hir::CoroutineSource::Closure,
809 ) => "gen closure",
810 hir::CoroutineKind::Coroutine(_) => "coroutine",
811 }
812 }
813_ => def_kind.descr(def_id),
814 }
815 }
816817/// Gets an English article for the [`TyCtxt::def_descr`].
818pub fn def_descr_article(self, def_id: DefId) -> &'static str {
819self.def_kind_descr_article(self.def_kind(def_id), def_id)
820 }
821822/// Gets an English article for the [`TyCtxt::def_kind_descr`].
823pub fn def_kind_descr_article(self, def_kind: DefKind, def_id: DefId) -> &'static str {
824match def_kind {
825 DefKind::AssocFnif self.associated_item(def_id).is_method() => "a",
826 DefKind::Closureif let Some(coroutine_kind) = self.coroutine_kind(def_id) => {
827match coroutine_kind {
828 hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Async, ..) => "an",
829 hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::AsyncGen, ..) => "an",
830 hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Gen, ..) => "a",
831 hir::CoroutineKind::Coroutine(_) => "a",
832 }
833 }
834_ => def_kind.article(),
835 }
836 }
837838/// Return `true` if the supplied `CrateNum` is "user-visible," meaning either a [public]
839 /// dependency, or a [direct] private dependency. This is used to decide whether the crate can
840 /// be shown in `impl` suggestions.
841 ///
842 /// [public]: TyCtxt::is_private_dep
843 /// [direct]: rustc_session::cstore::ExternCrate::is_direct
844pub fn is_user_visible_dep(self, key: CrateNum) -> bool {
845// `#![rustc_private]` overrides defaults to make private dependencies usable.
846if self.features().enabled(sym::rustc_private) {
847return true;
848 }
849850// | Private | Direct | Visible | |
851 // |---------|--------|---------|--------------------|
852 // | Yes | Yes | Yes | !true || true |
853 // | No | Yes | Yes | !false || true |
854 // | Yes | No | No | !true || false |
855 // | No | No | Yes | !false || false |
856!self.is_private_dep(key)
857// If `extern_crate` is `None`, then the crate was injected (e.g., by the allocator).
858 // Treat that kind of crate as "indirect", since it's an implementation detail of
859 // the language.
860|| self.extern_crate(key).is_some_and(|e| e.is_direct())
861 }
    /// Expand any [free alias types][free] contained within the given `value`.
    ///
    /// This should be used over other normalization routines in situations where
    /// it's important not to normalize other alias types and where the predicates
    /// on the corresponding type alias shouldn't be taken into consideration.
    ///
    /// Whenever possible **prefer not to use this function**! Instead, use standard
    /// normalization routines or if feasible don't normalize at all.
    ///
    /// This function comes in handy if you want to mimic the behavior of eager
    /// type alias expansion in a localized manner.
    ///
    /// <div class="warning">
    /// This delays a bug on overflow! Therefore you need to be certain that the
    /// contained types get fully normalized at a later stage. Note that even on
    /// overflow all well-behaved free alias types get expanded correctly, so the
    /// result is still useful.
    /// </div>
    ///
    /// [free]: ty::Free
    pub fn expand_free_alias_tys<T: TypeFoldable<TyCtxt<'tcx>>>(self, value: T) -> T {
        // Start at depth 0; the folder itself enforces the recursion limit.
        value.fold_with(&mut FreeAliasTypeExpander { tcx: self, depth: 0 })
    }
    /// Peel off all [free alias types][free] in this type until there are none left.
    ///
    /// This only expands free alias types in "head" / outermost positions. It can
    /// be used over [expand_free_alias_tys] as an optimization in situations where
    /// one only really cares about the *kind* of the final aliased type but not
    /// the types the other constituent types alias.
    ///
    /// <div class="warning">
    /// This delays a bug on overflow! Therefore you need to be certain that the
    /// type gets fully normalized at a later stage.
    /// </div>
    ///
    /// [free]: ty::Free
    /// [expand_free_alias_tys]: Self::expand_free_alias_tys
    pub fn peel_off_free_alias_tys(self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
        // Fast path: nothing to do unless the head of the type is a free alias.
        let ty::Alias(ty::Free, _) = ty.kind() else { return ty };

        let limit = self.recursion_limit();
        let mut depth = 0;

        // Repeatedly substitute the alias with its definition until the head
        // is no longer a free alias (or we hit the recursion limit).
        while let ty::Alias(ty::Free, alias) = ty.kind() {
            if !limit.value_within_limit(depth) {
                // Delay a bug instead of aborting; see the doc comment above.
                let guar = self.dcx().delayed_bug("overflow expanding free alias type");
                return Ty::new_error(self, guar);
            }

            ty = self.type_of(alias.def_id).instantiate(self, alias.args);
            depth += 1;
        }

        ty
    }
919920// Computes the variances for an alias (opaque or RPITIT) that represent
921 // its (un)captured regions.
922pub fn opt_alias_variances(
923self,
924 kind: impl Into<ty::AliasTermKind>,
925 def_id: DefId,
926 ) -> Option<&'tcx [ty::Variance]> {
927match kind.into() {
928 ty::AliasTermKind::ProjectionTy => {
929if self.is_impl_trait_in_trait(def_id) {
930Some(self.variances_of(def_id))
931 } else {
932None933 }
934 }
935 ty::AliasTermKind::OpaqueTy => Some(self.variances_of(def_id)),
936 ty::AliasTermKind::InherentTy937 | ty::AliasTermKind::InherentConst938 | ty::AliasTermKind::FreeTy939 | ty::AliasTermKind::FreeConst940 | ty::AliasTermKind::UnevaluatedConst941 | ty::AliasTermKind::ProjectionConst => None,
942 }
943 }
944}
/// Type folder that expands opaque types into their hidden types, with
/// caching and optional recursion detection.
struct OpaqueTypeExpander<'tcx> {
    // Contains the DefIds of the opaque types that are currently being
    // expanded. When we expand an opaque type we insert the DefId of
    // that type, and when we finish expanding that type we remove
    // its DefId.
    seen_opaque_tys: FxHashSet<DefId>,
    // Cache of all expansions we've seen so far. This is a critical
    // optimization for some large types produced by async fn trees.
    expanded_cache: FxHashMap<(DefId, GenericArgsRef<'tcx>), Ty<'tcx>>,
    // The opaque type whose recursion we are primarily interested in
    // (see `found_recursion`).
    primary_def_id: Option<DefId>,
    // Set when recursion through `primary_def_id` itself was found.
    found_recursion: bool,
    // Set when recursion through *any* opaque type was found.
    found_any_recursion: bool,
    /// Whether or not to check for recursive opaque types.
    /// This is `true` when we're explicitly checking for opaque type
    /// recursion, and 'false' otherwise to avoid unnecessary work.
    check_recursion: bool,
    tcx: TyCtxt<'tcx>,
}
impl<'tcx> OpaqueTypeExpander<'tcx> {
    /// Expands the opaque type `def_id` instantiated with `args` into its
    /// hidden type, folding recursively. Returns `None` if recursion was
    /// detected (either previously or during this call).
    fn expand_opaque_ty(&mut self, def_id: DefId, args: GenericArgsRef<'tcx>) -> Option<Ty<'tcx>> {
        // Once recursion has been found anywhere, stop expanding entirely.
        if self.found_any_recursion {
            return None;
        }
        // Expand opaques inside the arguments first.
        let args = args.fold_with(self);
        if !self.check_recursion || self.seen_opaque_tys.insert(def_id) {
            // Either we don't care about recursion, or this opaque is not
            // currently being expanded — safe to proceed.
            let expanded_ty = match self.expanded_cache.get(&(def_id, args)) {
                Some(expanded_ty) => *expanded_ty,
                None => {
                    let generic_ty = self.tcx.type_of(def_id);
                    let concrete_ty = generic_ty.instantiate(self.tcx, args);
                    // Recursively expand any opaques within the hidden type.
                    let expanded_ty = self.fold_ty(concrete_ty);
                    self.expanded_cache.insert((def_id, args), expanded_ty);
                    expanded_ty
                }
            };
            if self.check_recursion {
                // Done expanding this opaque; it may legitimately appear again
                // in a sibling position.
                self.seen_opaque_tys.remove(&def_id);
            }
            Some(expanded_ty)
        } else {
            // If another opaque type that we contain is recursive, then it
            // will report the error, so we don't have to.
            self.found_any_recursion = true;
            self.found_recursion = def_id == *self.primary_def_id.as_ref().unwrap();
            None
        }
    }
}
impl<'tcx> TypeFolder<TyCtxt<'tcx>> for OpaqueTypeExpander<'tcx> {
    fn cx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
        if let ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) = *t.kind() {
            // Head is an opaque: expand it (falling back to the original type
            // if recursion was detected).
            self.expand_opaque_ty(def_id, args).unwrap_or(t)
        } else if t.has_opaque_types() {
            // Only recurse into subtrees that actually contain opaques.
            t.super_fold_with(self)
        } else {
            t
        }
    }

    fn fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> ty::Predicate<'tcx> {
        if let ty::PredicateKind::Clause(clause) = p.kind().skip_binder()
            && let ty::ClauseKind::Projection(projection_pred) = clause
        {
            p.kind()
                .rebind(ty::ProjectionPredicate {
                    projection_term: projection_pred.projection_term.fold_with(self),
                    // Don't fold the term on the RHS of the projection predicate.
                    // This is because for default trait methods with RPITITs, we
                    // install a `NormalizesTo(Projection(RPITIT) -> Opaque(RPITIT))`
                    // predicate, which would trivially cause a cycle when we do
                    // anything that requires `TypingEnv::with_post_analysis_normalized`.
                    term: projection_pred.term,
                })
                .upcast(self.tcx)
        } else {
            p.super_fold_with(self)
        }
    }
}
/// Type folder used by [`TyCtxt::expand_free_alias_tys`]: substitutes every
/// free alias type with its definition, tracking expansion depth so the
/// recursion limit can be enforced.
struct FreeAliasTypeExpander<'tcx> {
    tcx: TyCtxt<'tcx>,
    // Current expansion depth, checked against `tcx.recursion_limit()`.
    depth: usize,
}
impl<'tcx> TypeFolder<TyCtxt<'tcx>> for FreeAliasTypeExpander<'tcx> {
    fn cx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
        // Fast path: skip subtrees containing no free alias types at all.
        if !ty.has_type_flags(ty::TypeFlags::HAS_TY_FREE_ALIAS) {
            return ty;
        }
        let ty::Alias(ty::Free, alias) = ty.kind() else {
            return ty.super_fold_with(self);
        };
        if !self.tcx.recursion_limit().value_within_limit(self.depth) {
            // Delay a bug rather than aborting; callers are expected to fully
            // normalize later (see `expand_free_alias_tys` docs).
            let guar = self.tcx.dcx().delayed_bug("overflow expanding free alias type");
            return Ty::new_error(self.tcx, guar);
        }

        self.depth += 1;
        // Expansion can recurse deeply; grow the stack if needed.
        let ty = ensure_sufficient_stack(|| {
            self.tcx.type_of(alias.def_id).instantiate(self.tcx, alias.args).fold_with(self)
        });
        self.depth -= 1;
        ty
    }

    fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
        // Same fast path as `fold_ty`: only recurse when a free alias may occur.
        if !ct.has_type_flags(ty::TypeFlags::HAS_TY_FREE_ALIAS) {
            return ct;
        }
        ct.super_fold_with(self)
    }
}
10691070impl<'tcx> Ty<'tcx> {
1071/// Returns the `Size` for primitive types (bool, uint, int, char, float).
1072pub fn primitive_size(self, tcx: TyCtxt<'tcx>) -> Size {
1073match *self.kind() {
1074 ty::Bool => Size::from_bytes(1),
1075 ty::Char => Size::from_bytes(4),
1076 ty::Int(ity) => Integer::from_int_ty(&tcx, ity).size(),
1077 ty::Uint(uty) => Integer::from_uint_ty(&tcx, uty).size(),
1078 ty::Float(fty) => Float::from_float_ty(fty).size(),
1079_ => crate::util::bug::bug_fmt(format_args!("non primitive type"))bug!("non primitive type"),
1080 }
1081 }
10821083pub fn int_size_and_signed(self, tcx: TyCtxt<'tcx>) -> (Size, bool) {
1084match *self.kind() {
1085 ty::Int(ity) => (Integer::from_int_ty(&tcx, ity).size(), true),
1086 ty::Uint(uty) => (Integer::from_uint_ty(&tcx, uty).size(), false),
1087_ => crate::util::bug::bug_fmt(format_args!("non integer discriminant"))bug!("non integer discriminant"),
1088 }
1089 }
    /// Returns the minimum and maximum values for the given numeric type (including `char`s) or
    /// returns `None` if the type is not numeric.
    ///
    /// The values are returned as raw bit patterns (`u128`), suitable for
    /// `mir::Const::from_bits`.
    pub fn numeric_min_and_max_as_bits(self, tcx: TyCtxt<'tcx>) -> Option<(u128, u128)> {
        use rustc_apfloat::ieee::{Double, Half, Quad, Single};
        Some(match self.kind() {
            ty::Int(_) | ty::Uint(_) => {
                let (size, signed) = self.int_size_and_signed(tcx);
                // For signed types, truncate the sign-extended minimum down to
                // the type's width; unsigned types start at zero.
                let min = if signed { size.truncate(size.signed_int_min() as u128) } else { 0 };
                let max =
                    if signed { size.signed_int_max() as u128 } else { size.unsigned_int_max() };
                (min, max)
            }
            ty::Char => (0, std::char::MAX as u128),
            // Floats range over [-inf, +inf]: return the infinities' bit patterns.
            ty::Float(ty::FloatTy::F16) => ((-Half::INFINITY).to_bits(), Half::INFINITY.to_bits()),
            ty::Float(ty::FloatTy::F32) => {
                ((-Single::INFINITY).to_bits(), Single::INFINITY.to_bits())
            }
            ty::Float(ty::FloatTy::F64) => {
                ((-Double::INFINITY).to_bits(), Double::INFINITY.to_bits())
            }
            ty::Float(ty::FloatTy::F128) => ((-Quad::INFINITY).to_bits(), Quad::INFINITY.to_bits()),
            _ => return None,
        })
    }
11151116/// Returns the maximum value for the given numeric type (including `char`s)
1117 /// or returns `None` if the type is not numeric.
1118pub fn numeric_max_val(self, tcx: TyCtxt<'tcx>) -> Option<mir::Const<'tcx>> {
1119let typing_env = TypingEnv::fully_monomorphized();
1120self.numeric_min_and_max_as_bits(tcx)
1121 .map(|(_, max)| mir::Const::from_bits(tcx, max, typing_env, self))
1122 }
11231124/// Returns the minimum value for the given numeric type (including `char`s)
1125 /// or returns `None` if the type is not numeric.
1126pub fn numeric_min_val(self, tcx: TyCtxt<'tcx>) -> Option<mir::Const<'tcx>> {
1127let typing_env = TypingEnv::fully_monomorphized();
1128self.numeric_min_and_max_as_bits(tcx)
1129 .map(|(min, _)| mir::Const::from_bits(tcx, min, typing_env, self))
1130 }
    /// Checks whether values of this type `T` have a size known at
    /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
    /// for the purposes of this check, so it can be an
    /// over-approximation in generic contexts, where one can have
    /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
    /// actually carry lifetime requirements.
    pub fn is_sized(self, tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>) -> bool {
        // Fast path for types that are trivially sized; fall back to the
        // query only when the fast path is inconclusive.
        self.has_trivial_sizedness(tcx, SizedTraitKind::Sized)
            || tcx.is_sized_raw(typing_env.as_query_input(self))
    }
    /// Checks whether values of this type `T` implement the `Freeze`
    /// trait -- frozen types are those that do not contain an
    /// `UnsafeCell` anywhere. This is a language concept used to
    /// distinguish "true immutability", which is relevant to
    /// optimization as well as the rules around static values. Note
    /// that the `Freeze` trait is not exposed to end users and is
    /// effectively an implementation detail.
    pub fn is_freeze(self, tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>) -> bool {
        // Fast syntactic check first; only consult the query when needed.
        self.is_trivially_freeze() || tcx.is_freeze_raw(typing_env.as_query_input(self))
    }
    /// Fast path helper for testing if a type is `Freeze`.
    ///
    /// Returning true means the type is known to be `Freeze`. Returning
    /// `false` means nothing -- could be `Freeze`, might not be.
    pub fn is_trivially_freeze(self) -> bool {
        match self.kind() {
            // Primitives, pointers and fn types contain no interior mutability.
            ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Bool
            | ty::Char
            | ty::Str
            | ty::Never
            | ty::Ref(..)
            | ty::RawPtr(_, _)
            | ty::FnDef(..)
            | ty::Error(_)
            | ty::FnPtr(..) => true,
            // Compound types are trivially `Freeze` iff all constituents are.
            ty::Tuple(fields) => fields.iter().all(Self::is_trivially_freeze),
            ty::Pat(ty, _) | ty::Slice(ty) | ty::Array(ty, _) => ty.is_trivially_freeze(),
            // Everything else (ADTs, generics, opaque/alias types, …) needs
            // the full `is_freeze` query — conservatively answer `false`.
            ty::Adt(..)
            | ty::Bound(..)
            | ty::Closure(..)
            | ty::CoroutineClosure(..)
            | ty::Dynamic(..)
            | ty::Foreign(_)
            | ty::Coroutine(..)
            | ty::CoroutineWitness(..)
            | ty::UnsafeBinder(_)
            | ty::Infer(_)
            | ty::Alias(..)
            | ty::Param(_)
            | ty::Placeholder(_) => false,
        }
    }
    /// Checks whether values of this type `T` implement the `UnsafeUnpin` trait.
    pub fn is_unsafe_unpin(self, tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>) -> bool {
        // `is_trivially_unpin` covers both `Unpin` and `UnsafeUnpin` (see its docs).
        self.is_trivially_unpin() || tcx.is_unsafe_unpin_raw(typing_env.as_query_input(self))
    }
    /// Checks whether values of this type `T` implement the `Unpin` trait.
    ///
    /// Note that this is a safe trait, so it cannot be very semantically meaningful.
    /// However, as a hack to mitigate <https://github.com/rust-lang/rust/issues/63818> until a
    /// proper solution is implemented, we do give special semantics to the `Unpin` trait.
    pub fn is_unpin(self, tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>) -> bool {
        // Fast syntactic check first; only consult the query when needed.
        self.is_trivially_unpin() || tcx.is_unpin_raw(typing_env.as_query_input(self))
    }
    /// Fast path helper for testing if a type is `Unpin` *and* `UnsafeUnpin`.
    ///
    /// Returning true means the type is known to be `Unpin` and `UnsafeUnpin`. Returning
    /// `false` means nothing -- could be `Unpin`, might not be.
    fn is_trivially_unpin(self) -> bool {
        match self.kind() {
            // Primitives, pointers and fn types are unconditionally `Unpin`.
            ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Bool
            | ty::Char
            | ty::Str
            | ty::Never
            | ty::Ref(..)
            | ty::RawPtr(_, _)
            | ty::FnDef(..)
            | ty::Error(_)
            | ty::FnPtr(..) => true,
            // Compound types are trivially `Unpin` iff all constituents are.
            ty::Tuple(fields) => fields.iter().all(Self::is_trivially_unpin),
            ty::Pat(ty, _) | ty::Slice(ty) | ty::Array(ty, _) => ty.is_trivially_unpin(),
            // Everything else needs the full query — conservatively answer `false`.
            ty::Adt(..)
            | ty::Bound(..)
            | ty::Closure(..)
            | ty::CoroutineClosure(..)
            | ty::Dynamic(..)
            | ty::Foreign(_)
            | ty::Coroutine(..)
            | ty::CoroutineWitness(..)
            | ty::UnsafeBinder(_)
            | ty::Infer(_)
            | ty::Alias(..)
            | ty::Param(_)
            | ty::Placeholder(_) => false,
        }
    }
12391240/// Checks whether this type is an ADT that has unsafe fields.
1241pub fn has_unsafe_fields(self) -> bool {
1242if let ty::Adt(adt_def, ..) = self.kind() {
1243adt_def.all_fields().any(|x| x.safety.is_unsafe())
1244 } else {
1245false
1246}
1247 }
    /// Checks whether values of this type `T` implement the `AsyncDrop` trait.
    pub fn is_async_drop(self, tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>) -> bool {
        // Fast syntactic *negative* check first; only consult the query when
        // the type could possibly be `AsyncDrop`.
        !self.is_trivially_not_async_drop()
            && tcx.is_async_drop_raw(typing_env.as_query_input(self))
    }
    /// Fast path helper for testing if a type is `AsyncDrop`.
    ///
    /// Returning true means the type is known to be `!AsyncDrop`. Returning
    /// `false` means nothing -- could be `AsyncDrop`, might not be.
    fn is_trivially_not_async_drop(self) -> bool {
        match self.kind() {
            // Primitives, pointers and fn types never have async drop glue.
            ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Bool
            | ty::Char
            | ty::Str
            | ty::Never
            | ty::Ref(..)
            | ty::RawPtr(..)
            | ty::FnDef(..)
            | ty::Error(_)
            | ty::FnPtr(..) => true,
            // FIXME(unsafe_binders):
            ty::UnsafeBinder(_) => todo!(),
            // Compound types are trivially `!AsyncDrop` iff all constituents are.
            ty::Tuple(fields) => fields.iter().all(Self::is_trivially_not_async_drop),
            ty::Pat(elem_ty, _) | ty::Slice(elem_ty) | ty::Array(elem_ty, _) => {
                elem_ty.is_trivially_not_async_drop()
            }
            // Everything else needs the full query — conservatively answer `false`.
            ty::Adt(..)
            | ty::Bound(..)
            | ty::Closure(..)
            | ty::CoroutineClosure(..)
            | ty::Dynamic(..)
            | ty::Foreign(_)
            | ty::Coroutine(..)
            | ty::CoroutineWitness(..)
            | ty::Infer(_)
            | ty::Alias(..)
            | ty::Param(_)
            | ty::Placeholder(_) => false,
        }
    }
    /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
    /// non-copy and *might* have a destructor attached; if it returns
    /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
    ///
    /// (Note that this implies that if `ty` has a destructor attached,
    /// then `needs_drop` will definitely return `true` for `ty`.)
    ///
    /// Note that this method is used to check eligible types in unions.
    #[inline]
    pub fn needs_drop(self, tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>) -> bool {
        // Avoid querying in simple cases.
        match needs_drop_components(tcx, self) {
            Err(AlwaysRequiresDrop) => true,
            Ok(components) => {
                let query_ty = match *components {
                    // No drop-relevant components at all: definitely no drop glue.
                    [] => return false,
                    // If we've got a single component, call the query with that
                    // to increase the chance that we hit the query cache.
                    [component_ty] => component_ty,
                    _ => self,
                };

                // This doesn't depend on regions, so try to minimize distinct
                // query keys used. If normalization fails, we just use `query_ty`.
                debug_assert!(!typing_env.param_env.has_infer());
                let query_ty = tcx
                    .try_normalize_erasing_regions(typing_env, query_ty)
                    .unwrap_or_else(|_| tcx.erase_and_anonymize_regions(query_ty));

                tcx.needs_drop_raw(typing_env.as_query_input(query_ty))
            }
        }
    }
    /// If `ty.needs_async_drop(...)` returns `true`, then `ty` is definitely
    /// non-copy and *might* have a async destructor attached; if it returns
    /// `false`, then `ty` definitely has no async destructor (i.e., no async
    /// drop glue).
    ///
    /// (Note that this implies that if `ty` has an async destructor attached,
    /// then `needs_async_drop` will definitely return `true` for `ty`.)
    ///
    // FIXME(zetanumbers): Note that this method is used to check eligible types
    // in unions.
    #[inline]
    pub fn needs_async_drop(self, tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>) -> bool {
        // Avoid querying in simple cases.
        match needs_drop_components(tcx, self) {
            Err(AlwaysRequiresDrop) => true,
            Ok(components) => {
                let query_ty = match *components {
                    // No drop-relevant components at all: definitely no drop glue.
                    [] => return false,
                    // If we've got a single component, call the query with that
                    // to increase the chance that we hit the query cache.
                    [component_ty] => component_ty,
                    _ => self,
                };

                // This doesn't depend on regions, so try to minimize distinct
                // query keys used.
                // If normalization fails, we just use `query_ty`.
                debug_assert!(!typing_env.has_infer());
                let query_ty = tcx
                    .try_normalize_erasing_regions(typing_env, query_ty)
                    .unwrap_or_else(|_| tcx.erase_and_anonymize_regions(query_ty));

                tcx.needs_async_drop_raw(typing_env.as_query_input(query_ty))
            }
        }
    }
    /// Checks if `ty` has a significant drop.
    ///
    /// Note that this method can return false even if `ty` has a destructor
    /// attached; even if that is the case then the adt has been marked with
    /// the attribute `rustc_insignificant_dtor`.
    ///
    /// Note that this method is used to check for change in drop order for
    /// 2229 drop reorder migration analysis.
    #[inline]
    pub fn has_significant_drop(self, tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>) -> bool {
        // Avoid querying in simple cases.
        match needs_drop_components(tcx, self) {
            Err(AlwaysRequiresDrop) => true,
            Ok(components) => {
                let query_ty = match *components {
                    // No drop-relevant components at all: no significant drop.
                    [] => return false,
                    // If we've got a single component, call the query with that
                    // to increase the chance that we hit the query cache.
                    [component_ty] => component_ty,
                    _ => self,
                };

                // FIXME
                // We should be canonicalizing, or else moving this to a method of inference
                // context, or *something* like that,
                // but for now just avoid passing inference variables
                // to queries that can't cope with them.
                // Instead, conservatively return "true" (may change drop order).
                if query_ty.has_infer() {
                    return true;
                }

                // This doesn't depend on regions, so try to minimize distinct
                // query keys used.
                // FIX: Use try_normalize to avoid crashing. If it fails, return true
                // (the conservative answer for this analysis).
                tcx.try_normalize_erasing_regions(typing_env, query_ty)
                    .map(|erased| tcx.has_significant_drop_raw(typing_env.as_query_input(erased)))
                    .unwrap_or(true)
            }
        }
    }
    /// Returns `true` if equality for this type is both reflexive and structural.
    ///
    /// Reflexive equality for a type is indicated by an `Eq` impl for that type.
    ///
    /// Primitive types (`u32`, `str`) have structural equality by definition. For composite data
    /// types, equality for the type as a whole is structural when it is the same as equality
    /// between all components (fields, array elements, etc.) of that type. For ADTs, structural
    /// equality is indicated by an implementation of `StructuralPartialEq` for that type.
    ///
    /// This function is "shallow" because it may return `true` for a composite type whose fields
    /// are not `StructuralPartialEq`. For example, `[T; 4]` has structural equality regardless of `T`
    /// because equality for arrays is determined by the equality of each array element. If you
    /// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way
    /// down, you will need to use a type visitor.
    #[inline]
    pub fn is_structural_eq_shallow(self, tcx: TyCtxt<'tcx>) -> bool {
        match self.kind() {
            // Look for an impl of `StructuralPartialEq`.
            ty::Adt(..) => tcx.has_structural_eq_impl(self),

            // Primitive types that satisfy `Eq`.
            ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Str | ty::Never => true,

            // Composite types that satisfy `Eq` when all of their fields do.
            //
            // Because this function is "shallow", we return `true` for these composites regardless
            // of the type(s) contained within.
            ty::Pat(..) | ty::Ref(..) | ty::Array(..) | ty::Slice(_) | ty::Tuple(..) => true,

            // Raw pointers use bitwise comparison.
            ty::RawPtr(_, _) | ty::FnPtr(..) => true,

            // Floating point numbers are not `Eq`.
            ty::Float(_) => false,

            // Conservatively return `false` for all others...

            // Anonymous function types
            ty::FnDef(..)
            | ty::Closure(..)
            | ty::CoroutineClosure(..)
            | ty::Dynamic(..)
            | ty::Coroutine(..) => false,

            // Generic or inferred types
            //
            // FIXME(ecstaticmorse): Maybe we should `bug` here? This should probably only be
            // called for known, fully-monomorphized types.
            ty::Alias(..) | ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) => {
                false
            }

            ty::Foreign(_) | ty::CoroutineWitness(..) | ty::Error(_) | ty::UnsafeBinder(_) => false,
        }
    }
14621463/// Peel off all reference types in this type until there are none left.
1464 ///
1465 /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
1466 ///
1467 /// # Examples
1468 ///
1469 /// - `u8` -> `u8`
1470 /// - `&'a mut u8` -> `u8`
1471 /// - `&'a &'b u8` -> `u8`
1472 /// - `&'a *const &'b u8 -> *const &'b u8`
1473pub fn peel_refs(self) -> Ty<'tcx> {
1474let mut ty = self;
1475while let ty::Ref(_, inner_ty, _) = ty.kind() {
1476 ty = *inner_ty;
1477 }
1478ty1479 }
    // FIXME(compiler-errors): Think about removing this.
    /// Returns the cached `outer_exclusive_binder` stored on the interned type
    /// (a `DebruijnIndex` precomputed when the type was created — presumably the
    /// exclusive upper bound of bound-variable indices; TODO confirm).
    #[inline]
    pub fn outer_exclusive_binder(self) -> ty::DebruijnIndex {
        self.0.outer_exclusive_binder
    }
1486}
/// Returns a list of types such that the given type needs drop if and only if
/// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
/// this type always needs drop.
//
// FIXME(zetanumbers): consider replacing this with only
// `needs_drop_components_with_async`
#[inline]
pub fn needs_drop_components<'tcx>(
    tcx: TyCtxt<'tcx>,
    ty: Ty<'tcx>,
) -> Result<SmallVec<[Ty<'tcx>; 2]>, AlwaysRequiresDrop> {
    // Plain (non-async) drop: delegate to the general helper.
    needs_drop_components_with_async(tcx, ty, Asyncness::No)
}
/// Returns a list of types such that the given type needs drop if and only if
/// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
/// this type always needs drop.
pub fn needs_drop_components_with_async<'tcx>(
    tcx: TyCtxt<'tcx>,
    ty: Ty<'tcx>,
    asyncness: Asyncness,
) -> Result<SmallVec<[Ty<'tcx>; 2]>, AlwaysRequiresDrop> {
    match *ty.kind() {
        // Primitives, pointers and fn types never need drop.
        ty::Infer(ty::FreshIntTy(_))
        | ty::Infer(ty::FreshFloatTy(_))
        | ty::Bool
        | ty::Int(_)
        | ty::Uint(_)
        | ty::Float(_)
        | ty::Never
        | ty::FnDef(..)
        | ty::FnPtr(..)
        | ty::Char
        | ty::RawPtr(_, _)
        | ty::Ref(..)
        | ty::Str => Ok(SmallVec::new()),

        // Foreign types can never have destructors.
        ty::Foreign(..) => Ok(SmallVec::new()),

        // FIXME(zetanumbers): Temporary workaround for async drop of dynamic types
        ty::Dynamic(..) | ty::Error(_) => {
            if asyncness.is_async() {
                Ok(SmallVec::new())
            } else {
                Err(AlwaysRequiresDrop)
            }
        }

        // Wrappers need drop exactly when their element type does.
        ty::Pat(ty, _) | ty::Slice(ty) => needs_drop_components_with_async(tcx, ty, asyncness),
        ty::Array(elem_ty, size) => {
            match needs_drop_components_with_async(tcx, elem_ty, asyncness) {
                Ok(v) if v.is_empty() => Ok(v),
                res => match size.try_to_target_usize(tcx) {
                    // Arrays of size zero don't need drop, even if their element
                    // type does.
                    Some(0) => Ok(SmallVec::new()),
                    Some(_) => res,
                    // We don't know which of the cases above we are in, so
                    // return the whole type and let the caller decide what to
                    // do.
                    None => Ok(smallvec![ty]),
                },
            }
        }
        // If any field needs drop, then the whole tuple does.
        ty::Tuple(fields) => fields.iter().try_fold(SmallVec::new(), move |mut acc, elem| {
            acc.extend(needs_drop_components_with_async(tcx, elem, asyncness)?);
            Ok(acc)
        }),

        // These require checking for `Copy` bounds or `Adt` destructors.
        ty::Adt(..)
        | ty::Alias(..)
        | ty::Param(_)
        | ty::Bound(..)
        | ty::Placeholder(..)
        | ty::Infer(_)
        | ty::Closure(..)
        | ty::CoroutineClosure(..)
        | ty::Coroutine(..)
        | ty::CoroutineWitness(..)
        | ty::UnsafeBinder(_) => Ok(smallvec![ty]),
    }
}
15731574/// Does the equivalent of
1575/// ```ignore (illustrative)
1576/// let v = self.iter().map(|p| p.fold_with(folder)).collect::<SmallVec<[_; 8]>>();
1577/// folder.tcx().intern_*(&v)
1578/// ```
1579pub fn fold_list<'tcx, F, L, T>(
1580 list: L,
1581 folder: &mut F,
1582 intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> L,
1583) -> L
1584where
1585F: TypeFolder<TyCtxt<'tcx>>,
1586 L: AsRef<[T]>,
1587 T: TypeFoldable<TyCtxt<'tcx>> + PartialEq + Copy,
1588{
1589let slice = list.as_ref();
1590let mut iter = slice.iter().copied();
1591// Look for the first element that changed
1592match iter.by_ref().enumerate().find_map(|(i, t)| {
1593let new_t = t.fold_with(folder);
1594if new_t != t { Some((i, new_t)) } else { None }
1595 }) {
1596Some((i, new_t)) => {
1597// An element changed, prepare to intern the resulting list
1598let mut new_list = SmallVec::<[_; 8]>::with_capacity(slice.len());
1599new_list.extend_from_slice(&slice[..i]);
1600new_list.push(new_t);
1601for t in iter {
1602 new_list.push(t.fold_with(folder))
1603 }
1604intern(folder.cx(), &new_list)
1605 }
1606None => list,
1607 }
1608}
16091610/// Does the equivalent of
1611/// ```ignore (illustrative)
1612/// let v = self.iter().map(|p| p.try_fold_with(folder)).collect::<SmallVec<[_; 8]>>();
1613/// folder.tcx().intern_*(&v)
1614/// ```
1615pub fn try_fold_list<'tcx, F, L, T>(
1616 list: L,
1617 folder: &mut F,
1618 intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> L,
1619) -> Result<L, F::Error>
1620where
1621F: FallibleTypeFolder<TyCtxt<'tcx>>,
1622 L: AsRef<[T]>,
1623 T: TypeFoldable<TyCtxt<'tcx>> + PartialEq + Copy,
1624{
1625let slice = list.as_ref();
1626let mut iter = slice.iter().copied();
1627// Look for the first element that changed
1628match iter.by_ref().enumerate().find_map(|(i, t)| match t.try_fold_with(folder) {
1629Ok(new_t) if new_t == t => None,
1630 new_t => Some((i, new_t)),
1631 }) {
1632Some((i, Ok(new_t))) => {
1633// An element changed, prepare to intern the resulting list
1634let mut new_list = SmallVec::<[_; 8]>::with_capacity(slice.len());
1635new_list.extend_from_slice(&slice[..i]);
1636new_list.push(new_t);
1637for t in iter {
1638 new_list.push(t.try_fold_with(folder)?)
1639 }
1640Ok(intern(folder.cx(), &new_list))
1641 }
1642Some((_, Err(err))) => {
1643return Err(err);
1644 }
1645None => Ok(list),
1646 }
1647}
16481649#[derive(#[automatically_derived]
impl ::core::marker::Copy for AlwaysRequiresDrop { }Copy, #[automatically_derived]
impl ::core::clone::Clone for AlwaysRequiresDrop {
#[inline]
fn clone(&self) -> AlwaysRequiresDrop { *self }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for AlwaysRequiresDrop {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::write_str(f, "AlwaysRequiresDrop")
}
}Debug, const _: () =
{
impl<'__ctx>
::rustc_data_structures::stable_hasher::HashStable<::rustc_middle::ich::StableHashingContext<'__ctx>>
for AlwaysRequiresDrop {
#[inline]
fn hash_stable(&self,
__hcx: &mut ::rustc_middle::ich::StableHashingContext<'__ctx>,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self { AlwaysRequiresDrop => {} }
}
}
};HashStable, const _: () =
{
impl<'tcx, __E: ::rustc_middle::ty::codec::TyEncoder<'tcx>>
::rustc_serialize::Encodable<__E> for AlwaysRequiresDrop {
fn encode(&self, __encoder: &mut __E) {
match *self { AlwaysRequiresDrop => {} }
}
}
};TyEncodable, const _: () =
{
impl<'tcx, __D: ::rustc_middle::ty::codec::TyDecoder<'tcx>>
::rustc_serialize::Decodable<__D> for AlwaysRequiresDrop {
fn decode(__decoder: &mut __D) -> Self { AlwaysRequiresDrop }
}
};TyDecodable)]
1650pub struct AlwaysRequiresDrop;
16511652/// Reveals all opaque types in the given value, replacing them
1653/// with their underlying types.
1654pub fn reveal_opaque_types_in_bounds<'tcx>(
1655 tcx: TyCtxt<'tcx>,
1656 val: ty::Clauses<'tcx>,
1657) -> ty::Clauses<'tcx> {
1658if !!tcx.next_trait_solver_globally() {
::core::panicking::panic("assertion failed: !tcx.next_trait_solver_globally()")
};assert!(!tcx.next_trait_solver_globally());
1659let mut visitor = OpaqueTypeExpander {
1660 seen_opaque_tys: FxHashSet::default(),
1661 expanded_cache: FxHashMap::default(),
1662 primary_def_id: None,
1663 found_recursion: false,
1664 found_any_recursion: false,
1665 check_recursion: false,
1666tcx,
1667 };
1668val.fold_with(&mut visitor)
1669}
16701671/// Determines whether an item is directly annotated with `doc(hidden)`.
1672fn is_doc_hidden(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
1673{
#[allow(deprecated)]
{
{
'done:
{
for i in tcx.get_all_attrs(def_id) {
#[allow(unused_imports)]
use rustc_hir::attrs::AttributeKind::*;
let i: &rustc_hir::Attribute = i;
match i {
rustc_hir::Attribute::Parsed(Doc(doc)) if
doc.hidden.is_some() => {
break 'done Some(());
}
rustc_hir::Attribute::Unparsed(..) =>
{}
#[deny(unreachable_patterns)]
_ => {}
}
}
None
}
}
}
}.is_some()find_attr!(tcx, def_id, Doc(doc) if doc.hidden.is_some())1674}
16751676/// Determines whether an item is annotated with `doc(notable_trait)`.
1677pub fn is_doc_notable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
1678{
#[allow(deprecated)]
{
{
'done:
{
for i in tcx.get_all_attrs(def_id) {
#[allow(unused_imports)]
use rustc_hir::attrs::AttributeKind::*;
let i: &rustc_hir::Attribute = i;
match i {
rustc_hir::Attribute::Parsed(Doc(doc)) if
doc.notable_trait.is_some() => {
break 'done Some(());
}
rustc_hir::Attribute::Unparsed(..) =>
{}
#[deny(unreachable_patterns)]
_ => {}
}
}
None
}
}
}
}.is_some()find_attr!(tcx, def_id, Doc(doc) if doc.notable_trait.is_some())1679}
16801681/// Determines whether an item is an intrinsic (which may be via Abi or via the `rustc_intrinsic` attribute).
1682///
1683/// We double check the feature gate here because whether a function may be defined as an intrinsic causes
1684/// the compiler to make some assumptions about its shape; if the user doesn't use a feature gate, they may
1685/// cause an ICE that we otherwise may want to prevent.
1686pub fn intrinsic_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<ty::IntrinsicDef> {
1687if tcx.features().intrinsics() && {
#[allow(deprecated)]
{
{
'done:
{
for i in tcx.get_all_attrs(def_id) {
#[allow(unused_imports)]
use rustc_hir::attrs::AttributeKind::*;
let i: &rustc_hir::Attribute = i;
match i {
rustc_hir::Attribute::Parsed(RustcIntrinsic) => {
break 'done Some(());
}
rustc_hir::Attribute::Unparsed(..) =>
{}
#[deny(unreachable_patterns)]
_ => {}
}
}
None
}
}
}
}.is_some()find_attr!(tcx, def_id, RustcIntrinsic) {
1688let must_be_overridden = match tcx.hir_node_by_def_id(def_id) {
1689 hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn { has_body, .. }, .. }) => {
1690 !has_body1691 }
1692_ => true,
1693 };
1694Some(ty::IntrinsicDef {
1695 name: tcx.item_name(def_id),
1696must_be_overridden,
1697 const_stable: {
#[allow(deprecated)]
{
{
'done:
{
for i in tcx.get_all_attrs(def_id) {
#[allow(unused_imports)]
use rustc_hir::attrs::AttributeKind::*;
let i: &rustc_hir::Attribute = i;
match i {
rustc_hir::Attribute::Parsed(RustcIntrinsicConstStableIndirect)
=> {
break 'done Some(());
}
rustc_hir::Attribute::Unparsed(..) =>
{}
#[deny(unreachable_patterns)]
_ => {}
}
}
None
}
}
}
}.is_some()find_attr!(tcx, def_id, RustcIntrinsicConstStableIndirect),
1698 })
1699 } else {
1700None1701 }
1702}
17031704pub fn provide(providers: &mut Providers) {
1705*providers = Providers {
1706reveal_opaque_types_in_bounds,
1707is_doc_hidden,
1708is_doc_notable_trait,
1709intrinsic_raw,
1710 ..*providers1711 }
1712}