Skip to main content

rustc_abi/
lib.rs

1// tidy-alphabetical-start
2#![cfg_attr(feature = "nightly", allow(internal_features))]
3#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
4#![cfg_attr(feature = "nightly", feature(step_trait))]
5// tidy-alphabetical-end
6
/*! ABI handling for rustc

## What is an "ABI"?

Literally, "application binary interface", which means it is everything about how code interacts,
at the machine level, with other code. This means it technically covers all of the following:
- object binary format for e.g. relocations or offset tables
- in-memory layout of types
- procedure calling conventions

When we discuss "ABI" in the context of rustc, we are probably discussing calling conventions.
To describe those, `rustc_abi` also covers type layout, as it must for values passed on the stack.
Despite `rustc_abi` being about calling conventions, it is good to remember these usages exist.
You will encounter all of them and more if you study target-specific codegen enough!
Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to
either or both of
- `repr(Rust)` types have a mostly-unspecified layout
- `extern "Rust" fn(A) -> R` has an unspecified calling convention

## Crate Goal

ABI is a foundational concept, so the `rustc_abi` crate serves as an equally foundational crate.
It cannot carry all details relevant to an ABI: those permeate code generation and linkage.
Instead, `rustc_abi` is intended to provide the interface for reasoning about the binary interface.
It should contain traits and types that other crates then use in their implementation.
For example, a platform's `extern "C" fn` calling convention will be implemented in `rustc_target`
but `rustc_abi` contains the types for calculating layout and describing register-passing.
This makes it easier to describe things in the same way across targets, codegen backends, and
even other Rust compilers, such as rust-analyzer!

*/
38
39use std::fmt;
40#[cfg(feature = "nightly")]
41use std::iter::Step;
42use std::num::{NonZeroUsize, ParseIntError};
43use std::ops::{Add, AddAssign, Deref, Mul, RangeFull, RangeInclusive, Sub};
44use std::str::FromStr;
45
46use bitflags::bitflags;
47#[cfg(feature = "nightly")]
48use rustc_data_structures::stable_hasher::StableOrd;
49#[cfg(feature = "nightly")]
50use rustc_error_messages::{DiagArgValue, IntoDiagArg};
51#[cfg(feature = "nightly")]
52use rustc_errors::{Diag, DiagCtxtHandle, Diagnostic, EmissionGuarantee, Level, msg};
53use rustc_hashes::Hash64;
54use rustc_index::{Idx, IndexSlice, IndexVec};
55#[cfg(feature = "nightly")]
56use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_Generic};
57#[cfg(feature = "nightly")]
58use rustc_span::{Symbol, sym};
59
60mod callconv;
61mod canon_abi;
62mod extern_abi;
63mod layout;
64#[cfg(test)]
65mod tests;
66
67pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
68pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
69#[cfg(feature = "nightly")]
70pub use extern_abi::CVariadicStatus;
71pub use extern_abi::{ExternAbi, all_names};
72pub use layout::{FIRST_VARIANT, FieldIdx, LayoutCalculator, LayoutCalculatorError, VariantIdx};
73#[cfg(feature = "nightly")]
74pub use layout::{Layout, TyAbiInterface, TyAndLayout};
75
/// Bit flags recording layout-affecting properties requested for a type
/// (the flag names mirror `#[repr(...)]` attributes and related crate-level
/// opt-ins; the individual flag values are defined in the `bitflags!` block
/// for `ReprFlags` further down in this file).
//
// NOTE(review): this span in the scraped source had the derive-macro
// expansions pasted inline into the `#[derive(...)]` lists, which is not
// valid Rust; this restores the intended declaration. The derive set
// (Clone, Copy, PartialEq, Eq, Default, plus the nightly-only serialization
// and stable-hashing derives) is reconstructed from the expansions that
// were visible.
#[derive(Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct ReprFlags(u8);
82
83impl ReprFlags {
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_C: Self = Self::from_bits_retain(1 << 0);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_SIMD: Self = Self::from_bits_retain(1 << 1);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_TRANSPARENT: Self = Self::from_bits_retain(1 << 2);
    #[doc = r" Internal only for now. If true, don't reorder fields."]
    #[doc = r" On its own it does not prevent ABI optimizations."]
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_LINEAR: Self = Self::from_bits_retain(1 << 3);
    #[doc =
    r" If true, the type's crate has opted into layout randomization."]
    #[doc =
    r" Other flags can still inhibit reordering and thus randomization."]
    #[doc = r" The seed stored in `ReprOptions.field_shuffle_seed`."]
    #[allow(deprecated, non_upper_case_globals,)]
    pub const RANDOMIZE_LAYOUT: Self = Self::from_bits_retain(1 << 4);
    #[doc =
    r" If true, the type is always passed indirectly by non-Rustic ABIs."]
    #[doc =
    r" See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details."]
    #[allow(deprecated, non_upper_case_globals,)]
    pub const PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS: Self =
        Self::from_bits_retain(1 << 5);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_SCALABLE: Self = Self::from_bits_retain(1 << 6);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const FIELD_ORDER_UNOPTIMIZABLE: Self =
        Self::from_bits_retain(ReprFlags::IS_C.bits() |
                        ReprFlags::IS_SIMD.bits() | ReprFlags::IS_SCALABLE.bits() |
                ReprFlags::IS_LINEAR.bits());
    #[allow(deprecated, non_upper_case_globals,)]
    pub const ABI_UNOPTIMIZABLE: Self =
        Self::from_bits_retain(ReprFlags::IS_C.bits() |
                ReprFlags::IS_SIMD.bits());
}
impl ::bitflags::Flags for ReprFlags {
    const FLAGS: &'static [::bitflags::Flag<ReprFlags>] =
        &[{

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_C", ReprFlags::IS_C)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_SIMD", ReprFlags::IS_SIMD)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_TRANSPARENT",
                            ReprFlags::IS_TRANSPARENT)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_LINEAR", ReprFlags::IS_LINEAR)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("RANDOMIZE_LAYOUT",
                            ReprFlags::RANDOMIZE_LAYOUT)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS",
                            ReprFlags::PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_SCALABLE", ReprFlags::IS_SCALABLE)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("FIELD_ORDER_UNOPTIMIZABLE",
                            ReprFlags::FIELD_ORDER_UNOPTIMIZABLE)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("ABI_UNOPTIMIZABLE",
                            ReprFlags::ABI_UNOPTIMIZABLE)
                    }];
    type Bits = u8;
    fn bits(&self) -> u8 { ReprFlags::bits(self) }
    fn from_bits_retain(bits: u8) -> ReprFlags {
        ReprFlags::from_bits_retain(bits)
    }
}
#[allow(dead_code, deprecated, unused_doc_comments, unused_attributes,
unused_mut, unused_imports, non_upper_case_globals, clippy ::
assign_op_pattern, clippy :: iter_without_into_iter,)]
const _: () =
    {
        #[allow(dead_code, deprecated, unused_attributes)]
        impl ReprFlags {
            /// Get a flags value with all bits unset.
            #[inline]
            pub const fn empty() -> Self {
                Self(<u8 as ::bitflags::Bits>::EMPTY)
            }
            /// Get a flags value with all known bits set.
            #[inline]
            pub const fn all() -> Self {
                let mut truncated = <u8 as ::bitflags::Bits>::EMPTY;
                let mut i = 0;
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                let _ = i;
                Self(truncated)
            }
            /// Get the underlying bits value.
            ///
            /// The returned value is exactly the bits set in this flags value.
            #[inline]
            pub const fn bits(&self) -> u8 { self.0 }
            /// Convert from a bits value.
            ///
            /// This method will return `None` if any unknown bits are set.
            #[inline]
            pub const fn from_bits(bits: u8)
                -> ::bitflags::__private::core::option::Option<Self> {
                let truncated = Self::from_bits_truncate(bits).0;
                if truncated == bits {
                    ::bitflags::__private::core::option::Option::Some(Self(bits))
                } else { ::bitflags::__private::core::option::Option::None }
            }
            /// Convert from a bits value, unsetting any unknown bits.
            #[inline]
            pub const fn from_bits_truncate(bits: u8) -> Self {
                Self(bits & Self::all().0)
            }
            /// Convert from a bits value exactly.
            #[inline]
            pub const fn from_bits_retain(bits: u8) -> Self { Self(bits) }
            /// Get a flags value with the bits of a flag with the given name set.
            ///
            /// This method will return `None` if `name` is empty or doesn't
            /// correspond to any named flag.
            #[inline]
            pub fn from_name(name: &str)
                -> ::bitflags::__private::core::option::Option<Self> {
                {
                    if name == "IS_C" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_C.bits()));
                    }
                };
                ;
                {
                    if name == "IS_SIMD" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_SIMD.bits()));
                    }
                };
                ;
                {
                    if name == "IS_TRANSPARENT" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_TRANSPARENT.bits()));
                    }
                };
                ;
                {
                    if name == "IS_LINEAR" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_LINEAR.bits()));
                    }
                };
                ;
                {
                    if name == "RANDOMIZE_LAYOUT" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::RANDOMIZE_LAYOUT.bits()));
                    }
                };
                ;
                {
                    if name == "PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS.bits()));
                    }
                };
                ;
                {
                    if name == "IS_SCALABLE" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_SCALABLE.bits()));
                    }
                };
                ;
                {
                    if name == "FIELD_ORDER_UNOPTIMIZABLE" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE.bits()));
                    }
                };
                ;
                {
                    if name == "ABI_UNOPTIMIZABLE" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::ABI_UNOPTIMIZABLE.bits()));
                    }
                };
                ;
                let _ = name;
                ::bitflags::__private::core::option::Option::None
            }
            /// Whether all bits in this flags value are unset.
            #[inline]
            pub const fn is_empty(&self) -> bool {
                self.0 == <u8 as ::bitflags::Bits>::EMPTY
            }
            /// Whether all known bits in this flags value are set.
            #[inline]
            pub const fn is_all(&self) -> bool {
                Self::all().0 | self.0 == self.0
            }
            /// Whether any set bits in a source flags value are also set in a target flags value.
            #[inline]
            pub const fn intersects(&self, other: Self) -> bool {
                self.0 & other.0 != <u8 as ::bitflags::Bits>::EMPTY
            }
            /// Whether all set bits in a source flags value are also set in a target flags value.
            #[inline]
            pub const fn contains(&self, other: Self) -> bool {
                self.0 & other.0 == other.0
            }
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            pub fn insert(&mut self, other: Self) {
                *self = Self(self.0).union(other);
            }
            /// The intersection of a source flags value with the complement of a target flags
            /// value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `remove` won't truncate `other`, but the `!` operator will.
            #[inline]
            pub fn remove(&mut self, other: Self) {
                *self = Self(self.0).difference(other);
            }
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            pub fn toggle(&mut self, other: Self) {
                *self = Self(self.0).symmetric_difference(other);
            }
            /// Call `insert` when `value` is `true` or `remove` when `value` is `false`.
            #[inline]
            pub fn set(&mut self, other: Self, value: bool) {
                if value { self.insert(other); } else { self.remove(other); }
            }
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn intersection(self, other: Self) -> Self {
                Self(self.0 & other.0)
            }
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn union(self, other: Self) -> Self {
                Self(self.0 | other.0)
            }
            /// The intersection of a source flags value with the complement of a target flags
            /// value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            #[must_use]
            pub const fn difference(self, other: Self) -> Self {
                Self(self.0 & !other.0)
            }
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn symmetric_difference(self, other: Self) -> Self {
                Self(self.0 ^ other.0)
            }
            /// The bitwise negation (`!`) of the bits in a flags value, truncating the result.
            #[inline]
            #[must_use]
            pub const fn complement(self) -> Self {
                Self::from_bits_truncate(!self.0)
            }
        }
        impl ::bitflags::__private::core::fmt::Binary for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::Binary::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::Octal for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::Octal::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::LowerHex for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::LowerHex::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::UpperHex for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::UpperHex::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::ops::BitOr for ReprFlags {
            type Output = Self;
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            fn bitor(self, other: ReprFlags) -> Self { self.union(other) }
        }
        impl ::bitflags::__private::core::ops::BitOrAssign for ReprFlags {
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            fn bitor_assign(&mut self, other: Self) { self.insert(other); }
        }
        impl ::bitflags::__private::core::ops::BitXor for ReprFlags {
            type Output = Self;
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            fn bitxor(self, other: Self) -> Self {
                self.symmetric_difference(other)
            }
        }
        impl ::bitflags::__private::core::ops::BitXorAssign for ReprFlags {
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            fn bitxor_assign(&mut self, other: Self) { self.toggle(other); }
        }
        impl ::bitflags::__private::core::ops::BitAnd for ReprFlags {
            type Output = Self;
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            fn bitand(self, other: Self) -> Self { self.intersection(other) }
        }
        impl ::bitflags::__private::core::ops::BitAndAssign for ReprFlags {
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            fn bitand_assign(&mut self, other: Self) {
                *self =
                    Self::from_bits_retain(self.bits()).intersection(other);
            }
        }
        impl ::bitflags::__private::core::ops::Sub for ReprFlags {
            type Output = Self;
            /// The intersection of a source flags value with the complement of a target flags value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            fn sub(self, other: Self) -> Self { self.difference(other) }
        }
        impl ::bitflags::__private::core::ops::SubAssign for ReprFlags {
            /// The intersection of a source flags value with the complement of a target flags value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            fn sub_assign(&mut self, other: Self) { self.remove(other); }
        }
        impl ::bitflags::__private::core::ops::Not for ReprFlags {
            type Output = Self;
            /// The bitwise negation (`!`) of the bits in a flags value, truncating the result.
            #[inline]
            fn not(self) -> Self { self.complement() }
        }
        impl ::bitflags::__private::core::iter::Extend<ReprFlags> for
            ReprFlags {
            /// The bitwise or (`|`) of the bits in each flags value.
            fn extend<T: ::bitflags::__private::core::iter::IntoIterator<Item
                = Self>>(&mut self, iterator: T) {
                for item in iterator { self.insert(item) }
            }
        }
        impl ::bitflags::__private::core::iter::FromIterator<ReprFlags> for
            ReprFlags {
            /// The bitwise or (`|`) of the bits in each flags value.
            fn from_iter<T: ::bitflags::__private::core::iter::IntoIterator<Item
                = Self>>(iterator: T) -> Self {
                use ::bitflags::__private::core::iter::Extend;
                let mut result = Self::empty();
                result.extend(iterator);
                result
            }
        }
        impl ReprFlags {
            /// Yield a set of contained flags values.
            ///
            /// Each yielded flags value will correspond to a defined named flag. Any unknown bits
            /// will be yielded together as a final flags value.
            #[inline]
            pub const fn iter(&self) -> ::bitflags::iter::Iter<ReprFlags> {
                ::bitflags::iter::Iter::__private_const_new(<ReprFlags as
                        ::bitflags::Flags>::FLAGS,
                    ReprFlags::from_bits_retain(self.bits()),
                    ReprFlags::from_bits_retain(self.bits()))
            }
            /// Yield a set of contained named flags values.
            ///
            /// This method is like [`iter`](#method.iter), except only yields bits in contained named flags.
            /// Any unknown bits, or bits not corresponding to a contained flag will not be yielded.
            #[inline]
            pub const fn iter_names(&self)
                -> ::bitflags::iter::IterNames<ReprFlags> {
                ::bitflags::iter::IterNames::__private_const_new(<ReprFlags as
                        ::bitflags::Flags>::FLAGS,
                    ReprFlags::from_bits_retain(self.bits()),
                    ReprFlags::from_bits_retain(self.bits()))
            }
        }
        impl ::bitflags::__private::core::iter::IntoIterator for ReprFlags {
            type Item = ReprFlags;
            type IntoIter = ::bitflags::iter::Iter<ReprFlags>;
            fn into_iter(self) -> Self::IntoIter { self.iter() }
        }
    };bitflags! {
84    impl ReprFlags: u8 {
85        const IS_C               = 1 << 0;
86        const IS_SIMD            = 1 << 1;
87        const IS_TRANSPARENT     = 1 << 2;
88        /// Internal only for now. If true, don't reorder fields.
89        /// On its own it does not prevent ABI optimizations.
90        const IS_LINEAR          = 1 << 3;
91        /// If true, the type's crate has opted into layout randomization.
92        /// Other flags can still inhibit reordering and thus randomization.
93        /// The seed stored in `ReprOptions.field_shuffle_seed`.
94        const RANDOMIZE_LAYOUT   = 1 << 4;
95        /// If true, the type is always passed indirectly by non-Rustic ABIs.
96        /// See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details.
97        const PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS = 1 << 5;
98        const IS_SCALABLE        = 1 << 6;
99         // Any of these flags being set prevent field reordering optimisation.
100        const FIELD_ORDER_UNOPTIMIZABLE = ReprFlags::IS_C.bits()
101                                 | ReprFlags::IS_SIMD.bits()
102                                 | ReprFlags::IS_SCALABLE.bits()
103                                 | ReprFlags::IS_LINEAR.bits();
104        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
105    }
106}
107
108// This is the same as `rustc_data_structures::external_bitflags_debug` but without the
109// `rustc_data_structures` to make it build on stable.
110impl std::fmt::Debug for ReprFlags {
111    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
112        bitflags::parser::to_writer(self, f)
113    }
114}
115
116#[derive(#[automatically_derived]
impl ::core::marker::Copy for IntegerType { }Copy, #[automatically_derived]
impl ::core::clone::Clone for IntegerType {
    #[inline]
    fn clone(&self) -> IntegerType {
        let _: ::core::clone::AssertParamIsClone<bool>;
        let _: ::core::clone::AssertParamIsClone<Integer>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for IntegerType {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            IntegerType::Pointer(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f,
                    "Pointer", &__self_0),
            IntegerType::Fixed(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f, "Fixed",
                    __self_0, &__self_1),
        }
    }
}Debug, #[automatically_derived]
impl ::core::cmp::Eq for IntegerType {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<Integer>;
    }
}Eq, #[automatically_derived]
impl ::core::cmp::PartialEq for IntegerType {
    #[inline]
    fn eq(&self, other: &IntegerType) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (IntegerType::Pointer(__self_0),
                    IntegerType::Pointer(__arg1_0)) => __self_0 == __arg1_0,
                (IntegerType::Fixed(__self_0, __self_1),
                    IntegerType::Fixed(__arg1_0, __arg1_1)) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq)]
117#[cfg_attr(
118    feature = "nightly",
119    derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for IntegerType {
            fn encode(&self, __encoder: &mut __E) {
                let disc =
                    match *self {
                        IntegerType::Pointer(ref __binding_0) => { 0usize }
                        IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
                            1usize
                        }
                    };
                ::rustc_serialize::Encoder::emit_u8(__encoder, disc as u8);
                match *self {
                    IntegerType::Pointer(ref __binding_0) => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                    }
                    IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_1,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for IntegerType {
            fn decode(__decoder: &mut __D) -> Self {
                match ::rustc_serialize::Decoder::read_u8(__decoder) as usize
                    {
                    0usize => {
                        IntegerType::Pointer(::rustc_serialize::Decodable::decode(__decoder))
                    }
                    1usize => {
                        IntegerType::Fixed(::rustc_serialize::Decodable::decode(__decoder),
                            ::rustc_serialize::Decodable::decode(__decoder))
                    }
                    n => {
                        ::core::panicking::panic_fmt(format_args!("invalid enum variant tag while decoding `IntegerType`, expected 0..2, actual {0}",
                                n));
                    }
                }
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for IntegerType where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    IntegerType::Pointer(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)
120)]
121pub enum IntegerType {
122    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
123    /// `Pointer(true)` means `isize`.
124    Pointer(bool),
125    /// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
126    /// `Fixed(I8, false)` means `u8`.
127    Fixed(Integer, bool),
128}
129
130impl IntegerType {
131    pub fn is_signed(&self) -> bool {
132        match self {
133            IntegerType::Pointer(b) => *b,
134            IntegerType::Fixed(_, b) => *b,
135        }
136    }
137}
138
/// Element-count information parsed from a `#[rustc_scalable_vector]` attribute.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum ScalableElt {
    /// `N` in `rustc_scalable_vector(N)` - the element count of the scalable vector
    ElementCount(u16),
    /// `rustc_scalable_vector` without `N`, used for tuple types of scalable vectors that only
    /// contain other scalable vectors
    Container,
}
151
152/// Represents the repr options provided by the user.
153#[derive(#[automatically_derived]
impl ::core::marker::Copy for ReprOptions { }Copy, #[automatically_derived]
impl ::core::clone::Clone for ReprOptions {
    #[inline]
    fn clone(&self) -> ReprOptions {
        let _: ::core::clone::AssertParamIsClone<Option<IntegerType>>;
        let _: ::core::clone::AssertParamIsClone<Option<Align>>;
        let _: ::core::clone::AssertParamIsClone<Option<Align>>;
        let _: ::core::clone::AssertParamIsClone<ReprFlags>;
        let _: ::core::clone::AssertParamIsClone<Option<ScalableElt>>;
        let _: ::core::clone::AssertParamIsClone<Hash64>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for ReprOptions {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        let names: &'static _ =
            &["int", "align", "pack", "flags", "scalable",
                        "field_shuffle_seed"];
        let values: &[&dyn ::core::fmt::Debug] =
            &[&self.int, &self.align, &self.pack, &self.flags, &self.scalable,
                        &&self.field_shuffle_seed];
        ::core::fmt::Formatter::debug_struct_fields_finish(f, "ReprOptions",
            names, values)
    }
}Debug, #[automatically_derived]
impl ::core::cmp::Eq for ReprOptions {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Option<IntegerType>>;
        let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
        let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
        let _: ::core::cmp::AssertParamIsEq<ReprFlags>;
        let _: ::core::cmp::AssertParamIsEq<Option<ScalableElt>>;
        let _: ::core::cmp::AssertParamIsEq<Hash64>;
    }
}Eq, #[automatically_derived]
impl ::core::cmp::PartialEq for ReprOptions {
    #[inline]
    fn eq(&self, other: &ReprOptions) -> bool {
        self.int == other.int && self.align == other.align &&
                        self.pack == other.pack && self.flags == other.flags &&
                self.scalable == other.scalable &&
            self.field_shuffle_seed == other.field_shuffle_seed
    }
}PartialEq, #[automatically_derived]
impl ::core::default::Default for ReprOptions {
    #[inline]
    fn default() -> ReprOptions {
        ReprOptions {
            int: ::core::default::Default::default(),
            align: ::core::default::Default::default(),
            pack: ::core::default::Default::default(),
            flags: ::core::default::Default::default(),
            scalable: ::core::default::Default::default(),
            field_shuffle_seed: ::core::default::Default::default(),
        }
    }
}Default)]
154#[cfg_attr(
155    feature = "nightly",
156    derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for ReprOptions {
            fn encode(&self, __encoder: &mut __E) {
                match *self {
                    ReprOptions {
                        int: ref __binding_0,
                        align: ref __binding_1,
                        pack: ref __binding_2,
                        flags: ref __binding_3,
                        scalable: ref __binding_4,
                        field_shuffle_seed: ref __binding_5 } => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_1,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_2,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_3,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_4,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_5,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for ReprOptions {
            fn decode(__decoder: &mut __D) -> Self {
                ReprOptions {
                    int: ::rustc_serialize::Decodable::decode(__decoder),
                    align: ::rustc_serialize::Decodable::decode(__decoder),
                    pack: ::rustc_serialize::Decodable::decode(__decoder),
                    flags: ::rustc_serialize::Decodable::decode(__decoder),
                    scalable: ::rustc_serialize::Decodable::decode(__decoder),
                    field_shuffle_seed: ::rustc_serialize::Decodable::decode(__decoder),
                }
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for ReprOptions where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    ReprOptions {
                        int: ref __binding_0,
                        align: ref __binding_1,
                        pack: ref __binding_2,
                        flags: ref __binding_3,
                        scalable: ref __binding_4,
                        field_shuffle_seed: ref __binding_5 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                        { __binding_4.hash_stable(__hcx, __hasher); }
                        { __binding_5.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)
157)]
158pub struct ReprOptions {
159    pub int: Option<IntegerType>,
160    pub align: Option<Align>,
161    pub pack: Option<Align>,
162    pub flags: ReprFlags,
163    /// `#[rustc_scalable_vector]`
164    pub scalable: Option<ScalableElt>,
165    /// The seed to be used for randomizing a type's layout
166    ///
167    /// Note: This could technically be a `u128` which would
168    /// be the "most accurate" hash as it'd encompass the item and crate
169    /// hash without loss, but it does pay the price of being larger.
170    /// Everything's a tradeoff, a 64-bit seed should be sufficient for our
171    /// purposes (primarily `-Z randomize-layout`)
172    pub field_shuffle_seed: Hash64,
173}
174
175impl ReprOptions {
176    #[inline]
177    pub fn simd(&self) -> bool {
178        self.flags.contains(ReprFlags::IS_SIMD)
179    }
180
181    #[inline]
182    pub fn scalable(&self) -> bool {
183        self.flags.contains(ReprFlags::IS_SCALABLE)
184    }
185
186    #[inline]
187    pub fn c(&self) -> bool {
188        self.flags.contains(ReprFlags::IS_C)
189    }
190
191    #[inline]
192    pub fn packed(&self) -> bool {
193        self.pack.is_some()
194    }
195
196    #[inline]
197    pub fn transparent(&self) -> bool {
198        self.flags.contains(ReprFlags::IS_TRANSPARENT)
199    }
200
201    #[inline]
202    pub fn linear(&self) -> bool {
203        self.flags.contains(ReprFlags::IS_LINEAR)
204    }
205
206    /// Returns the discriminant type, given these `repr` options.
207    /// This must only be called on enums!
208    ///
209    /// This is the "typeck type" of the discriminant, which is effectively the maximum size:
210    /// discriminant values will be wrapped to fit (with a lint). Layout can later decide to use a
211    /// smaller type for the tag that stores the discriminant at runtime and that will work just
212    /// fine, it just induces casts when getting/setting the discriminant.
213    pub fn discr_type(&self) -> IntegerType {
214        self.int.unwrap_or(IntegerType::Pointer(true))
215    }
216
217    /// Returns `true` if this `#[repr()]` should inhabit "smart enum
218    /// layout" optimizations, such as representing `Foo<&T>` as a
219    /// single pointer.
220    pub fn inhibit_enum_layout_opt(&self) -> bool {
221        self.c() || self.int.is_some()
222    }
223
224    pub fn inhibit_newtype_abi_optimization(&self) -> bool {
225        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
226    }
227
228    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,
229    /// e.g. `repr(C)` or `repr(<int>)`.
230    pub fn inhibit_struct_field_reordering(&self) -> bool {
231        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
232    }
233
234    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
235    /// was enabled for its declaration crate.
236    pub fn can_randomize_type_layout(&self) -> bool {
237        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
238    }
239
240    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
241    pub fn inhibits_union_abi_opt(&self) -> bool {
242        self.c()
243    }
244}
245
/// The maximum supported number of lanes in a SIMD vector.
///
/// Chosen from backend constraints:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer,
///   capping the lane count at 2^15.
pub const MAX_SIMD_LANES: u64 = 1u64 << 15;
252
253/// How pointers are represented in a given address space
254#[derive(#[automatically_derived]
impl ::core::marker::Copy for PointerSpec { }Copy, #[automatically_derived]
impl ::core::clone::Clone for PointerSpec {
    #[inline]
    fn clone(&self) -> PointerSpec {
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Align>;
        let _: ::core::clone::AssertParamIsClone<bool>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for PointerSpec {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(f, "PointerSpec",
            "pointer_size", &self.pointer_size, "pointer_align",
            &self.pointer_align, "pointer_offset", &self.pointer_offset,
            "_is_fat", &&self._is_fat)
    }
}Debug, #[automatically_derived]
impl ::core::cmp::PartialEq for PointerSpec {
    #[inline]
    fn eq(&self, other: &PointerSpec) -> bool {
        self._is_fat == other._is_fat &&
                    self.pointer_size == other.pointer_size &&
                self.pointer_align == other.pointer_align &&
            self.pointer_offset == other.pointer_offset
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for PointerSpec {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<Align>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
    }
}Eq)]
255pub struct PointerSpec {
256    /// The size of the bitwise representation of the pointer.
257    pointer_size: Size,
258    /// The alignment of pointers for this address space
259    pointer_align: Align,
260    /// The size of the value a pointer can be offset by in this address space.
261    pointer_offset: Size,
262    /// Pointers into this address space contain extra metadata
263    /// FIXME(workingjubilee): Consider adequately reflecting this in the compiler?
264    _is_fat: bool,
265}
266
267/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
268/// for a target, which contains everything needed to compute layouts.
269#[derive(#[automatically_derived]
impl ::core::fmt::Debug for TargetDataLayout {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        let names: &'static _ =
            &["endian", "i1_align", "i8_align", "i16_align", "i32_align",
                        "i64_align", "i128_align", "f16_align", "f32_align",
                        "f64_align", "f128_align", "aggregate_align",
                        "vector_align", "default_address_space",
                        "default_address_space_pointer_spec", "address_space_info",
                        "instruction_address_space", "c_enum_min_size"];
        let values: &[&dyn ::core::fmt::Debug] =
            &[&self.endian, &self.i1_align, &self.i8_align, &self.i16_align,
                        &self.i32_align, &self.i64_align, &self.i128_align,
                        &self.f16_align, &self.f32_align, &self.f64_align,
                        &self.f128_align, &self.aggregate_align, &self.vector_align,
                        &self.default_address_space,
                        &self.default_address_space_pointer_spec,
                        &self.address_space_info, &self.instruction_address_space,
                        &&self.c_enum_min_size];
        ::core::fmt::Formatter::debug_struct_fields_finish(f,
            "TargetDataLayout", names, values)
    }
}Debug, #[automatically_derived]
impl ::core::cmp::PartialEq for TargetDataLayout {
    #[inline]
    fn eq(&self, other: &TargetDataLayout) -> bool {
        self.endian == other.endian && self.i1_align == other.i1_align &&
                                                                        self.i8_align == other.i8_align &&
                                                                    self.i16_align == other.i16_align &&
                                                                self.i32_align == other.i32_align &&
                                                            self.i64_align == other.i64_align &&
                                                        self.i128_align == other.i128_align &&
                                                    self.f16_align == other.f16_align &&
                                                self.f32_align == other.f32_align &&
                                            self.f64_align == other.f64_align &&
                                        self.f128_align == other.f128_align &&
                                    self.aggregate_align == other.aggregate_align &&
                                self.vector_align == other.vector_align &&
                            self.default_address_space == other.default_address_space &&
                        self.default_address_space_pointer_spec ==
                            other.default_address_space_pointer_spec &&
                    self.address_space_info == other.address_space_info &&
                self.instruction_address_space ==
                    other.instruction_address_space &&
            self.c_enum_min_size == other.c_enum_min_size
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for TargetDataLayout {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Endian>;
        let _: ::core::cmp::AssertParamIsEq<Align>;
        let _: ::core::cmp::AssertParamIsEq<Vec<(Size, Align)>>;
        let _: ::core::cmp::AssertParamIsEq<AddressSpace>;
        let _: ::core::cmp::AssertParamIsEq<PointerSpec>;
        let _: ::core::cmp::AssertParamIsEq<Vec<(AddressSpace, PointerSpec)>>;
        let _: ::core::cmp::AssertParamIsEq<Integer>;
    }
}Eq)]
270pub struct TargetDataLayout {
271    pub endian: Endian,
272    pub i1_align: Align,
273    pub i8_align: Align,
274    pub i16_align: Align,
275    pub i32_align: Align,
276    pub i64_align: Align,
277    pub i128_align: Align,
278    pub f16_align: Align,
279    pub f32_align: Align,
280    pub f64_align: Align,
281    pub f128_align: Align,
282    pub aggregate_align: Align,
283
284    /// Alignments for vector types.
285    pub vector_align: Vec<(Size, Align)>,
286
287    pub default_address_space: AddressSpace,
288    pub default_address_space_pointer_spec: PointerSpec,
289
290    /// Address space information of all known address spaces.
291    ///
292    /// # Note
293    ///
294    /// This vector does not contain the [`PointerSpec`] relative to the default address space,
295    /// which instead lives in [`Self::default_address_space_pointer_spec`].
296    address_space_info: Vec<(AddressSpace, PointerSpec)>,
297
298    pub instruction_address_space: AddressSpace,
299
300    /// Minimum size of #[repr(C)] enums (default c_int::BITS, usually 32)
301    /// Note: This isn't in LLVM's data layout string, it is `short_enum`
302    /// so the only valid spec for LLVM is c_int::BITS or 8
303    pub c_enum_min_size: Integer,
304}
305
306impl Default for TargetDataLayout {
307    /// Creates an instance of `TargetDataLayout`.
308    fn default() -> TargetDataLayout {
309        let align = |bits| Align::from_bits(bits).unwrap();
310        TargetDataLayout {
311            endian: Endian::Big,
312            i1_align: align(8),
313            i8_align: align(8),
314            i16_align: align(16),
315            i32_align: align(32),
316            i64_align: align(32),
317            i128_align: align(32),
318            f16_align: align(16),
319            f32_align: align(32),
320            f64_align: align(64),
321            f128_align: align(128),
322            aggregate_align: align(8),
323            vector_align: ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [(Size::from_bits(64), align(64)),
                (Size::from_bits(128), align(128))]))vec![
324                (Size::from_bits(64), align(64)),
325                (Size::from_bits(128), align(128)),
326            ],
327            default_address_space: AddressSpace::ZERO,
328            default_address_space_pointer_spec: PointerSpec {
329                pointer_size: Size::from_bits(64),
330                pointer_align: align(64),
331                pointer_offset: Size::from_bits(64),
332                _is_fat: false,
333            },
334            address_space_info: ::alloc::vec::Vec::new()vec![],
335            instruction_address_space: AddressSpace::ZERO,
336            c_enum_min_size: Integer::I32,
337        }
338    }
339}
340
341pub enum TargetDataLayoutError<'a> {
342    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
343    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
344    MissingAlignment { cause: &'a str },
345    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
346    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
347    InconsistentTargetPointerWidth { pointer_size: u64, target: u16 },
348    InvalidBitsSize { err: String },
349    UnknownPointerSpecification { err: String },
350}
351
#[cfg(feature = "nightly")]
impl<G: EmissionGuarantee> Diagnostic<'_, G> for TargetDataLayoutError<'_> {
    /// Converts the parse error into a user-facing diagnostic.
    fn into_diag(self, dcx: DiagCtxtHandle<'_>, level: Level) -> Diag<'_, G> {
        match self {
            TargetDataLayoutError::InvalidAddressSpace { addr_space, err, cause } => {
                Diag::new(dcx, level, msg!("invalid address space `{$addr_space}` for `{$cause}` in \"data-layout\": {$err}"))
                    .with_arg("addr_space", addr_space)
                    .with_arg("cause", cause)
                    .with_arg("err", err)
            }
            TargetDataLayoutError::InvalidBits { kind, bit, cause, err } => {
                Diag::new(dcx, level, msg!("invalid {$kind} `{$bit}` for `{$cause}` in \"data-layout\": {$err}"))
                    .with_arg("kind", kind)
                    .with_arg("bit", bit)
                    .with_arg("cause", cause)
                    .with_arg("err", err)
            }
            TargetDataLayoutError::MissingAlignment { cause } => {
                Diag::new(dcx, level, msg!("missing alignment for `{$cause}` in \"data-layout\""))
                    .with_arg("cause", cause)
            }
            TargetDataLayoutError::InvalidAlignment { cause, err } => {
                Diag::new(dcx, level, msg!("invalid alignment for `{$cause}` in \"data-layout\": {$err}"))
                    .with_arg("cause", cause)
                    .with_arg("err", err.to_string())
            }
            TargetDataLayoutError::InconsistentTargetArchitecture { dl, target } => {
                Diag::new(dcx, level, msg!("inconsistent target specification: \"data-layout\" claims architecture is {$dl}-endian, while \"target-endian\" is `{$target}`"))
                    .with_arg("dl", dl).with_arg("target", target)
            }
            TargetDataLayoutError::InconsistentTargetPointerWidth { pointer_size, target } => {
                Diag::new(dcx, level, msg!("inconsistent target specification: \"data-layout\" claims pointers are {$pointer_size}-bit, while \"target-pointer-width\" is `{$target}`"))
                    .with_arg("pointer_size", pointer_size).with_arg("target", target)
            }
            TargetDataLayoutError::InvalidBitsSize { err } => {
                Diag::new(dcx, level, msg!("{$err}")).with_arg("err", err)
            }
            TargetDataLayoutError::UnknownPointerSpecification { err } => {
                Diag::new(dcx, level, msg!("unknown pointer specification `{$err}` in datalayout string"))
                    .with_arg("err", err)
            }
        }
    }
}
396
397impl TargetDataLayout {
398    /// Parse data layout from an
399    /// [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
400    ///
401    /// This function doesn't fill `c_enum_min_size` and it will always be `I32` since it can not be
402    /// determined from llvm string.
403    pub fn parse_from_llvm_datalayout_string<'a>(
404        input: &'a str,
405        default_address_space: AddressSpace,
406    ) -> Result<TargetDataLayout, TargetDataLayoutError<'a>> {
407        // Parse an address space index from a string.
408        let parse_address_space = |s: &'a str, cause: &'a str| {
409            s.parse::<u32>().map(AddressSpace).map_err(|err| {
410                TargetDataLayoutError::InvalidAddressSpace { addr_space: s, cause, err }
411            })
412        };
413
414        // Parse a bit count from a string.
415        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
416            s.parse::<u64>().map_err(|err| TargetDataLayoutError::InvalidBits {
417                kind,
418                bit: s,
419                cause,
420                err,
421            })
422        };
423
424        // Parse a size string.
425        let parse_size =
426            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
427
428        // Parse an alignment string.
429        let parse_align_str = |s: &'a str, cause: &'a str| {
430            let align_from_bits = |bits| {
431                Align::from_bits(bits)
432                    .map_err(|err| TargetDataLayoutError::InvalidAlignment { cause, err })
433            };
434            let abi = parse_bits(s, "alignment", cause)?;
435            Ok(align_from_bits(abi)?)
436        };
437
438        // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`,
439        // ignoring the secondary alignment specifications.
440        let parse_align_seq = |s: &[&'a str], cause: &'a str| {
441            if s.is_empty() {
442                return Err(TargetDataLayoutError::MissingAlignment { cause });
443            }
444            parse_align_str(s[0], cause)
445        };
446
447        let mut dl = TargetDataLayout::default();
448        dl.default_address_space = default_address_space;
449
450        let mut i128_align_src = 64;
451        for spec in input.split('-') {
452            let spec_parts = spec.split(':').collect::<Vec<_>>();
453
454            match &*spec_parts {
455                ["e"] => dl.endian = Endian::Little,
456                ["E"] => dl.endian = Endian::Big,
457                [p] if p.starts_with('P') => {
458                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
459                }
460                ["a", a @ ..] => dl.aggregate_align = parse_align_seq(a, "a")?,
461                ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?,
462                ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?,
463                ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?,
464                ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?,
465                [p, s, a @ ..] if p.starts_with("p") => {
466                    let mut p = p.strip_prefix('p').unwrap();
467                    let mut _is_fat = false;
468
469                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
470                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
471
472                    if p.starts_with('f') {
473                        p = p.strip_prefix('f').unwrap();
474                        _is_fat = true;
475                    }
476
477                    // However, we currently don't take into account further specifications:
478                    // an error is emitted instead.
479                    if p.starts_with(char::is_alphabetic) {
480                        return Err(TargetDataLayoutError::UnknownPointerSpecification {
481                            err: p.to_string(),
482                        });
483                    }
484
485                    let addr_space = if !p.is_empty() {
486                        parse_address_space(p, "p-")?
487                    } else {
488                        AddressSpace::ZERO
489                    };
490
491                    let pointer_size = parse_size(s, "p-")?;
492                    let pointer_align = parse_align_seq(a, "p-")?;
493                    let info = PointerSpec {
494                        pointer_offset: pointer_size,
495                        pointer_size,
496                        pointer_align,
497                        _is_fat,
498                    };
499                    if addr_space == default_address_space {
500                        dl.default_address_space_pointer_spec = info;
501                    } else {
502                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
503                            Some(e) => e.1 = info,
504                            None => {
505                                dl.address_space_info.push((addr_space, info));
506                            }
507                        }
508                    }
509                }
510                [p, s, a, _pr, i] if p.starts_with("p") => {
511                    let mut p = p.strip_prefix('p').unwrap();
512                    let mut _is_fat = false;
513
514                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
515                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
516
517                    if p.starts_with('f') {
518                        p = p.strip_prefix('f').unwrap();
519                        _is_fat = true;
520                    }
521
522                    // However, we currently don't take into account further specifications:
523                    // an error is emitted instead.
524                    if p.starts_with(char::is_alphabetic) {
525                        return Err(TargetDataLayoutError::UnknownPointerSpecification {
526                            err: p.to_string(),
527                        });
528                    }
529
530                    let addr_space = if !p.is_empty() {
531                        parse_address_space(p, "p")?
532                    } else {
533                        AddressSpace::ZERO
534                    };
535
536                    let info = PointerSpec {
537                        pointer_size: parse_size(s, "p-")?,
538                        pointer_align: parse_align_str(a, "p-")?,
539                        pointer_offset: parse_size(i, "p-")?,
540                        _is_fat,
541                    };
542
543                    if addr_space == default_address_space {
544                        dl.default_address_space_pointer_spec = info;
545                    } else {
546                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
547                            Some(e) => e.1 = info,
548                            None => {
549                                dl.address_space_info.push((addr_space, info));
550                            }
551                        }
552                    }
553                }
554
555                [s, a @ ..] if s.starts_with('i') => {
556                    let Ok(bits) = s[1..].parse::<u64>() else {
557                        parse_size(&s[1..], "i")?; // For the user error.
558                        continue;
559                    };
560                    let a = parse_align_seq(a, s)?;
561                    match bits {
562                        1 => dl.i1_align = a,
563                        8 => dl.i8_align = a,
564                        16 => dl.i16_align = a,
565                        32 => dl.i32_align = a,
566                        64 => dl.i64_align = a,
567                        _ => {}
568                    }
569                    if bits >= i128_align_src && bits <= 128 {
570                        // Default alignment for i128 is decided by taking the alignment of
571                        // largest-sized i{64..=128}.
572                        i128_align_src = bits;
573                        dl.i128_align = a;
574                    }
575                }
576                [s, a @ ..] if s.starts_with('v') => {
577                    let v_size = parse_size(&s[1..], "v")?;
578                    let a = parse_align_seq(a, s)?;
579                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
580                        v.1 = a;
581                        continue;
582                    }
583                    // No existing entry, add a new one.
584                    dl.vector_align.push((v_size, a));
585                }
586                _ => {} // Ignore everything else.
587            }
588        }
589
590        // Inherit, if not given, address space information for specific LLVM elements from the
591        // default data address space.
592        if (dl.instruction_address_space != dl.default_address_space)
593            && dl
594                .address_space_info
595                .iter()
596                .find(|(a, _)| *a == dl.instruction_address_space)
597                .is_none()
598        {
599            dl.address_space_info.push((
600                dl.instruction_address_space,
601                dl.default_address_space_pointer_spec.clone(),
602            ));
603        }
604
605        Ok(dl)
606    }
607
608    /// Returns **exclusive** upper bound on object size in bytes, in the default data address
609    /// space.
610    ///
611    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
612    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
613    /// index every address within an object along with one byte past the end, along with allowing
614    /// `isize` to store the difference between any two pointers into an object.
615    ///
616    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only for bytes,
617    /// so we adopt such a more-constrained size bound due to its technical limitations.
618    #[inline]
619    pub fn obj_size_bound(&self) -> u64 {
620        match self.pointer_size().bits() {
621            16 => 1 << 15,
622            32 => 1 << 31,
623            64 => 1 << 61,
624            bits => {
    ::core::panicking::panic_fmt(format_args!("obj_size_bound: unknown pointer bit size {0}",
            bits));
}panic!("obj_size_bound: unknown pointer bit size {bits}"),
625        }
626    }
627
628    /// Returns **exclusive** upper bound on object size in bytes.
629    ///
630    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
631    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
632    /// index every address within an object along with one byte past the end, along with allowing
633    /// `isize` to store the difference between any two pointers into an object.
634    ///
635    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only for bytes,
636    /// so we adopt such a more-constrained size bound due to its technical limitations.
637    #[inline]
638    pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 {
639        match self.pointer_size_in(address_space).bits() {
640            16 => 1 << 15,
641            32 => 1 << 31,
642            64 => 1 << 61,
643            bits => {
    ::core::panicking::panic_fmt(format_args!("obj_size_bound: unknown pointer bit size {0}",
            bits));
}panic!("obj_size_bound: unknown pointer bit size {bits}"),
644        }
645    }
646
647    #[inline]
648    pub fn ptr_sized_integer(&self) -> Integer {
649        use Integer::*;
650        match self.pointer_offset().bits() {
651            16 => I16,
652            32 => I32,
653            64 => I64,
654            bits => {
    ::core::panicking::panic_fmt(format_args!("ptr_sized_integer: unknown pointer bit size {0}",
            bits));
}panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
655        }
656    }
657
658    #[inline]
659    pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer {
660        use Integer::*;
661        match self.pointer_offset_in(address_space).bits() {
662            16 => I16,
663            32 => I32,
664            64 => I64,
665            bits => {
    ::core::panicking::panic_fmt(format_args!("ptr_sized_integer: unknown pointer bit size {0}",
            bits));
}panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
666        }
667    }
668
669    /// psABI-mandated alignment for a vector type, if any
670    #[inline]
671    fn cabi_vector_align(&self, vec_size: Size) -> Option<Align> {
672        self.vector_align
673            .iter()
674            .find(|(size, _align)| *size == vec_size)
675            .map(|(_size, align)| *align)
676    }
677
678    /// an alignment resembling the one LLVM would pick for a vector
679    #[inline]
680    pub fn llvmlike_vector_align(&self, vec_size: Size) -> Align {
681        self.cabi_vector_align(vec_size)
682            .unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
683    }
684
685    /// Get the pointer size in the default data address space.
686    #[inline]
687    pub fn pointer_size(&self) -> Size {
688        self.default_address_space_pointer_spec.pointer_size
689    }
690
691    /// Get the pointer size in a specific address space.
692    #[inline]
693    pub fn pointer_size_in(&self, c: AddressSpace) -> Size {
694        if c == self.default_address_space {
695            return self.default_address_space_pointer_spec.pointer_size;
696        }
697
698        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
699            e.1.pointer_size
700        } else {
701            {
    ::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
            c));
};panic!("Use of unknown address space {c:?}");
702        }
703    }
704
705    /// Get the pointer index in the default data address space.
706    #[inline]
707    pub fn pointer_offset(&self) -> Size {
708        self.default_address_space_pointer_spec.pointer_offset
709    }
710
711    /// Get the pointer index in a specific address space.
712    #[inline]
713    pub fn pointer_offset_in(&self, c: AddressSpace) -> Size {
714        if c == self.default_address_space {
715            return self.default_address_space_pointer_spec.pointer_offset;
716        }
717
718        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
719            e.1.pointer_offset
720        } else {
721            {
    ::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
            c));
};panic!("Use of unknown address space {c:?}");
722        }
723    }
724
725    /// Get the pointer alignment in the default data address space.
726    #[inline]
727    pub fn pointer_align(&self) -> AbiAlign {
728        AbiAlign::new(self.default_address_space_pointer_spec.pointer_align)
729    }
730
731    /// Get the pointer alignment in a specific address space.
732    #[inline]
733    pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {
734        AbiAlign::new(if c == self.default_address_space {
735            self.default_address_space_pointer_spec.pointer_align
736        } else if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
737            e.1.pointer_align
738        } else {
739            {
    ::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
            c));
};panic!("Use of unknown address space {c:?}");
740        })
741    }
742}
743
744pub trait HasDataLayout {
745    fn data_layout(&self) -> &TargetDataLayout;
746}
747
748impl HasDataLayout for TargetDataLayout {
749    #[inline]
750    fn data_layout(&self) -> &TargetDataLayout {
751        self
752    }
753}
754
755// used by rust-analyzer
756impl HasDataLayout for &TargetDataLayout {
757    #[inline]
758    fn data_layout(&self) -> &TargetDataLayout {
759        (**self).data_layout()
760    }
761}
762
/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}
769
770impl Endian {
771    pub fn as_str(&self) -> &'static str {
772        match self {
773            Self::Little => "little",
774            Self::Big => "big",
775        }
776    }
777
778    #[cfg(feature = "nightly")]
779    pub fn desc_symbol(&self) -> Symbol {
780        match self {
781            Self::Little => sym::little,
782            Self::Big => sym::big,
783        }
784    }
785}
786
787impl fmt::Debug for Endian {
788    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
789        f.write_str(self.as_str())
790    }
791}
792
793impl FromStr for Endian {
794    type Err = String;
795
796    fn from_str(s: &str) -> Result<Self, Self::Err> {
797        match s {
798            "little" => Ok(Self::Little),
799            "big" => Ok(Self::Big),
800            _ => Err(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("unknown endian: \"{0}\"", s))
    })format!(r#"unknown endian: "{s}""#)),
801        }
802    }
803}
804
/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Size {
    raw: u64,
}
814
#[cfg(feature = "nightly")]
impl StableOrd for Size {
    const CAN_USE_UNSTABLE_SORT: bool = true;

    // `Ord` is implemented as just comparing numerical values and numerical values
    // are not changed by (de-)serialization.
    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
}
823
824// This is debug-printed a lot in larger structs, don't waste too much space there
825impl fmt::Debug for Size {
826    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
827        f.write_fmt(format_args!("Size({0} bytes)", self.bytes()))write!(f, "Size({} bytes)", self.bytes())
828    }
829}
830
831impl Size {
832    pub const ZERO: Size = Size { raw: 0 };
833
834    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
835    /// not a multiple of 8.
836    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
837        let bits = bits.try_into().ok().unwrap();
838        Size { raw: bits.div_ceil(8) }
839    }
840
841    #[inline]
842    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
843        let bytes: u64 = bytes.try_into().ok().unwrap();
844        Size { raw: bytes }
845    }
846
847    #[inline]
848    pub fn bytes(self) -> u64 {
849        self.raw
850    }
851
852    #[inline]
853    pub fn bytes_usize(self) -> usize {
854        self.bytes().try_into().unwrap()
855    }
856
857    #[inline]
858    pub fn bits(self) -> u64 {
859        #[cold]
860        fn overflow(bytes: u64) -> ! {
861            {
    ::core::panicking::panic_fmt(format_args!("Size::bits: {0} bytes in bits doesn\'t fit in u64",
            bytes));
}panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
862        }
863
864        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
865    }
866
867    #[inline]
868    pub fn bits_usize(self) -> usize {
869        self.bits().try_into().unwrap()
870    }
871
872    #[inline]
873    pub fn align_to(self, align: Align) -> Size {
874        let mask = align.bytes() - 1;
875        Size::from_bytes((self.bytes() + mask) & !mask)
876    }
877
878    #[inline]
879    pub fn is_aligned(self, align: Align) -> bool {
880        let mask = align.bytes() - 1;
881        self.bytes() & mask == 0
882    }
883
884    #[inline]
885    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
886        let dl = cx.data_layout();
887
888        let bytes = self.bytes().checked_add(offset.bytes())?;
889
890        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
891    }
892
893    #[inline]
894    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
895        let dl = cx.data_layout();
896
897        let bytes = self.bytes().checked_mul(count)?;
898        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
899    }
900
901    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
902    /// (i.e., if it is negative, fill with 1's on the left).
903    #[inline]
904    pub fn sign_extend(self, value: u128) -> i128 {
905        let size = self.bits();
906        if size == 0 {
907            // Truncated until nothing is left.
908            return 0;
909        }
910        // Sign-extend it.
911        let shift = 128 - size;
912        // Shift the unsigned value to the left, then shift back to the right as signed
913        // (essentially fills with sign bit on the left).
914        ((value << shift) as i128) >> shift
915    }
916
917    /// Truncates `value` to `self` bits.
918    #[inline]
919    pub fn truncate(self, value: u128) -> u128 {
920        let size = self.bits();
921        if size == 0 {
922            // Truncated until nothing is left.
923            return 0;
924        }
925        let shift = 128 - size;
926        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
927        (value << shift) >> shift
928    }
929
930    #[inline]
931    pub fn signed_int_min(&self) -> i128 {
932        self.sign_extend(1_u128 << (self.bits() - 1))
933    }
934
935    #[inline]
936    pub fn signed_int_max(&self) -> i128 {
937        i128::MAX >> (128 - self.bits())
938    }
939
940    #[inline]
941    pub fn unsigned_int_max(&self) -> u128 {
942        u128::MAX >> (128 - self.bits())
943    }
944}
945
946// Panicking addition, subtraction and multiplication for convenience.
947// Avoid during layout computation, return `LayoutError` instead.
948
949impl Add for Size {
950    type Output = Size;
951    #[inline]
952    fn add(self, other: Size) -> Size {
953        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
954            {
    ::core::panicking::panic_fmt(format_args!("Size::add: {0} + {1} doesn\'t fit in u64",
            self.bytes(), other.bytes()));
}panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
955        }))
956    }
957}
958
959impl Sub for Size {
960    type Output = Size;
961    #[inline]
962    fn sub(self, other: Size) -> Size {
963        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
964            {
    ::core::panicking::panic_fmt(format_args!("Size::sub: {0} - {1} would result in negative size",
            self.bytes(), other.bytes()));
}panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
965        }))
966    }
967}
968
969impl Mul<Size> for u64 {
970    type Output = Size;
971    #[inline]
972    fn mul(self, size: Size) -> Size {
973        size * self
974    }
975}
976
977impl Mul<u64> for Size {
978    type Output = Size;
979    #[inline]
980    fn mul(self, count: u64) -> Size {
981        match self.bytes().checked_mul(count) {
982            Some(bytes) => Size::from_bytes(bytes),
983            None => {
    ::core::panicking::panic_fmt(format_args!("Size::mul: {0} * {1} doesn\'t fit in u64",
            self.bytes(), count));
}panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
984        }
985    }
986}
987
988impl AddAssign for Size {
989    #[inline]
990    fn add_assign(&mut self, other: Size) {
991        *self = *self + other;
992    }
993}
994
// Lets `Size` be used in ranges (`start..end`), stepping one byte at a time.
// Every method simply delegates to the `u64` `Step` impl on the byte count.
// NOTE: `Step` is unstable, hence the nightly-only gate.
#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        // SAFETY: caller upholds the `Step::forward_unchecked` contract
        // (no overflow), which we forward unchanged to `u64`.
        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        // SAFETY: caller upholds the `Step::backward_unchecked` contract
        // (no underflow), which we forward unchanged to `u64`.
        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
    }
}
1032
/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Align {
    // Stored as the base-2 logarithm of the alignment, so every supported
    // power-of-two alignment fits in a single byte.
    pow2: u8,
}
1042
1043// This is debug-printed a lot in larger structs, don't waste too much space there
1044impl fmt::Debug for Align {
1045    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1046        f.write_fmt(format_args!("Align({0} bytes)", self.bytes()))write!(f, "Align({} bytes)", self.bytes())
1047    }
1048}
1049
/// Error returned by [`Align::from_bytes`] / [`Align::from_bits`]: the
/// requested alignment was either not a power of two or exceeded the maximum.
#[derive(Clone, Copy)]
pub enum AlignFromBytesError {
    NotPowerOfTwo(u64),
    TooLarge(u64),
}
1055
1056impl fmt::Debug for AlignFromBytesError {
1057    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1058        fmt::Display::fmt(self, f)
1059    }
1060}
1061
1062impl fmt::Display for AlignFromBytesError {
1063    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1064        match self {
1065            AlignFromBytesError::NotPowerOfTwo(align) => f.write_fmt(format_args!("{0} is not a power of 2", align))write!(f, "{align} is not a power of 2"),
1066            AlignFromBytesError::TooLarge(align) => f.write_fmt(format_args!("{0} is too large", align))write!(f, "{align} is too large"),
1067        }
1068    }
1069}
1070
1071impl Align {
1072    pub const ONE: Align = Align { pow2: 0 };
1073    pub const EIGHT: Align = Align { pow2: 3 };
1074    // LLVM has a maximal supported alignment of 2^29, we inherit that.
1075    pub const MAX: Align = Align { pow2: 29 };
1076
1077    /// Either `1 << (pointer_bits - 1)` or [`Align::MAX`], whichever is smaller.
1078    #[inline]
1079    pub fn max_for_target(tdl: &TargetDataLayout) -> Align {
1080        let pointer_bits = tdl.pointer_size().bits();
1081        if let Ok(pointer_bits) = u8::try_from(pointer_bits)
1082            && pointer_bits <= Align::MAX.pow2
1083        {
1084            Align { pow2: pointer_bits - 1 }
1085        } else {
1086            Align::MAX
1087        }
1088    }
1089
1090    #[inline]
1091    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
1092        Align::from_bytes(Size::from_bits(bits).bytes())
1093    }
1094
1095    #[inline]
1096    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
1097        // Treat an alignment of 0 bytes like 1-byte alignment.
1098        if align == 0 {
1099            return Ok(Align::ONE);
1100        }
1101
1102        #[cold]
1103        const fn not_power_of_2(align: u64) -> AlignFromBytesError {
1104            AlignFromBytesError::NotPowerOfTwo(align)
1105        }
1106
1107        #[cold]
1108        const fn too_large(align: u64) -> AlignFromBytesError {
1109            AlignFromBytesError::TooLarge(align)
1110        }
1111
1112        let tz = align.trailing_zeros();
1113        if align != (1 << tz) {
1114            return Err(not_power_of_2(align));
1115        }
1116
1117        let pow2 = tz as u8;
1118        if pow2 > Self::MAX.pow2 {
1119            return Err(too_large(align));
1120        }
1121
1122        Ok(Align { pow2 })
1123    }
1124
1125    #[inline]
1126    pub const fn bytes(self) -> u64 {
1127        1 << self.pow2
1128    }
1129
1130    #[inline]
1131    pub fn bytes_usize(self) -> usize {
1132        self.bytes().try_into().unwrap()
1133    }
1134
1135    #[inline]
1136    pub const fn bits(self) -> u64 {
1137        self.bytes() * 8
1138    }
1139
1140    #[inline]
1141    pub fn bits_usize(self) -> usize {
1142        self.bits().try_into().unwrap()
1143    }
1144
1145    /// Obtain the greatest factor of `size` that is an alignment
1146    /// (the largest power of two the Size is a multiple of).
1147    ///
1148    /// Note that all numbers are factors of 0
1149    #[inline]
1150    pub fn max_aligned_factor(size: Size) -> Align {
1151        Align { pow2: size.bytes().trailing_zeros() as u8 }
1152    }
1153
1154    /// Reduces Align to an aligned factor of `size`.
1155    #[inline]
1156    pub fn restrict_for_offset(self, size: Size) -> Align {
1157        self.min(Align::max_aligned_factor(size))
1158    }
1159}
1160
1161/// A pair of alignments, ABI-mandated and preferred.
1162///
1163/// The "preferred" alignment is an LLVM concept that is virtually meaningless to Rust code:
1164/// it is not exposed semantically to programmers nor can they meaningfully affect it.
1165/// The only concern for us is that preferred alignment must not be less than the mandated alignment
1166/// and thus in practice the two values are almost always identical.
1167///
1168/// An example of a rare thing actually affected by preferred alignment is aligning of statics.
1169/// It is of effectively no consequence for layout in structs and on the stack.
1170#[derive(#[automatically_derived]
impl ::core::marker::Copy for AbiAlign { }Copy, #[automatically_derived]
impl ::core::clone::Clone for AbiAlign {
    #[inline]
    fn clone(&self) -> AbiAlign {
        let _: ::core::clone::AssertParamIsClone<Align>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for AbiAlign {
    #[inline]
    fn eq(&self, other: &AbiAlign) -> bool { self.abi == other.abi }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for AbiAlign {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Align>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for AbiAlign {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.abi, state)
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for AbiAlign {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field1_finish(f, "AbiAlign",
            "abi", &&self.abi)
    }
}Debug)]
1171#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for AbiAlign where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    AbiAlign { abi: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1172pub struct AbiAlign {
1173    pub abi: Align,
1174}
1175
1176impl AbiAlign {
1177    #[inline]
1178    pub fn new(align: Align) -> AbiAlign {
1179        AbiAlign { abi: align }
1180    }
1181
1182    #[inline]
1183    pub fn min(self, other: AbiAlign) -> AbiAlign {
1184        AbiAlign { abi: self.abi.min(other.abi) }
1185    }
1186
1187    #[inline]
1188    pub fn max(self, other: AbiAlign) -> AbiAlign {
1189        AbiAlign { abi: self.abi.max(other.abi) }
1190    }
1191}
1192
// Convenience: lets an `AbiAlign` be used anywhere a plain `Align` method
// is wanted, by auto-derefing to the ABI-mandated alignment.
impl Deref for AbiAlign {
    type Target = Align;

    fn deref(&self) -> &Self::Target {
        &self.abi
    }
}
1200
/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}
1214
1215impl Integer {
1216    pub fn int_ty_str(self) -> &'static str {
1217        use Integer::*;
1218        match self {
1219            I8 => "i8",
1220            I16 => "i16",
1221            I32 => "i32",
1222            I64 => "i64",
1223            I128 => "i128",
1224        }
1225    }
1226
1227    pub fn uint_ty_str(self) -> &'static str {
1228        use Integer::*;
1229        match self {
1230            I8 => "u8",
1231            I16 => "u16",
1232            I32 => "u32",
1233            I64 => "u64",
1234            I128 => "u128",
1235        }
1236    }
1237
1238    #[inline]
1239    pub fn size(self) -> Size {
1240        use Integer::*;
1241        match self {
1242            I8 => Size::from_bytes(1),
1243            I16 => Size::from_bytes(2),
1244            I32 => Size::from_bytes(4),
1245            I64 => Size::from_bytes(8),
1246            I128 => Size::from_bytes(16),
1247        }
1248    }
1249
1250    /// Gets the Integer type from an IntegerType.
1251    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
1252        let dl = cx.data_layout();
1253
1254        match ity {
1255            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
1256            IntegerType::Fixed(x, _) => x,
1257        }
1258    }
1259
1260    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1261        use Integer::*;
1262        let dl = cx.data_layout();
1263
1264        AbiAlign::new(match self {
1265            I8 => dl.i8_align,
1266            I16 => dl.i16_align,
1267            I32 => dl.i32_align,
1268            I64 => dl.i64_align,
1269            I128 => dl.i128_align,
1270        })
1271    }
1272
1273    /// Returns the largest signed value that can be represented by this Integer.
1274    #[inline]
1275    pub fn signed_max(self) -> i128 {
1276        use Integer::*;
1277        match self {
1278            I8 => i8::MAX as i128,
1279            I16 => i16::MAX as i128,
1280            I32 => i32::MAX as i128,
1281            I64 => i64::MAX as i128,
1282            I128 => i128::MAX,
1283        }
1284    }
1285
1286    /// Returns the smallest signed value that can be represented by this Integer.
1287    #[inline]
1288    pub fn signed_min(self) -> i128 {
1289        use Integer::*;
1290        match self {
1291            I8 => i8::MIN as i128,
1292            I16 => i16::MIN as i128,
1293            I32 => i32::MIN as i128,
1294            I64 => i64::MIN as i128,
1295            I128 => i128::MIN,
1296        }
1297    }
1298
1299    /// Finds the smallest Integer type which can represent the signed value.
1300    #[inline]
1301    pub fn fit_signed(x: i128) -> Integer {
1302        use Integer::*;
1303        match x {
1304            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
1305            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
1306            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
1307            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
1308            _ => I128,
1309        }
1310    }
1311
1312    /// Finds the smallest Integer type which can represent the unsigned value.
1313    #[inline]
1314    pub fn fit_unsigned(x: u128) -> Integer {
1315        use Integer::*;
1316        match x {
1317            0..=0x0000_0000_0000_00ff => I8,
1318            0..=0x0000_0000_0000_ffff => I16,
1319            0..=0x0000_0000_ffff_ffff => I32,
1320            0..=0xffff_ffff_ffff_ffff => I64,
1321            _ => I128,
1322        }
1323    }
1324
1325    /// Finds the smallest integer with the given alignment.
1326    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
1327        use Integer::*;
1328        let dl = cx.data_layout();
1329
1330        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
1331            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
1332        })
1333    }
1334
1335    /// Find the largest integer with the given alignment or less.
1336    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
1337        use Integer::*;
1338        let dl = cx.data_layout();
1339
1340        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
1341        for candidate in [I64, I32, I16] {
1342            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
1343                return candidate;
1344            }
1345        }
1346        I8
1347    }
1348
1349    // FIXME(eddyb) consolidate this and other methods that find the appropriate
1350    // `Integer` given some requirements.
1351    #[inline]
1352    pub fn from_size(size: Size) -> Result<Self, String> {
1353        match size.bits() {
1354            8 => Ok(Integer::I8),
1355            16 => Ok(Integer::I16),
1356            32 => Ok(Integer::I32),
1357            64 => Ok(Integer::I64),
1358            128 => Ok(Integer::I128),
1359            _ => Err(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("rust does not support integers with {0} bits",
                size.bits()))
    })format!("rust does not support integers with {} bits", size.bits())),
1360        }
1361    }
1362}
1363
/// Floating-point types.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Float {
    F16,
    F32,
    F64,
    F128,
}
1373
1374impl Float {
1375    pub fn size(self) -> Size {
1376        use Float::*;
1377
1378        match self {
1379            F16 => Size::from_bits(16),
1380            F32 => Size::from_bits(32),
1381            F64 => Size::from_bits(64),
1382            F128 => Size::from_bits(128),
1383        }
1384    }
1385
1386    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1387        use Float::*;
1388        let dl = cx.data_layout();
1389
1390        AbiAlign::new(match self {
1391            F16 => dl.f16_align,
1392            F32 => dl.f32_align,
1393            F64 => dl.f64_align,
1394            F128 => dl.f128_align,
1395        })
1396    }
1397}
1398
1399/// Fundamental unit of memory access and layout.
1400#[derive(#[automatically_derived]
impl ::core::marker::Copy for Primitive { }Copy, #[automatically_derived]
impl ::core::clone::Clone for Primitive {
    #[inline]
    fn clone(&self) -> Primitive {
        let _: ::core::clone::AssertParamIsClone<Integer>;
        let _: ::core::clone::AssertParamIsClone<bool>;
        let _: ::core::clone::AssertParamIsClone<Float>;
        let _: ::core::clone::AssertParamIsClone<AddressSpace>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for Primitive {
    #[inline]
    fn eq(&self, other: &Primitive) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (Primitive::Int(__self_0, __self_1),
                    Primitive::Int(__arg1_0, __arg1_1)) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                (Primitive::Float(__self_0), Primitive::Float(__arg1_0)) =>
                    __self_0 == __arg1_0,
                (Primitive::Pointer(__self_0), Primitive::Pointer(__arg1_0))
                    => __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Primitive {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Integer>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<Float>;
        let _: ::core::cmp::AssertParamIsEq<AddressSpace>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Primitive {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            Primitive::Int(__self_0, __self_1) => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            Primitive::Float(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
            Primitive::Pointer(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
        }
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Primitive {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            Primitive::Int(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f, "Int",
                    __self_0, &__self_1),
            Primitive::Float(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Float",
                    &__self_0),
            Primitive::Pointer(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f,
                    "Pointer", &__self_0),
        }
    }
}Debug)]
1401#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Primitive where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    Primitive::Int(ref __binding_0, ref __binding_1) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    Primitive::Float(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    Primitive::Pointer(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1402pub enum Primitive {
1403    /// The `bool` is the signedness of the `Integer` type.
1404    ///
1405    /// One would think we would not care about such details this low down,
1406    /// but some ABIs are described in terms of C types and ISAs where the
1407    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
1408    /// a negative integer passed by zero-extension will appear positive in
1409    /// the callee, and most operations on it will produce the wrong values.
1410    Int(Integer, bool),
1411    Float(Float),
1412    Pointer(AddressSpace),
1413}
1414
1415impl Primitive {
1416    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
1417        use Primitive::*;
1418        let dl = cx.data_layout();
1419
1420        match self {
1421            Int(i, _) => i.size(),
1422            Float(f) => f.size(),
1423            Pointer(a) => dl.pointer_size_in(a),
1424        }
1425    }
1426
1427    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1428        use Primitive::*;
1429        let dl = cx.data_layout();
1430
1431        match self {
1432            Int(i, _) => i.align(dl),
1433            Float(f) => f.align(dl),
1434            Pointer(a) => dl.pointer_align_in(a),
1435        }
1436    }
1437}
1438
1439/// Inclusive wrap-around range of valid values, that is, if
1440/// start > end, it represents `start..=MAX`, followed by `0..=end`.
1441///
1442/// That is, for an i8 primitive, a range of `254..=2` means following
1443/// sequence:
1444///
1445///    254 (-2), 255 (-1), 0, 1, 2
1446///
1447/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
1448#[derive(#[automatically_derived]
impl ::core::clone::Clone for WrappingRange {
    #[inline]
    fn clone(&self) -> WrappingRange {
        let _: ::core::clone::AssertParamIsClone<u128>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for WrappingRange { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for WrappingRange {
    #[inline]
    fn eq(&self, other: &WrappingRange) -> bool {
        self.start == other.start && self.end == other.end
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for WrappingRange {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<u128>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for WrappingRange {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.start, state);
        ::core::hash::Hash::hash(&self.end, state)
    }
}Hash)]
1449#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for WrappingRange where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    WrappingRange { start: ref __binding_0, end: ref __binding_1
                        } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1450pub struct WrappingRange {
1451    pub start: u128,
1452    pub end: u128,
1453}
1454
1455impl WrappingRange {
1456    pub fn full(size: Size) -> Self {
1457        Self { start: 0, end: size.unsigned_int_max() }
1458    }
1459
1460    /// Returns `true` if `v` is contained in the range.
1461    #[inline(always)]
1462    pub fn contains(&self, v: u128) -> bool {
1463        if self.start <= self.end {
1464            self.start <= v && v <= self.end
1465        } else {
1466            self.start <= v || v <= self.end
1467        }
1468    }
1469
1470    /// Returns `true` if all the values in `other` are contained in this range,
1471    /// when the values are considered as having width `size`.
1472    #[inline(always)]
1473    pub fn contains_range(&self, other: Self, size: Size) -> bool {
1474        if self.is_full_for(size) {
1475            true
1476        } else {
1477            let trunc = |x| size.truncate(x);
1478
1479            let delta = self.start;
1480            let max = trunc(self.end.wrapping_sub(delta));
1481
1482            let other_start = trunc(other.start.wrapping_sub(delta));
1483            let other_end = trunc(other.end.wrapping_sub(delta));
1484
1485            // Having shifted both input ranges by `delta`, now we only need to check
1486            // whether `0..=max` contains `other_start..=other_end`, which can only
1487            // happen if the other doesn't wrap since `self` isn't everything.
1488            (other_start <= other_end) && (other_end <= max)
1489        }
1490    }
1491
1492    /// Returns `self` with replaced `start`
1493    #[inline(always)]
1494    fn with_start(mut self, start: u128) -> Self {
1495        self.start = start;
1496        self
1497    }
1498
1499    /// Returns `self` with replaced `end`
1500    #[inline(always)]
1501    fn with_end(mut self, end: u128) -> Self {
1502        self.end = end;
1503        self
1504    }
1505
1506    /// Returns `true` if `size` completely fills the range.
1507    ///
1508    /// Note that this is *not* the same as `self == WrappingRange::full(size)`.
1509    /// Niche calculations can produce full ranges which are not the canonical one;
1510    /// for example `Option<NonZero<u16>>` gets `valid_range: (..=0) | (1..)`.
1511    #[inline]
1512    fn is_full_for(&self, size: Size) -> bool {
1513        let max_value = size.unsigned_int_max();
1514        if true {
    if !(self.start <= max_value && self.end <= max_value) {
        ::core::panicking::panic("assertion failed: self.start <= max_value && self.end <= max_value")
    };
};debug_assert!(self.start <= max_value && self.end <= max_value);
1515        self.start == (self.end.wrapping_add(1) & max_value)
1516    }
1517
1518    /// Checks whether this range is considered non-wrapping when the values are
1519    /// interpreted as *unsigned* numbers of width `size`.
1520    ///
1521    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
1522    /// and `Err(..)` if the range is full so it depends how you think about it.
1523    #[inline]
1524    pub fn no_unsigned_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
1525        if self.is_full_for(size) { Err(..) } else { Ok(self.start <= self.end) }
1526    }
1527
1528    /// Checks whether this range is considered non-wrapping when the values are
1529    /// interpreted as *signed* numbers of width `size`.
1530    ///
1531    /// This is heavily dependent on the `size`, as `100..=200` does wrap when
1532    /// interpreted as `i8`, but doesn't when interpreted as `i16`.
1533    ///
1534    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
1535    /// and `Err(..)` if the range is full so it depends how you think about it.
1536    #[inline]
1537    pub fn no_signed_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
1538        if self.is_full_for(size) {
1539            Err(..)
1540        } else {
1541            let start: i128 = size.sign_extend(self.start);
1542            let end: i128 = size.sign_extend(self.end);
1543            Ok(start <= end)
1544        }
1545    }
1546}
1547
1548impl fmt::Debug for WrappingRange {
1549    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1550        if self.start > self.end {
1551            fmt.write_fmt(format_args!("(..={0}) | ({1}..)", self.end, self.start))write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
1552        } else {
1553            fmt.write_fmt(format_args!("{0}..={1}", self.start, self.end))write!(fmt, "{}..={}", self.start, self.end)?;
1554        }
1555        Ok(())
1556    }
1557}
1558
1559/// Information about one scalar component of a Rust type.
1560#[derive(#[automatically_derived]
impl ::core::clone::Clone for Scalar {
    #[inline]
    fn clone(&self) -> Scalar {
        let _: ::core::clone::AssertParamIsClone<Primitive>;
        let _: ::core::clone::AssertParamIsClone<WrappingRange>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for Scalar { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for Scalar {
    #[inline]
    fn eq(&self, other: &Scalar) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (Scalar::Initialized { value: __self_0, valid_range: __self_1
                    }, Scalar::Initialized {
                    value: __arg1_0, valid_range: __arg1_1 }) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1,
                (Scalar::Union { value: __self_0 }, Scalar::Union {
                    value: __arg1_0 }) => __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Scalar {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Primitive>;
        let _: ::core::cmp::AssertParamIsEq<WrappingRange>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Scalar {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            Scalar::Initialized { value: __self_0, valid_range: __self_1 } =>
                {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            Scalar::Union { value: __self_0 } =>
                ::core::hash::Hash::hash(__self_0, state),
        }
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Scalar {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            Scalar::Initialized { value: __self_0, valid_range: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f,
                    "Initialized", "value", __self_0, "valid_range", &__self_1),
            Scalar::Union { value: __self_0 } =>
                ::core::fmt::Formatter::debug_struct_field1_finish(f, "Union",
                    "value", &__self_0),
        }
    }
}Debug)]
1561#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Scalar where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    Scalar::Initialized {
                        value: ref __binding_0, valid_range: ref __binding_1 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    Scalar::Union { value: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1562pub enum Scalar {
1563    Initialized {
1564        value: Primitive,
1565
1566        // FIXME(eddyb) always use the shortest range, e.g., by finding
1567        // the largest space between two consecutive valid values and
1568        // taking everything else as the (shortest) valid range.
1569        valid_range: WrappingRange,
1570    },
1571    Union {
1572        /// Even for unions, we need to use the correct registers for the kind of
1573        /// values inside the union, so we keep the `Primitive` type around. We
1574        /// also use it to compute the size of the scalar.
1575        /// However, unions never have niches and even allow undef,
1576        /// so there is no `valid_range`.
1577        value: Primitive,
1578    },
1579}
1580
1581impl Scalar {
1582    #[inline]
1583    pub fn is_bool(&self) -> bool {
1584        use Integer::*;
1585        #[allow(non_exhaustive_omitted_patterns)] match self {
    Scalar::Initialized {
        value: Primitive::Int(I8, false),
        valid_range: WrappingRange { start: 0, end: 1 } } => true,
    _ => false,
}matches!(
1586            self,
1587            Scalar::Initialized {
1588                value: Primitive::Int(I8, false),
1589                valid_range: WrappingRange { start: 0, end: 1 }
1590            }
1591        )
1592    }
1593
1594    /// Get the primitive representation of this type, ignoring the valid range and whether the
1595    /// value is allowed to be undefined (due to being a union).
1596    pub fn primitive(&self) -> Primitive {
1597        match *self {
1598            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
1599        }
1600    }
1601
1602    pub fn align(self, cx: &impl HasDataLayout) -> AbiAlign {
1603        self.primitive().align(cx)
1604    }
1605
1606    pub fn size(self, cx: &impl HasDataLayout) -> Size {
1607        self.primitive().size(cx)
1608    }
1609
1610    #[inline]
1611    pub fn to_union(&self) -> Self {
1612        Self::Union { value: self.primitive() }
1613    }
1614
1615    #[inline]
1616    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
1617        match *self {
1618            Scalar::Initialized { valid_range, .. } => valid_range,
1619            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
1620        }
1621    }
1622
1623    #[inline]
1624    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
1625    /// union.
1626    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
1627        match self {
1628            Scalar::Initialized { valid_range, .. } => valid_range,
1629            Scalar::Union { .. } => {
    ::core::panicking::panic_fmt(format_args!("cannot change the valid range of a union"));
}panic!("cannot change the valid range of a union"),
1630        }
1631    }
1632
1633    /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole
1634    /// layout.
1635    #[inline]
1636    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
1637        match *self {
1638            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
1639            Scalar::Union { .. } => true,
1640        }
1641    }
1642
1643    /// Returns `true` if this type can be left uninit.
1644    #[inline]
1645    pub fn is_uninit_valid(&self) -> bool {
1646        match *self {
1647            Scalar::Initialized { .. } => false,
1648            Scalar::Union { .. } => true,
1649        }
1650    }
1651
1652    /// Returns `true` if this is a signed integer scalar
1653    #[inline]
1654    pub fn is_signed(&self) -> bool {
1655        match self.primitive() {
1656            Primitive::Int(_, signed) => signed,
1657            _ => false,
1658        }
1659    }
1660}
1661
1662// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.
1663/// Describes how the fields of a type are located in memory.
1664#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    FieldsShape<FieldIdx> {
    #[inline]
    fn eq(&self, other: &FieldsShape<FieldIdx>) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (FieldsShape::Union(__self_0), FieldsShape::Union(__arg1_0))
                    => __self_0 == __arg1_0,
                (FieldsShape::Array { stride: __self_0, count: __self_1 },
                    FieldsShape::Array { stride: __arg1_0, count: __arg1_1 }) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                (FieldsShape::Arbitrary {
                    offsets: __self_0, in_memory_order: __self_1 },
                    FieldsShape::Arbitrary {
                    offsets: __arg1_0, in_memory_order: __arg1_1 }) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1,
                _ => true,
            }
    }
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx> ::core::cmp::Eq for
    FieldsShape<FieldIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<NonZeroUsize>;
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<u64>;
        let _: ::core::cmp::AssertParamIsEq<IndexVec<FieldIdx, Size>>;
        let _: ::core::cmp::AssertParamIsEq<IndexVec<u32, FieldIdx>>;
    }
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx> ::core::hash::Hash for
    FieldsShape<FieldIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            FieldsShape::Union(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
            FieldsShape::Array { stride: __self_0, count: __self_1 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            FieldsShape::Arbitrary {
                offsets: __self_0, in_memory_order: __self_1 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            _ => {}
        }
    }
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx> ::core::clone::Clone for
    FieldsShape<FieldIdx> {
    #[inline]
    fn clone(&self) -> FieldsShape<FieldIdx> {
        match self {
            FieldsShape::Primitive => FieldsShape::Primitive,
            FieldsShape::Union(__self_0) =>
                FieldsShape::Union(::core::clone::Clone::clone(__self_0)),
            FieldsShape::Array { stride: __self_0, count: __self_1 } =>
                FieldsShape::Array {
                    stride: ::core::clone::Clone::clone(__self_0),
                    count: ::core::clone::Clone::clone(__self_1),
                },
            FieldsShape::Arbitrary {
                offsets: __self_0, in_memory_order: __self_1 } =>
                FieldsShape::Arbitrary {
                    offsets: ::core::clone::Clone::clone(__self_0),
                    in_memory_order: ::core::clone::Clone::clone(__self_1),
                },
        }
    }
}Clone, #[automatically_derived]
impl<FieldIdx: ::core::fmt::Debug + Idx> ::core::fmt::Debug for
    FieldsShape<FieldIdx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            FieldsShape::Primitive =>
                ::core::fmt::Formatter::write_str(f, "Primitive"),
            FieldsShape::Union(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Union",
                    &__self_0),
            FieldsShape::Array { stride: __self_0, count: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f, "Array",
                    "stride", __self_0, "count", &__self_1),
            FieldsShape::Arbitrary {
                offsets: __self_0, in_memory_order: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f,
                    "Arbitrary", "offsets", __self_0, "in_memory_order",
                    &__self_1),
        }
    }
}Debug)]
1665#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<FieldIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            FieldsShape<FieldIdx> where
            __CTX: ::rustc_span::HashStableContext,
            FieldIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    FieldsShape::Primitive => {}
                    FieldsShape::Union(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    FieldsShape::Array {
                        stride: ref __binding_0, count: ref __binding_1 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    FieldsShape::Arbitrary {
                        offsets: ref __binding_0, in_memory_order: ref __binding_1 }
                        => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1666pub enum FieldsShape<FieldIdx: Idx> {
1667    /// Scalar primitives and `!`, which never have fields.
1668    Primitive,
1669
1670    /// All fields start at no offset. The `usize` is the field count.
1671    Union(NonZeroUsize),
1672
1673    /// Array/vector-like placement, with all fields of identical types.
1674    Array { stride: Size, count: u64 },
1675
1676    /// Struct-like placement, with precomputed offsets.
1677    ///
1678    /// Fields are guaranteed to not overlap, but note that gaps
1679    /// before, between and after all the fields are NOT always
1680    /// padding, and as such their contents may not be discarded.
1681    /// For example, enum variants leave a gap at the start,
1682    /// where the discriminant field in the enum layout goes.
1683    Arbitrary {
1684        /// Offsets for the first byte of each field,
1685        /// ordered to match the source definition order.
1686        /// This vector does not go in increasing order.
1687        // FIXME(eddyb) use small vector optimization for the common case.
1688        offsets: IndexVec<FieldIdx, Size>,
1689
1690        /// Maps memory order field indices to source order indices,
1691        /// depending on how the fields were reordered (if at all).
1692        /// This is a permutation, with both the source order and the
1693        /// memory order using the same (0..n) index ranges.
1694        ///
1695        // FIXME(eddyb) build a better abstraction for permutations, if possible.
1696        // FIXME(camlorn) also consider small vector optimization here.
1697        in_memory_order: IndexVec<u32, FieldIdx>,
1698    },
1699}
1700
1701impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
1702    #[inline]
1703    pub fn count(&self) -> usize {
1704        match *self {
1705            FieldsShape::Primitive => 0,
1706            FieldsShape::Union(count) => count.get(),
1707            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
1708            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
1709        }
1710    }
1711
1712    #[inline]
1713    pub fn offset(&self, i: usize) -> Size {
1714        match *self {
1715            FieldsShape::Primitive => {
1716                {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("FieldsShape::offset: `Primitive`s have no fields")));
}unreachable!("FieldsShape::offset: `Primitive`s have no fields")
1717            }
1718            FieldsShape::Union(count) => {
1719                if !(i < count.get()) {
    {
        ::core::panicking::panic_fmt(format_args!("tried to access field {0} of union with {1} fields",
                i, count));
    }
};assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
1720                Size::ZERO
1721            }
1722            FieldsShape::Array { stride, count } => {
1723                let i = u64::try_from(i).unwrap();
1724                if !(i < count) {
    {
        ::core::panicking::panic_fmt(format_args!("tried to access field {0} of array with {1} fields",
                i, count));
    }
};assert!(i < count, "tried to access field {i} of array with {count} fields");
1725                stride * i
1726            }
1727            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
1728        }
1729    }
1730
1731    /// Gets source indices of the fields by increasing offsets.
1732    #[inline]
1733    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
1734        // Primitives don't really have fields in the way that structs do,
1735        // but having this return an empty iterator for them is unhelpful
1736        // since that makes them look kinda like ZSTs, which they're not.
1737        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };
1738
1739        (0..pseudofield_count).map(move |i| match self {
1740            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
1741            FieldsShape::Arbitrary { in_memory_order, .. } => in_memory_order[i as u32].index(),
1742        })
1743    }
1744}
1745
/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);
1752
1753impl AddressSpace {
1754    /// LLVM's `0` address space.
1755    pub const ZERO: Self = AddressSpace(0);
1756}
1757
/// How many scalable vectors are in a `BackendRepr::ScalableVector`?
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct NumScalableVectors(pub u8);
1762
1763impl NumScalableVectors {
1764    /// Returns a `NumScalableVector` for a non-tuple scalable vector (e.g. a single vector).
1765    pub fn for_non_tuple() -> Self {
1766        NumScalableVectors(1)
1767    }
1768
1769    // Returns `NumScalableVectors` for values of two through eight, which are a valid number of
1770    // fields for a tuple of scalable vectors to have. `1` is a valid value of `NumScalableVectors`
1771    // but not for a tuple which would have a field count.
1772    pub fn from_field_count(count: usize) -> Option<Self> {
1773        match count {
1774            2..8 => Some(NumScalableVectors(count as u8)),
1775            _ => None,
1776        }
1777    }
1778}
1779
#[cfg(feature = "nightly")]
impl IntoDiagArg for NumScalableVectors {
    /// Renders the vector count as an English word for diagnostics.
    fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue {
        DiagArgValue::Str(std::borrow::Cow::Borrowed(match self.0 {
            // The in-file constructors (`for_non_tuple`, `from_field_count`) only produce
            // 1..=8, so these guard arms indicate a compiler bug if ever reached.
            0 => panic!("`NumScalableVectors(0)` is illformed"),
            1 => "one",
            2 => "two",
            3 => "three",
            4 => "four",
            5 => "five",
            6 => "six",
            7 => "seven",
            8 => "eight",
            _ => panic!("`NumScalableVectors(N)` for N>8 is illformed"),
        }))
    }
}

1798/// The way we represent values to the backend
1799///
1800/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
1801/// In reality, this implies little about that, but is mostly used to describe the syntactic form
1802/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
1803/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
1804/// how the value will be lowered to the calling convention, in itself.
1805///
1806/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
1807/// and larger values will usually prefer to be represented as memory.
1808#[derive(#[automatically_derived]
impl ::core::clone::Clone for BackendRepr {
    #[inline]
    fn clone(&self) -> BackendRepr {
        let _: ::core::clone::AssertParamIsClone<Scalar>;
        let _: ::core::clone::AssertParamIsClone<u64>;
        let _: ::core::clone::AssertParamIsClone<NumScalableVectors>;
        let _: ::core::clone::AssertParamIsClone<bool>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for BackendRepr { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for BackendRepr {
    #[inline]
    fn eq(&self, other: &BackendRepr) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (BackendRepr::Scalar(__self_0), BackendRepr::Scalar(__arg1_0))
                    => __self_0 == __arg1_0,
                (BackendRepr::ScalarPair(__self_0, __self_1),
                    BackendRepr::ScalarPair(__arg1_0, __arg1_1)) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1,
                (BackendRepr::SimdScalableVector {
                    element: __self_0,
                    count: __self_1,
                    number_of_vectors: __self_2 },
                    BackendRepr::SimdScalableVector {
                    element: __arg1_0,
                    count: __arg1_1,
                    number_of_vectors: __arg1_2 }) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0 &&
                        __self_2 == __arg1_2,
                (BackendRepr::SimdVector { element: __self_0, count: __self_1
                    }, BackendRepr::SimdVector {
                    element: __arg1_0, count: __arg1_1 }) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                (BackendRepr::Memory { sized: __self_0 },
                    BackendRepr::Memory { sized: __arg1_0 }) =>
                    __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for BackendRepr {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Scalar>;
        let _: ::core::cmp::AssertParamIsEq<u64>;
        let _: ::core::cmp::AssertParamIsEq<NumScalableVectors>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for BackendRepr {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            BackendRepr::Scalar(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
            BackendRepr::ScalarPair(__self_0, __self_1) => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            BackendRepr::SimdScalableVector {
                element: __self_0,
                count: __self_1,
                number_of_vectors: __self_2 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state);
                ::core::hash::Hash::hash(__self_2, state)
            }
            BackendRepr::SimdVector { element: __self_0, count: __self_1 } =>
                {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            BackendRepr::Memory { sized: __self_0 } =>
                ::core::hash::Hash::hash(__self_0, state),
        }
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for BackendRepr {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            BackendRepr::Scalar(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Scalar",
                    &__self_0),
            BackendRepr::ScalarPair(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f,
                    "ScalarPair", __self_0, &__self_1),
            BackendRepr::SimdScalableVector {
                element: __self_0,
                count: __self_1,
                number_of_vectors: __self_2 } =>
                ::core::fmt::Formatter::debug_struct_field3_finish(f,
                    "SimdScalableVector", "element", __self_0, "count",
                    __self_1, "number_of_vectors", &__self_2),
            BackendRepr::SimdVector { element: __self_0, count: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f,
                    "SimdVector", "element", __self_0, "count", &__self_1),
            BackendRepr::Memory { sized: __self_0 } =>
                ::core::fmt::Formatter::debug_struct_field1_finish(f,
                    "Memory", "sized", &__self_0),
        }
    }
}Debug)]
1809#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for BackendRepr where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    BackendRepr::Scalar(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::ScalarPair(ref __binding_0, ref __binding_1) =>
                        {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::SimdScalableVector {
                        element: ref __binding_0,
                        count: ref __binding_1,
                        number_of_vectors: ref __binding_2 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::SimdVector {
                        element: ref __binding_0, count: ref __binding_1 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::Memory { sized: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1810pub enum BackendRepr {
1811    Scalar(Scalar),
1812    ScalarPair(Scalar, Scalar),
1813    SimdScalableVector {
1814        element: Scalar,
1815        count: u64,
1816        number_of_vectors: NumScalableVectors,
1817    },
1818    SimdVector {
1819        element: Scalar,
1820        count: u64,
1821    },
1822    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
1823    Memory {
1824        /// If true, the size is exact, otherwise it's only a lower bound.
1825        sized: bool,
1826    },
1827}
1828
1829impl BackendRepr {
1830    /// Returns `true` if the layout corresponds to an unsized type.
1831    #[inline]
1832    pub fn is_unsized(&self) -> bool {
1833        match *self {
1834            BackendRepr::Scalar(_)
1835            | BackendRepr::ScalarPair(..)
1836            // FIXME(rustc_scalable_vector): Scalable vectors are `Sized` while the
1837            // `sized_hierarchy` feature is not yet fully implemented. After `sized_hierarchy` is
1838            // fully implemented, scalable vectors will remain `Sized`, they just won't be
1839            // `const Sized` - whether `is_unsized` continues to return `false` at that point will
1840            // need to be revisited and will depend on what `is_unsized` is used for.
1841            | BackendRepr::SimdScalableVector { .. }
1842            | BackendRepr::SimdVector { .. } => false,
1843            BackendRepr::Memory { sized } => !sized,
1844        }
1845    }
1846
1847    #[inline]
1848    pub fn is_sized(&self) -> bool {
1849        !self.is_unsized()
1850    }
1851
1852    /// Returns `true` if this is a single signed integer scalar.
1853    /// Sanity check: panics if this is not a scalar type (see PR #70189).
1854    #[inline]
1855    pub fn is_signed(&self) -> bool {
1856        match self {
1857            BackendRepr::Scalar(scal) => scal.is_signed(),
1858            _ => {
    ::core::panicking::panic_fmt(format_args!("`is_signed` on non-scalar ABI {0:?}",
            self));
}panic!("`is_signed` on non-scalar ABI {self:?}"),
1859        }
1860    }
1861
1862    /// Returns `true` if this is a scalar type
1863    #[inline]
1864    pub fn is_scalar(&self) -> bool {
1865        #[allow(non_exhaustive_omitted_patterns)] match *self {
    BackendRepr::Scalar(_) => true,
    _ => false,
}matches!(*self, BackendRepr::Scalar(_))
1866    }
1867
1868    /// Returns `true` if this is a bool
1869    #[inline]
1870    pub fn is_bool(&self) -> bool {
1871        #[allow(non_exhaustive_omitted_patterns)] match *self {
    BackendRepr::Scalar(s) if s.is_bool() => true,
    _ => false,
}matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
1872    }
1873
1874    /// The psABI alignment for a `Scalar` or `ScalarPair`
1875    ///
1876    /// `None` for other variants.
1877    pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
1878        match *self {
1879            BackendRepr::Scalar(s) => Some(s.align(cx).abi),
1880            BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
1881            // The align of a Vector can vary in surprising ways
1882            BackendRepr::SimdVector { .. }
1883            | BackendRepr::Memory { .. }
1884            | BackendRepr::SimdScalableVector { .. } => None,
1885        }
1886    }
1887
1888    /// The psABI size for a `Scalar` or `ScalarPair`
1889    ///
1890    /// `None` for other variants
1891    pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
1892        match *self {
1893            // No padding in scalars.
1894            BackendRepr::Scalar(s) => Some(s.size(cx)),
1895            // May have some padding between the pair.
1896            BackendRepr::ScalarPair(s1, s2) => {
1897                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
1898                let size = (field2_offset + s2.size(cx)).align_to(
1899                    self.scalar_align(cx)
1900                        // We absolutely must have an answer here or everything is FUBAR.
1901                        .unwrap(),
1902                );
1903                Some(size)
1904            }
1905            // The size of a Vector can vary in surprising ways
1906            BackendRepr::SimdVector { .. }
1907            | BackendRepr::Memory { .. }
1908            | BackendRepr::SimdScalableVector { .. } => None,
1909        }
1910    }
1911
1912    /// Discard validity range information and allow undef.
1913    pub fn to_union(&self) -> Self {
1914        match *self {
1915            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
1916            BackendRepr::ScalarPair(s1, s2) => {
1917                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
1918            }
1919            BackendRepr::SimdVector { element, count } => {
1920                BackendRepr::SimdVector { element: element.to_union(), count }
1921            }
1922            BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
1923            BackendRepr::SimdScalableVector { element, count, number_of_vectors } => {
1924                BackendRepr::SimdScalableVector {
1925                    element: element.to_union(),
1926                    count,
1927                    number_of_vectors,
1928                }
1929            }
1930        }
1931    }
1932
1933    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
1934        match (self, other) {
1935            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
1936            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
1937            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
1938            (
1939                BackendRepr::SimdVector { element: element_l, count: count_l },
1940                BackendRepr::SimdVector { element: element_r, count: count_r },
1941            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
1942            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
1943                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
1944            }
1945            // Everything else must be strictly identical.
1946            _ => self == other,
1947        }
1948    }
1949}
1950
1951// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
1952#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx,
    VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn eq(&self, other: &Variants<FieldIdx, VariantIdx>) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (Variants::Single { index: __self_0 }, Variants::Single {
                    index: __arg1_0 }) => __self_0 == __arg1_0,
                (Variants::Multiple {
                    tag: __self_0,
                    tag_encoding: __self_1,
                    tag_field: __self_2,
                    variants: __self_3 }, Variants::Multiple {
                    tag: __arg1_0,
                    tag_encoding: __arg1_1,
                    tag_field: __arg1_2,
                    variants: __arg1_3 }) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1 &&
                            __self_2 == __arg1_2 && __self_3 == __arg1_3,
                _ => true,
            }
    }
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx, VariantIdx: ::core::cmp::Eq + Idx>
    ::core::cmp::Eq for Variants<FieldIdx, VariantIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<VariantIdx>;
        let _: ::core::cmp::AssertParamIsEq<Scalar>;
        let _: ::core::cmp::AssertParamIsEq<TagEncoding<VariantIdx>>;
        let _: ::core::cmp::AssertParamIsEq<FieldIdx>;
        let _:
                ::core::cmp::AssertParamIsEq<IndexVec<VariantIdx,
                LayoutData<FieldIdx, VariantIdx>>>;
    }
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx, VariantIdx: ::core::hash::Hash + Idx>
    ::core::hash::Hash for Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            Variants::Single { index: __self_0 } =>
                ::core::hash::Hash::hash(__self_0, state),
            Variants::Multiple {
                tag: __self_0,
                tag_encoding: __self_1,
                tag_field: __self_2,
                variants: __self_3 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state);
                ::core::hash::Hash::hash(__self_2, state);
                ::core::hash::Hash::hash(__self_3, state)
            }
            _ => {}
        }
    }
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx, VariantIdx: ::core::clone::Clone +
    Idx> ::core::clone::Clone for Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn clone(&self) -> Variants<FieldIdx, VariantIdx> {
        match self {
            Variants::Empty => Variants::Empty,
            Variants::Single { index: __self_0 } =>
                Variants::Single {
                    index: ::core::clone::Clone::clone(__self_0),
                },
            Variants::Multiple {
                tag: __self_0,
                tag_encoding: __self_1,
                tag_field: __self_2,
                variants: __self_3 } =>
                Variants::Multiple {
                    tag: ::core::clone::Clone::clone(__self_0),
                    tag_encoding: ::core::clone::Clone::clone(__self_1),
                    tag_field: ::core::clone::Clone::clone(__self_2),
                    variants: ::core::clone::Clone::clone(__self_3),
                },
        }
    }
}Clone, #[automatically_derived]
impl<FieldIdx: ::core::fmt::Debug + Idx, VariantIdx: ::core::fmt::Debug + Idx>
    ::core::fmt::Debug for Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            Variants::Empty => ::core::fmt::Formatter::write_str(f, "Empty"),
            Variants::Single { index: __self_0 } =>
                ::core::fmt::Formatter::debug_struct_field1_finish(f,
                    "Single", "index", &__self_0),
            Variants::Multiple {
                tag: __self_0,
                tag_encoding: __self_1,
                tag_field: __self_2,
                variants: __self_3 } =>
                ::core::fmt::Formatter::debug_struct_field4_finish(f,
                    "Multiple", "tag", __self_0, "tag_encoding", __self_1,
                    "tag_field", __self_2, "variants", &__self_3),
        }
    }
}Debug)]
1953#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<FieldIdx: Idx, VariantIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            Variants<FieldIdx, VariantIdx> where
            __CTX: ::rustc_span::HashStableContext,
            VariantIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>,
            FieldIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    Variants::Empty => {}
                    Variants::Single { index: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    Variants::Multiple {
                        tag: ref __binding_0,
                        tag_encoding: ref __binding_1,
                        tag_field: ref __binding_2,
                        variants: ref __binding_3 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1954pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
1955    /// A type with no valid variants. Must be uninhabited.
1956    Empty,
1957
1958    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
1959    Single {
1960        /// Always `0` for types that cannot have multiple variants.
1961        index: VariantIdx,
1962    },
1963
1964    /// Enum-likes with more than one variant: each variant comes with
1965    /// a *discriminant* (usually the same as the variant index but the user can
1966    /// assign explicit discriminant values). That discriminant is encoded
1967    /// as a *tag* on the machine. The layout of each variant is
1968    /// a struct, and they all have space reserved for the tag.
1969    /// For enums, the tag is the sole field of the layout.
1970    Multiple {
1971        tag: Scalar,
1972        tag_encoding: TagEncoding<VariantIdx>,
1973        tag_field: FieldIdx,
1974        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
1975    },
1976}
1977
1978// NOTE: This struct is generic over the VariantIdx for rust-analyzer usage.
1979#[derive(#[automatically_derived]
impl<VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    TagEncoding<VariantIdx> {
    #[inline]
    fn eq(&self, other: &TagEncoding<VariantIdx>) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (TagEncoding::Niche {
                    untagged_variant: __self_0,
                    niche_variants: __self_1,
                    niche_start: __self_2 }, TagEncoding::Niche {
                    untagged_variant: __arg1_0,
                    niche_variants: __arg1_1,
                    niche_start: __arg1_2 }) =>
                    __self_2 == __arg1_2 && __self_0 == __arg1_0 &&
                        __self_1 == __arg1_1,
                _ => true,
            }
    }
}PartialEq, #[automatically_derived]
impl<VariantIdx: ::core::cmp::Eq + Idx> ::core::cmp::Eq for
    TagEncoding<VariantIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<VariantIdx>;
        let _: ::core::cmp::AssertParamIsEq<RangeInclusive<VariantIdx>>;
        let _: ::core::cmp::AssertParamIsEq<u128>;
    }
}Eq, #[automatically_derived]
impl<VariantIdx: ::core::hash::Hash + Idx> ::core::hash::Hash for
    TagEncoding<VariantIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            TagEncoding::Niche {
                untagged_variant: __self_0,
                niche_variants: __self_1,
                niche_start: __self_2 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state);
                ::core::hash::Hash::hash(__self_2, state)
            }
            _ => {}
        }
    }
}Hash, #[automatically_derived]
impl<VariantIdx: ::core::clone::Clone + Idx> ::core::clone::Clone for
    TagEncoding<VariantIdx> {
    #[inline]
    fn clone(&self) -> TagEncoding<VariantIdx> {
        match self {
            TagEncoding::Direct => TagEncoding::Direct,
            TagEncoding::Niche {
                untagged_variant: __self_0,
                niche_variants: __self_1,
                niche_start: __self_2 } =>
                TagEncoding::Niche {
                    untagged_variant: ::core::clone::Clone::clone(__self_0),
                    niche_variants: ::core::clone::Clone::clone(__self_1),
                    niche_start: ::core::clone::Clone::clone(__self_2),
                },
        }
    }
}Clone, #[automatically_derived]
impl<VariantIdx: ::core::fmt::Debug + Idx> ::core::fmt::Debug for
    TagEncoding<VariantIdx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            TagEncoding::Direct =>
                ::core::fmt::Formatter::write_str(f, "Direct"),
            TagEncoding::Niche {
                untagged_variant: __self_0,
                niche_variants: __self_1,
                niche_start: __self_2 } =>
                ::core::fmt::Formatter::debug_struct_field3_finish(f, "Niche",
                    "untagged_variant", __self_0, "niche_variants", __self_1,
                    "niche_start", &__self_2),
        }
    }
}Debug)]
1980#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<VariantIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            TagEncoding<VariantIdx> where
            __CTX: ::rustc_span::HashStableContext,
            VariantIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    TagEncoding::Direct => {}
                    TagEncoding::Niche {
                        untagged_variant: ref __binding_0,
                        niche_variants: ref __binding_1,
                        niche_start: ref __binding_2 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1981pub enum TagEncoding<VariantIdx: Idx> {
1982    /// The tag directly stores the discriminant, but possibly with a smaller layout
1983    /// (so converting the tag to the discriminant can require sign extension).
1984    Direct,
1985
1986    /// Niche (values invalid for a type) encoding the discriminant.
1987    /// Note that for this encoding, the discriminant and variant index of each variant coincide!
1988    /// This invariant is codified as part of [`layout_sanity_check`](../rustc_ty_utils/layout/invariant/fn.layout_sanity_check.html).
1989    ///
1990    /// The variant `untagged_variant` contains a niche at an arbitrary
1991    /// offset (field [`Variants::Multiple::tag_field`] of the enum).
1992    /// For a variant with variant index `i`, such that `i != untagged_variant`,
1993    /// the tag is set to `(i - niche_variants.start).wrapping_add(niche_start)`
1994    /// (this is wrapping arithmetic using the type of the niche field, cf. the
1995    /// [`tag_for_variant`](../rustc_const_eval/interpret/struct.InterpCx.html#method.tag_for_variant)
1996    /// query implementation).
1997    /// To recover the variant index `i` from a `tag`, the above formula has to be reversed,
1998    /// i.e. `i = tag.wrapping_sub(niche_start) + niche_variants.start`. If `i` ends up outside
1999    /// `niche_variants`, the tag must have encoded the `untagged_variant`.
2000    ///
2001    /// For example, `Option<(usize, &T)>`  is represented such that the tag for
2002    /// `None` is the null pointer in the second tuple field, and
2003    /// `Some` is the identity function (with a non-null reference)
2004    /// and has no additional tag, i.e. the reference being non-null uniquely identifies this variant.
2005    ///
2006    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`
2007    /// range cannot be represented; they must be uninhabited.
2008    /// Nonetheless, uninhabited variants can also fall into the range of `niche_variants`.
2009    Niche {
2010        untagged_variant: VariantIdx,
2011        /// This range *may* contain `untagged_variant` or uninhabited variants;
2012        /// these are then just "dead values" and not used to encode anything.
2013        niche_variants: RangeInclusive<VariantIdx>,
2014        /// This is inbounds of the type of the niche field
2015        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).
2016        niche_start: u128,
2017    },
2018}
2019
2020#[derive(#[automatically_derived]
impl ::core::clone::Clone for Niche {
    #[inline]
    fn clone(&self) -> Niche {
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Primitive>;
        let _: ::core::clone::AssertParamIsClone<WrappingRange>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for Niche { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for Niche {
    #[inline]
    fn eq(&self, other: &Niche) -> bool {
        self.offset == other.offset && self.value == other.value &&
            self.valid_range == other.valid_range
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Niche {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<Primitive>;
        let _: ::core::cmp::AssertParamIsEq<WrappingRange>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Niche {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.offset, state);
        ::core::hash::Hash::hash(&self.value, state);
        ::core::hash::Hash::hash(&self.valid_range, state)
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Niche {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field3_finish(f, "Niche",
            "offset", &self.offset, "value", &self.value, "valid_range",
            &&self.valid_range)
    }
}Debug)]
2021#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Niche where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    Niche {
                        offset: ref __binding_0,
                        value: ref __binding_1,
                        valid_range: ref __binding_2 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
2022pub struct Niche {
2023    pub offset: Size,
2024    pub value: Primitive,
2025    pub valid_range: WrappingRange,
2026}
2027
2028impl Niche {
2029    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
2030        let Scalar::Initialized { value, valid_range } = scalar else { return None };
2031        let niche = Niche { offset, value, valid_range };
2032        if niche.available(cx) > 0 { Some(niche) } else { None }
2033    }
2034
2035    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
2036        let Self { value, valid_range: v, .. } = *self;
2037        let size = value.size(cx);
2038        if !(size.bits() <= 128) {
    ::core::panicking::panic("assertion failed: size.bits() <= 128")
};assert!(size.bits() <= 128);
2039        let max_value = size.unsigned_int_max();
2040
2041        // Find out how many values are outside the valid range.
2042        let niche = v.end.wrapping_add(1)..v.start;
2043        niche.end.wrapping_sub(niche.start) & max_value
2044    }
2045
2046    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
2047        if !(count > 0) { ::core::panicking::panic("assertion failed: count > 0") };assert!(count > 0);
2048
2049        let Self { value, valid_range: v, .. } = *self;
2050        let size = value.size(cx);
2051        if !(size.bits() <= 128) {
    ::core::panicking::panic("assertion failed: size.bits() <= 128")
};assert!(size.bits() <= 128);
2052        let max_value = size.unsigned_int_max();
2053
2054        let niche = v.end.wrapping_add(1)..v.start;
2055        let available = niche.end.wrapping_sub(niche.start) & max_value;
2056        if count > available {
2057            return None;
2058        }
2059
2060        // Extend the range of valid values being reserved by moving either `v.start` or `v.end`
2061        // bound. Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy
2062        // the niche of zero. This is accomplished by preferring enums with 2 variants(`count==1`)
2063        // and always taking the shortest path to niche zero. Having `None` in niche zero can
2064        // enable some special optimizations.
2065        //
2066        // Bound selection criteria:
2067        // 1. Select closest to zero given wrapping semantics.
2068        // 2. Avoid moving past zero if possible.
2069        //
2070        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
2071        // since they have to fit perfectly. If niche zero is already reserved, the selection of
2072        // bounds are of little interest.
2073        let move_start = |v: WrappingRange| {
2074            let start = v.start.wrapping_sub(count) & max_value;
2075            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
2076        };
2077        let move_end = |v: WrappingRange| {
2078            let start = v.end.wrapping_add(1) & max_value;
2079            let end = v.end.wrapping_add(count) & max_value;
2080            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
2081        };
2082        let distance_end_zero = max_value - v.end;
2083        if v.start > v.end {
2084            // zero is unavailable because wrapping occurs
2085            move_end(v)
2086        } else if v.start <= distance_end_zero {
2087            if count <= v.start {
2088                move_start(v)
2089            } else {
2090                // moved past zero, use other bound
2091                move_end(v)
2092            }
2093        } else {
2094            let end = v.end.wrapping_add(count) & max_value;
2095            let overshot_zero = (1..=v.end).contains(&end);
2096            if overshot_zero {
2097                // moved past zero, use other bound
2098                move_start(v)
2099            } else {
2100                move_end(v)
2101            }
2102        }
2103    }
2104}
2105
2106// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
2107#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx,
    VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    fn eq(&self, other: &LayoutData<FieldIdx, VariantIdx>) -> bool {
        self.uninhabited == other.uninhabited && self.fields == other.fields
                                        && self.variants == other.variants &&
                                    self.backend_repr == other.backend_repr &&
                                self.largest_niche == other.largest_niche &&
                            self.align == other.align && self.size == other.size &&
                    self.max_repr_align == other.max_repr_align &&
                self.unadjusted_abi_align == other.unadjusted_abi_align &&
            self.randomization_seed == other.randomization_seed
    }
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx, VariantIdx: ::core::cmp::Eq + Idx>
    ::core::cmp::Eq for LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<FieldsShape<FieldIdx>>;
        let _: ::core::cmp::AssertParamIsEq<Variants<FieldIdx, VariantIdx>>;
        let _: ::core::cmp::AssertParamIsEq<BackendRepr>;
        let _: ::core::cmp::AssertParamIsEq<Option<Niche>>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<AbiAlign>;
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
        let _: ::core::cmp::AssertParamIsEq<Align>;
        let _: ::core::cmp::AssertParamIsEq<Hash64>;
    }
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx, VariantIdx: ::core::hash::Hash + Idx>
    ::core::hash::Hash for LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.fields, state);
        ::core::hash::Hash::hash(&self.variants, state);
        ::core::hash::Hash::hash(&self.backend_repr, state);
        ::core::hash::Hash::hash(&self.largest_niche, state);
        ::core::hash::Hash::hash(&self.uninhabited, state);
        ::core::hash::Hash::hash(&self.align, state);
        ::core::hash::Hash::hash(&self.size, state);
        ::core::hash::Hash::hash(&self.max_repr_align, state);
        ::core::hash::Hash::hash(&self.unadjusted_abi_align, state);
        ::core::hash::Hash::hash(&self.randomization_seed, state)
    }
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx, VariantIdx: ::core::clone::Clone +
    Idx> ::core::clone::Clone for LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    fn clone(&self) -> LayoutData<FieldIdx, VariantIdx> {
        LayoutData {
            fields: ::core::clone::Clone::clone(&self.fields),
            variants: ::core::clone::Clone::clone(&self.variants),
            backend_repr: ::core::clone::Clone::clone(&self.backend_repr),
            largest_niche: ::core::clone::Clone::clone(&self.largest_niche),
            uninhabited: ::core::clone::Clone::clone(&self.uninhabited),
            align: ::core::clone::Clone::clone(&self.align),
            size: ::core::clone::Clone::clone(&self.size),
            max_repr_align: ::core::clone::Clone::clone(&self.max_repr_align),
            unadjusted_abi_align: ::core::clone::Clone::clone(&self.unadjusted_abi_align),
            randomization_seed: ::core::clone::Clone::clone(&self.randomization_seed),
        }
    }
}Clone)]
2108#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<FieldIdx: Idx, VariantIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            LayoutData<FieldIdx, VariantIdx> where
            __CTX: ::rustc_span::HashStableContext,
            FieldIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>,
            VariantIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    LayoutData {
                        fields: ref __binding_0,
                        variants: ref __binding_1,
                        backend_repr: ref __binding_2,
                        largest_niche: ref __binding_3,
                        uninhabited: ref __binding_4,
                        align: ref __binding_5,
                        size: ref __binding_6,
                        max_repr_align: ref __binding_7,
                        unadjusted_abi_align: ref __binding_8,
                        randomization_seed: ref __binding_9 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                        { __binding_4.hash_stable(__hcx, __hasher); }
                        { __binding_5.hash_stable(__hcx, __hasher); }
                        { __binding_6.hash_stable(__hcx, __hasher); }
                        { __binding_7.hash_stable(__hcx, __hasher); }
                        { __binding_8.hash_stable(__hcx, __hasher); }
                        { __binding_9.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
2109pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
2110    /// Says where the fields are located within the layout.
2111    pub fields: FieldsShape<FieldIdx>,
2112
2113    /// Encodes information about multi-variant layouts.
2114    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
2115    /// shared between all variants. One of them will be the discriminant,
2116    /// but e.g. coroutines can have more.
2117    ///
2118    /// To access all fields of this layout, both `fields` and the fields of the active variant
2119    /// must be taken into account.
2120    pub variants: Variants<FieldIdx, VariantIdx>,
2121
2122    /// The `backend_repr` defines how this data will be represented to the codegen backend,
2123    /// and encodes value restrictions via `valid_range`.
2124    ///
2125    /// Note that this is entirely orthogonal to the recursive structure defined by
2126    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
2127    /// `IrForm::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants`
2128    /// have to be taken into account to find all fields of this layout.
2129    pub backend_repr: BackendRepr,
2130
2131    /// The leaf scalar with the largest number of invalid values
2132    /// (i.e. outside of its `valid_range`), if it exists.
2133    pub largest_niche: Option<Niche>,
2134    /// Is this type known to be uninhabted?
2135    ///
2136    /// This is separate from BackendRepr because uninhabited return types can affect ABI,
2137    /// especially in the case of by-pointer struct returns, which allocate stack even when unused.
2138    pub uninhabited: bool,
2139
2140    pub align: AbiAlign,
2141    pub size: Size,
2142
2143    /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
2144    /// Only used on i686-windows, where the argument passing ABI is different when alignment is
2145    /// requested, even if the requested alignment is equal to the natural alignment.
2146    pub max_repr_align: Option<Align>,
2147
2148    /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
2149    /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
2150    /// in some cases.
2151    pub unadjusted_abi_align: Align,
2152
2153    /// The randomization seed based on this type's own repr and its fields.
2154    ///
2155    /// Since randomization is toggled on a per-crate basis even crates that do not have randomization
2156    /// enabled should still calculate a seed so that downstream uses can use it to distinguish different
2157    /// types.
2158    ///
2159    /// For every T and U for which we do not guarantee that a repr(Rust) `Foo<T>` can be coerced or
2160    /// transmuted to `Foo<U>` we aim to create probalistically distinct seeds so that Foo can choose
2161    /// to reorder its fields based on that information. The current implementation is a conservative
2162    /// approximation of this goal.
2163    pub randomization_seed: Hash64,
2164}
2165
2166impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
2167    /// Returns `true` if this is an aggregate type (including a ScalarPair!)
2168    pub fn is_aggregate(&self) -> bool {
2169        match self.backend_repr {
2170            BackendRepr::Scalar(_)
2171            | BackendRepr::SimdVector { .. }
2172            | BackendRepr::SimdScalableVector { .. } => false,
2173            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
2174        }
2175    }
2176
2177    /// Returns `true` if this is an uninhabited type
2178    pub fn is_uninhabited(&self) -> bool {
2179        self.uninhabited
2180    }
2181}
2182
2183impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
2184where
2185    FieldsShape<FieldIdx>: fmt::Debug,
2186    Variants<FieldIdx, VariantIdx>: fmt::Debug,
2187{
2188    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2189        // This is how `Layout` used to print before it become
2190        // `Interned<LayoutData>`. We print it like this to avoid having to update
2191        // expected output in a lot of tests.
2192        let LayoutData {
2193            size,
2194            align,
2195            backend_repr,
2196            fields,
2197            largest_niche,
2198            uninhabited,
2199            variants,
2200            max_repr_align,
2201            unadjusted_abi_align,
2202            randomization_seed,
2203        } = self;
2204        f.debug_struct("Layout")
2205            .field("size", size)
2206            .field("align", align)
2207            .field("backend_repr", backend_repr)
2208            .field("fields", fields)
2209            .field("largest_niche", largest_niche)
2210            .field("uninhabited", uninhabited)
2211            .field("variants", variants)
2212            .field("max_repr_align", max_repr_align)
2213            .field("unadjusted_abi_align", unadjusted_abi_align)
2214            .field("randomization_seed", randomization_seed)
2215            .finish()
2216    }
2217}
2218
/// Classifies what kind of safe(ish) pointer we are describing extra
/// information for (see `PointeeInfo`).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data. `global` indicates whether this box
    /// uses the global allocator or a custom one.
    Box { unpin: bool, global: bool },
}
2229
2230/// Encodes extra information we have about a pointer.
2231///
2232/// Note that this information is advisory only, and backends are free to ignore it:
2233/// if the information is wrong, that can cause UB, but if the information is absent,
2234/// that must always be okay.
2235#[derive(#[automatically_derived]
impl ::core::marker::Copy for PointeeInfo { }Copy, #[automatically_derived]
impl ::core::clone::Clone for PointeeInfo {
    #[inline]
    fn clone(&self) -> PointeeInfo {
        let _: ::core::clone::AssertParamIsClone<Option<PointerKind>>;
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Align>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for PointeeInfo {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field3_finish(f, "PointeeInfo",
            "safe", &self.safe, "size", &self.size, "align", &&self.align)
    }
}Debug)]
2236pub struct PointeeInfo {
2237    /// If this is `None`, then this is a raw pointer.
2238    pub safe: Option<PointerKind>,
2239    /// If `size` is not zero, then the pointer is either null or dereferenceable for this many bytes
2240    /// (independent of `safe`).
2241    ///
2242    /// On a function argument, "dereferenceable" here means "dereferenceable for the entire duration
2243    /// of this function call", i.e. it is UB for the memory that this pointer points to be freed
2244    /// while this function is still running.
2245    pub size: Size,
2246    /// The pointer is guaranteed to be aligned this much (independent of `safe`).
2247    pub align: Align,
2248}
2249
2250impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
2251    /// Returns `true` if the layout corresponds to an unsized type.
2252    #[inline]
2253    pub fn is_unsized(&self) -> bool {
2254        self.backend_repr.is_unsized()
2255    }
2256
2257    #[inline]
2258    pub fn is_sized(&self) -> bool {
2259        self.backend_repr.is_sized()
2260    }
2261
2262    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
2263    pub fn is_1zst(&self) -> bool {
2264        self.is_sized() && self.size.bytes() == 0 && self.align.bytes() == 1
2265    }
2266
2267    /// Returns `true` if the size of the type is only known at runtime.
2268    pub fn is_scalable_vector(&self) -> bool {
2269        #[allow(non_exhaustive_omitted_patterns)] match self.backend_repr {
    BackendRepr::SimdScalableVector { .. } => true,
    _ => false,
}matches!(self.backend_repr, BackendRepr::SimdScalableVector { .. })
2270    }
2271
2272    /// Returns the elements count of a scalable vector.
2273    pub fn scalable_vector_element_count(&self) -> Option<u64> {
2274        match self.backend_repr {
2275            BackendRepr::SimdScalableVector { count, .. } => Some(count),
2276            _ => None,
2277        }
2278    }
2279
2280    /// Returns `true` if the type is a ZST and not unsized.
2281    ///
2282    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
2283    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
2284    pub fn is_zst(&self) -> bool {
2285        match self.backend_repr {
2286            BackendRepr::Scalar(_)
2287            | BackendRepr::ScalarPair(..)
2288            | BackendRepr::SimdScalableVector { .. }
2289            | BackendRepr::SimdVector { .. } => false,
2290            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
2291        }
2292    }
2293
2294    /// Checks if these two `Layout` are equal enough to be considered "the same for all function
2295    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in the
2296    /// `Layout`; the `PassMode` need to be compared as well. Also note that we assume
2297    /// aggregates are passed via `PassMode::Indirect` or `PassMode::Cast`; more strict
2298    /// checks would otherwise be required.
2299    pub fn eq_abi(&self, other: &Self) -> bool {
2300        // The one thing that we are not capturing here is that for unsized types, the metadata must
2301        // also have the same ABI, and moreover that the same metadata leads to the same size. The
2302        // 2nd point is quite hard to check though.
2303        self.size == other.size
2304            && self.is_sized() == other.is_sized()
2305            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
2306            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
2307            && self.align.abi == other.align.abi
2308            && self.max_repr_align == other.max_repr_align
2309            && self.unadjusted_abi_align == other.unadjusted_abi_align
2310    }
2311}
2312
2313#[derive(#[automatically_derived]
impl ::core::marker::Copy for StructKind { }Copy, #[automatically_derived]
impl ::core::clone::Clone for StructKind {
    #[inline]
    fn clone(&self) -> StructKind {
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Align>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for StructKind {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            StructKind::AlwaysSized =>
                ::core::fmt::Formatter::write_str(f, "AlwaysSized"),
            StructKind::MaybeUnsized =>
                ::core::fmt::Formatter::write_str(f, "MaybeUnsized"),
            StructKind::Prefixed(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f,
                    "Prefixed", __self_0, &__self_1),
        }
    }
}Debug)]
2314pub enum StructKind {
2315    /// A tuple, closure, or univariant which cannot be coerced to unsized.
2316    AlwaysSized,
2317    /// A univariant, the last field of which may be coerced to unsized.
2318    MaybeUnsized,
2319    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
2320    Prefixed(Size, Align),
2321}
2322
/// Error type for parsing an ABI name from a string.
#[derive(Clone, Debug)]
pub enum AbiFromStrErr {
    /// not a known ABI
    Unknown,
    /// no "-unwind" variant can be used here
    NoExplicitUnwind,
}