// compiler/rustc_abi/src/lib.rs

1// tidy-alphabetical-start
2#![cfg_attr(feature = "nightly", allow(internal_features))]
3#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
4#![cfg_attr(feature = "nightly", feature(step_trait))]
5// tidy-alphabetical-end
6
7/*! ABI handling for rustc
8
9## What is an "ABI"?
10
11Literally, "application binary interface", which means it is everything about how code interacts,
12at the machine level, with other code. This means it technically covers all of the following:
13- object binary format for e.g. relocations or offset tables
14- in-memory layout of types
15- procedure calling conventions
16
17When we discuss "ABI" in the context of rustc, we are probably discussing calling conventions.
To describe those, `rustc_abi` also covers type layout, as it must for values passed on the stack.
19Despite `rustc_abi` being about calling conventions, it is good to remember these usages exist.
20You will encounter all of them and more if you study target-specific codegen enough!
21Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to
22either or both of
23- `repr(Rust)` types have a mostly-unspecified layout
24- `extern "Rust" fn(A) -> R` has an unspecified calling convention
25
26## Crate Goal
27
28ABI is a foundational concept, so the `rustc_abi` crate serves as an equally foundational crate.
29It cannot carry all details relevant to an ABI: those permeate code generation and linkage.
30Instead, `rustc_abi` is intended to provide the interface for reasoning about the binary interface.
31It should contain traits and types that other crates then use in their implementation.
32For example, a platform's `extern "C" fn` calling convention will be implemented in `rustc_target`
33but `rustc_abi` contains the types for calculating layout and describing register-passing.
34This makes it easier to describe things in the same way across targets, codegen backends, and
35even other Rust compilers, such as rust-analyzer!
36
37*/
38
39use std::fmt;
40#[cfg(feature = "nightly")]
41use std::iter::Step;
42use std::num::{NonZeroUsize, ParseIntError};
43use std::ops::{Add, AddAssign, Deref, Mul, RangeFull, RangeInclusive, Sub};
44use std::str::FromStr;
45
46use bitflags::bitflags;
47#[cfg(feature = "nightly")]
48use rustc_data_structures::stable_hasher::StableOrd;
49use rustc_hashes::Hash64;
50use rustc_index::{Idx, IndexSlice, IndexVec};
51#[cfg(feature = "nightly")]
52use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_Generic};
53
54mod callconv;
55mod canon_abi;
56mod extern_abi;
57mod layout;
58#[cfg(test)]
59mod tests;
60
61pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
62pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
63#[cfg(feature = "nightly")]
64pub use extern_abi::CVariadicStatus;
65pub use extern_abi::{ExternAbi, all_names};
66pub use layout::{FIRST_VARIANT, FieldIdx, LayoutCalculator, LayoutCalculatorError, VariantIdx};
67#[cfg(feature = "nightly")]
68pub use layout::{Layout, TyAbiInterface, TyAndLayout};
69
/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
// Marker trait only: it carries no methods; it exists so the derived
// `HashStable` impls in this crate can bound their context parameter.
#[cfg(feature = "nightly")]
pub trait HashStableContext {}
75
76#[derive(#[automatically_derived]
impl ::core::clone::Clone for ReprFlags {
    #[inline]
    fn clone(&self) -> ReprFlags {
        let _: ::core::clone::AssertParamIsClone<u8>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for ReprFlags { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for ReprFlags {
    #[inline]
    fn eq(&self, other: &ReprFlags) -> bool { self.0 == other.0 }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for ReprFlags {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<u8>;
    }
}Eq, #[automatically_derived]
impl ::core::default::Default for ReprFlags {
    #[inline]
    fn default() -> ReprFlags {
        ReprFlags(::core::default::Default::default())
    }
}Default)]
77#[cfg_attr(
78    feature = "nightly",
79    derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for ReprFlags {
            fn encode(&self, __encoder: &mut __E) {
                match *self {
                    ReprFlags(ref __binding_0) => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for ReprFlags {
            fn decode(__decoder: &mut __D) -> Self {
                ReprFlags(::rustc_serialize::Decodable::decode(__decoder))
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for ReprFlags where __CTX: crate::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    ReprFlags(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)
80)]
81pub struct ReprFlags(u8);
82
83impl ReprFlags {
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_C: Self = Self::from_bits_retain(1 << 0);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_SIMD: Self = Self::from_bits_retain(1 << 1);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_TRANSPARENT: Self = Self::from_bits_retain(1 << 2);
    #[doc = r" Internal only for now. If true, don't reorder fields."]
    #[doc = r" On its own it does not prevent ABI optimizations."]
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_LINEAR: Self = Self::from_bits_retain(1 << 3);
    #[doc =
    r" If true, the type's crate has opted into layout randomization."]
    #[doc =
    r" Other flags can still inhibit reordering and thus randomization."]
    #[doc = r" The seed stored in `ReprOptions.field_shuffle_seed`."]
    #[allow(deprecated, non_upper_case_globals,)]
    pub const RANDOMIZE_LAYOUT: Self = Self::from_bits_retain(1 << 4);
    #[doc =
    r" If true, the type is always passed indirectly by non-Rustic ABIs."]
    #[doc =
    r" See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details."]
    #[allow(deprecated, non_upper_case_globals,)]
    pub const PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS: Self =
        Self::from_bits_retain(1 << 5);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_SCALABLE: Self = Self::from_bits_retain(1 << 6);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const FIELD_ORDER_UNOPTIMIZABLE: Self =
        Self::from_bits_retain(ReprFlags::IS_C.bits() |
                        ReprFlags::IS_SIMD.bits() | ReprFlags::IS_SCALABLE.bits() |
                ReprFlags::IS_LINEAR.bits());
    #[allow(deprecated, non_upper_case_globals,)]
    pub const ABI_UNOPTIMIZABLE: Self =
        Self::from_bits_retain(ReprFlags::IS_C.bits() |
                ReprFlags::IS_SIMD.bits());
}
impl ::bitflags::Flags for ReprFlags {
    const FLAGS: &'static [::bitflags::Flag<ReprFlags>] =
        &[{

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_C", ReprFlags::IS_C)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_SIMD", ReprFlags::IS_SIMD)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_TRANSPARENT",
                            ReprFlags::IS_TRANSPARENT)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_LINEAR", ReprFlags::IS_LINEAR)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("RANDOMIZE_LAYOUT",
                            ReprFlags::RANDOMIZE_LAYOUT)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS",
                            ReprFlags::PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_SCALABLE", ReprFlags::IS_SCALABLE)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("FIELD_ORDER_UNOPTIMIZABLE",
                            ReprFlags::FIELD_ORDER_UNOPTIMIZABLE)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("ABI_UNOPTIMIZABLE",
                            ReprFlags::ABI_UNOPTIMIZABLE)
                    }];
    type Bits = u8;
    fn bits(&self) -> u8 { ReprFlags::bits(self) }
    fn from_bits_retain(bits: u8) -> ReprFlags {
        ReprFlags::from_bits_retain(bits)
    }
}
#[allow(dead_code, deprecated, unused_doc_comments, unused_attributes,
unused_mut, unused_imports, non_upper_case_globals, clippy ::
assign_op_pattern, clippy :: iter_without_into_iter,)]
const _: () =
    {
        #[allow(dead_code, deprecated, unused_attributes)]
        impl ReprFlags {
            /// Get a flags value with all bits unset.
            #[inline]
            pub const fn empty() -> Self {
                Self(<u8 as ::bitflags::Bits>::EMPTY)
            }
            /// Get a flags value with all known bits set.
            #[inline]
            pub const fn all() -> Self {
                let mut truncated = <u8 as ::bitflags::Bits>::EMPTY;
                let mut i = 0;
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                let _ = i;
                Self(truncated)
            }
            /// Get the underlying bits value.
            ///
            /// The returned value is exactly the bits set in this flags value.
            #[inline]
            pub const fn bits(&self) -> u8 { self.0 }
            /// Convert from a bits value.
            ///
            /// This method will return `None` if any unknown bits are set.
            #[inline]
            pub const fn from_bits(bits: u8)
                -> ::bitflags::__private::core::option::Option<Self> {
                let truncated = Self::from_bits_truncate(bits).0;
                if truncated == bits {
                    ::bitflags::__private::core::option::Option::Some(Self(bits))
                } else { ::bitflags::__private::core::option::Option::None }
            }
            /// Convert from a bits value, unsetting any unknown bits.
            #[inline]
            pub const fn from_bits_truncate(bits: u8) -> Self {
                Self(bits & Self::all().0)
            }
            /// Convert from a bits value exactly.
            #[inline]
            pub const fn from_bits_retain(bits: u8) -> Self { Self(bits) }
            /// Get a flags value with the bits of a flag with the given name set.
            ///
            /// This method will return `None` if `name` is empty or doesn't
            /// correspond to any named flag.
            #[inline]
            pub fn from_name(name: &str)
                -> ::bitflags::__private::core::option::Option<Self> {
                {
                    if name == "IS_C" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_C.bits()));
                    }
                };
                ;
                {
                    if name == "IS_SIMD" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_SIMD.bits()));
                    }
                };
                ;
                {
                    if name == "IS_TRANSPARENT" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_TRANSPARENT.bits()));
                    }
                };
                ;
                {
                    if name == "IS_LINEAR" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_LINEAR.bits()));
                    }
                };
                ;
                {
                    if name == "RANDOMIZE_LAYOUT" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::RANDOMIZE_LAYOUT.bits()));
                    }
                };
                ;
                {
                    if name == "PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS.bits()));
                    }
                };
                ;
                {
                    if name == "IS_SCALABLE" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_SCALABLE.bits()));
                    }
                };
                ;
                {
                    if name == "FIELD_ORDER_UNOPTIMIZABLE" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE.bits()));
                    }
                };
                ;
                {
                    if name == "ABI_UNOPTIMIZABLE" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::ABI_UNOPTIMIZABLE.bits()));
                    }
                };
                ;
                let _ = name;
                ::bitflags::__private::core::option::Option::None
            }
            /// Whether all bits in this flags value are unset.
            #[inline]
            pub const fn is_empty(&self) -> bool {
                self.0 == <u8 as ::bitflags::Bits>::EMPTY
            }
            /// Whether all known bits in this flags value are set.
            #[inline]
            pub const fn is_all(&self) -> bool {
                Self::all().0 | self.0 == self.0
            }
            /// Whether any set bits in a source flags value are also set in a target flags value.
            #[inline]
            pub const fn intersects(&self, other: Self) -> bool {
                self.0 & other.0 != <u8 as ::bitflags::Bits>::EMPTY
            }
            /// Whether all set bits in a source flags value are also set in a target flags value.
            #[inline]
            pub const fn contains(&self, other: Self) -> bool {
                self.0 & other.0 == other.0
            }
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            pub fn insert(&mut self, other: Self) {
                *self = Self(self.0).union(other);
            }
            /// The intersection of a source flags value with the complement of a target flags
            /// value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `remove` won't truncate `other`, but the `!` operator will.
            #[inline]
            pub fn remove(&mut self, other: Self) {
                *self = Self(self.0).difference(other);
            }
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            pub fn toggle(&mut self, other: Self) {
                *self = Self(self.0).symmetric_difference(other);
            }
            /// Call `insert` when `value` is `true` or `remove` when `value` is `false`.
            #[inline]
            pub fn set(&mut self, other: Self, value: bool) {
                if value { self.insert(other); } else { self.remove(other); }
            }
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn intersection(self, other: Self) -> Self {
                Self(self.0 & other.0)
            }
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn union(self, other: Self) -> Self {
                Self(self.0 | other.0)
            }
            /// The intersection of a source flags value with the complement of a target flags
            /// value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            #[must_use]
            pub const fn difference(self, other: Self) -> Self {
                Self(self.0 & !other.0)
            }
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn symmetric_difference(self, other: Self) -> Self {
                Self(self.0 ^ other.0)
            }
            /// The bitwise negation (`!`) of the bits in a flags value, truncating the result.
            #[inline]
            #[must_use]
            pub const fn complement(self) -> Self {
                Self::from_bits_truncate(!self.0)
            }
        }
        impl ::bitflags::__private::core::fmt::Binary for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::Binary::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::Octal for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::Octal::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::LowerHex for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::LowerHex::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::UpperHex for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::UpperHex::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::ops::BitOr for ReprFlags {
            type Output = Self;
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            fn bitor(self, other: ReprFlags) -> Self { self.union(other) }
        }
        impl ::bitflags::__private::core::ops::BitOrAssign for ReprFlags {
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            fn bitor_assign(&mut self, other: Self) { self.insert(other); }
        }
        impl ::bitflags::__private::core::ops::BitXor for ReprFlags {
            type Output = Self;
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            fn bitxor(self, other: Self) -> Self {
                self.symmetric_difference(other)
            }
        }
        impl ::bitflags::__private::core::ops::BitXorAssign for ReprFlags {
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            fn bitxor_assign(&mut self, other: Self) { self.toggle(other); }
        }
        impl ::bitflags::__private::core::ops::BitAnd for ReprFlags {
            type Output = Self;
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            fn bitand(self, other: Self) -> Self { self.intersection(other) }
        }
        impl ::bitflags::__private::core::ops::BitAndAssign for ReprFlags {
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            fn bitand_assign(&mut self, other: Self) {
                *self =
                    Self::from_bits_retain(self.bits()).intersection(other);
            }
        }
        impl ::bitflags::__private::core::ops::Sub for ReprFlags {
            type Output = Self;
            /// The intersection of a source flags value with the complement of a target flags value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            fn sub(self, other: Self) -> Self { self.difference(other) }
        }
        impl ::bitflags::__private::core::ops::SubAssign for ReprFlags {
            /// The intersection of a source flags value with the complement of a target flags value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            fn sub_assign(&mut self, other: Self) { self.remove(other); }
        }
        impl ::bitflags::__private::core::ops::Not for ReprFlags {
            type Output = Self;
            /// The bitwise negation (`!`) of the bits in a flags value, truncating the result.
            #[inline]
            fn not(self) -> Self { self.complement() }
        }
        impl ::bitflags::__private::core::iter::Extend<ReprFlags> for
            ReprFlags {
            /// The bitwise or (`|`) of the bits in each flags value.
            fn extend<T: ::bitflags::__private::core::iter::IntoIterator<Item
                = Self>>(&mut self, iterator: T) {
                for item in iterator { self.insert(item) }
            }
        }
        impl ::bitflags::__private::core::iter::FromIterator<ReprFlags> for
            ReprFlags {
            /// The bitwise or (`|`) of the bits in each flags value.
            fn from_iter<T: ::bitflags::__private::core::iter::IntoIterator<Item
                = Self>>(iterator: T) -> Self {
                use ::bitflags::__private::core::iter::Extend;
                let mut result = Self::empty();
                result.extend(iterator);
                result
            }
        }
        impl ReprFlags {
            /// Yield a set of contained flags values.
            ///
            /// Each yielded flags value will correspond to a defined named flag. Any unknown bits
            /// will be yielded together as a final flags value.
            #[inline]
            pub const fn iter(&self) -> ::bitflags::iter::Iter<ReprFlags> {
                ::bitflags::iter::Iter::__private_const_new(<ReprFlags as
                        ::bitflags::Flags>::FLAGS,
                    ReprFlags::from_bits_retain(self.bits()),
                    ReprFlags::from_bits_retain(self.bits()))
            }
            /// Yield a set of contained named flags values.
            ///
            /// This method is like [`iter`](#method.iter), except only yields bits in contained named flags.
            /// Any unknown bits, or bits not corresponding to a contained flag will not be yielded.
            #[inline]
            pub const fn iter_names(&self)
                -> ::bitflags::iter::IterNames<ReprFlags> {
                ::bitflags::iter::IterNames::__private_const_new(<ReprFlags as
                        ::bitflags::Flags>::FLAGS,
                    ReprFlags::from_bits_retain(self.bits()),
                    ReprFlags::from_bits_retain(self.bits()))
            }
        }
        impl ::bitflags::__private::core::iter::IntoIterator for ReprFlags {
            type Item = ReprFlags;
            type IntoIter = ::bitflags::iter::Iter<ReprFlags>;
            fn into_iter(self) -> Self::IntoIter { self.iter() }
        }
    };bitflags! {
84    impl ReprFlags: u8 {
85        const IS_C               = 1 << 0;
86        const IS_SIMD            = 1 << 1;
87        const IS_TRANSPARENT     = 1 << 2;
88        /// Internal only for now. If true, don't reorder fields.
89        /// On its own it does not prevent ABI optimizations.
90        const IS_LINEAR          = 1 << 3;
91        /// If true, the type's crate has opted into layout randomization.
92        /// Other flags can still inhibit reordering and thus randomization.
93        /// The seed stored in `ReprOptions.field_shuffle_seed`.
94        const RANDOMIZE_LAYOUT   = 1 << 4;
95        /// If true, the type is always passed indirectly by non-Rustic ABIs.
96        /// See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details.
97        const PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS = 1 << 5;
98        const IS_SCALABLE        = 1 << 6;
99         // Any of these flags being set prevent field reordering optimisation.
100        const FIELD_ORDER_UNOPTIMIZABLE = ReprFlags::IS_C.bits()
101                                 | ReprFlags::IS_SIMD.bits()
102                                 | ReprFlags::IS_SCALABLE.bits()
103                                 | ReprFlags::IS_LINEAR.bits();
104        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
105    }
106}
107
108// This is the same as `rustc_data_structures::external_bitflags_debug` but without the
109// `rustc_data_structures` to make it build on stable.
110impl std::fmt::Debug for ReprFlags {
111    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
112        bitflags::parser::to_writer(self, f)
113    }
114}
115
116#[derive(#[automatically_derived]
impl ::core::marker::Copy for IntegerType { }Copy, #[automatically_derived]
impl ::core::clone::Clone for IntegerType {
    #[inline]
    fn clone(&self) -> IntegerType {
        let _: ::core::clone::AssertParamIsClone<bool>;
        let _: ::core::clone::AssertParamIsClone<Integer>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for IntegerType {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            IntegerType::Pointer(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f,
                    "Pointer", &__self_0),
            IntegerType::Fixed(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f, "Fixed",
                    __self_0, &__self_1),
        }
    }
}Debug, #[automatically_derived]
impl ::core::cmp::Eq for IntegerType {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<Integer>;
    }
}Eq, #[automatically_derived]
impl ::core::cmp::PartialEq for IntegerType {
    #[inline]
    fn eq(&self, other: &IntegerType) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (IntegerType::Pointer(__self_0),
                    IntegerType::Pointer(__arg1_0)) => __self_0 == __arg1_0,
                (IntegerType::Fixed(__self_0, __self_1),
                    IntegerType::Fixed(__arg1_0, __arg1_1)) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq)]
117#[cfg_attr(
118    feature = "nightly",
119    derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for IntegerType {
            fn encode(&self, __encoder: &mut __E) {
                let disc =
                    match *self {
                        IntegerType::Pointer(ref __binding_0) => { 0usize }
                        IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
                            1usize
                        }
                    };
                ::rustc_serialize::Encoder::emit_u8(__encoder, disc as u8);
                match *self {
                    IntegerType::Pointer(ref __binding_0) => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                    }
                    IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_1,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for IntegerType {
            fn decode(__decoder: &mut __D) -> Self {
                match ::rustc_serialize::Decoder::read_u8(__decoder) as usize
                    {
                    0usize => {
                        IntegerType::Pointer(::rustc_serialize::Decodable::decode(__decoder))
                    }
                    1usize => {
                        IntegerType::Fixed(::rustc_serialize::Decodable::decode(__decoder),
                            ::rustc_serialize::Decodable::decode(__decoder))
                    }
                    n => {
                        ::core::panicking::panic_fmt(format_args!("invalid enum variant tag while decoding `IntegerType`, expected 0..2, actual {0}",
                                n));
                    }
                }
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for IntegerType where __CTX: crate::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    IntegerType::Pointer(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)
120)]
121pub enum IntegerType {
122    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
123    /// `Pointer(true)` means `isize`.
124    Pointer(bool),
125    /// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
126    /// `Fixed(I8, false)` means `u8`.
127    Fixed(Integer, bool),
128}
129
130impl IntegerType {
131    pub fn is_signed(&self) -> bool {
132        match self {
133            IntegerType::Pointer(b) => *b,
134            IntegerType::Fixed(_, b) => *b,
135        }
136    }
137}
138
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum ScalableElt {
    /// `N` in `rustc_scalable_vector(N)` - the element count of the scalable vector
    ElementCount(u16),
    /// `rustc_scalable_vector` w/out `N`, used for tuple types of scalable vectors that only
    /// contain other scalable vectors
    Container,
}
151
152/// Represents the repr options provided by the user.
153#[derive(#[automatically_derived]
impl ::core::marker::Copy for ReprOptions { }Copy, #[automatically_derived]
impl ::core::clone::Clone for ReprOptions {
    #[inline]
    fn clone(&self) -> ReprOptions {
        let _: ::core::clone::AssertParamIsClone<Option<IntegerType>>;
        let _: ::core::clone::AssertParamIsClone<Option<Align>>;
        let _: ::core::clone::AssertParamIsClone<Option<Align>>;
        let _: ::core::clone::AssertParamIsClone<ReprFlags>;
        let _: ::core::clone::AssertParamIsClone<Option<ScalableElt>>;
        let _: ::core::clone::AssertParamIsClone<Hash64>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for ReprOptions {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        let names: &'static _ =
            &["int", "align", "pack", "flags", "scalable",
                        "field_shuffle_seed"];
        let values: &[&dyn ::core::fmt::Debug] =
            &[&self.int, &self.align, &self.pack, &self.flags, &self.scalable,
                        &&self.field_shuffle_seed];
        ::core::fmt::Formatter::debug_struct_fields_finish(f, "ReprOptions",
            names, values)
    }
}Debug, #[automatically_derived]
impl ::core::cmp::Eq for ReprOptions {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Option<IntegerType>>;
        let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
        let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
        let _: ::core::cmp::AssertParamIsEq<ReprFlags>;
        let _: ::core::cmp::AssertParamIsEq<Option<ScalableElt>>;
        let _: ::core::cmp::AssertParamIsEq<Hash64>;
    }
}Eq, #[automatically_derived]
impl ::core::cmp::PartialEq for ReprOptions {
    #[inline]
    fn eq(&self, other: &ReprOptions) -> bool {
        self.int == other.int && self.align == other.align &&
                        self.pack == other.pack && self.flags == other.flags &&
                self.scalable == other.scalable &&
            self.field_shuffle_seed == other.field_shuffle_seed
    }
}PartialEq, #[automatically_derived]
impl ::core::default::Default for ReprOptions {
    #[inline]
    fn default() -> ReprOptions {
        ReprOptions {
            int: ::core::default::Default::default(),
            align: ::core::default::Default::default(),
            pack: ::core::default::Default::default(),
            flags: ::core::default::Default::default(),
            scalable: ::core::default::Default::default(),
            field_shuffle_seed: ::core::default::Default::default(),
        }
    }
}Default)]
154#[cfg_attr(
155    feature = "nightly",
156    derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for ReprOptions {
            fn encode(&self, __encoder: &mut __E) {
                match *self {
                    ReprOptions {
                        int: ref __binding_0,
                        align: ref __binding_1,
                        pack: ref __binding_2,
                        flags: ref __binding_3,
                        scalable: ref __binding_4,
                        field_shuffle_seed: ref __binding_5 } => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_1,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_2,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_3,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_4,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_5,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for ReprOptions {
            fn decode(__decoder: &mut __D) -> Self {
                ReprOptions {
                    int: ::rustc_serialize::Decodable::decode(__decoder),
                    align: ::rustc_serialize::Decodable::decode(__decoder),
                    pack: ::rustc_serialize::Decodable::decode(__decoder),
                    flags: ::rustc_serialize::Decodable::decode(__decoder),
                    scalable: ::rustc_serialize::Decodable::decode(__decoder),
                    field_shuffle_seed: ::rustc_serialize::Decodable::decode(__decoder),
                }
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for ReprOptions where __CTX: crate::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    ReprOptions {
                        int: ref __binding_0,
                        align: ref __binding_1,
                        pack: ref __binding_2,
                        flags: ref __binding_3,
                        scalable: ref __binding_4,
                        field_shuffle_seed: ref __binding_5 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                        { __binding_4.hash_stable(__hcx, __hasher); }
                        { __binding_5.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)
157)]
158pub struct ReprOptions {
159    pub int: Option<IntegerType>,
160    pub align: Option<Align>,
161    pub pack: Option<Align>,
162    pub flags: ReprFlags,
163    /// `#[rustc_scalable_vector]`
164    pub scalable: Option<ScalableElt>,
165    /// The seed to be used for randomizing a type's layout
166    ///
167    /// Note: This could technically be a `u128` which would
168    /// be the "most accurate" hash as it'd encompass the item and crate
169    /// hash without loss, but it does pay the price of being larger.
170    /// Everything's a tradeoff, a 64-bit seed should be sufficient for our
171    /// purposes (primarily `-Z randomize-layout`)
172    pub field_shuffle_seed: Hash64,
173}
174
175impl ReprOptions {
176    #[inline]
177    pub fn simd(&self) -> bool {
178        self.flags.contains(ReprFlags::IS_SIMD)
179    }
180
181    #[inline]
182    pub fn scalable(&self) -> bool {
183        self.flags.contains(ReprFlags::IS_SCALABLE)
184    }
185
186    #[inline]
187    pub fn c(&self) -> bool {
188        self.flags.contains(ReprFlags::IS_C)
189    }
190
191    #[inline]
192    pub fn packed(&self) -> bool {
193        self.pack.is_some()
194    }
195
196    #[inline]
197    pub fn transparent(&self) -> bool {
198        self.flags.contains(ReprFlags::IS_TRANSPARENT)
199    }
200
201    #[inline]
202    pub fn linear(&self) -> bool {
203        self.flags.contains(ReprFlags::IS_LINEAR)
204    }
205
206    /// Returns the discriminant type, given these `repr` options.
207    /// This must only be called on enums!
208    ///
209    /// This is the "typeck type" of the discriminant, which is effectively the maximum size:
210    /// discriminant values will be wrapped to fit (with a lint). Layout can later decide to use a
211    /// smaller type for the tag that stores the discriminant at runtime and that will work just
212    /// fine, it just induces casts when getting/setting the discriminant.
213    pub fn discr_type(&self) -> IntegerType {
214        self.int.unwrap_or(IntegerType::Pointer(true))
215    }
216
217    /// Returns `true` if this `#[repr()]` should inhabit "smart enum
218    /// layout" optimizations, such as representing `Foo<&T>` as a
219    /// single pointer.
220    pub fn inhibit_enum_layout_opt(&self) -> bool {
221        self.c() || self.int.is_some()
222    }
223
224    pub fn inhibit_newtype_abi_optimization(&self) -> bool {
225        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
226    }
227
228    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,
229    /// e.g. `repr(C)` or `repr(<int>)`.
230    pub fn inhibit_struct_field_reordering(&self) -> bool {
231        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
232    }
233
234    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
235    /// was enabled for its declaration crate.
236    pub fn can_randomize_type_layout(&self) -> bool {
237        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
238    }
239
240    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
241    pub fn inhibits_union_abi_opt(&self) -> bool {
242        self.c()
243    }
244}
245
/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer,
///   so the largest expressible lane count is `2^15`.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
252
253/// How pointers are represented in a given address space
254#[derive(#[automatically_derived]
impl ::core::marker::Copy for PointerSpec { }Copy, #[automatically_derived]
impl ::core::clone::Clone for PointerSpec {
    #[inline]
    fn clone(&self) -> PointerSpec {
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Align>;
        let _: ::core::clone::AssertParamIsClone<bool>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for PointerSpec {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(f, "PointerSpec",
            "pointer_size", &self.pointer_size, "pointer_align",
            &self.pointer_align, "pointer_offset", &self.pointer_offset,
            "_is_fat", &&self._is_fat)
    }
}Debug, #[automatically_derived]
impl ::core::cmp::PartialEq for PointerSpec {
    #[inline]
    fn eq(&self, other: &PointerSpec) -> bool {
        self._is_fat == other._is_fat &&
                    self.pointer_size == other.pointer_size &&
                self.pointer_align == other.pointer_align &&
            self.pointer_offset == other.pointer_offset
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for PointerSpec {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<Align>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
    }
}Eq)]
255pub struct PointerSpec {
256    /// The size of the bitwise representation of the pointer.
257    pointer_size: Size,
258    /// The alignment of pointers for this address space
259    pointer_align: Align,
260    /// The size of the value a pointer can be offset by in this address space.
261    pointer_offset: Size,
262    /// Pointers into this address space contain extra metadata
263    /// FIXME(workingjubilee): Consider adequately reflecting this in the compiler?
264    _is_fat: bool,
265}
266
267/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
268/// for a target, which contains everything needed to compute layouts.
269#[derive(#[automatically_derived]
impl ::core::fmt::Debug for TargetDataLayout {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        let names: &'static _ =
            &["endian", "i1_align", "i8_align", "i16_align", "i32_align",
                        "i64_align", "i128_align", "f16_align", "f32_align",
                        "f64_align", "f128_align", "aggregate_align",
                        "vector_align", "default_address_space",
                        "default_address_space_pointer_spec", "address_space_info",
                        "instruction_address_space", "c_enum_min_size"];
        let values: &[&dyn ::core::fmt::Debug] =
            &[&self.endian, &self.i1_align, &self.i8_align, &self.i16_align,
                        &self.i32_align, &self.i64_align, &self.i128_align,
                        &self.f16_align, &self.f32_align, &self.f64_align,
                        &self.f128_align, &self.aggregate_align, &self.vector_align,
                        &self.default_address_space,
                        &self.default_address_space_pointer_spec,
                        &self.address_space_info, &self.instruction_address_space,
                        &&self.c_enum_min_size];
        ::core::fmt::Formatter::debug_struct_fields_finish(f,
            "TargetDataLayout", names, values)
    }
}Debug, #[automatically_derived]
impl ::core::cmp::PartialEq for TargetDataLayout {
    #[inline]
    fn eq(&self, other: &TargetDataLayout) -> bool {
        self.endian == other.endian && self.i1_align == other.i1_align &&
                                                                        self.i8_align == other.i8_align &&
                                                                    self.i16_align == other.i16_align &&
                                                                self.i32_align == other.i32_align &&
                                                            self.i64_align == other.i64_align &&
                                                        self.i128_align == other.i128_align &&
                                                    self.f16_align == other.f16_align &&
                                                self.f32_align == other.f32_align &&
                                            self.f64_align == other.f64_align &&
                                        self.f128_align == other.f128_align &&
                                    self.aggregate_align == other.aggregate_align &&
                                self.vector_align == other.vector_align &&
                            self.default_address_space == other.default_address_space &&
                        self.default_address_space_pointer_spec ==
                            other.default_address_space_pointer_spec &&
                    self.address_space_info == other.address_space_info &&
                self.instruction_address_space ==
                    other.instruction_address_space &&
            self.c_enum_min_size == other.c_enum_min_size
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for TargetDataLayout {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Endian>;
        let _: ::core::cmp::AssertParamIsEq<Align>;
        let _: ::core::cmp::AssertParamIsEq<Vec<(Size, Align)>>;
        let _: ::core::cmp::AssertParamIsEq<AddressSpace>;
        let _: ::core::cmp::AssertParamIsEq<PointerSpec>;
        let _: ::core::cmp::AssertParamIsEq<Vec<(AddressSpace, PointerSpec)>>;
        let _: ::core::cmp::AssertParamIsEq<Integer>;
    }
}Eq)]
270pub struct TargetDataLayout {
271    pub endian: Endian,
272    pub i1_align: Align,
273    pub i8_align: Align,
274    pub i16_align: Align,
275    pub i32_align: Align,
276    pub i64_align: Align,
277    pub i128_align: Align,
278    pub f16_align: Align,
279    pub f32_align: Align,
280    pub f64_align: Align,
281    pub f128_align: Align,
282    pub aggregate_align: Align,
283
284    /// Alignments for vector types.
285    pub vector_align: Vec<(Size, Align)>,
286
287    pub default_address_space: AddressSpace,
288    pub default_address_space_pointer_spec: PointerSpec,
289
290    /// Address space information of all known address spaces.
291    ///
292    /// # Note
293    ///
294    /// This vector does not contain the [`PointerSpec`] relative to the default address space,
295    /// which instead lives in [`Self::default_address_space_pointer_spec`].
296    address_space_info: Vec<(AddressSpace, PointerSpec)>,
297
298    pub instruction_address_space: AddressSpace,
299
300    /// Minimum size of #[repr(C)] enums (default c_int::BITS, usually 32)
301    /// Note: This isn't in LLVM's data layout string, it is `short_enum`
302    /// so the only valid spec for LLVM is c_int::BITS or 8
303    pub c_enum_min_size: Integer,
304}
305
306impl Default for TargetDataLayout {
307    /// Creates an instance of `TargetDataLayout`.
308    fn default() -> TargetDataLayout {
309        let align = |bits| Align::from_bits(bits).unwrap();
310        TargetDataLayout {
311            endian: Endian::Big,
312            i1_align: align(8),
313            i8_align: align(8),
314            i16_align: align(16),
315            i32_align: align(32),
316            i64_align: align(32),
317            i128_align: align(32),
318            f16_align: align(16),
319            f32_align: align(32),
320            f64_align: align(64),
321            f128_align: align(128),
322            aggregate_align: align(8),
323            vector_align: ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [(Size::from_bits(64), align(64)),
                (Size::from_bits(128), align(128))]))vec![
324                (Size::from_bits(64), align(64)),
325                (Size::from_bits(128), align(128)),
326            ],
327            default_address_space: AddressSpace::ZERO,
328            default_address_space_pointer_spec: PointerSpec {
329                pointer_size: Size::from_bits(64),
330                pointer_align: align(64),
331                pointer_offset: Size::from_bits(64),
332                _is_fat: false,
333            },
334            address_space_info: ::alloc::vec::Vec::new()vec![],
335            instruction_address_space: AddressSpace::ZERO,
336            c_enum_min_size: Integer::I32,
337        }
338    }
339}
340
341pub enum TargetDataLayoutErrors<'a> {
342    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
343    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
344    MissingAlignment { cause: &'a str },
345    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
346    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
347    InconsistentTargetPointerWidth { pointer_size: u64, target: u16 },
348    InvalidBitsSize { err: String },
349    UnknownPointerSpecification { err: String },
350}
351
352impl TargetDataLayout {
353    /// Parse data layout from an
354    /// [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
355    ///
356    /// This function doesn't fill `c_enum_min_size` and it will always be `I32` since it can not be
357    /// determined from llvm string.
358    pub fn parse_from_llvm_datalayout_string<'a>(
359        input: &'a str,
360        default_address_space: AddressSpace,
361    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
362        // Parse an address space index from a string.
363        let parse_address_space = |s: &'a str, cause: &'a str| {
364            s.parse::<u32>().map(AddressSpace).map_err(|err| {
365                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
366            })
367        };
368
369        // Parse a bit count from a string.
370        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
371            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
372                kind,
373                bit: s,
374                cause,
375                err,
376            })
377        };
378
379        // Parse a size string.
380        let parse_size =
381            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
382
383        // Parse an alignment string.
384        let parse_align_str = |s: &'a str, cause: &'a str| {
385            let align_from_bits = |bits| {
386                Align::from_bits(bits)
387                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
388            };
389            let abi = parse_bits(s, "alignment", cause)?;
390            Ok(align_from_bits(abi)?)
391        };
392
393        // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`,
394        // ignoring the secondary alignment specifications.
395        let parse_align_seq = |s: &[&'a str], cause: &'a str| {
396            if s.is_empty() {
397                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
398            }
399            parse_align_str(s[0], cause)
400        };
401
402        let mut dl = TargetDataLayout::default();
403        dl.default_address_space = default_address_space;
404
405        let mut i128_align_src = 64;
406        for spec in input.split('-') {
407            let spec_parts = spec.split(':').collect::<Vec<_>>();
408
409            match &*spec_parts {
410                ["e"] => dl.endian = Endian::Little,
411                ["E"] => dl.endian = Endian::Big,
412                [p] if p.starts_with('P') => {
413                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
414                }
415                ["a", a @ ..] => dl.aggregate_align = parse_align_seq(a, "a")?,
416                ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?,
417                ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?,
418                ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?,
419                ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?,
420                [p, s, a @ ..] if p.starts_with("p") => {
421                    let mut p = p.strip_prefix('p').unwrap();
422                    let mut _is_fat = false;
423
424                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
425                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
426
427                    if p.starts_with('f') {
428                        p = p.strip_prefix('f').unwrap();
429                        _is_fat = true;
430                    }
431
432                    // However, we currently don't take into account further specifications:
433                    // an error is emitted instead.
434                    if p.starts_with(char::is_alphabetic) {
435                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
436                            err: p.to_string(),
437                        });
438                    }
439
440                    let addr_space = if !p.is_empty() {
441                        parse_address_space(p, "p-")?
442                    } else {
443                        AddressSpace::ZERO
444                    };
445
446                    let pointer_size = parse_size(s, "p-")?;
447                    let pointer_align = parse_align_seq(a, "p-")?;
448                    let info = PointerSpec {
449                        pointer_offset: pointer_size,
450                        pointer_size,
451                        pointer_align,
452                        _is_fat,
453                    };
454                    if addr_space == default_address_space {
455                        dl.default_address_space_pointer_spec = info;
456                    } else {
457                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
458                            Some(e) => e.1 = info,
459                            None => {
460                                dl.address_space_info.push((addr_space, info));
461                            }
462                        }
463                    }
464                }
465                [p, s, a, _pr, i] if p.starts_with("p") => {
466                    let mut p = p.strip_prefix('p').unwrap();
467                    let mut _is_fat = false;
468
469                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
470                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
471
472                    if p.starts_with('f') {
473                        p = p.strip_prefix('f').unwrap();
474                        _is_fat = true;
475                    }
476
477                    // However, we currently don't take into account further specifications:
478                    // an error is emitted instead.
479                    if p.starts_with(char::is_alphabetic) {
480                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
481                            err: p.to_string(),
482                        });
483                    }
484
485                    let addr_space = if !p.is_empty() {
486                        parse_address_space(p, "p")?
487                    } else {
488                        AddressSpace::ZERO
489                    };
490
491                    let info = PointerSpec {
492                        pointer_size: parse_size(s, "p-")?,
493                        pointer_align: parse_align_str(a, "p-")?,
494                        pointer_offset: parse_size(i, "p-")?,
495                        _is_fat,
496                    };
497
498                    if addr_space == default_address_space {
499                        dl.default_address_space_pointer_spec = info;
500                    } else {
501                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
502                            Some(e) => e.1 = info,
503                            None => {
504                                dl.address_space_info.push((addr_space, info));
505                            }
506                        }
507                    }
508                }
509
510                [s, a @ ..] if s.starts_with('i') => {
511                    let Ok(bits) = s[1..].parse::<u64>() else {
512                        parse_size(&s[1..], "i")?; // For the user error.
513                        continue;
514                    };
515                    let a = parse_align_seq(a, s)?;
516                    match bits {
517                        1 => dl.i1_align = a,
518                        8 => dl.i8_align = a,
519                        16 => dl.i16_align = a,
520                        32 => dl.i32_align = a,
521                        64 => dl.i64_align = a,
522                        _ => {}
523                    }
524                    if bits >= i128_align_src && bits <= 128 {
525                        // Default alignment for i128 is decided by taking the alignment of
526                        // largest-sized i{64..=128}.
527                        i128_align_src = bits;
528                        dl.i128_align = a;
529                    }
530                }
531                [s, a @ ..] if s.starts_with('v') => {
532                    let v_size = parse_size(&s[1..], "v")?;
533                    let a = parse_align_seq(a, s)?;
534                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
535                        v.1 = a;
536                        continue;
537                    }
538                    // No existing entry, add a new one.
539                    dl.vector_align.push((v_size, a));
540                }
541                _ => {} // Ignore everything else.
542            }
543        }
544
545        // Inherit, if not given, address space information for specific LLVM elements from the
546        // default data address space.
547        if (dl.instruction_address_space != dl.default_address_space)
548            && dl
549                .address_space_info
550                .iter()
551                .find(|(a, _)| *a == dl.instruction_address_space)
552                .is_none()
553        {
554            dl.address_space_info.push((
555                dl.instruction_address_space,
556                dl.default_address_space_pointer_spec.clone(),
557            ));
558        }
559
560        Ok(dl)
561    }
562
563    /// Returns **exclusive** upper bound on object size in bytes, in the default data address
564    /// space.
565    ///
566    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
567    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
568    /// index every address within an object along with one byte past the end, along with allowing
569    /// `isize` to store the difference between any two pointers into an object.
570    ///
571    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only for bytes,
572    /// so we adopt such a more-constrained size bound due to its technical limitations.
573    #[inline]
574    pub fn obj_size_bound(&self) -> u64 {
575        match self.pointer_size().bits() {
576            16 => 1 << 15,
577            32 => 1 << 31,
578            64 => 1 << 61,
579            bits => {
    ::core::panicking::panic_fmt(format_args!("obj_size_bound: unknown pointer bit size {0}",
            bits));
}panic!("obj_size_bound: unknown pointer bit size {bits}"),
580        }
581    }
582
583    /// Returns **exclusive** upper bound on object size in bytes.
584    ///
585    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
586    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
587    /// index every address within an object along with one byte past the end, along with allowing
588    /// `isize` to store the difference between any two pointers into an object.
589    ///
590    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only for bytes,
591    /// so we adopt such a more-constrained size bound due to its technical limitations.
592    #[inline]
593    pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 {
594        match self.pointer_size_in(address_space).bits() {
595            16 => 1 << 15,
596            32 => 1 << 31,
597            64 => 1 << 61,
598            bits => {
    ::core::panicking::panic_fmt(format_args!("obj_size_bound: unknown pointer bit size {0}",
            bits));
}panic!("obj_size_bound: unknown pointer bit size {bits}"),
599        }
600    }
601
602    #[inline]
603    pub fn ptr_sized_integer(&self) -> Integer {
604        use Integer::*;
605        match self.pointer_offset().bits() {
606            16 => I16,
607            32 => I32,
608            64 => I64,
609            bits => {
    ::core::panicking::panic_fmt(format_args!("ptr_sized_integer: unknown pointer bit size {0}",
            bits));
}panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
610        }
611    }
612
613    #[inline]
614    pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer {
615        use Integer::*;
616        match self.pointer_offset_in(address_space).bits() {
617            16 => I16,
618            32 => I32,
619            64 => I64,
620            bits => {
    ::core::panicking::panic_fmt(format_args!("ptr_sized_integer: unknown pointer bit size {0}",
            bits));
}panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
621        }
622    }
623
624    /// psABI-mandated alignment for a vector type, if any
625    #[inline]
626    fn cabi_vector_align(&self, vec_size: Size) -> Option<Align> {
627        self.vector_align
628            .iter()
629            .find(|(size, _align)| *size == vec_size)
630            .map(|(_size, align)| *align)
631    }
632
633    /// an alignment resembling the one LLVM would pick for a vector
634    #[inline]
635    pub fn llvmlike_vector_align(&self, vec_size: Size) -> Align {
636        self.cabi_vector_align(vec_size)
637            .unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
638    }
639
    /// Get the pointer size in the default data address space.
    ///
    /// For sizes in other address spaces, see [`Self::pointer_size_in`].
    #[inline]
    pub fn pointer_size(&self) -> Size {
        self.default_address_space_pointer_spec.pointer_size
    }
645
646    /// Get the pointer size in a specific address space.
647    #[inline]
648    pub fn pointer_size_in(&self, c: AddressSpace) -> Size {
649        if c == self.default_address_space {
650            return self.default_address_space_pointer_spec.pointer_size;
651        }
652
653        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
654            e.1.pointer_size
655        } else {
656            {
    ::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
            c));
};panic!("Use of unknown address space {c:?}");
657        }
658    }
659
    /// Get the pointer index in the default data address space.
    ///
    /// The "offset" is the width used for pointer arithmetic, which may be narrower
    /// than the full pointer size on targets with fat pointers (e.g. CHERI).
    #[inline]
    pub fn pointer_offset(&self) -> Size {
        self.default_address_space_pointer_spec.pointer_offset
    }
665
666    /// Get the pointer index in a specific address space.
667    #[inline]
668    pub fn pointer_offset_in(&self, c: AddressSpace) -> Size {
669        if c == self.default_address_space {
670            return self.default_address_space_pointer_spec.pointer_offset;
671        }
672
673        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
674            e.1.pointer_offset
675        } else {
676            {
    ::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
            c));
};panic!("Use of unknown address space {c:?}");
677        }
678    }
679
    /// Get the pointer alignment in the default data address space.
    ///
    /// For alignments in other address spaces, see [`Self::pointer_align_in`].
    #[inline]
    pub fn pointer_align(&self) -> AbiAlign {
        AbiAlign::new(self.default_address_space_pointer_spec.pointer_align)
    }
685
686    /// Get the pointer alignment in a specific address space.
687    #[inline]
688    pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {
689        AbiAlign::new(if c == self.default_address_space {
690            self.default_address_space_pointer_spec.pointer_align
691        } else if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
692            e.1.pointer_align
693        } else {
694            {
    ::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
            c));
};panic!("Use of unknown address space {c:?}");
695        })
696    }
697}
698
/// Access to a [`TargetDataLayout`], implemented by any context that carries one.
pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}
702
impl HasDataLayout for TargetDataLayout {
    // A layout trivially provides itself.
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}
709
// used by rust-analyzer
impl HasDataLayout for &TargetDataLayout {
    // Forward through the reference to the underlying layout.
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        (**self).data_layout()
    }
}
717
718/// Endianness of the target, which must match cfg(target-endian).
719#[derive(#[automatically_derived]
impl ::core::marker::Copy for Endian { }Copy, #[automatically_derived]
impl ::core::clone::Clone for Endian {
    #[inline]
    fn clone(&self) -> Endian { *self }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for Endian {
    #[inline]
    fn eq(&self, other: &Endian) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Endian {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {}
}Eq)]
720pub enum Endian {
721    Little,
722    Big,
723}
724
725impl Endian {
726    pub fn as_str(&self) -> &'static str {
727        match self {
728            Self::Little => "little",
729            Self::Big => "big",
730        }
731    }
732}
733
impl fmt::Debug for Endian {
    // Debug output reuses the lowercase `as_str` form ("little"/"big").
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}
739
740impl FromStr for Endian {
741    type Err = String;
742
743    fn from_str(s: &str) -> Result<Self, Self::Err> {
744        match s {
745            "little" => Ok(Self::Little),
746            "big" => Ok(Self::Big),
747            _ => Err(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("unknown endian: \"{0}\"", s))
    })format!(r#"unknown endian: "{s}""#)),
748        }
749    }
750}
751
752/// Size of a type in bytes.
753#[derive(#[automatically_derived]
impl ::core::marker::Copy for Size { }Copy, #[automatically_derived]
impl ::core::clone::Clone for Size {
    #[inline]
    fn clone(&self) -> Size {
        let _: ::core::clone::AssertParamIsClone<u64>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for Size {
    #[inline]
    fn eq(&self, other: &Size) -> bool { self.raw == other.raw }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Size {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<u64>;
    }
}Eq, #[automatically_derived]
impl ::core::cmp::PartialOrd for Size {
    #[inline]
    fn partial_cmp(&self, other: &Size)
        -> ::core::option::Option<::core::cmp::Ordering> {
        ::core::cmp::PartialOrd::partial_cmp(&self.raw, &other.raw)
    }
}PartialOrd, #[automatically_derived]
impl ::core::cmp::Ord for Size {
    #[inline]
    fn cmp(&self, other: &Size) -> ::core::cmp::Ordering {
        ::core::cmp::Ord::cmp(&self.raw, &other.raw)
    }
}Ord, #[automatically_derived]
impl ::core::hash::Hash for Size {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.raw, state)
    }
}Hash)]
754#[cfg_attr(
755    feature = "nightly",
756    derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for Size {
            fn encode(&self, __encoder: &mut __E) {
                match *self {
                    Size { raw: ref __binding_0 } => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for Size {
            fn decode(__decoder: &mut __D) -> Self {
                Size { raw: ::rustc_serialize::Decodable::decode(__decoder) }
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Size where __CTX: crate::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    Size { raw: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)
757)]
758pub struct Size {
759    raw: u64,
760}
761
#[cfg(feature = "nightly")]
impl StableOrd for Size {
    // Sorting by raw byte count is deterministic, so the unstable (faster) sort is safe.
    const CAN_USE_UNSTABLE_SORT: bool = true;

    // `Ord` is implemented as just comparing numerical values and numerical values
    // are not changed by (de-)serialization.
    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
}
770
771// This is debug-printed a lot in larger structs, don't waste too much space there
772impl fmt::Debug for Size {
773    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
774        f.write_fmt(format_args!("Size({0} bytes)", self.bytes()))write!(f, "Size({} bytes)", self.bytes())
775    }
776}
777
778impl Size {
779    pub const ZERO: Size = Size { raw: 0 };
780
781    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
782    /// not a multiple of 8.
783    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
784        let bits = bits.try_into().ok().unwrap();
785        Size { raw: bits.div_ceil(8) }
786    }
787
788    #[inline]
789    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
790        let bytes: u64 = bytes.try_into().ok().unwrap();
791        Size { raw: bytes }
792    }
793
794    #[inline]
795    pub fn bytes(self) -> u64 {
796        self.raw
797    }
798
799    #[inline]
800    pub fn bytes_usize(self) -> usize {
801        self.bytes().try_into().unwrap()
802    }
803
804    #[inline]
805    pub fn bits(self) -> u64 {
806        #[cold]
807        fn overflow(bytes: u64) -> ! {
808            {
    ::core::panicking::panic_fmt(format_args!("Size::bits: {0} bytes in bits doesn\'t fit in u64",
            bytes));
}panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
809        }
810
811        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
812    }
813
814    #[inline]
815    pub fn bits_usize(self) -> usize {
816        self.bits().try_into().unwrap()
817    }
818
819    #[inline]
820    pub fn align_to(self, align: Align) -> Size {
821        let mask = align.bytes() - 1;
822        Size::from_bytes((self.bytes() + mask) & !mask)
823    }
824
825    #[inline]
826    pub fn is_aligned(self, align: Align) -> bool {
827        let mask = align.bytes() - 1;
828        self.bytes() & mask == 0
829    }
830
831    #[inline]
832    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
833        let dl = cx.data_layout();
834
835        let bytes = self.bytes().checked_add(offset.bytes())?;
836
837        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
838    }
839
840    #[inline]
841    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
842        let dl = cx.data_layout();
843
844        let bytes = self.bytes().checked_mul(count)?;
845        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
846    }
847
848    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
849    /// (i.e., if it is negative, fill with 1's on the left).
850    #[inline]
851    pub fn sign_extend(self, value: u128) -> i128 {
852        let size = self.bits();
853        if size == 0 {
854            // Truncated until nothing is left.
855            return 0;
856        }
857        // Sign-extend it.
858        let shift = 128 - size;
859        // Shift the unsigned value to the left, then shift back to the right as signed
860        // (essentially fills with sign bit on the left).
861        ((value << shift) as i128) >> shift
862    }
863
864    /// Truncates `value` to `self` bits.
865    #[inline]
866    pub fn truncate(self, value: u128) -> u128 {
867        let size = self.bits();
868        if size == 0 {
869            // Truncated until nothing is left.
870            return 0;
871        }
872        let shift = 128 - size;
873        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
874        (value << shift) >> shift
875    }
876
877    #[inline]
878    pub fn signed_int_min(&self) -> i128 {
879        self.sign_extend(1_u128 << (self.bits() - 1))
880    }
881
882    #[inline]
883    pub fn signed_int_max(&self) -> i128 {
884        i128::MAX >> (128 - self.bits())
885    }
886
887    #[inline]
888    pub fn unsigned_int_max(&self) -> u128 {
889        u128::MAX >> (128 - self.bits())
890    }
891}
892
893// Panicking addition, subtraction and multiplication for convenience.
894// Avoid during layout computation, return `LayoutError` instead.
895
896impl Add for Size {
897    type Output = Size;
898    #[inline]
899    fn add(self, other: Size) -> Size {
900        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
901            {
    ::core::panicking::panic_fmt(format_args!("Size::add: {0} + {1} doesn\'t fit in u64",
            self.bytes(), other.bytes()));
}panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
902        }))
903    }
904}
905
906impl Sub for Size {
907    type Output = Size;
908    #[inline]
909    fn sub(self, other: Size) -> Size {
910        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
911            {
    ::core::panicking::panic_fmt(format_args!("Size::sub: {0} - {1} would result in negative size",
            self.bytes(), other.bytes()));
}panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
912        }))
913    }
914}
915
impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        // Commutative: delegate to `Size * u64`, which performs the checked multiply.
        size * self
    }
}
923
924impl Mul<u64> for Size {
925    type Output = Size;
926    #[inline]
927    fn mul(self, count: u64) -> Size {
928        match self.bytes().checked_mul(count) {
929            Some(bytes) => Size::from_bytes(bytes),
930            None => {
    ::core::panicking::panic_fmt(format_args!("Size::mul: {0} * {1} doesn\'t fit in u64",
            self.bytes(), count));
}panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
931        }
932    }
933}
934
impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        // Delegates to the panicking `Add` impl, so overflow panics here too.
        *self = *self + other;
    }
}
941
// Makes `Size` usable in ranges (`a..b`); every method simply delegates to the
// corresponding `u64` step operation on the byte count.
#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        // SAFETY: caller upholds `Step::forward_unchecked`'s contract, forwarded to u64.
        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        // SAFETY: caller upholds `Step::backward_unchecked`'s contract, forwarded to u64.
        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
    }
}
979
980/// Alignment of a type in bytes (always a power of two).
981#[derive(#[automatically_derived]
impl ::core::marker::Copy for Align { }Copy, #[automatically_derived]
impl ::core::clone::Clone for Align {
    #[inline]
    fn clone(&self) -> Align {
        let _: ::core::clone::AssertParamIsClone<u8>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for Align {
    #[inline]
    fn eq(&self, other: &Align) -> bool { self.pow2 == other.pow2 }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Align {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<u8>;
    }
}Eq, #[automatically_derived]
impl ::core::cmp::PartialOrd for Align {
    #[inline]
    fn partial_cmp(&self, other: &Align)
        -> ::core::option::Option<::core::cmp::Ordering> {
        ::core::cmp::PartialOrd::partial_cmp(&self.pow2, &other.pow2)
    }
}PartialOrd, #[automatically_derived]
impl ::core::cmp::Ord for Align {
    #[inline]
    fn cmp(&self, other: &Align) -> ::core::cmp::Ordering {
        ::core::cmp::Ord::cmp(&self.pow2, &other.pow2)
    }
}Ord, #[automatically_derived]
impl ::core::hash::Hash for Align {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.pow2, state)
    }
}Hash)]
982#[cfg_attr(
983    feature = "nightly",
984    derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for Align {
            fn encode(&self, __encoder: &mut __E) {
                match *self {
                    Align { pow2: ref __binding_0 } => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for Align {
            fn decode(__decoder: &mut __D) -> Self {
                Align {
                    pow2: ::rustc_serialize::Decodable::decode(__decoder),
                }
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Align where __CTX: crate::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    Align { pow2: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)
985)]
986pub struct Align {
987    pow2: u8,
988}
989
990// This is debug-printed a lot in larger structs, don't waste too much space there
991impl fmt::Debug for Align {
992    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
993        f.write_fmt(format_args!("Align({0} bytes)", self.bytes()))write!(f, "Align({} bytes)", self.bytes())
994    }
995}
996
/// Reasons a byte count can fail to convert into an [`Align`].
#[derive(Clone, Copy)]
pub enum AlignFromBytesError {
    /// The requested alignment was not a power of two.
    NotPowerOfTwo(u64),
    /// The requested alignment exceeded the supported maximum (`Align::MAX`).
    TooLarge(u64),
}
1002
1003impl AlignFromBytesError {
1004    pub fn diag_ident(self) -> &'static str {
1005        match self {
1006            Self::NotPowerOfTwo(_) => "not_power_of_two",
1007            Self::TooLarge(_) => "too_large",
1008        }
1009    }
1010
1011    pub fn align(self) -> u64 {
1012        let (Self::NotPowerOfTwo(align) | Self::TooLarge(align)) = self;
1013        align
1014    }
1015}
1016
impl fmt::Debug for AlignFromBytesError {
    // Debug and Display render identically for this error type.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}
1022
1023impl fmt::Display for AlignFromBytesError {
1024    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1025        match self {
1026            AlignFromBytesError::NotPowerOfTwo(align) => f.write_fmt(format_args!("`{0}` is not a power of 2", align))write!(f, "`{align}` is not a power of 2"),
1027            AlignFromBytesError::TooLarge(align) => f.write_fmt(format_args!("`{0}` is too large", align))write!(f, "`{align}` is too large"),
1028        }
1029    }
1030}
1031
impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const EIGHT: Align = Align { pow2: 3 };
    // LLVM has a maximal supported alignment of 2^29, we inherit that.
    pub const MAX: Align = Align { pow2: 29 };

    /// Either `1 << (pointer_bits - 1)` or [`Align::MAX`], whichever is smaller.
    #[inline]
    pub fn max_for_target(tdl: &TargetDataLayout) -> Align {
        let pointer_bits = tdl.pointer_size().bits();
        // Pointer widths wider than u8 (or beyond MAX's exponent) clamp to MAX.
        if let Ok(pointer_bits) = u8::try_from(pointer_bits)
            && pointer_bits <= Align::MAX.pow2
        {
            Align { pow2: pointer_bits - 1 }
        } else {
            Align::MAX
        }
    }

    /// Converts a bit count to an alignment, rounding up to whole bytes first.
    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    /// Converts a byte count to an alignment; errors unless it is a power of two
    /// no greater than [`Align::MAX`].
    #[inline]
    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        // Error constructors are `#[cold]` so the success path stays compact.
        #[cold]
        const fn not_power_of_2(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::NotPowerOfTwo(align)
        }

        #[cold]
        const fn too_large(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::TooLarge(align)
        }

        // A power of two has exactly one set bit, i.e. equals `1 << trailing_zeros`.
        let tz = align.trailing_zeros();
        if align != (1 << tz) {
            return Err(not_power_of_2(align));
        }

        let pow2 = tz as u8;
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub const fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub const fn bits(self) -> u64 {
        self.bytes() * 8
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    /// Obtain the greatest factor of `size` that is an alignment
    /// (the largest power of two the Size is a multiple of).
    ///
    /// Note that all numbers are factors of 0
    #[inline]
    pub fn max_aligned_factor(size: Size) -> Align {
        Align { pow2: size.bytes().trailing_zeros() as u8 }
    }

    /// Reduces Align to an aligned factor of `size`.
    #[inline]
    pub fn restrict_for_offset(self, size: Size) -> Align {
        self.min(Align::max_aligned_factor(size))
    }
}
1121
1122/// A pair of alignments, ABI-mandated and preferred.
1123///
1124/// The "preferred" alignment is an LLVM concept that is virtually meaningless to Rust code:
1125/// it is not exposed semantically to programmers nor can they meaningfully affect it.
1126/// The only concern for us is that preferred alignment must not be less than the mandated alignment
1127/// and thus in practice the two values are almost always identical.
1128///
1129/// An example of a rare thing actually affected by preferred alignment is aligning of statics.
1130/// It is of effectively no consequence for layout in structs and on the stack.
1131#[derive(#[automatically_derived]
impl ::core::marker::Copy for AbiAlign { }Copy, #[automatically_derived]
impl ::core::clone::Clone for AbiAlign {
    #[inline]
    fn clone(&self) -> AbiAlign {
        let _: ::core::clone::AssertParamIsClone<Align>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for AbiAlign {
    #[inline]
    fn eq(&self, other: &AbiAlign) -> bool { self.abi == other.abi }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for AbiAlign {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Align>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for AbiAlign {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.abi, state)
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for AbiAlign {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field1_finish(f, "AbiAlign",
            "abi", &&self.abi)
    }
}Debug)]
1132#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for AbiAlign where __CTX: crate::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    AbiAlign { abi: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1133pub struct AbiAlign {
1134    pub abi: Align,
1135}
1136
1137impl AbiAlign {
1138    #[inline]
1139    pub fn new(align: Align) -> AbiAlign {
1140        AbiAlign { abi: align }
1141    }
1142
1143    #[inline]
1144    pub fn min(self, other: AbiAlign) -> AbiAlign {
1145        AbiAlign { abi: self.abi.min(other.abi) }
1146    }
1147
1148    #[inline]
1149    pub fn max(self, other: AbiAlign) -> AbiAlign {
1150        AbiAlign { abi: self.abi.max(other.abi) }
1151    }
1152}
1153
// Let `AbiAlign` transparently expose all of `Align`'s methods by
// dereferencing to the wrapped ABI alignment.
impl Deref for AbiAlign {
    type Target = Align;

    fn deref(&self) -> &Self::Target {
        &self.abi
    }
}
1161
/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}
1175
1176impl Integer {
1177    pub fn int_ty_str(self) -> &'static str {
1178        use Integer::*;
1179        match self {
1180            I8 => "i8",
1181            I16 => "i16",
1182            I32 => "i32",
1183            I64 => "i64",
1184            I128 => "i128",
1185        }
1186    }
1187
1188    pub fn uint_ty_str(self) -> &'static str {
1189        use Integer::*;
1190        match self {
1191            I8 => "u8",
1192            I16 => "u16",
1193            I32 => "u32",
1194            I64 => "u64",
1195            I128 => "u128",
1196        }
1197    }
1198
1199    #[inline]
1200    pub fn size(self) -> Size {
1201        use Integer::*;
1202        match self {
1203            I8 => Size::from_bytes(1),
1204            I16 => Size::from_bytes(2),
1205            I32 => Size::from_bytes(4),
1206            I64 => Size::from_bytes(8),
1207            I128 => Size::from_bytes(16),
1208        }
1209    }
1210
1211    /// Gets the Integer type from an IntegerType.
1212    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
1213        let dl = cx.data_layout();
1214
1215        match ity {
1216            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
1217            IntegerType::Fixed(x, _) => x,
1218        }
1219    }
1220
1221    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1222        use Integer::*;
1223        let dl = cx.data_layout();
1224
1225        AbiAlign::new(match self {
1226            I8 => dl.i8_align,
1227            I16 => dl.i16_align,
1228            I32 => dl.i32_align,
1229            I64 => dl.i64_align,
1230            I128 => dl.i128_align,
1231        })
1232    }
1233
1234    /// Returns the largest signed value that can be represented by this Integer.
1235    #[inline]
1236    pub fn signed_max(self) -> i128 {
1237        use Integer::*;
1238        match self {
1239            I8 => i8::MAX as i128,
1240            I16 => i16::MAX as i128,
1241            I32 => i32::MAX as i128,
1242            I64 => i64::MAX as i128,
1243            I128 => i128::MAX,
1244        }
1245    }
1246
1247    /// Returns the smallest signed value that can be represented by this Integer.
1248    #[inline]
1249    pub fn signed_min(self) -> i128 {
1250        use Integer::*;
1251        match self {
1252            I8 => i8::MIN as i128,
1253            I16 => i16::MIN as i128,
1254            I32 => i32::MIN as i128,
1255            I64 => i64::MIN as i128,
1256            I128 => i128::MIN,
1257        }
1258    }
1259
1260    /// Finds the smallest Integer type which can represent the signed value.
1261    #[inline]
1262    pub fn fit_signed(x: i128) -> Integer {
1263        use Integer::*;
1264        match x {
1265            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
1266            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
1267            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
1268            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
1269            _ => I128,
1270        }
1271    }
1272
1273    /// Finds the smallest Integer type which can represent the unsigned value.
1274    #[inline]
1275    pub fn fit_unsigned(x: u128) -> Integer {
1276        use Integer::*;
1277        match x {
1278            0..=0x0000_0000_0000_00ff => I8,
1279            0..=0x0000_0000_0000_ffff => I16,
1280            0..=0x0000_0000_ffff_ffff => I32,
1281            0..=0xffff_ffff_ffff_ffff => I64,
1282            _ => I128,
1283        }
1284    }
1285
1286    /// Finds the smallest integer with the given alignment.
1287    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
1288        use Integer::*;
1289        let dl = cx.data_layout();
1290
1291        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
1292            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
1293        })
1294    }
1295
1296    /// Find the largest integer with the given alignment or less.
1297    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
1298        use Integer::*;
1299        let dl = cx.data_layout();
1300
1301        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
1302        for candidate in [I64, I32, I16] {
1303            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
1304                return candidate;
1305            }
1306        }
1307        I8
1308    }
1309
1310    // FIXME(eddyb) consolidate this and other methods that find the appropriate
1311    // `Integer` given some requirements.
1312    #[inline]
1313    pub fn from_size(size: Size) -> Result<Self, String> {
1314        match size.bits() {
1315            8 => Ok(Integer::I8),
1316            16 => Ok(Integer::I16),
1317            32 => Ok(Integer::I32),
1318            64 => Ok(Integer::I64),
1319            128 => Ok(Integer::I128),
1320            _ => Err(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("rust does not support integers with {0} bits",
                size.bits()))
    })format!("rust does not support integers with {} bits", size.bits())),
1321        }
1322    }
1323}
1324
/// Floating-point types.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Float {
    F16,
    F32,
    F64,
    F128,
}
1334
1335impl Float {
1336    pub fn size(self) -> Size {
1337        use Float::*;
1338
1339        match self {
1340            F16 => Size::from_bits(16),
1341            F32 => Size::from_bits(32),
1342            F64 => Size::from_bits(64),
1343            F128 => Size::from_bits(128),
1344        }
1345    }
1346
1347    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1348        use Float::*;
1349        let dl = cx.data_layout();
1350
1351        AbiAlign::new(match self {
1352            F16 => dl.f16_align,
1353            F32 => dl.f32_align,
1354            F64 => dl.f64_align,
1355            F128 => dl.f128_align,
1356        })
1357    }
1358}
1359
1360/// Fundamental unit of memory access and layout.
1361#[derive(#[automatically_derived]
impl ::core::marker::Copy for Primitive { }Copy, #[automatically_derived]
impl ::core::clone::Clone for Primitive {
    #[inline]
    fn clone(&self) -> Primitive {
        let _: ::core::clone::AssertParamIsClone<Integer>;
        let _: ::core::clone::AssertParamIsClone<bool>;
        let _: ::core::clone::AssertParamIsClone<Float>;
        let _: ::core::clone::AssertParamIsClone<AddressSpace>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for Primitive {
    #[inline]
    fn eq(&self, other: &Primitive) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (Primitive::Int(__self_0, __self_1),
                    Primitive::Int(__arg1_0, __arg1_1)) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                (Primitive::Float(__self_0), Primitive::Float(__arg1_0)) =>
                    __self_0 == __arg1_0,
                (Primitive::Pointer(__self_0), Primitive::Pointer(__arg1_0))
                    => __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Primitive {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Integer>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<Float>;
        let _: ::core::cmp::AssertParamIsEq<AddressSpace>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Primitive {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            Primitive::Int(__self_0, __self_1) => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            Primitive::Float(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
            Primitive::Pointer(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
        }
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Primitive {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            Primitive::Int(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f, "Int",
                    __self_0, &__self_1),
            Primitive::Float(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Float",
                    &__self_0),
            Primitive::Pointer(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f,
                    "Pointer", &__self_0),
        }
    }
}Debug)]
1362#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Primitive where __CTX: crate::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    Primitive::Int(ref __binding_0, ref __binding_1) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    Primitive::Float(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    Primitive::Pointer(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1363pub enum Primitive {
1364    /// The `bool` is the signedness of the `Integer` type.
1365    ///
1366    /// One would think we would not care about such details this low down,
1367    /// but some ABIs are described in terms of C types and ISAs where the
1368    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
1369    /// a negative integer passed by zero-extension will appear positive in
1370    /// the callee, and most operations on it will produce the wrong values.
1371    Int(Integer, bool),
1372    Float(Float),
1373    Pointer(AddressSpace),
1374}
1375
1376impl Primitive {
1377    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
1378        use Primitive::*;
1379        let dl = cx.data_layout();
1380
1381        match self {
1382            Int(i, _) => i.size(),
1383            Float(f) => f.size(),
1384            Pointer(a) => dl.pointer_size_in(a),
1385        }
1386    }
1387
1388    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1389        use Primitive::*;
1390        let dl = cx.data_layout();
1391
1392        match self {
1393            Int(i, _) => i.align(dl),
1394            Float(f) => f.align(dl),
1395            Pointer(a) => dl.pointer_align_in(a),
1396        }
1397    }
1398}
1399
1400/// Inclusive wrap-around range of valid values, that is, if
1401/// start > end, it represents `start..=MAX`, followed by `0..=end`.
1402///
1403/// That is, for an i8 primitive, a range of `254..=2` means following
1404/// sequence:
1405///
1406///    254 (-2), 255 (-1), 0, 1, 2
1407///
1408/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
1409#[derive(#[automatically_derived]
impl ::core::clone::Clone for WrappingRange {
    #[inline]
    fn clone(&self) -> WrappingRange {
        let _: ::core::clone::AssertParamIsClone<u128>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for WrappingRange { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for WrappingRange {
    #[inline]
    fn eq(&self, other: &WrappingRange) -> bool {
        self.start == other.start && self.end == other.end
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for WrappingRange {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<u128>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for WrappingRange {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.start, state);
        ::core::hash::Hash::hash(&self.end, state)
    }
}Hash)]
1410#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for WrappingRange where __CTX: crate::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    WrappingRange { start: ref __binding_0, end: ref __binding_1
                        } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1411pub struct WrappingRange {
1412    pub start: u128,
1413    pub end: u128,
1414}
1415
1416impl WrappingRange {
1417    pub fn full(size: Size) -> Self {
1418        Self { start: 0, end: size.unsigned_int_max() }
1419    }
1420
1421    /// Returns `true` if `v` is contained in the range.
1422    #[inline(always)]
1423    pub fn contains(&self, v: u128) -> bool {
1424        if self.start <= self.end {
1425            self.start <= v && v <= self.end
1426        } else {
1427            self.start <= v || v <= self.end
1428        }
1429    }
1430
1431    /// Returns `true` if all the values in `other` are contained in this range,
1432    /// when the values are considered as having width `size`.
1433    #[inline(always)]
1434    pub fn contains_range(&self, other: Self, size: Size) -> bool {
1435        if self.is_full_for(size) {
1436            true
1437        } else {
1438            let trunc = |x| size.truncate(x);
1439
1440            let delta = self.start;
1441            let max = trunc(self.end.wrapping_sub(delta));
1442
1443            let other_start = trunc(other.start.wrapping_sub(delta));
1444            let other_end = trunc(other.end.wrapping_sub(delta));
1445
1446            // Having shifted both input ranges by `delta`, now we only need to check
1447            // whether `0..=max` contains `other_start..=other_end`, which can only
1448            // happen if the other doesn't wrap since `self` isn't everything.
1449            (other_start <= other_end) && (other_end <= max)
1450        }
1451    }
1452
1453    /// Returns `self` with replaced `start`
1454    #[inline(always)]
1455    fn with_start(mut self, start: u128) -> Self {
1456        self.start = start;
1457        self
1458    }
1459
1460    /// Returns `self` with replaced `end`
1461    #[inline(always)]
1462    fn with_end(mut self, end: u128) -> Self {
1463        self.end = end;
1464        self
1465    }
1466
1467    /// Returns `true` if `size` completely fills the range.
1468    ///
1469    /// Note that this is *not* the same as `self == WrappingRange::full(size)`.
1470    /// Niche calculations can produce full ranges which are not the canonical one;
1471    /// for example `Option<NonZero<u16>>` gets `valid_range: (..=0) | (1..)`.
1472    #[inline]
1473    fn is_full_for(&self, size: Size) -> bool {
1474        let max_value = size.unsigned_int_max();
1475        if true {
    if !(self.start <= max_value && self.end <= max_value) {
        ::core::panicking::panic("assertion failed: self.start <= max_value && self.end <= max_value")
    };
};debug_assert!(self.start <= max_value && self.end <= max_value);
1476        self.start == (self.end.wrapping_add(1) & max_value)
1477    }
1478
1479    /// Checks whether this range is considered non-wrapping when the values are
1480    /// interpreted as *unsigned* numbers of width `size`.
1481    ///
1482    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
1483    /// and `Err(..)` if the range is full so it depends how you think about it.
1484    #[inline]
1485    pub fn no_unsigned_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
1486        if self.is_full_for(size) { Err(..) } else { Ok(self.start <= self.end) }
1487    }
1488
1489    /// Checks whether this range is considered non-wrapping when the values are
1490    /// interpreted as *signed* numbers of width `size`.
1491    ///
1492    /// This is heavily dependent on the `size`, as `100..=200` does wrap when
1493    /// interpreted as `i8`, but doesn't when interpreted as `i16`.
1494    ///
1495    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
1496    /// and `Err(..)` if the range is full so it depends how you think about it.
1497    #[inline]
1498    pub fn no_signed_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
1499        if self.is_full_for(size) {
1500            Err(..)
1501        } else {
1502            let start: i128 = size.sign_extend(self.start);
1503            let end: i128 = size.sign_extend(self.end);
1504            Ok(start <= end)
1505        }
1506    }
1507}
1508
1509impl fmt::Debug for WrappingRange {
1510    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1511        if self.start > self.end {
1512            fmt.write_fmt(format_args!("(..={0}) | ({1}..)", self.end, self.start))write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
1513        } else {
1514            fmt.write_fmt(format_args!("{0}..={1}", self.start, self.end))write!(fmt, "{}..={}", self.start, self.end)?;
1515        }
1516        Ok(())
1517    }
1518}
1519
1520/// Information about one scalar component of a Rust type.
1521#[derive(#[automatically_derived]
impl ::core::clone::Clone for Scalar {
    #[inline]
    fn clone(&self) -> Scalar {
        let _: ::core::clone::AssertParamIsClone<Primitive>;
        let _: ::core::clone::AssertParamIsClone<WrappingRange>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for Scalar { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for Scalar {
    #[inline]
    fn eq(&self, other: &Scalar) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (Scalar::Initialized { value: __self_0, valid_range: __self_1
                    }, Scalar::Initialized {
                    value: __arg1_0, valid_range: __arg1_1 }) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1,
                (Scalar::Union { value: __self_0 }, Scalar::Union {
                    value: __arg1_0 }) => __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Scalar {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Primitive>;
        let _: ::core::cmp::AssertParamIsEq<WrappingRange>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Scalar {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            Scalar::Initialized { value: __self_0, valid_range: __self_1 } =>
                {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            Scalar::Union { value: __self_0 } =>
                ::core::hash::Hash::hash(__self_0, state),
        }
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Scalar {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            Scalar::Initialized { value: __self_0, valid_range: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f,
                    "Initialized", "value", __self_0, "valid_range", &__self_1),
            Scalar::Union { value: __self_0 } =>
                ::core::fmt::Formatter::debug_struct_field1_finish(f, "Union",
                    "value", &__self_0),
        }
    }
}Debug)]
1522#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Scalar where __CTX: crate::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    Scalar::Initialized {
                        value: ref __binding_0, valid_range: ref __binding_1 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    Scalar::Union { value: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1523pub enum Scalar {
1524    Initialized {
1525        value: Primitive,
1526
1527        // FIXME(eddyb) always use the shortest range, e.g., by finding
1528        // the largest space between two consecutive valid values and
1529        // taking everything else as the (shortest) valid range.
1530        valid_range: WrappingRange,
1531    },
1532    Union {
1533        /// Even for unions, we need to use the correct registers for the kind of
1534        /// values inside the union, so we keep the `Primitive` type around. We
1535        /// also use it to compute the size of the scalar.
1536        /// However, unions never have niches and even allow undef,
1537        /// so there is no `valid_range`.
1538        value: Primitive,
1539    },
1540}
1541
1542impl Scalar {
1543    #[inline]
1544    pub fn is_bool(&self) -> bool {
1545        use Integer::*;
1546        #[allow(non_exhaustive_omitted_patterns)] match self {
    Scalar::Initialized {
        value: Primitive::Int(I8, false),
        valid_range: WrappingRange { start: 0, end: 1 } } => true,
    _ => false,
}matches!(
1547            self,
1548            Scalar::Initialized {
1549                value: Primitive::Int(I8, false),
1550                valid_range: WrappingRange { start: 0, end: 1 }
1551            }
1552        )
1553    }
1554
1555    /// Get the primitive representation of this type, ignoring the valid range and whether the
1556    /// value is allowed to be undefined (due to being a union).
1557    pub fn primitive(&self) -> Primitive {
1558        match *self {
1559            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
1560        }
1561    }
1562
1563    pub fn align(self, cx: &impl HasDataLayout) -> AbiAlign {
1564        self.primitive().align(cx)
1565    }
1566
1567    pub fn size(self, cx: &impl HasDataLayout) -> Size {
1568        self.primitive().size(cx)
1569    }
1570
1571    #[inline]
1572    pub fn to_union(&self) -> Self {
1573        Self::Union { value: self.primitive() }
1574    }
1575
1576    #[inline]
1577    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
1578        match *self {
1579            Scalar::Initialized { valid_range, .. } => valid_range,
1580            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
1581        }
1582    }
1583
1584    #[inline]
1585    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
1586    /// union.
1587    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
1588        match self {
1589            Scalar::Initialized { valid_range, .. } => valid_range,
1590            Scalar::Union { .. } => {
    ::core::panicking::panic_fmt(format_args!("cannot change the valid range of a union"));
}panic!("cannot change the valid range of a union"),
1591        }
1592    }
1593
1594    /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole
1595    /// layout.
1596    #[inline]
1597    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
1598        match *self {
1599            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
1600            Scalar::Union { .. } => true,
1601        }
1602    }
1603
1604    /// Returns `true` if this type can be left uninit.
1605    #[inline]
1606    pub fn is_uninit_valid(&self) -> bool {
1607        match *self {
1608            Scalar::Initialized { .. } => false,
1609            Scalar::Union { .. } => true,
1610        }
1611    }
1612
1613    /// Returns `true` if this is a signed integer scalar
1614    #[inline]
1615    pub fn is_signed(&self) -> bool {
1616        match self.primitive() {
1617            Primitive::Int(_, signed) => signed,
1618            _ => false,
1619        }
1620    }
1621}
1622
1623// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.
1624/// Describes how the fields of a type are located in memory.
1625#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    FieldsShape<FieldIdx> {
    #[inline]
    fn eq(&self, other: &FieldsShape<FieldIdx>) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (FieldsShape::Union(__self_0), FieldsShape::Union(__arg1_0))
                    => __self_0 == __arg1_0,
                (FieldsShape::Array { stride: __self_0, count: __self_1 },
                    FieldsShape::Array { stride: __arg1_0, count: __arg1_1 }) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                (FieldsShape::Arbitrary {
                    offsets: __self_0, in_memory_order: __self_1 },
                    FieldsShape::Arbitrary {
                    offsets: __arg1_0, in_memory_order: __arg1_1 }) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1,
                _ => true,
            }
    }
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx> ::core::cmp::Eq for
    FieldsShape<FieldIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<NonZeroUsize>;
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<u64>;
        let _: ::core::cmp::AssertParamIsEq<IndexVec<FieldIdx, Size>>;
        let _: ::core::cmp::AssertParamIsEq<IndexVec<u32, FieldIdx>>;
    }
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx> ::core::hash::Hash for
    FieldsShape<FieldIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            FieldsShape::Union(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
            FieldsShape::Array { stride: __self_0, count: __self_1 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            FieldsShape::Arbitrary {
                offsets: __self_0, in_memory_order: __self_1 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            _ => {}
        }
    }
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx> ::core::clone::Clone for
    FieldsShape<FieldIdx> {
    #[inline]
    fn clone(&self) -> FieldsShape<FieldIdx> {
        match self {
            FieldsShape::Primitive => FieldsShape::Primitive,
            FieldsShape::Union(__self_0) =>
                FieldsShape::Union(::core::clone::Clone::clone(__self_0)),
            FieldsShape::Array { stride: __self_0, count: __self_1 } =>
                FieldsShape::Array {
                    stride: ::core::clone::Clone::clone(__self_0),
                    count: ::core::clone::Clone::clone(__self_1),
                },
            FieldsShape::Arbitrary {
                offsets: __self_0, in_memory_order: __self_1 } =>
                FieldsShape::Arbitrary {
                    offsets: ::core::clone::Clone::clone(__self_0),
                    in_memory_order: ::core::clone::Clone::clone(__self_1),
                },
        }
    }
}Clone, #[automatically_derived]
impl<FieldIdx: ::core::fmt::Debug + Idx> ::core::fmt::Debug for
    FieldsShape<FieldIdx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            FieldsShape::Primitive =>
                ::core::fmt::Formatter::write_str(f, "Primitive"),
            FieldsShape::Union(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Union",
                    &__self_0),
            FieldsShape::Array { stride: __self_0, count: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f, "Array",
                    "stride", __self_0, "count", &__self_1),
            FieldsShape::Arbitrary {
                offsets: __self_0, in_memory_order: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f,
                    "Arbitrary", "offsets", __self_0, "in_memory_order",
                    &__self_1),
        }
    }
}Debug)]
1626#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<FieldIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            FieldsShape<FieldIdx> where __CTX: crate::HashStableContext,
            FieldIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    FieldsShape::Primitive => {}
                    FieldsShape::Union(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    FieldsShape::Array {
                        stride: ref __binding_0, count: ref __binding_1 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    FieldsShape::Arbitrary {
                        offsets: ref __binding_0, in_memory_order: ref __binding_1 }
                        => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1627pub enum FieldsShape<FieldIdx: Idx> {
1628    /// Scalar primitives and `!`, which never have fields.
1629    Primitive,
1630
1631    /// All fields start at no offset. The `usize` is the field count.
1632    Union(NonZeroUsize),
1633
1634    /// Array/vector-like placement, with all fields of identical types.
1635    Array { stride: Size, count: u64 },
1636
1637    /// Struct-like placement, with precomputed offsets.
1638    ///
1639    /// Fields are guaranteed to not overlap, but note that gaps
1640    /// before, between and after all the fields are NOT always
1641    /// padding, and as such their contents may not be discarded.
1642    /// For example, enum variants leave a gap at the start,
1643    /// where the discriminant field in the enum layout goes.
1644    Arbitrary {
1645        /// Offsets for the first byte of each field,
1646        /// ordered to match the source definition order.
1647        /// This vector does not go in increasing order.
1648        // FIXME(eddyb) use small vector optimization for the common case.
1649        offsets: IndexVec<FieldIdx, Size>,
1650
1651        /// Maps memory order field indices to source order indices,
1652        /// depending on how the fields were reordered (if at all).
1653        /// This is a permutation, with both the source order and the
1654        /// memory order using the same (0..n) index ranges.
1655        ///
1656        // FIXME(eddyb) build a better abstraction for permutations, if possible.
1657        // FIXME(camlorn) also consider small vector optimization here.
1658        in_memory_order: IndexVec<u32, FieldIdx>,
1659    },
1660}
1661
1662impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
1663    #[inline]
1664    pub fn count(&self) -> usize {
1665        match *self {
1666            FieldsShape::Primitive => 0,
1667            FieldsShape::Union(count) => count.get(),
1668            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
1669            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
1670        }
1671    }
1672
1673    #[inline]
1674    pub fn offset(&self, i: usize) -> Size {
1675        match *self {
1676            FieldsShape::Primitive => {
1677                {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("FieldsShape::offset: `Primitive`s have no fields")));
}unreachable!("FieldsShape::offset: `Primitive`s have no fields")
1678            }
1679            FieldsShape::Union(count) => {
1680                if !(i < count.get()) {
    {
        ::core::panicking::panic_fmt(format_args!("tried to access field {0} of union with {1} fields",
                i, count));
    }
};assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
1681                Size::ZERO
1682            }
1683            FieldsShape::Array { stride, count } => {
1684                let i = u64::try_from(i).unwrap();
1685                if !(i < count) {
    {
        ::core::panicking::panic_fmt(format_args!("tried to access field {0} of array with {1} fields",
                i, count));
    }
};assert!(i < count, "tried to access field {i} of array with {count} fields");
1686                stride * i
1687            }
1688            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
1689        }
1690    }
1691
1692    /// Gets source indices of the fields by increasing offsets.
1693    #[inline]
1694    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
1695        // Primitives don't really have fields in the way that structs do,
1696        // but having this return an empty iterator for them is unhelpful
1697        // since that makes them look kinda like ZSTs, which they're not.
1698        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };
1699
1700        (0..pseudofield_count).map(move |i| match self {
1701            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
1702            FieldsShape::Arbitrary { in_memory_order, .. } => in_memory_order[i as u32].index(),
1703        })
1704    }
1705}
1706
/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);
1713
1714impl AddressSpace {
1715    /// LLVM's `0` address space.
1716    pub const ZERO: Self = AddressSpace(0);
1717}
1718
1719/// The way we represent values to the backend
1720///
1721/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
1722/// In reality, this implies little about that, but is mostly used to describe the syntactic form
1723/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
1724/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
1725/// how the value will be lowered to the calling convention, in itself.
1726///
1727/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
1728/// and larger values will usually prefer to be represented as memory.
1729#[derive(#[automatically_derived]
impl ::core::clone::Clone for BackendRepr {
    #[inline]
    fn clone(&self) -> BackendRepr {
        let _: ::core::clone::AssertParamIsClone<Scalar>;
        let _: ::core::clone::AssertParamIsClone<u64>;
        let _: ::core::clone::AssertParamIsClone<bool>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for BackendRepr { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for BackendRepr {
    #[inline]
    fn eq(&self, other: &BackendRepr) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (BackendRepr::Scalar(__self_0), BackendRepr::Scalar(__arg1_0))
                    => __self_0 == __arg1_0,
                (BackendRepr::ScalarPair(__self_0, __self_1),
                    BackendRepr::ScalarPair(__arg1_0, __arg1_1)) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1,
                (BackendRepr::ScalableVector {
                    element: __self_0, count: __self_1 },
                    BackendRepr::ScalableVector {
                    element: __arg1_0, count: __arg1_1 }) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                (BackendRepr::SimdVector { element: __self_0, count: __self_1
                    }, BackendRepr::SimdVector {
                    element: __arg1_0, count: __arg1_1 }) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                (BackendRepr::Memory { sized: __self_0 },
                    BackendRepr::Memory { sized: __arg1_0 }) =>
                    __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for BackendRepr {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Scalar>;
        let _: ::core::cmp::AssertParamIsEq<u64>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for BackendRepr {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            BackendRepr::Scalar(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
            BackendRepr::ScalarPair(__self_0, __self_1) => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            BackendRepr::ScalableVector { element: __self_0, count: __self_1 }
                => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            BackendRepr::SimdVector { element: __self_0, count: __self_1 } =>
                {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            BackendRepr::Memory { sized: __self_0 } =>
                ::core::hash::Hash::hash(__self_0, state),
        }
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for BackendRepr {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            BackendRepr::Scalar(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Scalar",
                    &__self_0),
            BackendRepr::ScalarPair(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f,
                    "ScalarPair", __self_0, &__self_1),
            BackendRepr::ScalableVector { element: __self_0, count: __self_1 }
                =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f,
                    "ScalableVector", "element", __self_0, "count", &__self_1),
            BackendRepr::SimdVector { element: __self_0, count: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f,
                    "SimdVector", "element", __self_0, "count", &__self_1),
            BackendRepr::Memory { sized: __self_0 } =>
                ::core::fmt::Formatter::debug_struct_field1_finish(f,
                    "Memory", "sized", &__self_0),
        }
    }
}Debug)]
1730#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for BackendRepr where __CTX: crate::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    BackendRepr::Scalar(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::ScalarPair(ref __binding_0, ref __binding_1) =>
                        {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::ScalableVector {
                        element: ref __binding_0, count: ref __binding_1 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::SimdVector {
                        element: ref __binding_0, count: ref __binding_1 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::Memory { sized: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1731pub enum BackendRepr {
1732    Scalar(Scalar),
1733    ScalarPair(Scalar, Scalar),
1734    ScalableVector {
1735        element: Scalar,
1736        count: u64,
1737    },
1738    SimdVector {
1739        element: Scalar,
1740        count: u64,
1741    },
1742    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
1743    Memory {
1744        /// If true, the size is exact, otherwise it's only a lower bound.
1745        sized: bool,
1746    },
1747}
1748
1749impl BackendRepr {
1750    /// Returns `true` if the layout corresponds to an unsized type.
1751    #[inline]
1752    pub fn is_unsized(&self) -> bool {
1753        match *self {
1754            BackendRepr::Scalar(_)
1755            | BackendRepr::ScalarPair(..)
1756            // FIXME(rustc_scalable_vector): Scalable vectors are `Sized` while the
1757            // `sized_hierarchy` feature is not yet fully implemented. After `sized_hierarchy` is
1758            // fully implemented, scalable vectors will remain `Sized`, they just won't be
1759            // `const Sized` - whether `is_unsized` continues to return `false` at that point will
1760            // need to be revisited and will depend on what `is_unsized` is used for.
1761            | BackendRepr::ScalableVector { .. }
1762            | BackendRepr::SimdVector { .. } => false,
1763            BackendRepr::Memory { sized } => !sized,
1764        }
1765    }
1766
1767    #[inline]
1768    pub fn is_sized(&self) -> bool {
1769        !self.is_unsized()
1770    }
1771
1772    /// Returns `true` if this is a single signed integer scalar.
1773    /// Sanity check: panics if this is not a scalar type (see PR #70189).
1774    #[inline]
1775    pub fn is_signed(&self) -> bool {
1776        match self {
1777            BackendRepr::Scalar(scal) => scal.is_signed(),
1778            _ => {
    ::core::panicking::panic_fmt(format_args!("`is_signed` on non-scalar ABI {0:?}",
            self));
}panic!("`is_signed` on non-scalar ABI {self:?}"),
1779        }
1780    }
1781
1782    /// Returns `true` if this is a scalar type
1783    #[inline]
1784    pub fn is_scalar(&self) -> bool {
1785        #[allow(non_exhaustive_omitted_patterns)] match *self {
    BackendRepr::Scalar(_) => true,
    _ => false,
}matches!(*self, BackendRepr::Scalar(_))
1786    }
1787
1788    /// Returns `true` if this is a bool
1789    #[inline]
1790    pub fn is_bool(&self) -> bool {
1791        #[allow(non_exhaustive_omitted_patterns)] match *self {
    BackendRepr::Scalar(s) if s.is_bool() => true,
    _ => false,
}matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
1792    }
1793
1794    /// The psABI alignment for a `Scalar` or `ScalarPair`
1795    ///
1796    /// `None` for other variants.
1797    pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
1798        match *self {
1799            BackendRepr::Scalar(s) => Some(s.align(cx).abi),
1800            BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
1801            // The align of a Vector can vary in surprising ways
1802            BackendRepr::SimdVector { .. }
1803            | BackendRepr::Memory { .. }
1804            | BackendRepr::ScalableVector { .. } => None,
1805        }
1806    }
1807
1808    /// The psABI size for a `Scalar` or `ScalarPair`
1809    ///
1810    /// `None` for other variants
1811    pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
1812        match *self {
1813            // No padding in scalars.
1814            BackendRepr::Scalar(s) => Some(s.size(cx)),
1815            // May have some padding between the pair.
1816            BackendRepr::ScalarPair(s1, s2) => {
1817                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
1818                let size = (field2_offset + s2.size(cx)).align_to(
1819                    self.scalar_align(cx)
1820                        // We absolutely must have an answer here or everything is FUBAR.
1821                        .unwrap(),
1822                );
1823                Some(size)
1824            }
1825            // The size of a Vector can vary in surprising ways
1826            BackendRepr::SimdVector { .. }
1827            | BackendRepr::Memory { .. }
1828            | BackendRepr::ScalableVector { .. } => None,
1829        }
1830    }
1831
1832    /// Discard validity range information and allow undef.
1833    pub fn to_union(&self) -> Self {
1834        match *self {
1835            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
1836            BackendRepr::ScalarPair(s1, s2) => {
1837                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
1838            }
1839            BackendRepr::SimdVector { element, count } => {
1840                BackendRepr::SimdVector { element: element.to_union(), count }
1841            }
1842            BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
1843            BackendRepr::ScalableVector { element, count } => {
1844                BackendRepr::ScalableVector { element: element.to_union(), count }
1845            }
1846        }
1847    }
1848
1849    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
1850        match (self, other) {
1851            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
1852            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
1853            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
1854            (
1855                BackendRepr::SimdVector { element: element_l, count: count_l },
1856                BackendRepr::SimdVector { element: element_r, count: count_r },
1857            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
1858            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
1859                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
1860            }
1861            // Everything else must be strictly identical.
1862            _ => self == other,
1863        }
1864    }
1865}
1866
1867// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
1868#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx,
    VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn eq(&self, other: &Variants<FieldIdx, VariantIdx>) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (Variants::Single { index: __self_0 }, Variants::Single {
                    index: __arg1_0 }) => __self_0 == __arg1_0,
                (Variants::Multiple {
                    tag: __self_0,
                    tag_encoding: __self_1,
                    tag_field: __self_2,
                    variants: __self_3 }, Variants::Multiple {
                    tag: __arg1_0,
                    tag_encoding: __arg1_1,
                    tag_field: __arg1_2,
                    variants: __arg1_3 }) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1 &&
                            __self_2 == __arg1_2 && __self_3 == __arg1_3,
                _ => true,
            }
    }
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx, VariantIdx: ::core::cmp::Eq + Idx>
    ::core::cmp::Eq for Variants<FieldIdx, VariantIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<VariantIdx>;
        let _: ::core::cmp::AssertParamIsEq<Scalar>;
        let _: ::core::cmp::AssertParamIsEq<TagEncoding<VariantIdx>>;
        let _: ::core::cmp::AssertParamIsEq<FieldIdx>;
        let _:
                ::core::cmp::AssertParamIsEq<IndexVec<VariantIdx,
                LayoutData<FieldIdx, VariantIdx>>>;
    }
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx, VariantIdx: ::core::hash::Hash + Idx>
    ::core::hash::Hash for Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            Variants::Single { index: __self_0 } =>
                ::core::hash::Hash::hash(__self_0, state),
            Variants::Multiple {
                tag: __self_0,
                tag_encoding: __self_1,
                tag_field: __self_2,
                variants: __self_3 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state);
                ::core::hash::Hash::hash(__self_2, state);
                ::core::hash::Hash::hash(__self_3, state)
            }
            _ => {}
        }
    }
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx, VariantIdx: ::core::clone::Clone +
    Idx> ::core::clone::Clone for Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn clone(&self) -> Variants<FieldIdx, VariantIdx> {
        match self {
            Variants::Empty => Variants::Empty,
            Variants::Single { index: __self_0 } =>
                Variants::Single {
                    index: ::core::clone::Clone::clone(__self_0),
                },
            Variants::Multiple {
                tag: __self_0,
                tag_encoding: __self_1,
                tag_field: __self_2,
                variants: __self_3 } =>
                Variants::Multiple {
                    tag: ::core::clone::Clone::clone(__self_0),
                    tag_encoding: ::core::clone::Clone::clone(__self_1),
                    tag_field: ::core::clone::Clone::clone(__self_2),
                    variants: ::core::clone::Clone::clone(__self_3),
                },
        }
    }
}Clone, #[automatically_derived]
impl<FieldIdx: ::core::fmt::Debug + Idx, VariantIdx: ::core::fmt::Debug + Idx>
    ::core::fmt::Debug for Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            Variants::Empty => ::core::fmt::Formatter::write_str(f, "Empty"),
            Variants::Single { index: __self_0 } =>
                ::core::fmt::Formatter::debug_struct_field1_finish(f,
                    "Single", "index", &__self_0),
            Variants::Multiple {
                tag: __self_0,
                tag_encoding: __self_1,
                tag_field: __self_2,
                variants: __self_3 } =>
                ::core::fmt::Formatter::debug_struct_field4_finish(f,
                    "Multiple", "tag", __self_0, "tag_encoding", __self_1,
                    "tag_field", __self_2, "variants", &__self_3),
        }
    }
}Debug)]
1869#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<FieldIdx: Idx, VariantIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            Variants<FieldIdx, VariantIdx> where
            __CTX: crate::HashStableContext,
            VariantIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>,
            FieldIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    Variants::Empty => {}
                    Variants::Single { index: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    Variants::Multiple {
                        tag: ref __binding_0,
                        tag_encoding: ref __binding_1,
                        tag_field: ref __binding_2,
                        variants: ref __binding_3 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1870pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
1871    /// A type with no valid variants. Must be uninhabited.
1872    Empty,
1873
1874    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
1875    Single {
1876        /// Always `0` for types that cannot have multiple variants.
1877        index: VariantIdx,
1878    },
1879
1880    /// Enum-likes with more than one variant: each variant comes with
1881    /// a *discriminant* (usually the same as the variant index but the user can
1882    /// assign explicit discriminant values). That discriminant is encoded
1883    /// as a *tag* on the machine. The layout of each variant is
1884    /// a struct, and they all have space reserved for the tag.
1885    /// For enums, the tag is the sole field of the layout.
1886    Multiple {
1887        tag: Scalar,
1888        tag_encoding: TagEncoding<VariantIdx>,
1889        tag_field: FieldIdx,
1890        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
1891    },
1892}
1893
1894// NOTE: This struct is generic over the VariantIdx for rust-analyzer usage.
1895#[derive(#[automatically_derived]
impl<VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    TagEncoding<VariantIdx> {
    #[inline]
    fn eq(&self, other: &TagEncoding<VariantIdx>) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (TagEncoding::Niche {
                    untagged_variant: __self_0,
                    niche_variants: __self_1,
                    niche_start: __self_2 }, TagEncoding::Niche {
                    untagged_variant: __arg1_0,
                    niche_variants: __arg1_1,
                    niche_start: __arg1_2 }) =>
                    __self_2 == __arg1_2 && __self_0 == __arg1_0 &&
                        __self_1 == __arg1_1,
                _ => true,
            }
    }
}PartialEq, #[automatically_derived]
impl<VariantIdx: ::core::cmp::Eq + Idx> ::core::cmp::Eq for
    TagEncoding<VariantIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<VariantIdx>;
        let _: ::core::cmp::AssertParamIsEq<RangeInclusive<VariantIdx>>;
        let _: ::core::cmp::AssertParamIsEq<u128>;
    }
}Eq, #[automatically_derived]
impl<VariantIdx: ::core::hash::Hash + Idx> ::core::hash::Hash for
    TagEncoding<VariantIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            TagEncoding::Niche {
                untagged_variant: __self_0,
                niche_variants: __self_1,
                niche_start: __self_2 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state);
                ::core::hash::Hash::hash(__self_2, state)
            }
            _ => {}
        }
    }
}Hash, #[automatically_derived]
impl<VariantIdx: ::core::clone::Clone + Idx> ::core::clone::Clone for
    TagEncoding<VariantIdx> {
    #[inline]
    fn clone(&self) -> TagEncoding<VariantIdx> {
        match self {
            TagEncoding::Direct => TagEncoding::Direct,
            TagEncoding::Niche {
                untagged_variant: __self_0,
                niche_variants: __self_1,
                niche_start: __self_2 } =>
                TagEncoding::Niche {
                    untagged_variant: ::core::clone::Clone::clone(__self_0),
                    niche_variants: ::core::clone::Clone::clone(__self_1),
                    niche_start: ::core::clone::Clone::clone(__self_2),
                },
        }
    }
}Clone, #[automatically_derived]
impl<VariantIdx: ::core::fmt::Debug + Idx> ::core::fmt::Debug for
    TagEncoding<VariantIdx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            TagEncoding::Direct =>
                ::core::fmt::Formatter::write_str(f, "Direct"),
            TagEncoding::Niche {
                untagged_variant: __self_0,
                niche_variants: __self_1,
                niche_start: __self_2 } =>
                ::core::fmt::Formatter::debug_struct_field3_finish(f, "Niche",
                    "untagged_variant", __self_0, "niche_variants", __self_1,
                    "niche_start", &__self_2),
        }
    }
}Debug)]
1896#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<VariantIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            TagEncoding<VariantIdx> where __CTX: crate::HashStableContext,
            VariantIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    TagEncoding::Direct => {}
                    TagEncoding::Niche {
                        untagged_variant: ref __binding_0,
                        niche_variants: ref __binding_1,
                        niche_start: ref __binding_2 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1897pub enum TagEncoding<VariantIdx: Idx> {
1898    /// The tag directly stores the discriminant, but possibly with a smaller layout
1899    /// (so converting the tag to the discriminant can require sign extension).
1900    Direct,
1901
1902    /// Niche (values invalid for a type) encoding the discriminant.
1903    /// Note that for this encoding, the discriminant and variant index of each variant coincide!
1904    /// This invariant is codified as part of [`layout_sanity_check`](../rustc_ty_utils/layout/invariant/fn.layout_sanity_check.html).
1905    ///
1906    /// The variant `untagged_variant` contains a niche at an arbitrary
1907    /// offset (field [`Variants::Multiple::tag_field`] of the enum).
1908    /// For a variant with variant index `i`, such that `i != untagged_variant`,
1909    /// the tag is set to `(i - niche_variants.start).wrapping_add(niche_start)`
1910    /// (this is wrapping arithmetic using the type of the niche field, cf. the
1911    /// [`tag_for_variant`](../rustc_const_eval/interpret/struct.InterpCx.html#method.tag_for_variant)
1912    /// query implementation).
1913    /// To recover the variant index `i` from a `tag`, the above formula has to be reversed,
1914    /// i.e. `i = tag.wrapping_sub(niche_start) + niche_variants.start`. If `i` ends up outside
1915    /// `niche_variants`, the tag must have encoded the `untagged_variant`.
1916    ///
1917    /// For example, `Option<(usize, &T)>`  is represented such that the tag for
1918    /// `None` is the null pointer in the second tuple field, and
1919    /// `Some` is the identity function (with a non-null reference)
1920    /// and has no additional tag, i.e. the reference being non-null uniquely identifies this variant.
1921    ///
1922    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`
1923    /// range cannot be represented; they must be uninhabited.
1924    /// Nonetheless, uninhabited variants can also fall into the range of `niche_variants`.
1925    Niche {
1926        untagged_variant: VariantIdx,
1927        /// This range *may* contain `untagged_variant` or uninhabited variants;
1928        /// these are then just "dead values" and not used to encode anything.
1929        niche_variants: RangeInclusive<VariantIdx>,
1930        /// This is inbounds of the type of the niche field
1931        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).
1932        niche_start: u128,
1933    },
1934}
1935
1936#[derive(#[automatically_derived]
impl ::core::clone::Clone for Niche {
    #[inline]
    fn clone(&self) -> Niche {
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Primitive>;
        let _: ::core::clone::AssertParamIsClone<WrappingRange>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for Niche { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for Niche {
    #[inline]
    fn eq(&self, other: &Niche) -> bool {
        self.offset == other.offset && self.value == other.value &&
            self.valid_range == other.valid_range
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Niche {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<Primitive>;
        let _: ::core::cmp::AssertParamIsEq<WrappingRange>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Niche {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.offset, state);
        ::core::hash::Hash::hash(&self.value, state);
        ::core::hash::Hash::hash(&self.valid_range, state)
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Niche {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field3_finish(f, "Niche",
            "offset", &self.offset, "value", &self.value, "valid_range",
            &&self.valid_range)
    }
}Debug)]
1937#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Niche where __CTX: crate::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    Niche {
                        offset: ref __binding_0,
                        value: ref __binding_1,
                        valid_range: ref __binding_2 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1938pub struct Niche {
1939    pub offset: Size,
1940    pub value: Primitive,
1941    pub valid_range: WrappingRange,
1942}
1943
1944impl Niche {
1945    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
1946        let Scalar::Initialized { value, valid_range } = scalar else { return None };
1947        let niche = Niche { offset, value, valid_range };
1948        if niche.available(cx) > 0 { Some(niche) } else { None }
1949    }
1950
1951    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
1952        let Self { value, valid_range: v, .. } = *self;
1953        let size = value.size(cx);
1954        if !(size.bits() <= 128) {
    ::core::panicking::panic("assertion failed: size.bits() <= 128")
};assert!(size.bits() <= 128);
1955        let max_value = size.unsigned_int_max();
1956
1957        // Find out how many values are outside the valid range.
1958        let niche = v.end.wrapping_add(1)..v.start;
1959        niche.end.wrapping_sub(niche.start) & max_value
1960    }
1961
1962    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
1963        if !(count > 0) { ::core::panicking::panic("assertion failed: count > 0") };assert!(count > 0);
1964
1965        let Self { value, valid_range: v, .. } = *self;
1966        let size = value.size(cx);
1967        if !(size.bits() <= 128) {
    ::core::panicking::panic("assertion failed: size.bits() <= 128")
};assert!(size.bits() <= 128);
1968        let max_value = size.unsigned_int_max();
1969
1970        let niche = v.end.wrapping_add(1)..v.start;
1971        let available = niche.end.wrapping_sub(niche.start) & max_value;
1972        if count > available {
1973            return None;
1974        }
1975
1976        // Extend the range of valid values being reserved by moving either `v.start` or `v.end`
1977        // bound. Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy
1978        // the niche of zero. This is accomplished by preferring enums with 2 variants(`count==1`)
1979        // and always taking the shortest path to niche zero. Having `None` in niche zero can
1980        // enable some special optimizations.
1981        //
1982        // Bound selection criteria:
1983        // 1. Select closest to zero given wrapping semantics.
1984        // 2. Avoid moving past zero if possible.
1985        //
1986        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
1987        // since they have to fit perfectly. If niche zero is already reserved, the selection of
1988        // bounds are of little interest.
1989        let move_start = |v: WrappingRange| {
1990            let start = v.start.wrapping_sub(count) & max_value;
1991            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
1992        };
1993        let move_end = |v: WrappingRange| {
1994            let start = v.end.wrapping_add(1) & max_value;
1995            let end = v.end.wrapping_add(count) & max_value;
1996            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
1997        };
1998        let distance_end_zero = max_value - v.end;
1999        if v.start > v.end {
2000            // zero is unavailable because wrapping occurs
2001            move_end(v)
2002        } else if v.start <= distance_end_zero {
2003            if count <= v.start {
2004                move_start(v)
2005            } else {
2006                // moved past zero, use other bound
2007                move_end(v)
2008            }
2009        } else {
2010            let end = v.end.wrapping_add(count) & max_value;
2011            let overshot_zero = (1..=v.end).contains(&end);
2012            if overshot_zero {
2013                // moved past zero, use other bound
2014                move_start(v)
2015            } else {
2016                move_end(v)
2017            }
2018        }
2019    }
2020}
2021
2022// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
2023#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx,
    VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    fn eq(&self, other: &LayoutData<FieldIdx, VariantIdx>) -> bool {
        self.uninhabited == other.uninhabited && self.fields == other.fields
                                        && self.variants == other.variants &&
                                    self.backend_repr == other.backend_repr &&
                                self.largest_niche == other.largest_niche &&
                            self.align == other.align && self.size == other.size &&
                    self.max_repr_align == other.max_repr_align &&
                self.unadjusted_abi_align == other.unadjusted_abi_align &&
            self.randomization_seed == other.randomization_seed
    }
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx, VariantIdx: ::core::cmp::Eq + Idx>
    ::core::cmp::Eq for LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<FieldsShape<FieldIdx>>;
        let _: ::core::cmp::AssertParamIsEq<Variants<FieldIdx, VariantIdx>>;
        let _: ::core::cmp::AssertParamIsEq<BackendRepr>;
        let _: ::core::cmp::AssertParamIsEq<Option<Niche>>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<AbiAlign>;
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
        let _: ::core::cmp::AssertParamIsEq<Align>;
        let _: ::core::cmp::AssertParamIsEq<Hash64>;
    }
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx, VariantIdx: ::core::hash::Hash + Idx>
    ::core::hash::Hash for LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.fields, state);
        ::core::hash::Hash::hash(&self.variants, state);
        ::core::hash::Hash::hash(&self.backend_repr, state);
        ::core::hash::Hash::hash(&self.largest_niche, state);
        ::core::hash::Hash::hash(&self.uninhabited, state);
        ::core::hash::Hash::hash(&self.align, state);
        ::core::hash::Hash::hash(&self.size, state);
        ::core::hash::Hash::hash(&self.max_repr_align, state);
        ::core::hash::Hash::hash(&self.unadjusted_abi_align, state);
        ::core::hash::Hash::hash(&self.randomization_seed, state)
    }
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx, VariantIdx: ::core::clone::Clone +
    Idx> ::core::clone::Clone for LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    fn clone(&self) -> LayoutData<FieldIdx, VariantIdx> {
        LayoutData {
            fields: ::core::clone::Clone::clone(&self.fields),
            variants: ::core::clone::Clone::clone(&self.variants),
            backend_repr: ::core::clone::Clone::clone(&self.backend_repr),
            largest_niche: ::core::clone::Clone::clone(&self.largest_niche),
            uninhabited: ::core::clone::Clone::clone(&self.uninhabited),
            align: ::core::clone::Clone::clone(&self.align),
            size: ::core::clone::Clone::clone(&self.size),
            max_repr_align: ::core::clone::Clone::clone(&self.max_repr_align),
            unadjusted_abi_align: ::core::clone::Clone::clone(&self.unadjusted_abi_align),
            randomization_seed: ::core::clone::Clone::clone(&self.randomization_seed),
        }
    }
}Clone)]
2024#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<FieldIdx: Idx, VariantIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            LayoutData<FieldIdx, VariantIdx> where
            __CTX: crate::HashStableContext,
            FieldIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>,
            VariantIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    LayoutData {
                        fields: ref __binding_0,
                        variants: ref __binding_1,
                        backend_repr: ref __binding_2,
                        largest_niche: ref __binding_3,
                        uninhabited: ref __binding_4,
                        align: ref __binding_5,
                        size: ref __binding_6,
                        max_repr_align: ref __binding_7,
                        unadjusted_abi_align: ref __binding_8,
                        randomization_seed: ref __binding_9 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                        { __binding_4.hash_stable(__hcx, __hasher); }
                        { __binding_5.hash_stable(__hcx, __hasher); }
                        { __binding_6.hash_stable(__hcx, __hasher); }
                        { __binding_7.hash_stable(__hcx, __hasher); }
                        { __binding_8.hash_stable(__hcx, __hasher); }
                        { __binding_9.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
2025pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
2026    /// Says where the fields are located within the layout.
2027    pub fields: FieldsShape<FieldIdx>,
2028
2029    /// Encodes information about multi-variant layouts.
2030    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
2031    /// shared between all variants. One of them will be the discriminant,
2032    /// but e.g. coroutines can have more.
2033    ///
2034    /// To access all fields of this layout, both `fields` and the fields of the active variant
2035    /// must be taken into account.
2036    pub variants: Variants<FieldIdx, VariantIdx>,
2037
2038    /// The `backend_repr` defines how this data will be represented to the codegen backend,
2039    /// and encodes value restrictions via `valid_range`.
2040    ///
2041    /// Note that this is entirely orthogonal to the recursive structure defined by
2042    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
2043    /// `IrForm::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants`
2044    /// have to be taken into account to find all fields of this layout.
2045    pub backend_repr: BackendRepr,
2046
2047    /// The leaf scalar with the largest number of invalid values
2048    /// (i.e. outside of its `valid_range`), if it exists.
2049    pub largest_niche: Option<Niche>,
2050    /// Is this type known to be uninhabted?
2051    ///
2052    /// This is separate from BackendRepr because uninhabited return types can affect ABI,
2053    /// especially in the case of by-pointer struct returns, which allocate stack even when unused.
2054    pub uninhabited: bool,
2055
2056    pub align: AbiAlign,
2057    pub size: Size,
2058
2059    /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
2060    /// Only used on i686-windows, where the argument passing ABI is different when alignment is
2061    /// requested, even if the requested alignment is equal to the natural alignment.
2062    pub max_repr_align: Option<Align>,
2063
2064    /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
2065    /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
2066    /// in some cases.
2067    pub unadjusted_abi_align: Align,
2068
2069    /// The randomization seed based on this type's own repr and its fields.
2070    ///
2071    /// Since randomization is toggled on a per-crate basis even crates that do not have randomization
2072    /// enabled should still calculate a seed so that downstream uses can use it to distinguish different
2073    /// types.
2074    ///
2075    /// For every T and U for which we do not guarantee that a repr(Rust) `Foo<T>` can be coerced or
2076    /// transmuted to `Foo<U>` we aim to create probalistically distinct seeds so that Foo can choose
2077    /// to reorder its fields based on that information. The current implementation is a conservative
2078    /// approximation of this goal.
2079    pub randomization_seed: Hash64,
2080}
2081
2082impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
2083    /// Returns `true` if this is an aggregate type (including a ScalarPair!)
2084    pub fn is_aggregate(&self) -> bool {
2085        match self.backend_repr {
2086            BackendRepr::Scalar(_)
2087            | BackendRepr::SimdVector { .. }
2088            | BackendRepr::ScalableVector { .. } => false,
2089            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
2090        }
2091    }
2092
2093    /// Returns `true` if this is an uninhabited type
2094    pub fn is_uninhabited(&self) -> bool {
2095        self.uninhabited
2096    }
2097}
2098
2099impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
2100where
2101    FieldsShape<FieldIdx>: fmt::Debug,
2102    Variants<FieldIdx, VariantIdx>: fmt::Debug,
2103{
2104    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2105        // This is how `Layout` used to print before it become
2106        // `Interned<LayoutData>`. We print it like this to avoid having to update
2107        // expected output in a lot of tests.
2108        let LayoutData {
2109            size,
2110            align,
2111            backend_repr,
2112            fields,
2113            largest_niche,
2114            uninhabited,
2115            variants,
2116            max_repr_align,
2117            unadjusted_abi_align,
2118            randomization_seed,
2119        } = self;
2120        f.debug_struct("Layout")
2121            .field("size", size)
2122            .field("align", align)
2123            .field("backend_repr", backend_repr)
2124            .field("fields", fields)
2125            .field("largest_niche", largest_niche)
2126            .field("uninhabited", uninhabited)
2127            .field("variants", variants)
2128            .field("max_repr_align", max_repr_align)
2129            .field("unadjusted_abi_align", unadjusted_abi_align)
2130            .field("randomization_seed", randomization_seed)
2131            .finish()
2132    }
2133}
2134
/// What kind of safe pointer a pointer type is, plus the properties of its
/// pointee that are relevant to ABI/attribute decisions.
// The derive list is reconstructed from the macro-expansion residue that was
// fused into this attribute by the source-view extraction: the expanded
// `#[automatically_derived]` impls covered exactly Copy, Clone, PartialEq,
// Eq, and Debug.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data. `global` indicates whether this box
    /// uses the global allocator or a custom one.
    Box { unpin: bool, global: bool },
}
2145
2146/// Encodes extra information we have about a pointer.
2147///
2148/// Note that this information is advisory only, and backends are free to ignore it:
2149/// if the information is wrong, that can cause UB, but if the information is absent,
2150/// that must always be okay.
2151#[derive(#[automatically_derived]
impl ::core::marker::Copy for PointeeInfo { }Copy, #[automatically_derived]
impl ::core::clone::Clone for PointeeInfo {
    #[inline]
    fn clone(&self) -> PointeeInfo {
        let _: ::core::clone::AssertParamIsClone<Option<PointerKind>>;
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Align>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for PointeeInfo {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field3_finish(f, "PointeeInfo",
            "safe", &self.safe, "size", &self.size, "align", &&self.align)
    }
}Debug)]
2152pub struct PointeeInfo {
2153    /// If this is `None`, then this is a raw pointer.
2154    pub safe: Option<PointerKind>,
2155    /// If `size` is not zero, then the pointer is either null or dereferenceable for this many bytes
2156    /// (independent of `safe`).
2157    ///
2158    /// On a function argument, "dereferenceable" here means "dereferenceable for the entire duration
2159    /// of this function call", i.e. it is UB for the memory that this pointer points to be freed
2160    /// while this function is still running.
2161    pub size: Size,
2162    /// The pointer is guaranteed to be aligned this much (independent of `safe`).
2163    pub align: Align,
2164}
2165
2166impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
2167    /// Returns `true` if the layout corresponds to an unsized type.
2168    #[inline]
2169    pub fn is_unsized(&self) -> bool {
2170        self.backend_repr.is_unsized()
2171    }
2172
2173    #[inline]
2174    pub fn is_sized(&self) -> bool {
2175        self.backend_repr.is_sized()
2176    }
2177
2178    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
2179    pub fn is_1zst(&self) -> bool {
2180        self.is_sized() && self.size.bytes() == 0 && self.align.bytes() == 1
2181    }
2182
2183    /// Returns `true` if the size of the type is only known at runtime.
2184    pub fn is_runtime_sized(&self) -> bool {
2185        #[allow(non_exhaustive_omitted_patterns)] match self.backend_repr {
    BackendRepr::ScalableVector { .. } => true,
    _ => false,
}matches!(self.backend_repr, BackendRepr::ScalableVector { .. })
2186    }
2187
2188    /// Returns the elements count of a scalable vector.
2189    pub fn scalable_vector_element_count(&self) -> Option<u64> {
2190        match self.backend_repr {
2191            BackendRepr::ScalableVector { count, .. } => Some(count),
2192            _ => None,
2193        }
2194    }
2195
2196    /// Returns `true` if the type is a ZST and not unsized.
2197    ///
2198    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
2199    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
2200    pub fn is_zst(&self) -> bool {
2201        match self.backend_repr {
2202            BackendRepr::Scalar(_)
2203            | BackendRepr::ScalarPair(..)
2204            | BackendRepr::ScalableVector { .. }
2205            | BackendRepr::SimdVector { .. } => false,
2206            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
2207        }
2208    }
2209
2210    /// Checks if these two `Layout` are equal enough to be considered "the same for all function
2211    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in the
2212    /// `Layout`; the `PassMode` need to be compared as well. Also note that we assume
2213    /// aggregates are passed via `PassMode::Indirect` or `PassMode::Cast`; more strict
2214    /// checks would otherwise be required.
2215    pub fn eq_abi(&self, other: &Self) -> bool {
2216        // The one thing that we are not capturing here is that for unsized types, the metadata must
2217        // also have the same ABI, and moreover that the same metadata leads to the same size. The
2218        // 2nd point is quite hard to check though.
2219        self.size == other.size
2220            && self.is_sized() == other.is_sized()
2221            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
2222            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
2223            && self.align.abi == other.align.abi
2224            && self.max_repr_align == other.max_repr_align
2225            && self.unadjusted_abi_align == other.unadjusted_abi_align
2226    }
2227}
2228
2229#[derive(#[automatically_derived]
impl ::core::marker::Copy for StructKind { }Copy, #[automatically_derived]
impl ::core::clone::Clone for StructKind {
    #[inline]
    fn clone(&self) -> StructKind {
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Align>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for StructKind {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            StructKind::AlwaysSized =>
                ::core::fmt::Formatter::write_str(f, "AlwaysSized"),
            StructKind::MaybeUnsized =>
                ::core::fmt::Formatter::write_str(f, "MaybeUnsized"),
            StructKind::Prefixed(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f,
                    "Prefixed", __self_0, &__self_1),
        }
    }
}Debug)]
2230pub enum StructKind {
2231    /// A tuple, closure, or univariant which cannot be coerced to unsized.
2232    AlwaysSized,
2233    /// A univariant, the last field of which may be coerced to unsized.
2234    MaybeUnsized,
2235    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
2236    Prefixed(Size, Align),
2237}
2238
/// Error type for parsing an ABI name from a string.
// The derive list is reconstructed from the macro-expansion residue that was
// fused into this attribute by the source-view extraction: the expanded impls
// covered exactly Clone and Debug (no Copy, no equality impls).
#[derive(Clone, Debug)]
pub enum AbiFromStrErr {
    /// not a known ABI
    Unknown,
    /// no "-unwind" variant can be used here
    NoExplicitUnwind,
}