// rustc_target/callconv/mod.rs
use std::{fmt, iter};

use rustc_abi::{
    AddressSpace, Align, BackendRepr, CanonAbi, ExternAbi, FieldsShape, HasDataLayout, Primitive,
    Reg, RegKind, Scalar, Size, TyAbiInterface, TyAndLayout, Variants,
};
use rustc_macros::HashStable_Generic;

pub use crate::spec::AbiMap;
use crate::spec::{Arch, HasTargetSpec, HasX86AbiOpt};

mod aarch64;
mod amdgpu;
mod arm;
mod avr;
mod bpf;
mod csky;
mod hexagon;
mod loongarch;
mod m68k;
mod mips;
mod mips64;
mod msp430;
mod nvptx64;
mod powerpc;
mod powerpc64;
mod riscv;
mod s390x;
mod sparc;
mod sparc64;
mod wasm;
mod x86;
mod x86_64;
mod x86_win32;
mod x86_win64;
mod xtensa;

38#[derive(#[automatically_derived]
impl ::core::clone::Clone for PassMode {
    #[inline]
    fn clone(&self) -> PassMode {
        match self {
            PassMode::Ignore => PassMode::Ignore,
            PassMode::Direct(__self_0) =>
                PassMode::Direct(::core::clone::Clone::clone(__self_0)),
            PassMode::Pair(__self_0, __self_1) =>
                PassMode::Pair(::core::clone::Clone::clone(__self_0),
                    ::core::clone::Clone::clone(__self_1)),
            PassMode::Cast { pad_i32: __self_0, cast: __self_1 } =>
                PassMode::Cast {
                    pad_i32: ::core::clone::Clone::clone(__self_0),
                    cast: ::core::clone::Clone::clone(__self_1),
                },
            PassMode::Indirect {
                attrs: __self_0, meta_attrs: __self_1, on_stack: __self_2 } =>
                PassMode::Indirect {
                    attrs: ::core::clone::Clone::clone(__self_0),
                    meta_attrs: ::core::clone::Clone::clone(__self_1),
                    on_stack: ::core::clone::Clone::clone(__self_2),
                },
        }
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for PassMode {
    #[inline]
    fn eq(&self, other: &PassMode) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (PassMode::Direct(__self_0), PassMode::Direct(__arg1_0)) =>
                    __self_0 == __arg1_0,
                (PassMode::Pair(__self_0, __self_1),
                    PassMode::Pair(__arg1_0, __arg1_1)) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1,
                (PassMode::Cast { pad_i32: __self_0, cast: __self_1 },
                    PassMode::Cast { pad_i32: __arg1_0, cast: __arg1_1 }) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1,
                (PassMode::Indirect {
                    attrs: __self_0, meta_attrs: __self_1, on_stack: __self_2 },
                    PassMode::Indirect {
                    attrs: __arg1_0, meta_attrs: __arg1_1, on_stack: __arg1_2 })
                    =>
                    __self_2 == __arg1_2 && __self_0 == __arg1_0 &&
                        __self_1 == __arg1_1,
                _ => true,
            }
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for PassMode {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<ArgAttributes>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<Box<CastTarget>>;
        let _: ::core::cmp::AssertParamIsEq<Option<ArgAttributes>>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for PassMode {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            PassMode::Direct(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
            PassMode::Pair(__self_0, __self_1) => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            PassMode::Cast { pad_i32: __self_0, cast: __self_1 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            PassMode::Indirect {
                attrs: __self_0, meta_attrs: __self_1, on_stack: __self_2 } =>
                {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state);
                ::core::hash::Hash::hash(__self_2, state)
            }
            _ => {}
        }
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for PassMode {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            PassMode::Ignore =>
                ::core::fmt::Formatter::write_str(f, "Ignore"),
            PassMode::Direct(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Direct",
                    &__self_0),
            PassMode::Pair(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f, "Pair",
                    __self_0, &__self_1),
            PassMode::Cast { pad_i32: __self_0, cast: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f, "Cast",
                    "pad_i32", __self_0, "cast", &__self_1),
            PassMode::Indirect {
                attrs: __self_0, meta_attrs: __self_1, on_stack: __self_2 } =>
                ::core::fmt::Formatter::debug_struct_field3_finish(f,
                    "Indirect", "attrs", __self_0, "meta_attrs", __self_1,
                    "on_stack", &__self_2),
        }
    }
}Debug, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for PassMode where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    PassMode::Ignore => {}
                    PassMode::Direct(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    PassMode::Pair(ref __binding_0, ref __binding_1) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    PassMode::Cast {
                        pad_i32: ref __binding_0, cast: ref __binding_1 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    PassMode::Indirect {
                        attrs: ref __binding_0,
                        meta_attrs: ref __binding_1,
                        on_stack: ref __binding_2 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)]
39pub enum PassMode {
40    /// Ignore the argument.
41    ///
42    /// The argument is a ZST.
43    Ignore,
44    /// Pass the argument directly.
45    ///
46    /// The argument has a layout abi of `Scalar` or `Vector`.
47    /// Unfortunately due to past mistakes, in rare cases on wasm, it can also be `Aggregate`.
48    /// This is bad since it leaks LLVM implementation details into the ABI.
49    /// (Also see <https://github.com/rust-lang/rust/issues/115666>.)
50    Direct(ArgAttributes),
51    /// Pass a pair's elements directly in two arguments.
52    ///
53    /// The argument has a layout abi of `ScalarPair`.
54    Pair(ArgAttributes, ArgAttributes),
55    /// Pass the argument after casting it. See the `CastTarget` docs for details.
56    ///
57    /// `pad_i32` indicates if a `Reg::i32()` dummy argument is emitted before the real argument.
58    Cast { pad_i32: bool, cast: Box<CastTarget> },
59    /// Pass the argument indirectly via a hidden pointer.
60    ///
61    /// The `meta_attrs` value, if any, is for the metadata (vtable or length) of an unsized
62    /// argument. (This is the only mode that supports unsized arguments.)
63    ///
64    /// `on_stack` defines that the value should be passed at a fixed stack offset in accordance to
65    /// the ABI rather than passed using a pointer. This corresponds to the `byval` LLVM argument
66    /// attribute. The `byval` argument will use a byte array with the same size as the Rust type
67    /// (which ensures that padding is preserved and that we do not rely on LLVM's struct layout),
68    /// and will use the alignment specified in `attrs.pointee_align` (if `Some`) or the type's
69    /// alignment (if `None`). This means that the alignment will not always
70    /// match the Rust type's alignment; see documentation of `pass_by_stack_offset` for more info.
71    ///
72    /// `on_stack` cannot be true for unsized arguments, i.e., when `meta_attrs` is `Some`.
73    Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
74}
75
76impl PassMode {
77    /// Checks if these two `PassMode` are equal enough to be considered "the same for all
78    /// function call ABIs". However, the `Layout` can also impact ABI decisions,
79    /// so that needs to be compared as well!
80    pub fn eq_abi(&self, other: &Self) -> bool {
81        match (self, other) {
82            (PassMode::Ignore, PassMode::Ignore) => true,
83            (PassMode::Direct(a1), PassMode::Direct(a2)) => a1.eq_abi(a2),
84            (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => a1.eq_abi(a2) && b1.eq_abi(b2),
85            (
86                PassMode::Cast { cast: c1, pad_i32: pad1 },
87                PassMode::Cast { cast: c2, pad_i32: pad2 },
88            ) => c1.eq_abi(c2) && pad1 == pad2,
89            (
90                PassMode::Indirect { attrs: a1, meta_attrs: None, on_stack: s1 },
91                PassMode::Indirect { attrs: a2, meta_attrs: None, on_stack: s2 },
92            ) => a1.eq_abi(a2) && s1 == s2,
93            (
94                PassMode::Indirect { attrs: a1, meta_attrs: Some(e1), on_stack: s1 },
95                PassMode::Indirect { attrs: a2, meta_attrs: Some(e2), on_stack: s2 },
96            ) => a1.eq_abi(a2) && e1.eq_abi(e2) && s1 == s2,
97            _ => false,
98        }
99    }
100}
101
// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
// of this module
pub use attr_impl::ArgAttribute;

106#[allow(non_upper_case_globals)]
107#[allow(unused)]
108mod attr_impl {
109    use rustc_macros::HashStable_Generic;
110
111    // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
112    #[derive(#[automatically_derived]
impl ::core::clone::Clone for ArgAttribute {
    #[inline]
    fn clone(&self) -> ArgAttribute {
        let _: ::core::clone::AssertParamIsClone<u16>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for ArgAttribute { }Copy, #[automatically_derived]
impl ::core::default::Default for ArgAttribute {
    #[inline]
    fn default() -> ArgAttribute {
        ArgAttribute(::core::default::Default::default())
    }
}Default, #[automatically_derived]
impl ::core::hash::Hash for ArgAttribute {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.0, state)
    }
}Hash, #[automatically_derived]
impl ::core::cmp::PartialEq for ArgAttribute {
    #[inline]
    fn eq(&self, other: &ArgAttribute) -> bool { self.0 == other.0 }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for ArgAttribute {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<u16>;
    }
}Eq, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for ArgAttribute where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    ArgAttribute(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)]
113    pub struct ArgAttribute(u16);
114    impl ArgAttribute {
    #[allow(deprecated, non_upper_case_globals,)]
    pub const CapturesNone: Self = Self::from_bits_retain(0b111);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const CapturesAddress: Self = Self::from_bits_retain(0b110);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const CapturesReadOnly: Self = Self::from_bits_retain(0b100);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const NoAlias: Self = Self::from_bits_retain(1 << 3);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const NonNull: Self = Self::from_bits_retain(1 << 4);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const ReadOnly: Self = Self::from_bits_retain(1 << 5);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const InReg: Self = Self::from_bits_retain(1 << 6);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const NoUndef: Self = Self::from_bits_retain(1 << 7);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const Writable: Self = Self::from_bits_retain(1 << 8);
}
impl ::bitflags::Flags for ArgAttribute {
    const FLAGS: &'static [::bitflags::Flag<ArgAttribute>] =
        &[{

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("CapturesNone",
                            ArgAttribute::CapturesNone)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("CapturesAddress",
                            ArgAttribute::CapturesAddress)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("CapturesReadOnly",
                            ArgAttribute::CapturesReadOnly)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("NoAlias", ArgAttribute::NoAlias)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("NonNull", ArgAttribute::NonNull)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("ReadOnly", ArgAttribute::ReadOnly)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("InReg", ArgAttribute::InReg)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("NoUndef", ArgAttribute::NoUndef)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("Writable", ArgAttribute::Writable)
                    }];
    type Bits = u16;
    fn bits(&self) -> u16 { ArgAttribute::bits(self) }
    fn from_bits_retain(bits: u16) -> ArgAttribute {
        ArgAttribute::from_bits_retain(bits)
    }
}
#[allow(dead_code, deprecated, unused_doc_comments, unused_attributes,
unused_mut, unused_imports, non_upper_case_globals, clippy ::
assign_op_pattern, clippy :: iter_without_into_iter,)]
const _: () =
    {
        #[allow(dead_code, deprecated, unused_attributes)]
        impl ArgAttribute {
            /// Get a flags value with all bits unset.
            #[inline]
            pub const fn empty() -> Self {
                Self(<u16 as ::bitflags::Bits>::EMPTY)
            }
            /// Get a flags value with all known bits set.
            #[inline]
            pub const fn all() -> Self {
                let mut truncated = <u16 as ::bitflags::Bits>::EMPTY;
                let mut i = 0;
                {
                    {
                        let flag =
                            <ArgAttribute as
                                            ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ArgAttribute as
                                            ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ArgAttribute as
                                            ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ArgAttribute as
                                            ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ArgAttribute as
                                            ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ArgAttribute as
                                            ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ArgAttribute as
                                            ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ArgAttribute as
                                            ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ArgAttribute as
                                            ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                let _ = i;
                Self(truncated)
            }
            /// Get the underlying bits value.
            ///
            /// The returned value is exactly the bits set in this flags value.
            #[inline]
            pub const fn bits(&self) -> u16 { self.0 }
            /// Convert from a bits value.
            ///
            /// This method will return `None` if any unknown bits are set.
            #[inline]
            pub const fn from_bits(bits: u16)
                -> ::bitflags::__private::core::option::Option<Self> {
                let truncated = Self::from_bits_truncate(bits).0;
                if truncated == bits {
                    ::bitflags::__private::core::option::Option::Some(Self(bits))
                } else { ::bitflags::__private::core::option::Option::None }
            }
            /// Convert from a bits value, unsetting any unknown bits.
            #[inline]
            pub const fn from_bits_truncate(bits: u16) -> Self {
                Self(bits & Self::all().0)
            }
            /// Convert from a bits value exactly.
            #[inline]
            pub const fn from_bits_retain(bits: u16) -> Self { Self(bits) }
            /// Get a flags value with the bits of a flag with the given name set.
            ///
            /// This method will return `None` if `name` is empty or doesn't
            /// correspond to any named flag.
            #[inline]
            pub fn from_name(name: &str)
                -> ::bitflags::__private::core::option::Option<Self> {
                {
                    if name == "CapturesNone" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::CapturesNone.bits()));
                    }
                };
                ;
                {
                    if name == "CapturesAddress" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::CapturesAddress.bits()));
                    }
                };
                ;
                {
                    if name == "CapturesReadOnly" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::CapturesReadOnly.bits()));
                    }
                };
                ;
                {
                    if name == "NoAlias" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::NoAlias.bits()));
                    }
                };
                ;
                {
                    if name == "NonNull" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::NonNull.bits()));
                    }
                };
                ;
                {
                    if name == "ReadOnly" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::ReadOnly.bits()));
                    }
                };
                ;
                {
                    if name == "InReg" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::InReg.bits()));
                    }
                };
                ;
                {
                    if name == "NoUndef" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::NoUndef.bits()));
                    }
                };
                ;
                {
                    if name == "Writable" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::Writable.bits()));
                    }
                };
                ;
                let _ = name;
                ::bitflags::__private::core::option::Option::None
            }
            /// Whether all bits in this flags value are unset.
            #[inline]
            pub const fn is_empty(&self) -> bool {
                self.0 == <u16 as ::bitflags::Bits>::EMPTY
            }
            /// Whether all known bits in this flags value are set.
            #[inline]
            pub const fn is_all(&self) -> bool {
                Self::all().0 | self.0 == self.0
            }
            /// Whether any set bits in a source flags value are also set in a target flags value.
            #[inline]
            pub const fn intersects(&self, other: Self) -> bool {
                self.0 & other.0 != <u16 as ::bitflags::Bits>::EMPTY
            }
            /// Whether all set bits in a source flags value are also set in a target flags value.
            #[inline]
            pub const fn contains(&self, other: Self) -> bool {
                self.0 & other.0 == other.0
            }
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            pub fn insert(&mut self, other: Self) {
                *self = Self(self.0).union(other);
            }
            /// The intersection of a source flags value with the complement of a target flags
            /// value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `remove` won't truncate `other`, but the `!` operator will.
            #[inline]
            pub fn remove(&mut self, other: Self) {
                *self = Self(self.0).difference(other);
            }
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            pub fn toggle(&mut self, other: Self) {
                *self = Self(self.0).symmetric_difference(other);
            }
            /// Call `insert` when `value` is `true` or `remove` when `value` is `false`.
            #[inline]
            pub fn set(&mut self, other: Self, value: bool) {
                if value { self.insert(other); } else { self.remove(other); }
            }
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn intersection(self, other: Self) -> Self {
                Self(self.0 & other.0)
            }
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn union(self, other: Self) -> Self {
                Self(self.0 | other.0)
            }
            /// The intersection of a source flags value with the complement of a target flags
            /// value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            #[must_use]
            pub const fn difference(self, other: Self) -> Self {
                Self(self.0 & !other.0)
            }
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn symmetric_difference(self, other: Self) -> Self {
                Self(self.0 ^ other.0)
            }
            /// The bitwise negation (`!`) of the bits in a flags value, truncating the result.
            #[inline]
            #[must_use]
            pub const fn complement(self) -> Self {
                Self::from_bits_truncate(!self.0)
            }
        }
        impl ::bitflags::__private::core::fmt::Binary for ArgAttribute {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::Binary::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::Octal for ArgAttribute {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::Octal::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::LowerHex for ArgAttribute {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::LowerHex::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::UpperHex for ArgAttribute {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::UpperHex::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::ops::BitOr for ArgAttribute {
            type Output = Self;
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            fn bitor(self, other: ArgAttribute) -> Self { self.union(other) }
        }
        impl ::bitflags::__private::core::ops::BitOrAssign for ArgAttribute {
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            fn bitor_assign(&mut self, other: Self) { self.insert(other); }
        }
        impl ::bitflags::__private::core::ops::BitXor for ArgAttribute {
            type Output = Self;
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            fn bitxor(self, other: Self) -> Self {
                self.symmetric_difference(other)
            }
        }
        impl ::bitflags::__private::core::ops::BitXorAssign for ArgAttribute {
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            fn bitxor_assign(&mut self, other: Self) { self.toggle(other); }
        }
        impl ::bitflags::__private::core::ops::BitAnd for ArgAttribute {
            type Output = Self;
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            fn bitand(self, other: Self) -> Self { self.intersection(other) }
        }
        impl ::bitflags::__private::core::ops::BitAndAssign for ArgAttribute {
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            fn bitand_assign(&mut self, other: Self) {
                *self =
                    Self::from_bits_retain(self.bits()).intersection(other);
            }
        }
        impl ::bitflags::__private::core::ops::Sub for ArgAttribute {
            type Output = Self;
            /// The intersection of a source flags value with the complement of a target flags value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            fn sub(self, other: Self) -> Self { self.difference(other) }
        }
        impl ::bitflags::__private::core::ops::SubAssign for ArgAttribute {
            /// The intersection of a source flags value with the complement of a target flags value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            fn sub_assign(&mut self, other: Self) { self.remove(other); }
        }
        impl ::bitflags::__private::core::ops::Not for ArgAttribute {
            type Output = Self;
            /// The bitwise negation (`!`) of the bits in a flags value, truncating the result.
            #[inline]
            fn not(self) -> Self { self.complement() }
        }
        impl ::bitflags::__private::core::iter::Extend<ArgAttribute> for
            ArgAttribute {
            /// The bitwise or (`|`) of the bits in each flags value.
            fn extend<T: ::bitflags::__private::core::iter::IntoIterator<Item
                = Self>>(&mut self, iterator: T) {
                for item in iterator { self.insert(item) }
            }
        }
        impl ::bitflags::__private::core::iter::FromIterator<ArgAttribute> for
            ArgAttribute {
            /// The bitwise or (`|`) of the bits in each flags value.
            fn from_iter<T: ::bitflags::__private::core::iter::IntoIterator<Item
                = Self>>(iterator: T) -> Self {
                use ::bitflags::__private::core::iter::Extend;
                let mut result = Self::empty();
                result.extend(iterator);
                result
            }
        }
        impl ArgAttribute {
            /// Yield a set of contained flags values.
            ///
            /// Each yielded flags value will correspond to a defined named flag. Any unknown bits
            /// will be yielded together as a final flags value.
            #[inline]
            pub const fn iter(&self) -> ::bitflags::iter::Iter<ArgAttribute> {
                ::bitflags::iter::Iter::__private_const_new(<ArgAttribute as
                        ::bitflags::Flags>::FLAGS,
                    ArgAttribute::from_bits_retain(self.bits()),
                    ArgAttribute::from_bits_retain(self.bits()))
            }
            /// Yield a set of contained named flags values.
            ///
            /// This method is like [`iter`](#method.iter), except only yields bits in contained named flags.
            /// Any unknown bits, or bits not corresponding to a contained flag will not be yielded.
            #[inline]
            pub const fn iter_names(&self)
                -> ::bitflags::iter::IterNames<ArgAttribute> {
                ::bitflags::iter::IterNames::__private_const_new(<ArgAttribute
                        as ::bitflags::Flags>::FLAGS,
                    ArgAttribute::from_bits_retain(self.bits()),
                    ArgAttribute::from_bits_retain(self.bits()))
            }
        }
        impl ::bitflags::__private::core::iter::IntoIterator for ArgAttribute
            {
            type Item = ArgAttribute;
            type IntoIter = ::bitflags::iter::Iter<ArgAttribute>;
            fn into_iter(self) -> Self::IntoIter { self.iter() }
        }
    };bitflags::bitflags! {
115        impl ArgAttribute: u16 {
116            const CapturesNone     = 0b111;
117            const CapturesAddress  = 0b110;
118            const CapturesReadOnly = 0b100;
119            const NoAlias  = 1 << 3;
120            const NonNull  = 1 << 4;
121            const ReadOnly = 1 << 5;
122            const InReg    = 1 << 6;
123            const NoUndef  = 1 << 7;
124            const Writable = 1 << 8;
125        }
126    }
127    impl ::std::fmt::Debug for ArgAttribute {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::bitflags::parser::to_writer(self, f)
    }
}rustc_data_structures::external_bitflags_debug! { ArgAttribute }
128}
129
/// Sometimes an ABI requires small integers to be extended to a full or partial register. This enum
/// defines if this extension should be zero-extension or sign-extension when necessary. When it is
/// not necessary to extend the argument, this enum is ignored.
133#[derive(#[automatically_derived]
impl ::core::marker::Copy for ArgExtension { }Copy, #[automatically_derived]
impl ::core::clone::Clone for ArgExtension {
    #[inline]
    fn clone(&self) -> ArgExtension { *self }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for ArgExtension {
    #[inline]
    fn eq(&self, other: &ArgExtension) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for ArgExtension {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for ArgExtension {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state)
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for ArgExtension {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::write_str(f,
            match self {
                ArgExtension::None => "None",
                ArgExtension::Zext => "Zext",
                ArgExtension::Sext => "Sext",
            })
    }
}Debug, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for ArgExtension where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    ArgExtension::None => {}
                    ArgExtension::Zext => {}
                    ArgExtension::Sext => {}
                }
            }
        }
    };HashStable_Generic)]
134pub enum ArgExtension {
135    None,
136    Zext,
137    Sext,
138}
139
140/// A compact representation of LLVM attributes (at least those relevant for this module)
141/// that can be manipulated without interacting with LLVM's Attribute machinery.
142#[derive(#[automatically_derived]
impl ::core::marker::Copy for ArgAttributes { }Copy, #[automatically_derived]
impl ::core::clone::Clone for ArgAttributes {
    #[inline]
    fn clone(&self) -> ArgAttributes {
        let _: ::core::clone::AssertParamIsClone<ArgAttribute>;
        let _: ::core::clone::AssertParamIsClone<ArgExtension>;
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Option<Align>>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for ArgAttributes {
    #[inline]
    fn eq(&self, other: &ArgAttributes) -> bool {
        self.regular == other.regular && self.arg_ext == other.arg_ext &&
                self.pointee_size == other.pointee_size &&
            self.pointee_align == other.pointee_align
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for ArgAttributes {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<ArgAttribute>;
        let _: ::core::cmp::AssertParamIsEq<ArgExtension>;
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for ArgAttributes {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.regular, state);
        ::core::hash::Hash::hash(&self.arg_ext, state);
        ::core::hash::Hash::hash(&self.pointee_size, state);
        ::core::hash::Hash::hash(&self.pointee_align, state)
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for ArgAttributes {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(f, "ArgAttributes",
            "regular", &self.regular, "arg_ext", &self.arg_ext,
            "pointee_size", &self.pointee_size, "pointee_align",
            &&self.pointee_align)
    }
}Debug, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for ArgAttributes where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    ArgAttributes {
                        regular: ref __binding_0,
                        arg_ext: ref __binding_1,
                        pointee_size: ref __binding_2,
                        pointee_align: ref __binding_3 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)]
143pub struct ArgAttributes {
144    pub regular: ArgAttribute,
145    pub arg_ext: ArgExtension,
146    /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
147    /// (corresponding to LLVM's dereferenceable_or_null attributes, i.e., it is okay for this to be
148    /// set on a null pointer, but all non-null pointers must be dereferenceable).
149    pub pointee_size: Size,
150    /// The minimum alignment of the pointee, if any.
151    pub pointee_align: Option<Align>,
152}
153
154impl ArgAttributes {
155    pub fn new() -> Self {
156        ArgAttributes {
157            regular: ArgAttribute::default(),
158            arg_ext: ArgExtension::None,
159            pointee_size: Size::ZERO,
160            pointee_align: None,
161        }
162    }
163
164    pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
165        if !(self.arg_ext == ArgExtension::None || self.arg_ext == ext) {
    {
        ::core::panicking::panic_fmt(format_args!("cannot set {0:?} when {1:?} is already set",
                ext, self.arg_ext));
    }
};assert!(
166            self.arg_ext == ArgExtension::None || self.arg_ext == ext,
167            "cannot set {:?} when {:?} is already set",
168            ext,
169            self.arg_ext
170        );
171        self.arg_ext = ext;
172        self
173    }
174
175    pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
176        self.regular |= attr;
177        self
178    }
179
180    pub fn contains(&self, attr: ArgAttribute) -> bool {
181        self.regular.contains(attr)
182    }
183
184    /// Checks if these two `ArgAttributes` are equal enough to be considered "the same for all
185    /// function call ABIs".
186    pub fn eq_abi(&self, other: &Self) -> bool {
187        // There's only one regular attribute that matters for the call ABI: InReg.
188        // Everything else is things like noalias, dereferenceable, nonnull, ...
189        // (This also applies to pointee_size, pointee_align.)
190        if self.regular.contains(ArgAttribute::InReg) != other.regular.contains(ArgAttribute::InReg)
191        {
192            return false;
193        }
194        // We also compare the sign extension mode -- this could let the callee make assumptions
195        // about bits that conceptually were not even passed.
196        if self.arg_ext != other.arg_ext {
197            return false;
198        }
199        true
200    }
201}
202
203impl From<ArgAttribute> for ArgAttributes {
204    fn from(value: ArgAttribute) -> Self {
205        Self {
206            regular: value,
207            arg_ext: ArgExtension::None,
208            pointee_size: Size::ZERO,
209            pointee_align: None,
210        }
211    }
212}
213
214/// An argument passed entirely registers with the
215/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
216#[derive(#[automatically_derived]
impl ::core::clone::Clone for Uniform {
    #[inline]
    fn clone(&self) -> Uniform {
        let _: ::core::clone::AssertParamIsClone<Reg>;
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<bool>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for Uniform { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for Uniform {
    #[inline]
    fn eq(&self, other: &Uniform) -> bool {
        self.is_consecutive == other.is_consecutive && self.unit == other.unit
            && self.total == other.total
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Uniform {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Reg>;
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Uniform {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.unit, state);
        ::core::hash::Hash::hash(&self.total, state);
        ::core::hash::Hash::hash(&self.is_consecutive, state)
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Uniform {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field3_finish(f, "Uniform",
            "unit", &self.unit, "total", &self.total, "is_consecutive",
            &&self.is_consecutive)
    }
}Debug, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Uniform where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    Uniform {
                        unit: ref __binding_0,
                        total: ref __binding_1,
                        is_consecutive: ref __binding_2 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)]
217pub struct Uniform {
218    pub unit: Reg,
219
220    /// The total size of the argument, which can be:
221    /// * equal to `unit.size` (one scalar/vector),
222    /// * a multiple of `unit.size` (an array of scalar/vectors),
223    /// * if `unit.kind` is `Integer`, the last element can be shorter, i.e., `{ i64, i64, i32 }`
224    ///   for 64-bit integers with a total size of 20 bytes. When the argument is actually passed,
225    ///   this size will be rounded up to the nearest multiple of `unit.size`.
226    pub total: Size,
227
228    /// Indicate that the argument is consecutive, in the sense that either all values need to be
229    /// passed in register, or all on the stack. If they are passed on the stack, there should be
230    /// no additional padding between elements.
231    pub is_consecutive: bool,
232}
233
234impl From<Reg> for Uniform {
235    fn from(unit: Reg) -> Uniform {
236        Uniform { unit, total: unit.size, is_consecutive: false }
237    }
238}
239
240impl Uniform {
241    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
242        self.unit.align(cx)
243    }
244
245    /// Pass using one or more values of the given type, without requiring them to be consecutive.
246    /// That is, some values may be passed in register and some on the stack.
247    pub fn new(unit: Reg, total: Size) -> Self {
248        Uniform { unit, total, is_consecutive: false }
249    }
250
251    /// Pass using one or more consecutive values of the given type. Either all values will be
252    /// passed in registers, or all on the stack.
253    pub fn consecutive(unit: Reg, total: Size) -> Self {
254        Uniform { unit, total, is_consecutive: true }
255    }
256}
257
258/// Describes the type used for `PassMode::Cast`.
259///
260/// Passing arguments in this mode works as follows: the registers in the `prefix` (the ones that
261/// are `Some`) get laid out one after the other (using `repr(C)` layout rules). Then the
262/// `rest.unit` register type gets repeated often enough to cover `rest.size`. This describes the
263/// actual type used for the call; the Rust type of the argument is then transmuted to this ABI type
264/// (and all data in the padding between the registers is dropped).
265#[derive(#[automatically_derived]
impl ::core::clone::Clone for CastTarget {
    #[inline]
    fn clone(&self) -> CastTarget {
        CastTarget {
            prefix: ::core::clone::Clone::clone(&self.prefix),
            rest_offset: ::core::clone::Clone::clone(&self.rest_offset),
            rest: ::core::clone::Clone::clone(&self.rest),
            attrs: ::core::clone::Clone::clone(&self.attrs),
        }
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for CastTarget {
    #[inline]
    fn eq(&self, other: &CastTarget) -> bool {
        self.prefix == other.prefix && self.rest_offset == other.rest_offset
                && self.rest == other.rest && self.attrs == other.attrs
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for CastTarget {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<[Option<Reg>; 8]>;
        let _: ::core::cmp::AssertParamIsEq<Option<Size>>;
        let _: ::core::cmp::AssertParamIsEq<Uniform>;
        let _: ::core::cmp::AssertParamIsEq<ArgAttributes>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for CastTarget {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.prefix, state);
        ::core::hash::Hash::hash(&self.rest_offset, state);
        ::core::hash::Hash::hash(&self.rest, state);
        ::core::hash::Hash::hash(&self.attrs, state)
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for CastTarget {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(f, "CastTarget",
            "prefix", &self.prefix, "rest_offset", &self.rest_offset, "rest",
            &self.rest, "attrs", &&self.attrs)
    }
}Debug, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for CastTarget where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    CastTarget {
                        prefix: ref __binding_0,
                        rest_offset: ref __binding_1,
                        rest: ref __binding_2,
                        attrs: ref __binding_3 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)]
266pub struct CastTarget {
267    pub prefix: [Option<Reg>; 8],
268    /// The offset of `rest` from the start of the value. Currently only implemented for a `Reg`
269    /// pair created by the `offset_pair` method.
270    pub rest_offset: Option<Size>,
271    pub rest: Uniform,
272    pub attrs: ArgAttributes,
273}
274
275impl From<Reg> for CastTarget {
276    fn from(unit: Reg) -> CastTarget {
277        CastTarget::from(Uniform::from(unit))
278    }
279}
280
281impl From<Uniform> for CastTarget {
282    fn from(uniform: Uniform) -> CastTarget {
283        Self::prefixed([None; 8], uniform)
284    }
285}
286
287impl CastTarget {
288    pub fn prefixed(prefix: [Option<Reg>; 8], rest: Uniform) -> Self {
289        Self { prefix, rest_offset: None, rest, attrs: ArgAttributes::new() }
290    }
291
292    pub fn offset_pair(a: Reg, offset_from_start: Size, b: Reg) -> Self {
293        Self {
294            prefix: [Some(a), None, None, None, None, None, None, None],
295            rest_offset: Some(offset_from_start),
296            rest: b.into(),
297            attrs: ArgAttributes::new(),
298        }
299    }
300
301    pub fn with_attrs(mut self, attrs: ArgAttributes) -> Self {
302        self.attrs = attrs;
303        self
304    }
305
306    pub fn pair(a: Reg, b: Reg) -> CastTarget {
307        Self::prefixed([Some(a), None, None, None, None, None, None, None], Uniform::from(b))
308    }
309
310    /// When you only access the range containing valid data, you can use this unaligned size;
311    /// otherwise, use the safer `size` method.
312    pub fn unaligned_size<C: HasDataLayout>(&self, _cx: &C) -> Size {
313        // Prefix arguments are passed in specific designated registers
314        let prefix_size = if let Some(offset_from_start) = self.rest_offset {
315            offset_from_start
316        } else {
317            self.prefix
318                .iter()
319                .filter_map(|x| x.map(|reg| reg.size))
320                .fold(Size::ZERO, |acc, size| acc + size)
321        };
322        // Remaining arguments are passed in chunks of the unit size
323        let rest_size =
324            self.rest.unit.size * self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes());
325
326        prefix_size + rest_size
327    }
328
329    pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
330        self.unaligned_size(cx).align_to(self.align(cx))
331    }
332
333    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
334        self.prefix
335            .iter()
336            .filter_map(|x| x.map(|reg| reg.align(cx)))
337            .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)), |acc, align| {
338                acc.max(align)
339            })
340    }
341
342    /// Checks if these two `CastTarget` are equal enough to be considered "the same for all
343    /// function call ABIs".
344    pub fn eq_abi(&self, other: &Self) -> bool {
345        let CastTarget {
346            prefix: prefix_l,
347            rest_offset: rest_offset_l,
348            rest: rest_l,
349            attrs: attrs_l,
350        } = self;
351        let CastTarget {
352            prefix: prefix_r,
353            rest_offset: rest_offset_r,
354            rest: rest_r,
355            attrs: attrs_r,
356        } = other;
357        prefix_l == prefix_r
358            && rest_offset_l == rest_offset_r
359            && rest_l == rest_r
360            && attrs_l.eq_abi(attrs_r)
361    }
362}
363
364/// Information about how to pass an argument to,
365/// or return a value from, a function, under some ABI.
366#[derive(#[automatically_derived]
impl<'a, Ty: ::core::clone::Clone> ::core::clone::Clone for ArgAbi<'a, Ty> {
    #[inline]
    fn clone(&self) -> ArgAbi<'a, Ty> {
        ArgAbi {
            layout: ::core::clone::Clone::clone(&self.layout),
            mode: ::core::clone::Clone::clone(&self.mode),
        }
    }
}Clone, #[automatically_derived]
impl<'a, Ty: ::core::cmp::PartialEq> ::core::cmp::PartialEq for ArgAbi<'a, Ty>
    {
    #[inline]
    fn eq(&self, other: &ArgAbi<'a, Ty>) -> bool {
        self.layout == other.layout && self.mode == other.mode
    }
}PartialEq, #[automatically_derived]
impl<'a, Ty: ::core::cmp::Eq> ::core::cmp::Eq for ArgAbi<'a, Ty> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<TyAndLayout<'a, Ty>>;
        let _: ::core::cmp::AssertParamIsEq<PassMode>;
    }
}Eq, #[automatically_derived]
impl<'a, Ty: ::core::hash::Hash> ::core::hash::Hash for ArgAbi<'a, Ty> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.layout, state);
        ::core::hash::Hash::hash(&self.mode, state)
    }
}Hash, const _: () =
    {
        impl<'a, Ty, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            ArgAbi<'a, Ty> where __CTX: ::rustc_span::HashStableContext,
            Ty: ::rustc_data_structures::stable_hasher::HashStable<__CTX> {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    ArgAbi { layout: ref __binding_0, mode: ref __binding_1 } =>
                        {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)]
367pub struct ArgAbi<'a, Ty> {
368    pub layout: TyAndLayout<'a, Ty>,
369    pub mode: PassMode,
370}
371
372// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
373impl<'a, Ty: fmt::Display> fmt::Debug for ArgAbi<'a, Ty> {
374    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
375        let ArgAbi { layout, mode } = self;
376        f.debug_struct("ArgAbi").field("layout", layout).field("mode", mode).finish()
377    }
378}
379
380impl<'a, Ty> ArgAbi<'a, Ty> {
381    /// This defines the "default ABI" for that type, that is then later adjusted in `fn_abi_adjust_for_abi`.
382    pub fn new(
383        cx: &impl HasDataLayout,
384        layout: TyAndLayout<'a, Ty>,
385        scalar_attrs: impl Fn(Scalar, Size) -> ArgAttributes,
386    ) -> Self {
387        let mode = match layout.backend_repr {
388            _ if layout.is_zst() => PassMode::Ignore,
389            BackendRepr::Scalar(scalar) => PassMode::Direct(scalar_attrs(scalar, Size::ZERO)),
390            BackendRepr::ScalarPair(a, b) => PassMode::Pair(
391                scalar_attrs(a, Size::ZERO),
392                scalar_attrs(b, a.size(cx).align_to(b.align(cx).abi)),
393            ),
394            BackendRepr::SimdVector { .. } => PassMode::Direct(ArgAttributes::new()),
395            BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
396            BackendRepr::SimdScalableVector { .. } => PassMode::Direct(ArgAttributes::new()),
397        };
398        ArgAbi { layout, mode }
399    }
400
401    fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
402        let mut attrs = ArgAttributes::new();
403
404        // For non-immediate arguments the callee gets its own copy of
405        // the value on the stack, so there are no aliases. The function
406        // can capture the address of the argument, but not the provenance.
407        attrs
408            .set(ArgAttribute::NoAlias)
409            .set(ArgAttribute::CapturesAddress)
410            .set(ArgAttribute::NonNull)
411            .set(ArgAttribute::NoUndef);
412        attrs.pointee_size = layout.size;
413        attrs.pointee_align = Some(layout.align.abi);
414
415        let meta_attrs = layout.is_unsized().then_some(ArgAttributes::new());
416
417        PassMode::Indirect { attrs, meta_attrs, on_stack: false }
418    }
419
420    /// Pass this argument directly instead. Should NOT be used!
421    /// Only exists because of past ABI mistakes that will take time to fix
422    /// (see <https://github.com/rust-lang/rust/issues/115666>).
423    #[track_caller]
424    pub fn make_direct_deprecated(&mut self) {
425        match self.mode {
426            PassMode::Indirect { .. } => {
427                self.mode = PassMode::Direct(ArgAttributes::new());
428            }
429            PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => {} // already direct
430            _ => {
    ::core::panicking::panic_fmt(format_args!("Tried to make {0:?} direct",
            self.mode));
}panic!("Tried to make {:?} direct", self.mode),
431        }
432    }
433
434    /// Pass this argument indirectly, by passing a (thin or wide) pointer to the argument instead.
435    /// This is valid for both sized and unsized arguments.
436    #[track_caller]
437    pub fn make_indirect(&mut self) {
438        match self.mode {
439            PassMode::Direct(_) | PassMode::Pair(_, _) => {
440                self.mode = Self::indirect_pass_mode(&self.layout);
441            }
442            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
443                // already indirect
444            }
445            _ => {
    ::core::panicking::panic_fmt(format_args!("Tried to make {0:?} indirect",
            self.mode));
}panic!("Tried to make {:?} indirect", self.mode),
446        }
447    }
448
449    /// Same as `make_indirect`, but for arguments that are ignored. Only needed for ABIs that pass
450    /// ZSTs indirectly.
451    #[track_caller]
452    pub fn make_indirect_from_ignore(&mut self) {
453        match self.mode {
454            PassMode::Ignore => {
455                self.mode = Self::indirect_pass_mode(&self.layout);
456            }
457            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
458                // already indirect
459            }
460            _ => {
    ::core::panicking::panic_fmt(format_args!("Tried to make {0:?} indirect (expected `PassMode::Ignore`)",
            self.mode));
}panic!("Tried to make {:?} indirect (expected `PassMode::Ignore`)", self.mode),
461        }
462    }
463
464    /// Pass this argument indirectly, by placing it at a fixed stack offset.
465    /// This corresponds to the `byval` LLVM argument attribute.
466    /// This is only valid for sized arguments.
467    ///
468    /// `byval_align` specifies the alignment of the `byval` stack slot, which does not need to
469    /// correspond to the type's alignment. This will be `Some` if the target's ABI specifies that
470    /// stack slots used for arguments passed by-value have specific alignment requirements which
471    /// differ from the alignment used in other situations.
472    ///
473    /// If `None`, the type's alignment is used.
474    ///
475    /// If the resulting alignment differs from the type's alignment,
476    /// the argument will be copied to an alloca with sufficient alignment,
477    /// either in the caller (if the type's alignment is lower than the byval alignment)
478    /// or in the callee (if the type's alignment is higher than the byval alignment),
479    /// to ensure that Rust code never sees an underaligned pointer.
480    pub fn pass_by_stack_offset(&mut self, byval_align: Option<Align>) {
481        if !!self.layout.is_unsized() {
    {
        ::core::panicking::panic_fmt(format_args!("used byval ABI for unsized layout"));
    }
};assert!(!self.layout.is_unsized(), "used byval ABI for unsized layout");
482        self.make_indirect();
483        match self.mode {
484            PassMode::Indirect { ref mut attrs, meta_attrs: _, ref mut on_stack } => {
485                *on_stack = true;
486
487                // Some platforms, like 32-bit x86, change the alignment of the type when passing
488                // `byval`. Account for that.
489                if let Some(byval_align) = byval_align {
490                    // On all targets with byval align this is currently true, so let's assert it.
491                    if true {
    if !(byval_align >= Align::from_bytes(4).unwrap()) {
        ::core::panicking::panic("assertion failed: byval_align >= Align::from_bytes(4).unwrap()")
    };
};debug_assert!(byval_align >= Align::from_bytes(4).unwrap());
492                    attrs.pointee_align = Some(byval_align);
493                }
494            }
495            _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
496        }
497    }
498
499    pub fn extend_integer_width_to(&mut self, bits: u64) {
500        // Only integers have signedness
501        if let BackendRepr::Scalar(scalar) = self.layout.backend_repr
502            && let Primitive::Int(i, signed) = scalar.primitive()
503            && i.size().bits() < bits
504            && let PassMode::Direct(ref mut attrs) = self.mode
505        {
506            if signed {
507                attrs.ext(ArgExtension::Sext)
508            } else {
509                attrs.ext(ArgExtension::Zext)
510            };
511        }
512    }
513
514    pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
515        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32: false };
516    }
517
518    pub fn cast_to_with_attrs<T: Into<CastTarget>>(&mut self, target: T, attrs: ArgAttributes) {
519        self.mode =
520            PassMode::Cast { cast: Box::new(target.into().with_attrs(attrs)), pad_i32: false };
521    }
522
523    pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
524        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32 };
525    }
526
527    pub fn is_indirect(&self) -> bool {
528        #[allow(non_exhaustive_omitted_patterns)] match self.mode {
    PassMode::Indirect { .. } => true,
    _ => false,
}matches!(self.mode, PassMode::Indirect { .. })
529    }
530
531    pub fn is_sized_indirect(&self) -> bool {
532        #[allow(non_exhaustive_omitted_patterns)] match self.mode {
    PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => true,
    _ => false,
}matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ })
533    }
534
535    pub fn is_unsized_indirect(&self) -> bool {
536        #[allow(non_exhaustive_omitted_patterns)] match self.mode {
    PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => true,
    _ => false,
}matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ })
537    }
538
539    pub fn is_ignore(&self) -> bool {
540        #[allow(non_exhaustive_omitted_patterns)] match self.mode {
    PassMode::Ignore => true,
    _ => false,
}matches!(self.mode, PassMode::Ignore)
541    }
542
543    /// Checks if these two `ArgAbi` are equal enough to be considered "the same for all
544    /// function call ABIs".
545    pub fn eq_abi(&self, other: &Self) -> bool
546    where
547        Ty: PartialEq,
548    {
549        // Ideally we'd just compare the `mode`, but that is not enough -- for some modes LLVM will look
550        // at the type.
551        self.layout.eq_abi(&other.layout) && self.mode.eq_abi(&other.mode) && {
552            // `fn_arg_sanity_check` accepts `PassMode::Direct` for some aggregates.
553            // That elevates any type difference to an ABI difference since we just use the
554            // full Rust type as the LLVM argument/return type.
555            if #[allow(non_exhaustive_omitted_patterns)] match self.mode {
    PassMode::Direct(..) => true,
    _ => false,
}matches!(self.mode, PassMode::Direct(..))
556                && #[allow(non_exhaustive_omitted_patterns)] match self.layout.backend_repr {
    BackendRepr::Memory { .. } => true,
    _ => false,
}matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
557            {
558                // For aggregates in `Direct` mode to be compatible, the types need to be equal.
559                self.layout.ty == other.layout.ty
560            } else {
561                true
562            }
563        }
564    }
565}
566
567#[derive(#[automatically_derived]
impl ::core::marker::Copy for RiscvInterruptKind { }Copy, #[automatically_derived]
impl ::core::clone::Clone for RiscvInterruptKind {
    #[inline]
    fn clone(&self) -> RiscvInterruptKind { *self }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for RiscvInterruptKind {
    #[inline]
    fn eq(&self, other: &RiscvInterruptKind) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for RiscvInterruptKind {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for RiscvInterruptKind {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state)
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for RiscvInterruptKind {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::write_str(f,
            match self {
                RiscvInterruptKind::Machine => "Machine",
                RiscvInterruptKind::Supervisor => "Supervisor",
            })
    }
}Debug, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for RiscvInterruptKind where
            __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    RiscvInterruptKind::Machine => {}
                    RiscvInterruptKind::Supervisor => {}
                }
            }
        }
    };HashStable_Generic)]
568pub enum RiscvInterruptKind {
569    Machine,
570    Supervisor,
571}
572
573impl RiscvInterruptKind {
574    pub fn as_str(&self) -> &'static str {
575        match self {
576            Self::Machine => "machine",
577            Self::Supervisor => "supervisor",
578        }
579    }
580}
581
582/// Metadata describing how the arguments to a native function
583/// should be passed in order to respect the native ABI.
584///
585/// The signature represented by this type may not match the MIR function signature.
586/// Certain attributes, like `#[track_caller]` can introduce additional arguments, which are present in [`FnAbi`], but not in `FnSig`.
587/// The std::offload module also adds an addition dyn_ptr argument to the GpuKernel ABI.
588/// While this difference is rarely relevant, it should still be kept in mind.
589///
590/// I will do my best to describe this structure, but these
591/// comments are reverse-engineered and may be inaccurate. -NDM
592#[derive(#[automatically_derived]
impl<'a, Ty: ::core::clone::Clone> ::core::clone::Clone for FnAbi<'a, Ty> {
    #[inline]
    fn clone(&self) -> FnAbi<'a, Ty> {
        FnAbi {
            args: ::core::clone::Clone::clone(&self.args),
            ret: ::core::clone::Clone::clone(&self.ret),
            c_variadic: ::core::clone::Clone::clone(&self.c_variadic),
            fixed_count: ::core::clone::Clone::clone(&self.fixed_count),
            conv: ::core::clone::Clone::clone(&self.conv),
            can_unwind: ::core::clone::Clone::clone(&self.can_unwind),
        }
    }
}Clone, #[automatically_derived]
impl<'a, Ty: ::core::cmp::PartialEq> ::core::cmp::PartialEq for FnAbi<'a, Ty>
    {
    #[inline]
    fn eq(&self, other: &FnAbi<'a, Ty>) -> bool {
        self.c_variadic == other.c_variadic &&
                            self.fixed_count == other.fixed_count &&
                        self.can_unwind == other.can_unwind &&
                    self.args == other.args && self.ret == other.ret &&
            self.conv == other.conv
    }
}PartialEq, #[automatically_derived]
impl<'a, Ty: ::core::cmp::Eq> ::core::cmp::Eq for FnAbi<'a, Ty> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Box<[ArgAbi<'a, Ty>]>>;
        let _: ::core::cmp::AssertParamIsEq<ArgAbi<'a, Ty>>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<u32>;
        let _: ::core::cmp::AssertParamIsEq<CanonAbi>;
    }
}Eq, #[automatically_derived]
impl<'a, Ty: ::core::hash::Hash> ::core::hash::Hash for FnAbi<'a, Ty> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.args, state);
        ::core::hash::Hash::hash(&self.ret, state);
        ::core::hash::Hash::hash(&self.c_variadic, state);
        ::core::hash::Hash::hash(&self.fixed_count, state);
        ::core::hash::Hash::hash(&self.conv, state);
        ::core::hash::Hash::hash(&self.can_unwind, state)
    }
}Hash, const _: () =
    {
        impl<'a, Ty, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            FnAbi<'a, Ty> where __CTX: ::rustc_span::HashStableContext,
            Ty: ::rustc_data_structures::stable_hasher::HashStable<__CTX> {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    FnAbi {
                        args: ref __binding_0,
                        ret: ref __binding_1,
                        c_variadic: ref __binding_2,
                        fixed_count: ref __binding_3,
                        conv: ref __binding_4,
                        can_unwind: ref __binding_5 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                        { __binding_4.hash_stable(__hcx, __hasher); }
                        { __binding_5.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)]
593pub struct FnAbi<'a, Ty> {
594    /// The type, layout, and information about how each argument is passed.
595    pub args: Box<[ArgAbi<'a, Ty>]>,
596
597    /// The layout, type, and the way a value is returned from this function.
598    pub ret: ArgAbi<'a, Ty>,
599
600    /// Marks this function as variadic (accepting a variable number of arguments).
601    pub c_variadic: bool,
602
603    /// The count of non-variadic arguments.
604    ///
605    /// Should only be different from args.len() when c_variadic is true.
606    /// This can be used to know whether an argument is variadic or not.
607    pub fixed_count: u32,
608    /// The calling convention of this function.
609    pub conv: CanonAbi,
610    /// Indicates if an unwind may happen across a call to this function.
611    pub can_unwind: bool,
612}
613
614// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
615impl<'a, Ty: fmt::Display> fmt::Debug for FnAbi<'a, Ty> {
616    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
617        let FnAbi { args, ret, c_variadic, fixed_count, conv, can_unwind } = self;
618        f.debug_struct("FnAbi")
619            .field("args", args)
620            .field("ret", ret)
621            .field("c_variadic", c_variadic)
622            .field("fixed_count", fixed_count)
623            .field("conv", conv)
624            .field("can_unwind", can_unwind)
625            .finish()
626    }
627}
628
629impl<'a, Ty> FnAbi<'a, Ty> {
630    pub fn adjust_for_foreign_abi<C>(&mut self, cx: &C, abi: ExternAbi)
631    where
632        Ty: TyAbiInterface<'a, C> + Copy,
633        C: HasDataLayout + HasTargetSpec + HasX86AbiOpt,
634    {
635        if abi == ExternAbi::X86Interrupt {
636            if let Some(arg) = self.args.first_mut() {
637                arg.pass_by_stack_offset(None);
638            }
639            return;
640        }
641
642        let spec = cx.target_spec();
643        match &spec.arch {
644            Arch::X86 => {
645                let (flavor, regparm) = match abi {
646                    ExternAbi::Fastcall { .. } | ExternAbi::Vectorcall { .. } => {
647                        (x86::Flavor::FastcallOrVectorcall, None)
648                    }
649                    ExternAbi::C { .. } | ExternAbi::Cdecl { .. } | ExternAbi::Stdcall { .. } => {
650                        (x86::Flavor::General, cx.x86_abi_opt().regparm)
651                    }
652                    _ => (x86::Flavor::General, None),
653                };
654                let reg_struct_return = cx.x86_abi_opt().reg_struct_return;
655                let opts = x86::X86Options { flavor, regparm, reg_struct_return };
656                if spec.is_like_msvc {
657                    x86_win32::compute_abi_info(cx, self, opts);
658                } else {
659                    x86::compute_abi_info(cx, self, opts);
660                }
661            }
662            Arch::X86_64 => match abi {
663                ExternAbi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
664                ExternAbi::Win64 { .. } | ExternAbi::Vectorcall { .. } => {
665                    x86_win64::compute_abi_info(cx, self)
666                }
667                _ => {
668                    if cx.target_spec().is_like_windows {
669                        x86_win64::compute_abi_info(cx, self)
670                    } else {
671                        x86_64::compute_abi_info(cx, self)
672                    }
673                }
674            },
675            Arch::AArch64 | Arch::Arm64EC => {
676                let kind = if cx.target_spec().is_like_darwin {
677                    aarch64::AbiKind::DarwinPCS
678                } else if cx.target_spec().is_like_windows {
679                    aarch64::AbiKind::Win64
680                } else {
681                    aarch64::AbiKind::AAPCS
682                };
683                aarch64::compute_abi_info(cx, self, kind)
684            }
685            Arch::AmdGpu => amdgpu::compute_abi_info(cx, self),
686            Arch::Arm => arm::compute_abi_info(cx, self),
687            Arch::Avr => avr::compute_abi_info(cx, self),
688            Arch::LoongArch32 | Arch::LoongArch64 => loongarch::compute_abi_info(cx, self),
689            Arch::M68k => m68k::compute_abi_info(cx, self),
690            Arch::CSky => csky::compute_abi_info(cx, self),
691            Arch::Mips | Arch::Mips32r6 => mips::compute_abi_info(cx, self),
692            Arch::Mips64 | Arch::Mips64r6 => mips64::compute_abi_info(cx, self),
693            Arch::PowerPC => powerpc::compute_abi_info(cx, self),
694            Arch::PowerPC64 => powerpc64::compute_abi_info(cx, self),
695            Arch::S390x => s390x::compute_abi_info(cx, self),
696            Arch::Msp430 => msp430::compute_abi_info(cx, self),
697            Arch::Sparc => sparc::compute_abi_info(cx, self),
698            Arch::Sparc64 => sparc64::compute_abi_info(cx, self),
699            Arch::Nvptx64 => {
700                if abi == ExternAbi::PtxKernel || abi == ExternAbi::GpuKernel {
701                    nvptx64::compute_ptx_kernel_abi_info(cx, self)
702                } else {
703                    nvptx64::compute_abi_info(cx, self)
704                }
705            }
706            Arch::Hexagon => hexagon::compute_abi_info(cx, self),
707            Arch::Xtensa => xtensa::compute_abi_info(cx, self),
708            Arch::RiscV32 | Arch::RiscV64 => riscv::compute_abi_info(cx, self),
709            Arch::Wasm32 | Arch::Wasm64 => wasm::compute_abi_info(cx, self),
710            Arch::Bpf => bpf::compute_abi_info(cx, self),
711            arch @ (Arch::SpirV | Arch::Other(_)) => {
712                {
    ::core::panicking::panic_fmt(format_args!("no lowering implemented for {0}",
            arch));
}panic!("no lowering implemented for {arch}")
713            }
714        }
715    }
716
717    pub fn adjust_for_rust_abi<C>(&mut self, cx: &C)
718    where
719        Ty: TyAbiInterface<'a, C> + Copy,
720        C: HasDataLayout + HasTargetSpec,
721    {
722        let spec = cx.target_spec();
723        match &spec.arch {
724            Arch::X86 => x86::compute_rust_abi_info(cx, self),
725            Arch::RiscV32 | Arch::RiscV64 => riscv::compute_rust_abi_info(cx, self),
726            Arch::LoongArch32 | Arch::LoongArch64 => loongarch::compute_rust_abi_info(cx, self),
727            Arch::AArch64 => aarch64::compute_rust_abi_info(cx, self),
728            Arch::Bpf => bpf::compute_rust_abi_info(self),
729            _ => {}
730        };
731
732        for (arg_idx, arg) in self
733            .args
734            .iter_mut()
735            .enumerate()
736            .map(|(idx, arg)| (Some(idx), arg))
737            .chain(iter::once((None, &mut self.ret)))
738        {
739            // If the logic above already picked a specific type to cast the argument to, leave that
740            // in place.
741            if #[allow(non_exhaustive_omitted_patterns)] match arg.mode {
    PassMode::Ignore | PassMode::Cast { .. } => true,
    _ => false,
}matches!(arg.mode, PassMode::Ignore | PassMode::Cast { .. }) {
742                continue;
743            }
744
745            if arg_idx.is_none()
746                && arg.layout.size > Primitive::Pointer(AddressSpace::ZERO).size(cx) * 2
747                && !#[allow(non_exhaustive_omitted_patterns)] match arg.layout.backend_repr {
    BackendRepr::SimdVector { .. } => true,
    _ => false,
}matches!(arg.layout.backend_repr, BackendRepr::SimdVector { .. })
748            {
749                // Return values larger than 2 registers using a return area
750                // pointer. LLVM and Cranelift disagree about how to return
751                // values that don't fit in the registers designated for return
752                // values. LLVM will force the entire return value to be passed
753                // by return area pointer, while Cranelift will look at each IR level
754                // return value independently and decide to pass it in a
755                // register or not, which would result in the return value
756                // being passed partially in registers and partially through a
757                // return area pointer. For large IR-level values such as `i128`,
758                // cranelift will even split up the value into smaller chunks.
759                //
760                // While Cranelift may need to be fixed as the LLVM behavior is
761                // generally more correct with respect to the surface language,
762                // forcing this behavior in rustc itself makes it easier for
763                // other backends to conform to the Rust ABI and for the C ABI
764                // rustc already handles this behavior anyway.
765                //
766                // In addition LLVM's decision to pass the return value in
767                // registers or using a return area pointer depends on how
768                // exactly the return type is lowered to an LLVM IR type. For
769                // example `Option<u128>` can be lowered as `{ i128, i128 }`
770                // in which case the x86_64 backend would use a return area
771                // pointer, or it could be passed as `{ i32, i128 }` in which
772                // case the x86_64 backend would pass it in registers by taking
773                // advantage of an LLVM ABI extension that allows using 3
774                // registers for the x86_64 sysv call conv rather than the
775                // officially specified 2 registers.
776                //
777                // FIXME: Technically we should look at the amount of available
778                // return registers rather than guessing that there are 2
779                // registers for return values. In practice only a couple of
780                // architectures have less than 2 return registers. None of
781                // which supported by Cranelift.
782                //
783                // NOTE: This adjustment is only necessary for the Rust ABI as
784                // for other ABI's the calling convention implementations in
785                // rustc_target already ensure any return value which doesn't
786                // fit in the available amount of return registers is passed in
787                // the right way for the current target.
788                //
789                // The adjustment is not necessary nor desired for types with a vector
790                // representation; those are handled below.
791                arg.make_indirect();
792                continue;
793            }
794
795            match arg.layout.backend_repr {
796                BackendRepr::Memory { .. } => {
797                    // Compute `Aggregate` ABI.
798
799                    let is_indirect_not_on_stack =
800                        #[allow(non_exhaustive_omitted_patterns)] match arg.mode {
    PassMode::Indirect { on_stack: false, .. } => true,
    _ => false,
}matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
801                    if !is_indirect_not_on_stack {
    ::core::panicking::panic("assertion failed: is_indirect_not_on_stack")
};assert!(is_indirect_not_on_stack);
802
803                    let size = arg.layout.size;
804                    if arg.layout.is_sized()
805                        && size <= Primitive::Pointer(AddressSpace::ZERO).size(cx)
806                    {
807                        // We want to pass small aggregates as immediates, but using
808                        // an LLVM aggregate type for this leads to bad optimizations,
809                        // so we pick an appropriately sized integer type instead.
810                        let attr = if layout_is_noundef(arg.layout, cx) {
811                            ArgAttribute::NoUndef
812                        } else {
813                            ArgAttribute::default()
814                        };
815                        arg.cast_to_with_attrs(Reg { kind: RegKind::Integer, size }, attr.into());
816                    }
817                }
818
819                BackendRepr::SimdVector { .. } => {
820                    // This is a fun case! The gist of what this is doing is
821                    // that we want callers and callees to always agree on the
822                    // ABI of how they pass SIMD arguments. If we were to *not*
823                    // make these arguments indirect then they'd be immediates
824                    // in LLVM, which means that they'd used whatever the
825                    // appropriate ABI is for the callee and the caller. That
826                    // means, for example, if the caller doesn't have AVX
827                    // enabled but the callee does, then passing an AVX argument
828                    // across this boundary would cause corrupt data to show up.
829                    //
830                    // This problem is fixed by unconditionally passing SIMD
831                    // arguments through memory between callers and callees
832                    // which should get them all to agree on ABI regardless of
833                    // target feature sets. Some more information about this
834                    // issue can be found in #44367.
835                    //
836                    // We *could* do better in some cases, e.g. on x86_64 targets where SSE2 is
837                    // required. However, it turns out that that makes LLVM worse at optimizing this
838                    // code, so we pass things indirectly even there. See #139029 for more on that.
839                    if spec.simd_types_indirect {
840                        arg.make_indirect();
841                    }
842                }
843
844                _ => {}
845            }
846        }
847    }
848}
849
850/// Determines whether `layout` contains no uninit bytes (no padding, no unions),
851/// using only the computed layout.
852///
853/// Conservative: returns `false` for anything it cannot prove fully initialized,
854/// including multi-variant enums and SIMD vectors.
855// FIXME: extend to multi-variant enums (per-variant padding analysis needed).
856fn layout_is_noundef<'a, Ty, C>(layout: TyAndLayout<'a, Ty>, cx: &C) -> bool
857where
858    Ty: TyAbiInterface<'a, C> + Copy,
859    C: HasDataLayout,
860{
861    match layout.backend_repr {
862        BackendRepr::Scalar(scalar) => !scalar.is_uninit_valid(),
863        BackendRepr::ScalarPair(s1, s2) => {
864            !s1.is_uninit_valid()
865                && !s2.is_uninit_valid()
866                // Ensure there is no padding.
867                && s1.size(cx) + s2.size(cx) == layout.size
868        }
869        BackendRepr::Memory { .. } => match layout.fields {
870            FieldsShape::Primitive | FieldsShape::Union(_) => false,
871            // Array elements are at stride offsets with no inter-element gaps.
872            FieldsShape::Array { stride: _, count } => {
873                count == 0 || layout_is_noundef(layout.field(cx, 0), cx)
874            }
875            FieldsShape::Arbitrary { .. } => {
876                // With `Variants::Multiple`, `layout.fields` only covers shared
877                // bytes (niche/discriminant); per-variant data is absent, so
878                // full coverage cannot be proven.
879                #[allow(non_exhaustive_omitted_patterns)] match layout.variants {
    Variants::Single { .. } => true,
    _ => false,
}matches!(layout.variants, Variants::Single { .. }) && fields_are_noundef(layout, cx)
880            }
881        },
882        BackendRepr::SimdVector { .. } | BackendRepr::SimdScalableVector { .. } => false,
883    }
884}
885
886/// Returns `true` if the fields of `layout` contiguously cover bytes `0..layout.size`
887/// with no padding gaps and each field is recursively `layout_is_noundef`.
888fn fields_are_noundef<'a, Ty, C>(layout: TyAndLayout<'a, Ty>, cx: &C) -> bool
889where
890    Ty: TyAbiInterface<'a, C> + Copy,
891    C: HasDataLayout,
892{
893    let mut cursor = Size::ZERO;
894    for i in layout.fields.index_by_increasing_offset() {
895        let field = layout.field(cx, i);
896        if field.size == Size::ZERO {
897            continue;
898        }
899        if layout.fields.offset(i) != cursor {
900            return false;
901        }
902        if !layout_is_noundef(field, cx) {
903            return false;
904        }
905        cursor += field.size;
906    }
907    cursor == layout.size
908}
909
910// Some types are used a lot. Make sure they don't unintentionally get bigger.
911#[cfg(target_pointer_width = "64")]
912mod size_asserts {
913    use rustc_data_structures::static_assert_size;
914
915    use super::*;
916    // tidy-alphabetical-start
917    const _: [(); 56] = [(); ::std::mem::size_of::<ArgAbi<'_, usize>>()];static_assert_size!(ArgAbi<'_, usize>, 56);
918    const _: [(); 80] = [(); ::std::mem::size_of::<FnAbi<'_, usize>>()];static_assert_size!(FnAbi<'_, usize>, 80);
919    // tidy-alphabetical-end
920}