use std::{fmt, iter};

use rustc_abi::{
    AddressSpace, Align, BackendRepr, CanonAbi, ExternAbi, HasDataLayout, Primitive, Reg, RegKind,
    Scalar, Size, TyAbiInterface, TyAndLayout,
};
use rustc_macros::HashStable_Generic;

pub use crate::spec::AbiMap;
use crate::spec::{Arch, HasTargetSpec, HasX86AbiOpt};

mod aarch64;
mod amdgpu;
mod arm;
mod avr;
mod bpf;
mod csky;
mod hexagon;
mod loongarch;
mod m68k;
mod mips;
mod mips64;
mod msp430;
mod nvptx64;
mod powerpc;
mod powerpc64;
mod riscv;
mod s390x;
mod sparc;
mod sparc64;
mod wasm;
mod x86;
mod x86_64;
mod x86_win32;
mod x86_win64;
mod xtensa;

#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum PassMode {
    /// Ignore the argument.
    ///
    /// The argument is a ZST.
    Ignore,
    /// Pass the argument directly.
    ///
    /// The argument has a layout abi of `Scalar` or `Vector`.
    /// Unfortunately due to past mistakes, in rare cases on wasm, it can also be `Aggregate`.
    /// This is bad since it leaks LLVM implementation details into the ABI.
    /// (Also see <https://github.com/rust-lang/rust/issues/115666>.)
    Direct(ArgAttributes),
    /// Pass a pair's elements directly in two arguments.
    ///
    /// The argument has a layout abi of `ScalarPair`.
    Pair(ArgAttributes, ArgAttributes),
    /// Pass the argument after casting it. See the `CastTarget` docs for details.
    ///
    /// `pad_i32` indicates if a `Reg::i32()` dummy argument is emitted before the real argument.
    Cast { pad_i32: bool, cast: Box<CastTarget> },
    /// Pass the argument indirectly via a hidden pointer.
    ///
    /// The `meta_attrs` value, if any, is for the metadata (vtable or length) of an unsized
    /// argument. (This is the only mode that supports unsized arguments.)
    ///
    /// `on_stack` defines that the value should be passed at a fixed stack offset in accordance
    /// with the ABI rather than passed using a pointer. This corresponds to the `byval` LLVM
    /// argument attribute. The `byval` argument will use a byte array with the same size as the
    /// Rust type (which ensures that padding is preserved and that we do not rely on LLVM's struct
    /// layout), and will use the alignment specified in `attrs.pointee_align` (if `Some`) or the
    /// type's alignment (if `None`). This means that the alignment will not always
    /// match the Rust type's alignment; see documentation of `pass_by_stack_offset` for more info.
    ///
    /// `on_stack` cannot be true for unsized arguments, i.e., when `meta_attrs` is `Some`.
    Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
}
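
// Illustrative sketch (added for exposition; not part of the original file): how a
// backend might dispatch on `PassMode` when lowering a call. The returned strings
// are purely descriptive.
#[allow(dead_code)]
fn example_pass_mode_dispatch(mode: &PassMode) -> &'static str {
    match mode {
        PassMode::Ignore => "emit no argument at all (ZST)",
        PassMode::Direct(_) => "pass as one immediate value",
        PassMode::Pair(_, _) => "pass as two immediate values",
        PassMode::Cast { pad_i32: true, .. } => "emit an i32 dummy, then the cast value",
        PassMode::Cast { pad_i32: false, .. } => "pass the value transmuted to the cast type",
        PassMode::Indirect { on_stack: true, .. } => "pass byval at a fixed stack offset",
        PassMode::Indirect { .. } => "pass a hidden pointer to the value",
    }
}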

impl PassMode {
    /// Checks if these two `PassMode` are equal enough to be considered "the same for all
    /// function call ABIs". However, the `Layout` can also impact ABI decisions,
    /// so that needs to be compared as well!
    pub fn eq_abi(&self, other: &Self) -> bool {
        match (self, other) {
            (PassMode::Ignore, PassMode::Ignore) => true,
            (PassMode::Direct(a1), PassMode::Direct(a2)) => a1.eq_abi(a2),
            (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => a1.eq_abi(a2) && b1.eq_abi(b2),
            (
                PassMode::Cast { cast: c1, pad_i32: pad1 },
                PassMode::Cast { cast: c2, pad_i32: pad2 },
            ) => c1.eq_abi(c2) && pad1 == pad2,
            (
                PassMode::Indirect { attrs: a1, meta_attrs: None, on_stack: s1 },
                PassMode::Indirect { attrs: a2, meta_attrs: None, on_stack: s2 },
            ) => a1.eq_abi(a2) && s1 == s2,
            (
                PassMode::Indirect { attrs: a1, meta_attrs: Some(e1), on_stack: s1 },
                PassMode::Indirect { attrs: a2, meta_attrs: Some(e2), on_stack: s2 },
            ) => a1.eq_abi(a2) && e1.eq_abi(e2) && s1 == s2,
            _ => false,
        }
    }
}

// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
// of this module
pub use attr_impl::ArgAttribute;

#[allow(non_upper_case_globals)]
#[allow(unused)]
mod attr_impl {
    use rustc_macros::HashStable_Generic;

    // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
    #[derive(Clone, Copy, Default, Hash, PartialEq, Eq, HashStable_Generic)]
    pub struct ArgAttribute(u8);
    bitflags::bitflags! {
        impl ArgAttribute: u8 {
            const CapturesNone = 0b111;
            const CapturesAddress = 0b110;
            const CapturesReadOnly = 0b100;
            const NoAlias = 1 << 3;
            const NonNull = 1 << 4;
            const ReadOnly = 1 << 5;
            const InReg = 1 << 6;
            const NoUndef = 1 << 7;
        }
    }
    rustc_data_structures::external_bitflags_debug! { ArgAttribute }
}
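
// Illustrative sketch (added for exposition; not part of the original file): the
// `Captures*` flags above are overlapping bit patterns rather than independent bits,
// so the stronger capture restriction is a bit-superset of the weaker ones and
// `contains` reflects that ordering.
#[allow(dead_code)]
fn example_captures_ordering() {
    assert!(ArgAttribute::CapturesNone.contains(ArgAttribute::CapturesAddress));
    assert!(ArgAttribute::CapturesAddress.contains(ArgAttribute::CapturesReadOnly));
    assert!(!ArgAttribute::CapturesReadOnly.contains(ArgAttribute::CapturesNone));
}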

/// Sometimes an ABI requires small integers to be extended to a full or partial register. This enum
/// defines if this extension should be zero-extension or sign-extension when necessary. When it is
/// not necessary to extend the argument, this enum is ignored.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum ArgExtension {
    None,
    Zext,
    Sext,
}

/// A compact representation of LLVM attributes (at least those relevant for this module)
/// that can be manipulated without interacting with LLVM's Attribute machinery.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct ArgAttributes {
    pub regular: ArgAttribute,
    pub arg_ext: ArgExtension,
    /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
    /// (corresponding to LLVM's dereferenceable_or_null attributes, i.e., it is okay for this to be
    /// set on a null pointer, but all non-null pointers must be dereferenceable).
    pub pointee_size: Size,
    /// The minimum alignment of the pointee, if any.
    pub pointee_align: Option<Align>,
}

impl ArgAttributes {
    pub fn new() -> Self {
        ArgAttributes {
            regular: ArgAttribute::default(),
            arg_ext: ArgExtension::None,
            pointee_size: Size::ZERO,
            pointee_align: None,
        }
    }

    pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
        assert!(
            self.arg_ext == ArgExtension::None || self.arg_ext == ext,
            "cannot set {:?} when {:?} is already set",
            ext,
            self.arg_ext
        );
        self.arg_ext = ext;
        self
    }

    pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
        self.regular |= attr;
        self
    }

    pub fn contains(&self, attr: ArgAttribute) -> bool {
        self.regular.contains(attr)
    }

    /// Checks if these two `ArgAttributes` are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool {
        // There's only one regular attribute that matters for the call ABI: InReg.
        // Everything else is things like noalias, dereferenceable, nonnull, ...
        // (This also applies to pointee_size, pointee_align.)
        if self.regular.contains(ArgAttribute::InReg) != other.regular.contains(ArgAttribute::InReg)
        {
            return false;
        }
        // We also compare the sign extension mode -- this could let the callee make assumptions
        // about bits that conceptually were not even passed.
        if self.arg_ext != other.arg_ext {
            return false;
        }
        true
    }
}
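
// Illustrative sketch (added for exposition; not part of the original file):
// `eq_abi` deliberately ignores attributes that are mere optimization hints, so two
// attribute sets differing only in `NoAlias` still count as ABI-equal.
#[allow(dead_code)]
fn example_attrs_eq_abi() -> bool {
    let mut a = ArgAttributes::new();
    a.set(ArgAttribute::NoAlias);
    // true: `NoAlias` does not affect how the argument is passed.
    ArgAttributes::new().eq_abi(&a)
}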

impl From<ArgAttribute> for ArgAttributes {
    fn from(value: ArgAttribute) -> Self {
        Self {
            regular: value,
            arg_ext: ArgExtension::None,
            pointee_size: Size::ZERO,
            pointee_align: None,
        }
    }
}

/// An argument passed entirely in registers with the
/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Uniform {
    pub unit: Reg,

    /// The total size of the argument, which can be:
    /// * equal to `unit.size` (one scalar/vector),
    /// * a multiple of `unit.size` (an array of scalar/vectors),
    /// * if `unit.kind` is `Integer`, the last element can be shorter, i.e., `{ i64, i64, i32 }`
    ///   for 64-bit integers with a total size of 20 bytes. When the argument is actually passed,
    ///   this size will be rounded up to the nearest multiple of `unit.size`.
    pub total: Size,

    /// Indicate that the argument is consecutive, in the sense that either all values need to be
    /// passed in registers, or all on the stack. If they are passed on the stack, there should be
    /// no additional padding between elements.
    pub is_consecutive: bool,
}

impl From<Reg> for Uniform {
    fn from(unit: Reg) -> Uniform {
        Uniform { unit, total: unit.size, is_consecutive: false }
    }
}

impl Uniform {
    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
        self.unit.align(cx)
    }

    /// Pass using one or more values of the given type, without requiring them to be consecutive.
    /// That is, some values may be passed in registers and some on the stack.
    pub fn new(unit: Reg, total: Size) -> Self {
        Uniform { unit, total, is_consecutive: false }
    }

    /// Pass using one or more consecutive values of the given type. Either all values will be
    /// passed in registers, or all on the stack.
    pub fn consecutive(unit: Reg, total: Size) -> Self {
        Uniform { unit, total, is_consecutive: true }
    }
}
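
// Illustrative sketch (added for exposition; not part of the original file): a
// 20-byte aggregate passed as 64-bit integer units corresponds to the cast type
// `{ i64, i64, i32 }`; when actually passed, the 20-byte total is rounded up to
// three full `i64` units (24 bytes), as documented on `Uniform::total`.
#[allow(dead_code)]
fn example_uniform_20_bytes() -> Uniform {
    Uniform::new(Reg::i64(), Size::from_bytes(20))
}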

/// Describes the type used for `PassMode::Cast`.
///
/// Passing arguments in this mode works as follows: the registers in the `prefix` (the ones that
/// are `Some`) get laid out one after the other (using `repr(C)` layout rules). Then the
/// `rest.unit` register type gets repeated often enough to cover `rest.size`. This describes the
/// actual type used for the call; the Rust type of the argument is then transmuted to this ABI type
/// (and all data in the padding between the registers is dropped).
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct CastTarget {
    pub prefix: [Option<Reg>; 8],
    /// The offset of `rest` from the start of the value. Currently only implemented for a `Reg`
    /// pair created by the `offset_pair` method.
    pub rest_offset: Option<Size>,
    pub rest: Uniform,
    pub attrs: ArgAttributes,
}

impl From<Reg> for CastTarget {
    fn from(unit: Reg) -> CastTarget {
        CastTarget::from(Uniform::from(unit))
    }
}

impl From<Uniform> for CastTarget {
    fn from(uniform: Uniform) -> CastTarget {
        Self::prefixed([None; 8], uniform)
    }
}

impl CastTarget {
    pub fn prefixed(prefix: [Option<Reg>; 8], rest: Uniform) -> Self {
        Self { prefix, rest_offset: None, rest, attrs: ArgAttributes::new() }
    }

    pub fn offset_pair(a: Reg, offset_from_start: Size, b: Reg) -> Self {
        Self {
            prefix: [Some(a), None, None, None, None, None, None, None],
            rest_offset: Some(offset_from_start),
            rest: b.into(),
            attrs: ArgAttributes::new(),
        }
    }

    pub fn with_attrs(mut self, attrs: ArgAttributes) -> Self {
        self.attrs = attrs;
        self
    }

    pub fn pair(a: Reg, b: Reg) -> CastTarget {
        Self::prefixed([Some(a), None, None, None, None, None, None, None], Uniform::from(b))
    }

    /// When you only access the range containing valid data, you can use this unaligned size;
    /// otherwise, use the safer `size` method.
    pub fn unaligned_size<C: HasDataLayout>(&self, _cx: &C) -> Size {
        // Prefix arguments are passed in specific designated registers
        let prefix_size = if let Some(offset_from_start) = self.rest_offset {
            offset_from_start
        } else {
            self.prefix
                .iter()
                .filter_map(|x| x.map(|reg| reg.size))
                .fold(Size::ZERO, |acc, size| acc + size)
        };
        // Remaining arguments are passed in chunks of the unit size
        let rest_size =
            self.rest.unit.size * self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes());

        prefix_size + rest_size
    }

    pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
        self.unaligned_size(cx).align_to(self.align(cx))
    }

    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
        self.prefix
            .iter()
            .filter_map(|x| x.map(|reg| reg.align(cx)))
            .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)), |acc, align| {
                acc.max(align)
            })
    }

    /// Checks if these two `CastTarget` are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool {
        let CastTarget {
            prefix: prefix_l,
            rest_offset: rest_offset_l,
            rest: rest_l,
            attrs: attrs_l,
        } = self;
        let CastTarget {
            prefix: prefix_r,
            rest_offset: rest_offset_r,
            rest: rest_r,
            attrs: attrs_r,
        } = other;
        prefix_l == prefix_r
            && rest_offset_l == rest_offset_r
            && rest_l == rest_r
            && attrs_l.eq_abi(attrs_r)
    }
}
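
// Illustrative sketch (added for exposition; not part of the original file): for
// `CastTarget::pair(Reg::i64(), Reg::i32())` the prefix contributes 8 bytes and
// `rest` one 4-byte unit, so `unaligned_size` is 12 bytes; `size` then rounds up to
// the overall (8-byte) alignment, i.e. 16 bytes on typical data layouts.
#[allow(dead_code)]
fn example_cast_target_pair() -> CastTarget {
    CastTarget::pair(Reg::i64(), Reg::i32())
}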

/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct ArgAbi<'a, Ty> {
    pub layout: TyAndLayout<'a, Ty>,
    pub mode: PassMode,
}

// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
impl<'a, Ty: fmt::Display> fmt::Debug for ArgAbi<'a, Ty> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let ArgAbi { layout, mode } = self;
        f.debug_struct("ArgAbi").field("layout", layout).field("mode", mode).finish()
    }
}

impl<'a, Ty> ArgAbi<'a, Ty> {
    /// This defines the "default ABI" for that type, that is then later adjusted in
    /// `fn_abi_adjust_for_abi`.
    pub fn new(
        cx: &impl HasDataLayout,
        layout: TyAndLayout<'a, Ty>,
        scalar_attrs: impl Fn(Scalar, Size) -> ArgAttributes,
    ) -> Self {
        let mode = match layout.backend_repr {
            _ if layout.is_zst() => PassMode::Ignore,
            BackendRepr::Scalar(scalar) => PassMode::Direct(scalar_attrs(scalar, Size::ZERO)),
            BackendRepr::ScalarPair(a, b) => PassMode::Pair(
                scalar_attrs(a, Size::ZERO),
                scalar_attrs(b, a.size(cx).align_to(b.align(cx).abi)),
            ),
            BackendRepr::SimdVector { .. } => PassMode::Direct(ArgAttributes::new()),
            BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
            BackendRepr::ScalableVector { .. } => PassMode::Direct(ArgAttributes::new()),
        };
        ArgAbi { layout, mode }
    }

    fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
        let mut attrs = ArgAttributes::new();

        // For non-immediate arguments the callee gets its own copy of
        // the value on the stack, so there are no aliases. The function
        // can capture the address of the argument, but not the provenance.
        attrs
            .set(ArgAttribute::NoAlias)
            .set(ArgAttribute::CapturesAddress)
            .set(ArgAttribute::NonNull)
            .set(ArgAttribute::NoUndef);
        attrs.pointee_size = layout.size;
        attrs.pointee_align = Some(layout.align.abi);

        let meta_attrs = layout.is_unsized().then_some(ArgAttributes::new());

        PassMode::Indirect { attrs, meta_attrs, on_stack: false }
    }

    /// Pass this argument directly instead. Should NOT be used!
    /// Only exists because of past ABI mistakes that will take time to fix
    /// (see <https://github.com/rust-lang/rust/issues/115666>).
    #[track_caller]
    pub fn make_direct_deprecated(&mut self) {
        match self.mode {
            PassMode::Indirect { .. } => {
                self.mode = PassMode::Direct(ArgAttributes::new());
            }
            PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => {} // already direct
            _ => panic!("Tried to make {:?} direct", self.mode),
        }
    }

    /// Pass this argument indirectly, by passing a (thin or wide) pointer to the argument instead.
    /// This is valid for both sized and unsized arguments.
    #[track_caller]
    pub fn make_indirect(&mut self) {
        match self.mode {
            PassMode::Direct(_) | PassMode::Pair(_, _) => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            _ => panic!("Tried to make {:?} indirect", self.mode),
        }
    }

    /// Same as `make_indirect`, but for arguments that are ignored. Only needed for ABIs that pass
    /// ZSTs indirectly.
    #[track_caller]
    pub fn make_indirect_from_ignore(&mut self) {
        match self.mode {
            PassMode::Ignore => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            _ => panic!("Tried to make {:?} indirect (expected `PassMode::Ignore`)", self.mode),
        }
    }

    /// Pass this argument indirectly, by placing it at a fixed stack offset.
    /// This corresponds to the `byval` LLVM argument attribute.
    /// This is only valid for sized arguments.
    ///
    /// `byval_align` specifies the alignment of the `byval` stack slot, which does not need to
    /// correspond to the type's alignment. This will be `Some` if the target's ABI specifies that
    /// stack slots used for arguments passed by-value have specific alignment requirements which
    /// differ from the alignment used in other situations.
    ///
    /// If `None`, the type's alignment is used.
    ///
    /// If the resulting alignment differs from the type's alignment,
    /// the argument will be copied to an alloca with sufficient alignment,
    /// either in the caller (if the type's alignment is lower than the byval alignment)
    /// or in the callee (if the type's alignment is higher than the byval alignment),
    /// to ensure that Rust code never sees an underaligned pointer.
    pub fn pass_by_stack_offset(&mut self, byval_align: Option<Align>) {
        assert!(!self.layout.is_unsized(), "used byval ABI for unsized layout");
        self.make_indirect();
        match self.mode {
            PassMode::Indirect { ref mut attrs, meta_attrs: _, ref mut on_stack } => {
                *on_stack = true;

                // Some platforms, like 32-bit x86, change the alignment of the type when passing
                // `byval`. Account for that.
                if let Some(byval_align) = byval_align {
                    // On all targets with byval align this is currently true, so let's assert it.
                    debug_assert!(byval_align >= Align::from_bytes(4).unwrap());
                    attrs.pointee_align = Some(byval_align);
                }
            }
            _ => unreachable!(),
        }
    }
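
    // Illustrative sketch (added for exposition; not part of the original file):
    // which side must make the re-aligning copy, per the rules documented on
    // `pass_by_stack_offset` above.
    #[allow(dead_code)]
    fn example_byval_copy_in_callee(type_align: Align, byval_align: Align) -> bool {
        // The callee copies to a sufficiently aligned alloca exactly when the byval
        // slot may be underaligned relative to the Rust type.
        type_align > byval_align
    }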

    pub fn extend_integer_width_to(&mut self, bits: u64) {
        // Only integers have signedness
        if let BackendRepr::Scalar(scalar) = self.layout.backend_repr
            && let Primitive::Int(i, signed) = scalar.primitive()
            && i.size().bits() < bits
            && let PassMode::Direct(ref mut attrs) = self.mode
        {
            if signed {
                attrs.ext(ArgExtension::Sext)
            } else {
                attrs.ext(ArgExtension::Zext)
            };
        }
    }
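
    // Illustrative sketch (added for exposition; not part of the original file):
    // the extension `extend_integer_width_to` requests for a small integer widened
    // to a full register slot must match the Rust-level signedness.
    #[allow(dead_code)]
    fn example_extension_for(signed: bool) -> ArgExtension {
        if signed { ArgExtension::Sext } else { ArgExtension::Zext }
    }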

    pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32: false };
    }

    pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32 };
    }

    pub fn is_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { .. })
    }

    pub fn is_sized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ })
    }

    pub fn is_unsized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ })
    }

    pub fn is_ignore(&self) -> bool {
        matches!(self.mode, PassMode::Ignore)
    }

    /// Checks if these two `ArgAbi` are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool
    where
        Ty: PartialEq,
    {
        // Ideally we'd just compare the `mode`, but that is not enough -- for some modes LLVM will
        // look at the type.
        self.layout.eq_abi(&other.layout) && self.mode.eq_abi(&other.mode) && {
            // `fn_arg_sanity_check` accepts `PassMode::Direct` for some aggregates.
            // That elevates any type difference to an ABI difference since we just use the
            // full Rust type as the LLVM argument/return type.
            if matches!(self.mode, PassMode::Direct(..))
                && matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
            {
                // For aggregates in `Direct` mode to be compatible, the types need to be equal.
                self.layout.ty == other.layout.ty
            } else {
                true
            }
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum RiscvInterruptKind {
    Machine,
    Supervisor,
}

impl RiscvInterruptKind {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Machine => "machine",
            Self::Supervisor => "supervisor",
        }
    }
}

/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// The signature represented by this type may not match the MIR function signature.
/// Certain attributes, like `#[track_caller]`, can introduce additional arguments, which are
/// present in [`FnAbi`], but not in `FnSig`.
/// The std::offload module also adds an additional dyn_ptr argument to the GpuKernel ABI.
/// While this difference is rarely relevant, it should still be kept in mind.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
587pub struct FnAbi<'a, Ty> {
588/// The type, layout, and information about how each argument is passed.
589pub args: Box<[ArgAbi<'a, Ty>]>,
590591/// The layout, type, and the way a value is returned from this function.
592pub ret: ArgAbi<'a, Ty>,
593594/// Marks this function as variadic (accepting a variable number of arguments).
595pub c_variadic: bool,
596597/// The count of non-variadic arguments.
598 ///
599 /// Should only be different from args.len() when c_variadic is true.
600 /// This can be used to know whether an argument is variadic or not.
    pub fixed_count: u32,

    /// The calling convention of this function.
    pub conv: CanonAbi,

    /// Indicates if an unwind may happen across a call to this function.
    pub can_unwind: bool,
}

// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
impl<'a, Ty: fmt::Display> fmt::Debug for FnAbi<'a, Ty> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let FnAbi { args, ret, c_variadic, fixed_count, conv, can_unwind } = self;
        f.debug_struct("FnAbi")
            .field("args", args)
            .field("ret", ret)
            .field("c_variadic", c_variadic)
            .field("fixed_count", fixed_count)
            .field("conv", conv)
            .field("can_unwind", can_unwind)
            .finish()
    }
}

impl<'a, Ty> FnAbi<'a, Ty> {
    pub fn adjust_for_foreign_abi<C>(&mut self, cx: &C, abi: ExternAbi)
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec + HasX86AbiOpt,
    {
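        // `extern "x86-interrupt"` handlers receive their argument via the
        // interrupt stack frame rather than through the usual registers, so
        // it is passed by stack offset and the per-arch lowering is skipped.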
        if abi == ExternAbi::X86Interrupt {
            if let Some(arg) = self.args.first_mut() {
                arg.pass_by_stack_offset(None);
            }
            return;
        }

        let spec = cx.target_spec();
        match &spec.arch {
            Arch::X86 => {
                let (flavor, regparm) = match abi {
                    ExternAbi::Fastcall { .. } | ExternAbi::Vectorcall { .. } => {
                        (x86::Flavor::FastcallOrVectorcall, None)
                    }
                    ExternAbi::C { .. } | ExternAbi::Cdecl { .. } | ExternAbi::Stdcall { .. } => {
                        (x86::Flavor::General, cx.x86_abi_opt().regparm)
                    }
                    _ => (x86::Flavor::General, None),
                };
                let reg_struct_return = cx.x86_abi_opt().reg_struct_return;
                let opts = x86::X86Options { flavor, regparm, reg_struct_return };
                if spec.is_like_msvc {
                    x86_win32::compute_abi_info(cx, self, opts);
                } else {
                    x86::compute_abi_info(cx, self, opts);
                }
            }
            Arch::X86_64 => match abi {
                ExternAbi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
                ExternAbi::Win64 { .. } | ExternAbi::Vectorcall { .. } => {
                    x86_win64::compute_abi_info(cx, self)
                }
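                // For any other ABI, fall back to the platform default:
                // the Win64 convention on Windows-like targets, SysV elsewhere.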
                _ => {
                    if cx.target_spec().is_like_windows {
                        x86_win64::compute_abi_info(cx, self)
                    } else {
                        x86_64::compute_abi_info(cx, self)
                    }
                }
            },
            Arch::AArch64 | Arch::Arm64EC => {
                let kind = if cx.target_spec().is_like_darwin {
                    aarch64::AbiKind::DarwinPCS
                } else if cx.target_spec().is_like_windows {
                    aarch64::AbiKind::Win64
                } else {
                    aarch64::AbiKind::AAPCS
                };
                aarch64::compute_abi_info(cx, self, kind)
            }
            Arch::AmdGpu => amdgpu::compute_abi_info(cx, self),
            Arch::Arm => arm::compute_abi_info(cx, self),
            Arch::Avr => avr::compute_abi_info(cx, self),
            Arch::LoongArch32 | Arch::LoongArch64 => loongarch::compute_abi_info(cx, self),
            Arch::M68k => m68k::compute_abi_info(cx, self),
            Arch::CSky => csky::compute_abi_info(cx, self),
            Arch::Mips | Arch::Mips32r6 => mips::compute_abi_info(cx, self),
            Arch::Mips64 | Arch::Mips64r6 => mips64::compute_abi_info(cx, self),
            Arch::PowerPC => powerpc::compute_abi_info(cx, self),
            Arch::PowerPC64 => powerpc64::compute_abi_info(cx, self),
            Arch::S390x => s390x::compute_abi_info(cx, self),
            Arch::Msp430 => msp430::compute_abi_info(cx, self),
            Arch::Sparc => sparc::compute_abi_info(cx, self),
            Arch::Sparc64 => sparc64::compute_abi_info(cx, self),
            Arch::Nvptx64 => {
                if abi == ExternAbi::PtxKernel || abi == ExternAbi::GpuKernel {
                    nvptx64::compute_ptx_kernel_abi_info(cx, self)
                } else {
                    nvptx64::compute_abi_info(cx, self)
                }
            }
            Arch::Hexagon => hexagon::compute_abi_info(cx, self),
            Arch::Xtensa => xtensa::compute_abi_info(cx, self),
            Arch::RiscV32 | Arch::RiscV64 => riscv::compute_abi_info(cx, self),
            Arch::Wasm32 | Arch::Wasm64 => wasm::compute_abi_info(cx, self),
            Arch::Bpf => bpf::compute_abi_info(cx, self),
            arch @ (Arch::SpirV | Arch::Other(_)) => {
                panic!("no lowering implemented for {arch}")
            }
        }
    }

    pub fn adjust_for_rust_abi<C>(&mut self, cx: &C)
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec,
    {
        let spec = cx.target_spec();
        match &spec.arch {
            Arch::X86 => x86::compute_rust_abi_info(cx, self),
            Arch::RiscV32 | Arch::RiscV64 => riscv::compute_rust_abi_info(cx, self),
            Arch::LoongArch32 | Arch::LoongArch64 => loongarch::compute_rust_abi_info(cx, self),
            Arch::AArch64 => aarch64::compute_rust_abi_info(cx, self),
            Arch::Bpf => bpf::compute_rust_abi_info(self),
            _ => {}
        };

        for (arg_idx, arg) in self
            .args
            .iter_mut()
            .enumerate()
            .map(|(idx, arg)| (Some(idx), arg))
            .chain(iter::once((None, &mut self.ret)))
        {
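            // `arg_idx` is `Some(i)` for the i-th argument and `None` for the
            // return place, which is chained on at the end.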
            // If the logic above already picked a specific type to cast the argument to, leave that
            // in place.
            if matches!(arg.mode, PassMode::Ignore | PassMode::Cast { .. }) {
                continue;
            }

            if arg_idx.is_none()
                && arg.layout.size > Primitive::Pointer(AddressSpace::ZERO).size(cx) * 2
                && !matches!(arg.layout.backend_repr, BackendRepr::SimdVector { .. })
            {
                // Return values larger than 2 registers using a return area
                // pointer. LLVM and Cranelift disagree about how to return
                // values that don't fit in the registers designated for return
                // values. LLVM will force the entire return value to be passed
                // by return area pointer, while Cranelift will look at each IR level
                // return value independently and decide to pass it in a
                // register or not, which would result in the return value
                // being passed partially in registers and partially through a
                // return area pointer. For large IR-level values such as `i128`,
                // cranelift will even split up the value into smaller chunks.
                //
                // While Cranelift may need to be fixed as the LLVM behavior is
                // generally more correct with respect to the surface language,
                // forcing this behavior in rustc itself makes it easier for
                // other backends to conform to the Rust ABI, and for the C ABI
                // rustc already handles this behavior anyway.
                //
                // In addition, LLVM's decision to pass the return value in
                // registers or using a return area pointer depends on how
                // exactly the return type is lowered to an LLVM IR type. For
                // example `Option<u128>` can be lowered as `{ i128, i128 }`,
                // in which case the x86_64 backend would use a return area
                // pointer, or it could be passed as `{ i32, i128 }`, in which
                // case the x86_64 backend would pass it in registers by taking
                // advantage of an LLVM ABI extension that allows using 3
                // registers for the x86_64 sysv call conv rather than the
                // officially specified 2 registers.
                //
                // FIXME: Technically we should look at the amount of available
                // return registers rather than guessing that there are 2
                // registers for return values. In practice only a couple of
                // architectures have less than 2 return registers, none of
                // which are supported by Cranelift.
                //
                // NOTE: This adjustment is only necessary for the Rust ABI, as
                // for other ABIs the calling convention implementations in
                // rustc_target already ensure any return value which doesn't
                // fit in the available amount of return registers is passed in
                // the right way for the current target.
                //
                // The adjustment is not necessary nor desired for types with a vector
                // representation; those are handled below.
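                // Concretely (a sketch): with 8-byte pointers, a 24-byte
                // `(u64, u64, u64)` return exceeds the assumed 2-register
                // budget and is returned via a hidden pointer, while a
                // 16-byte `i128` is left untouched by this adjustment.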
                arg.make_indirect();
                continue;
            }

            match arg.layout.backend_repr {
                BackendRepr::Memory { .. } => {
                    // Compute `Aggregate` ABI.

                    let is_indirect_not_on_stack =
                        matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
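                    // By this point, values with `Memory` repr are expected to
                    // already be passed indirectly (and not as on-stack slots);
                    // anything else would indicate a bug in the lowering above.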
                    assert!(is_indirect_not_on_stack);

                    let size = arg.layout.size;
                    if arg.layout.is_sized()
                        && size <= Primitive::Pointer(AddressSpace::ZERO).size(cx)
                    {
                        // We want to pass small aggregates as immediates, but using
                        // an LLVM aggregate type for this leads to bad optimizations,
                        // so we pick an appropriately sized integer type instead.
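                        // For example (a sketch): on a 64-bit target, a 4-byte
                        // `#[repr(C)]` struct would be cast to a single 4-byte
                        // integer rather than passed as an LLVM aggregate.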
                        arg.cast_to(Reg { kind: RegKind::Integer, size });
                    }
                }

                BackendRepr::SimdVector { .. } => {
                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // We *could* do better in some cases, e.g. on x86_64 targets where SSE2 is
                    // required. However, it turns out that that makes LLVM worse at optimizing this
                    // code, so we pass things indirectly even there. See #139029 for more on that.
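                    // Concretely (an illustrative scenario): a 256-bit vector
                    // passed as an immediate would travel in a YMM register
                    // only on the side compiled with AVX, so a non-AVX caller
                    // and an AVX callee would disagree on where the value
                    // lives; routing it through memory avoids that mismatch.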
                    if spec.simd_types_indirect {
                        arg.make_indirect();
                    }
                }

                _ => {}
            }
        }
    }
}

// Some types are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(target_pointer_width = "64")]
mod size_asserts {
    use rustc_data_structures::static_assert_size;

    use super::*;
    // tidy-alphabetical-start
    static_assert_size!(ArgAbi<'_, usize>, 56);
    static_assert_size!(FnAbi<'_, usize>, 80);
    // tidy-alphabetical-end
}