1use std::{fmt, iter};
23use rustc_abi::{
4AddressSpace, Align, BackendRepr, CanonAbi, ExternAbi, FieldsShape, HasDataLayout, Primitive,
5Reg, RegKind, Scalar, Size, TyAbiInterface, TyAndLayout, Variants,
6};
7use rustc_macros::HashStable_Generic;
89pub use crate::spec::AbiMap;
10use crate::spec::{Arch, HasTargetSpec, HasX86AbiOpt};
1112mod aarch64;
13mod amdgpu;
14mod arm;
15mod avr;
16mod bpf;
17mod csky;
18mod hexagon;
19mod loongarch;
20mod m68k;
21mod mips;
22mod mips64;
23mod msp430;
24mod nvptx64;
25mod powerpc;
26mod powerpc64;
27mod riscv;
28mod s390x;
29mod sparc;
30mod sparc64;
31mod wasm;
32mod x86;
33mod x86_64;
34mod x86_win32;
35mod x86_win64;
36mod xtensa;
3738#[derive(#[automatically_derived]
impl ::core::clone::Clone for PassMode {
#[inline]
fn clone(&self) -> PassMode {
match self {
PassMode::Ignore => PassMode::Ignore,
PassMode::Direct(__self_0) =>
PassMode::Direct(::core::clone::Clone::clone(__self_0)),
PassMode::Pair(__self_0, __self_1) =>
PassMode::Pair(::core::clone::Clone::clone(__self_0),
::core::clone::Clone::clone(__self_1)),
PassMode::Cast { pad_i32: __self_0, cast: __self_1 } =>
PassMode::Cast {
pad_i32: ::core::clone::Clone::clone(__self_0),
cast: ::core::clone::Clone::clone(__self_1),
},
PassMode::Indirect {
attrs: __self_0, meta_attrs: __self_1, on_stack: __self_2 } =>
PassMode::Indirect {
attrs: ::core::clone::Clone::clone(__self_0),
meta_attrs: ::core::clone::Clone::clone(__self_1),
on_stack: ::core::clone::Clone::clone(__self_2),
},
}
}
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for PassMode {
#[inline]
fn eq(&self, other: &PassMode) -> bool {
let __self_discr = ::core::intrinsics::discriminant_value(self);
let __arg1_discr = ::core::intrinsics::discriminant_value(other);
__self_discr == __arg1_discr &&
match (self, other) {
(PassMode::Direct(__self_0), PassMode::Direct(__arg1_0)) =>
__self_0 == __arg1_0,
(PassMode::Pair(__self_0, __self_1),
PassMode::Pair(__arg1_0, __arg1_1)) =>
__self_0 == __arg1_0 && __self_1 == __arg1_1,
(PassMode::Cast { pad_i32: __self_0, cast: __self_1 },
PassMode::Cast { pad_i32: __arg1_0, cast: __arg1_1 }) =>
__self_0 == __arg1_0 && __self_1 == __arg1_1,
(PassMode::Indirect {
attrs: __self_0, meta_attrs: __self_1, on_stack: __self_2 },
PassMode::Indirect {
attrs: __arg1_0, meta_attrs: __arg1_1, on_stack: __arg1_2 })
=>
__self_2 == __arg1_2 && __self_0 == __arg1_0 &&
__self_1 == __arg1_1,
_ => true,
}
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for PassMode {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<ArgAttributes>;
let _: ::core::cmp::AssertParamIsEq<bool>;
let _: ::core::cmp::AssertParamIsEq<Box<CastTarget>>;
let _: ::core::cmp::AssertParamIsEq<Option<ArgAttributes>>;
}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for PassMode {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
let __self_discr = ::core::intrinsics::discriminant_value(self);
::core::hash::Hash::hash(&__self_discr, state);
match self {
PassMode::Direct(__self_0) =>
::core::hash::Hash::hash(__self_0, state),
PassMode::Pair(__self_0, __self_1) => {
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state)
}
PassMode::Cast { pad_i32: __self_0, cast: __self_1 } => {
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state)
}
PassMode::Indirect {
attrs: __self_0, meta_attrs: __self_1, on_stack: __self_2 } =>
{
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state);
::core::hash::Hash::hash(__self_2, state)
}
_ => {}
}
}
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for PassMode {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
PassMode::Ignore =>
::core::fmt::Formatter::write_str(f, "Ignore"),
PassMode::Direct(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f, "Direct",
&__self_0),
PassMode::Pair(__self_0, __self_1) =>
::core::fmt::Formatter::debug_tuple_field2_finish(f, "Pair",
__self_0, &__self_1),
PassMode::Cast { pad_i32: __self_0, cast: __self_1 } =>
::core::fmt::Formatter::debug_struct_field2_finish(f, "Cast",
"pad_i32", __self_0, "cast", &__self_1),
PassMode::Indirect {
attrs: __self_0, meta_attrs: __self_1, on_stack: __self_2 } =>
::core::fmt::Formatter::debug_struct_field3_finish(f,
"Indirect", "attrs", __self_0, "meta_attrs", __self_1,
"on_stack", &__self_2),
}
}
}Debug, const _: () =
{
impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
for PassMode where __CTX: ::rustc_span::HashStableContext {
#[inline]
fn hash_stable(&self, __hcx: &mut __CTX,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
match *self {
PassMode::Ignore => {}
PassMode::Direct(ref __binding_0) => {
{ __binding_0.hash_stable(__hcx, __hasher); }
}
PassMode::Pair(ref __binding_0, ref __binding_1) => {
{ __binding_0.hash_stable(__hcx, __hasher); }
{ __binding_1.hash_stable(__hcx, __hasher); }
}
PassMode::Cast {
pad_i32: ref __binding_0, cast: ref __binding_1 } => {
{ __binding_0.hash_stable(__hcx, __hasher); }
{ __binding_1.hash_stable(__hcx, __hasher); }
}
PassMode::Indirect {
attrs: ref __binding_0,
meta_attrs: ref __binding_1,
on_stack: ref __binding_2 } => {
{ __binding_0.hash_stable(__hcx, __hasher); }
{ __binding_1.hash_stable(__hcx, __hasher); }
{ __binding_2.hash_stable(__hcx, __hasher); }
}
}
}
}
};HashStable_Generic)]
39pub enum PassMode {
40/// Ignore the argument.
41 ///
42 /// The argument is a ZST.
43Ignore,
44/// Pass the argument directly.
45 ///
46 /// The argument has a layout abi of `Scalar` or `Vector`.
47 /// Unfortunately due to past mistakes, in rare cases on wasm, it can also be `Aggregate`.
48 /// This is bad since it leaks LLVM implementation details into the ABI.
49 /// (Also see <https://github.com/rust-lang/rust/issues/115666>.)
50Direct(ArgAttributes),
51/// Pass a pair's elements directly in two arguments.
52 ///
53 /// The argument has a layout abi of `ScalarPair`.
54Pair(ArgAttributes, ArgAttributes),
55/// Pass the argument after casting it. See the `CastTarget` docs for details.
56 ///
57 /// `pad_i32` indicates if a `Reg::i32()` dummy argument is emitted before the real argument.
58Cast { pad_i32: bool, cast: Box<CastTarget> },
59/// Pass the argument indirectly via a hidden pointer.
60 ///
61 /// The `meta_attrs` value, if any, is for the metadata (vtable or length) of an unsized
62 /// argument. (This is the only mode that supports unsized arguments.)
63 ///
64 /// `on_stack` defines that the value should be passed at a fixed stack offset in accordance to
65 /// the ABI rather than passed using a pointer. This corresponds to the `byval` LLVM argument
66 /// attribute. The `byval` argument will use a byte array with the same size as the Rust type
67 /// (which ensures that padding is preserved and that we do not rely on LLVM's struct layout),
68 /// and will use the alignment specified in `attrs.pointee_align` (if `Some`) or the type's
69 /// alignment (if `None`). This means that the alignment will not always
70 /// match the Rust type's alignment; see documentation of `pass_by_stack_offset` for more info.
71 ///
72 /// `on_stack` cannot be true for unsized arguments, i.e., when `meta_attrs` is `Some`.
73Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
74}
7576impl PassMode {
77/// Checks if these two `PassMode` are equal enough to be considered "the same for all
78 /// function call ABIs". However, the `Layout` can also impact ABI decisions,
79 /// so that needs to be compared as well!
80pub fn eq_abi(&self, other: &Self) -> bool {
81match (self, other) {
82 (PassMode::Ignore, PassMode::Ignore) => true,
83 (PassMode::Direct(a1), PassMode::Direct(a2)) => a1.eq_abi(a2),
84 (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => a1.eq_abi(a2) && b1.eq_abi(b2),
85 (
86 PassMode::Cast { cast: c1, pad_i32: pad1 },
87 PassMode::Cast { cast: c2, pad_i32: pad2 },
88 ) => c1.eq_abi(c2) && pad1 == pad2,
89 (
90 PassMode::Indirect { attrs: a1, meta_attrs: None, on_stack: s1 },
91 PassMode::Indirect { attrs: a2, meta_attrs: None, on_stack: s2 },
92 ) => a1.eq_abi(a2) && s1 == s2,
93 (
94 PassMode::Indirect { attrs: a1, meta_attrs: Some(e1), on_stack: s1 },
95 PassMode::Indirect { attrs: a2, meta_attrs: Some(e2), on_stack: s2 },
96 ) => a1.eq_abi(a2) && e1.eq_abi(e2) && s1 == s2,
97_ => false,
98 }
99 }
100}
101102// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
103// of this module
104pub use attr_impl::ArgAttribute;
105106#[allow(non_upper_case_globals)]
107#[allow(unused)]
108mod attr_impl {
109use rustc_macros::HashStable_Generic;
110111// The subset of llvm::Attribute needed for arguments, packed into a bitfield.
112#[derive(#[automatically_derived]
impl ::core::clone::Clone for ArgAttribute {
#[inline]
fn clone(&self) -> ArgAttribute {
let _: ::core::clone::AssertParamIsClone<u16>;
*self
}
}Clone, #[automatically_derived]
impl ::core::marker::Copy for ArgAttribute { }Copy, #[automatically_derived]
impl ::core::default::Default for ArgAttribute {
#[inline]
fn default() -> ArgAttribute {
ArgAttribute(::core::default::Default::default())
}
}Default, #[automatically_derived]
impl ::core::hash::Hash for ArgAttribute {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
::core::hash::Hash::hash(&self.0, state)
}
}Hash, #[automatically_derived]
impl ::core::cmp::PartialEq for ArgAttribute {
#[inline]
fn eq(&self, other: &ArgAttribute) -> bool { self.0 == other.0 }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for ArgAttribute {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<u16>;
}
}Eq, const _: () =
{
impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
for ArgAttribute where __CTX: ::rustc_span::HashStableContext {
#[inline]
fn hash_stable(&self, __hcx: &mut __CTX,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
ArgAttribute(ref __binding_0) => {
{ __binding_0.hash_stable(__hcx, __hasher); }
}
}
}
}
};HashStable_Generic)]
113pub struct ArgAttribute(u16);
114impl ArgAttribute {
#[allow(deprecated, non_upper_case_globals,)]
pub const CapturesNone: Self = Self::from_bits_retain(0b111);
#[allow(deprecated, non_upper_case_globals,)]
pub const CapturesAddress: Self = Self::from_bits_retain(0b110);
#[allow(deprecated, non_upper_case_globals,)]
pub const CapturesReadOnly: Self = Self::from_bits_retain(0b100);
#[allow(deprecated, non_upper_case_globals,)]
pub const NoAlias: Self = Self::from_bits_retain(1 << 3);
#[allow(deprecated, non_upper_case_globals,)]
pub const NonNull: Self = Self::from_bits_retain(1 << 4);
#[allow(deprecated, non_upper_case_globals,)]
pub const ReadOnly: Self = Self::from_bits_retain(1 << 5);
#[allow(deprecated, non_upper_case_globals,)]
pub const InReg: Self = Self::from_bits_retain(1 << 6);
#[allow(deprecated, non_upper_case_globals,)]
pub const NoUndef: Self = Self::from_bits_retain(1 << 7);
#[allow(deprecated, non_upper_case_globals,)]
pub const Writable: Self = Self::from_bits_retain(1 << 8);
}
impl ::bitflags::Flags for ArgAttribute {
const FLAGS: &'static [::bitflags::Flag<ArgAttribute>] =
&[{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("CapturesNone",
ArgAttribute::CapturesNone)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("CapturesAddress",
ArgAttribute::CapturesAddress)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("CapturesReadOnly",
ArgAttribute::CapturesReadOnly)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("NoAlias", ArgAttribute::NoAlias)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("NonNull", ArgAttribute::NonNull)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("ReadOnly", ArgAttribute::ReadOnly)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("InReg", ArgAttribute::InReg)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("NoUndef", ArgAttribute::NoUndef)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("Writable", ArgAttribute::Writable)
}];
type Bits = u16;
fn bits(&self) -> u16 { ArgAttribute::bits(self) }
fn from_bits_retain(bits: u16) -> ArgAttribute {
ArgAttribute::from_bits_retain(bits)
}
}
#[allow(dead_code, deprecated, unused_doc_comments, unused_attributes,
unused_mut, unused_imports, non_upper_case_globals, clippy ::
assign_op_pattern, clippy :: iter_without_into_iter,)]
const _: () =
{
#[allow(dead_code, deprecated, unused_attributes)]
impl ArgAttribute {
/// Get a flags value with all bits unset.
#[inline]
pub const fn empty() -> Self {
Self(<u16 as ::bitflags::Bits>::EMPTY)
}
/// Get a flags value with all known bits set.
#[inline]
pub const fn all() -> Self {
let mut truncated = <u16 as ::bitflags::Bits>::EMPTY;
let mut i = 0;
{
{
let flag =
<ArgAttribute as
::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ArgAttribute as
::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ArgAttribute as
::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ArgAttribute as
::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ArgAttribute as
::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ArgAttribute as
::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ArgAttribute as
::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ArgAttribute as
::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ArgAttribute as
::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
let _ = i;
Self(truncated)
}
/// Get the underlying bits value.
///
/// The returned value is exactly the bits set in this flags value.
#[inline]
pub const fn bits(&self) -> u16 { self.0 }
/// Convert from a bits value.
///
/// This method will return `None` if any unknown bits are set.
#[inline]
pub const fn from_bits(bits: u16)
-> ::bitflags::__private::core::option::Option<Self> {
let truncated = Self::from_bits_truncate(bits).0;
if truncated == bits {
::bitflags::__private::core::option::Option::Some(Self(bits))
} else { ::bitflags::__private::core::option::Option::None }
}
/// Convert from a bits value, unsetting any unknown bits.
#[inline]
pub const fn from_bits_truncate(bits: u16) -> Self {
Self(bits & Self::all().0)
}
/// Convert from a bits value exactly.
#[inline]
pub const fn from_bits_retain(bits: u16) -> Self { Self(bits) }
/// Get a flags value with the bits of a flag with the given name set.
///
/// This method will return `None` if `name` is empty or doesn't
/// correspond to any named flag.
#[inline]
pub fn from_name(name: &str)
-> ::bitflags::__private::core::option::Option<Self> {
{
if name == "CapturesNone" {
return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::CapturesNone.bits()));
}
};
;
{
if name == "CapturesAddress" {
return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::CapturesAddress.bits()));
}
};
;
{
if name == "CapturesReadOnly" {
return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::CapturesReadOnly.bits()));
}
};
;
{
if name == "NoAlias" {
return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::NoAlias.bits()));
}
};
;
{
if name == "NonNull" {
return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::NonNull.bits()));
}
};
;
{
if name == "ReadOnly" {
return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::ReadOnly.bits()));
}
};
;
{
if name == "InReg" {
return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::InReg.bits()));
}
};
;
{
if name == "NoUndef" {
return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::NoUndef.bits()));
}
};
;
{
if name == "Writable" {
return ::bitflags::__private::core::option::Option::Some(Self(ArgAttribute::Writable.bits()));
}
};
;
let _ = name;
::bitflags::__private::core::option::Option::None
}
/// Whether all bits in this flags value are unset.
#[inline]
pub const fn is_empty(&self) -> bool {
self.0 == <u16 as ::bitflags::Bits>::EMPTY
}
/// Whether all known bits in this flags value are set.
#[inline]
pub const fn is_all(&self) -> bool {
Self::all().0 | self.0 == self.0
}
/// Whether any set bits in a source flags value are also set in a target flags value.
#[inline]
pub const fn intersects(&self, other: Self) -> bool {
self.0 & other.0 != <u16 as ::bitflags::Bits>::EMPTY
}
/// Whether all set bits in a source flags value are also set in a target flags value.
#[inline]
pub const fn contains(&self, other: Self) -> bool {
self.0 & other.0 == other.0
}
/// The bitwise or (`|`) of the bits in two flags values.
#[inline]
pub fn insert(&mut self, other: Self) {
*self = Self(self.0).union(other);
}
/// The intersection of a source flags value with the complement of a target flags
/// value (`&!`).
///
/// This method is not equivalent to `self & !other` when `other` has unknown bits set.
/// `remove` won't truncate `other`, but the `!` operator will.
#[inline]
pub fn remove(&mut self, other: Self) {
*self = Self(self.0).difference(other);
}
/// The bitwise exclusive-or (`^`) of the bits in two flags values.
#[inline]
pub fn toggle(&mut self, other: Self) {
*self = Self(self.0).symmetric_difference(other);
}
/// Call `insert` when `value` is `true` or `remove` when `value` is `false`.
#[inline]
pub fn set(&mut self, other: Self, value: bool) {
if value { self.insert(other); } else { self.remove(other); }
}
/// The bitwise and (`&`) of the bits in two flags values.
#[inline]
#[must_use]
pub const fn intersection(self, other: Self) -> Self {
Self(self.0 & other.0)
}
/// The bitwise or (`|`) of the bits in two flags values.
#[inline]
#[must_use]
pub const fn union(self, other: Self) -> Self {
Self(self.0 | other.0)
}
/// The intersection of a source flags value with the complement of a target flags
/// value (`&!`).
///
/// This method is not equivalent to `self & !other` when `other` has unknown bits set.
/// `difference` won't truncate `other`, but the `!` operator will.
#[inline]
#[must_use]
pub const fn difference(self, other: Self) -> Self {
Self(self.0 & !other.0)
}
/// The bitwise exclusive-or (`^`) of the bits in two flags values.
#[inline]
#[must_use]
pub const fn symmetric_difference(self, other: Self) -> Self {
Self(self.0 ^ other.0)
}
/// The bitwise negation (`!`) of the bits in a flags value, truncating the result.
#[inline]
#[must_use]
pub const fn complement(self) -> Self {
Self::from_bits_truncate(!self.0)
}
}
impl ::bitflags::__private::core::fmt::Binary for ArgAttribute {
fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
-> ::bitflags::__private::core::fmt::Result {
let inner = self.0;
::bitflags::__private::core::fmt::Binary::fmt(&inner, f)
}
}
impl ::bitflags::__private::core::fmt::Octal for ArgAttribute {
fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
-> ::bitflags::__private::core::fmt::Result {
let inner = self.0;
::bitflags::__private::core::fmt::Octal::fmt(&inner, f)
}
}
impl ::bitflags::__private::core::fmt::LowerHex for ArgAttribute {
fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
-> ::bitflags::__private::core::fmt::Result {
let inner = self.0;
::bitflags::__private::core::fmt::LowerHex::fmt(&inner, f)
}
}
impl ::bitflags::__private::core::fmt::UpperHex for ArgAttribute {
fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
-> ::bitflags::__private::core::fmt::Result {
let inner = self.0;
::bitflags::__private::core::fmt::UpperHex::fmt(&inner, f)
}
}
impl ::bitflags::__private::core::ops::BitOr for ArgAttribute {
type Output = Self;
/// The bitwise or (`|`) of the bits in two flags values.
#[inline]
fn bitor(self, other: ArgAttribute) -> Self { self.union(other) }
}
impl ::bitflags::__private::core::ops::BitOrAssign for ArgAttribute {
/// The bitwise or (`|`) of the bits in two flags values.
#[inline]
fn bitor_assign(&mut self, other: Self) { self.insert(other); }
}
impl ::bitflags::__private::core::ops::BitXor for ArgAttribute {
type Output = Self;
/// The bitwise exclusive-or (`^`) of the bits in two flags values.
#[inline]
fn bitxor(self, other: Self) -> Self {
self.symmetric_difference(other)
}
}
impl ::bitflags::__private::core::ops::BitXorAssign for ArgAttribute {
/// The bitwise exclusive-or (`^`) of the bits in two flags values.
#[inline]
fn bitxor_assign(&mut self, other: Self) { self.toggle(other); }
}
impl ::bitflags::__private::core::ops::BitAnd for ArgAttribute {
type Output = Self;
/// The bitwise and (`&`) of the bits in two flags values.
#[inline]
fn bitand(self, other: Self) -> Self { self.intersection(other) }
}
impl ::bitflags::__private::core::ops::BitAndAssign for ArgAttribute {
/// The bitwise and (`&`) of the bits in two flags values.
#[inline]
fn bitand_assign(&mut self, other: Self) {
*self =
Self::from_bits_retain(self.bits()).intersection(other);
}
}
impl ::bitflags::__private::core::ops::Sub for ArgAttribute {
type Output = Self;
/// The intersection of a source flags value with the complement of a target flags value (`&!`).
///
/// This method is not equivalent to `self & !other` when `other` has unknown bits set.
/// `difference` won't truncate `other`, but the `!` operator will.
#[inline]
fn sub(self, other: Self) -> Self { self.difference(other) }
}
impl ::bitflags::__private::core::ops::SubAssign for ArgAttribute {
/// The intersection of a source flags value with the complement of a target flags value (`&!`).
///
/// This method is not equivalent to `self & !other` when `other` has unknown bits set.
/// `difference` won't truncate `other`, but the `!` operator will.
#[inline]
fn sub_assign(&mut self, other: Self) { self.remove(other); }
}
impl ::bitflags::__private::core::ops::Not for ArgAttribute {
type Output = Self;
/// The bitwise negation (`!`) of the bits in a flags value, truncating the result.
#[inline]
fn not(self) -> Self { self.complement() }
}
impl ::bitflags::__private::core::iter::Extend<ArgAttribute> for
ArgAttribute {
/// The bitwise or (`|`) of the bits in each flags value.
fn extend<T: ::bitflags::__private::core::iter::IntoIterator<Item
= Self>>(&mut self, iterator: T) {
for item in iterator { self.insert(item) }
}
}
impl ::bitflags::__private::core::iter::FromIterator<ArgAttribute> for
ArgAttribute {
/// The bitwise or (`|`) of the bits in each flags value.
fn from_iter<T: ::bitflags::__private::core::iter::IntoIterator<Item
= Self>>(iterator: T) -> Self {
use ::bitflags::__private::core::iter::Extend;
let mut result = Self::empty();
result.extend(iterator);
result
}
}
impl ArgAttribute {
/// Yield a set of contained flags values.
///
/// Each yielded flags value will correspond to a defined named flag. Any unknown bits
/// will be yielded together as a final flags value.
#[inline]
pub const fn iter(&self) -> ::bitflags::iter::Iter<ArgAttribute> {
::bitflags::iter::Iter::__private_const_new(<ArgAttribute as
::bitflags::Flags>::FLAGS,
ArgAttribute::from_bits_retain(self.bits()),
ArgAttribute::from_bits_retain(self.bits()))
}
/// Yield a set of contained named flags values.
///
/// This method is like [`iter`](#method.iter), except only yields bits in contained named flags.
/// Any unknown bits, or bits not corresponding to a contained flag will not be yielded.
#[inline]
pub const fn iter_names(&self)
-> ::bitflags::iter::IterNames<ArgAttribute> {
::bitflags::iter::IterNames::__private_const_new(<ArgAttribute
as ::bitflags::Flags>::FLAGS,
ArgAttribute::from_bits_retain(self.bits()),
ArgAttribute::from_bits_retain(self.bits()))
}
}
impl ::bitflags::__private::core::iter::IntoIterator for ArgAttribute
{
type Item = ArgAttribute;
type IntoIter = ::bitflags::iter::Iter<ArgAttribute>;
fn into_iter(self) -> Self::IntoIter { self.iter() }
}
};bitflags::bitflags! {
115impl ArgAttribute: u16 {
116const CapturesNone = 0b111;
117const CapturesAddress = 0b110;
118const CapturesReadOnly = 0b100;
119const NoAlias = 1 << 3;
120const NonNull = 1 << 4;
121const ReadOnly = 1 << 5;
122const InReg = 1 << 6;
123const NoUndef = 1 << 7;
124const Writable = 1 << 8;
125 }
126 }127impl ::std::fmt::Debug for ArgAttribute {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::bitflags::parser::to_writer(self, f)
}
}rustc_data_structures::external_bitflags_debug! { ArgAttribute }128}
129130/// Sometimes an ABI requires small integers to be extended to a full or partial register. This enum
131/// defines if this extension should be zero-extension or sign-extension when necessary. When it is
132/// not necessary to extend the argument, this enum is ignored.
133#[derive(#[automatically_derived]
impl ::core::marker::Copy for ArgExtension { }Copy, #[automatically_derived]
impl ::core::clone::Clone for ArgExtension {
#[inline]
fn clone(&self) -> ArgExtension { *self }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for ArgExtension {
#[inline]
fn eq(&self, other: &ArgExtension) -> bool {
let __self_discr = ::core::intrinsics::discriminant_value(self);
let __arg1_discr = ::core::intrinsics::discriminant_value(other);
__self_discr == __arg1_discr
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for ArgExtension {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for ArgExtension {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
let __self_discr = ::core::intrinsics::discriminant_value(self);
::core::hash::Hash::hash(&__self_discr, state)
}
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for ArgExtension {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::write_str(f,
match self {
ArgExtension::None => "None",
ArgExtension::Zext => "Zext",
ArgExtension::Sext => "Sext",
})
}
}Debug, const _: () =
{
impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
for ArgExtension where __CTX: ::rustc_span::HashStableContext {
#[inline]
fn hash_stable(&self, __hcx: &mut __CTX,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
match *self {
ArgExtension::None => {}
ArgExtension::Zext => {}
ArgExtension::Sext => {}
}
}
}
};HashStable_Generic)]
134pub enum ArgExtension {
135None,
136 Zext,
137 Sext,
138}
139140/// A compact representation of LLVM attributes (at least those relevant for this module)
141/// that can be manipulated without interacting with LLVM's Attribute machinery.
142#[derive(#[automatically_derived]
impl ::core::marker::Copy for ArgAttributes { }Copy, #[automatically_derived]
impl ::core::clone::Clone for ArgAttributes {
#[inline]
fn clone(&self) -> ArgAttributes {
let _: ::core::clone::AssertParamIsClone<ArgAttribute>;
let _: ::core::clone::AssertParamIsClone<ArgExtension>;
let _: ::core::clone::AssertParamIsClone<Size>;
let _: ::core::clone::AssertParamIsClone<Option<Align>>;
*self
}
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for ArgAttributes {
#[inline]
fn eq(&self, other: &ArgAttributes) -> bool {
self.regular == other.regular && self.arg_ext == other.arg_ext &&
self.pointee_size == other.pointee_size &&
self.pointee_align == other.pointee_align
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for ArgAttributes {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<ArgAttribute>;
let _: ::core::cmp::AssertParamIsEq<ArgExtension>;
let _: ::core::cmp::AssertParamIsEq<Size>;
let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for ArgAttributes {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
::core::hash::Hash::hash(&self.regular, state);
::core::hash::Hash::hash(&self.arg_ext, state);
::core::hash::Hash::hash(&self.pointee_size, state);
::core::hash::Hash::hash(&self.pointee_align, state)
}
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for ArgAttributes {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field4_finish(f, "ArgAttributes",
"regular", &self.regular, "arg_ext", &self.arg_ext,
"pointee_size", &self.pointee_size, "pointee_align",
&&self.pointee_align)
}
}Debug, const _: () =
{
impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
for ArgAttributes where __CTX: ::rustc_span::HashStableContext {
#[inline]
fn hash_stable(&self, __hcx: &mut __CTX,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
ArgAttributes {
regular: ref __binding_0,
arg_ext: ref __binding_1,
pointee_size: ref __binding_2,
pointee_align: ref __binding_3 } => {
{ __binding_0.hash_stable(__hcx, __hasher); }
{ __binding_1.hash_stable(__hcx, __hasher); }
{ __binding_2.hash_stable(__hcx, __hasher); }
{ __binding_3.hash_stable(__hcx, __hasher); }
}
}
}
}
};HashStable_Generic)]
143pub struct ArgAttributes {
144pub regular: ArgAttribute,
145pub arg_ext: ArgExtension,
146/// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
147 /// (corresponding to LLVM's dereferenceable_or_null attributes, i.e., it is okay for this to be
148 /// set on a null pointer, but all non-null pointers must be dereferenceable).
149pub pointee_size: Size,
150/// The minimum alignment of the pointee, if any.
151pub pointee_align: Option<Align>,
152}
153154impl ArgAttributes {
155pub fn new() -> Self {
156ArgAttributes {
157 regular: ArgAttribute::default(),
158 arg_ext: ArgExtension::None,
159 pointee_size: Size::ZERO,
160 pointee_align: None,
161 }
162 }
163164pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
165if !(self.arg_ext == ArgExtension::None || self.arg_ext == ext) {
{
::core::panicking::panic_fmt(format_args!("cannot set {0:?} when {1:?} is already set",
ext, self.arg_ext));
}
};assert!(
166self.arg_ext == ArgExtension::None || self.arg_ext == ext,
167"cannot set {:?} when {:?} is already set",
168 ext,
169self.arg_ext
170 );
171self.arg_ext = ext;
172self173 }
174175pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
176self.regular |= attr;
177self178 }
179180pub fn contains(&self, attr: ArgAttribute) -> bool {
181self.regular.contains(attr)
182 }
183184/// Checks if these two `ArgAttributes` are equal enough to be considered "the same for all
185 /// function call ABIs".
186pub fn eq_abi(&self, other: &Self) -> bool {
187// There's only one regular attribute that matters for the call ABI: InReg.
188 // Everything else is things like noalias, dereferenceable, nonnull, ...
189 // (This also applies to pointee_size, pointee_align.)
190if self.regular.contains(ArgAttribute::InReg) != other.regular.contains(ArgAttribute::InReg)
191 {
192return false;
193 }
194// We also compare the sign extension mode -- this could let the callee make assumptions
195 // about bits that conceptually were not even passed.
196if self.arg_ext != other.arg_ext {
197return false;
198 }
199true
200}
201}
202203impl From<ArgAttribute> for ArgAttributes {
204fn from(value: ArgAttribute) -> Self {
205Self {
206 regular: value,
207 arg_ext: ArgExtension::None,
208 pointee_size: Size::ZERO,
209 pointee_align: None,
210 }
211 }
212}
213214/// An argument passed entirely registers with the
215/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
216#[derive(#[automatically_derived]
impl ::core::clone::Clone for Uniform {
#[inline]
fn clone(&self) -> Uniform {
let _: ::core::clone::AssertParamIsClone<Reg>;
let _: ::core::clone::AssertParamIsClone<Size>;
let _: ::core::clone::AssertParamIsClone<bool>;
*self
}
}Clone, #[automatically_derived]
impl ::core::marker::Copy for Uniform { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for Uniform {
#[inline]
fn eq(&self, other: &Uniform) -> bool {
self.is_consecutive == other.is_consecutive && self.unit == other.unit
&& self.total == other.total
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Uniform {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Reg>;
let _: ::core::cmp::AssertParamIsEq<Size>;
let _: ::core::cmp::AssertParamIsEq<bool>;
}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Uniform {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
::core::hash::Hash::hash(&self.unit, state);
::core::hash::Hash::hash(&self.total, state);
::core::hash::Hash::hash(&self.is_consecutive, state)
}
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Uniform {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field3_finish(f, "Uniform",
"unit", &self.unit, "total", &self.total, "is_consecutive",
&&self.is_consecutive)
}
}Debug, const _: () =
{
impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
for Uniform where __CTX: ::rustc_span::HashStableContext {
#[inline]
fn hash_stable(&self, __hcx: &mut __CTX,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
Uniform {
unit: ref __binding_0,
total: ref __binding_1,
is_consecutive: ref __binding_2 } => {
{ __binding_0.hash_stable(__hcx, __hasher); }
{ __binding_1.hash_stable(__hcx, __hasher); }
{ __binding_2.hash_stable(__hcx, __hasher); }
}
}
}
}
};HashStable_Generic)]
217pub struct Uniform {
218pub unit: Reg,
219220/// The total size of the argument, which can be:
221 /// * equal to `unit.size` (one scalar/vector),
222 /// * a multiple of `unit.size` (an array of scalar/vectors),
223 /// * if `unit.kind` is `Integer`, the last element can be shorter, i.e., `{ i64, i64, i32 }`
224 /// for 64-bit integers with a total size of 20 bytes. When the argument is actually passed,
225 /// this size will be rounded up to the nearest multiple of `unit.size`.
226pub total: Size,
227228/// Indicate that the argument is consecutive, in the sense that either all values need to be
229 /// passed in register, or all on the stack. If they are passed on the stack, there should be
230 /// no additional padding between elements.
231pub is_consecutive: bool,
232}
233234impl From<Reg> for Uniform {
235fn from(unit: Reg) -> Uniform {
236Uniform { unit, total: unit.size, is_consecutive: false }
237 }
238}
239240impl Uniform {
241pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
242self.unit.align(cx)
243 }
244245/// Pass using one or more values of the given type, without requiring them to be consecutive.
246 /// That is, some values may be passed in register and some on the stack.
247pub fn new(unit: Reg, total: Size) -> Self {
248Uniform { unit, total, is_consecutive: false }
249 }
250251/// Pass using one or more consecutive values of the given type. Either all values will be
252 /// passed in registers, or all on the stack.
253pub fn consecutive(unit: Reg, total: Size) -> Self {
254Uniform { unit, total, is_consecutive: true }
255 }
256}
257258/// Describes the type used for `PassMode::Cast`.
259///
260/// Passing arguments in this mode works as follows: the registers in the `prefix` (the ones that
261/// are `Some`) get laid out one after the other (using `repr(C)` layout rules). Then the
262/// `rest.unit` register type gets repeated often enough to cover `rest.size`. This describes the
263/// actual type used for the call; the Rust type of the argument is then transmuted to this ABI type
264/// (and all data in the padding between the registers is dropped).
265#[derive(#[automatically_derived]
impl ::core::clone::Clone for CastTarget {
#[inline]
fn clone(&self) -> CastTarget {
CastTarget {
prefix: ::core::clone::Clone::clone(&self.prefix),
rest_offset: ::core::clone::Clone::clone(&self.rest_offset),
rest: ::core::clone::Clone::clone(&self.rest),
attrs: ::core::clone::Clone::clone(&self.attrs),
}
}
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for CastTarget {
#[inline]
fn eq(&self, other: &CastTarget) -> bool {
self.prefix == other.prefix && self.rest_offset == other.rest_offset
&& self.rest == other.rest && self.attrs == other.attrs
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for CastTarget {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<[Option<Reg>; 8]>;
let _: ::core::cmp::AssertParamIsEq<Option<Size>>;
let _: ::core::cmp::AssertParamIsEq<Uniform>;
let _: ::core::cmp::AssertParamIsEq<ArgAttributes>;
}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for CastTarget {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
::core::hash::Hash::hash(&self.prefix, state);
::core::hash::Hash::hash(&self.rest_offset, state);
::core::hash::Hash::hash(&self.rest, state);
::core::hash::Hash::hash(&self.attrs, state)
}
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for CastTarget {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field4_finish(f, "CastTarget",
"prefix", &self.prefix, "rest_offset", &self.rest_offset, "rest",
&self.rest, "attrs", &&self.attrs)
}
}Debug, const _: () =
{
impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
for CastTarget where __CTX: ::rustc_span::HashStableContext {
#[inline]
fn hash_stable(&self, __hcx: &mut __CTX,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
CastTarget {
prefix: ref __binding_0,
rest_offset: ref __binding_1,
rest: ref __binding_2,
attrs: ref __binding_3 } => {
{ __binding_0.hash_stable(__hcx, __hasher); }
{ __binding_1.hash_stable(__hcx, __hasher); }
{ __binding_2.hash_stable(__hcx, __hasher); }
{ __binding_3.hash_stable(__hcx, __hasher); }
}
}
}
}
};HashStable_Generic)]
266pub struct CastTarget {
267pub prefix: [Option<Reg>; 8],
268/// The offset of `rest` from the start of the value. Currently only implemented for a `Reg`
269 /// pair created by the `offset_pair` method.
270pub rest_offset: Option<Size>,
271pub rest: Uniform,
272pub attrs: ArgAttributes,
273}
274275impl From<Reg> for CastTarget {
276fn from(unit: Reg) -> CastTarget {
277CastTarget::from(Uniform::from(unit))
278 }
279}
280281impl From<Uniform> for CastTarget {
282fn from(uniform: Uniform) -> CastTarget {
283Self::prefixed([None; 8], uniform)
284 }
285}
286287impl CastTarget {
288pub fn prefixed(prefix: [Option<Reg>; 8], rest: Uniform) -> Self {
289Self { prefix, rest_offset: None, rest, attrs: ArgAttributes::new() }
290 }
291292pub fn offset_pair(a: Reg, offset_from_start: Size, b: Reg) -> Self {
293Self {
294 prefix: [Some(a), None, None, None, None, None, None, None],
295 rest_offset: Some(offset_from_start),
296 rest: b.into(),
297 attrs: ArgAttributes::new(),
298 }
299 }
300301pub fn with_attrs(mut self, attrs: ArgAttributes) -> Self {
302self.attrs = attrs;
303self304 }
305306pub fn pair(a: Reg, b: Reg) -> CastTarget {
307Self::prefixed([Some(a), None, None, None, None, None, None, None], Uniform::from(b))
308 }
309310/// When you only access the range containing valid data, you can use this unaligned size;
311 /// otherwise, use the safer `size` method.
312pub fn unaligned_size<C: HasDataLayout>(&self, _cx: &C) -> Size {
313// Prefix arguments are passed in specific designated registers
314let prefix_size = if let Some(offset_from_start) = self.rest_offset {
315offset_from_start316 } else {
317self.prefix
318 .iter()
319 .filter_map(|x| x.map(|reg| reg.size))
320 .fold(Size::ZERO, |acc, size| acc + size)
321 };
322// Remaining arguments are passed in chunks of the unit size
323let rest_size =
324self.rest.unit.size * self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes());
325326prefix_size + rest_size327 }
328329pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
330self.unaligned_size(cx).align_to(self.align(cx))
331 }
332333pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
334self.prefix
335 .iter()
336 .filter_map(|x| x.map(|reg| reg.align(cx)))
337 .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)), |acc, align| {
338acc.max(align)
339 })
340 }
341342/// Checks if these two `CastTarget` are equal enough to be considered "the same for all
343 /// function call ABIs".
344pub fn eq_abi(&self, other: &Self) -> bool {
345let CastTarget {
346 prefix: prefix_l,
347 rest_offset: rest_offset_l,
348 rest: rest_l,
349 attrs: attrs_l,
350 } = self;
351let CastTarget {
352 prefix: prefix_r,
353 rest_offset: rest_offset_r,
354 rest: rest_r,
355 attrs: attrs_r,
356 } = other;
357prefix_l == prefix_r358 && rest_offset_l == rest_offset_r359 && rest_l == rest_r360 && attrs_l.eq_abi(attrs_r)
361 }
362}
363364/// Information about how to pass an argument to,
365/// or return a value from, a function, under some ABI.
366#[derive(#[automatically_derived]
impl<'a, Ty: ::core::clone::Clone> ::core::clone::Clone for ArgAbi<'a, Ty> {
#[inline]
fn clone(&self) -> ArgAbi<'a, Ty> {
ArgAbi {
layout: ::core::clone::Clone::clone(&self.layout),
mode: ::core::clone::Clone::clone(&self.mode),
}
}
}Clone, #[automatically_derived]
impl<'a, Ty: ::core::cmp::PartialEq> ::core::cmp::PartialEq for ArgAbi<'a, Ty>
{
#[inline]
fn eq(&self, other: &ArgAbi<'a, Ty>) -> bool {
self.layout == other.layout && self.mode == other.mode
}
}PartialEq, #[automatically_derived]
impl<'a, Ty: ::core::cmp::Eq> ::core::cmp::Eq for ArgAbi<'a, Ty> {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<TyAndLayout<'a, Ty>>;
let _: ::core::cmp::AssertParamIsEq<PassMode>;
}
}Eq, #[automatically_derived]
impl<'a, Ty: ::core::hash::Hash> ::core::hash::Hash for ArgAbi<'a, Ty> {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
::core::hash::Hash::hash(&self.layout, state);
::core::hash::Hash::hash(&self.mode, state)
}
}Hash, const _: () =
{
impl<'a, Ty, __CTX>
::rustc_data_structures::stable_hasher::HashStable<__CTX> for
ArgAbi<'a, Ty> where __CTX: ::rustc_span::HashStableContext,
Ty: ::rustc_data_structures::stable_hasher::HashStable<__CTX> {
#[inline]
fn hash_stable(&self, __hcx: &mut __CTX,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
ArgAbi { layout: ref __binding_0, mode: ref __binding_1 } =>
{
{ __binding_0.hash_stable(__hcx, __hasher); }
{ __binding_1.hash_stable(__hcx, __hasher); }
}
}
}
}
};HashStable_Generic)]
367pub struct ArgAbi<'a, Ty> {
368pub layout: TyAndLayout<'a, Ty>,
369pub mode: PassMode,
370}
371372// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
373impl<'a, Ty: fmt::Display> fmt::Debugfor ArgAbi<'a, Ty> {
374fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
375let ArgAbi { layout, mode } = self;
376f.debug_struct("ArgAbi").field("layout", layout).field("mode", mode).finish()
377 }
378}
379380impl<'a, Ty> ArgAbi<'a, Ty> {
381/// This defines the "default ABI" for that type, that is then later adjusted in `fn_abi_adjust_for_abi`.
382pub fn new(
383 cx: &impl HasDataLayout,
384 layout: TyAndLayout<'a, Ty>,
385 scalar_attrs: impl Fn(Scalar, Size) -> ArgAttributes,
386 ) -> Self {
387let mode = match layout.backend_repr {
388_ if layout.is_zst() => PassMode::Ignore,
389 BackendRepr::Scalar(scalar) => PassMode::Direct(scalar_attrs(scalar, Size::ZERO)),
390 BackendRepr::ScalarPair(a, b) => PassMode::Pair(
391scalar_attrs(a, Size::ZERO),
392scalar_attrs(b, a.size(cx).align_to(b.align(cx).abi)),
393 ),
394 BackendRepr::SimdVector { .. } => PassMode::Direct(ArgAttributes::new()),
395 BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
396 BackendRepr::SimdScalableVector { .. } => PassMode::Direct(ArgAttributes::new()),
397 };
398ArgAbi { layout, mode }
399 }
400401fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
402let mut attrs = ArgAttributes::new();
403404// For non-immediate arguments the callee gets its own copy of
405 // the value on the stack, so there are no aliases. The function
406 // can capture the address of the argument, but not the provenance.
407attrs408 .set(ArgAttribute::NoAlias)
409 .set(ArgAttribute::CapturesAddress)
410 .set(ArgAttribute::NonNull)
411 .set(ArgAttribute::NoUndef);
412attrs.pointee_size = layout.size;
413attrs.pointee_align = Some(layout.align.abi);
414415let meta_attrs = layout.is_unsized().then_some(ArgAttributes::new());
416417 PassMode::Indirect { attrs, meta_attrs, on_stack: false }
418 }
419420/// Pass this argument directly instead. Should NOT be used!
421 /// Only exists because of past ABI mistakes that will take time to fix
422 /// (see <https://github.com/rust-lang/rust/issues/115666>).
423#[track_caller]
424pub fn make_direct_deprecated(&mut self) {
425match self.mode {
426 PassMode::Indirect { .. } => {
427self.mode = PassMode::Direct(ArgAttributes::new());
428 }
429 PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => {} // already direct
430_ => {
::core::panicking::panic_fmt(format_args!("Tried to make {0:?} direct",
self.mode));
}panic!("Tried to make {:?} direct", self.mode),
431 }
432 }
433434/// Pass this argument indirectly, by passing a (thin or wide) pointer to the argument instead.
435 /// This is valid for both sized and unsized arguments.
436#[track_caller]
437pub fn make_indirect(&mut self) {
438match self.mode {
439 PassMode::Direct(_) | PassMode::Pair(_, _) => {
440self.mode = Self::indirect_pass_mode(&self.layout);
441 }
442 PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
443// already indirect
444}
445_ => {
::core::panicking::panic_fmt(format_args!("Tried to make {0:?} indirect",
self.mode));
}panic!("Tried to make {:?} indirect", self.mode),
446 }
447 }
448449/// Same as `make_indirect`, but for arguments that are ignored. Only needed for ABIs that pass
450 /// ZSTs indirectly.
451#[track_caller]
452pub fn make_indirect_from_ignore(&mut self) {
453match self.mode {
454 PassMode::Ignore => {
455self.mode = Self::indirect_pass_mode(&self.layout);
456 }
457 PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
458// already indirect
459}
460_ => {
::core::panicking::panic_fmt(format_args!("Tried to make {0:?} indirect (expected `PassMode::Ignore`)",
self.mode));
}panic!("Tried to make {:?} indirect (expected `PassMode::Ignore`)", self.mode),
461 }
462 }
463464/// Pass this argument indirectly, by placing it at a fixed stack offset.
465 /// This corresponds to the `byval` LLVM argument attribute.
466 /// This is only valid for sized arguments.
467 ///
468 /// `byval_align` specifies the alignment of the `byval` stack slot, which does not need to
469 /// correspond to the type's alignment. This will be `Some` if the target's ABI specifies that
470 /// stack slots used for arguments passed by-value have specific alignment requirements which
471 /// differ from the alignment used in other situations.
472 ///
473 /// If `None`, the type's alignment is used.
474 ///
475 /// If the resulting alignment differs from the type's alignment,
476 /// the argument will be copied to an alloca with sufficient alignment,
477 /// either in the caller (if the type's alignment is lower than the byval alignment)
478 /// or in the callee (if the type's alignment is higher than the byval alignment),
479 /// to ensure that Rust code never sees an underaligned pointer.
480pub fn pass_by_stack_offset(&mut self, byval_align: Option<Align>) {
481if !!self.layout.is_unsized() {
{
::core::panicking::panic_fmt(format_args!("used byval ABI for unsized layout"));
}
};assert!(!self.layout.is_unsized(), "used byval ABI for unsized layout");
482self.make_indirect();
483match self.mode {
484 PassMode::Indirect { ref mut attrs, meta_attrs: _, ref mut on_stack } => {
485*on_stack = true;
486487// Some platforms, like 32-bit x86, change the alignment of the type when passing
488 // `byval`. Account for that.
489if let Some(byval_align) = byval_align {
490// On all targets with byval align this is currently true, so let's assert it.
491if true {
if !(byval_align >= Align::from_bytes(4).unwrap()) {
::core::panicking::panic("assertion failed: byval_align >= Align::from_bytes(4).unwrap()")
};
};debug_assert!(byval_align >= Align::from_bytes(4).unwrap());
492attrs.pointee_align = Some(byval_align);
493 }
494 }
495_ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
496 }
497 }
498499pub fn extend_integer_width_to(&mut self, bits: u64) {
500// Only integers have signedness
501if let BackendRepr::Scalar(scalar) = self.layout.backend_repr
502 && let Primitive::Int(i, signed) = scalar.primitive()
503 && i.size().bits() < bits504 && let PassMode::Direct(ref mut attrs) = self.mode
505 {
506if signed {
507attrs.ext(ArgExtension::Sext)
508 } else {
509attrs.ext(ArgExtension::Zext)
510 };
511 }
512 }
513514pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
515self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32: false };
516 }
517518pub fn cast_to_with_attrs<T: Into<CastTarget>>(&mut self, target: T, attrs: ArgAttributes) {
519self.mode =
520 PassMode::Cast { cast: Box::new(target.into().with_attrs(attrs)), pad_i32: false };
521 }
522523pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
524self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32 };
525 }
526527pub fn is_indirect(&self) -> bool {
528#[allow(non_exhaustive_omitted_patterns)] match self.mode {
PassMode::Indirect { .. } => true,
_ => false,
}matches!(self.mode, PassMode::Indirect { .. })529 }
530531pub fn is_sized_indirect(&self) -> bool {
532#[allow(non_exhaustive_omitted_patterns)] match self.mode {
PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => true,
_ => false,
}matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ })533 }
534535pub fn is_unsized_indirect(&self) -> bool {
536#[allow(non_exhaustive_omitted_patterns)] match self.mode {
PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => true,
_ => false,
}matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ })537 }
538539pub fn is_ignore(&self) -> bool {
540#[allow(non_exhaustive_omitted_patterns)] match self.mode {
PassMode::Ignore => true,
_ => false,
}matches!(self.mode, PassMode::Ignore)541 }
542543/// Checks if these two `ArgAbi` are equal enough to be considered "the same for all
544 /// function call ABIs".
545pub fn eq_abi(&self, other: &Self) -> bool546where
547Ty: PartialEq,
548 {
549// Ideally we'd just compare the `mode`, but that is not enough -- for some modes LLVM will look
550 // at the type.
551self.layout.eq_abi(&other.layout) && self.mode.eq_abi(&other.mode) && {
552// `fn_arg_sanity_check` accepts `PassMode::Direct` for some aggregates.
553 // That elevates any type difference to an ABI difference since we just use the
554 // full Rust type as the LLVM argument/return type.
555if #[allow(non_exhaustive_omitted_patterns)] match self.mode {
PassMode::Direct(..) => true,
_ => false,
}matches!(self.mode, PassMode::Direct(..))556 && #[allow(non_exhaustive_omitted_patterns)] match self.layout.backend_repr {
BackendRepr::Memory { .. } => true,
_ => false,
}matches!(self.layout.backend_repr, BackendRepr::Memory { .. })557 {
558// For aggregates in `Direct` mode to be compatible, the types need to be equal.
559self.layout.ty == other.layout.ty
560 } else {
561true
562}
563 }
564 }
565}
566567#[derive(#[automatically_derived]
impl ::core::marker::Copy for RiscvInterruptKind { }Copy, #[automatically_derived]
impl ::core::clone::Clone for RiscvInterruptKind {
#[inline]
fn clone(&self) -> RiscvInterruptKind { *self }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for RiscvInterruptKind {
#[inline]
fn eq(&self, other: &RiscvInterruptKind) -> bool {
let __self_discr = ::core::intrinsics::discriminant_value(self);
let __arg1_discr = ::core::intrinsics::discriminant_value(other);
__self_discr == __arg1_discr
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for RiscvInterruptKind {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for RiscvInterruptKind {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
let __self_discr = ::core::intrinsics::discriminant_value(self);
::core::hash::Hash::hash(&__self_discr, state)
}
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for RiscvInterruptKind {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::write_str(f,
match self {
RiscvInterruptKind::Machine => "Machine",
RiscvInterruptKind::Supervisor => "Supervisor",
})
}
}Debug, const _: () =
{
impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
for RiscvInterruptKind where
__CTX: ::rustc_span::HashStableContext {
#[inline]
fn hash_stable(&self, __hcx: &mut __CTX,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
match *self {
RiscvInterruptKind::Machine => {}
RiscvInterruptKind::Supervisor => {}
}
}
}
};HashStable_Generic)]
568pub enum RiscvInterruptKind {
569 Machine,
570 Supervisor,
571}
572573impl RiscvInterruptKind {
574pub fn as_str(&self) -> &'static str {
575match self {
576Self::Machine => "machine",
577Self::Supervisor => "supervisor",
578 }
579 }
580}
581582/// Metadata describing how the arguments to a native function
583/// should be passed in order to respect the native ABI.
584///
585/// The signature represented by this type may not match the MIR function signature.
586/// Certain attributes, like `#[track_caller]` can introduce additional arguments, which are present in [`FnAbi`], but not in `FnSig`.
587/// The std::offload module also adds an addition dyn_ptr argument to the GpuKernel ABI.
588/// While this difference is rarely relevant, it should still be kept in mind.
589///
590/// I will do my best to describe this structure, but these
591/// comments are reverse-engineered and may be inaccurate. -NDM
592#[derive(#[automatically_derived]
impl<'a, Ty: ::core::clone::Clone> ::core::clone::Clone for FnAbi<'a, Ty> {
#[inline]
fn clone(&self) -> FnAbi<'a, Ty> {
FnAbi {
args: ::core::clone::Clone::clone(&self.args),
ret: ::core::clone::Clone::clone(&self.ret),
c_variadic: ::core::clone::Clone::clone(&self.c_variadic),
fixed_count: ::core::clone::Clone::clone(&self.fixed_count),
conv: ::core::clone::Clone::clone(&self.conv),
can_unwind: ::core::clone::Clone::clone(&self.can_unwind),
}
}
}Clone, #[automatically_derived]
impl<'a, Ty: ::core::cmp::PartialEq> ::core::cmp::PartialEq for FnAbi<'a, Ty>
{
#[inline]
fn eq(&self, other: &FnAbi<'a, Ty>) -> bool {
self.c_variadic == other.c_variadic &&
self.fixed_count == other.fixed_count &&
self.can_unwind == other.can_unwind &&
self.args == other.args && self.ret == other.ret &&
self.conv == other.conv
}
}PartialEq, #[automatically_derived]
impl<'a, Ty: ::core::cmp::Eq> ::core::cmp::Eq for FnAbi<'a, Ty> {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Box<[ArgAbi<'a, Ty>]>>;
let _: ::core::cmp::AssertParamIsEq<ArgAbi<'a, Ty>>;
let _: ::core::cmp::AssertParamIsEq<bool>;
let _: ::core::cmp::AssertParamIsEq<u32>;
let _: ::core::cmp::AssertParamIsEq<CanonAbi>;
}
}Eq, #[automatically_derived]
impl<'a, Ty: ::core::hash::Hash> ::core::hash::Hash for FnAbi<'a, Ty> {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
::core::hash::Hash::hash(&self.args, state);
::core::hash::Hash::hash(&self.ret, state);
::core::hash::Hash::hash(&self.c_variadic, state);
::core::hash::Hash::hash(&self.fixed_count, state);
::core::hash::Hash::hash(&self.conv, state);
::core::hash::Hash::hash(&self.can_unwind, state)
}
}Hash, const _: () =
{
impl<'a, Ty, __CTX>
::rustc_data_structures::stable_hasher::HashStable<__CTX> for
FnAbi<'a, Ty> where __CTX: ::rustc_span::HashStableContext,
Ty: ::rustc_data_structures::stable_hasher::HashStable<__CTX> {
#[inline]
fn hash_stable(&self, __hcx: &mut __CTX,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
FnAbi {
args: ref __binding_0,
ret: ref __binding_1,
c_variadic: ref __binding_2,
fixed_count: ref __binding_3,
conv: ref __binding_4,
can_unwind: ref __binding_5 } => {
{ __binding_0.hash_stable(__hcx, __hasher); }
{ __binding_1.hash_stable(__hcx, __hasher); }
{ __binding_2.hash_stable(__hcx, __hasher); }
{ __binding_3.hash_stable(__hcx, __hasher); }
{ __binding_4.hash_stable(__hcx, __hasher); }
{ __binding_5.hash_stable(__hcx, __hasher); }
}
}
}
}
};HashStable_Generic)]
593pub struct FnAbi<'a, Ty> {
594/// The type, layout, and information about how each argument is passed.
595pub args: Box<[ArgAbi<'a, Ty>]>,
596597/// The layout, type, and the way a value is returned from this function.
598pub ret: ArgAbi<'a, Ty>,
599600/// Marks this function as variadic (accepting a variable number of arguments).
601pub c_variadic: bool,
602603/// The count of non-variadic arguments.
604 ///
605 /// Should only be different from args.len() when c_variadic is true.
606 /// This can be used to know whether an argument is variadic or not.
607pub fixed_count: u32,
608/// The calling convention of this function.
609pub conv: CanonAbi,
610/// Indicates if an unwind may happen across a call to this function.
611pub can_unwind: bool,
612}
613614// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
615impl<'a, Ty: fmt::Display> fmt::Debugfor FnAbi<'a, Ty> {
616fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
617let FnAbi { args, ret, c_variadic, fixed_count, conv, can_unwind } = self;
618f.debug_struct("FnAbi")
619 .field("args", args)
620 .field("ret", ret)
621 .field("c_variadic", c_variadic)
622 .field("fixed_count", fixed_count)
623 .field("conv", conv)
624 .field("can_unwind", can_unwind)
625 .finish()
626 }
627}
628629impl<'a, Ty> FnAbi<'a, Ty> {
630pub fn adjust_for_foreign_abi<C>(&mut self, cx: &C, abi: ExternAbi)
631where
632Ty: TyAbiInterface<'a, C> + Copy,
633 C: HasDataLayout + HasTargetSpec + HasX86AbiOpt,
634 {
635if abi == ExternAbi::X86Interrupt {
636if let Some(arg) = self.args.first_mut() {
637arg.pass_by_stack_offset(None);
638 }
639return;
640 }
641642let spec = cx.target_spec();
643match &spec.arch {
644 Arch::X86 => {
645let (flavor, regparm) = match abi {
646 ExternAbi::Fastcall { .. } | ExternAbi::Vectorcall { .. } => {
647 (x86::Flavor::FastcallOrVectorcall, None)
648 }
649 ExternAbi::C { .. } | ExternAbi::Cdecl { .. } | ExternAbi::Stdcall { .. } => {
650 (x86::Flavor::General, cx.x86_abi_opt().regparm)
651 }
652_ => (x86::Flavor::General, None),
653 };
654let reg_struct_return = cx.x86_abi_opt().reg_struct_return;
655let opts = x86::X86Options { flavor, regparm, reg_struct_return };
656if spec.is_like_msvc {
657 x86_win32::compute_abi_info(cx, self, opts);
658 } else {
659 x86::compute_abi_info(cx, self, opts);
660 }
661 }
662 Arch::X86_64 => match abi {
663 ExternAbi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
664 ExternAbi::Win64 { .. } | ExternAbi::Vectorcall { .. } => {
665 x86_win64::compute_abi_info(cx, self)
666 }
667_ => {
668if cx.target_spec().is_like_windows {
669 x86_win64::compute_abi_info(cx, self)
670 } else {
671 x86_64::compute_abi_info(cx, self)
672 }
673 }
674 },
675 Arch::AArch64 | Arch::Arm64EC => {
676let kind = if cx.target_spec().is_like_darwin {
677 aarch64::AbiKind::DarwinPCS678 } else if cx.target_spec().is_like_windows {
679 aarch64::AbiKind::Win64680 } else {
681 aarch64::AbiKind::AAPCS682 };
683 aarch64::compute_abi_info(cx, self, kind)
684 }
685 Arch::AmdGpu => amdgpu::compute_abi_info(cx, self),
686 Arch::Arm => arm::compute_abi_info(cx, self),
687 Arch::Avr => avr::compute_abi_info(cx, self),
688 Arch::LoongArch32 | Arch::LoongArch64 => loongarch::compute_abi_info(cx, self),
689 Arch::M68k => m68k::compute_abi_info(cx, self),
690 Arch::CSky => csky::compute_abi_info(cx, self),
691 Arch::Mips | Arch::Mips32r6 => mips::compute_abi_info(cx, self),
692 Arch::Mips64 | Arch::Mips64r6 => mips64::compute_abi_info(cx, self),
693 Arch::PowerPC => powerpc::compute_abi_info(cx, self),
694 Arch::PowerPC64 => powerpc64::compute_abi_info(cx, self),
695 Arch::S390x => s390x::compute_abi_info(cx, self),
696 Arch::Msp430 => msp430::compute_abi_info(cx, self),
697 Arch::Sparc => sparc::compute_abi_info(cx, self),
698 Arch::Sparc64 => sparc64::compute_abi_info(cx, self),
699 Arch::Nvptx64 => {
700if abi == ExternAbi::PtxKernel || abi == ExternAbi::GpuKernel {
701 nvptx64::compute_ptx_kernel_abi_info(cx, self)
702 } else {
703 nvptx64::compute_abi_info(cx, self)
704 }
705 }
706 Arch::Hexagon => hexagon::compute_abi_info(cx, self),
707 Arch::Xtensa => xtensa::compute_abi_info(cx, self),
708 Arch::RiscV32 | Arch::RiscV64 => riscv::compute_abi_info(cx, self),
709 Arch::Wasm32 | Arch::Wasm64 => wasm::compute_abi_info(cx, self),
710 Arch::Bpf => bpf::compute_abi_info(cx, self),
711 arch @ (Arch::SpirV | Arch::Other(_)) => {
712{
::core::panicking::panic_fmt(format_args!("no lowering implemented for {0}",
arch));
}panic!("no lowering implemented for {arch}")713 }
714 }
715 }
716717pub fn adjust_for_rust_abi<C>(&mut self, cx: &C)
718where
719Ty: TyAbiInterface<'a, C> + Copy,
720 C: HasDataLayout + HasTargetSpec,
721 {
722let spec = cx.target_spec();
723match &spec.arch {
724 Arch::X86 => x86::compute_rust_abi_info(cx, self),
725 Arch::RiscV32 | Arch::RiscV64 => riscv::compute_rust_abi_info(cx, self),
726 Arch::LoongArch32 | Arch::LoongArch64 => loongarch::compute_rust_abi_info(cx, self),
727 Arch::AArch64 => aarch64::compute_rust_abi_info(cx, self),
728 Arch::Bpf => bpf::compute_rust_abi_info(self),
729_ => {}
730 };
731732for (arg_idx, arg) in self
733.args
734 .iter_mut()
735 .enumerate()
736 .map(|(idx, arg)| (Some(idx), arg))
737 .chain(iter::once((None, &mut self.ret)))
738 {
739// If the logic above already picked a specific type to cast the argument to, leave that
740 // in place.
741if #[allow(non_exhaustive_omitted_patterns)] match arg.mode {
PassMode::Ignore | PassMode::Cast { .. } => true,
_ => false,
}matches!(arg.mode, PassMode::Ignore | PassMode::Cast { .. }) {
742continue;
743 }
744745if arg_idx.is_none()
746 && arg.layout.size > Primitive::Pointer(AddressSpace::ZERO).size(cx) * 2
747 && !#[allow(non_exhaustive_omitted_patterns)] match arg.layout.backend_repr {
BackendRepr::SimdVector { .. } => true,
_ => false,
}matches!(arg.layout.backend_repr, BackendRepr::SimdVector { .. })748 {
749// Return values larger than 2 registers using a return area
750 // pointer. LLVM and Cranelift disagree about how to return
751 // values that don't fit in the registers designated for return
752 // values. LLVM will force the entire return value to be passed
753 // by return area pointer, while Cranelift will look at each IR level
754 // return value independently and decide to pass it in a
755 // register or not, which would result in the return value
756 // being passed partially in registers and partially through a
757 // return area pointer. For large IR-level values such as `i128`,
758 // cranelift will even split up the value into smaller chunks.
759 //
760 // While Cranelift may need to be fixed as the LLVM behavior is
761 // generally more correct with respect to the surface language,
762 // forcing this behavior in rustc itself makes it easier for
763 // other backends to conform to the Rust ABI and for the C ABI
764 // rustc already handles this behavior anyway.
765 //
766 // In addition LLVM's decision to pass the return value in
767 // registers or using a return area pointer depends on how
768 // exactly the return type is lowered to an LLVM IR type. For
769 // example `Option<u128>` can be lowered as `{ i128, i128 }`
770 // in which case the x86_64 backend would use a return area
771 // pointer, or it could be passed as `{ i32, i128 }` in which
772 // case the x86_64 backend would pass it in registers by taking
773 // advantage of an LLVM ABI extension that allows using 3
774 // registers for the x86_64 sysv call conv rather than the
775 // officially specified 2 registers.
776 //
777 // FIXME: Technically we should look at the amount of available
778 // return registers rather than guessing that there are 2
779 // registers for return values. In practice only a couple of
// architectures have fewer than 2 return registers, none of
// which are supported by Cranelift.
782 //
783 // NOTE: This adjustment is only necessary for the Rust ABI as
784 // for other ABI's the calling convention implementations in
785 // rustc_target already ensure any return value which doesn't
786 // fit in the available amount of return registers is passed in
787 // the right way for the current target.
788 //
789 // The adjustment is not necessary nor desired for types with a vector
790 // representation; those are handled below.
791arg.make_indirect();
792continue;
793 }
794795match arg.layout.backend_repr {
796 BackendRepr::Memory { .. } => {
797// Compute `Aggregate` ABI.
798799let is_indirect_not_on_stack =
800#[allow(non_exhaustive_omitted_patterns)] match arg.mode {
PassMode::Indirect { on_stack: false, .. } => true,
_ => false,
}matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
801if !is_indirect_not_on_stack {
::core::panicking::panic("assertion failed: is_indirect_not_on_stack")
};assert!(is_indirect_not_on_stack);
802803let size = arg.layout.size;
804if arg.layout.is_sized()
805 && size <= Primitive::Pointer(AddressSpace::ZERO).size(cx)
806 {
807// We want to pass small aggregates as immediates, but using
808 // an LLVM aggregate type for this leads to bad optimizations,
809 // so we pick an appropriately sized integer type instead.
810let attr = if layout_is_noundef(arg.layout, cx) {
811 ArgAttribute::NoUndef
812 } else {
813 ArgAttribute::default()
814 };
815 arg.cast_to_with_attrs(Reg { kind: RegKind::Integer, size }, attr.into());
816 }
817 }
818819 BackendRepr::SimdVector { .. } => {
820// This is a fun case! The gist of what this is doing is
821 // that we want callers and callees to always agree on the
822 // ABI of how they pass SIMD arguments. If we were to *not*
823 // make these arguments indirect then they'd be immediates
824 // in LLVM, which means that they'd used whatever the
825 // appropriate ABI is for the callee and the caller. That
826 // means, for example, if the caller doesn't have AVX
827 // enabled but the callee does, then passing an AVX argument
828 // across this boundary would cause corrupt data to show up.
829 //
830 // This problem is fixed by unconditionally passing SIMD
831 // arguments through memory between callers and callees
832 // which should get them all to agree on ABI regardless of
833 // target feature sets. Some more information about this
834 // issue can be found in #44367.
835 //
836 // We *could* do better in some cases, e.g. on x86_64 targets where SSE2 is
837 // required. However, it turns out that that makes LLVM worse at optimizing this
838 // code, so we pass things indirectly even there. See #139029 for more on that.
839if spec.simd_types_indirect {
840 arg.make_indirect();
841 }
842 }
843844_ => {}
845 }
846 }
847 }
848}
849850/// Determines whether `layout` contains no uninit bytes (no padding, no unions),
851/// using only the computed layout.
852///
853/// Conservative: returns `false` for anything it cannot prove fully initialized,
854/// including multi-variant enums and SIMD vectors.
855// FIXME: extend to multi-variant enums (per-variant padding analysis needed).
856fn layout_is_noundef<'a, Ty, C>(layout: TyAndLayout<'a, Ty>, cx: &C) -> bool857where
858Ty: TyAbiInterface<'a, C> + Copy,
859 C: HasDataLayout,
860{
861match layout.backend_repr {
862 BackendRepr::Scalar(scalar) => !scalar.is_uninit_valid(),
863 BackendRepr::ScalarPair(s1, s2) => {
864 !s1.is_uninit_valid()
865 && !s2.is_uninit_valid()
866// Ensure there is no padding.
867&& s1.size(cx) + s2.size(cx) == layout.size
868 }
869 BackendRepr::Memory { .. } => match layout.fields {
870 FieldsShape::Primitive | FieldsShape::Union(_) => false,
871// Array elements are at stride offsets with no inter-element gaps.
872FieldsShape::Array { stride: _, count } => {
873count == 0 || layout_is_noundef(layout.field(cx, 0), cx)
874 }
875 FieldsShape::Arbitrary { .. } => {
876// With `Variants::Multiple`, `layout.fields` only covers shared
877 // bytes (niche/discriminant); per-variant data is absent, so
878 // full coverage cannot be proven.
879#[allow(non_exhaustive_omitted_patterns)] match layout.variants {
Variants::Single { .. } => true,
_ => false,
}matches!(layout.variants, Variants::Single { .. }) && fields_are_noundef(layout, cx)
880 }
881 },
882 BackendRepr::SimdVector { .. } | BackendRepr::SimdScalableVector { .. } => false,
883 }
884}
885886/// Returns `true` if the fields of `layout` contiguously cover bytes `0..layout.size`
887/// with no padding gaps and each field is recursively `layout_is_noundef`.
888fn fields_are_noundef<'a, Ty, C>(layout: TyAndLayout<'a, Ty>, cx: &C) -> bool889where
890Ty: TyAbiInterface<'a, C> + Copy,
891 C: HasDataLayout,
892{
893let mut cursor = Size::ZERO;
894for i in layout.fields.index_by_increasing_offset() {
895let field = layout.field(cx, i);
896if field.size == Size::ZERO {
897continue;
898 }
899if layout.fields.offset(i) != cursor {
900return false;
901 }
902if !layout_is_noundef(field, cx) {
903return false;
904 }
905 cursor += field.size;
906 }
907cursor == layout.size
908}
909910// Some types are used a lot. Make sure they don't unintentionally get bigger.
911#[cfg(target_pointer_width = "64")]
912mod size_asserts {
913use rustc_data_structures::static_assert_size;
914915use super::*;
916// tidy-alphabetical-start
917const _: [(); 56] = [(); ::std::mem::size_of::<ArgAbi<'_, usize>>()];static_assert_size!(ArgAbi<'_, usize>, 56);
918const _: [(); 80] = [(); ::std::mem::size_of::<FnAbi<'_, usize>>()];static_assert_size!(FnAbi<'_, usize>, 80);
919// tidy-alphabetical-end
920}