rustc_target/callconv/
mod.rs

1use std::{fmt, iter};
2
3use rustc_abi::{
4    AddressSpace, Align, BackendRepr, CanonAbi, ExternAbi, HasDataLayout, Primitive, Reg, RegKind,
5    Scalar, Size, TyAbiInterface, TyAndLayout,
6};
7use rustc_macros::HashStable_Generic;
8
9pub use crate::spec::AbiMap;
10use crate::spec::{HasTargetSpec, HasX86AbiOpt};
11
// Architecture-specific calling-convention lowering. Each module provides a
// `compute_abi_info` (and sometimes `compute_rust_abi_info`) for its architecture;
// they are dispatched to from `FnAbi::adjust_for_foreign_abi` below.
mod aarch64;
mod amdgpu;
mod arm;
mod avr;
mod bpf;
mod csky;
mod hexagon;
mod loongarch;
mod m68k;
mod mips;
mod mips64;
mod msp430;
mod nvptx64;
mod powerpc;
mod powerpc64;
mod riscv;
mod s390x;
mod sparc;
mod sparc64;
mod wasm;
mod x86;
mod x86_64;
mod x86_win32;
mod x86_win64;
mod xtensa;
37
/// How a single value (argument or return value) is passed in a function call.
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum PassMode {
    /// Ignore the argument.
    ///
    /// The argument is a ZST.
    Ignore,
    /// Pass the argument directly.
    ///
    /// The argument has a layout abi of `Scalar` or `Vector`.
    /// Unfortunately due to past mistakes, in rare cases on wasm, it can also be `Aggregate`.
    /// This is bad since it leaks LLVM implementation details into the ABI.
    /// (Also see <https://github.com/rust-lang/rust/issues/115666>.)
    Direct(ArgAttributes),
    /// Pass a pair's elements directly in two arguments.
    ///
    /// The argument has a layout abi of `ScalarPair`.
    Pair(ArgAttributes, ArgAttributes),
    /// Pass the argument after casting it to the type described by the `CastTarget`.
    /// See the `CastTarget` docs for details.
    ///
    /// `pad_i32` indicates if a `Reg::i32()` dummy argument is emitted before the real argument.
    Cast { pad_i32: bool, cast: Box<CastTarget> },
    /// Pass the argument indirectly via a hidden pointer.
    ///
    /// The `meta_attrs` value, if any, is for the metadata (vtable or length) of an unsized
    /// argument. (This is the only mode that supports unsized arguments.)
    ///
    /// `on_stack` defines that the value should be passed at a fixed stack offset in accordance to
    /// the ABI rather than passed using a pointer. This corresponds to the `byval` LLVM argument
    /// attribute. The `byval` argument will use a byte array with the same size as the Rust type
    /// (which ensures that padding is preserved and that we do not rely on LLVM's struct layout),
    /// and will use the alignment specified in `attrs.pointee_align` (if `Some`) or the type's
    /// alignment (if `None`). This means that the alignment will not always
    /// match the Rust type's alignment; see documentation of `pass_by_stack_offset` for more info.
    ///
    /// `on_stack` cannot be true for unsized arguments, i.e., when `meta_attrs` is `Some`.
    Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
}
75
76impl PassMode {
77    /// Checks if these two `PassMode` are equal enough to be considered "the same for all
78    /// function call ABIs". However, the `Layout` can also impact ABI decisions,
79    /// so that needs to be compared as well!
80    pub fn eq_abi(&self, other: &Self) -> bool {
81        match (self, other) {
82            (PassMode::Ignore, PassMode::Ignore) => true,
83            (PassMode::Direct(a1), PassMode::Direct(a2)) => a1.eq_abi(a2),
84            (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => a1.eq_abi(a2) && b1.eq_abi(b2),
85            (
86                PassMode::Cast { cast: c1, pad_i32: pad1 },
87                PassMode::Cast { cast: c2, pad_i32: pad2 },
88            ) => c1.eq_abi(c2) && pad1 == pad2,
89            (
90                PassMode::Indirect { attrs: a1, meta_attrs: None, on_stack: s1 },
91                PassMode::Indirect { attrs: a2, meta_attrs: None, on_stack: s2 },
92            ) => a1.eq_abi(a2) && s1 == s2,
93            (
94                PassMode::Indirect { attrs: a1, meta_attrs: Some(e1), on_stack: s1 },
95                PassMode::Indirect { attrs: a2, meta_attrs: Some(e2), on_stack: s2 },
96            ) => a1.eq_abi(a2) && e1.eq_abi(e2) && s1 == s2,
97            _ => false,
98        }
99    }
100}
101
// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
// of this module
pub use attr_impl::ArgAttribute;

#[allow(non_upper_case_globals)]
#[allow(unused)]
mod attr_impl {
    use rustc_macros::HashStable_Generic;

    // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
    #[derive(Clone, Copy, Default, Hash, PartialEq, Eq, HashStable_Generic)]
    pub struct ArgAttribute(u8);
    bitflags::bitflags! {
        impl ArgAttribute: u8 {
            // The three `Captures*` flags deliberately share bits: a stronger capture
            // restriction `contains` every weaker one, e.g. `CapturesNone` (0b111)
            // contains `CapturesAddress` (0b110), which contains `CapturesReadOnly`
            // (0b100). Presumably these map to LLVM's `captures(...)` attribute —
            // the exact lowering lives in the backend (TODO confirm).
            const CapturesNone     = 0b111;
            const CapturesAddress  = 0b110;
            const CapturesReadOnly = 0b100;
            const NoAlias  = 1 << 3;
            const NonNull  = 1 << 4;
            const ReadOnly = 1 << 5;
            const InReg    = 1 << 6;
            const NoUndef  = 1 << 7;
        }
    }
    rustc_data_structures::external_bitflags_debug! { ArgAttribute }
}
128
/// Sometimes an ABI requires small integers to be extended to a full or partial register. This enum
/// defines if this extension should be zero-extension or sign-extension when necessary. When it is
/// not necessary to extend the argument, this enum is ignored.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum ArgExtension {
    /// No extension requested.
    None,
    /// Zero-extend the value (fill the upper bits with zeros).
    Zext,
    /// Sign-extend the value (fill the upper bits with copies of the sign bit).
    Sext,
}
138
/// A compact representation of LLVM attributes (at least those relevant for this module)
/// that can be manipulated without interacting with LLVM's Attribute machinery.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct ArgAttributes {
    /// The boolean attribute flags (noalias, nonnull, inreg, ...).
    pub regular: ArgAttribute,
    /// Whether (and how) small integers should be extended; see `ArgExtension`.
    pub arg_ext: ArgExtension,
    /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
    /// (corresponding to LLVM's dereferenceable_or_null attributes, i.e., it is okay for this to be
    /// set on a null pointer, but all non-null pointers must be dereferenceable).
    pub pointee_size: Size,
    /// The minimum alignment of the pointee, if any.
    pub pointee_align: Option<Align>,
}
152
153impl ArgAttributes {
154    pub fn new() -> Self {
155        ArgAttributes {
156            regular: ArgAttribute::default(),
157            arg_ext: ArgExtension::None,
158            pointee_size: Size::ZERO,
159            pointee_align: None,
160        }
161    }
162
163    pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
164        assert!(
165            self.arg_ext == ArgExtension::None || self.arg_ext == ext,
166            "cannot set {:?} when {:?} is already set",
167            ext,
168            self.arg_ext
169        );
170        self.arg_ext = ext;
171        self
172    }
173
174    pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
175        self.regular |= attr;
176        self
177    }
178
179    pub fn contains(&self, attr: ArgAttribute) -> bool {
180        self.regular.contains(attr)
181    }
182
183    /// Checks if these two `ArgAttributes` are equal enough to be considered "the same for all
184    /// function call ABIs".
185    pub fn eq_abi(&self, other: &Self) -> bool {
186        // There's only one regular attribute that matters for the call ABI: InReg.
187        // Everything else is things like noalias, dereferenceable, nonnull, ...
188        // (This also applies to pointee_size, pointee_align.)
189        if self.regular.contains(ArgAttribute::InReg) != other.regular.contains(ArgAttribute::InReg)
190        {
191            return false;
192        }
193        // We also compare the sign extension mode -- this could let the callee make assumptions
194        // about bits that conceptually were not even passed.
195        if self.arg_ext != other.arg_ext {
196            return false;
197        }
198        true
199    }
200}
201
202impl From<ArgAttribute> for ArgAttributes {
203    fn from(value: ArgAttribute) -> Self {
204        Self {
205            regular: value,
206            arg_ext: ArgExtension::None,
207            pointee_size: Size::ZERO,
208            pointee_align: None,
209        }
210    }
211}
212
/// An argument passed entirely in registers of the
/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Uniform {
    /// The register type that is repeated to cover the argument.
    pub unit: Reg,

    /// The total size of the argument, which can be:
    /// * equal to `unit.size` (one scalar/vector),
    /// * a multiple of `unit.size` (an array of scalar/vectors),
    /// * if `unit.kind` is `Integer`, the last element can be shorter, i.e., `{ i64, i64, i32 }`
    ///   for 64-bit integers with a total size of 20 bytes. When the argument is actually passed,
    ///   this size will be rounded up to the nearest multiple of `unit.size`.
    pub total: Size,

    /// Indicate that the argument is consecutive, in the sense that either all values need to be
    /// passed in register, or all on the stack. If they are passed on the stack, there should be
    /// no additional padding between elements.
    pub is_consecutive: bool,
}
232
233impl From<Reg> for Uniform {
234    fn from(unit: Reg) -> Uniform {
235        Uniform { unit, total: unit.size, is_consecutive: false }
236    }
237}
238
239impl Uniform {
240    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
241        self.unit.align(cx)
242    }
243
244    /// Pass using one or more values of the given type, without requiring them to be consecutive.
245    /// That is, some values may be passed in register and some on the stack.
246    pub fn new(unit: Reg, total: Size) -> Self {
247        Uniform { unit, total, is_consecutive: false }
248    }
249
250    /// Pass using one or more consecutive values of the given type. Either all values will be
251    /// passed in registers, or all on the stack.
252    pub fn consecutive(unit: Reg, total: Size) -> Self {
253        Uniform { unit, total, is_consecutive: true }
254    }
255}
256
/// Describes the type used for `PassMode::Cast`.
///
/// Passing arguments in this mode works as follows: the registers in the `prefix` (the ones that
/// are `Some`) get laid out one after the other (using `repr(C)` layout rules). Then the
/// `rest.unit` register type gets repeated often enough to cover `rest.size`. This describes the
/// actual type used for the call; the Rust type of the argument is then transmuted to this ABI type
/// (and all data in the padding between the registers is dropped).
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct CastTarget {
    /// Up to 8 explicitly-placed registers preceding `rest`.
    pub prefix: [Option<Reg>; 8],
    /// The offset of `rest` from the start of the value. Currently only implemented for a `Reg`
    /// pair created by the `offset_pair` method.
    pub rest_offset: Option<Size>,
    /// The register type repeated to cover the remainder of the value.
    pub rest: Uniform,
    /// Attributes attached to the whole cast argument.
    pub attrs: ArgAttributes,
}
273
274impl From<Reg> for CastTarget {
275    fn from(unit: Reg) -> CastTarget {
276        CastTarget::from(Uniform::from(unit))
277    }
278}
279
280impl From<Uniform> for CastTarget {
281    fn from(uniform: Uniform) -> CastTarget {
282        Self::prefixed([None; 8], uniform)
283    }
284}
285
impl CastTarget {
    /// Builds a `CastTarget` from up to 8 explicit prefix registers followed by `rest`,
    /// with default (empty) attributes.
    pub fn prefixed(prefix: [Option<Reg>; 8], rest: Uniform) -> Self {
        Self { prefix, rest_offset: None, rest, attrs: ArgAttributes::new() }
    }

    /// Builds a two-register `CastTarget` where `b` is placed `offset_from_start` bytes from
    /// the beginning of the value (rather than immediately after `a`, as `pair` would do).
    pub fn offset_pair(a: Reg, offset_from_start: Size, b: Reg) -> Self {
        Self {
            prefix: [Some(a), None, None, None, None, None, None, None],
            rest_offset: Some(offset_from_start),
            rest: b.into(),
            attrs: ArgAttributes::new(),
        }
    }

    /// Builder-style setter for the attributes attached to this cast.
    pub fn with_attrs(mut self, attrs: ArgAttributes) -> Self {
        self.attrs = attrs;
        self
    }

    /// Builds a `CastTarget` of two registers laid out back-to-back (`repr(C)` rules).
    pub fn pair(a: Reg, b: Reg) -> CastTarget {
        Self::prefixed([Some(a), None, None, None, None, None, None, None], Uniform::from(b))
    }

    /// When you only access the range containing valid data, you can use this unaligned size;
    /// otherwise, use the safer `size` method.
    pub fn unaligned_size<C: HasDataLayout>(&self, _cx: &C) -> Size {
        // Prefix arguments are passed in specific designated registers
        let prefix_size = if let Some(offset_from_start) = self.rest_offset {
            // `rest_offset` is only set by `offset_pair`, where it measures from the start of
            // the value, so it already subsumes the (single) prefix register.
            offset_from_start
        } else {
            self.prefix
                .iter()
                .filter_map(|x| x.map(|reg| reg.size))
                .fold(Size::ZERO, |acc, size| acc + size)
        };
        // Remaining arguments are passed in chunks of the unit size
        let rest_size =
            self.rest.unit.size * self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes());

        prefix_size + rest_size
    }

    /// The size of the cast type, rounded up to its own alignment (see `align`).
    pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
        self.unaligned_size(cx).align_to(self.align(cx))
    }

    /// The alignment of the cast type: the maximum of the target's aggregate alignment and
    /// the alignment of every register used (both prefix registers and the `rest` unit).
    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
        self.prefix
            .iter()
            .filter_map(|x| x.map(|reg| reg.align(cx)))
            .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)), |acc, align| {
                acc.max(align)
            })
    }

    /// Checks if these two `CastTarget` are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool {
        // Exhaustive destructuring, so that adding a field to `CastTarget` forces this
        // comparison to be revisited.
        let CastTarget {
            prefix: prefix_l,
            rest_offset: rest_offset_l,
            rest: rest_l,
            attrs: attrs_l,
        } = self;
        let CastTarget {
            prefix: prefix_r,
            rest_offset: rest_offset_r,
            rest: rest_r,
            attrs: attrs_r,
        } = other;
        prefix_l == prefix_r
            && rest_offset_l == rest_offset_r
            && rest_l == rest_r
            && attrs_l.eq_abi(attrs_r)
    }
}
362
/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct ArgAbi<'a, Ty> {
    /// The type and layout of the value being passed.
    pub layout: TyAndLayout<'a, Ty>,
    /// How the value is passed; see `PassMode`.
    pub mode: PassMode,
}
370
371// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
372impl<'a, Ty: fmt::Display> fmt::Debug for ArgAbi<'a, Ty> {
373    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
374        let ArgAbi { layout, mode } = self;
375        f.debug_struct("ArgAbi").field("layout", layout).field("mode", mode).finish()
376    }
377}
378
impl<'a, Ty> ArgAbi<'a, Ty> {
    /// This defines the "default ABI" for that type, that is then later adjusted in `fn_abi_adjust_for_abi`.
    ///
    /// `scalar_attrs` computes the attributes for a scalar located at the given offset within
    /// the layout (`Size::ZERO` except for the second element of a scalar pair).
    pub fn new(
        cx: &impl HasDataLayout,
        layout: TyAndLayout<'a, Ty>,
        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, Scalar, Size) -> ArgAttributes,
    ) -> Self {
        let mode = match layout.backend_repr {
            BackendRepr::Scalar(scalar) => {
                PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO))
            }
            BackendRepr::ScalarPair(a, b) => PassMode::Pair(
                scalar_attrs(&layout, a, Size::ZERO),
                // The second element starts after `a`, rounded up to `b`'s ABI alignment.
                scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
            ),
            BackendRepr::SimdVector { .. } => PassMode::Direct(ArgAttributes::new()),
            BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
        };
        ArgAbi { layout, mode }
    }

    /// Computes the default indirect pass mode for `layout`: a hidden pointer to a copy of
    /// the value, plus (for unsized types) empty attributes for the metadata.
    fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
        let mut attrs = ArgAttributes::new();

        // For non-immediate arguments the callee gets its own copy of
        // the value on the stack, so there are no aliases. The function
        // can capture the address of the argument, but not the provenance.
        attrs
            .set(ArgAttribute::NoAlias)
            .set(ArgAttribute::CapturesAddress)
            .set(ArgAttribute::NonNull)
            .set(ArgAttribute::NoUndef);
        attrs.pointee_size = layout.size;
        attrs.pointee_align = Some(layout.align.abi);

        // Unsized arguments additionally carry metadata (vtable or length).
        let meta_attrs = layout.is_unsized().then_some(ArgAttributes::new());

        PassMode::Indirect { attrs, meta_attrs, on_stack: false }
    }

    /// Pass this argument directly instead. Should NOT be used!
    /// Only exists because of past ABI mistakes that will take time to fix
    /// (see <https://github.com/rust-lang/rust/issues/115666>).
    #[track_caller]
    pub fn make_direct_deprecated(&mut self) {
        match self.mode {
            PassMode::Indirect { .. } => {
                self.mode = PassMode::Direct(ArgAttributes::new());
            }
            PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => {} // already direct
            // `Cast` cannot be converted; the caller made a mistake.
            _ => panic!("Tried to make {:?} direct", self.mode),
        }
    }

    /// Pass this argument indirectly, by passing a (thin or wide) pointer to the argument instead.
    /// This is valid for both sized and unsized arguments.
    #[track_caller]
    pub fn make_indirect(&mut self) {
        match self.mode {
            PassMode::Direct(_) | PassMode::Pair(_, _) => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            // `Ignore`, `Cast`, and byval (`on_stack: true`) cannot be converted.
            _ => panic!("Tried to make {:?} indirect", self.mode),
        }
    }

    /// Same as `make_indirect`, but for arguments that are ignored. Only needed for ABIs that pass
    /// ZSTs indirectly.
    #[track_caller]
    pub fn make_indirect_from_ignore(&mut self) {
        match self.mode {
            PassMode::Ignore => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            _ => panic!("Tried to make {:?} indirect (expected `PassMode::Ignore`)", self.mode),
        }
    }

    /// Pass this argument indirectly, by placing it at a fixed stack offset.
    /// This corresponds to the `byval` LLVM argument attribute.
    /// This is only valid for sized arguments.
    ///
    /// `byval_align` specifies the alignment of the `byval` stack slot, which does not need to
    /// correspond to the type's alignment. This will be `Some` if the target's ABI specifies that
    /// stack slots used for arguments passed by-value have specific alignment requirements which
    /// differ from the alignment used in other situations.
    ///
    /// If `None`, the type's alignment is used.
    ///
    /// If the resulting alignment differs from the type's alignment,
    /// the argument will be copied to an alloca with sufficient alignment,
    /// either in the caller (if the type's alignment is lower than the byval alignment)
    /// or in the callee (if the type's alignment is higher than the byval alignment),
    /// to ensure that Rust code never sees an underaligned pointer.
    pub fn pass_by_stack_offset(&mut self, byval_align: Option<Align>) {
        assert!(!self.layout.is_unsized(), "used byval ABI for unsized layout");
        // Start from the regular indirect mode, then flip it to on-stack (byval).
        self.make_indirect();
        match self.mode {
            PassMode::Indirect { ref mut attrs, meta_attrs: _, ref mut on_stack } => {
                *on_stack = true;

                // Some platforms, like 32-bit x86, change the alignment of the type when passing
                // `byval`. Account for that.
                if let Some(byval_align) = byval_align {
                    // On all targets with byval align this is currently true, so let's assert it.
                    debug_assert!(byval_align >= Align::from_bytes(4).unwrap());
                    attrs.pointee_align = Some(byval_align);
                }
            }
            // `make_indirect` either panicked or left us in `Indirect` mode.
            _ => unreachable!(),
        }
    }

    /// Requests sign- or zero-extension (matching the integer's signedness) for directly-passed
    /// integer arguments narrower than `bits`. No-op for all other modes and types.
    pub fn extend_integer_width_to(&mut self, bits: u64) {
        // Only integers have signedness
        if let BackendRepr::Scalar(scalar) = self.layout.backend_repr
            && let Primitive::Int(i, signed) = scalar.primitive()
            && i.size().bits() < bits
            && let PassMode::Direct(ref mut attrs) = self.mode
        {
            if signed {
                attrs.ext(ArgExtension::Sext)
            } else {
                attrs.ext(ArgExtension::Zext)
            };
        }
    }

    /// Pass this argument by casting it to `target`; see the `CastTarget` docs.
    pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32: false };
    }

    /// Like `cast_to`, but additionally controls whether a `Reg::i32()` padding argument
    /// is emitted before the real argument.
    pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32 };
    }

    /// Whether this argument is passed via a hidden pointer (sized or unsized, byval or not).
    pub fn is_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { .. })
    }

    /// Whether this argument is passed indirectly and is sized (no metadata).
    pub fn is_sized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ })
    }

    /// Whether this argument is passed indirectly and is unsized (has metadata).
    pub fn is_unsized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ })
    }

    /// Whether this argument is not passed at all (ZST).
    pub fn is_ignore(&self) -> bool {
        matches!(self.mode, PassMode::Ignore)
    }

    /// Checks if these two `ArgAbi` are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool
    where
        Ty: PartialEq,
    {
        // Ideally we'd just compare the `mode`, but that is not enough -- for some modes LLVM will look
        // at the type.
        self.layout.eq_abi(&other.layout) && self.mode.eq_abi(&other.mode) && {
            // `fn_arg_sanity_check` accepts `PassMode::Direct` for some aggregates.
            // That elevates any type difference to an ABI difference since we just use the
            // full Rust type as the LLVM argument/return type.
            if matches!(self.mode, PassMode::Direct(..))
                && matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
            {
                // For aggregates in `Direct` mode to be compatible, the types need to be equal.
                self.layout.ty == other.layout.ty
            } else {
                true
            }
        }
    }
}
560
/// The privilege level a RISC-V interrupt handler runs at.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum RiscvInterruptKind {
    /// Machine-mode interrupt handler.
    Machine,
    /// Supervisor-mode interrupt handler.
    Supervisor,
}
566
567impl RiscvInterruptKind {
568    pub fn as_str(&self) -> &'static str {
569        match self {
570            Self::Machine => "machine",
571            Self::Supervisor => "supervisor",
572        }
573    }
574}
575
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// The signature represented by this type may not match the MIR function signature.
/// Certain attributes, like `#[track_caller]` can introduce additional arguments, which are present in [`FnAbi`], but not in `FnSig`.
/// While this difference is rarely relevant, it should still be kept in mind.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct FnAbi<'a, Ty> {
    /// The type, layout, and information about how each argument is passed.
    pub args: Box<[ArgAbi<'a, Ty>]>,

    /// The layout, type, and the way a value is returned from this function.
    pub ret: ArgAbi<'a, Ty>,

    /// Marks this function as variadic (accepting a variable number of arguments).
    pub c_variadic: bool,

    /// The count of non-variadic arguments.
    ///
    /// Should only be different from args.len() when c_variadic is true.
    /// This can be used to know whether an argument is variadic or not.
    pub fixed_count: u32,
    /// The calling convention of this function.
    pub conv: CanonAbi,
    /// Indicates if an unwind may happen across a call to this function.
    pub can_unwind: bool,
}
606
607// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
608impl<'a, Ty: fmt::Display> fmt::Debug for FnAbi<'a, Ty> {
609    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
610        let FnAbi { args, ret, c_variadic, fixed_count, conv, can_unwind } = self;
611        f.debug_struct("FnAbi")
612            .field("args", args)
613            .field("ret", ret)
614            .field("c_variadic", c_variadic)
615            .field("fixed_count", fixed_count)
616            .field("conv", conv)
617            .field("can_unwind", can_unwind)
618            .finish()
619    }
620}
621
622impl<'a, Ty> FnAbi<'a, Ty> {
    /// Adjusts this (default-lowered) signature in place to conform to the foreign `abi` on
    /// the current target architecture, by dispatching to the per-architecture
    /// `compute_abi_info` implementations in the sibling modules.
    pub fn adjust_for_foreign_abi<C>(&mut self, cx: &C, abi: ExternAbi)
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec + HasX86AbiOpt,
    {
        // `extern "x86-interrupt"` is handled up front: its first argument (if any) goes at
        // a fixed stack offset, and no per-arch lowering is applied.
        if abi == ExternAbi::X86Interrupt {
            if let Some(arg) = self.args.first_mut() {
                arg.pass_by_stack_offset(None);
            }
            return;
        }

        let spec = cx.target_spec();
        match &spec.arch[..] {
            "x86" => {
                // On 32-bit x86 both the declared ABI (fastcall/vectorcall vs. the C-family
                // conventions) and the target's `regparm`/`reg_struct_return` options
                // influence the lowering.
                let (flavor, regparm) = match abi {
                    ExternAbi::Fastcall { .. } | ExternAbi::Vectorcall { .. } => {
                        (x86::Flavor::FastcallOrVectorcall, None)
                    }
                    ExternAbi::C { .. } | ExternAbi::Cdecl { .. } | ExternAbi::Stdcall { .. } => {
                        (x86::Flavor::General, cx.x86_abi_opt().regparm)
                    }
                    _ => (x86::Flavor::General, None),
                };
                let reg_struct_return = cx.x86_abi_opt().reg_struct_return;
                let opts = x86::X86Options { flavor, regparm, reg_struct_return };
                if spec.is_like_msvc {
                    x86_win32::compute_abi_info(cx, self, opts);
                } else {
                    x86::compute_abi_info(cx, self, opts);
                }
            }
            "x86_64" => match abi {
                // Explicitly-requested conventions override the platform default.
                ExternAbi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
                ExternAbi::Win64 { .. } | ExternAbi::Vectorcall { .. } => {
                    x86_win64::compute_abi_info(cx, self)
                }
                // Everything else follows the platform's native convention.
                _ => {
                    if cx.target_spec().is_like_windows {
                        x86_win64::compute_abi_info(cx, self)
                    } else {
                        x86_64::compute_abi_info(cx, self)
                    }
                }
            },
            "aarch64" | "arm64ec" => {
                // AArch64 has per-platform variants of the procedure call standard.
                let kind = if cx.target_spec().is_like_darwin {
                    aarch64::AbiKind::DarwinPCS
                } else if cx.target_spec().is_like_windows {
                    aarch64::AbiKind::Win64
                } else {
                    aarch64::AbiKind::AAPCS
                };
                aarch64::compute_abi_info(cx, self, kind)
            }
            "amdgpu" => amdgpu::compute_abi_info(cx, self),
            "arm" => arm::compute_abi_info(cx, self),
            "avr" => avr::compute_abi_info(cx, self),
            "loongarch32" | "loongarch64" => loongarch::compute_abi_info(cx, self),
            "m68k" => m68k::compute_abi_info(cx, self),
            "csky" => csky::compute_abi_info(cx, self),
            "mips" | "mips32r6" => mips::compute_abi_info(cx, self),
            "mips64" | "mips64r6" => mips64::compute_abi_info(cx, self),
            "powerpc" => powerpc::compute_abi_info(cx, self),
            "powerpc64" => powerpc64::compute_abi_info(cx, self),
            "s390x" => s390x::compute_abi_info(cx, self),
            "msp430" => msp430::compute_abi_info(cx, self),
            "sparc" => sparc::compute_abi_info(cx, self),
            "sparc64" => sparc64::compute_abi_info(cx, self),
            "nvptx64" => {
                // Kernel entry points have a dedicated lowering distinct from device functions.
                if abi == ExternAbi::PtxKernel || abi == ExternAbi::GpuKernel {
                    nvptx64::compute_ptx_kernel_abi_info(cx, self)
                } else {
                    nvptx64::compute_abi_info(cx, self)
                }
            }
            "hexagon" => hexagon::compute_abi_info(cx, self),
            "xtensa" => xtensa::compute_abi_info(cx, self),
            "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
            "wasm32" | "wasm64" => wasm::compute_abi_info(cx, self),
            "bpf" => bpf::compute_abi_info(cx, self),
            arch => panic!("no lowering implemented for {arch}"),
        }
    }
707
708    pub fn adjust_for_rust_abi<C>(&mut self, cx: &C)
709    where
710        Ty: TyAbiInterface<'a, C> + Copy,
711        C: HasDataLayout + HasTargetSpec,
712    {
713        let spec = cx.target_spec();
714        match &*spec.arch {
715            "x86" => x86::compute_rust_abi_info(cx, self),
716            "riscv32" | "riscv64" => riscv::compute_rust_abi_info(cx, self),
717            "loongarch32" | "loongarch64" => loongarch::compute_rust_abi_info(cx, self),
718            "aarch64" => aarch64::compute_rust_abi_info(cx, self),
719            "bpf" => bpf::compute_rust_abi_info(self),
720            _ => {}
721        };
722
723        for (arg_idx, arg) in self
724            .args
725            .iter_mut()
726            .enumerate()
727            .map(|(idx, arg)| (Some(idx), arg))
728            .chain(iter::once((None, &mut self.ret)))
729        {
730            // If the logic above already picked a specific type to cast the argument to, leave that
731            // in place.
732            if matches!(arg.mode, PassMode::Ignore | PassMode::Cast { .. }) {
733                continue;
734            }
735
736            if arg_idx.is_none()
737                && arg.layout.size > Primitive::Pointer(AddressSpace::ZERO).size(cx) * 2
738                && !matches!(arg.layout.backend_repr, BackendRepr::SimdVector { .. })
739            {
740                // Return values larger than 2 registers using a return area
741                // pointer. LLVM and Cranelift disagree about how to return
742                // values that don't fit in the registers designated for return
743                // values. LLVM will force the entire return value to be passed
744                // by return area pointer, while Cranelift will look at each IR level
745                // return value independently and decide to pass it in a
746                // register or not, which would result in the return value
747                // being passed partially in registers and partially through a
748                // return area pointer. For large IR-level values such as `i128`,
749                // cranelift will even split up the value into smaller chunks.
750                //
751                // While Cranelift may need to be fixed as the LLVM behavior is
752                // generally more correct with respect to the surface language,
753                // forcing this behavior in rustc itself makes it easier for
754                // other backends to conform to the Rust ABI and for the C ABI
755                // rustc already handles this behavior anyway.
756                //
757                // In addition LLVM's decision to pass the return value in
758                // registers or using a return area pointer depends on how
759                // exactly the return type is lowered to an LLVM IR type. For
760                // example `Option<u128>` can be lowered as `{ i128, i128 }`
761                // in which case the x86_64 backend would use a return area
762                // pointer, or it could be passed as `{ i32, i128 }` in which
763                // case the x86_64 backend would pass it in registers by taking
764                // advantage of an LLVM ABI extension that allows using 3
765                // registers for the x86_64 sysv call conv rather than the
766                // officially specified 2 registers.
767                //
768                // FIXME: Technically we should look at the amount of available
769                // return registers rather than guessing that there are 2
770                // registers for return values. In practice only a couple of
                // architectures have fewer than 2 return registers, none of
                // which are supported by Cranelift.
773                //
774                // NOTE: This adjustment is only necessary for the Rust ABI as
775                // for other ABI's the calling convention implementations in
776                // rustc_target already ensure any return value which doesn't
777                // fit in the available amount of return registers is passed in
778                // the right way for the current target.
779                //
780                // The adjustment is not necessary nor desired for types with a vector
781                // representation; those are handled below.
782                arg.make_indirect();
783                continue;
784            }
785
786            match arg.layout.backend_repr {
787                BackendRepr::Memory { .. } => {
788                    // Compute `Aggregate` ABI.
789
790                    let is_indirect_not_on_stack =
791                        matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
792                    assert!(is_indirect_not_on_stack);
793
794                    let size = arg.layout.size;
795                    if arg.layout.is_sized()
796                        && size <= Primitive::Pointer(AddressSpace::ZERO).size(cx)
797                    {
798                        // We want to pass small aggregates as immediates, but using
799                        // an LLVM aggregate type for this leads to bad optimizations,
800                        // so we pick an appropriately sized integer type instead.
801                        arg.cast_to(Reg { kind: RegKind::Integer, size });
802                    }
803                }
804
805                BackendRepr::SimdVector { .. } => {
806                    // This is a fun case! The gist of what this is doing is
807                    // that we want callers and callees to always agree on the
808                    // ABI of how they pass SIMD arguments. If we were to *not*
809                    // make these arguments indirect then they'd be immediates
810                    // in LLVM, which means that they'd used whatever the
811                    // appropriate ABI is for the callee and the caller. That
812                    // means, for example, if the caller doesn't have AVX
813                    // enabled but the callee does, then passing an AVX argument
814                    // across this boundary would cause corrupt data to show up.
815                    //
816                    // This problem is fixed by unconditionally passing SIMD
817                    // arguments through memory between callers and callees
818                    // which should get them all to agree on ABI regardless of
819                    // target feature sets. Some more information about this
820                    // issue can be found in #44367.
821                    //
822                    // We *could* do better in some cases, e.g. on x86_64 targets where SSE2 is
823                    // required. However, it turns out that that makes LLVM worse at optimizing this
824                    // code, so we pass things indirectly even there. See #139029 for more on that.
825                    if spec.simd_types_indirect {
826                        arg.make_indirect();
827                    }
828                }
829
830                _ => {}
831            }
832        }
833    }
834}
835
// Some types are used a lot. Make sure they don't unintentionally get bigger.
// These are compile-time checks: if a change grows `ArgAbi` or `FnAbi`, the
// corresponding assert fails, forcing the author to either justify the growth
// (by bumping the expected size here) or avoid it (e.g. by boxing a field).
// Only checked on 64-bit targets, where the expected byte sizes below apply.
#[cfg(target_pointer_width = "64")]
mod size_asserts {
    use rustc_data_structures::static_assert_size;

    use super::*;
    // tidy-alphabetical-start
    static_assert_size!(ArgAbi<'_, usize>, 56);
    static_assert_size!(FnAbi<'_, usize>, 80);
    // tidy-alphabetical-end
}
846}