rustc_target/callconv/mod.rs

use std::str::FromStr;
use std::{fmt, iter};

use rustc_abi::{
    AddressSpace, Align, BackendRepr, ExternAbi, HasDataLayout, Primitive, Reg, RegKind, Scalar,
    Size, TyAbiInterface, TyAndLayout,
};
use rustc_macros::HashStable_Generic;

use crate::spec::{HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, RustcAbi, WasmCAbi};

mod aarch64;
mod amdgpu;
mod arm;
mod avr;
mod bpf;
mod csky;
mod hexagon;
mod loongarch;
mod m68k;
mod mips;
mod mips64;
mod msp430;
mod nvptx64;
mod powerpc;
mod powerpc64;
mod riscv;
mod s390x;
mod sparc;
mod sparc64;
mod wasm;
mod x86;
mod x86_64;
mod x86_win32;
mod x86_win64;
mod xtensa;

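/// How an argument (or return value) is passed at the ABI level.
///
/// An illustrative (not normative) sketch of how some types might map to pass modes
/// on a typical 64-bit target:
///
/// ```ignore (illustrative)
/// fn f(x: u32) {}        // x: PassMode::Direct(..) -- a single scalar
/// fn g(x: (u32, u64)) {} // x: PassMode::Pair(.., ..) -- a ScalarPair
/// fn h(x: [u8; 64]) {}   // x: PassMode::Indirect { .. } -- via a hidden pointer
/// ```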
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum PassMode {
    /// Ignore the argument.
    ///
    /// The argument is a ZST.
    Ignore,
    /// Pass the argument directly.
    ///
    /// The argument has a layout ABI of `Scalar` or `Vector`.
    /// Unfortunately, due to past mistakes, in rare cases on wasm it can also be `Aggregate`.
    /// This is bad since it leaks LLVM implementation details into the ABI.
    /// (Also see <https://github.com/rust-lang/rust/issues/115666>.)
    Direct(ArgAttributes),
    /// Pass a pair's elements directly in two arguments.
    ///
    /// The argument has a layout ABI of `ScalarPair`.
    Pair(ArgAttributes, ArgAttributes),
    /// Pass the argument after casting it. See the `CastTarget` docs for details.
    ///
    /// `pad_i32` indicates if a `Reg::i32()` dummy argument is emitted before the real argument.
    Cast { pad_i32: bool, cast: Box<CastTarget> },
    /// Pass the argument indirectly via a hidden pointer.
    ///
    /// The `meta_attrs` value, if any, is for the metadata (vtable or length) of an unsized
    /// argument. (This is the only mode that supports unsized arguments.)
    ///
    /// `on_stack` defines that the value should be passed at a fixed stack offset in accordance
    /// with the ABI rather than passed using a pointer. This corresponds to the `byval` LLVM
    /// argument attribute. The `byval` argument will use a byte array with the same size as the
    /// Rust type (which ensures that padding is preserved and that we do not rely on LLVM's
    /// struct layout), and will use the alignment specified in `attrs.pointee_align` (if `Some`)
    /// or the type's alignment (if `None`). This means that the alignment will not always match
    /// the Rust type's alignment; see the documentation of `pass_by_stack_offset` for more info.
    ///
    /// `on_stack` cannot be true for unsized arguments, i.e., when `meta_attrs` is `Some`.
    Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
}

impl PassMode {
    /// Checks if these two `PassMode`s are equal enough to be considered "the same for all
    /// function call ABIs". However, the `Layout` can also impact ABI decisions,
    /// so that needs to be compared as well!
    pub fn eq_abi(&self, other: &Self) -> bool {
        match (self, other) {
            (PassMode::Ignore, PassMode::Ignore) => true,
            (PassMode::Direct(a1), PassMode::Direct(a2)) => a1.eq_abi(a2),
            (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => a1.eq_abi(a2) && b1.eq_abi(b2),
            (
                PassMode::Cast { cast: c1, pad_i32: pad1 },
                PassMode::Cast { cast: c2, pad_i32: pad2 },
            ) => c1.eq_abi(c2) && pad1 == pad2,
            (
                PassMode::Indirect { attrs: a1, meta_attrs: None, on_stack: s1 },
                PassMode::Indirect { attrs: a2, meta_attrs: None, on_stack: s2 },
            ) => a1.eq_abi(a2) && s1 == s2,
            (
                PassMode::Indirect { attrs: a1, meta_attrs: Some(e1), on_stack: s1 },
                PassMode::Indirect { attrs: a2, meta_attrs: Some(e2), on_stack: s2 },
            ) => a1.eq_abi(a2) && e1.eq_abi(e2) && s1 == s2,
            _ => false,
        }
    }
}

// Hack to disable non_upper_case_globals only for the bitflags! invocation and not for the rest
// of this module.
pub use attr_impl::ArgAttribute;

#[allow(non_upper_case_globals)]
#[allow(unused)]
mod attr_impl {
    use rustc_macros::HashStable_Generic;

    // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
    #[derive(Clone, Copy, Default, Hash, PartialEq, Eq, HashStable_Generic)]
    pub struct ArgAttribute(u8);
    bitflags::bitflags! {
        impl ArgAttribute: u8 {
            const NoAlias   = 1 << 1;
            const NoCapture = 1 << 2;
            const NonNull   = 1 << 3;
            const ReadOnly  = 1 << 4;
            const InReg     = 1 << 5;
            const NoUndef   = 1 << 6;
        }
    }
    rustc_data_structures::external_bitflags_debug! { ArgAttribute }
}

/// Sometimes an ABI requires small integers to be extended to a full or partial register. This
/// enum defines if this extension should be zero-extension or sign-extension when necessary. When
/// it is not necessary to extend the argument, this enum is ignored.
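///
/// For example (an illustrative sketch, not tied to any particular target), on an ABI that
/// extends small integer arguments to 32 bits, an `i8` argument would be marked `Sext` and a
/// `u8` argument `Zext`; see `ArgAbi::extend_integer_width_to`, which applies exactly this rule.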
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum ArgExtension {
    None,
    Zext,
    Sext,
}

/// A compact representation of LLVM attributes (at least those relevant for this module)
/// that can be manipulated without interacting with LLVM's Attribute machinery.
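///
/// A minimal usage sketch (illustrative; `set` and `ext` are the builder methods defined below):
///
/// ```ignore (illustrative)
/// let mut attrs = ArgAttributes::new();
/// attrs.set(ArgAttribute::NonNull).set(ArgAttribute::NoAlias).ext(ArgExtension::Zext);
/// ```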
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct ArgAttributes {
    pub regular: ArgAttribute,
    pub arg_ext: ArgExtension,
    /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
    /// (corresponding to LLVM's `dereferenceable_or_null` attribute, i.e., it is okay for this to
    /// be set on a null pointer, but all non-null pointers must be dereferenceable).
    pub pointee_size: Size,
    pub pointee_align: Option<Align>,
}

impl ArgAttributes {
    pub fn new() -> Self {
        ArgAttributes {
            regular: ArgAttribute::default(),
            arg_ext: ArgExtension::None,
            pointee_size: Size::ZERO,
            pointee_align: None,
        }
    }

    pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
        assert!(
            self.arg_ext == ArgExtension::None || self.arg_ext == ext,
            "cannot set {:?} when {:?} is already set",
            ext,
            self.arg_ext
        );
        self.arg_ext = ext;
        self
    }

    pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
        self.regular |= attr;
        self
    }

    pub fn contains(&self, attr: ArgAttribute) -> bool {
        self.regular.contains(attr)
    }

    /// Checks if these two `ArgAttributes` are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool {
        // There's only one regular attribute that matters for the call ABI: InReg.
        // Everything else is things like noalias, dereferenceable, nonnull, ...
        // (This also applies to pointee_size, pointee_align.)
        if self.regular.contains(ArgAttribute::InReg) != other.regular.contains(ArgAttribute::InReg)
        {
            return false;
        }
        // We also compare the sign extension mode -- this could let the callee make assumptions
        // about bits that conceptually were not even passed.
        if self.arg_ext != other.arg_ext {
            return false;
        }
        true
    }
}

/// An argument passed entirely in registers of the
/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Uniform {
    pub unit: Reg,

    /// The total size of the argument, which can be:
    /// * equal to `unit.size` (one scalar/vector),
    /// * a multiple of `unit.size` (an array of scalars/vectors),
    /// * if `unit.kind` is `Integer`, the last element can be shorter, i.e., `{ i64, i64, i32 }`
    ///   for 64-bit integers with a total size of 20 bytes. When the argument is actually passed,
    ///   this size will be rounded up to the nearest multiple of `unit.size`.
    pub total: Size,

    /// Indicates that the argument is consecutive, in the sense that either all values need to be
    /// passed in registers, or all on the stack. If they are passed on the stack, there should be
    /// no additional padding between elements.
    pub is_consecutive: bool,
}

impl From<Reg> for Uniform {
    fn from(unit: Reg) -> Uniform {
        Uniform { unit, total: unit.size, is_consecutive: false }
    }
}

impl Uniform {
    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
        self.unit.align(cx)
    }

    /// Pass using one or more values of the given type, without requiring them to be consecutive.
    /// That is, some values may be passed in registers and some on the stack.
    pub fn new(unit: Reg, total: Size) -> Self {
        Uniform { unit, total, is_consecutive: false }
    }

    /// Pass using one or more consecutive values of the given type. Either all values will be
    /// passed in registers, or all on the stack.
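    ///
    /// For example (an illustrative sketch), a homogeneous float aggregate of four `f64`s
    /// could be described as `Uniform::consecutive(Reg::f64(), Size::from_bytes(32))`.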
    pub fn consecutive(unit: Reg, total: Size) -> Self {
        Uniform { unit, total, is_consecutive: true }
    }
}

/// Describes the type used for `PassMode::Cast`.
///
/// Passing arguments in this mode works as follows: the registers in the `prefix` (the ones that
/// are `Some`) get laid out one after the other (using `repr(C)` layout rules). Then the
/// `rest.unit` register type gets repeated often enough to cover `rest.total`. This describes the
/// actual type used for the call; the Rust type of the argument is then transmuted to this ABI
/// type (and all data in the padding between the registers is dropped).
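///
/// For example (illustrative), `CastTarget::pair(Reg::i64(), Reg::i32())` describes the ABI type
/// `{ i64, i32 }`:
///
/// ```ignore (illustrative)
/// // One `i64` prefix register, then `rest` covered by a single `i32` unit.
/// let cast = CastTarget::pair(Reg::i64(), Reg::i32());
/// ```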
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct CastTarget {
    pub prefix: [Option<Reg>; 8],
    pub rest: Uniform,
    pub attrs: ArgAttributes,
}

impl From<Reg> for CastTarget {
    fn from(unit: Reg) -> CastTarget {
        CastTarget::from(Uniform::from(unit))
    }
}

impl From<Uniform> for CastTarget {
    fn from(uniform: Uniform) -> CastTarget {
        CastTarget {
            prefix: [None; 8],
            rest: uniform,
            attrs: ArgAttributes {
                regular: ArgAttribute::default(),
                arg_ext: ArgExtension::None,
                pointee_size: Size::ZERO,
                pointee_align: None,
            },
        }
    }
}

impl CastTarget {
    pub fn pair(a: Reg, b: Reg) -> CastTarget {
        CastTarget {
            prefix: [Some(a), None, None, None, None, None, None, None],
            rest: Uniform::from(b),
            attrs: ArgAttributes {
                regular: ArgAttribute::default(),
                arg_ext: ArgExtension::None,
                pointee_size: Size::ZERO,
                pointee_align: None,
            },
        }
    }

    /// When you only access the range containing valid data, you can use this unaligned size;
    /// otherwise, use the safer `size` method.
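    ///
    /// For example (illustrative, assuming a typical 64-bit data layout):
    /// `CastTarget::pair(Reg::i64(), Reg::i32())` has an unaligned size of 12 bytes
    /// (an 8-byte prefix plus one 4-byte unit), which `size` then rounds up to 16 bytes
    /// to honor the 8-byte alignment contributed by the `i64` prefix.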
    pub fn unaligned_size<C: HasDataLayout>(&self, _cx: &C) -> Size {
        // Prefix arguments are passed in specific designated registers.
        let prefix_size = self
            .prefix
            .iter()
            .filter_map(|x| x.map(|reg| reg.size))
            .fold(Size::ZERO, |acc, size| acc + size);
        // Remaining arguments are passed in chunks of the unit size.
        let rest_size =
            self.rest.unit.size * self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes());

        prefix_size + rest_size
    }

    pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
        self.unaligned_size(cx).align_to(self.align(cx))
    }

    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
        self.prefix
            .iter()
            .filter_map(|x| x.map(|reg| reg.align(cx)))
            .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| {
                acc.max(align)
            })
    }

    /// Checks if these two `CastTarget`s are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool {
        let CastTarget { prefix: prefix_l, rest: rest_l, attrs: attrs_l } = self;
        let CastTarget { prefix: prefix_r, rest: rest_r, attrs: attrs_r } = other;
        prefix_l == prefix_r && rest_l == rest_r && attrs_l.eq_abi(attrs_r)
    }
}

/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct ArgAbi<'a, Ty> {
    pub layout: TyAndLayout<'a, Ty>,
    pub mode: PassMode,
}

// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
impl<'a, Ty: fmt::Display> fmt::Debug for ArgAbi<'a, Ty> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let ArgAbi { layout, mode } = self;
        f.debug_struct("ArgAbi").field("layout", layout).field("mode", mode).finish()
    }
}

impl<'a, Ty> ArgAbi<'a, Ty> {
    /// This defines the "default ABI" for the given type, which is then adjusted later in
    /// `fn_abi_adjust_for_abi`.
    pub fn new(
        cx: &impl HasDataLayout,
        layout: TyAndLayout<'a, Ty>,
        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, Scalar, Size) -> ArgAttributes,
    ) -> Self {
        let mode = match layout.backend_repr {
            BackendRepr::Scalar(scalar) => {
                PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO))
            }
            BackendRepr::ScalarPair(a, b) => PassMode::Pair(
                scalar_attrs(&layout, a, Size::ZERO),
                scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
            ),
            BackendRepr::SimdVector { .. } => PassMode::Direct(ArgAttributes::new()),
            BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
        };
        ArgAbi { layout, mode }
    }

    fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
        let mut attrs = ArgAttributes::new();

        // For non-immediate arguments the callee gets its own copy of
        // the value on the stack, so there are no aliases. It's also
        // program-invisible, so it can't possibly be captured.
        attrs
            .set(ArgAttribute::NoAlias)
            .set(ArgAttribute::NoCapture)
            .set(ArgAttribute::NonNull)
            .set(ArgAttribute::NoUndef);
        attrs.pointee_size = layout.size;
        attrs.pointee_align = Some(layout.align.abi);

        let meta_attrs = layout.is_unsized().then_some(ArgAttributes::new());

        PassMode::Indirect { attrs, meta_attrs, on_stack: false }
    }

    /// Pass this argument directly instead. Should NOT be used!
    /// Only exists because of past ABI mistakes that will take time to fix
    /// (see <https://github.com/rust-lang/rust/issues/115666>).
    #[track_caller]
    pub fn make_direct_deprecated(&mut self) {
        match self.mode {
            PassMode::Indirect { .. } => {
                self.mode = PassMode::Direct(ArgAttributes::new());
            }
            PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => {} // already direct
            _ => panic!("Tried to make {:?} direct", self.mode),
        }
    }

    /// Pass this argument indirectly, by passing a (thin or wide) pointer to the argument instead.
    /// This is valid for both sized and unsized arguments.
    #[track_caller]
    pub fn make_indirect(&mut self) {
        match self.mode {
            PassMode::Direct(_) | PassMode::Pair(_, _) => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            _ => panic!("Tried to make {:?} indirect", self.mode),
        }
    }

    /// Same as `make_indirect`, but for arguments that are ignored. Only needed for ABIs that pass
    /// ZSTs indirectly.
    #[track_caller]
    pub fn make_indirect_from_ignore(&mut self) {
        match self.mode {
            PassMode::Ignore => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            _ => panic!("Tried to make {:?} indirect (expected `PassMode::Ignore`)", self.mode),
        }
    }

    /// Pass this argument indirectly, by placing it at a fixed stack offset.
    /// This corresponds to the `byval` LLVM argument attribute.
    /// This is only valid for sized arguments.
    ///
    /// `byval_align` specifies the alignment of the `byval` stack slot, which does not need to
    /// correspond to the type's alignment. This will be `Some` if the target's ABI specifies that
    /// stack slots used for arguments passed by-value have specific alignment requirements which
    /// differ from the alignment used in other situations.
    ///
    /// If `None`, the type's alignment is used.
    ///
    /// If the resulting alignment differs from the type's alignment,
    /// the argument will be copied to an alloca with sufficient alignment,
    /// either in the caller (if the type's alignment is lower than the byval alignment)
    /// or in the callee (if the type's alignment is higher than the byval alignment),
    /// to ensure that Rust code never sees an underaligned pointer.
    pub fn pass_by_stack_offset(&mut self, byval_align: Option<Align>) {
        assert!(!self.layout.is_unsized(), "used byval ABI for unsized layout");
        self.make_indirect();
        match self.mode {
            PassMode::Indirect { ref mut attrs, meta_attrs: _, ref mut on_stack } => {
                *on_stack = true;

                // Some platforms, like 32-bit x86, change the alignment of the type when passing
                // `byval`. Account for that.
                if let Some(byval_align) = byval_align {
                    // On all targets with byval align this is currently true, so let's assert it.
                    debug_assert!(byval_align >= Align::from_bytes(4).unwrap());
                    attrs.pointee_align = Some(byval_align);
                }
            }
            _ => unreachable!(),
        }
    }

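    /// If this argument is a small integer scalar passed `Direct`, mark it for sign- or
    /// zero-extension (based on its signedness) up to `bits` bits.
    ///
    /// For example (illustrative), on a target whose ABI extends integer arguments to 32 bits:
    ///
    /// ```ignore (illustrative)
    /// arg.extend_integer_width_to(32); // a `u8` gets Zext, an `i8` gets Sext
    /// ```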
    pub fn extend_integer_width_to(&mut self, bits: u64) {
        // Only integers have signedness.
        if let BackendRepr::Scalar(scalar) = self.layout.backend_repr {
            if let Primitive::Int(i, signed) = scalar.primitive() {
                if i.size().bits() < bits {
                    if let PassMode::Direct(ref mut attrs) = self.mode {
                        if signed {
                            attrs.ext(ArgExtension::Sext)
                        } else {
                            attrs.ext(ArgExtension::Zext)
                        };
                    }
                }
            }
        }
    }

    pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32: false };
    }

    pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32 };
    }

    pub fn is_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { .. })
    }

    pub fn is_sized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ })
    }

    pub fn is_unsized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ })
    }

    pub fn is_ignore(&self) -> bool {
        matches!(self.mode, PassMode::Ignore)
    }

    /// Checks if these two `ArgAbi`s are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool
    where
        Ty: PartialEq,
    {
        // Ideally we'd just compare the `mode`, but that is not enough -- for some modes, LLVM
        // will look at the type.
        self.layout.eq_abi(&other.layout) && self.mode.eq_abi(&other.mode) && {
            // `fn_arg_sanity_check` accepts `PassMode::Direct` for some aggregates.
            // That elevates any type difference to an ABI difference since we just use the
            // full Rust type as the LLVM argument/return type.
            if matches!(self.mode, PassMode::Direct(..))
                && matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
            {
                // For aggregates in `Direct` mode to be compatible, the types need to be equal.
                self.layout.ty == other.layout.ty
            } else {
                true
            }
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Conv {
    // General language calling conventions, for which every target
    // should have its own backend (e.g. LLVM) support.
    C,
    Rust,

    Cold,
    PreserveMost,
    PreserveAll,

    // Target-specific calling conventions.
    ArmAapcs,
    CCmseNonSecureCall,
    CCmseNonSecureEntry,

    Msp430Intr,

    GpuKernel,

    X86Fastcall,
    X86Intr,
    X86Stdcall,
    X86ThisCall,
    X86VectorCall,

    X86_64SysV,
    X86_64Win64,

    AvrInterrupt,
    AvrNonBlockingInterrupt,

    RiscvInterrupt { kind: RiscvInterruptKind },
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum RiscvInterruptKind {
    Machine,
    Supervisor,
}

impl RiscvInterruptKind {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Machine => "machine",
            Self::Supervisor => "supervisor",
        }
    }
}

/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// The signature represented by this type may not match the MIR function signature.
/// Certain attributes, like `#[track_caller]`, can introduce additional arguments, which are
/// present in [`FnAbi`] but not in `FnSig`.
/// While this difference is rarely relevant, it should still be kept in mind.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct FnAbi<'a, Ty> {
    /// The type, layout, and information about how each argument is passed.
    pub args: Box<[ArgAbi<'a, Ty>]>,

    /// The layout, type, and the way a value is returned from this function.
    pub ret: ArgAbi<'a, Ty>,

    /// Marks this function as variadic (accepting a variable number of arguments).
    pub c_variadic: bool,

    /// The count of non-variadic arguments.
    ///
    /// Should only be different from `args.len()` when `c_variadic` is true.
    /// This can be used to know whether an argument is variadic or not.
    pub fixed_count: u32,
    /// The calling convention of this function.
    pub conv: Conv,
    /// Indicates if an unwind may happen across a call to this function.
    pub can_unwind: bool,
}

// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
impl<'a, Ty: fmt::Display> fmt::Debug for FnAbi<'a, Ty> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let FnAbi { args, ret, c_variadic, fixed_count, conv, can_unwind } = self;
        f.debug_struct("FnAbi")
            .field("args", args)
            .field("ret", ret)
            .field("c_variadic", c_variadic)
            .field("fixed_count", fixed_count)
            .field("conv", conv)
            .field("can_unwind", can_unwind)
            .finish()
    }
}

impl<'a, Ty> FnAbi<'a, Ty> {
    pub fn adjust_for_foreign_abi<C>(&mut self, cx: &C, abi: ExternAbi)
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec + HasWasmCAbiOpt + HasX86AbiOpt,
    {
        if abi == ExternAbi::X86Interrupt {
            if let Some(arg) = self.args.first_mut() {
                arg.pass_by_stack_offset(None);
            }
            return;
        }

        let spec = cx.target_spec();
        match &spec.arch[..] {
            "x86" => {
                let (flavor, regparm) = match abi {
                    ExternAbi::Fastcall { .. } | ExternAbi::Vectorcall { .. } => {
                        (x86::Flavor::FastcallOrVectorcall, None)
                    }
                    ExternAbi::C { .. } | ExternAbi::Cdecl { .. } | ExternAbi::Stdcall { .. } => {
                        (x86::Flavor::General, cx.x86_abi_opt().regparm)
                    }
                    _ => (x86::Flavor::General, None),
                };
                let reg_struct_return = cx.x86_abi_opt().reg_struct_return;
                let opts = x86::X86Options { flavor, regparm, reg_struct_return };
                if spec.is_like_msvc {
                    x86_win32::compute_abi_info(cx, self, opts);
                } else {
                    x86::compute_abi_info(cx, self, opts);
                }
            }
            "x86_64" => match abi {
                ExternAbi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
                ExternAbi::Win64 { .. } | ExternAbi::Vectorcall { .. } => {
                    x86_win64::compute_abi_info(cx, self)
                }
                _ => {
                    if cx.target_spec().is_like_windows {
                        x86_win64::compute_abi_info(cx, self)
                    } else {
                        x86_64::compute_abi_info(cx, self)
                    }
                }
            },
            "aarch64" | "arm64ec" => {
                let kind = if cx.target_spec().is_like_osx {
                    aarch64::AbiKind::DarwinPCS
                } else if cx.target_spec().is_like_windows {
                    aarch64::AbiKind::Win64
                } else {
                    aarch64::AbiKind::AAPCS
                };
                aarch64::compute_abi_info(cx, self, kind)
            }
            "amdgpu" => amdgpu::compute_abi_info(cx, self),
            "arm" => arm::compute_abi_info(cx, self),
            "avr" => avr::compute_abi_info(self),
            "loongarch64" => loongarch::compute_abi_info(cx, self),
            "m68k" => m68k::compute_abi_info(self),
            "csky" => csky::compute_abi_info(self),
            "mips" | "mips32r6" => mips::compute_abi_info(cx, self),
            "mips64" | "mips64r6" => mips64::compute_abi_info(cx, self),
            "powerpc" => powerpc::compute_abi_info(cx, self),
            "powerpc64" => powerpc64::compute_abi_info(cx, self),
            "s390x" => s390x::compute_abi_info(cx, self),
            "msp430" => msp430::compute_abi_info(self),
            "sparc" => sparc::compute_abi_info(cx, self),
            "sparc64" => sparc64::compute_abi_info(cx, self),
            "nvptx64" => {
                let abi = cx.target_spec().adjust_abi(abi, self.c_variadic);
                if abi == ExternAbi::PtxKernel || abi == ExternAbi::GpuKernel {
                    nvptx64::compute_ptx_kernel_abi_info(cx, self)
                } else {
                    nvptx64::compute_abi_info(self)
                }
            }
            "hexagon" => hexagon::compute_abi_info(self),
            "xtensa" => xtensa::compute_abi_info(cx, self),
            "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
            "wasm32" => {
                if spec.os == "unknown" && matches!(cx.wasm_c_abi_opt(), WasmCAbi::Legacy { .. }) {
                    wasm::compute_wasm_abi_info(self)
                } else {
                    wasm::compute_c_abi_info(cx, self)
                }
            }
            "wasm64" => wasm::compute_c_abi_info(cx, self),
            "bpf" => bpf::compute_abi_info(self),
            arch => panic!("no lowering implemented for {arch}"),
        }
    }

    pub fn adjust_for_rust_abi<C>(&mut self, cx: &C, abi: ExternAbi)
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec,
    {
        let spec = cx.target_spec();
        match &*spec.arch {
            "x86" => x86::compute_rust_abi_info(cx, self, abi),
            "riscv32" | "riscv64" => riscv::compute_rust_abi_info(cx, self, abi),
            "loongarch64" => loongarch::compute_rust_abi_info(cx, self, abi),
            "aarch64" => aarch64::compute_rust_abi_info(cx, self),
            _ => {}
        };

        // Decides whether we can pass the given SIMD argument via `PassMode::Direct`.
        // May only return `true` if the target will always pass those arguments the same way,
        // no matter what the user does with `-Ctarget-feature`! In other words, whatever
        // target features are required to pass a SIMD value in registers must be listed in
        // the `abi_required_features` for the current target and ABI.
        let can_pass_simd_directly = |arg: &ArgAbi<'_, Ty>| match &*spec.arch {
            // On x86, if we have SSE2 (which we have by default for x86_64), we can always pass up
            // to 128-bit-sized vectors.
            "x86" if spec.rustc_abi == Some(RustcAbi::X86Sse2) => arg.layout.size.bits() <= 128,
            "x86_64" if spec.rustc_abi != Some(RustcAbi::X86Softfloat) => {
                // FIXME: once https://github.com/bytecodealliance/wasmtime/issues/10254 is fixed,
                // accept vectors of up to 128 bits rather than only vectors of exactly 128 bits.
                arg.layout.size.bits() == 128
            }
            // So far, we haven't implemented this logic for any other target.
            _ => false,
        };

        for (arg_idx, arg) in self
            .args
            .iter_mut()
            .enumerate()
            .map(|(idx, arg)| (Some(idx), arg))
            .chain(iter::once((None, &mut self.ret)))
        {
            // If the logic above already picked a specific type to cast the argument to, leave that
            // in place.
            if matches!(arg.mode, PassMode::Ignore | PassMode::Cast { .. }) {
                continue;
            }

            if arg_idx.is_none()
                && arg.layout.size > Primitive::Pointer(AddressSpace::DATA).size(cx) * 2
                && !matches!(arg.layout.backend_repr, BackendRepr::SimdVector { .. })
            {
                // Return values larger than 2 registers using a return area
                // pointer. LLVM and Cranelift disagree about how to return
                // values that don't fit in the registers designated for return
                // values. LLVM will force the entire return value to be passed
                // by return area pointer, while Cranelift will look at each IR-level
                // return value independently and decide to pass it in a
                // register or not, which would result in the return value
                // being passed partially in registers and partially through a
                // return area pointer. For large IR-level values such as `i128`,
                // Cranelift will even split up the value into smaller chunks.
                //
                // While Cranelift may need to be fixed as the LLVM behavior is
                // generally more correct with respect to the surface language,
                // forcing this behavior in rustc itself makes it easier for
                // other backends to conform to the Rust ABI; for the C ABI,
                // rustc already handles this behavior anyway.
                //
                // In addition, LLVM's decision to pass the return value in
                // registers or using a return area pointer depends on how
                // exactly the return type is lowered to an LLVM IR type. For
                // example `Option<u128>` can be lowered as `{ i128, i128 }`,
                // in which case the x86_64 backend would use a return area
                // pointer, or it could be passed as `{ i32, i128 }`, in which
                // case the x86_64 backend would pass it in registers by taking
                // advantage of an LLVM ABI extension that allows using 3
                // registers for the x86_64 sysv call conv rather than the
                // officially specified 2 registers.
                //
                // FIXME: Technically we should look at the amount of available
                // return registers rather than guessing that there are 2
                // registers for return values. In practice only a couple of
                // architectures have fewer than 2 return registers, none of
                // which are supported by Cranelift.
                //
                // NOTE: This adjustment is only necessary for the Rust ABI, as
                // for other ABIs the calling convention implementations in
                // rustc_target already ensure that any return value which doesn't
                // fit in the available amount of return registers is passed in
                // the right way for the current target.
                //
                // The adjustment is neither necessary nor desired for types with a vector
                // representation; those are handled below.
                arg.make_indirect();
                continue;
            }

            match arg.layout.backend_repr {
                BackendRepr::Memory { .. } => {
                    // Compute `Aggregate` ABI.

                    let is_indirect_not_on_stack =
                        matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
                    assert!(is_indirect_not_on_stack);

                    let size = arg.layout.size;
                    if arg.layout.is_sized()
                        && size <= Primitive::Pointer(AddressSpace::DATA).size(cx)
                    {
                        // We want to pass small aggregates as immediates, but using
                        // an LLVM aggregate type for this leads to bad optimizations,
                        // so we pick an appropriately sized integer type instead.
                        arg.cast_to(Reg { kind: RegKind::Integer, size });
                    }
                }

                BackendRepr::SimdVector { .. } => {
                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the intrinsic ABI is exempt here as those are not
                    // real functions anyway, and the backend expects very specific types.
                    if abi != ExternAbi::RustIntrinsic
                        && spec.simd_types_indirect
                        && !can_pass_simd_directly(arg)
                    {
                        arg.make_indirect();
                    }
                }

                _ => {}
            }
        }
    }
}

impl FromStr for Conv {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "C" => Ok(Conv::C),
            "Rust" => Ok(Conv::Rust),
            "RustCold" => Ok(Conv::Rust),
            "ArmAapcs" => Ok(Conv::ArmAapcs),
            "CCmseNonSecureCall" => Ok(Conv::CCmseNonSecureCall),
            "CCmseNonSecureEntry" => Ok(Conv::CCmseNonSecureEntry),
            "Msp430Intr" => Ok(Conv::Msp430Intr),
            "X86Fastcall" => Ok(Conv::X86Fastcall),
            "X86Intr" => Ok(Conv::X86Intr),
            "X86Stdcall" => Ok(Conv::X86Stdcall),
            "X86ThisCall" => Ok(Conv::X86ThisCall),
            "X86VectorCall" => Ok(Conv::X86VectorCall),
            "X86_64SysV" => Ok(Conv::X86_64SysV),
            "X86_64Win64" => Ok(Conv::X86_64Win64),
            "GpuKernel" => Ok(Conv::GpuKernel),
            "AvrInterrupt" => Ok(Conv::AvrInterrupt),
            "AvrNonBlockingInterrupt" => Ok(Conv::AvrNonBlockingInterrupt),
            "RiscvInterrupt(machine)" => {
                Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine })
            }
            "RiscvInterrupt(supervisor)" => {
                Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor })
            }
            _ => Err(format!("'{s}' is not a valid value for entry function call convention.")),
        }
    }
}

// Some types are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(target_pointer_width = "64")]
mod size_asserts {
    use rustc_data_structures::static_assert_size;

    use super::*;
    // tidy-alphabetical-start
    static_assert_size!(ArgAbi<'_, usize>, 56);
    static_assert_size!(FnAbi<'_, usize>, 80);
    // tidy-alphabetical-end
}