// rustc_target/callconv/mod.rs

use std::str::FromStr;
use std::{fmt, iter};

use rustc_abi::{
    AddressSpace, Align, BackendRepr, ExternAbi, HasDataLayout, Primitive, Reg, RegKind, Scalar,
    Size, TyAbiInterface, TyAndLayout,
};
use rustc_macros::HashStable_Generic;

use crate::spec::{HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};

mod aarch64;
mod amdgpu;
mod arm;
mod avr;
mod bpf;
mod csky;
mod hexagon;
mod loongarch;
mod m68k;
mod mips;
mod mips64;
mod msp430;
mod nvptx64;
mod powerpc;
mod powerpc64;
mod riscv;
mod s390x;
mod sparc;
mod sparc64;
mod wasm;
mod x86;
mod x86_64;
mod x86_win64;
mod xtensa;

#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum PassMode {
    /// Ignore the argument.
    ///
    /// The argument is either uninhabited or a ZST.
    Ignore,
    /// Pass the argument directly.
    ///
    /// The argument has a layout ABI of `Scalar` or `Vector`.
    /// Unfortunately, due to past mistakes, in rare cases on wasm it can also be `Aggregate`.
    /// This is bad since it leaks LLVM implementation details into the ABI.
    /// (Also see <https://github.com/rust-lang/rust/issues/115666>.)
    Direct(ArgAttributes),
    /// Pass a pair's elements directly in two arguments.
    ///
    /// The argument has a layout ABI of `ScalarPair`.
    Pair(ArgAttributes, ArgAttributes),
    /// Pass the argument after casting it. See the `CastTarget` docs for details.
    ///
    /// `pad_i32` indicates if a `Reg::i32()` dummy argument is emitted before the real argument.
    Cast { pad_i32: bool, cast: Box<CastTarget> },
    /// Pass the argument indirectly via a hidden pointer.
    ///
    /// The `meta_attrs` value, if any, is for the metadata (vtable or length) of an unsized
    /// argument. (This is the only mode that supports unsized arguments.)
    ///
    /// `on_stack` defines that the value should be passed at a fixed stack offset in accordance
    /// with the ABI rather than passed using a pointer. This corresponds to the `byval` LLVM
    /// argument attribute. The `byval` argument will use a byte array with the same size as the
    /// Rust type (which ensures that padding is preserved and that we do not rely on LLVM's
    /// struct layout), and will use the alignment specified in `attrs.pointee_align` (if `Some`)
    /// or the type's alignment (if `None`). This means that the alignment will not always match
    /// the Rust type's alignment; see the documentation of `pass_by_stack_offset` for more info.
    ///
    /// `on_stack` cannot be true for unsized arguments, i.e., when `meta_attrs` is `Some`.
    Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
}
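
// Illustrative only (a rough sketch, not a normative table): under the default
// classification (see `ArgAbi::new` below and the variant docs above), typical
// Rust types map to pass modes roughly like this:
//
//     ()                  -> PassMode::Ignore              // ZST
//     i32, &T, *const T   -> PassMode::Direct(..)          // BackendRepr::Scalar
//     (i32, i32)          -> PassMode::Pair(..)            // BackendRepr::ScalarPair
//     [u8; 64]            -> PassMode::Indirect { .. }     // BackendRepr::Memory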

impl PassMode {
    /// Checks if these two `PassMode`s are equal enough to be considered "the same for all
    /// function call ABIs". However, the `Layout` can also impact ABI decisions,
    /// so that needs to be compared as well!
    pub fn eq_abi(&self, other: &Self) -> bool {
        match (self, other) {
            (PassMode::Ignore, PassMode::Ignore) => true,
            (PassMode::Direct(a1), PassMode::Direct(a2)) => a1.eq_abi(a2),
            (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => a1.eq_abi(a2) && b1.eq_abi(b2),
            (
                PassMode::Cast { cast: c1, pad_i32: pad1 },
                PassMode::Cast { cast: c2, pad_i32: pad2 },
            ) => c1.eq_abi(c2) && pad1 == pad2,
            (
                PassMode::Indirect { attrs: a1, meta_attrs: None, on_stack: s1 },
                PassMode::Indirect { attrs: a2, meta_attrs: None, on_stack: s2 },
            ) => a1.eq_abi(a2) && s1 == s2,
            (
                PassMode::Indirect { attrs: a1, meta_attrs: Some(e1), on_stack: s1 },
                PassMode::Indirect { attrs: a2, meta_attrs: Some(e2), on_stack: s2 },
            ) => a1.eq_abi(a2) && e1.eq_abi(e2) && s1 == s2,
            _ => false,
        }
    }
}

// Hack to disable non_upper_case_globals only for the bitflags! invocation and not for the rest
// of this module.
pub use attr_impl::ArgAttribute;

#[allow(non_upper_case_globals)]
#[allow(unused)]
mod attr_impl {
    use rustc_macros::HashStable_Generic;

    // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
    #[derive(Clone, Copy, Default, Hash, PartialEq, Eq, HashStable_Generic)]
    pub struct ArgAttribute(u8);
    bitflags::bitflags! {
        impl ArgAttribute: u8 {
            const NoAlias   = 1 << 1;
            const NoCapture = 1 << 2;
            const NonNull   = 1 << 3;
            const ReadOnly  = 1 << 4;
            const InReg     = 1 << 5;
            const NoUndef   = 1 << 6;
        }
    }
    rustc_data_structures::external_bitflags_debug! { ArgAttribute }
}

/// Sometimes an ABI requires small integers to be extended to a full or partial register. This
/// enum defines whether this extension should be zero-extension or sign-extension when necessary.
/// When it is not necessary to extend the argument, this enum is ignored.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum ArgExtension {
    None,
    Zext,
    Sext,
}
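
// A worked example (illustrative, assuming an ABI that widens small integer
// arguments to 32-bit slots): passing `0xFFu8` with `Zext` yields the register
// value 0x0000_00FF, while passing `-1i8` with `Sext` yields 0xFFFF_FFFF.
// Without the correct extension, the callee could read garbage in the upper bits.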

/// A compact representation of LLVM attributes (at least those relevant for this module)
/// that can be manipulated without interacting with LLVM's Attribute machinery.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct ArgAttributes {
    pub regular: ArgAttribute,
    pub arg_ext: ArgExtension,
    /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
    /// (corresponding to LLVM's `dereferenceable_or_null` attribute, i.e., it is okay for this to
    /// be set on a null pointer, but all non-null pointers must be dereferenceable).
    pub pointee_size: Size,
    pub pointee_align: Option<Align>,
}

impl ArgAttributes {
    pub fn new() -> Self {
        ArgAttributes {
            regular: ArgAttribute::default(),
            arg_ext: ArgExtension::None,
            pointee_size: Size::ZERO,
            pointee_align: None,
        }
    }

    pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
        assert!(
            self.arg_ext == ArgExtension::None || self.arg_ext == ext,
            "cannot set {:?} when {:?} is already set",
            ext,
            self.arg_ext
        );
        self.arg_ext = ext;
        self
    }

    pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
        self.regular |= attr;
        self
    }

    pub fn contains(&self, attr: ArgAttribute) -> bool {
        self.regular.contains(attr)
    }

    /// Checks if these two `ArgAttributes` are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool {
        // There's only one regular attribute that matters for the call ABI: InReg.
        // Everything else is things like noalias, dereferenceable, nonnull, ...
        // (This also applies to pointee_size, pointee_align.)
        if self.regular.contains(ArgAttribute::InReg) != other.regular.contains(ArgAttribute::InReg)
        {
            return false;
        }
        // We also compare the sign-extension mode -- this could let the callee make assumptions
        // about bits that conceptually were not even passed.
        if self.arg_ext != other.arg_ext {
            return false;
        }
        true
    }
}
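
// A minimal usage sketch (illustrative, not taken from a real target): the
// setters borrow `&mut self` and return `&mut Self`, so they chain.
//
//     let mut attrs = ArgAttributes::new();
//     attrs.set(ArgAttribute::NonNull).set(ArgAttribute::NoUndef).ext(ArgExtension::Zext);
//
// Note that `ext` asserts that no *conflicting* extension was set previously:
// setting `Sext` after `Zext` on the same attributes would panic.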

/// An argument passed entirely in registers of the
/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Uniform {
    pub unit: Reg,

    /// The total size of the argument, which can be:
    /// * equal to `unit.size` (one scalar/vector),
    /// * a multiple of `unit.size` (an array of scalars/vectors),
    /// * if `unit.kind` is `Integer`, the last element can be shorter, i.e., `{ i64, i64, i32 }`
    ///   for 64-bit integers with a total size of 20 bytes. When the argument is actually passed,
    ///   this size will be rounded up to the nearest multiple of `unit.size`.
    pub total: Size,

    /// Indicates that the argument is consecutive, in the sense that either all values need to be
    /// passed in registers, or all on the stack. If they are passed on the stack, there should be
    /// no additional padding between elements.
    pub is_consecutive: bool,
}

impl From<Reg> for Uniform {
    fn from(unit: Reg) -> Uniform {
        Uniform { unit, total: unit.size, is_consecutive: false }
    }
}

impl Uniform {
    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
        self.unit.align(cx)
    }

    /// Pass using one or more values of the given type, without requiring them to be consecutive.
    /// That is, some values may be passed in registers and some on the stack.
    pub fn new(unit: Reg, total: Size) -> Self {
        Uniform { unit, total, is_consecutive: false }
    }

    /// Pass using one or more consecutive values of the given type. Either all values will be
    /// passed in registers, or all on the stack.
    pub fn consecutive(unit: Reg, total: Size) -> Self {
        Uniform { unit, total, is_consecutive: true }
    }
}
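
// For example (illustrative): an AArch64 homogeneous float aggregate such as
// `struct Rgba(f32, f32, f32, f32)` could be described as
// `Uniform::consecutive(Reg::f32(), Size::from_bytes(16))`, i.e. four `f32`
// units that must be passed either all in registers or all on the stack.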

/// Describes the type used for `PassMode::Cast`.
///
/// Passing arguments in this mode works as follows: the registers in the `prefix` (the ones that
/// are `Some`) get laid out one after the other (using `repr(C)` layout rules). Then the
/// `rest.unit` register type gets repeated often enough to cover `rest.total`. This describes the
/// actual type used for the call; the Rust type of the argument is then transmuted to this ABI type
/// (and all data in the padding between the registers is dropped).
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct CastTarget {
    pub prefix: [Option<Reg>; 8],
    pub rest: Uniform,
    pub attrs: ArgAttributes,
}

impl From<Reg> for CastTarget {
    fn from(unit: Reg) -> CastTarget {
        CastTarget::from(Uniform::from(unit))
    }
}

impl From<Uniform> for CastTarget {
    fn from(uniform: Uniform) -> CastTarget {
        CastTarget {
            prefix: [None; 8],
            rest: uniform,
            attrs: ArgAttributes {
                regular: ArgAttribute::default(),
                arg_ext: ArgExtension::None,
                pointee_size: Size::ZERO,
                pointee_align: None,
            },
        }
    }
}

impl CastTarget {
    pub fn pair(a: Reg, b: Reg) -> CastTarget {
        CastTarget {
            prefix: [Some(a), None, None, None, None, None, None, None],
            rest: Uniform::from(b),
            attrs: ArgAttributes {
                regular: ArgAttribute::default(),
                arg_ext: ArgExtension::None,
                pointee_size: Size::ZERO,
                pointee_align: None,
            },
        }
    }

    /// When you only access the range containing valid data, you can use this unaligned size;
    /// otherwise, use the safer `size` method.
    pub fn unaligned_size<C: HasDataLayout>(&self, _cx: &C) -> Size {
        // Prefix arguments are passed in specific designated registers
        let prefix_size = self
            .prefix
            .iter()
            .filter_map(|x| x.map(|reg| reg.size))
            .fold(Size::ZERO, |acc, size| acc + size);
        // Remaining arguments are passed in chunks of the unit size
        let rest_size =
            self.rest.unit.size * self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes());

        prefix_size + rest_size
    }

    pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
        self.unaligned_size(cx).align_to(self.align(cx))
    }

    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
        self.prefix
            .iter()
            .filter_map(|x| x.map(|reg| reg.align(cx)))
            .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| {
                acc.max(align)
            })
    }

    /// Checks if these two `CastTarget`s are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool {
        let CastTarget { prefix: prefix_l, rest: rest_l, attrs: attrs_l } = self;
        let CastTarget { prefix: prefix_r, rest: rest_r, attrs: attrs_r } = other;
        prefix_l == prefix_r && rest_l == rest_r && attrs_l.eq_abi(attrs_r)
    }
}
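
// A worked example (illustrative): `CastTarget::pair(Reg::i64(), Reg::i32())`
// describes the LLVM-level type `{ i64, i32 }`. Its `unaligned_size` is
// 8 + 4 = 12 bytes; assuming a target where `i64` has 8-byte alignment,
// `align` is 8 and `size` rounds 12 up to 16 bytes.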

/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct ArgAbi<'a, Ty> {
    pub layout: TyAndLayout<'a, Ty>,
    pub mode: PassMode,
}

// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
impl<'a, Ty: fmt::Display> fmt::Debug for ArgAbi<'a, Ty> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let ArgAbi { layout, mode } = self;
        f.debug_struct("ArgAbi").field("layout", layout).field("mode", mode).finish()
    }
}

impl<'a, Ty> ArgAbi<'a, Ty> {
    /// This defines the "default ABI" for that type, which is then later adjusted in
    /// `fn_abi_adjust_for_abi`.
    pub fn new(
        cx: &impl HasDataLayout,
        layout: TyAndLayout<'a, Ty>,
        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, Scalar, Size) -> ArgAttributes,
    ) -> Self {
        let mode = match layout.backend_repr {
            BackendRepr::Uninhabited => PassMode::Ignore,
            BackendRepr::Scalar(scalar) => {
                PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO))
            }
            BackendRepr::ScalarPair(a, b) => PassMode::Pair(
                scalar_attrs(&layout, a, Size::ZERO),
                scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
            ),
            BackendRepr::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
            BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
        };
        ArgAbi { layout, mode }
    }

    fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
        let mut attrs = ArgAttributes::new();

        // For non-immediate arguments the callee gets its own copy of
        // the value on the stack, so there are no aliases. It's also
        // program-invisible, so it can't possibly capture.
        attrs
            .set(ArgAttribute::NoAlias)
            .set(ArgAttribute::NoCapture)
            .set(ArgAttribute::NonNull)
            .set(ArgAttribute::NoUndef);
        attrs.pointee_size = layout.size;
        attrs.pointee_align = Some(layout.align.abi);

        let meta_attrs = layout.is_unsized().then_some(ArgAttributes::new());

        PassMode::Indirect { attrs, meta_attrs, on_stack: false }
    }

    /// Pass this argument directly instead. Should NOT be used!
    /// Only exists because of past ABI mistakes that will take time to fix
    /// (see <https://github.com/rust-lang/rust/issues/115666>).
    pub fn make_direct_deprecated(&mut self) {
        match self.mode {
            PassMode::Indirect { .. } => {
                self.mode = PassMode::Direct(ArgAttributes::new());
            }
            PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => {} // already direct
            _ => panic!("Tried to make {:?} direct", self.mode),
        }
    }

    /// Pass this argument indirectly, by passing a (thin or wide) pointer to the argument instead.
    /// This is valid for both sized and unsized arguments.
    pub fn make_indirect(&mut self) {
        match self.mode {
            PassMode::Direct(_) | PassMode::Pair(_, _) => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            _ => panic!("Tried to make {:?} indirect", self.mode),
        }
    }

    /// Same as `make_indirect`, but for arguments that are ignored. Only needed for ABIs that pass
    /// ZSTs indirectly.
    pub fn make_indirect_from_ignore(&mut self) {
        match self.mode {
            PassMode::Ignore => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            _ => panic!("Tried to make {:?} indirect (expected `PassMode::Ignore`)", self.mode),
        }
    }

    /// Pass this argument indirectly, by placing it at a fixed stack offset.
    /// This corresponds to the `byval` LLVM argument attribute.
    /// This is only valid for sized arguments.
    ///
    /// `byval_align` specifies the alignment of the `byval` stack slot, which does not need to
    /// correspond to the type's alignment. This will be `Some` if the target's ABI specifies that
    /// stack slots used for arguments passed by-value have specific alignment requirements which
    /// differ from the alignment used in other situations.
    ///
    /// If `None`, the type's alignment is used.
    ///
    /// If the resulting alignment differs from the type's alignment,
    /// the argument will be copied to an alloca with sufficient alignment,
    /// either in the caller (if the type's alignment is lower than the byval alignment)
    /// or in the callee (if the type's alignment is higher than the byval alignment),
    /// to ensure that Rust code never sees an underaligned pointer.
    pub fn pass_by_stack_offset(&mut self, byval_align: Option<Align>) {
        assert!(!self.layout.is_unsized(), "used byval ABI for unsized layout");
        self.make_indirect();
        match self.mode {
            PassMode::Indirect { ref mut attrs, meta_attrs: _, ref mut on_stack } => {
                *on_stack = true;

                // Some platforms, like 32-bit x86, change the alignment of the type when passing
                // `byval`. Account for that.
                if let Some(byval_align) = byval_align {
                    // On all targets with byval align this is currently true, so let's assert it.
                    debug_assert!(byval_align >= Align::from_bytes(4).unwrap());
                    attrs.pointee_align = Some(byval_align);
                }
            }
            _ => unreachable!(),
        }
    }

    pub fn extend_integer_width_to(&mut self, bits: u64) {
        // Only integers have signedness
        if let BackendRepr::Scalar(scalar) = self.layout.backend_repr {
            if let Primitive::Int(i, signed) = scalar.primitive() {
                if i.size().bits() < bits {
                    if let PassMode::Direct(ref mut attrs) = self.mode {
                        if signed {
                            attrs.ext(ArgExtension::Sext)
                        } else {
                            attrs.ext(ArgExtension::Zext)
                        };
                    }
                }
            }
        }
    }

    pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32: false };
    }

    pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32 };
    }

    pub fn is_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { .. })
    }

    pub fn is_sized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ })
    }

    pub fn is_unsized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ })
    }

    pub fn is_ignore(&self) -> bool {
        matches!(self.mode, PassMode::Ignore)
    }

    /// Checks if these two `ArgAbi`s are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool
    where
        Ty: PartialEq,
    {
        // Ideally we'd just compare the `mode`, but that is not enough -- for some modes LLVM
        // will look at the type.
        self.layout.eq_abi(&other.layout) && self.mode.eq_abi(&other.mode) && {
            // `fn_arg_sanity_check` accepts `PassMode::Direct` for some aggregates.
            // That elevates any type difference to an ABI difference since we just use the
            // full Rust type as the LLVM argument/return type.
            if matches!(self.mode, PassMode::Direct(..))
                && matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
            {
                // For aggregates in `Direct` mode to be compatible, the types need to be equal.
                self.layout.ty == other.layout.ty
            } else {
                true
            }
        }
    }
}
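
// A minimal sketch (illustrative only; the real per-target passes live in the
// submodules above and are considerably more careful) of how a
// `compute_abi_info`-style function typically uses these helpers: aggregates
// go indirect, and small scalars get extended to the target's slot width.
//
//     fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
//         for arg in iter::once(&mut fn_abi.ret).chain(fn_abi.args.iter_mut()) {
//             if arg.is_ignore() {
//                 continue;
//             }
//             if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
//                 arg.make_indirect();
//             } else {
//                 arg.extend_integer_width_to(32);
//             }
//         }
//     }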

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Conv {
    // General language calling conventions, for which every target
    // should have its own backend (e.g. LLVM) support.
    C,
    Rust,

    Cold,
    PreserveMost,
    PreserveAll,

    // Target-specific calling conventions.
    ArmAapcs,
    CCmseNonSecureCall,
    CCmseNonSecureEntry,

    Msp430Intr,

    GpuKernel,

    X86Fastcall,
    X86Intr,
    X86Stdcall,
    X86ThisCall,
    X86VectorCall,

    X86_64SysV,
    X86_64Win64,

    AvrInterrupt,
    AvrNonBlockingInterrupt,

    RiscvInterrupt { kind: RiscvInterruptKind },
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum RiscvInterruptKind {
    Machine,
    Supervisor,
}

impl RiscvInterruptKind {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Machine => "machine",
            Self::Supervisor => "supervisor",
        }
    }
}

/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// The signature represented by this type may not match the MIR function signature.
/// Certain attributes, like `#[track_caller]`, can introduce additional arguments, which are
/// present in [`FnAbi`] but not in `FnSig`.
/// While this difference is rarely relevant, it should still be kept in mind.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct FnAbi<'a, Ty> {
    /// The type, layout, and information about how each argument is passed.
    pub args: Box<[ArgAbi<'a, Ty>]>,

    /// The layout, type, and the way a value is returned from this function.
    pub ret: ArgAbi<'a, Ty>,

    /// Marks this function as variadic (accepting a variable number of arguments).
    pub c_variadic: bool,

    /// The count of non-variadic arguments.
    ///
    /// Should only be different from `args.len()` when `c_variadic` is true.
    /// This can be used to know whether an argument is variadic or not.
    pub fixed_count: u32,
    /// The calling convention of this function.
    pub conv: Conv,
    /// Indicates if an unwind may happen across a call to this function.
    pub can_unwind: bool,
}
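
// For example (illustrative): for a declaration like
// `extern "C" { fn printf(fmt: *const c_char, ...) -> c_int; }`, a call site
// `printf(fmt, a, b)` produces a `FnAbi` with `args.len() == 3`,
// `fixed_count == 1`, and `c_variadic == true`; arguments at index
// `>= fixed_count` are the variadic ones.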

// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
impl<'a, Ty: fmt::Display> fmt::Debug for FnAbi<'a, Ty> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let FnAbi { args, ret, c_variadic, fixed_count, conv, can_unwind } = self;
        f.debug_struct("FnAbi")
            .field("args", args)
            .field("ret", ret)
            .field("c_variadic", c_variadic)
            .field("fixed_count", fixed_count)
            .field("conv", conv)
            .field("can_unwind", can_unwind)
            .finish()
    }
}

impl<'a, Ty> FnAbi<'a, Ty> {
    pub fn adjust_for_foreign_abi<C>(&mut self, cx: &C, abi: ExternAbi)
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec + HasWasmCAbiOpt + HasX86AbiOpt,
    {
        if abi == ExternAbi::X86Interrupt {
            if let Some(arg) = self.args.first_mut() {
                arg.pass_by_stack_offset(None);
            }
            return;
        }

        let spec = cx.target_spec();
        match &spec.arch[..] {
            "x86" => {
                let (flavor, regparm) = match abi {
                    ExternAbi::Fastcall { .. } | ExternAbi::Vectorcall { .. } => {
                        (x86::Flavor::FastcallOrVectorcall, None)
                    }
                    ExternAbi::C { .. } | ExternAbi::Cdecl { .. } | ExternAbi::Stdcall { .. } => {
                        (x86::Flavor::General, cx.x86_abi_opt().regparm)
                    }
                    _ => (x86::Flavor::General, None),
                };
                let reg_struct_return = cx.x86_abi_opt().reg_struct_return;
                let opts = x86::X86Options { flavor, regparm, reg_struct_return };
                x86::compute_abi_info(cx, self, opts);
            }
            "x86_64" => match abi {
                ExternAbi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
                ExternAbi::Win64 { .. } | ExternAbi::Vectorcall { .. } => {
                    x86_win64::compute_abi_info(cx, self)
                }
                _ => {
                    if cx.target_spec().is_like_windows {
                        x86_win64::compute_abi_info(cx, self)
                    } else {
                        x86_64::compute_abi_info(cx, self)
                    }
                }
            },
            "aarch64" | "arm64ec" => {
                let kind = if cx.target_spec().is_like_osx {
                    aarch64::AbiKind::DarwinPCS
                } else if cx.target_spec().is_like_windows {
                    aarch64::AbiKind::Win64
                } else {
                    aarch64::AbiKind::AAPCS
                };
                aarch64::compute_abi_info(cx, self, kind)
            }
            "amdgpu" => amdgpu::compute_abi_info(cx, self),
            "arm" => arm::compute_abi_info(cx, self),
            "avr" => avr::compute_abi_info(self),
            "loongarch64" => loongarch::compute_abi_info(cx, self),
            "m68k" => m68k::compute_abi_info(self),
            "csky" => csky::compute_abi_info(self),
            "mips" | "mips32r6" => mips::compute_abi_info(cx, self),
            "mips64" | "mips64r6" => mips64::compute_abi_info(cx, self),
            "powerpc" => powerpc::compute_abi_info(cx, self),
            "powerpc64" => powerpc64::compute_abi_info(cx, self),
            "s390x" => s390x::compute_abi_info(cx, self),
            "msp430" => msp430::compute_abi_info(self),
            "sparc" => sparc::compute_abi_info(cx, self),
            "sparc64" => sparc64::compute_abi_info(cx, self),
            "nvptx64" => {
                let abi = cx.target_spec().adjust_abi(abi, self.c_variadic);
                if abi == ExternAbi::PtxKernel || abi == ExternAbi::GpuKernel {
                    nvptx64::compute_ptx_kernel_abi_info(cx, self)
                } else {
                    nvptx64::compute_abi_info(self)
                }
            }
            "hexagon" => hexagon::compute_abi_info(self),
            "xtensa" => xtensa::compute_abi_info(cx, self),
            "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
            "wasm32" => {
                if spec.os == "unknown" && cx.wasm_c_abi_opt() == WasmCAbi::Legacy {
                    wasm::compute_wasm_abi_info(self)
                } else {
                    wasm::compute_c_abi_info(cx, self)
                }
            }
            "wasm64" => wasm::compute_c_abi_info(cx, self),
            "bpf" => bpf::compute_abi_info(self),
            arch => panic!("no lowering implemented for {arch}"),
        }
    }

    pub fn adjust_for_rust_abi<C>(&mut self, cx: &C, abi: ExternAbi)
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec,
    {
        let spec = cx.target_spec();
        match &spec.arch[..] {
            "x86" => x86::compute_rust_abi_info(cx, self, abi),
            "riscv32" | "riscv64" => riscv::compute_rust_abi_info(cx, self, abi),
            "loongarch64" => loongarch::compute_rust_abi_info(cx, self, abi),
            "aarch64" => aarch64::compute_rust_abi_info(cx, self),
            _ => {}
        };

        for (arg_idx, arg) in self
            .args
            .iter_mut()
            .enumerate()
            .map(|(idx, arg)| (Some(idx), arg))
            .chain(iter::once((None, &mut self.ret)))
        {
            if arg.is_ignore() {
                continue;
            }

            if arg_idx.is_none()
                && arg.layout.size > Primitive::Pointer(AddressSpace::DATA).size(cx) * 2
            {
                // Return values larger than 2 registers using a return area
                // pointer. LLVM and Cranelift disagree about how to return
                // values that don't fit in the registers designated for return
                // values. LLVM will force the entire return value to be passed
                // by return area pointer, while Cranelift will look at each
                // IR-level return value independently and decide to pass it in
                // a register or not, which would result in the return value
                // being passed partially in registers and partially through a
                // return area pointer.
                //
                // While Cranelift may need to be fixed as the LLVM behavior is
                // generally more correct with respect to the surface language,
                // forcing this behavior in rustc itself makes it easier for
                // other backends to conform to the Rust ABI, and for the C ABI
                // rustc already handles this behavior anyway.
                //
                // In addition, LLVM's decision to pass the return value in
                // registers or using a return area pointer depends on how
                // exactly the return type is lowered to an LLVM IR type. For
                // example `Option<u128>` can be lowered as `{ i128, i128 }`,
                // in which case the x86_64 backend would use a return area
                // pointer, or it could be passed as `{ i32, i128 }`, in which
                // case the x86_64 backend would pass it in registers by taking
                // advantage of an LLVM ABI extension that allows using 3
                // registers for the x86_64 sysv call conv rather than the
                // officially specified 2 registers.
                //
                // FIXME: Technically we should look at the amount of available
                // return registers rather than guessing that there are 2
                // registers for return values. In practice only a couple of
                // architectures have fewer than 2 return registers, none of
                // which are supported by Cranelift.
                //
                // NOTE: This adjustment is only necessary for the Rust ABI as
                // for other ABIs the calling convention implementations in
                // rustc_target already ensure any return value which doesn't
                // fit in the available amount of return registers is passed in
                // the right way for the current target.
                arg.make_indirect();
                continue;
            }

            match arg.layout.backend_repr {
                BackendRepr::Memory { .. } => {}

                // This is a fun case! The gist of what this is doing is
                // that we want callers and callees to always agree on the
                // ABI of how they pass SIMD arguments. If we were to *not*
                // make these arguments indirect, then they'd be immediates
                // in LLVM, which means that they'd use whatever the
                // appropriate ABI is for the callee and the caller. That
                // means, for example, if the caller doesn't have AVX
                // enabled but the callee does, then passing an AVX argument
                // across this boundary would cause corrupt data to show up.
                //
                // This problem is fixed by unconditionally passing SIMD
                // arguments through memory between callers and callees,
                // which should get them all to agree on ABI regardless of
                // target feature sets. Some more information about this
                // issue can be found in #44367.
                //
                // Note that the intrinsic ABI is exempt here as that's how
                // we connect up to LLVM and it's unstable anyway; we
                // control all calls to it in libstd.
                BackendRepr::Vector { .. }
                    if abi != ExternAbi::RustIntrinsic && spec.simd_types_indirect =>
                {
                    arg.make_indirect();
                    continue;
                }

                _ => continue,
            }
            // Compute `Aggregate` ABI.

            let is_indirect_not_on_stack =
                matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
            assert!(is_indirect_not_on_stack);

            let size = arg.layout.size;
            if !arg.layout.is_unsized() && size <= Primitive::Pointer(AddressSpace::DATA).size(cx) {
                // We want to pass small aggregates as immediates, but using
                // an LLVM aggregate type for this leads to bad optimizations,
                // so we pick an appropriately sized integer type instead.
                arg.cast_to(Reg { kind: RegKind::Integer, size });
            }
        }
    }
}
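
// A worked example (illustrative): on a 64-bit target the data pointer size is
// 8 bytes, so under the Rust ABI any return type larger than 16 bytes, e.g.
// `(u64, u64, u64)`, is forced through a return area pointer by the loop
// above, while a 16-byte `(u64, u64)` may still be returned in registers.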

impl FromStr for Conv {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "C" => Ok(Conv::C),
            "Rust" => Ok(Conv::Rust),
            "RustCold" => Ok(Conv::Rust),
            "ArmAapcs" => Ok(Conv::ArmAapcs),
            "CCmseNonSecureCall" => Ok(Conv::CCmseNonSecureCall),
            "CCmseNonSecureEntry" => Ok(Conv::CCmseNonSecureEntry),
            "Msp430Intr" => Ok(Conv::Msp430Intr),
            "X86Fastcall" => Ok(Conv::X86Fastcall),
            "X86Intr" => Ok(Conv::X86Intr),
            "X86Stdcall" => Ok(Conv::X86Stdcall),
            "X86ThisCall" => Ok(Conv::X86ThisCall),
            "X86VectorCall" => Ok(Conv::X86VectorCall),
            "X86_64SysV" => Ok(Conv::X86_64SysV),
            "X86_64Win64" => Ok(Conv::X86_64Win64),
            "GpuKernel" => Ok(Conv::GpuKernel),
            "AvrInterrupt" => Ok(Conv::AvrInterrupt),
            "AvrNonBlockingInterrupt" => Ok(Conv::AvrNonBlockingInterrupt),
            "RiscvInterrupt(machine)" => {
                Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine })
            }
            "RiscvInterrupt(supervisor)" => {
                Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor })
            }
            _ => Err(format!("'{s}' is not a valid value for entry function call convention.")),
        }
    }
}
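
// Usage sketch (illustrative): this is a plain `FromStr` impl, so both forms
// below work; unrecognized names produce the `Err(String)` above. Note that
// "RustCold" parses to `Conv::Rust`.
//
//     assert_eq!("X86_64SysV".parse::<Conv>(), Ok(Conv::X86_64SysV));
//     assert_eq!(Conv::from_str("RustCold"), Ok(Conv::Rust));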

// Some types are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(target_pointer_width = "64")]
mod size_asserts {
    use rustc_data_structures::static_assert_size;

    use super::*;
    // tidy-alphabetical-start
    static_assert_size!(ArgAbi<'_, usize>, 56);
    static_assert_size!(FnAbi<'_, usize>, 80);
    // tidy-alphabetical-end
}