rustc_target/callconv/aarch64.rs

use std::iter;

use rustc_abi::{BackendRepr, HasDataLayout, Primitive, TyAbiInterface};

use crate::callconv::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
use crate::spec::{Abi, HasTargetSpec, Target};

/// Indicates the variant of the AArch64 ABI we are compiling for.
/// Used to accommodate Apple and Microsoft's deviations from the usual AAPCS ABI.
///
/// Corresponds to Clang's `AArch64ABIInfo::ABIKind`.
#[derive(Copy, Clone, Debug, PartialEq)]
pub(crate) enum AbiKind {
    AAPCS,
    DarwinPCS,
    Win64,
}

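// How callers are expected to pick an `AbiKind` (a sketch of the dispatch that lives
// outside this module; the exact predicate names here are an assumption):
//
//     let kind = if cx.target_spec().is_like_darwin {
//         AbiKind::DarwinPCS
//     } else if cx.target_spec().is_like_windows {
//         AbiKind::Win64
//     } else {
//         AbiKind::AAPCS
//     };
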
#[tracing::instrument(skip(cx), level = "debug")]
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
        let size = arg.layout.size;

        // Ensure we have at most four uniquely addressable members.
        if size > unit.size.checked_mul(4, cx).unwrap() {
            return None;
        }

        let valid_unit = match unit.kind {
            RegKind::Integer => false,
            // The softfloat ABI treats floats like integers, so they
            // do not get homogeneous aggregate treatment.
            RegKind::Float => cx.target_spec().abi != Abi::SoftFloat,
            RegKind::Vector => size.bits() == 64 || size.bits() == 128,
        };

        valid_unit.then_some(Uniform::consecutive(unit, size))
    })
}

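// Worked example (a sketch, assuming the AAPCS64 HFA rules): for
// `#[repr(C)] struct S { a: f64, b: f64, c: f64 }`, the unit is `Reg::f64()`, the
// 24-byte size fits within 4 * 8 bytes, and a float unit is valid on hardfloat
// targets, so the result is `Uniform::consecutive(f64, 24 bytes)` and `S` travels
// in registers d0..d2. A struct of five `f64`s (40 > 32 bytes) or one mixing `f32`
// and `f64` fails these checks and falls through to the generic aggregate handling.
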
fn softfloat_float_abi<Ty>(target: &Target, arg: &mut ArgAbi<'_, Ty>) {
    if target.abi != Abi::SoftFloat {
        return;
    }
    // Do *not* use the float registers for passing arguments, as that would make LLVM pick the ABI
    // and its choice depends on whether `neon` instructions are enabled. Instead, we follow the
    // AAPCS "softfloat" ABI, which specifies that floats should be passed as equivalently-sized
    // integers. Nominally this only exists for "R" profile chips, but sometimes people don't want
    // to use hardfloats even if the hardware supports them, so we do this for all softfloat
    // targets.
    if let BackendRepr::Scalar(s) = arg.layout.backend_repr
        && let Primitive::Float(f) = s.primitive()
    {
        arg.cast_to(Reg { kind: RegKind::Integer, size: f.size() });
    } else if let BackendRepr::ScalarPair(s1, s2) = arg.layout.backend_repr
        && (matches!(s1.primitive(), Primitive::Float(_))
            || matches!(s2.primitive(), Primitive::Float(_)))
    {
        // This case can only be reached for the Rust ABI, so we can do whatever we want here as
        // long as it does not depend on target features (i.e., as long as we do not use float
        // registers). So we pass small things in integer registers and large things via pointer
        // indirection. This means we lose the nice "pass it as two arguments" optimization, but we
        // currently just have no way to combine a `PassMode::Cast` with that optimization (and we
        // need a cast since we want to pass the float as an int).
        if arg.layout.size.bits() <= target.pointer_width.into() {
            arg.cast_to(Reg { kind: RegKind::Integer, size: arg.layout.size });
        } else {
            arg.make_indirect();
        }
    }
}

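// Example effect (a sketch): on a softfloat target such as
// `aarch64-unknown-none-softfloat`, an `f64` argument is cast to an 8-byte integer
// `Reg` and passed in an x register. A Rust-ABI scalar pair like `(f32, f32)`
// (8 bytes total) is likewise cast to a single 8-byte integer, while `(f64, f64)`
// (16 bytes) exceeds the 64-bit pointer width and is passed indirectly.
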
#[tracing::instrument(skip(cx), level = "debug")]
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, kind: AbiKind)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    if !ret.layout.is_sized() || ret.layout.is_scalable_vector() {
        // Not touching this...
        return;
    }
    if !ret.layout.is_aggregate() {
        if kind == AbiKind::DarwinPCS {
            // On Darwin, when returning an i8/i16, it must be sign-extended to 32 bits,
            // and likewise a u8/u16 must be zero-extended to 32 bits.
            // See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
            ret.extend_integer_width_to(32)
        }
        softfloat_float_abi(cx.target_spec(), ret);
        return;
    }
    if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
        ret.cast_to(uniform);
        return;
    }
    let size = ret.layout.size;
    let bits = size.bits();
    if bits <= 128 {
        ret.cast_to(Uniform::new(Reg::i64(), size));
        return;
    }
    ret.make_indirect();
}

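// Example outcomes (a sketch): a 12-byte `#[repr(C)]` struct of three `u32`s is cast
// to `Uniform::new(Reg::i64(), 12 bytes)` and comes back in x0/x1, while a 24-byte
// non-homogeneous struct exceeds 128 bits and is returned indirectly through the
// caller-provided buffer whose address AAPCS64 places in x8.
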
#[tracing::instrument(skip(cx), level = "debug")]
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, kind: AbiKind)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    if !arg.layout.is_sized() || arg.layout.is_scalable_vector() {
        // Not touching this...
        return;
    }
    if arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
        arg.make_indirect();
        return;
    }
    if !arg.layout.is_aggregate() {
        if kind == AbiKind::DarwinPCS {
            // On Darwin, when passing an i8/i16, it must be sign-extended to 32 bits,
            // and likewise a u8/u16 must be zero-extended to 32 bits.
            // See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
            arg.extend_integer_width_to(32);
        }
        softfloat_float_abi(cx.target_spec(), arg);
        return;
    }
    if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
        arg.cast_to(uniform);
        return;
    }
    let size = arg.layout.size;
    let align = if kind == AbiKind::AAPCS {
        // When passing small aggregates by value, the AAPCS ABI mandates using the unadjusted
        // alignment of the type (not including `repr(align)`).
        // This matches the behavior of `AArch64ABIInfo::classifyArgumentType` in Clang.
        // See: <https://github.com/llvm/llvm-project/blob/5e691a1c9b0ad22689d4a434ddf4fed940e58dec/clang/lib/CodeGen/TargetInfo.cpp#L5816-L5823>
        arg.layout.unadjusted_abi_align
    } else {
        arg.layout.align.abi
    };
    if size.bits() <= 128 {
        if align.bits() == 128 {
            arg.cast_to(Uniform::new(Reg::i128(), size));
        } else {
            arg.cast_to(Uniform::new(Reg::i64(), size));
        }
        return;
    }
    arg.make_indirect();
}

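// Example of the alignment distinction (a sketch): for
// `#[repr(C, align(16))] struct S { x: u64 }` (size 16, natural alignment 8), AAPCS
// uses the unadjusted alignment of 8 and casts to `Uniform::new(Reg::i64(), 16 bytes)`,
// i.e. an ordinary register pair; Darwin and Win64 honor the `repr(align)` value of 16
// and cast to `Uniform::new(Reg::i128(), 16 bytes)`, forcing an even-aligned pair.
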
pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, kind: AbiKind)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    if !fn_abi.ret.is_ignore() {
        classify_ret(cx, &mut fn_abi.ret, kind);
    }

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() {
            continue;
        }
        classify_arg(cx, arg, kind);
    }
}

pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    for arg in fn_abi.args.iter_mut().chain(iter::once(&mut fn_abi.ret)) {
        softfloat_float_abi(cx.target_spec(), arg);
    }
}
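
// Note on the Rust ABI (a sketch of the observable effect): `compute_rust_abi_info`
// applies only the softfloat fixup, so on e.g. `aarch64-unknown-none-softfloat` an
// `extern "Rust" fn(f64) -> f64` passes and returns its value in integer registers;
// on hardfloat targets the Rust ABI is left entirely to the generic logic.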