// rustc_target/callconv/aarch64.rs

1use std::iter;
2
3use rustc_abi::{BackendRepr, HasDataLayout, Primitive, TyAbiInterface};
4
5use crate::callconv::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
6use crate::spec::{HasTargetSpec, Target};
7
/// Indicates the variant of the AArch64 ABI we are compiling for.
/// Used to accommodate Apple and Microsoft's deviations from the usual AAPCS ABI.
///
/// Corresponds to Clang's `AArch64ABIInfo::ABIKind`.
#[derive(Copy, Clone, PartialEq)]
pub(crate) enum AbiKind {
    /// The standard AAPCS64 procedure call standard.
    AAPCS,
    /// Apple's variant (Darwin platforms); differs e.g. in small-integer extension rules.
    DarwinPCS,
    /// Microsoft's ARM64 Windows calling convention.
    Win64,
}
18
19fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
20where
21    Ty: TyAbiInterface<'a, C> + Copy,
22    C: HasDataLayout + HasTargetSpec,
23{
24    arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
25        let size = arg.layout.size;
26
27        // Ensure we have at most four uniquely addressable members.
28        if size > unit.size.checked_mul(4, cx).unwrap() {
29            return None;
30        }
31
32        let valid_unit = match unit.kind {
33            RegKind::Integer => false,
34            // The softfloat ABI treats floats like integers, so they
35            // do not get homogeneous aggregate treatment.
36            RegKind::Float => cx.target_spec().abi != "softfloat",
37            RegKind::Vector => size.bits() == 64 || size.bits() == 128,
38        };
39
40        valid_unit.then_some(Uniform::consecutive(unit, size))
41    })
42}
43
/// On "softfloat" targets, rewrites float arguments/returns so they are passed
/// in integer registers (or indirectly) instead of float registers.
/// No-op for any other target ABI.
fn softfloat_float_abi<Ty>(target: &Target, arg: &mut ArgAbi<'_, Ty>) {
    if target.abi != "softfloat" {
        return;
    }
    // Do *not* use the float registers for passing arguments, as that would make LLVM pick the ABI
    // and its choice depends on whether `neon` instructions are enabled. Instead, we follow the
    // AAPCS "softfloat" ABI, which specifies that floats should be passed as equivalently-sized
    // integers. Nominally this only exists for "R" profile chips, but sometimes people don't want
    // to use hardfloats even if the hardware supports them, so we do this for all softfloat
    // targets.
    if let BackendRepr::Scalar(s) = arg.layout.backend_repr
        && let Primitive::Float(f) = s.primitive()
    {
        // Single float scalar: pass as a same-sized integer.
        arg.cast_to(Reg { kind: RegKind::Integer, size: f.size() });
    } else if let BackendRepr::ScalarPair(s1, s2) = arg.layout.backend_repr
        && (matches!(s1.primitive(), Primitive::Float(_))
            || matches!(s2.primitive(), Primitive::Float(_)))
    {
        // This case can only be reached for the Rust ABI, so we can do whatever we want here as
        // long as it does not depend on target features (i.e., as long as we do not use float
        // registers). So we pass small things in integer registers and large things via pointer
        // indirection. This means we lose the nice "pass it as two arguments" optimization, but we
        // currently just have no way to combine a `PassMode::Cast` with that optimization (and we
        // need a cast since we want to pass the float as an int).
        if arg.layout.size.bits() <= target.pointer_width.into() {
            arg.cast_to(Reg { kind: RegKind::Integer, size: arg.layout.size });
        } else {
            arg.make_indirect();
        }
    }
}
75
76fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, kind: AbiKind)
77where
78    Ty: TyAbiInterface<'a, C> + Copy,
79    C: HasDataLayout + HasTargetSpec,
80{
81    if !ret.layout.is_sized() {
82        // Not touching this...
83        return;
84    }
85    if !ret.layout.is_aggregate() {
86        if kind == AbiKind::DarwinPCS {
87            // On Darwin, when returning an i8/i16, it must be sign-extended to 32 bits,
88            // and likewise a u8/u16 must be zero-extended to 32-bits.
89            // See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
90            ret.extend_integer_width_to(32)
91        }
92        softfloat_float_abi(cx.target_spec(), ret);
93        return;
94    }
95    if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
96        ret.cast_to(uniform);
97        return;
98    }
99    let size = ret.layout.size;
100    let bits = size.bits();
101    if bits <= 128 {
102        ret.cast_to(Uniform::new(Reg::i64(), size));
103        return;
104    }
105    ret.make_indirect();
106}
107
108fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, kind: AbiKind)
109where
110    Ty: TyAbiInterface<'a, C> + Copy,
111    C: HasDataLayout + HasTargetSpec,
112{
113    if !arg.layout.is_sized() {
114        // Not touching this...
115        return;
116    }
117    if !arg.layout.is_aggregate() {
118        if kind == AbiKind::DarwinPCS {
119            // On Darwin, when passing an i8/i16, it must be sign-extended to 32 bits,
120            // and likewise a u8/u16 must be zero-extended to 32-bits.
121            // See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
122            arg.extend_integer_width_to(32);
123        }
124        softfloat_float_abi(cx.target_spec(), arg);
125
126        return;
127    }
128    if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
129        arg.cast_to(uniform);
130        return;
131    }
132    let size = arg.layout.size;
133    let align = if kind == AbiKind::AAPCS {
134        // When passing small aggregates by value, the AAPCS ABI mandates using the unadjusted
135        // alignment of the type (not including `repr(align)`).
136        // This matches behavior of `AArch64ABIInfo::classifyArgumentType` in Clang.
137        // See: <https://github.com/llvm/llvm-project/blob/5e691a1c9b0ad22689d4a434ddf4fed940e58dec/clang/lib/CodeGen/TargetInfo.cpp#L5816-L5823>
138        arg.layout.unadjusted_abi_align
139    } else {
140        arg.layout.align.abi
141    };
142    if size.bits() <= 128 {
143        if align.bits() == 128 {
144            arg.cast_to(Uniform::new(Reg::i128(), size));
145        } else {
146            arg.cast_to(Uniform::new(Reg::i64(), size));
147        }
148        return;
149    }
150    arg.make_indirect();
151}
152
153pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, kind: AbiKind)
154where
155    Ty: TyAbiInterface<'a, C> + Copy,
156    C: HasDataLayout + HasTargetSpec,
157{
158    if !fn_abi.ret.is_ignore() {
159        classify_ret(cx, &mut fn_abi.ret, kind);
160    }
161
162    for arg in fn_abi.args.iter_mut() {
163        if arg.is_ignore() {
164            continue;
165        }
166        classify_arg(cx, arg, kind);
167    }
168}
169
170pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
171where
172    Ty: TyAbiInterface<'a, C> + Copy,
173    C: HasDataLayout + HasTargetSpec,
174{
175    for arg in fn_abi.args.iter_mut().chain(iter::once(&mut fn_abi.ret)) {
176        softfloat_float_abi(cx.target_spec(), arg);
177    }
178}