rustc_target/callconv/aarch64.rs
use std::iter;
use rustc_abi::{BackendRepr, Primitive};
use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
use crate::abi::{HasDataLayout, TyAbiInterface};
use crate::spec::{HasTargetSpec, Target};
/// Indicates the variant of the AArch64 ABI we are compiling for.
/// Used to accommodate Apple's and Microsoft's deviations from the usual AAPCS ABI.
///
/// Corresponds to Clang's `AArch64ABIInfo::ABIKind`.
#[derive(Copy, Clone, PartialEq)]
pub(crate) enum AbiKind {
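/// The standard AAPCS64 calling convention, used by most AArch64 targets.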
AAPCS,
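/// Apple's arm64 variant, which among other things extends small integer
/// arguments and return values to 32 bits at the ABI boundary.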
DarwinPCS,
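/// Microsoft's AArch64 variant, used by Windows targets.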
Win64,
}
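
/// Checks whether `arg` is a homogeneous floating-point or short-vector aggregate
/// (HFA/HVA): an aggregate whose flattened members all share one floating-point or
/// vector type, with at most four members. AAPCS64 passes such aggregates in
/// consecutive SIMD/FP registers.
///
/// Illustrative example: `struct Rgba(f32, f32, f32, f32)` is an HFA of four `f32`
/// members, while a struct of five `f32`s, or one mixing `f32` and `f64`, is not.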
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
let size = arg.layout.size;
// Ensure we have at most four uniquely addressable members.
if size > unit.size.checked_mul(4, cx).unwrap() {
return None;
}
let valid_unit = match unit.kind {
RegKind::Integer => false,
// The softfloat ABI treats floats like integers, so they
// do not get homogeneous aggregate treatment.
RegKind::Float => cx.target_spec().abi != "softfloat",
RegKind::Vector => size.bits() == 64 || size.bits() == 128,
};
valid_unit.then_some(Uniform::consecutive(unit, size))
})
}
fn softfloat_float_abi<Ty>(target: &Target, arg: &mut ArgAbi<'_, Ty>) {
if target.abi != "softfloat" {
return;
}
// Do *not* use the float registers for passing arguments, as that would make LLVM pick the ABI
// and its choice depends on whether `neon` instructions are enabled. Instead, we follow the
// AAPCS "softfloat" ABI, which specifies that floats should be passed as equivalently-sized
// integers. Nominally this only exists for "R" profile chips, but sometimes people don't want
// to use hardfloats even if the hardware supports them, so we do this for all softfloat
// targets.
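// Illustrative example: a bare `f64` argument is cast to a 64-bit integer register
// here, and an `f32` to a 32-bit one.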
if let BackendRepr::Scalar(s) = arg.layout.backend_repr
&& let Primitive::Float(f) = s.primitive()
{
arg.cast_to(Reg { kind: RegKind::Integer, size: f.size() });
} else if let BackendRepr::ScalarPair(s1, s2) = arg.layout.backend_repr
&& (matches!(s1.primitive(), Primitive::Float(_))
|| matches!(s2.primitive(), Primitive::Float(_)))
{
// This case can only be reached for the Rust ABI, so we can do whatever we want here as
// long as it does not depend on target features (i.e., as long as we do not use float
// registers). So we pass small things in integer registers and large things via pointer
// indirection. This means we lose the nice "pass it as two arguments" optimization, but we
currently just have no way to combine a `PassMode::Cast` with that optimization (and we
// need a cast since we want to pass the float as an int).
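// Illustrative example (with 64-bit pointers): `(f32, f32)` is 64 bits, so it is
// cast to a single 64-bit integer; `(f64, f64)` is 128 bits and is passed
// indirectly.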
if arg.layout.size.bits() <= target.pointer_width.into() {
arg.cast_to(Reg { kind: RegKind::Integer, size: arg.layout.size });
} else {
arg.make_indirect();
}
}
}
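
/// Classifies how the return value is passed back to the caller: scalars stay as
/// they are (modulo Darwin's extension rule and the softfloat fixup), homogeneous
/// aggregates use SIMD/FP registers, other aggregates of up to 128 bits are
/// returned in up to two `i64` registers, and anything larger is returned
/// indirectly through memory.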
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, kind: AbiKind)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
if !ret.layout.is_sized() {
// Not touching this...
return;
}
if !ret.layout.is_aggregate() {
if kind == AbiKind::DarwinPCS {
// On Darwin, when returning an i8/i16, it must be sign-extended to 32 bits,
// and likewise a u8/u16 must be zero-extended to 32 bits.
// See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
ret.extend_integer_width_to(32)
}
softfloat_float_abi(cx.target_spec(), ret);
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
ret.cast_to(uniform);
return;
}
let size = ret.layout.size;
let bits = size.bits();
if bits <= 128 {
ret.cast_to(Uniform::new(Reg::i64(), size));
return;
}
ret.make_indirect();
}
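
/// Classifies how a single argument is passed. This mirrors `classify_ret`, except
/// that the register unit chosen for small non-homogeneous aggregates additionally
/// depends on the (possibly unadjusted) alignment of the type.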
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, kind: AbiKind)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
if !arg.layout.is_sized() {
// Not touching this...
return;
}
if !arg.layout.is_aggregate() {
if kind == AbiKind::DarwinPCS {
// On Darwin, when passing an i8/i16, it must be sign-extended to 32 bits,
// and likewise a u8/u16 must be zero-extended to 32 bits.
// See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
arg.extend_integer_width_to(32);
}
softfloat_float_abi(cx.target_spec(), arg);
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
arg.cast_to(uniform);
return;
}
let size = arg.layout.size;
let align = if kind == AbiKind::AAPCS {
// When passing small aggregates by value, the AAPCS ABI mandates using the unadjusted
// alignment of the type (not including `repr(align)`).
// This matches behavior of `AArch64ABIInfo::classifyArgumentType` in Clang.
// See: <https://github.com/llvm/llvm-project/blob/5e691a1c9b0ad22689d4a434ddf4fed940e58dec/clang/lib/CodeGen/TargetInfo.cpp#L5816-L5823>
arg.layout.unadjusted_abi_align
} else {
arg.layout.align.abi
};
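// Illustrative example: `#[repr(align(16))] struct Over(u64)` has size 16 and an
// adjusted alignment of 16, but an unadjusted alignment of 8. Under `AbiKind::AAPCS`
// it is therefore passed as two `i64`s, while the other ABI kinds honor the adjusted
// alignment and pass it as a single `i128`.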
if size.bits() <= 128 {
if align.bits() == 128 {
arg.cast_to(Uniform::new(Reg::i128(), size));
} else {
arg.cast_to(Uniform::new(Reg::i64(), size));
}
return;
}
arg.make_indirect();
}
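
/// Computes the C ABI classification for the return value and every non-ignored
/// argument of `fn_abi`, according to the given AArch64 ABI `kind`.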
pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, kind: AbiKind)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret, kind);
}
for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
classify_arg(cx, arg, kind);
}
}
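
/// For the Rust ABI, only the softfloat fixup is applied: on softfloat targets,
/// floats must not be passed or returned in float registers, regardless of the
/// calling convention.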
pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
for arg in fn_abi.args.iter_mut().chain(iter::once(&mut fn_abi.ret)) {
softfloat_float_abi(cx.target_spec(), arg);
}
}