use crate::abi::call::{Align, ArgAbi, FnAbi, Reg, RegKind, Uniform};
use crate::abi::{Endian, HasDataLayout, TyAbiInterface};
use crate::spec::HasTargetSpec;
#[derive(Debug, Clone, Copy, PartialEq)]
enum ABI {
    ELFv1, // original ABI used for powerpc64 (big-endian)
    ELFv2, // newer ABI used for powerpc64le and all musl targets
}
use ABI::*;
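
/// Returns a `Uniform` of the aggregate's unit register if `arg` is a
/// homogeneous float or vector aggregate that the given ABI passes in registers.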
fn is_homogeneous_aggregate<'a, Ty, C>(
    cx: &C,
    arg: &mut ArgAbi<'a, Ty>,
    abi: ABI,
) -> Option<Uniform>
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
        // ELFv1 only passes one-member aggregates transparently;
        // ELFv2 passes aggregates of up to eight members in registers.
        if (abi == ELFv1 && arg.layout.size > unit.size)
            || arg.layout.size > unit.size.checked_mul(8, cx).unwrap()
        {
            return None;
        }

        let valid_unit = match unit.kind {
            RegKind::Integer => false,
            RegKind::Float => true,
            RegKind::Vector => arg.layout.size.bits() == 128,
        };

        valid_unit.then_some(Uniform::consecutive(unit, arg.layout.size))
    })
}
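
/// Classifies a single argument, or the return value when `is_ret` is true,
/// for the 64-bit PowerPC ELF calling convention.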
fn classify<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, abi: ABI, is_ret: bool)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    if arg.is_ignore() || !arg.layout.is_sized() {
        return;
    }

    // Non-aggregates are passed directly; small integers are extended to 64 bits.
    if !arg.layout.is_aggregate() {
        arg.extend_integer_width_to(64);
        return;
    }
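
    // The ELFv1 ABI does not return aggregates in registers.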
    if is_ret && abi == ELFv1 {
        arg.make_indirect();
        return;
    }
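
    // Homogeneous float/vector aggregates are passed directly as a uniform
    // sequence of their unit register.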
    if let Some(uniform) = is_homogeneous_aggregate(cx, arg, abi) {
        arg.cast_to(uniform);
        return;
    }

    let size = arg.layout.size;
    if is_ret && size.bits() > 128 {
        // Non-homogeneous aggregates larger than two doublewords are returned indirectly.
        arg.make_indirect();
    } else if size.bits() <= 64 {
        // Aggregates that fit in a single doubleword are passed in one integer register.
        arg.cast_to(Reg { kind: RegKind::Integer, size })
    } else {
        // Larger aggregates are padded at the tail to a whole number of i64s
        // (or i128s when the aggregate alignment exceeds 8 bytes) and passed as an array.
        let reg = if arg.layout.align.abi.bytes() > 8 { Reg::i128() } else { Reg::i64() };
        arg.cast_to(Uniform::consecutive(
            reg,
            size.align_to(Align::from_bytes(reg.size.bytes()).unwrap()),
        ))
    };
}
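
/// Computes calling-convention information for the return value and every
/// argument of `fn_abi`, selecting ELFv1 or ELFv2 based on the target.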
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    // musl targets always use ELFv2; otherwise the ABI follows the endianness:
    // big-endian powerpc64 uses ELFv1 and little-endian uses ELFv2.
    let abi = if cx.target_spec().env == "musl" {
        ELFv2
    } else {
        match cx.data_layout().endian {
            Endian::Big => ELFv1,
            Endian::Little => ELFv2,
        }
    };

    classify(cx, &mut fn_abi.ret, abi, true);

    for arg in fn_abi.args.iter_mut() {
        classify(cx, arg, abi, false);
    }
}