rustc_target/callconv/mips64.rs

use rustc_abi::{
    BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Size, TyAbiInterface,
};

use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};

fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
    // Always sign-extend `u32` values on 64-bit MIPS: the n64 ABI keeps
    // 32-bit values sign-extended in registers regardless of signedness.
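    // For example, a `u32` argument holding 0x8000_0000 arrives as the 64-bit
    // register value 0xFFFF_FFFF_8000_0000, so we request `Sext` here instead
    // of the `Zext` an unsigned type would otherwise get.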
    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr
        && let Primitive::Int(i, signed) = scalar.primitive()
        && !signed
        && i.size().bits() == 32
        && let PassMode::Direct(ref mut attrs) = arg.mode
    {
        attrs.ext(ArgExtension::Sext);
        return;
    }

    arg.extend_integer_width_to(bits);
}

fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    match ret.layout.field(cx, i).backend_repr {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Float(Float::F32) => Some(Reg::f32()),
            Primitive::Float(Float::F64) => Some(Reg::f64()),
            _ => None,
        },
        _ => None,
    }
}

fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, offset: &mut Size)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    if !ret.layout.is_aggregate() {
        extend_integer_width_mips(ret, 64);
        return;
    }

    let size = ret.layout.size;
    let bits = size.bits();
    if bits <= 128 {
        // Unlike other architectures which return aggregates in registers, MIPS n64 limits the
        // use of float registers to structures (not unions) containing exactly one or two
        // float fields.
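        //
        // For example, a returned `struct { x: f64, y: f32 }` is cast to an
        // (f64, f32) register pair below, while `struct { x: f64, y: u64 }`
        // falls through to the uniform integer case.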

        if let FieldsShape::Arbitrary { .. } = ret.layout.fields {
            if ret.layout.fields.count() == 1 {
                if let Some(reg) = float_reg(cx, ret, 0) {
                    ret.cast_to(reg);
                    return;
                }
            } else if ret.layout.fields.count() == 2
                && let Some(reg0) = float_reg(cx, ret, 0)
                && let Some(reg1) = float_reg(cx, ret, 1)
            {
                ret.cast_to(CastTarget::pair(reg0, reg1));
                return;
            }
        }

        // Cast to a uniform int structure
        ret.cast_to(Uniform::new(Reg::i64(), size));
    } else {
        ret.make_indirect();
        *offset += cx.data_layout().pointer_size();
    }
}

fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, offset: &mut Size)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    let dl = cx.data_layout();
    let size = arg.layout.size;
    let mut prefix = [None; 8];
    let mut prefix_index = 0;

    // Detect need for padding
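    // The alignment is clamped to the 8..=16 byte range the n64 ABI
    // distinguishes; since `offset` only ever advances in multiples of
    // 8 bytes, padding is needed exactly when a 16-byte-aligned aggregate
    // would otherwise start at an odd 8-byte slot.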
    let align = Ord::clamp(arg.layout.align.abi, dl.i64_align, dl.i128_align);
    let pad_i32 = !offset.is_aligned(align);

    if !arg.layout.is_aggregate() {
        extend_integer_width_mips(arg, 64);
    } else if arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
        arg.make_indirect();
    } else {
        match arg.layout.fields {
            FieldsShape::Primitive => unreachable!(),
            FieldsShape::Array { .. } => {
                // Arrays are passed indirectly
                arg.make_indirect();
            }
            FieldsShape::Union(_) => {
                // Unions are always treated as a series of 64-bit integer chunks
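                // (e.g. `union U { a: f64, b: u64 }` goes in integer
                // registers even though one of its fields is a double).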
            }
            FieldsShape::Arbitrary { .. } => {
                // Structures are split up into a series of 64-bit integer chunks, but any aligned
                // doubles not part of another aggregate are passed as floats.
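                //
                // For example, `struct { a: u32, b: u32, c: f64 }` is passed
                // as { i64, f64 }: the two `u32`s share one integer chunk and
                // the aligned double gets a float register.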
                let mut last_offset = Size::ZERO;

                for i in 0..arg.layout.fields.count() {
                    let field = arg.layout.field(cx, i);
                    let offset = arg.layout.fields.offset(i);

                    // We only care about aligned doubles
                    if let BackendRepr::Scalar(scalar) = field.backend_repr {
                        if scalar.primitive() == Primitive::Float(Float::F64) {
                            if offset.is_aligned(dl.f64_align) {
                                // Insert enough integers to cover [last_offset, offset)
                                assert!(last_offset.is_aligned(dl.f64_align));
                                for _ in 0..((offset - last_offset).bits() / 64)
                                    .min((prefix.len() - prefix_index) as u64)
                                {
                                    prefix[prefix_index] = Some(Reg::i64());
                                    prefix_index += 1;
                                }

                                if prefix_index == prefix.len() {
                                    break;
                                }

                                prefix[prefix_index] = Some(Reg::f64());
                                prefix_index += 1;
                                last_offset = offset + Reg::f64().size;
                            }
                        }
                    }
                }
            }
        };

        // Extract the first 8 chunks as the prefix
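        // Anything beyond the 8-chunk (64-byte) prefix is passed as further
        // i64 chunks; `rest_size` is what remains of the aggregate after the
        // `prefix_index` chunks already claimed.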
        let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
        arg.cast_to_and_pad_i32(
            CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size)),
            pad_i32,
        );
    }
    *offset = offset.align_to(align) + size.align_to(align);
}

pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    // MIPS64 argument passing is also affected by the alignment of aggregates;
    // see mips.rs for how the offset is used.
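    // `offset` tracks the position within the argument area, letting each
    // argument detect whether it starts at a naturally aligned slot.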
    let mut offset = Size::ZERO;

    if !fn_abi.ret.is_ignore() && fn_abi.ret.layout.is_sized() {
        classify_ret(cx, &mut fn_abi.ret, &mut offset);
    }

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() || !arg.layout.is_sized() {
            continue;
        }
        classify_arg(cx, arg, &mut offset);
    }
}