rustc_target/callconv/
mips64.rs
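
//! Calling-convention classification for 64-bit MIPS (the n64 ABI): decides how
//! return values and arguments are integer-extended, split into register-sized
//! chunks, or passed indirectly.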

use rustc_abi::{
    BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Size, TyAbiInterface,
};

use crate::callconv::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode, Uniform,
};
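
/// Extends a scalar (non-aggregate) value to `bits` wide. On 64-bit MIPS, 32-bit
/// values are kept sign-extended in registers regardless of signedness, so an
/// unsigned 32-bit integer passed directly gets `Sext` rather than the default
/// extension; everything else falls back to the generic integer widening.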
fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
    // Always sign extend u32 values on 64-bit MIPS.
    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
        if let Primitive::Int(i, signed) = scalar.primitive() {
            if !signed && i.size().bits() == 32 {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    attrs.ext(ArgExtension::Sext);
                    return;
                }
            }
        }
    }

    arg.extend_integer_width_to(bits);
}
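
/// Returns the float register class (`f32` or `f64`) matching field `i` of the
/// value's layout, or `None` if that field is not a scalar float.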
fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    match ret.layout.field(cx, i).backend_repr {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Float(Float::F32) => Some(Reg::f32()),
            Primitive::Float(Float::F64) => Some(Reg::f64()),
            _ => None,
        },
        _ => None,
    }
}
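
/// Classifies the return value. Scalars are integer-extended to 64 bits. Aggregates
/// of at most 128 bits are returned in registers: a struct with exactly one or two
/// fields, all of them floats, comes back in float registers (e.g. a struct of two
/// `f64`s is returned as an `f64` pair), and anything else is returned as 64-bit
/// integer chunks. Larger aggregates are returned indirectly.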
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    if !ret.layout.is_aggregate() {
        extend_integer_width_mips(ret, 64);
        return;
    }

    let size = ret.layout.size;
    let bits = size.bits();
    if bits <= 128 {
        // Unlike other architectures which return aggregates in registers, MIPS n64 limits the
        // use of float registers to structures (not unions) containing exactly one or two
        // float fields.

        if let FieldsShape::Arbitrary { .. } = ret.layout.fields {
            if ret.layout.fields.count() == 1 {
                if let Some(reg) = float_reg(cx, ret, 0) {
                    ret.cast_to(reg);
                    return;
                }
            } else if ret.layout.fields.count() == 2 {
                if let Some(reg0) = float_reg(cx, ret, 0) {
                    if let Some(reg1) = float_reg(cx, ret, 1) {
                        ret.cast_to(CastTarget::pair(reg0, reg1));
                        return;
                    }
                }
            }
        }

        // Cast to a uniform int structure
        ret.cast_to(Uniform::new(Reg::i64(), size));
    } else {
        ret.make_indirect();
    }
}
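
/// Classifies an argument. Scalars are integer-extended to 64 bits and arrays are
/// passed indirectly. Unions and structs are flattened into 64-bit integer chunks,
/// except that a struct's naturally aligned `f64` fields claim `f64` register slots
/// in the prefix (for instance, a 16-byte struct with an `i32` at offset 0 and an
/// `f64` at offset 8 yields the prefix `[i64, f64]` with nothing left over). At
/// most eight prefix slots are filled; any bytes not covered by the prefix are
/// passed as further 64-bit integer chunks.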
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    if !arg.layout.is_aggregate() {
        extend_integer_width_mips(arg, 64);
        return;
    }

    let dl = cx.data_layout();
    let size = arg.layout.size;
    let mut prefix = [None; 8];
    let mut prefix_index = 0;

    match arg.layout.fields {
        FieldsShape::Primitive => unreachable!(),
        FieldsShape::Array { .. } => {
            // Arrays are passed indirectly
            arg.make_indirect();
            return;
        }
        FieldsShape::Union(_) => {
            // Unions are always treated as a series of 64-bit integer chunks
        }
        FieldsShape::Arbitrary { .. } => {
            // Structures are split up into a series of 64-bit integer chunks, but any aligned
            // doubles not part of another aggregate are passed as floats.
            let mut last_offset = Size::ZERO;

            for i in 0..arg.layout.fields.count() {
                let field = arg.layout.field(cx, i);
                let offset = arg.layout.fields.offset(i);

                // We only care about aligned doubles
                if let BackendRepr::Scalar(scalar) = field.backend_repr {
                    if scalar.primitive() == Primitive::Float(Float::F64) {
                        if offset.is_aligned(dl.f64_align.abi) {
                            // Insert enough integers to cover [last_offset, offset)
                            assert!(last_offset.is_aligned(dl.f64_align.abi));
                            for _ in 0..((offset - last_offset).bits() / 64)
                                .min((prefix.len() - prefix_index) as u64)
                            {
                                prefix[prefix_index] = Some(Reg::i64());
                                prefix_index += 1;
                            }

                            if prefix_index == prefix.len() {
                                break;
                            }

                            prefix[prefix_index] = Some(Reg::f64());
                            prefix_index += 1;
                            last_offset = offset + Reg::f64().size;
                        }
                    }
                }
            }
        }
    };

    // Extract the first (at most 8) chunks as the prefix; anything not covered by it
    // is passed as 64-bit integer chunks.
    let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
    arg.cast_to(CastTarget {
        prefix,
        rest: Uniform::new(Reg::i64(), rest_size),
        attrs: ArgAttributes {
            regular: ArgAttribute::default(),
            arg_ext: ArgExtension::None,
            pointee_size: Size::ZERO,
            pointee_align: None,
        },
    });
}
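
/// Entry point for this ABI: classifies the return value (unless ignored) and each
/// non-ignored argument using the rules above.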
pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    if !fn_abi.ret.is_ignore() {
        classify_ret(cx, &mut fn_abi.ret);
    }

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() {
            continue;
        }
        classify_arg(cx, arg);
    }
}