rustc_target/callconv/loongarch.rs

use rustc_abi::{
    BackendRepr, ExternAbi, FieldsShape, HasDataLayout, Primitive, Reg, RegKind, Size,
    TyAbiInterface, TyAndLayout, Variants,
};

use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};
use crate::spec::HasTargetSpec;

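/// How a single flattened scalar field of a candidate aggregate would be
/// passed under the floating-point calling convention.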
#[derive(Copy, Clone)]
enum RegPassKind {
    Float(Reg),
    Integer(Reg),
    Unknown,
}

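/// The register pattern selected when a value qualifies for the
/// floating-point calling convention.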
#[derive(Copy, Clone)]
enum FloatConv {
    FloatPair(Reg, Reg),
    Float(Reg),
    MixedPair(Reg, Reg),
}

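/// Error marker: the value does not qualify for the floating-point calling
/// convention and must use the integer calling convention instead.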
#[derive(Copy, Clone)]
struct CannotUseFpConv;

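/// Returns `true` if the LoongArch ABI treats this value as an aggregate;
/// in addition to ordinary aggregate layouts, vectors count as aggregates
/// here.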
fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
    match arg.layout.backend_repr {
        BackendRepr::Vector { .. } => true,
        _ => arg.layout.is_aggregate(),
    }
}

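/// Recursively flattens `arg_layout` into at most two scalar fields, recorded
/// in `field1_kind` and `field2_kind`. Returns `Err(CannotUseFpConv)` as soon
/// as the layout proves incompatible with the floating-point calling
/// convention: a vector, a scalar wider than XLEN/FLEN, a multi-variant enum,
/// a non-transparent union, or a field combination the FP convention cannot
/// represent.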
fn should_use_fp_conv_helper<'a, Ty, C>(
    cx: &C,
    arg_layout: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
    field1_kind: &mut RegPassKind,
    field2_kind: &mut RegPassKind,
) -> Result<(), CannotUseFpConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    match arg_layout.backend_repr {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Int(..) | Primitive::Pointer(_) => {
                if arg_layout.size.bits() > xlen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Integer(Reg {
                            kind: RegKind::Integer,
                            size: arg_layout.size,
                        });
                    }
                    (RegPassKind::Float(_), RegPassKind::Unknown) => {
                        *field2_kind = RegPassKind::Integer(Reg {
                            kind: RegKind::Integer,
                            size: arg_layout.size,
                        });
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
            Primitive::Float(_) => {
                if arg_layout.size.bits() > flen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind =
                            RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
                    }
                    (_, RegPassKind::Unknown) => {
                        *field2_kind =
                            RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
        },
        BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
            FieldsShape::Primitive => {
                unreachable!("aggregates can't have `FieldsShape::Primitive`")
            }
            FieldsShape::Union(_) => {
                if !arg_layout.is_zst() {
                    if arg_layout.is_transparent() {
                        let non_1zst_elem = arg_layout
                            .non_1zst_field(cx)
                            .expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union")
                            .1;
                        return should_use_fp_conv_helper(
                            cx,
                            &non_1zst_elem,
                            xlen,
                            flen,
                            field1_kind,
                            field2_kind,
                        );
                    }
                    return Err(CannotUseFpConv);
                }
            }
            FieldsShape::Array { count, .. } => {
                for _ in 0..count {
                    let elem_layout = arg_layout.field(cx, 0);
                    should_use_fp_conv_helper(
                        cx,
                        &elem_layout,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                    )?;
                }
            }
            FieldsShape::Arbitrary { .. } => {
                match arg_layout.variants {
                    Variants::Multiple { .. } => return Err(CannotUseFpConv),
                    Variants::Single { .. } | Variants::Empty => (),
                }
                for i in arg_layout.fields.index_by_increasing_offset() {
                    let field = arg_layout.field(cx, i);
                    should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?;
                }
            }
        },
    }
    Ok(())
}

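/// Determines whether `arg` can be passed via the floating-point calling
/// convention as a single float, a pair of floats, or a mixed float/integer
/// pair. Returns `None` if the integer calling convention must be used.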
fn should_use_fp_conv<'a, Ty, C>(
    cx: &C,
    arg: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
) -> Option<FloatConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    let mut field1_kind = RegPassKind::Unknown;
    let mut field2_kind = RegPassKind::Unknown;
    if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() {
        return None;
    }
    match (field1_kind, field2_kind) {
        (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)),
        (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)),
        (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)),
        (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)),
        _ => None,
    }
}

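/// Classifies the return value of a function. Returns `true` if the value is
/// wider than 2✕XLEN and hence returned by reference, in which case the
/// caller must reserve a GPR for the return pointer.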
fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // Unsized return values are left untouched; they do not reserve a GPR
        // for an indirect return, so report `false`.
        return false;
    }
    if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
        match conv {
            FloatConv::Float(f) => {
                arg.cast_to(f);
            }
            FloatConv::FloatPair(l, r) => {
                arg.cast_to(CastTarget::pair(l, r));
            }
            FloatConv::MixedPair(l, r) => {
                arg.cast_to(CastTarget::pair(l, r));
            }
        }
        return false;
    }

    let total = arg.layout.size;

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_loongarch_aggregate(arg) {
            arg.make_indirect();
        }
        return true;
    }

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };
    if is_loongarch_aggregate(arg) {
        if total.bits() <= xlen {
            arg.cast_to(xlen_reg);
        } else {
            arg.cast_to(Uniform::new(xlen_reg, Size::from_bits(xlen * 2)));
        }
        return false;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    extend_integer_width(arg, xlen);
    false
}

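/// Classifies a single argument, decrementing `avail_gprs` and `avail_fprs`
/// as argument registers are consumed. Non-variadic arguments are first tried
/// against the floating-point calling convention; everything else falls back
/// to the integer calling convention.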
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &mut ArgAbi<'a, Ty>,
    xlen: u64,
    flen: u64,
    is_vararg: bool,
    avail_gprs: &mut u64,
    avail_fprs: &mut u64,
) where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // Unsized arguments are left untouched.
        return;
    }
    if !is_vararg {
        match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
            Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
                *avail_fprs -= 1;
                arg.cast_to(f);
                return;
            }
            Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => {
                *avail_fprs -= 2;
                arg.cast_to(CastTarget::pair(l, r));
                return;
            }
            Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => {
                *avail_gprs -= 1;
                *avail_fprs -= 1;
                arg.cast_to(CastTarget::pair(l, r));
                return;
            }
            _ => (),
        }
    }

    let total = arg.layout.size;
    let align = arg.layout.align.abi.bits();

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_loongarch_aggregate(arg) {
            arg.make_indirect();
        }
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    let double_xlen_reg = match xlen {
        32 => Reg::i64(),
        64 => Reg::i128(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    if total.bits() > xlen {
        let align_regs = align > xlen;
        if is_loongarch_aggregate(arg) {
            arg.cast_to(Uniform::new(
                if align_regs { double_xlen_reg } else { xlen_reg },
                Size::from_bits(xlen * 2),
            ));
        }
        if align_regs && is_vararg {
            *avail_gprs -= *avail_gprs % 2;
        }
        if *avail_gprs >= 2 {
            *avail_gprs -= 2;
        } else {
            *avail_gprs = 0;
        }
        return;
    } else if is_loongarch_aggregate(arg) {
        arg.cast_to(xlen_reg);
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    if *avail_gprs >= 1 {
        extend_integer_width(arg, xlen);
        *avail_gprs -= 1;
    }
}

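/// Extends an integer argument narrower than XLEN to full register width.
/// 32-bit integers are always sign-extended; narrower integers are extended
/// according to the sign of their type.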
fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
        if let Primitive::Int(i, _) = scalar.primitive() {
            // 32-bit integers are always sign-extended to XLEN bits,
            // regardless of the signedness of the Rust type.
            if i.size().bits() == 32 && xlen > 32 {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    attrs.ext(ArgExtension::Sext);
                    return;
                }
            }
        }
    }

    arg.extend_integer_width_to(xlen);
}

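/// Computes the C calling-convention classification for the return value and
/// every argument of `fn_abi`, with eight argument GPRs and eight argument
/// FPRs available. FLEN is derived from `llvm_abiname`: 32 for single-float
/// ABIs, 64 for double-float ABIs, and 0 otherwise (soft-float).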
pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    let xlen = cx.data_layout().pointer_size.bits();
    let flen = match &cx.target_spec().llvm_abiname[..] {
        "ilp32f" | "lp64f" => 32,
        "ilp32d" | "lp64d" => 64,
        _ => 0,
    };

    let mut avail_gprs = 8;
    let mut avail_fprs = 8;

    if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
        avail_gprs -= 1;
    }

    for (i, arg) in fn_abi.args.iter_mut().enumerate() {
        if arg.is_ignore() {
            continue;
        }
        classify_arg(
            cx,
            arg,
            xlen,
            flen,
            i >= fn_abi.fixed_count as usize,
            &mut avail_gprs,
            &mut avail_fprs,
        );
    }
}

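/// Adjusts integer arguments for the Rust ABI so that they carry explicit
/// `signext`/`zeroext` attributes, mirroring the C calling convention's
/// extension rules (see the comment below).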
pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: ExternAbi)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    if abi == ExternAbi::RustIntrinsic {
        return;
    }

    let grlen = cx.data_layout().pointer_size.bits();

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() {
            continue;
        }

        // LLVM integer types do not differentiate between signed and unsigned
        // integers, and some LoongArch instructions lack a `.w`-suffixed form,
        // operating on all GRLEN bits instead. Explicitly setting the `signext`
        // or `zeroext` attribute according to signedness avoids unnecessary
        // integer-extension instructions.
        //
        // This is similar to the RISC-V case, see
        // https://github.com/rust-lang/rust/issues/114508 for details.
        extend_integer_width(arg, grlen);
    }
}