// rustc_target/callconv/loongarch.rs

use rustc_abi::{
    BackendRepr, FieldsShape, HasDataLayout, Primitive, Reg, RegKind, Size, TyAbiInterface,
    TyAndLayout, Variants,
};

use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};
use crate::spec::HasTargetSpec;
/// Classification of one scalar leaf field found while scanning a value to
/// decide whether it is eligible for the floating-point calling convention.
#[derive(Copy, Clone)]
enum RegPassKind {
    /// A float field; `offset_from_start` is its offset from the start of the
    /// outermost value being classified.
    Float { offset_from_start: Size, ty: Reg },
    /// An integer (or pointer) field, with its offset from the start of the
    /// outermost value.
    Integer { offset_from_start: Size, ty: Reg },
    /// No field of this slot has been recorded yet.
    Unknown,
}
15
/// How a value that qualifies for the floating-point calling convention is
/// lowered (see `should_use_fp_conv`).
#[derive(Copy, Clone)]
enum FloatConv {
    /// Two float fields: the first at offset zero, the second at
    /// `second_ty_offset_from_start`.
    FloatPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg },
    /// A single float value.
    Float(Reg),
    /// One integer and one float field, in either order; the first field is
    /// always at offset zero, the second at `second_ty_offset_from_start`.
    MixedPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg },
}
22
/// Marker error: the value cannot be passed using FP registers and must fall
/// back to the integer calling convention.
#[derive(Copy, Clone)]
struct CannotUseFpConv;
25
26fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
27    match arg.layout.backend_repr {
28        BackendRepr::SimdVector { .. } => true,
29        _ => arg.layout.is_aggregate(),
30    }
31}
32
/// Recursively walks `arg_layout`, recording at most two scalar leaf fields
/// into `field1_kind`/`field2_kind` to decide whether the value is eligible
/// for the floating-point calling convention.
///
/// `offset_from_start` is the offset of `arg_layout` within the outermost
/// value being classified; it is threaded through so recorded field offsets
/// are absolute rather than relative to the current (sub-)layout.
///
/// Returns `Err(CannotUseFpConv)` as soon as the value is known to be
/// ineligible (too many fields, an oversized scalar, a vector, a
/// non-transparent union, or a multi-variant enum).
fn should_use_fp_conv_helper<'a, Ty, C>(
    cx: &C,
    arg_layout: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
    field1_kind: &mut RegPassKind,
    field2_kind: &mut RegPassKind,
    offset_from_start: Size,
) -> Result<(), CannotUseFpConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    match arg_layout.backend_repr {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Int(..) | Primitive::Pointer(_) => {
                // Integers wider than a GPR disqualify the FP convention.
                if arg_layout.size.bits() > xlen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    // First field seen: record as an integer.
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Integer {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Integer, size: arg_layout.size },
                        };
                    }
                    // A second integer is only accepted alongside a float
                    // (mixed pair); two integers are rejected.
                    (RegPassKind::Float { .. }, RegPassKind::Unknown) => {
                        *field2_kind = RegPassKind::Integer {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Integer, size: arg_layout.size },
                        };
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
            Primitive::Float(_) => {
                // Floats wider than an FPR disqualify the FP convention.
                if arg_layout.size.bits() > flen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    // First field seen: record as a float.
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Float {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Float, size: arg_layout.size },
                        };
                    }
                    // A float may follow either an integer or another float.
                    (_, RegPassKind::Unknown) => {
                        *field2_kind = RegPassKind::Float {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Float, size: arg_layout.size },
                        };
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
        },
        BackendRepr::SimdVector { .. } => {
            // Vectors never use the FP calling convention here.
            return Err(CannotUseFpConv);
        }
        BackendRepr::ScalableVector { .. } => panic!("scalable vectors are unsupported"),
        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
            FieldsShape::Primitive => {
                unreachable!("aggregates can't have `FieldsShape::Primitive`")
            }
            FieldsShape::Union(_) => {
                if !arg_layout.is_zst() {
                    if arg_layout.is_transparent() {
                        // A repr(transparent) union delegates to its single
                        // non-1-ZST field, at the same absolute offset.
                        let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1;
                        return should_use_fp_conv_helper(
                            cx,
                            &non_1zst_elem,
                            xlen,
                            flen,
                            field1_kind,
                            field2_kind,
                            offset_from_start,
                        );
                    }
                    // Any other non-ZST union is ineligible.
                    return Err(CannotUseFpConv);
                }
            }
            FieldsShape::Array { count, .. } => {
                // All array elements share the layout of field 0; classify it
                // once per element at the element's absolute offset.
                for i in 0..count {
                    let elem_layout = arg_layout.field(cx, 0);
                    should_use_fp_conv_helper(
                        cx,
                        &elem_layout,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                        offset_from_start + elem_layout.size * i,
                    )?;
                }
            }
            FieldsShape::Arbitrary { .. } => {
                // Multi-variant enums are never eligible; single-variant and
                // uninhabited layouts fall through to field classification.
                match arg_layout.variants {
                    Variants::Multiple { .. } => return Err(CannotUseFpConv),
                    Variants::Single { .. } | Variants::Empty => (),
                }
                // Visit fields in increasing-offset order so field1/field2
                // are recorded in memory order.
                for i in arg_layout.fields.index_by_increasing_offset() {
                    let field = arg_layout.field(cx, i);
                    should_use_fp_conv_helper(
                        cx,
                        &field,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                        offset_from_start + arg_layout.fields.offset(i),
                    )?;
                }
            }
        },
    }
    Ok(())
}
149
/// Determines whether `arg` can be passed via the floating-point calling
/// convention and, if so, how (single float, float pair, or mixed
/// int/float pair).
///
/// Returns `None` when the value must use the integer calling convention
/// (including the all-integer and empty cases).
fn should_use_fp_conv<'a, Ty, C>(
    cx: &C,
    arg: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
) -> Option<FloatConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    let mut field1_kind = RegPassKind::Unknown;
    let mut field2_kind = RegPassKind::Unknown;
    if should_use_fp_conv_helper(
        cx,
        arg,
        xlen,
        flen,
        &mut field1_kind,
        &mut field2_kind,
        Size::ZERO,
    )
    .is_err()
    {
        return None;
    }
    match (field1_kind, field2_kind) {
        // Invariant check: the first recorded field must sit at offset zero
        // (FloatConv only stores an offset for the second field).
        (
            RegPassKind::Integer { offset_from_start, .. }
            | RegPassKind::Float { offset_from_start, .. },
            _,
        ) if offset_from_start != Size::ZERO => {
            panic!("type {:?} has a first field with non-zero offset {offset_from_start:?}", arg.ty)
        }
        // int + float, in either order, becomes a mixed pair.
        (
            RegPassKind::Integer { ty: first_ty, .. },
            RegPassKind::Float { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::MixedPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        (
            RegPassKind::Float { ty: first_ty, .. },
            RegPassKind::Integer { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::MixedPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        // Two floats become a float pair.
        (
            RegPassKind::Float { ty: first_ty, .. },
            RegPassKind::Float { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::FloatPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        // A lone float is passed directly in one FPR.
        (RegPassKind::Float { ty, .. }, RegPassKind::Unknown) => Some(FloatConv::Float(ty)),
        // Everything else (no fields, lone integer, ...) uses integer regs.
        _ => None,
    }
}
210
/// Classifies the return value for the LoongArch C calling convention,
/// rewriting `arg`'s pass mode in place.
///
/// Returns `true` when the return is passed indirectly via a pointer in the
/// argument list and therefore consumes one argument GPR — the caller
/// (`compute_abi_info`) decrements its GPR budget on `true`.
fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // Not touching this...
        return false; // conservatively report "no GPR consumed" for unsized returns
    }
    // Eligible float/pair returns come back in FP (and possibly one integer)
    // registers; no argument GPR is consumed.
    if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
        match conv {
            FloatConv::Float(f) => {
                arg.cast_to(f);
            }
            FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty } => {
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
            }
            FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty } => {
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
            }
        }
        return false;
    }

    let total = arg.layout.size;

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_loongarch_aggregate(arg) {
            arg.make_indirect();
        }
        return true;
    }

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };
    // Aggregates that fit in one or two GPRs are returned in integer regs.
    if is_loongarch_aggregate(arg) {
        if total.bits() <= xlen {
            arg.cast_to(xlen_reg);
        } else {
            arg.cast_to(Uniform::new(xlen_reg, Size::from_bits(xlen * 2)));
        }
        return false;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    extend_integer_width(arg, xlen);
    false
}
277
/// Classifies one argument for the LoongArch C calling convention, rewriting
/// `arg`'s pass mode in place and decrementing `avail_gprs`/`avail_fprs` to
/// account for the registers the argument consumes.
///
/// `is_vararg` is true for arguments past `fixed_count`; variadic arguments
/// never use the FP calling convention.
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &mut ArgAbi<'a, Ty>,
    xlen: u64,
    flen: u64,
    is_vararg: bool,
    avail_gprs: &mut u64,
    avail_fprs: &mut u64,
) where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // Not touching this...
        return;
    }
    // Types forced indirect in non-Rust ABIs are passed as a pointer,
    // consuming one GPR.
    if arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
        arg.make_indirect();
        *avail_gprs = (*avail_gprs).saturating_sub(1);
        return;
    }
    // Try the FP calling convention first — but only for fixed (non-variadic)
    // arguments, and only when enough FPRs/GPRs remain; otherwise fall
    // through to the integer convention below.
    if !is_vararg {
        match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
            Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
                *avail_fprs -= 1;
                arg.cast_to(f);
                return;
            }
            Some(FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty })
                if *avail_fprs >= 2 =>
            {
                *avail_fprs -= 2;
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
                return;
            }
            Some(FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty })
                if *avail_fprs >= 1 && *avail_gprs >= 1 =>
            {
                *avail_gprs -= 1;
                *avail_fprs -= 1;
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
                return;
            }
            _ => (),
        }
    }

    let total = arg.layout.size;
    let align = arg.layout.align.bits();

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_loongarch_aggregate(arg) {
            arg.make_indirect();
        }
        // The pointer replacing the value consumes one GPR, if any remain.
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    let double_xlen_reg = match xlen {
        32 => Reg::i64(),
        64 => Reg::i128(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    if total.bits() > xlen {
        let align_regs = align > xlen;
        // Two-register aggregates: use a single double-width register when
        // over-aligned, otherwise a pair of XLEN registers.
        if is_loongarch_aggregate(arg) {
            arg.cast_to(Uniform::new(
                if align_regs { double_xlen_reg } else { xlen_reg },
                Size::from_bits(xlen * 2),
            ));
        }
        // Over-aligned variadic arguments: skip a GPR if needed so the value
        // starts at an even register (round remaining GPRs down to even).
        if align_regs && is_vararg {
            *avail_gprs -= *avail_gprs % 2;
        }
        // Consume two GPRs (or whatever remains, spilling the rest to stack).
        if *avail_gprs >= 2 {
            *avail_gprs -= 2;
        } else {
            *avail_gprs = 0;
        }
        return;
    } else if is_loongarch_aggregate(arg) {
        // Aggregates fitting in a single GPR.
        arg.cast_to(xlen_reg);
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    if *avail_gprs >= 1 {
        extend_integer_width(arg, xlen);
        *avail_gprs -= 1;
    }
}
396
397fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
398    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr
399        && let Primitive::Int(i, _) = scalar.primitive()
400        && i.size().bits() == 32
401        && xlen > 32
402        && let PassMode::Direct(ref mut attrs) = arg.mode
403    {
404        // 32-bit integers are always sign-extended
405        attrs.ext(ArgExtension::Sext);
406        return;
407    }
408
409    arg.extend_integer_width_to(xlen);
410}
411
412pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
413where
414    Ty: TyAbiInterface<'a, C> + Copy,
415    C: HasDataLayout + HasTargetSpec,
416{
417    let xlen = cx.data_layout().pointer_size().bits();
418    let flen = match &cx.target_spec().llvm_abiname[..] {
419        "ilp32f" | "lp64f" => 32,
420        "ilp32d" | "lp64d" => 64,
421        _ => 0,
422    };
423
424    let mut avail_gprs = 8;
425    let mut avail_fprs = 8;
426
427    if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
428        avail_gprs -= 1;
429    }
430
431    for (i, arg) in fn_abi.args.iter_mut().enumerate() {
432        if arg.is_ignore() {
433            continue;
434        }
435        classify_arg(
436            cx,
437            arg,
438            xlen,
439            flen,
440            i >= fn_abi.fixed_count as usize,
441            &mut avail_gprs,
442            &mut avail_fprs,
443        );
444    }
445}
446
447pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
448where
449    Ty: TyAbiInterface<'a, C> + Copy,
450    C: HasDataLayout + HasTargetSpec,
451{
452    let grlen = cx.data_layout().pointer_size().bits();
453
454    for arg in fn_abi.args.iter_mut() {
455        if arg.is_ignore() {
456            continue;
457        }
458
459        // LLVM integers types do not differentiate between signed or unsigned integers.
460        // Some LoongArch instructions do not have a `.w` suffix version, they use all the
461        // GRLEN bits. By explicitly setting the `signext` or `zeroext` attribute
462        // according to signedness to avoid unnecessary integer extending instructions.
463        //
464        // This is similar to the RISC-V case, see
465        // https://github.com/rust-lang/rust/issues/114508 for details.
466        extend_integer_width(arg, grlen);
467    }
468}