rustc_target/callconv/
riscv.rs

// Reference: RISC-V ELF psABI specification
// https://github.com/riscv/riscv-elf-psabi-doc
//
// Reference: Clang RISC-V ELF psABI lowering code
// https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
7use rustc_abi::{
8    BackendRepr, FieldsShape, HasDataLayout, Primitive, Reg, RegKind, Size, TyAbiInterface,
9    TyAndLayout, Variants,
10};
11
12use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};
13use crate::spec::HasTargetSpec;
14
/// Classification of one scalar leaf of a value, used while deciding whether
/// the hardware floating-point calling convention applies (see
/// `should_use_fp_conv_helper`). `offset_from_start` is the leaf's byte offset
/// from the start of the outermost value being classified.
#[derive(Copy, Clone)]
enum RegPassKind {
    /// Leaf would go in a floating-point register.
    Float { offset_from_start: Size, ty: Reg },
    /// Leaf would go in an integer register.
    Integer { offset_from_start: Size, ty: Reg },
    /// No leaf has been assigned to this slot yet.
    Unknown,
}
21
/// The register shape chosen when a value qualifies for the hardware
/// floating-point calling convention.
#[derive(Copy, Clone)]
enum FloatConv {
    /// Two float fields: passed in two FPRs (see `classify_arg`, which
    /// reserves two FPRs for this case).
    FloatPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg },
    /// A single float: passed in one FPR.
    Float(Reg),
    /// One float plus one integer field (in either order): passed in one FPR
    /// and one GPR.
    MixedPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg },
}
28
/// Error marker: the value is ineligible for the floating-point calling
/// convention and must use the integer convention instead.
#[derive(Copy, Clone)]
struct CannotUseFpConv;
31
32fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
33    match arg.layout.backend_repr {
34        BackendRepr::SimdVector { .. } => true,
35        _ => arg.layout.is_aggregate(),
36    }
37}
38
39fn should_use_fp_conv_helper<'a, Ty, C>(
40    cx: &C,
41    arg_layout: &TyAndLayout<'a, Ty>,
42    xlen: u64,
43    flen: u64,
44    field1_kind: &mut RegPassKind,
45    field2_kind: &mut RegPassKind,
46    offset_from_start: Size,
47) -> Result<(), CannotUseFpConv>
48where
49    Ty: TyAbiInterface<'a, C> + Copy,
50{
51    match arg_layout.backend_repr {
52        BackendRepr::Scalar(scalar) => match scalar.primitive() {
53            Primitive::Int(..) | Primitive::Pointer(_) => {
54                if arg_layout.size.bits() > xlen {
55                    return Err(CannotUseFpConv);
56                }
57                match (*field1_kind, *field2_kind) {
58                    (RegPassKind::Unknown, _) => {
59                        *field1_kind = RegPassKind::Integer {
60                            offset_from_start,
61                            ty: Reg { kind: RegKind::Integer, size: arg_layout.size },
62                        };
63                    }
64                    (RegPassKind::Float { .. }, RegPassKind::Unknown) => {
65                        *field2_kind = RegPassKind::Integer {
66                            offset_from_start,
67                            ty: Reg { kind: RegKind::Integer, size: arg_layout.size },
68                        };
69                    }
70                    _ => return Err(CannotUseFpConv),
71                }
72            }
73            Primitive::Float(_) => {
74                if arg_layout.size.bits() > flen {
75                    return Err(CannotUseFpConv);
76                }
77                match (*field1_kind, *field2_kind) {
78                    (RegPassKind::Unknown, _) => {
79                        *field1_kind = RegPassKind::Float {
80                            offset_from_start,
81                            ty: Reg { kind: RegKind::Float, size: arg_layout.size },
82                        };
83                    }
84                    (_, RegPassKind::Unknown) => {
85                        *field2_kind = RegPassKind::Float {
86                            offset_from_start,
87                            ty: Reg { kind: RegKind::Float, size: arg_layout.size },
88                        };
89                    }
90                    _ => return Err(CannotUseFpConv),
91                }
92            }
93        },
94        BackendRepr::SimdVector { .. } => return Err(CannotUseFpConv),
95        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
96            FieldsShape::Primitive => {
97                unreachable!("aggregates can't have `FieldsShape::Primitive`")
98            }
99            FieldsShape::Union(_) => {
100                if !arg_layout.is_zst() {
101                    if arg_layout.is_transparent() {
102                        let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1;
103                        return should_use_fp_conv_helper(
104                            cx,
105                            &non_1zst_elem,
106                            xlen,
107                            flen,
108                            field1_kind,
109                            field2_kind,
110                            offset_from_start,
111                        );
112                    }
113                    return Err(CannotUseFpConv);
114                }
115            }
116            FieldsShape::Array { count, .. } => {
117                for i in 0..count {
118                    let elem_layout = arg_layout.field(cx, 0);
119                    should_use_fp_conv_helper(
120                        cx,
121                        &elem_layout,
122                        xlen,
123                        flen,
124                        field1_kind,
125                        field2_kind,
126                        offset_from_start + elem_layout.size * i,
127                    )?;
128                }
129            }
130            FieldsShape::Arbitrary { .. } => {
131                match arg_layout.variants {
132                    Variants::Multiple { .. } => return Err(CannotUseFpConv),
133                    Variants::Single { .. } | Variants::Empty => (),
134                }
135                for i in arg_layout.fields.index_by_increasing_offset() {
136                    let field = arg_layout.field(cx, i);
137                    should_use_fp_conv_helper(
138                        cx,
139                        &field,
140                        xlen,
141                        flen,
142                        field1_kind,
143                        field2_kind,
144                        offset_from_start + arg_layout.fields.offset(i),
145                    )?;
146                }
147            }
148        },
149    }
150    Ok(())
151}
152
/// Decides whether `arg` can be passed via the hardware floating-point
/// calling convention and, if so, which register shape to use.
///
/// Returns `None` when the value must fall back to the integer convention.
fn should_use_fp_conv<'a, Ty, C>(
    cx: &C,
    arg: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
) -> Option<FloatConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    let mut field1_kind = RegPassKind::Unknown;
    let mut field2_kind = RegPassKind::Unknown;
    if should_use_fp_conv_helper(
        cx,
        arg,
        xlen,
        flen,
        &mut field1_kind,
        &mut field2_kind,
        Size::ZERO,
    )
    .is_err()
    {
        return None;
    }
    match (field1_kind, field2_kind) {
        // The helper walks fields starting at offset zero, so the first
        // classified leaf must sit at the start of the value; anything else
        // indicates a classification bug.
        (
            RegPassKind::Integer { offset_from_start, .. }
            | RegPassKind::Float { offset_from_start, .. },
            _,
        ) if offset_from_start != Size::ZERO => {
            panic!("type {:?} has a first field with non-zero offset {offset_from_start:?}", arg.ty)
        }
        // int + float => mixed pair (one GPR, one FPR).
        (
            RegPassKind::Integer { ty: first_ty, .. },
            RegPassKind::Float { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::MixedPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        // float + int => mixed pair (one FPR, one GPR).
        (
            RegPassKind::Float { ty: first_ty, .. },
            RegPassKind::Integer { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::MixedPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        // float + float => pair of FPRs.
        (
            RegPassKind::Float { ty: first_ty, .. },
            RegPassKind::Float { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::FloatPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        // A lone float => single FPR.
        (RegPassKind::Float { ty, .. }, RegPassKind::Unknown) => Some(FloatConv::Float(ty)),
        // No leaves, a lone integer, etc.: not an FP-convention candidate.
        _ => None,
    }
}
213
214fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
215where
216    Ty: TyAbiInterface<'a, C> + Copy,
217{
218    if !arg.layout.is_sized() {
219        // Not touching this...
220        return false; // I guess? return value of this function is not documented
221    }
222    if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
223        match conv {
224            FloatConv::Float(f) => {
225                arg.cast_to(f);
226            }
227            FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty } => {
228                arg.cast_to(CastTarget::offset_pair(
229                    first_ty,
230                    second_ty_offset_from_start,
231                    second_ty,
232                ));
233            }
234            FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty } => {
235                arg.cast_to(CastTarget::offset_pair(
236                    first_ty,
237                    second_ty_offset_from_start,
238                    second_ty,
239                ));
240            }
241        }
242        return false;
243    }
244
245    let total = arg.layout.size;
246
247    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
248    // the argument list with the address."
249    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
250    // replaced in the argument list with the address, as are C++ aggregates
251    // with nontrivial copy constructors, destructors, or vtables."
252    if total.bits() > 2 * xlen {
253        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
254        if is_riscv_aggregate(arg) {
255            arg.make_indirect();
256        }
257        return true;
258    }
259
260    let xlen_reg = match xlen {
261        32 => Reg::i32(),
262        64 => Reg::i64(),
263        _ => unreachable!("Unsupported XLEN: {}", xlen),
264    };
265    if is_riscv_aggregate(arg) {
266        if total.bits() <= xlen {
267            arg.cast_to(xlen_reg);
268        } else {
269            arg.cast_to(Uniform::new(xlen_reg, Size::from_bits(xlen * 2)));
270        }
271        return false;
272    }
273
274    // "When passed in registers, scalars narrower than XLEN bits are widened
275    // according to the sign of their type up to 32 bits, then sign-extended to
276    // XLEN bits."
277    extend_integer_width(arg, xlen);
278    false
279}
280
/// Classifies one argument, setting its `PassMode` and decrementing the
/// available argument-register counters (`avail_gprs`/`avail_fprs`) to
/// mirror the registers the argument consumes.
///
/// Variadic arguments (`is_vararg`) never use the FP calling convention;
/// they always go through the integer convention below.
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &mut ArgAbi<'a, Ty>,
    xlen: u64,
    flen: u64,
    is_vararg: bool,
    avail_gprs: &mut u64,
    avail_fprs: &mut u64,
) where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // FIXME: Update avail_gprs?
        // Not touching this...
        return;
    }
    if arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
        // Passed by pointer; the pointer itself occupies one GPR (if any remain).
        arg.make_indirect();
        *avail_gprs = (*avail_gprs).saturating_sub(1);
        return;
    }
    if !is_vararg {
        // Try the hardware FP convention first. Each case only applies while
        // enough registers of the required kinds remain; otherwise we fall
        // through to the integer convention below.
        match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
            Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
                *avail_fprs -= 1;
                arg.cast_to(f);
                return;
            }
            Some(FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty })
                if *avail_fprs >= 2 =>
            {
                *avail_fprs -= 2;
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
                return;
            }
            Some(FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty })
                if *avail_fprs >= 1 && *avail_gprs >= 1 =>
            {
                *avail_gprs -= 1;
                *avail_fprs -= 1;
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
                return;
            }
            _ => (),
        }
    }

    let total = arg.layout.size;
    let align = arg.layout.align.bits();

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_riscv_aggregate(arg) {
            arg.make_indirect();
        }
        // The address consumes one GPR.
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    let double_xlen_reg = match xlen {
        32 => Reg::i64(),
        64 => Reg::i128(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    if total.bits() > xlen {
        // Values wider than XLEN take a pair of GPRs. When the value's
        // alignment exceeds XLEN, a single double-width register is used for
        // the cast so the pair stays naturally aligned.
        let align_regs = align > xlen;
        if is_riscv_aggregate(arg) {
            arg.cast_to(Uniform::new(
                if align_regs { double_xlen_reg } else { xlen_reg },
                Size::from_bits(xlen * 2),
            ));
        }
        if align_regs && is_vararg {
            // Over-aligned variadic arguments start at an even register:
            // skip an odd leftover GPR, per the psABI's variadic rules.
            *avail_gprs -= *avail_gprs % 2;
        }
        if *avail_gprs >= 2 {
            *avail_gprs -= 2;
        } else {
            // Partially (or fully) passed on the stack: no GPRs remain.
            *avail_gprs = 0;
        }
        return;
    } else if is_riscv_aggregate(arg) {
        // Aggregates up to XLEN bits travel in a single GPR.
        arg.cast_to(xlen_reg);
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    if *avail_gprs >= 1 {
        extend_integer_width(arg, xlen);
        *avail_gprs -= 1;
    }
}
400
401fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
402    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr
403        && let Primitive::Int(i, _) = scalar.primitive()
404        && i.size().bits() == 32
405        && xlen > 32
406        && let PassMode::Direct(ref mut attrs) = arg.mode
407    {
408        attrs.ext(ArgExtension::Sext);
409        return;
410    }
411
412    arg.extend_integer_width_to(xlen);
413}
414
415pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
416where
417    Ty: TyAbiInterface<'a, C> + Copy,
418    C: HasDataLayout + HasTargetSpec,
419{
420    let flen = match &cx.target_spec().llvm_abiname[..] {
421        "ilp32f" | "lp64f" => 32,
422        "ilp32d" | "lp64d" => 64,
423        _ => 0,
424    };
425    let xlen = cx.data_layout().pointer_size().bits();
426
427    let mut avail_gprs = 8;
428    let mut avail_fprs = 8;
429
430    if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
431        avail_gprs -= 1;
432    }
433
434    for (i, arg) in fn_abi.args.iter_mut().enumerate() {
435        if arg.is_ignore() {
436            continue;
437        }
438        classify_arg(
439            cx,
440            arg,
441            xlen,
442            flen,
443            i >= fn_abi.fixed_count as usize,
444            &mut avail_gprs,
445            &mut avail_fprs,
446        );
447    }
448}
449
450pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
451where
452    Ty: TyAbiInterface<'a, C> + Copy,
453    C: HasDataLayout + HasTargetSpec,
454{
455    let xlen = cx.data_layout().pointer_size().bits();
456
457    for arg in fn_abi.args.iter_mut() {
458        if arg.is_ignore() {
459            continue;
460        }
461
462        // LLVM integers types do not differentiate between signed or unsigned integers.
463        // Some RISC-V instructions do not have a `.w` suffix version, they use all the
464        // XLEN bits. By explicitly setting the `signext` or `zeroext` attribute
465        // according to signedness to avoid unnecessary integer extending instructions.
466        //
467        // See https://github.com/rust-lang/rust/issues/114508 for details.
468        extend_integer_width(arg, xlen);
469    }
470}