// rustc_target/callconv/riscv.rs

// Reference: RISC-V ELF psABI specification
// https://github.com/riscv/riscv-elf-psabi-doc
//
// Reference: Clang RISC-V ELF psABI lowering code
// https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
7use rustc_abi::{
8    BackendRepr, FieldsShape, HasDataLayout, Primitive, Reg, RegKind, Size, TyAbiInterface,
9    TyAndLayout, Variants,
10};
11
12use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};
13use crate::spec::HasTargetSpec;
14
/// How one scalar leaf field of a candidate type would be passed when
/// attempting the hardware floating-point calling convention.
#[derive(Copy, Clone)]
enum RegPassKind {
    /// Passed in a floating-point register; `offset_from_start` is the field's
    /// offset within the outermost type being classified.
    Float { offset_from_start: Size, ty: Reg },
    /// Passed in an integer register.
    Integer { offset_from_start: Size, ty: Reg },
    /// No field has been recorded in this slot yet.
    Unknown,
}
21
/// Result of a successful floating-point-convention classification: how the
/// value is split across registers.
#[derive(Copy, Clone)]
enum FloatConv {
    /// Two floats, each in its own floating-point register.
    FloatPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg },
    /// A single float in one floating-point register.
    Float(Reg),
    /// One field in an integer register and one in a floating-point register
    /// (in either order — the `Reg` kinds carry which is which).
    MixedPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg },
}
28
/// Zero-sized error type: the value cannot use the floating-point calling
/// convention and must fall back to the integer convention.
#[derive(Copy, Clone)]
struct CannotUseFpConv;
31
32fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
33    match arg.layout.backend_repr {
34        BackendRepr::SimdVector { .. } => true,
35        _ => arg.layout.is_aggregate(),
36    }
37}
38
/// Walks the layout of `arg_layout`, recording in `field1_kind`/`field2_kind`
/// how its (at most two) scalar leaf fields would be passed under the
/// hardware floating-point calling convention.
///
/// Returns `Err(CannotUseFpConv)` as soon as the type is found ineligible:
/// more than two leaf fields, an integer wider than XLEN, a float wider than
/// FLEN, two integer fields, any vector, a non-transparent non-ZST union, or
/// an enum with multiple variants.
///
/// `offset_from_start` is the offset of `arg_layout` within the outermost
/// type being classified; it accumulates as the recursion descends.
fn should_use_fp_conv_helper<'a, Ty, C>(
    cx: &C,
    arg_layout: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
    field1_kind: &mut RegPassKind,
    field2_kind: &mut RegPassKind,
    offset_from_start: Size,
) -> Result<(), CannotUseFpConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    match arg_layout.backend_repr {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Int(..) | Primitive::Pointer(_) => {
                // An integer wider than XLEN cannot go in a single GPR.
                if arg_layout.size.bits() > xlen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Integer {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Integer, size: arg_layout.size },
                        };
                    }
                    (RegPassKind::Float { .. }, RegPassKind::Unknown) => {
                        // A second integer is only allowed after a float
                        // (float+int pair); int+int falls back to the integer
                        // convention.
                        *field2_kind = RegPassKind::Integer {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Integer, size: arg_layout.size },
                        };
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
            Primitive::Float(_) => {
                // A float wider than FLEN cannot go in a single FPR.
                if arg_layout.size.bits() > flen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Float {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Float, size: arg_layout.size },
                        };
                    }
                    (_, RegPassKind::Unknown) => {
                        // A float may follow either an integer or another float.
                        *field2_kind = RegPassKind::Float {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Float, size: arg_layout.size },
                        };
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
        },
        BackendRepr::SimdVector { .. } | BackendRepr::ScalableVector { .. } => {
            return Err(CannotUseFpConv);
        }
        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
            FieldsShape::Primitive => {
                unreachable!("aggregates can't have `FieldsShape::Primitive`")
            }
            FieldsShape::Union(_) => {
                if !arg_layout.is_zst() {
                    if arg_layout.is_transparent() {
                        // A repr(transparent) union delegates to its single
                        // non-1-ZST field, at the same offset.
                        let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1;
                        return should_use_fp_conv_helper(
                            cx,
                            &non_1zst_elem,
                            xlen,
                            flen,
                            field1_kind,
                            field2_kind,
                            offset_from_start,
                        );
                    }
                    // Any other non-empty union never uses the FP convention.
                    return Err(CannotUseFpConv);
                }
            }
            FieldsShape::Array { count, .. } => {
                // All array elements share a single layout, so field index 0
                // is queried for every element; only the offset varies.
                for i in 0..count {
                    let elem_layout = arg_layout.field(cx, 0);
                    should_use_fp_conv_helper(
                        cx,
                        &elem_layout,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                        offset_from_start + elem_layout.size * i,
                    )?;
                }
            }
            FieldsShape::Arbitrary { .. } => {
                match arg_layout.variants {
                    // Multi-variant enums are never eligible.
                    Variants::Multiple { .. } => return Err(CannotUseFpConv),
                    Variants::Single { .. } | Variants::Empty => (),
                }
                // Visit fields in offset order so field1/field2 are recorded
                // in memory order.
                for i in arg_layout.fields.index_by_increasing_offset() {
                    let field = arg_layout.field(cx, i);
                    should_use_fp_conv_helper(
                        cx,
                        &field,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                        offset_from_start + arg_layout.fields.offset(i),
                    )?;
                }
            }
        },
    }
    Ok(())
}
154
/// Determines whether `arg` can be passed using the hardware floating-point
/// calling convention, and if so how: a single float, a float pair, or a
/// mixed int/float pair. Returns `None` when the type must fall back to the
/// integer convention.
///
/// # Panics
///
/// Panics if classification recorded a first field at a non-zero offset,
/// which would violate the layout invariant this code relies on.
fn should_use_fp_conv<'a, Ty, C>(
    cx: &C,
    arg: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
) -> Option<FloatConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    let mut field1_kind = RegPassKind::Unknown;
    let mut field2_kind = RegPassKind::Unknown;
    if should_use_fp_conv_helper(
        cx,
        arg,
        xlen,
        flen,
        &mut field1_kind,
        &mut field2_kind,
        Size::ZERO,
    )
    .is_err()
    {
        return None;
    }
    match (field1_kind, field2_kind) {
        // Sanity check: the first recorded field must start at offset zero.
        (
            RegPassKind::Integer { offset_from_start, .. }
            | RegPassKind::Float { offset_from_start, .. },
            _,
        ) if offset_from_start != Size::ZERO => {
            panic!("type {:?} has a first field with non-zero offset {offset_from_start:?}", arg.ty)
        }
        (
            RegPassKind::Integer { ty: first_ty, .. },
            RegPassKind::Float { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::MixedPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        (
            RegPassKind::Float { ty: first_ty, .. },
            RegPassKind::Integer { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::MixedPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        (
            RegPassKind::Float { ty: first_ty, .. },
            RegPassKind::Float { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::FloatPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        (RegPassKind::Float { ty, .. }, RegPassKind::Unknown) => Some(FloatConv::Float(ty)),
        // A lone integer (or nothing at all) gains nothing from the FP
        // convention.
        _ => None,
    }
}
215
/// Classifies the return value for the RISC-V psABI.
///
/// Returns `true` when the value is returned by reference (the "wider than
/// 2×XLEN" rule), in which case the caller (`compute_abi_info`) deducts one
/// GPR for the hidden return pointer.
fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // Unsized returns are left untouched; assume no GPR is consumed.
        // NOTE(review): the `false` here is a guess — the original code noted
        // the return value of this function was undocumented.
        return false;
    }
    // Prefer the hardware floating-point convention when the type qualifies.
    if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
        match conv {
            FloatConv::Float(f) => {
                arg.cast_to(f);
            }
            FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty } => {
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
            }
            FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty } => {
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
            }
        }
        return false;
    }

    let total = arg.layout.size;

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_riscv_aggregate(arg) {
            arg.make_indirect();
        }
        return true;
    }

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };
    // Aggregates up to 2×XLEN are returned in one or two integer registers.
    if is_riscv_aggregate(arg) {
        if total.bits() <= xlen {
            arg.cast_to(xlen_reg);
        } else {
            arg.cast_to(Uniform::new(xlen_reg, Size::from_bits(xlen * 2)));
        }
        return false;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    extend_integer_width(arg, xlen);
    false
}
282
/// Classifies one argument for the RISC-V psABI, updating the remaining
/// register counts.
///
/// * `is_vararg` — variadic arguments never use the floating-point calling
///   convention.
/// * `avail_gprs` / `avail_fprs` — remaining integer / floating-point
///   argument registers; decremented according to what this argument uses.
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &mut ArgAbi<'a, Ty>,
    xlen: u64,
    flen: u64,
    is_vararg: bool,
    avail_gprs: &mut u64,
    avail_fprs: &mut u64,
) where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // FIXME: Update avail_gprs?
        // Not touching this...
        return;
    }
    // Types forced indirect by the ABI consume one GPR for the pointer.
    if arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
        arg.make_indirect();
        *avail_gprs = (*avail_gprs).saturating_sub(1);
        return;
    }
    // Try the hardware floating-point convention first — but only for fixed
    // (non-variadic) arguments, and only while enough registers remain.
    if !is_vararg {
        match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
            Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
                *avail_fprs -= 1;
                arg.cast_to(f);
                return;
            }
            Some(FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty })
                if *avail_fprs >= 2 =>
            {
                *avail_fprs -= 2;
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
                return;
            }
            Some(FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty })
                if *avail_fprs >= 1 && *avail_gprs >= 1 =>
            {
                *avail_gprs -= 1;
                *avail_fprs -= 1;
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
                return;
            }
            // Not eligible (or not enough registers): fall through to the
            // integer convention below.
            _ => (),
        }
    }

    let total = arg.layout.size;
    let align = arg.layout.align.bits();

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_riscv_aggregate(arg) {
            arg.make_indirect();
        }
        // The pointer replacing the value takes one GPR (if any remain).
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    let double_xlen_reg = match xlen {
        32 => Reg::i64(),
        64 => Reg::i128(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    if total.bits() > xlen {
        let align_regs = align > xlen;
        if is_riscv_aggregate(arg) {
            arg.cast_to(Uniform::new(
                if align_regs { double_xlen_reg } else { xlen_reg },
                Size::from_bits(xlen * 2),
            ));
        }
        // Varargs with 2×XLEN alignment go in an aligned (even-odd) register
        // pair, so an odd leftover register is skipped.
        if align_regs && is_vararg {
            *avail_gprs -= *avail_gprs % 2;
        }
        // A two-register value consumes two GPRs; if only one remains it
        // (partially) spills, exhausting the GPRs.
        if *avail_gprs >= 2 {
            *avail_gprs -= 2;
        } else {
            *avail_gprs = 0;
        }
        return;
    } else if is_riscv_aggregate(arg) {
        arg.cast_to(xlen_reg);
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    if *avail_gprs >= 1 {
        extend_integer_width(arg, xlen);
        *avail_gprs -= 1;
    }
}
402
403fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
404    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr
405        && let Primitive::Int(i, _) = scalar.primitive()
406        && i.size().bits() == 32
407        && xlen > 32
408        && let PassMode::Direct(ref mut attrs) = arg.mode
409    {
410        attrs.ext(ArgExtension::Sext);
411        return;
412    }
413
414    arg.extend_integer_width_to(xlen);
415}
416
417pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
418where
419    Ty: TyAbiInterface<'a, C> + Copy,
420    C: HasDataLayout + HasTargetSpec,
421{
422    let flen = match &cx.target_spec().llvm_abiname[..] {
423        "ilp32f" | "lp64f" => 32,
424        "ilp32d" | "lp64d" => 64,
425        _ => 0,
426    };
427    let xlen = cx.data_layout().pointer_size().bits();
428
429    let mut avail_gprs = 8;
430    let mut avail_fprs = 8;
431
432    if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
433        avail_gprs -= 1;
434    }
435
436    for (i, arg) in fn_abi.args.iter_mut().enumerate() {
437        if arg.is_ignore() {
438            continue;
439        }
440        classify_arg(
441            cx,
442            arg,
443            xlen,
444            flen,
445            i >= fn_abi.fixed_count as usize,
446            &mut avail_gprs,
447            &mut avail_fprs,
448        );
449    }
450}
451
452pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
453where
454    Ty: TyAbiInterface<'a, C> + Copy,
455    C: HasDataLayout + HasTargetSpec,
456{
457    let xlen = cx.data_layout().pointer_size().bits();
458
459    for arg in fn_abi.args.iter_mut() {
460        if arg.is_ignore() {
461            continue;
462        }
463
464        // LLVM integers types do not differentiate between signed or unsigned integers.
465        // Some RISC-V instructions do not have a `.w` suffix version, they use all the
466        // XLEN bits. By explicitly setting the `signext` or `zeroext` attribute
467        // according to signedness to avoid unnecessary integer extending instructions.
468        //
469        // See https://github.com/rust-lang/rust/issues/114508 for details.
470        extend_integer_width(arg, xlen);
471    }
472}