rustc_target/callconv/riscv.rs

// Reference: RISC-V ELF psABI specification
// https://github.com/riscv/riscv-elf-psabi-doc
//
// Reference: Clang RISC-V ELF psABI lowering code
// https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773

use rustc_abi::{
    BackendRepr, ExternAbi, FieldsShape, HasDataLayout, Primitive, Reg, RegKind, Size,
    TyAbiInterface, TyAndLayout, Variants,
};

use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};
use crate::spec::HasTargetSpec;

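/// Classification of one of the first two scalar fields encountered while
/// walking an aggregate for the hardware floating-point calling convention.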
#[derive(Copy, Clone)]
enum RegPassKind {
    Float(Reg),
    Integer(Reg),
    Unknown,
}

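/// How a value that qualifies for the hardware floating-point calling
/// convention is lowered: `Float` occupies a single FPR, `FloatPair` two
/// FPRs, and `MixedPair` one GPR plus one FPR, in field order. For instance
/// (illustrative, assuming the lp64d ABI), `struct { f32, f64 }` becomes
/// `FloatPair` and `struct { i32, f64 }` becomes `MixedPair`.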
#[derive(Copy, Clone)]
enum FloatConv {
    FloatPair(Reg, Reg),
    Float(Reg),
    MixedPair(Reg, Reg),
}

#[derive(Copy, Clone)]
struct CannotUseFpConv;

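/// Whether the psABI treats this argument as an aggregate. Vector types have
/// no argument registers in the scalar calling conventions handled here, so
/// this code classifies them like aggregates as well.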
fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
    match arg.layout.backend_repr {
        BackendRepr::Vector { .. } => true,
        _ => arg.layout.is_aggregate(),
    }
}

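/// Recursively walks `arg_layout`, recording in `field1_kind`/`field2_kind`
/// how the first (up to) two non-ZST scalar leaves would be passed. Returns
/// `Err(CannotUseFpConv)` as soon as the layout rules the hardware
/// floating-point calling convention out: too many fields, an oversized
/// scalar, a vector, an uninhabited type, a non-transparent union, or a
/// multi-variant enum.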
fn should_use_fp_conv_helper<'a, Ty, C>(
    cx: &C,
    arg_layout: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
    field1_kind: &mut RegPassKind,
    field2_kind: &mut RegPassKind,
) -> Result<(), CannotUseFpConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    match arg_layout.backend_repr {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Int(..) | Primitive::Pointer(_) => {
                if arg_layout.size.bits() > xlen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Integer(Reg {
                            kind: RegKind::Integer,
                            size: arg_layout.size,
                        });
                    }
                    (RegPassKind::Float(_), RegPassKind::Unknown) => {
                        *field2_kind = RegPassKind::Integer(Reg {
                            kind: RegKind::Integer,
                            size: arg_layout.size,
                        });
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
            Primitive::Float(_) => {
                if arg_layout.size.bits() > flen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind =
                            RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
                    }
                    (_, RegPassKind::Unknown) => {
                        *field2_kind =
                            RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
        },
        BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
            FieldsShape::Primitive => {
                unreachable!("aggregates can't have `FieldsShape::Primitive`")
            }
            FieldsShape::Union(_) => {
                if !arg_layout.is_zst() {
                    if arg_layout.is_transparent() {
                        let non_1zst_elem = arg_layout
                            .non_1zst_field(cx)
                            .expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union")
                            .1;
                        return should_use_fp_conv_helper(
                            cx,
                            &non_1zst_elem,
                            xlen,
                            flen,
                            field1_kind,
                            field2_kind,
                        );
                    }
                    return Err(CannotUseFpConv);
                }
            }
            FieldsShape::Array { count, .. } => {
                for _ in 0..count {
                    let elem_layout = arg_layout.field(cx, 0);
                    should_use_fp_conv_helper(
                        cx,
                        &elem_layout,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                    )?;
                }
            }
            FieldsShape::Arbitrary { .. } => {
                match arg_layout.variants {
                    Variants::Multiple { .. } => return Err(CannotUseFpConv),
                    Variants::Single { .. } | Variants::Empty => (),
                }
                for i in arg_layout.fields.index_by_increasing_offset() {
                    let field = arg_layout.field(cx, i);
                    should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?;
                }
            }
        },
    }
    Ok(())
}

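/// Determines whether `arg` can use the hardware floating-point calling
/// convention: it must flatten to at most two scalar fields, at least one of
/// them a float no wider than FLEN, with any integer field no wider than
/// XLEN. Note that two integer fields never qualify, so plain integer pairs
/// fall through to the integer calling convention.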
fn should_use_fp_conv<'a, Ty, C>(
    cx: &C,
    arg: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
) -> Option<FloatConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    let mut field1_kind = RegPassKind::Unknown;
    let mut field2_kind = RegPassKind::Unknown;
    if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() {
        return None;
    }
    match (field1_kind, field2_kind) {
        (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)),
        (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)),
        (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)),
        (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)),
        _ => None,
    }
}

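/// Classifies the return value. Returns `true` if the value is wider than
/// 2✕XLEN and therefore returned indirectly, in which case the caller must
/// reserve one GPR for the return pointer.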
fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // Not touching this...
        return false;
    }
    if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
        match conv {
            FloatConv::Float(f) => {
                arg.cast_to(f);
            }
            FloatConv::FloatPair(l, r) => {
                arg.cast_to(CastTarget::pair(l, r));
            }
            FloatConv::MixedPair(l, r) => {
                arg.cast_to(CastTarget::pair(l, r));
            }
        }
        return false;
    }

    let total = arg.layout.size;

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_riscv_aggregate(arg) {
            arg.make_indirect();
        }
        return true;
    }

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };
    if is_riscv_aggregate(arg) {
        if total.bits() <= xlen {
            arg.cast_to(xlen_reg);
        } else {
            arg.cast_to(Uniform::new(xlen_reg, Size::from_bits(xlen * 2)));
        }
        return false;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    extend_integer_width(arg, xlen);
    false
}

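/// Classifies a single argument, decrementing `avail_gprs`/`avail_fprs` as
/// registers are consumed. Both counters start at 8 in `compute_abi_info`,
/// matching the psABI's eight integer argument registers (a0-a7) and eight
/// floating-point argument registers (fa0-fa7). Variadic arguments
/// (`is_vararg`) never use the floating-point calling convention.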
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &mut ArgAbi<'a, Ty>,
    xlen: u64,
    flen: u64,
    is_vararg: bool,
    avail_gprs: &mut u64,
    avail_fprs: &mut u64,
) where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // Not touching this...
        return;
    }
    if !is_vararg {
        match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
            Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
                *avail_fprs -= 1;
                arg.cast_to(f);
                return;
            }
            Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => {
                *avail_fprs -= 2;
                arg.cast_to(CastTarget::pair(l, r));
                return;
            }
            Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => {
                *avail_gprs -= 1;
                *avail_fprs -= 1;
                arg.cast_to(CastTarget::pair(l, r));
                return;
            }
            _ => (),
        }
    }

    let total = arg.layout.size;
    let align = arg.layout.align.abi.bits();

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_riscv_aggregate(arg) {
            arg.make_indirect();
        }
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    let double_xlen_reg = match xlen {
        32 => Reg::i64(),
        64 => Reg::i128(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    if total.bits() > xlen {
        let align_regs = align > xlen;
        if is_riscv_aggregate(arg) {
            arg.cast_to(Uniform::new(
                if align_regs { double_xlen_reg } else { xlen_reg },
                Size::from_bits(xlen * 2),
            ));
        }
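        // The psABI passes variadic arguments with 2✕XLEN-bit alignment in an
        // aligned (even-odd) register pair, so round the remaining GPR count
        // down to even before consuming the pair.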
        if align_regs && is_vararg {
            *avail_gprs -= *avail_gprs % 2;
        }
        if *avail_gprs >= 2 {
            *avail_gprs -= 2;
        } else {
            *avail_gprs = 0;
        }
        return;
    } else if is_riscv_aggregate(arg) {
        arg.cast_to(xlen_reg);
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    if *avail_gprs >= 1 {
        extend_integer_width(arg, xlen);
        *avail_gprs -= 1;
    }
}

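/// Attaches a sign- or zero-extension attribute to a scalar integer argument.
/// For example (illustrative), on RV64 a `u32` argument is marked `signext`
/// rather than `zeroext`, because RV64 keeps 32-bit values in registers in
/// sign-extended form.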
fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
        if let Primitive::Int(i, _) = scalar.primitive() {
            // On RV64, 32-bit integers are always sign-extended, regardless
            // of the type's signedness.
            if i.size().bits() == 32 && xlen > 32 {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    attrs.ext(ArgExtension::Sext);
                    return;
                }
            }
        }
    }

    arg.extend_integer_width_to(xlen);
}

pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    let flen = match &cx.target_spec().llvm_abiname[..] {
        "ilp32f" | "lp64f" => 32,
        "ilp32d" | "lp64d" => 64,
        _ => 0,
    };
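    // For the soft-float ABIs (e.g. ilp32, lp64) `flen` stays 0, so every
    // float fails the `size > flen` check in `should_use_fp_conv_helper` and
    // the hardware floating-point calling convention is never used.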
    let xlen = cx.data_layout().pointer_size.bits();

    let mut avail_gprs = 8;
    let mut avail_fprs = 8;

    if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
        avail_gprs -= 1;
    }

    for (i, arg) in fn_abi.args.iter_mut().enumerate() {
        if arg.is_ignore() {
            continue;
        }
        classify_arg(
            cx,
            arg,
            xlen,
            flen,
            i >= fn_abi.fixed_count as usize,
            &mut avail_gprs,
            &mut avail_fprs,
        );
    }
}

pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: ExternAbi)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    if abi == ExternAbi::RustIntrinsic {
        return;
    }

    let xlen = cx.data_layout().pointer_size.bits();

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() {
            continue;
        }

        // LLVM integer types do not distinguish between signed and unsigned
        // values, and some RISC-V instructions have no `.w`-suffixed variant
        // and operate on all XLEN bits. We therefore explicitly set the
        // `signext` or `zeroext` attribute according to the type's signedness
        // to avoid unnecessary integer-extension instructions.
        //
        // See https://github.com/rust-lang/rust/issues/114508 for details.
        extend_integer_width(arg, xlen);
    }
}
397}