rustc_target/callconv/x86.rs
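
//! Calling-convention computation for 32-bit x86: aggregate returns, MSVC
//! overalignment rules, `byval` stack alignment, and `inreg` assignment for
//! fastcall/vectorcall/regparm.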

use rustc_abi::{
    AddressSpace, Align, BackendRepr, ExternAbi, HasDataLayout, Primitive, Reg, RegKind,
    TyAbiInterface, TyAndLayout,
};

use crate::callconv::{ArgAttribute, FnAbi, PassMode};
use crate::spec::HasTargetSpec;

/// Which family of C calling convention is being lowered.
#[derive(PartialEq)]
pub(crate) enum Flavor {
    General,
    FastcallOrVectorcall,
}

/// Target- and attribute-derived options that influence the x86-32 ABI.
pub(crate) struct X86Options {
    pub flavor: Flavor,
    /// Number of integer registers requested via `regparm`, if any.
    pub regparm: Option<u32>,
    /// Return small structs in registers rather than via a hidden pointer.
    pub reg_struct_return: bool,
}

/// Adjusts `fn_abi` in place to match the C calling convention on 32-bit x86.
pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, opts: X86Options)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    if !fn_abi.ret.is_ignore() {
        if fn_abi.ret.layout.is_aggregate() && fn_abi.ret.layout.is_sized() {
            // Returning a structure. Most often, this will use
            // a hidden first argument. On some platforms, though,
            // small structs are returned as integers.
            //
            // Some links:
            // https://www.angelcode.com/dev/callconv/callconv.html
            // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
            let t = cx.target_spec();
            if t.abi_return_struct_as_int || opts.reg_struct_return {
                // According to Clang, everyone but MSVC returns single-element
                // float aggregates directly in a floating-point register.
                if !t.is_like_msvc && fn_abi.ret.layout.is_single_fp_element(cx) {
                    match fn_abi.ret.layout.size.bytes() {
                        4 => fn_abi.ret.cast_to(Reg::f32()),
                        8 => fn_abi.ret.cast_to(Reg::f64()),
                        _ => fn_abi.ret.make_indirect(),
                    }
                } else {
                    match fn_abi.ret.layout.size.bytes() {
                        1 => fn_abi.ret.cast_to(Reg::i8()),
                        2 => fn_abi.ret.cast_to(Reg::i16()),
                        4 => fn_abi.ret.cast_to(Reg::i32()),
                        8 => fn_abi.ret.cast_to(Reg::i64()),
                        _ => fn_abi.ret.make_indirect(),
                    }
                }
            } else {
                fn_abi.ret.make_indirect();
            }
        } else {
            fn_abi.ret.extend_integer_width_to(32);
        }
    }
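
    // Illustrative sketch (not exhaustive): with `abi_return_struct_as_int` set,
    // a 4-byte two-field struct return takes the integer branch above and comes
    // back as `Reg::i32()` (i.e. in `eax`), while a 12-byte struct falls through
    // to `make_indirect()` and is written through a hidden pointer argument.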

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() || !arg.layout.is_sized() {
            continue;
        }

        // FIXME: MSVC 2015+ will pass the first 3 vector arguments in [XYZ]MM0-2.
        // See https://reviews.llvm.org/D72114 for Clang's behavior.

        let t = cx.target_spec();
        let align_4 = Align::from_bytes(4).unwrap();
        let align_16 = Align::from_bytes(16).unwrap();

        if t.is_like_msvc
            && arg.layout.is_adt()
            && let Some(max_repr_align) = arg.layout.max_repr_align
            && max_repr_align > align_4
        {
            // MSVC has special rules for overaligned arguments: https://reviews.llvm.org/D72114.
            // Summarized here:
            // - Arguments with _requested_ alignment > 4 are passed indirectly.
            // - For backwards compatibility, arguments with natural alignment > 4 are still passed
            //   on the stack (via `byval`). For example, this includes `double`, `int64_t`,
            //   and structs containing them, provided they lack an explicit alignment attribute.
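            //
            // For illustration (hypothetical C declarations mirroring these rules):
            //
            //     struct A { double x; };                    // natural align 8 -> `byval` on the stack
            //     struct B { __declspec(align(8)) int x; };  // requested align 8 -> passed indirectly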
            assert!(
                arg.layout.align.abi >= max_repr_align,
                "abi alignment {:?} less than requested alignment {max_repr_align:?}",
                arg.layout.align.abi,
            );
            arg.make_indirect();
        } else if arg.layout.is_aggregate() {
            // We need to compute the alignment of the `byval` argument. The rules can be found in
            // `X86_32ABIInfo::getTypeStackAlignInBytes` in Clang's `TargetInfo.cpp`. Summarized
            // here, they are (a sketch follows the list):
            //
            // 1. If the natural alignment of the type is <= 4, the alignment is 4.
            //
            // 2. Otherwise, on Linux, the alignment of any vector type is its natural alignment.
            //    This doesn't matter here because we only pass aggregates via `byval`, not vectors.
            //
            // 3. Otherwise, on Apple platforms, the alignment of anything that contains a vector
            //    type is 16.
            //
            // 4. If none of these conditions are true, the alignment is 4.
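            //
            // For example: on an Apple target, an aggregate containing a 16-byte
            // SIMD vector hits rule (3.) and gets `byval` align 16, while an
            // aggregate with natural alignment 8 but no vector member falls
            // through to rule (4.) and gets `byval` align 4.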

            // Does `layout` contain a vector type anywhere in it, recursing through fields?
            fn contains_vector<'a, Ty, C>(cx: &C, layout: TyAndLayout<'a, Ty>) -> bool
            where
                Ty: TyAbiInterface<'a, C> + Copy,
            {
                match layout.backend_repr {
                    BackendRepr::Uninhabited
                    | BackendRepr::Scalar(_)
                    | BackendRepr::ScalarPair(..) => false,
                    BackendRepr::Vector { .. } => true,
                    BackendRepr::Memory { .. } => {
                        for i in 0..layout.fields.count() {
                            if contains_vector(cx, layout.field(cx, i)) {
                                return true;
                            }
                        }
                        false
                    }
                }
            }

            let byval_align = if arg.layout.align.abi < align_4 {
                // (1.)
                align_4
            } else if t.is_like_osx && contains_vector(cx, arg.layout) {
                // (3.)
                align_16
            } else {
                // (4.)
                align_4
            };

            arg.pass_by_stack_offset(Some(byval_align));
        } else {
            arg.extend_integer_width_to(32);
        }
    }

    fill_inregs(cx, fn_abi, opts, false);
}
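
// Sketch of a call site (hypothetical; the real dispatch lives elsewhere in
// `rustc_target` and fills these options from the target spec and fn attributes):
//
//     let opts = X86Options { flavor: Flavor::General, regparm: None, reg_struct_return: false };
//     compute_abi_info(cx, &mut fn_abi, opts);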

/// Marks integer arguments as `InReg` for fastcall/vectorcall/regparm, mirroring Clang.
pub(crate) fn fill_inregs<'a, Ty, C>(
    cx: &C,
    fn_abi: &mut FnAbi<'a, Ty>,
    opts: X86Options,
    rust_abi: bool,
) where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if opts.flavor != Flavor::FastcallOrVectorcall && opts.regparm.is_none_or(|x| x == 0) {
        return;
    }
    // Mark arguments as `InReg` the same way Clang does, so that our
    // fastcall/vectorcall is compatible with C/C++ fastcall/vectorcall.

    // Clang reference: lib/CodeGen/TargetInfo.cpp
    // See X86_32ABIInfo::shouldPrimitiveUseInReg(), X86_32ABIInfo::updateFreeRegs()

    // Clang also consults IsSoftFloatABI, but that is only set on ARM targets,
    // which can't be x86, so it is not modeled here.

    // 2 registers for fastcall/vectorcall; otherwise the count comes from
    // `regparm`, which is capped at 3.
    let mut free_regs = opts.regparm.unwrap_or(2).into();
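
    // Worked sketch (hypothetical signature): for `extern "fastcall" fn f(a: i64, b: i32)`,
    // `free_regs` starts at 2. `a` needs two 32-bit registers and consumes both, but is
    // wider than 32 bits, so it is not marked `InReg`; `free_regs` is then 0, so the loop
    // below stops and `b` is passed on the stack.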

    // For types generating `PassMode::Cast`, `InReg` will not be set.
    // Maybe this is a FIXME.
    let has_casts = fn_abi.args.iter().any(|arg| matches!(arg.mode, PassMode::Cast { .. }));
    if has_casts && rust_abi {
        return;
    }

    for arg in fn_abi.args.iter_mut() {
        let attrs = match arg.mode {
            PassMode::Ignore | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
                continue;
            }
            PassMode::Direct(ref mut attrs) => attrs,
            PassMode::Pair(..)
            | PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ }
            | PassMode::Cast { .. } => {
                unreachable!("x86 shouldn't be passing arguments by {:?}", arg.mode)
            }
        };

        // At this point we know this must be a primitive of sorts.
        let unit = arg.layout.homogeneous_aggregate(cx).unwrap().unit().unwrap();
        assert_eq!(unit.size, arg.layout.size);
        if matches!(unit.kind, RegKind::Float | RegKind::Vector) {
            continue;
        }

        // Number of 32-bit registers needed, rounding up (e.g. 48 bits -> 2).
        let size_in_regs = (arg.layout.size.bits() + 31) / 32;

        if size_in_regs == 0 {
            continue;
        }

        if size_in_regs > free_regs {
            break;
        }

        free_regs -= size_in_regs;

        if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer {
            attrs.set(ArgAttribute::InReg);
        }

        if free_regs == 0 {
            break;
        }
    }
}

/// Adjusts `fn_abi` for the Rust ABI on 32-bit x86.
pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: ExternAbi)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    // Avoid returning floats in x87 registers on x86, as loading and storing from x87
    // registers will quiet signalling NaNs. Also avoid using SSE registers since they
    // are not always available (depending on target features).
    if !fn_abi.ret.is_ignore()
        // Intrinsics themselves are not "real" functions, so there's no need to change their ABIs.
        && abi != ExternAbi::RustIntrinsic
    {
        let has_float = match fn_abi.ret.layout.backend_repr {
            BackendRepr::Scalar(s) => matches!(s.primitive(), Primitive::Float(_)),
            BackendRepr::ScalarPair(s1, s2) => {
                matches!(s1.primitive(), Primitive::Float(_))
                    || matches!(s2.primitive(), Primitive::Float(_))
            }
            _ => false, // not passed via registers on x86 anyway
        };
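        // E.g. (a sketch): an `f32` return (4 bytes, pointer-sized on x86-32) is
        // cast below to a 4-byte integer `Reg` and returned in `eax` rather than
        // `st(0)`; an `f64` (8 bytes, wider than a pointer) is returned indirectly.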
        if has_float {
            if fn_abi.ret.layout.size <= Primitive::Pointer(AddressSpace::DATA).size(cx) {
                // Same size as a pointer or smaller: return in an integer register.
                fn_abi.ret.cast_to(Reg { kind: RegKind::Integer, size: fn_abi.ret.layout.size });
            } else {
                // Larger than a pointer: return indirectly.
                fn_abi.ret.make_indirect();
            }
            return;
        }
    }
}