//! `rustc_target/callconv/x86.rs` — computation of the x86 (32-bit) C calling convention.

use rustc_abi::{
    AddressSpace, Align, BackendRepr, HasDataLayout, Primitive, Reg, RegKind, TyAndLayout,
};

use crate::callconv::{ArgAttribute, FnAbi, PassMode, TyAbiInterface};
use crate::spec::{HasTargetSpec, RustcAbi};

/// Which x86-32 calling-convention family is being computed.
///
/// `FastcallOrVectorcall` limits how many integer registers are available for
/// `InReg` arguments (see `fill_inregs`); everything else is `General`.
#[derive(PartialEq)]
pub(crate) enum Flavor {
    General,
    FastcallOrVectorcall,
}

14pub(crate) struct X86Options {
15    pub flavor: Flavor,
16    pub regparm: Option<u32>,
17    pub reg_struct_return: bool,
18}
19
20pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, opts: X86Options)
21where
22    Ty: TyAbiInterface<'a, C> + Copy,
23    C: HasDataLayout + HasTargetSpec,
24{
25    if !fn_abi.ret.is_ignore() {
26        if fn_abi.ret.layout.is_aggregate() && fn_abi.ret.layout.is_sized() {
27            // Returning a structure. Most often, this will use
28            // a hidden first argument. On some platforms, though,
29            // small structs are returned as integers.
30            //
31            // Some links:
32            // https://www.angelcode.com/dev/callconv/callconv.html
33            // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
34            let t = cx.target_spec();
35            if t.abi_return_struct_as_int || opts.reg_struct_return {
36                // According to Clang, everyone but MSVC returns single-element
37                // float aggregates directly in a floating-point register.
38                if fn_abi.ret.layout.is_single_fp_element(cx) {
39                    match fn_abi.ret.layout.size.bytes() {
40                        4 => fn_abi.ret.cast_to(Reg::f32()),
41                        8 => fn_abi.ret.cast_to(Reg::f64()),
42                        _ => fn_abi.ret.make_indirect(),
43                    }
44                } else {
45                    match fn_abi.ret.layout.size.bytes() {
46                        1 => fn_abi.ret.cast_to(Reg::i8()),
47                        2 => fn_abi.ret.cast_to(Reg::i16()),
48                        4 => fn_abi.ret.cast_to(Reg::i32()),
49                        8 => fn_abi.ret.cast_to(Reg::i64()),
50                        _ => fn_abi.ret.make_indirect(),
51                    }
52                }
53            } else {
54                fn_abi.ret.make_indirect();
55            }
56        } else {
57            fn_abi.ret.extend_integer_width_to(32);
58        }
59    }
60
61    for arg in fn_abi.args.iter_mut() {
62        if arg.is_ignore() || !arg.layout.is_sized() {
63            continue;
64        }
65
66        if arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
67            arg.make_indirect();
68            continue;
69        }
70
71        let t = cx.target_spec();
72        let align_4 = Align::from_bytes(4).unwrap();
73        let align_16 = Align::from_bytes(16).unwrap();
74
75        if arg.layout.is_aggregate() {
76            // We need to compute the alignment of the `byval` argument. The rules can be found in
77            // `X86_32ABIInfo::getTypeStackAlignInBytes` in Clang's `TargetInfo.cpp`. Summarized
78            // here, they are:
79            //
80            // 1. If the natural alignment of the type is <= 4, the alignment is 4.
81            //
82            // 2. Otherwise, on Linux, the alignment of any vector type is the natural alignment.
83            // This doesn't matter here because we only pass aggregates via `byval`, not vectors.
84            //
85            // 3. Otherwise, on Apple platforms, the alignment of anything that contains a vector
86            // type is 16.
87            //
88            // 4. If none of these conditions are true, the alignment is 4.
89
90            fn contains_vector<'a, Ty, C>(cx: &C, layout: TyAndLayout<'a, Ty>) -> bool
91            where
92                Ty: TyAbiInterface<'a, C> + Copy,
93            {
94                match layout.backend_repr {
95                    BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) => false,
96                    BackendRepr::SimdVector { .. } => true,
97                    BackendRepr::Memory { .. } => {
98                        for i in 0..layout.fields.count() {
99                            if contains_vector(cx, layout.field(cx, i)) {
100                                return true;
101                            }
102                        }
103                        false
104                    }
105                    BackendRepr::SimdScalableVector { .. } => {
106                        {
    ::core::panicking::panic_fmt(format_args!("scalable vectors are unsupported"));
}panic!("scalable vectors are unsupported")
107                    }
108                }
109            }
110
111            let byval_align = if arg.layout.align.abi < align_4 {
112                // (1.)
113                align_4
114            } else if t.is_like_darwin && contains_vector(cx, arg.layout) {
115                // (3.)
116                align_16
117            } else {
118                // (4.)
119                align_4
120            };
121
122            arg.pass_by_stack_offset(Some(byval_align));
123        } else {
124            arg.extend_integer_width_to(32);
125        }
126    }
127
128    fill_inregs(cx, fn_abi, opts, false);
129}
130
131pub(crate) fn fill_inregs<'a, Ty, C>(
132    cx: &C,
133    fn_abi: &mut FnAbi<'a, Ty>,
134    opts: X86Options,
135    rust_abi: bool,
136) where
137    Ty: TyAbiInterface<'a, C> + Copy,
138{
139    if opts.flavor != Flavor::FastcallOrVectorcall && opts.regparm.is_none_or(|x| x == 0) {
140        return;
141    }
142    // Mark arguments as InReg like clang does it,
143    // so our fastcall/vectorcall is compatible with C/C++ fastcall/vectorcall.
144
145    // Clang reference: lib/CodeGen/TargetInfo.cpp
146    // See X86_32ABIInfo::shouldPrimitiveUseInReg(), X86_32ABIInfo::updateFreeRegs()
147
148    // IsSoftFloatABI is only set to true on ARM platforms,
149    // which in turn can't be x86?
150
151    // 2 for fastcall/vectorcall, regparm limited by 3 otherwise
152    let mut free_regs = opts.regparm.unwrap_or(2).into();
153
154    // For types generating PassMode::Cast, InRegs will not be set.
155    // Maybe, this is a FIXME
156    let has_casts = fn_abi.args.iter().any(|arg| #[allow(non_exhaustive_omitted_patterns)] match arg.mode {
    PassMode::Cast { .. } => true,
    _ => false,
}matches!(arg.mode, PassMode::Cast { .. }));
157    if has_casts && rust_abi {
158        return;
159    }
160
161    for arg in fn_abi.args.iter_mut() {
162        let attrs = match arg.mode {
163            PassMode::Ignore | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
164                continue;
165            }
166            PassMode::Direct(ref mut attrs) => attrs,
167            PassMode::Pair(..)
168            | PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ }
169            | PassMode::Cast { .. } => {
170                {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("x86 shouldn\'t be passing arguments by {0:?}",
                arg.mode)));
}unreachable!("x86 shouldn't be passing arguments by {:?}", arg.mode)
171            }
172        };
173
174        // At this point we know this must be a primitive of sorts.
175        let unit = arg.layout.homogeneous_aggregate(cx).unwrap().unit().unwrap();
176        match (&unit.size, &arg.layout.size) {
    (left_val, right_val) => {
        if !(*left_val == *right_val) {
            let kind = ::core::panicking::AssertKind::Eq;
            ::core::panicking::assert_failed(kind, &*left_val, &*right_val,
                ::core::option::Option::None);
        }
    }
};assert_eq!(unit.size, arg.layout.size);
177        if #[allow(non_exhaustive_omitted_patterns)] match unit.kind {
    RegKind::Float | RegKind::Vector { .. } => true,
    _ => false,
}matches!(unit.kind, RegKind::Float | RegKind::Vector { .. }) {
178            continue;
179        }
180
181        let size_in_regs = arg.layout.size.bits().div_ceil(32);
182
183        if size_in_regs == 0 {
184            continue;
185        }
186
187        if size_in_regs > free_regs {
188            break;
189        }
190
191        free_regs -= size_in_regs;
192
193        if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer {
194            attrs.set(ArgAttribute::InReg);
195        }
196
197        if free_regs == 0 {
198            break;
199        }
200    }
201}
202
203pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
204where
205    Ty: TyAbiInterface<'a, C> + Copy,
206    C: HasDataLayout + HasTargetSpec,
207{
208    // Avoid returning floats in x87 registers on x86 as loading and storing from x87
209    // registers will quiet signalling NaNs. Also avoid using SSE registers since they
210    // are not always available (depending on target features).
211    if !fn_abi.ret.is_ignore() {
212        let has_float = match fn_abi.ret.layout.backend_repr {
213            BackendRepr::Scalar(s) => #[allow(non_exhaustive_omitted_patterns)] match s.primitive() {
    Primitive::Float(_) => true,
    _ => false,
}matches!(s.primitive(), Primitive::Float(_)),
214            BackendRepr::ScalarPair(s1, s2) => {
215                #[allow(non_exhaustive_omitted_patterns)] match s1.primitive() {
    Primitive::Float(_) => true,
    _ => false,
}matches!(s1.primitive(), Primitive::Float(_))
216                    || #[allow(non_exhaustive_omitted_patterns)] match s2.primitive() {
    Primitive::Float(_) => true,
    _ => false,
}matches!(s2.primitive(), Primitive::Float(_))
217            }
218            _ => false, // anyway not passed via registers on x86
219        };
220        if has_float {
221            if cx.target_spec().rustc_abi == Some(RustcAbi::X86Sse2)
222                && fn_abi.ret.layout.backend_repr.is_scalar()
223                && fn_abi.ret.layout.size.bits() <= 128
224            {
225                // This is a single scalar that fits into an SSE register, and the target uses the
226                // SSE ABI. We prefer this over integer registers as float scalars need to be in SSE
227                // registers for float operations, so that's the best place to pass them around.
228                fn_abi.ret.cast_to(Reg::opaque_vector(fn_abi.ret.layout.size));
229            } else if fn_abi.ret.layout.size <= Primitive::Pointer(AddressSpace::ZERO).size(cx) {
230                // Same size or smaller than pointer, return in an integer register.
231                fn_abi.ret.cast_to(Reg { kind: RegKind::Integer, size: fn_abi.ret.layout.size });
232            } else {
233                // Larger than a pointer, return indirectly.
234                fn_abi.ret.make_indirect();
235            }
236            return;
237        }
238    }
239}