rustc_target/callconv/x86_64.rs

// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/db0bd2702ab0b6e48965cd85f8859bbd5f60e48e/compiler/externals.cpp

use rustc_abi::{
    BackendRepr, HasDataLayout, Primitive, Reg, RegKind, Size, TyAbiInterface, TyAndLayout,
    Variants,
};

use crate::callconv::{ArgAbi, CastTarget, FnAbi};

/// Classification of "eightbyte" components.
// N.B., the order of the variants is from general to specific,
// so that merging two classes via `min` (see `classify` below)
// yields the more general of the two.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Class {
    Int,
    Sse,
    SseUp,
}
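// For example, `Class::Int.min(Class::Sse)` is `Class::Int`: per the ABI's
// merge rule, an eightbyte containing both integer and floating-point data
// is passed in an integer register.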

/// Marker for values that cannot be passed in registers and must live in
/// memory (i.e. on the stack).
#[derive(Clone, Copy, Debug)]
struct Memory;

// Currently supported vector size (AVX-512).
const LARGEST_VECTOR_SIZE: usize = 512;
const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
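// A value spanning more than `MAX_EIGHTBYTES` eightbytes cannot fit in any
// register (the largest, a 512-bit ZMM vector register, holds eight), so
// classification bails out to `Memory` early for anything bigger.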

/// Classifies `arg` into at most `MAX_EIGHTBYTES` per-eightbyte `Class`es,
/// or `Err(Memory)` if it has to be passed on the stack.
///
/// For example, under the SysV rules a `#[repr(C)]` struct with fields
/// `(i64, f64)` classifies as `[Int, Sse]`, i.e. one integer register plus
/// one SSE register, while `[u8; 32]` spans four eightbytes without being a
/// vector and is classified as `Memory`.
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &ArgAbi<'a, Ty>,
) -> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    fn classify<'a, Ty, C>(
        cx: &C,
        layout: TyAndLayout<'a, Ty>,
        cls: &mut [Option<Class>],
        off: Size,
    ) -> Result<(), Memory>
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout,
    {
        // A misaligned, non-zero-sized field forces the whole value into memory.
        if !off.is_aligned(layout.align.abi) {
            if !layout.is_zst() {
                return Err(Memory);
            }
            return Ok(());
        }

        let mut c = match layout.backend_repr {
            BackendRepr::Scalar(scalar) => match scalar.primitive() {
                Primitive::Int(..) | Primitive::Pointer(_) => Class::Int,
                Primitive::Float(_) => Class::Sse,
            },

            BackendRepr::SimdVector { .. } => Class::Sse,

            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => {
                // Aggregates: classify each field at its offset and merge the
                // results into `cls`.
                for i in 0..layout.fields.count() {
                    let field_off = off + layout.fields.offset(i);
                    classify(cx, layout.field(cx, i), cls, field_off)?;
                }

                match &layout.variants {
                    Variants::Single { .. } | Variants::Empty => {}
                    Variants::Multiple { variants, .. } => {
                        // Treat enum variants like union members.
                        for variant_idx in variants.indices() {
                            classify(cx, layout.for_variant(cx, variant_idx), cls, off)?;
                        }
                    }
                }

                return Ok(());
            }
        };

        // Fill in `cls` for scalars (Int/Sse) and vectors (Sse).
        let first = (off.bytes() / 8) as usize;
        let last = ((off.bytes() + layout.size.bytes() - 1) / 8) as usize;
        for cls in &mut cls[first..=last] {
            *cls = Some(cls.map_or(c, |old| old.min(c)));

            // Everything after the first Sse "eightbyte"
            // component is the upper half of a register.
            if c == Class::Sse {
                c = Class::SseUp;
            }
        }

        Ok(())
    }

    // Number of eightbytes the value occupies, rounding up.
    let n = ((arg.layout.size.bytes() + 7) / 8) as usize;
    if n > MAX_EIGHTBYTES {
        return Err(Memory);
    }

    let mut cls = [None; MAX_EIGHTBYTES];
    classify(cx, arg.layout, &mut cls, Size::ZERO)?;
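    // Post-merger cleanup from the SysV classification algorithm: a value
    // larger than two eightbytes is only register-eligible if it is a single
    // vector, i.e. one Sse eightbyte followed exclusively by SseUp.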
    if n > 2 {
        if cls[0] != Some(Class::Sse) {
            return Err(Memory);
        }
        if cls[1..n].iter().any(|&c| c != Some(Class::SseUp)) {
            return Err(Memory);
        }
    } else {
        // At most two eightbytes: promote any `SseUp` that does not follow an
        // `Sse` back to `Sse` (such runs can be produced by the union-style
        // merging of enum variants above).
        let mut i = 0;
        while i < n {
            if cls[i] == Some(Class::SseUp) {
                cls[i] = Some(Class::Sse);
            } else if cls[i] == Some(Class::Sse) {
                i += 1;
                while i != n && cls[i] == Some(Class::SseUp) {
                    i += 1;
                }
            } else {
                i += 1;
            }
        }
    }

    Ok(cls)
}

/// Returns the register that backs the run of classes starting at `cls[*i]`,
/// advancing `*i` past it: `Int` maps to an integer register, and `Sse`
/// together with any following `SseUp`s maps to a float or vector register
/// covering the whole run (e.g. `[Sse, SseUp]` becomes one 16-byte XMM).
fn reg_component(cls: &[Option<Class>], i: &mut usize, size: Size) -> Option<Reg> {
    if *i >= cls.len() {
        return None;
    }

    match cls[*i] {
        None => None,
        Some(Class::Int) => {
            *i += 1;
            Some(if size.bytes() < 8 { Reg { kind: RegKind::Integer, size } } else { Reg::i64() })
        }
        Some(Class::Sse) => {
            let vec_len =
                1 + cls[*i + 1..].iter().take_while(|&&c| c == Some(Class::SseUp)).count();
            *i += vec_len;
            Some(if vec_len == 1 {
                match size.bytes() {
                    4 => Reg::f32(),
                    _ => Reg::f64(),
                }
            } else {
                Reg { kind: RegKind::Vector, size: Size::from_bytes(8) * (vec_len as u64) }
            })
        }
        // `SseUp` is always consumed together with the preceding `Sse`.
        Some(c) => unreachable!("reg_component: unhandled class {:?}", c),
    }
}

/// Assembles the `CastTarget` describing how a register-eligible aggregate is
/// passed. For example, a 16-byte value classified as `[Int, Sse]` becomes
/// the pair `(i64, f64)`: the low eightbyte travels in an integer register,
/// the high one in an SSE register.
fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
    let mut i = 0;
    let lo = reg_component(cls, &mut i, size).unwrap();
    let offset = Size::from_bytes(8) * (i as u64);
    let mut target = CastTarget::from(lo);
    if size > offset {
        if let Some(hi) = reg_component(cls, &mut i, size - offset) {
            target = CastTarget::pair(lo, hi);
        }
    }
    assert_eq!(reg_component(cls, &mut i, Size::ZERO), None);
    target
}

const MAX_INT_REGS: usize = 6; // RDI, RSI, RDX, RCX, R8, R9
const MAX_SSE_REGS: usize = 8; // XMM0-7
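// These budgets apply to arguments only; return values use RAX/RDX and
// XMM0/XMM1, except that an indirect (`sret`) return claims RDI, which is
// accounted for below.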

pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    let mut int_regs = MAX_INT_REGS;
    let mut sse_regs = MAX_SSE_REGS;

    let mut x86_64_arg_or_ret = |arg: &mut ArgAbi<'a, Ty>, is_arg: bool| {
        if !arg.layout.is_sized() {
            // Unsized types cannot be classified into registers; leave them as-is.
            return;
        }
        let mut cls_or_mem = classify_arg(cx, arg);

        if is_arg {
            if let Ok(cls) = cls_or_mem {
                // Count the registers of each kind this argument would need.
                let mut needed_int = 0;
                let mut needed_sse = 0;
                for c in cls {
                    match c {
                        Some(Class::Int) => needed_int += 1,
                        Some(Class::Sse) => needed_sse += 1,
                        _ => {}
                    }
                }
                match (int_regs.checked_sub(needed_int), sse_regs.checked_sub(needed_sse)) {
                    (Some(left_int), Some(left_sse)) => {
                        int_regs = left_int;
                        sse_regs = left_sse;
                    }
                    _ => {
                        // Not enough registers for this argument, so it will be
                        // passed on the stack, but we only mark aggregates
                        // explicitly as indirect `byval` arguments, as LLVM will
                        // automatically put immediates on the stack itself.
                        if arg.layout.is_aggregate() {
                            cls_or_mem = Err(Memory);
                        }
                    }
                }
            }
        }

        match cls_or_mem {
            Err(Memory) => {
                if is_arg {
                    // The x86_64 ABI doesn't have any special requirements for
                    // `byval` alignment; the type's alignment is always used.
                    arg.pass_by_stack_offset(None);
                } else {
                    // The return is passed indirectly through a hidden `sret`
                    // pointer, which occupies one integer register (RDI).
                    arg.make_indirect();
                    // NOTE(eddyb) return is handled first, so no registers
                    // should've been used yet.
                    assert_eq!(int_regs, MAX_INT_REGS);
                    int_regs -= 1;
                }
            }
            Ok(ref cls) => {
                // Aggregates are split into eightbyte-sized chunks, each
                // passed in its own register.
                if arg.layout.is_aggregate() {
                    let size = arg.layout.size;
                    arg.cast_to(cast_target(cls, size));
                } else {
                    // Scalars narrower than 32 bits get sign-/zero-extended.
                    arg.extend_integer_width_to(32);
                }
            }
        }
    };
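
    // Classify the return first: if it is returned indirectly, the hidden
    // `sret` pointer must claim RDI before any argument registers are
    // assigned.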
    if !fn_abi.ret.is_ignore() {
        x86_64_arg_or_ret(&mut fn_abi.ret, false);
    }

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() {
            continue;
        }
        x86_64_arg_or_ret(arg, true);
    }
}
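
// A rough sketch of the conventions this produces (assuming the usual SysV
// register mapping; none of these signatures appear in this file):
//
//   fn f(a: i64, b: f64)   // a in RDI, b in XMM0
//   fn g(p: [u8; 32])      // p passed on the stack (indirect `byval`)
//   fn h() -> (i64, f64)   // hypothetical pair: returned in RAX and XMM0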