use rustc_abi::{
    BackendRepr, HasDataLayout, Primitive, Reg, RegKind, Size, TyAbiInterface, TyAndLayout,
    Variants,
};

use crate::callconv::{ArgAbi, CastTarget, FnAbi};
use crate::spec::HasTargetSpec;

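/// Classification of a single "eightbyte" (64-bit chunk) of an argument under
/// the System V x86-64 calling convention.
///
/// The variant order is significant: merging two classes takes their `min`,
/// so the earlier (more general) class wins, e.g. `Int` beats `Sse`.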
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Class {
    Int,
    Sse,
    SseUp,
}

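/// Marker "error" type: the value cannot be passed in registers and must be
/// passed through memory instead.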
#[derive(Clone, Copy, Debug)]
struct Memory;

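// The widest vectors handled here are 512 bits (AVX-512), i.e. 512 / 64 = 8
// eightbytes.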
const LARGEST_VECTOR_SIZE: usize = 512;
const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;

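/// Splits `arg` into 64-bit "eightbytes" and assigns each a `Class`, following
/// the System V AMD64 ABI classification algorithm; returns `Err(Memory)` when
/// the argument must be passed on the stack instead. For example, a 16-byte
/// struct whose first eightbyte holds an `f64` and whose second holds a `u32`
/// classifies as `[Sse, Int]`: one SSE register plus one integer register.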
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &ArgAbi<'a, Ty>,
) -> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    fn classify<'a, Ty, C>(
        cx: &C,
        layout: TyAndLayout<'a, Ty>,
        cls: &mut [Option<Class>],
        off: Size,
    ) -> Result<(), Memory>
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout,
    {
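        // A misaligned field forces the whole argument into memory;
        // zero-sized fields are exempt, since they occupy no eightbyte.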
        if !off.is_aligned(layout.align.abi) {
            if !layout.is_zst() {
                return Err(Memory);
            }
            return Ok(());
        }

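        // Classify this layout itself. Aggregates (`ScalarPair` and `Memory`
        // representations) instead recurse into their fields and enum
        // variants, then return early.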
        let mut c = match layout.backend_repr {
            BackendRepr::Scalar(scalar) => match scalar.primitive() {
                Primitive::Int(..) | Primitive::Pointer(_) => Class::Int,
                Primitive::Float(_) => Class::Sse,
            },

            BackendRepr::SimdVector { .. } => Class::Sse,

            BackendRepr::ScalableVector { .. } => panic!("scalable vectors are unsupported"),

            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => {
                for i in 0..layout.fields.count() {
                    let field_off = off + layout.fields.offset(i);
                    classify(cx, layout.field(cx, i), cls, field_off)?;
                }

                match &layout.variants {
                    Variants::Single { .. } | Variants::Empty => {}
                    Variants::Multiple { variants, .. } => {
                        for variant_idx in variants.indices() {
                            classify(cx, layout.for_variant(cx, variant_idx), cls, off)?;
                        }
                    }
                }

                return Ok(());
            }
        };

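        // Merge `c` into every eightbyte this value overlaps. Only the first
        // eightbyte of a vector is `Sse`; the following ones are `SseUp`, the
        // upper halves of the same register.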
        let first = (off.bytes() / 8) as usize;
        let last = ((off.bytes() + layout.size.bytes() - 1) / 8) as usize;
        for cls in &mut cls[first..=last] {
            *cls = Some(cls.map_or(c, |old| old.min(c)));

            if c == Class::Sse {
                c = Class::SseUp;
            }
        }

        Ok(())
    }

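    // Anything wider than eight eightbytes (64 bytes) can never be passed in
    // registers.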
    let n = arg.layout.size.bytes().div_ceil(8) as usize;
    if n > MAX_EIGHTBYTES {
        return Err(Memory);
    }

    let mut cls = [None; MAX_EIGHTBYTES];
    classify(cx, arg.layout, &mut cls, Size::ZERO)?;
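
    // Post-merger cleanup: an argument larger than two eightbytes stays in
    // registers only if it forms a single vector (one `Sse` followed entirely
    // by `SseUp`). Otherwise, turn any `SseUp` that no longer directly
    // follows an `Sse` back into a plain `Sse`.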
    if n > 2 {
        if cls[0] != Some(Class::Sse) {
            return Err(Memory);
        }
        if cls[1..n].iter().any(|&c| c != Some(Class::SseUp)) {
            return Err(Memory);
        }
    } else {
        let mut i = 0;
        while i < n {
            if cls[i] == Some(Class::SseUp) {
                cls[i] = Some(Class::Sse);
            } else if cls[i] == Some(Class::Sse) {
                i += 1;
                while i != n && cls[i] == Some(Class::SseUp) {
                    i += 1;
                }
            } else {
                i += 1;
            }
        }
    }

    Ok(cls)
}

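/// Consumes one run of classes starting at `cls[*i]` and returns the register
/// it should be passed in: an integer register for `Int`, a float or vector
/// register for `Sse` (plus any trailing `SseUp`s), or `None` for an unused
/// (e.g. padding) eightbyte.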
fn reg_component(cls: &[Option<Class>], i: &mut usize, size: Size) -> Option<Reg> {
    if *i >= cls.len() {
        return None;
    }

    match cls[*i] {
        None => None,
        Some(Class::Int) => {
            *i += 1;
            Some(if size.bytes() < 8 { Reg { kind: RegKind::Integer, size } } else { Reg::i64() })
        }
        Some(Class::Sse) => {
            let vec_len =
                1 + cls[*i + 1..].iter().take_while(|&&c| c == Some(Class::SseUp)).count();
            *i += vec_len;
            Some(if vec_len == 1 {
                match size.bytes() {
                    4 => Reg::f32(),
                    _ => Reg::f64(),
                }
            } else {
                Reg { kind: RegKind::Vector, size: Size::from_bytes(8) * (vec_len as u64) }
            })
        }
        Some(c) => unreachable!("reg_component: unhandled class {:?}", c),
    }
}

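/// Builds the `CastTarget` used to pass a register-class value: a single
/// register when one component suffices, or a (lo, hi) pair otherwise.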
fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
    let mut i = 0;
    let lo = reg_component(cls, &mut i, size).unwrap();
    let offset = Size::from_bytes(8) * (i as u64);
    let mut target = CastTarget::from(lo);
    if size > offset {
        if let Some(hi) = reg_component(cls, &mut i, size - offset) {
            target = CastTarget::pair(lo, hi);
        }
    }
    assert_eq!(reg_component(cls, &mut i, Size::ZERO), None);
    target
}

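// This ABI provides six integer registers (RDI, RSI, RDX, RCX, R8, R9) and
// eight SSE registers (XMM0 through XMM7) for passing arguments.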
const MAX_INT_REGS: usize = 6;
const MAX_SSE_REGS: usize = 8;

pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    let mut int_regs = MAX_INT_REGS;
    let mut sse_regs = MAX_SSE_REGS;

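    // Shared handler for the return value (`is_arg == false`) and for each
    // argument (`is_arg == true`), tracking how many registers remain free.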
    let mut x86_64_arg_or_ret = |arg: &mut ArgAbi<'a, Ty>, is_arg: bool| {
        if !arg.layout.is_sized() {
            return;
        }
        if is_arg && arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
            int_regs = int_regs.saturating_sub(1);
            arg.make_indirect();
            return;
        }
        let mut cls_or_mem = classify_arg(cx, arg);

        if is_arg {
            if let Ok(cls) = cls_or_mem {
                let mut needed_int = 0;
                let mut needed_sse = 0;
                for c in cls {
                    match c {
                        Some(Class::Int) => needed_int += 1,
                        Some(Class::Sse) => needed_sse += 1,
                        _ => {}
                    }
                }
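                // Reserve the registers this argument needs; if either
                // register file would be exhausted, fall back to memory.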
                match (int_regs.checked_sub(needed_int), sse_regs.checked_sub(needed_sse)) {
                    (Some(left_int), Some(left_sse)) => {
                        int_regs = left_int;
                        sse_regs = left_sse;
                    }
                    _ => {
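                        // Out of registers: only aggregates are explicitly
                        // demoted to memory here; scalar immediates are left
                        // alone, as the backend spills them to the stack on
                        // its own.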
                        if arg.layout.is_aggregate() {
                            cls_or_mem = Err(Memory);
                        }
                    }
                }
            }
        }

        match cls_or_mem {
            Err(Memory) => {
                if is_arg {
                    arg.pass_by_stack_offset(None);
                } else {
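                    // A memory-class return value becomes a hidden `sret`
                    // pointer, which consumes one integer register (RDI).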
                    arg.make_indirect();
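                    // The return value is classified before any argument, so
                    // no registers can have been claimed yet.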
                    assert_eq!(int_regs, MAX_INT_REGS);
                    int_regs -= 1;
                }
            }
            Ok(ref cls) => {
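                // Register-class aggregates are cast to one or two registers;
                // scalar arguments (and, on Darwin, scalar returns) get their
                // integer width extended to at least 32 bits.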
                if arg.layout.is_aggregate() {
                    let size = arg.layout.size;
                    arg.cast_to(cast_target(cls, size));
                } else if is_arg || cx.target_spec().is_like_darwin {
                    arg.extend_integer_width_to(32);
                }
            }
        }
    };

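    // Classify the return value first (it may claim an integer register for
    // `sret`), then the arguments in order.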
    if !fn_abi.ret.is_ignore() {
        x86_64_arg_or_ret(&mut fn_abi.ret, false);
    }

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() {
            continue;
        }
        x86_64_arg_or_ret(arg, true);
    }
}