use rustc_abi::{Align, BackendRepr, Endian, HasDataLayout, Primitive, Size, TyAndLayout};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::common::IntPredicate;
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::traits::{
    BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods, LayoutTypeCodegenMethods,
};
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};

use crate::builder::Builder;
use crate::llvm::{Type, Value};
use crate::type_of::LayoutLlvmExt;

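/// Rounds `value` up to the next multiple of `align`, which must be a power of
/// two, using the classic `(value + align - 1) & -align` trick.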
fn round_up_to_alignment<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    mut value: &'ll Value,
    align: Align,
) -> &'ll Value {
    value = bx.add(value, bx.cx().const_i32(align.bytes() as i32 - 1));
    bx.and(value, bx.cx().const_i32(-(align.bytes() as i32)))
}

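/// Rounds the pointer `addr` up to the next multiple of `align` using
/// `llvm.ptrmask`, which masks off the low bits without a `ptrtoint`/`inttoptr`
/// round-trip. The mask operand is pointer-index-sized (`isize`).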
fn round_pointer_up_to_alignment<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    addr: &'ll Value,
    align: Align,
    ptr_ty: &'ll Type,
) -> &'ll Value {
    let ptr = bx.inbounds_ptradd(addr, bx.const_i32(align.bytes() as i32 - 1));
    bx.call_intrinsic(
        "llvm.ptrmask",
        &[ptr_ty, bx.type_isize()],
        &[ptr, bx.const_int(bx.isize_ty, -(align.bytes() as isize) as i64)],
    )
}

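/// The common "pointer bump" lowering of `va_arg`: load the current argument
/// pointer from the `va_list`, align it up if required, advance it by the
/// (slot-aligned) argument size, and store it back. Returns the address of the
/// argument together with the alignment that can be assumed for loads from it.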
fn emit_direct_ptr_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    size: Size,
    align: Align,
    slot_size: Align,
    allow_higher_align: bool,
    force_right_adjust: bool,
) -> (&'ll Value, Align) {
    let va_list_ty = bx.type_ptr();
    let va_list_addr = list.immediate();

    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;
    let ptr = bx.load(va_list_ty, va_list_addr, ptr_align_abi);

    let (addr, addr_align) = if allow_higher_align && align > slot_size {
        (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align)
    } else {
        (ptr, slot_size)
    };

    let aligned_size = size.align_to(slot_size).bytes() as i32;
    let full_direct_size = bx.cx().const_i32(aligned_size);
    let next = bx.inbounds_ptradd(addr, full_direct_size);
    bx.store(next, va_list_addr, ptr_align_abi);

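    // On big-endian targets, a value smaller than its slot is stored in the
    // high bytes, so the pointer must be right-adjusted when the ABI asks for it.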
    if size.bytes() < slot_size.bytes()
        && bx.tcx().sess.target.endian == Endian::Big
        && force_right_adjust
    {
        let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
        let adjusted = bx.inbounds_ptradd(addr, adjusted_size);
        (adjusted, addr_align)
    } else {
        (addr, addr_align)
    }
}

enum PassMode {
    Direct,
    Indirect,
}

enum SlotSize {
    Bytes8 = 8,
    Bytes4 = 4,
}

enum AllowHigherAlign {
    No,
    Yes,
}

enum ForceRightAdjust {
    No,
    Yes,
}

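/// Lowers `va_arg` via [`emit_direct_ptr_va_arg`] and loads the value, either
/// directly from its slot or, for `PassMode::Indirect`, through the pointer
/// stored in the slot. This covers most of the "simple" target ABIs.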
fn emit_ptr_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
    pass_mode: PassMode,
    slot_size: SlotSize,
    allow_higher_align: AllowHigherAlign,
    force_right_adjust: ForceRightAdjust,
) -> &'ll Value {
    let indirect = matches!(pass_mode, PassMode::Indirect);
    let allow_higher_align = matches!(allow_higher_align, AllowHigherAlign::Yes);
    let force_right_adjust = matches!(force_right_adjust, ForceRightAdjust::Yes);
    let slot_size = Align::from_bytes(slot_size as u64).unwrap();

    let layout = bx.cx.layout_of(target_ty);
    let (llty, size, align) = if indirect {
        (
            bx.cx.layout_of(Ty::new_imm_ptr(bx.cx.tcx, target_ty)).llvm_type(bx.cx),
            bx.cx.data_layout().pointer_size(),
            bx.cx.data_layout().pointer_align(),
        )
    } else {
        (layout.llvm_type(bx.cx), layout.size, layout.align)
    };
    let (addr, addr_align) = emit_direct_ptr_va_arg(
        bx,
        list,
        size,
        align.abi,
        slot_size,
        allow_higher_align,
        force_right_adjust,
    );
    if indirect {
        let tmp_ret = bx.load(llty, addr, addr_align);
        bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, layout.align.abi)
    } else {
        bx.load(llty, addr, addr_align)
    }
}

fn emit_aapcs_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

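    // Implementation of the AAPCS64 calling convention for va_args, see
    // https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst
    //
    // typedef struct va_list {
    //     void *stack;  // next stack param
    //     void *gr_top; // end of GP arg reg save area
    //     void *vr_top; // end of FP/SIMD arg reg save area
    //     int gr_offs;  // offset from gr_top to next GP register arg
    //     int vr_offs;  // offset from vr_top to next FP/SIMD register arg
    // } va_list;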
    let va_list_addr = list.immediate();

    let ptr_offset = 8;
    let i32_offset = 4;
    let gr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(ptr_offset));
    let vr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * ptr_offset));
    let gr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset));
    let vr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset + i32_offset));

    let layout = bx.cx.layout_of(target_ty);

    let maybe_reg = bx.append_sibling_block("va_arg.maybe_reg");
    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let on_stack = bx.append_sibling_block("va_arg.on_stack");
    let end = bx.append_sibling_block("va_arg.end");
    let zero = bx.const_i32(0);
    let offset_align = Align::from_bytes(4).unwrap();

    let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
    let (reg_off, reg_top, slot_size) = if gr_type {
        let nreg = layout.size.bytes().div_ceil(8);
        (gr_offs, gr_top, nreg * 8)
    } else {
        let nreg = layout.size.bytes().div_ceil(16);
        (vr_offs, vr_top, nreg * 16)
    };

    let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
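    // The register offsets start out negative and grow towards zero, so if the
    // offset is already >= 0 the register save area is exhausted and the value
    // was passed on the stack.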
    let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
    bx.cond_br(use_stack, on_stack, maybe_reg);

    bx.switch_to_block(maybe_reg);
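    // Types with an alignment greater than 8 are passed in an even/odd register
    // pair, so round the (negative) register offset up to a multiple of 16.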
    if gr_type && layout.align.bytes() > 8 {
        reg_off_v = bx.add(reg_off_v, bx.const_i32(15));
        reg_off_v = bx.and(reg_off_v, bx.const_i32(-16));
    }
    let new_reg_off_v = bx.add(reg_off_v, bx.const_i32(slot_size as i32));

    bx.store(new_reg_off_v, reg_off, offset_align);

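    // If the new offset overshoots zero, this argument did not fit into the
    // register save area after all and must be taken from the stack.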
    let use_stack = bx.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
    bx.cond_br(use_stack, on_stack, in_reg);

    bx.switch_to_block(in_reg);
    let top_type = bx.type_ptr();
    let top = bx.load(top_type, reg_top, dl.pointer_align().abi);

    let mut reg_addr = bx.ptradd(top, reg_off_v);
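    // On big-endian targets a value smaller than its slot is stored in the
    // high bytes of the slot, so the address must be right-adjusted.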
    if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
        let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
        reg_addr = bx.ptradd(reg_addr, offset);
    }
    let reg_type = layout.llvm_type(bx);
    let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
    bx.br(end);

    bx.switch_to_block(on_stack);
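    // The argument is on the stack; the common pointer-bump lowering applies,
    // with 8-byte stack slots.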
    let stack_value = emit_ptr_va_arg(
        bx,
        list,
        target_ty,
        PassMode::Direct,
        SlotSize::Bytes8,
        AllowHigherAlign::Yes,
        ForceRightAdjust::No,
    );
    bx.br(end);

    bx.switch_to_block(end);
    let val =
        bx.phi(layout.immediate_llvm_type(bx), &[reg_value, stack_value], &[in_reg, on_stack]);

    val
}

fn emit_powerpc_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

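    // Implementation of the PowerPC SysV (32-bit) calling convention for
    // va_args, see https://www.uclibc.org/docs/psABI-ppc.pdf.
    //
    // typedef struct __va_list_tag {
    //     unsigned char gpr;       // count of GPR args already used (0..=8)
    //     unsigned char fpr;       // count of FPR args already used (0..=8)
    //     unsigned short reserved;
    //     void *overflow_arg_area; // arguments passed on the stack
    //     void *reg_save_area;     // 8 saved GPRs, followed by 8 saved FPRs
    // } va_list[1];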
    let va_list_addr = list.immediate();

    let layout = {
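        // Peel off any newtype wrappers: clang passes the unwrapped scalar to
        // variadic functions, so the lowering must look at the innermost type.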
        let mut layout = bx.cx.layout_of(target_ty);

        while let Some((_, inner)) = layout.non_1zst_field(bx.cx) {
            layout = inner;
        }

        layout
    };

    let target = &bx.cx.tcx.sess.target;
    let is_soft_float_abi = target.abi == "softfloat";
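    // Rust currently has no powerpc softfloat targets; the softfloat branches
    // below are kept only to mirror the clang lowering.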
    assert!(!is_soft_float_abi);

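    // All instances of VaArgSafe are passed directly on powerpc.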
    let is_indirect = false;

    let (is_i64, is_int, is_f64) = match layout.layout.backend_repr() {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            rustc_abi::Primitive::Int(integer, _) => (integer.size().bits() == 64, true, false),
            rustc_abi::Primitive::Float(float) => (false, false, float.size().bits() == 64),
            rustc_abi::Primitive::Pointer(_) => (false, true, false),
        },
        _ => unreachable!("all instances of VaArgSafe are represented as scalars"),
    };

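    // Integer arguments (and everything under the softfloat ABI) consume GPRs,
    // so their count lives in the `gpr` byte; floats are counted in `fpr`.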
    let num_regs_addr = if is_int || is_soft_float_abi {
        va_list_addr
    } else {
        bx.inbounds_ptradd(va_list_addr, bx.const_usize(1))
    };

    let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align.abi);

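    // i64 (and f64 under softfloat) occupies an aligned register pair, so
    // round the register count up to an even number.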
    if is_i64 || (is_f64 && is_soft_float_abi) {
        num_regs = bx.add(num_regs, bx.const_u8(1));
        num_regs = bx.and(num_regs, bx.const_u8(0b1111_1110));
    }

    let max_regs = 8u8;
    let use_regs = bx.icmp(IntPredicate::IntULT, num_regs, bx.const_u8(max_regs));
    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;

    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let in_mem = bx.append_sibling_block("va_arg.in_mem");
    let end = bx.append_sibling_block("va_arg.end");

    bx.cond_br(use_regs, in_reg, in_mem);

    let reg_addr = {
        bx.switch_to_block(in_reg);

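        // reg_save_area sits at byte offset 1 (gpr) + 1 (fpr) + 2 (reserved) +
        // 4 (overflow_arg_area) = 8 of the va_list; it holds the 8 saved GPRs
        // (4 bytes each) followed by the 8 saved FPRs (8 bytes each), so the
        // FPR area starts 32 bytes in.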
        let reg_safe_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2 + 4));
        let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, ptr_align_abi);

        if !is_int && !is_soft_float_abi {
            reg_addr = bx.inbounds_ptradd(reg_addr, bx.cx.const_usize(32))
        }

        let reg_size = if is_int || is_soft_float_abi { 4 } else { 8 };
        let reg_offset = bx.mul(num_regs, bx.cx().const_u8(reg_size));
        let reg_addr = bx.inbounds_ptradd(reg_addr, reg_offset);

        let reg_incr = if is_i64 || (is_f64 && is_soft_float_abi) { 2 } else { 1 };
        let new_num_regs = bx.add(num_regs, bx.cx.const_u8(reg_incr));
        bx.store(new_num_regs, num_regs_addr, dl.i8_align.abi);

        bx.br(end);

        reg_addr
    };

    let mem_addr = {
        bx.switch_to_block(in_mem);

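        // Mark all registers as used so that subsequent va_arg calls for this
        // list go straight to the overflow area.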
        bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align.abi);

        let overflow_area_align = Align::from_bytes(4).unwrap();

        let size = if !is_indirect {
            layout.layout.size.align_to(overflow_area_align)
        } else {
            dl.pointer_size()
        };

        let overflow_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2));
        let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, ptr_align_abi);

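        // Round the overflow-area pointer up for arguments that need more than
        // the area's default 4-byte alignment.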
        if layout.layout.align.abi > overflow_area_align {
            overflow_area = round_pointer_up_to_alignment(
                bx,
                overflow_area,
                layout.layout.align.abi,
                bx.type_ptr(),
            );
        }

        let mem_addr = overflow_area;

        overflow_area = bx.inbounds_ptradd(overflow_area, bx.const_usize(size.bytes()));
        bx.store(overflow_area, overflow_area_ptr, ptr_align_abi);

        bx.br(end);

        mem_addr
    };

    bx.switch_to_block(end);
    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
    let val_type = layout.llvm_type(bx);
    let val_addr =
        if is_indirect { bx.load(bx.cx.type_ptr(), val_addr, ptr_align_abi) } else { val_addr };
    bx.load(val_type, val_addr, layout.align.abi)
}

fn emit_s390x_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

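    // Implementation of the s390x ELF ABI calling convention for va_args, see
    // https://github.com/IBM/s390x-abi
    //
    // typedef struct __va_list_tag {
    //     long __gpr;                // number of GPR arguments already used
    //     long __fpr;                // number of FPR arguments already used
    //     void *__overflow_arg_area; // arguments passed on the stack
    //     void *__reg_save_area;     // the saved argument registers
    // } va_list[1];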
    let va_list_addr = list.immediate();

    let i64_offset = 8;
    let ptr_offset = 8;
    let gpr = va_list_addr;
    let fpr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(i64_offset));
    let overflow_arg_area = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset));
    let reg_save_area =
        bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset + ptr_offset));

    let layout = bx.cx.layout_of(target_ty);

    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let in_mem = bx.append_sibling_block("va_arg.in_mem");
    let end = bx.append_sibling_block("va_arg.end");
    let ptr_align_abi = dl.pointer_align().abi;

    let target_ty_size = bx.cx.size_of(target_ty).bytes();
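    // Types larger than 8 bytes, or whose size is not a power of two, are
    // passed by reference; smaller values are right-aligned in an 8-byte slot.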
    let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
    let unpadded_size = if indirect { 8 } else { target_ty_size };
    let padded_size = 8;
    let padding = padded_size - unpadded_size;

    let gpr_type = indirect || !layout.is_single_fp_element(bx.cx);
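    // In the register save area, the GPR arguments (r2..r6) start at byte
    // 2 * 8 = 16 and the FPR arguments (f0, f2, f4, f6) at byte 16 * 8 = 128,
    // hence the save indices 2 and 16.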
    let (max_regs, reg_count, reg_save_index, reg_padding) =
        if gpr_type { (5, gpr, 2, padding) } else { (4, fpr, 16, 0) };

    let reg_count_v = bx.load(bx.type_i64(), reg_count, Align::from_bytes(8).unwrap());
    let use_regs = bx.icmp(IntPredicate::IntULT, reg_count_v, bx.const_u64(max_regs));
    bx.cond_br(use_regs, in_reg, in_mem);

    bx.switch_to_block(in_reg);

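    // reg_addr = reg_save_area + reg_save_index * 8 + reg_count * 8 + padding.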
    let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, ptr_align_abi);
    let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
    let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
    let reg_addr = bx.ptradd(reg_ptr_v, reg_off);

    let new_reg_count_v = bx.add(reg_count_v, bx.const_u64(1));
    bx.store(new_reg_count_v, reg_count, Align::from_bytes(8).unwrap());
    bx.br(end);

    bx.switch_to_block(in_mem);

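    // On the stack the value is likewise right-aligned within an 8-byte slot,
    // so skip the padding bytes before reading it.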
    let arg_ptr_v = bx.load(bx.type_ptr(), overflow_arg_area, ptr_align_abi);
    let arg_off = bx.const_u64(padding);
    let mem_addr = bx.ptradd(arg_ptr_v, arg_off);

    let arg_size = bx.cx().const_u64(padded_size);
    let new_arg_ptr_v = bx.inbounds_ptradd(arg_ptr_v, arg_size);
    bx.store(new_arg_ptr_v, overflow_arg_area, ptr_align_abi);
    bx.br(end);

    bx.switch_to_block(end);
    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
    let val_type = layout.llvm_type(bx);
    let val_addr =
        if indirect { bx.load(bx.cx.type_ptr(), val_addr, ptr_align_abi) } else { val_addr };
    bx.load(val_type, val_addr, layout.align.abi)
}

fn emit_x86_64_sysv64_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

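    // Implementation of the x86_64 SysV calling convention for va_args, see
    // https://gitlab.com/x86-psABIs/x86-64-ABI (section 3.5.7); the lowering
    // closely follows what clang emits.
    //
    // typedef struct __va_list_tag {
    //     unsigned int gp_offset;  // offset into reg_save_area for the next GP register
    //     unsigned int fp_offset;  // offset into reg_save_area for the next SSE register
    //     void *overflow_arg_area; // arguments passed on the stack
    //     void *reg_save_area;     // 6 GP registers (48 bytes), then 8 SSE registers
    // } va_list[1];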
    let va_list_addr = list.immediate();

    let layout = {
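        // Peel off any newtype wrappers: LLVM/clang pass the underlying scalar
        // (or scalar pair) to variadic functions, so the unwrapped
        // representation is the one that matters here.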
        let mut layout = bx.cx.layout_of(target_ty);

        while let Some((_, inner)) = layout.non_1zst_field(bx.cx) {
            layout = inner;
        }

        layout
    };

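    // AMD64-ABI 3.5.7p5: Step 1. Determine whether the type may be passed in
    // registers; if not, go to step 7 (the memory path). Step 2. Compute
    // num_gp_registers and num_fp_registers, the number of GP and SSE
    // registers needed to pass the type.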
    let mut num_gp_registers = 0;
    let mut num_fp_registers = 0;

    let mut registers_for_primitive = |p| match p {
        Primitive::Int(integer, _is_signed) => {
            num_gp_registers += integer.size().bytes().div_ceil(8) as u32;
        }
        Primitive::Float(float) => {
            num_fp_registers += float.size().bytes().div_ceil(16) as u32;
        }
        Primitive::Pointer(_) => {
            num_gp_registers += 1;
        }
    };

    match layout.layout.backend_repr() {
        BackendRepr::Scalar(scalar) => {
            registers_for_primitive(scalar.primitive());
        }
        BackendRepr::ScalarPair(scalar1, scalar2) => {
            registers_for_primitive(scalar1.primitive());
            registers_for_primitive(scalar2.primitive());
        }
        BackendRepr::SimdVector { .. } => {
            unreachable!(
                "No x86-64 SysV va_arg implementation for {:?}",
                layout.layout.backend_repr()
            )
        }
        BackendRepr::Memory { .. } => {
            let mem_addr = x86_64_sysv64_va_arg_from_memory(bx, va_list_addr, layout);
            return bx.load(layout.llvm_type(bx), mem_addr, layout.align.abi);
        }
    };

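    // AMD64-ABI 3.5.7p5: Step 3. Verify whether the arguments fit into the
    // remaining registers; if gp_offset > 48 - num_gp * 8 or
    // fp_offset > 176 - num_fp * 16, go to step 7 (the memory path).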
    let unsigned_int_offset = 4;
    let ptr_offset = 8;
    let gp_offset_ptr = va_list_addr;
    let fp_offset_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(unsigned_int_offset));

    let gp_offset_v = bx.load(bx.type_i32(), gp_offset_ptr, Align::from_bytes(8).unwrap());
    let fp_offset_v = bx.load(bx.type_i32(), fp_offset_ptr, Align::from_bytes(4).unwrap());

    let mut use_regs = bx.const_bool(false);

    if num_gp_registers > 0 {
        let max_offset_val = 48u32 - num_gp_registers * 8;
        let fits_in_gp = bx.icmp(IntPredicate::IntULE, gp_offset_v, bx.const_u32(max_offset_val));
        use_regs = fits_in_gp;
    }

    if num_fp_registers > 0 {
        let max_offset_val = 176u32 - num_fp_registers * 16;
        let fits_in_fp = bx.icmp(IntPredicate::IntULE, fp_offset_v, bx.const_u32(max_offset_val));
        use_regs = if num_gp_registers > 0 { bx.and(use_regs, fits_in_fp) } else { fits_in_fp };
    }

    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let in_mem = bx.append_sibling_block("va_arg.in_mem");
    let end = bx.append_sibling_block("va_arg.end");

    bx.cond_br(use_regs, in_reg, in_mem);

    bx.switch_to_block(in_reg);

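    // AMD64-ABI 3.5.7p5: Step 4. Fetch the type from reg_save_area at offset
    // gp_offset and/or fp_offset. This may require copying into a temporary
    // when the value spans different register classes or needs an alignment
    // greater than that of the save-area slots.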
    let reg_save_area_ptr =
        bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * unsigned_int_offset + ptr_offset));
    let reg_save_area_v = bx.load(bx.type_ptr(), reg_save_area_ptr, dl.pointer_align().abi);

    let reg_addr = match layout.layout.backend_repr() {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Int(_, _) | Primitive::Pointer(_) => {
                let reg_addr = bx.inbounds_ptradd(reg_save_area_v, gp_offset_v);

                let gp_align = Align::from_bytes(8).unwrap();
                copy_to_temporary_if_more_aligned(bx, reg_addr, layout, gp_align)
            }
            Primitive::Float(_) => bx.inbounds_ptradd(reg_save_area_v, fp_offset_v),
        },
        BackendRepr::ScalarPair(scalar1, scalar2) => {
            let ty_lo = bx.cx().scalar_pair_element_backend_type(layout, 0, false);
            let ty_hi = bx.cx().scalar_pair_element_backend_type(layout, 1, false);

            let align_lo = layout.field(bx.cx, 0).layout.align().abi;
            let align_hi = layout.field(bx.cx, 1).layout.align().abi;

            match (scalar1.primitive(), scalar2.primitive()) {
                (Primitive::Float(_), Primitive::Float(_)) => {
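                    // SSE registers are spaced 16 bytes apart in the register
                    // save area; collect the two eightbytes into a temporary.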
                    let reg_lo_addr = bx.inbounds_ptradd(reg_save_area_v, fp_offset_v);
                    let reg_hi_addr = bx.inbounds_ptradd(reg_lo_addr, bx.const_i32(16));

                    let align = layout.layout.align().abi;
                    let tmp = bx.alloca(layout.layout.size(), align);

                    let reg_lo = bx.load(ty_lo, reg_lo_addr, align_lo);
                    let reg_hi = bx.load(ty_hi, reg_hi_addr, align_hi);

                    let offset = scalar1.size(bx.cx).align_to(align_hi).bytes();
                    let field0 = tmp;
                    let field1 = bx.inbounds_ptradd(tmp, bx.const_u32(offset as u32));

                    bx.store(reg_lo, field0, align);
                    bx.store(reg_hi, field1, align);

                    tmp
                }
                (Primitive::Float(_), _) | (_, Primitive::Float(_)) => {
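                    // One half lives in a GP register and the other in an SSE
                    // register; reassemble the pair in a temporary.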
                    let gp_addr = bx.inbounds_ptradd(reg_save_area_v, gp_offset_v);
                    let fp_addr = bx.inbounds_ptradd(reg_save_area_v, fp_offset_v);

                    let (reg_lo_addr, reg_hi_addr) = match scalar1.primitive() {
                        Primitive::Float(_) => (fp_addr, gp_addr),
                        Primitive::Int(_, _) | Primitive::Pointer(_) => (gp_addr, fp_addr),
                    };

                    let tmp = bx.alloca(layout.layout.size(), layout.layout.align().abi);

                    let reg_lo = bx.load(ty_lo, reg_lo_addr, align_lo);
                    let reg_hi = bx.load(ty_hi, reg_hi_addr, align_hi);

                    let offset = scalar1.size(bx.cx).align_to(align_hi).bytes();
                    let field0 = tmp;
                    let field1 = bx.inbounds_ptradd(tmp, bx.const_u32(offset as u32));

                    bx.store(reg_lo, field0, align_lo);
                    bx.store(reg_hi, field1, align_hi);

                    tmp
                }
                (_, _) => {
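                    // Both eightbytes are integers/pointers and sit in
                    // consecutive GP register slots.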
                    let reg_addr = bx.inbounds_ptradd(reg_save_area_v, gp_offset_v);

                    let gp_align = Align::from_bytes(8).unwrap();
                    copy_to_temporary_if_more_aligned(bx, reg_addr, layout, gp_align)
                }
            }
        }
        BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => unreachable!(),
    };

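    // AMD64-ABI 3.5.7p5: Step 5. Advance gp_offset and fp_offset past the
    // registers just consumed. (The gp_offset store may assume 8-byte
    // alignment because it is the first field of the 8-aligned va_list.)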
    if num_gp_registers > 0 {
        let offset = bx.const_u32(num_gp_registers * 8);
        let sum = bx.add(gp_offset_v, offset);
        bx.store(sum, gp_offset_ptr, Align::from_bytes(8).unwrap());
    }

    if num_fp_registers > 0 {
        let offset = bx.const_u32(num_fp_registers * 16);
        let sum = bx.add(fp_offset_v, offset);
        bx.store(sum, fp_offset_ptr, Align::from_bytes(4).unwrap());
    }

    bx.br(end);

    bx.switch_to_block(in_mem);
    let mem_addr = x86_64_sysv64_va_arg_from_memory(bx, va_list_addr, layout);
    bx.br(end);

    bx.switch_to_block(end);

    let val_type = layout.llvm_type(bx);
    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);

    bx.load(val_type, val_addr, layout.align.abi)
}

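/// If `layout` requires more alignment than `src_align` guarantees, copies the
/// value to a suitably aligned temporary and returns that; otherwise returns
/// `reg_addr` unchanged.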
fn copy_to_temporary_if_more_aligned<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    reg_addr: &'ll Value,
    layout: TyAndLayout<'tcx, Ty<'tcx>>,
    src_align: Align,
) -> &'ll Value {
    if layout.layout.align.abi > src_align {
        let tmp = bx.alloca(layout.layout.size(), layout.layout.align().abi);
        bx.memcpy(
            tmp,
            layout.layout.align.abi,
            reg_addr,
            src_align,
            bx.const_u32(layout.layout.size().bytes() as u32),
            MemFlags::empty(),
            None,
        );
        tmp
    } else {
        reg_addr
    }
}

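/// AMD64-ABI 3.5.7p5: Steps 7-10, the memory path: fetch the argument from
/// overflow_arg_area and advance that pointer to the next 8-byte boundary.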
fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    va_list_addr: &'ll Value,
    layout: TyAndLayout<'tcx, Ty<'tcx>>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();
    let ptr_align_abi = dl.pointer_align().abi;

    let overflow_arg_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.const_usize(8));

    let overflow_arg_area_v = bx.load(bx.type_ptr(), overflow_arg_area_ptr, ptr_align_abi);
    if layout.layout.align.bytes() > 8 {
        unreachable!("all instances of VaArgSafe have an alignment <= 8");
    }

    let mem_addr = overflow_arg_area_v;

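    // Steps 9-10: bump overflow_arg_area by the argument's size rounded up to
    // the next multiple of 8.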
    let size_in_bytes = layout.layout.size().bytes();
    let offset = bx.const_i32(size_in_bytes.next_multiple_of(8) as i32);
    let overflow_arg_area = bx.inbounds_ptradd(overflow_arg_area_v, offset);
    bx.store(overflow_arg_area, overflow_arg_area_ptr, ptr_align_abi);

    mem_addr
}

fn emit_xtensa_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
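    // Implementation of va_arg for Xtensa. There is no authoritative ABI
    // document; this follows what GCC emits. The va_list has three fields:
    //
    // struct __va_list_tag {
    //     int32_t *va_stk; // arguments passed on the stack
    //     int32_t *va_reg; // the register save area, filled by the prologue
    //     int32_t va_ndx;  // byte offset of the next argument
    // };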
    let va_list_addr = list.immediate();
    let layout = bx.cx.layout_of(target_ty);
    let from_stack = bx.append_sibling_block("va_arg.from_stack");
    let from_regsave = bx.append_sibling_block("va_arg.from_regsave");
    let end = bx.append_sibling_block("va_arg.end");
    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;

    let va_reg_offset = 4;
    let va_ndx_offset = va_reg_offset + 4;
    let offset_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(va_ndx_offset));

    let offset = bx.load(bx.type_i32(), offset_ptr, bx.tcx().data_layout.i32_align.abi);
    let offset = round_up_to_alignment(bx, offset, layout.align.abi);

    let slot_size = layout.size.align_to(Align::from_bytes(4).unwrap()).bytes() as i32;

    let offset_next = bx.add(offset, bx.const_i32(slot_size));

    let regsave_size = bx.const_i32(24);
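    // The register save area holds six 4-byte words (registers a2..a7), i.e.
    // the first 24 bytes of arguments.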
    let use_regsave = bx.icmp(IntPredicate::IntULE, offset_next, regsave_size);
    bx.cond_br(use_regsave, from_regsave, from_stack);

    bx.switch_to_block(from_regsave);
    bx.store(offset_next, offset_ptr, ptr_align_abi);

    let regsave_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(va_reg_offset));
    let regsave_area = bx.load(bx.type_ptr(), regsave_area_ptr, ptr_align_abi);
    let regsave_value_ptr = bx.inbounds_ptradd(regsave_area, offset);
    bx.br(end);

    bx.switch_to_block(from_stack);

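    // The first stack argument lives at offset 32 from va_stk, so the first
    // time we switch from the register save area to the stack the offset must
    // be clamped up: offset_corrected = max(offset, 32). This mirrors GCC's
    // lowering.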
    let stack_offset_start = bx.const_i32(32);
    let needs_correction = bx.icmp(IntPredicate::IntULE, offset, stack_offset_start);
    let offset_corrected = bx.select(needs_correction, stack_offset_start, offset);

    let offset_next_corrected = bx.add(offset_corrected, bx.const_i32(slot_size));
    bx.store(offset_next_corrected, offset_ptr, ptr_align_abi);

    let stack_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(0));
    let stack_area = bx.load(bx.type_ptr(), stack_area_ptr, ptr_align_abi);
    let stack_value_ptr = bx.inbounds_ptradd(stack_area, offset_corrected);
    bx.br(end);

    bx.switch_to_block(end);

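    // On big-endian targets, values smaller than their slot would have to be
    // read from the high end of the slot; all Xtensa targets rustc currently
    // supports are little-endian, so just assert that.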
    assert!(bx.tcx().sess.target.endian == Endian::Little);
    let value_ptr =
        bx.phi(bx.type_ptr(), &[regsave_value_ptr, stack_value_ptr], &[from_regsave, from_stack]);
    return bx.load(layout.llvm_type(bx), value_ptr, layout.align.abi);
}

pub(super) fn emit_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    addr: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
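    // Determine the va_arg implementation to use. LLVM's own va_arg
    // instruction is incomplete on several targets, so it is only used as a
    // fallback.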
    let target = &bx.cx.tcx.sess.target;

    match &*target.arch {
        "x86" => emit_ptr_va_arg(
            bx,
            addr,
            target_ty,
            PassMode::Direct,
            SlotSize::Bytes4,
            if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
            ForceRightAdjust::No,
        ),
        "aarch64" | "arm64ec" if target.is_like_windows || target.is_like_darwin => {
            emit_ptr_va_arg(
                bx,
                addr,
                target_ty,
                PassMode::Direct,
                SlotSize::Bytes8,
                if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
                ForceRightAdjust::No,
            )
        }
        "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
        "arm" => {
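            // All `VaArgSafe` types fit in at most 16 bytes, so they are
            // always passed directly on 32-bit Arm; the assert guards that
            // assumption.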
            assert!(bx.cx.size_of(target_ty).bytes() <= 16);

            emit_ptr_va_arg(
                bx,
                addr,
                target_ty,
                PassMode::Direct,
                SlotSize::Bytes4,
                AllowHigherAlign::Yes,
                ForceRightAdjust::No,
            )
        }
        "s390x" => emit_s390x_va_arg(bx, addr, target_ty),
        "powerpc" => emit_powerpc_va_arg(bx, addr, target_ty),
        "powerpc64" | "powerpc64le" => emit_ptr_va_arg(
            bx,
            addr,
            target_ty,
            PassMode::Direct,
            SlotSize::Bytes8,
            AllowHigherAlign::Yes,
            match &*target.arch {
                "powerpc64" => ForceRightAdjust::Yes,
                _ => ForceRightAdjust::No,
            },
        ),
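        // Windows x86_64: values bigger than 8 bytes, or without a
        // power-of-two size, are passed by reference.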
        "x86_64" if target.is_like_windows => {
            let target_ty_size = bx.cx.size_of(target_ty).bytes();
            emit_ptr_va_arg(
                bx,
                addr,
                target_ty,
                if target_ty_size > 8 || !target_ty_size.is_power_of_two() {
                    PassMode::Indirect
                } else {
                    PassMode::Direct
                },
                SlotSize::Bytes8,
                AllowHigherAlign::No,
                ForceRightAdjust::No,
            )
        }
        "x86_64" => emit_x86_64_sysv64_va_arg(bx, addr, target_ty),
        "xtensa" => emit_xtensa_va_arg(bx, addr, target_ty),
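        // For all other architecture/OS combinations, fall back to the LLVM
        // va_arg instruction:
        // https://llvm.org/docs/LangRef.html#va-arg-instruction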
        _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),
    }
}