use rustc_abi::{Align, BackendRepr, Endian, HasDataLayout, Primitive, Size, TyAndLayout};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::common::IntPredicate;
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::traits::{
    BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods, LayoutTypeCodegenMethods,
};
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_target::spec::{Abi, Arch, Env};

use crate::builder::Builder;
use crate::llvm::{Type, Value};
use crate::type_of::LayoutLlvmExt;

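// Rounds the integer offset `value` up to the next multiple of `align` using the usual
// add-then-mask trick; `align` must be a power of two.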
fn round_up_to_alignment<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    mut value: &'ll Value,
    align: Align,
) -> &'ll Value {
    value = bx.add(value, bx.cx().const_i32(align.bytes() as i32 - 1));
    bx.and(value, bx.cx().const_i32(-(align.bytes() as i32)))
}

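// Rounds a pointer up to the next multiple of `align` via `llvm.ptrmask`, which keeps the
// provenance of the original pointer intact.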
fn round_pointer_up_to_alignment<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    addr: &'ll Value,
    align: Align,
    ptr_ty: &'ll Type,
) -> &'ll Value {
    let ptr = bx.inbounds_ptradd(addr, bx.const_i32(align.bytes() as i32 - 1));
    bx.call_intrinsic(
        "llvm.ptrmask",
        &[ptr_ty, bx.type_i32()],
        &[ptr, bx.const_int(bx.isize_ty, -(align.bytes() as isize) as i64)],
    )
}

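// The simple "pointer bump" lowering shared by several targets: load the current argument
// pointer from the `va_list`, align it if the type needs (and the ABI allows) more than the
// slot alignment, store back a pointer advanced by the slot-aligned size, and return the
// argument's address. On big-endian targets that pass small values right-adjusted in their
// slot, the returned address is shifted to the value's actual start.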
fn emit_direct_ptr_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    size: Size,
    align: Align,
    slot_size: Align,
    allow_higher_align: bool,
    force_right_adjust: bool,
) -> (&'ll Value, Align) {
    let va_list_ty = bx.type_ptr();
    let va_list_addr = list.immediate();

    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;
    let ptr = bx.load(va_list_ty, va_list_addr, ptr_align_abi);

    let (addr, addr_align) = if allow_higher_align && align > slot_size {
        (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align)
    } else {
        (ptr, slot_size)
    };

    let aligned_size = size.align_to(slot_size).bytes() as i32;
    let full_direct_size = bx.cx().const_i32(aligned_size);
    let next = bx.inbounds_ptradd(addr, full_direct_size);
    bx.store(next, va_list_addr, ptr_align_abi);

    if size.bytes() < slot_size.bytes()
        && bx.tcx().sess.target.endian == Endian::Big
        && force_right_adjust
    {
        let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
        let adjusted = bx.inbounds_ptradd(addr, adjusted_size);
        (adjusted, addr_align)
    } else {
        (addr, addr_align)
    }
}

enum PassMode {
    Direct,
    Indirect,
}

enum SlotSize {
    Bytes8 = 8,
    Bytes4 = 4,
}

enum AllowHigherAlign {
    No,
    Yes,
}

enum ForceRightAdjust {
    No,
    Yes,
}

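// Wrapper around `emit_direct_ptr_va_arg` that also loads the value itself. For
// `PassMode::Indirect` the slot holds a pointer to the argument, so an extra load is needed
// to reach the value.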
fn emit_ptr_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
    pass_mode: PassMode,
    slot_size: SlotSize,
    allow_higher_align: AllowHigherAlign,
    force_right_adjust: ForceRightAdjust,
) -> &'ll Value {
    let indirect = matches!(pass_mode, PassMode::Indirect);
    let allow_higher_align = matches!(allow_higher_align, AllowHigherAlign::Yes);
    let force_right_adjust = matches!(force_right_adjust, ForceRightAdjust::Yes);
    let slot_size = Align::from_bytes(slot_size as u64).unwrap();

    let layout = bx.cx.layout_of(target_ty);
    let (llty, size, align) = if indirect {
        (
            bx.cx.layout_of(Ty::new_imm_ptr(bx.cx.tcx, target_ty)).llvm_type(bx.cx),
            bx.cx.data_layout().pointer_size(),
            bx.cx.data_layout().pointer_align(),
        )
    } else {
        (layout.llvm_type(bx.cx), layout.size, layout.align)
    };
    let (addr, addr_align) = emit_direct_ptr_va_arg(
        bx,
        list,
        size,
        align.abi,
        slot_size,
        allow_higher_align,
        force_right_adjust,
    );
    if indirect {
        let tmp_ret = bx.load(llty, addr, addr_align);
        bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
    } else {
        bx.load(llty, addr, addr_align)
    }
}

fn emit_aapcs_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

    let va_list_addr = list.immediate();

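    // The AAPCS64 `va_list` is a struct along these lines (byte offsets in parentheses):
    //
    //     typedef struct va_list {
    //         void *__stack;   // (0)  next stack argument
    //         void *__gr_top;  // (8)  end of the general-purpose register save area
    //         void *__vr_top;  // (16) end of the FP/SIMD register save area
    //         int   __gr_offs; // (24) negative offset from __gr_top to the next GP reg arg
    //         int   __vr_offs; // (28) negative offset from __vr_top to the next FP/SIMD reg arg
    //     } va_list;
    //
    // A non-negative `__gr_offs`/`__vr_offs` means the corresponding register save area is
    // already exhausted.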
    let ptr_offset = 8;
    let i32_offset = 4;
    let gr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(ptr_offset));
    let vr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * ptr_offset));
    let gr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset));
    let vr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset + i32_offset));

    let layout = bx.cx.layout_of(target_ty);

    let maybe_reg = bx.append_sibling_block("va_arg.maybe_reg");
    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let on_stack = bx.append_sibling_block("va_arg.on_stack");
    let end = bx.append_sibling_block("va_arg.end");
    let zero = bx.const_i32(0);
    let offset_align = Align::from_bytes(4).unwrap();

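    // Pointers and integers travel in general-purpose registers (8-byte slots in the GP save
    // area); everything else handled here goes in FP/SIMD registers (16-byte slots).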
    let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
    let (reg_off, reg_top, slot_size) = if gr_type {
        let nreg = layout.size.bytes().div_ceil(8);
        (gr_offs, gr_top, nreg * 8)
    } else {
        let nreg = layout.size.bytes().div_ceil(16);
        (vr_offs, vr_top, nreg * 16)
    };

    let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
    let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
    bx.cond_br(use_stack, on_stack, maybe_reg);

    bx.switch_to_block(maybe_reg);
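    // Arguments with 16-byte alignment consume an aligned pair of GP registers, so first
    // round the (negative) offset into the GP save area up to a multiple of 16.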
    if gr_type && layout.align.bytes() > 8 {
        reg_off_v = bx.add(reg_off_v, bx.const_i32(15));
        reg_off_v = bx.and(reg_off_v, bx.const_i32(-16));
    }
    let new_reg_off_v = bx.add(reg_off_v, bx.const_i32(slot_size as i32));

    bx.store(new_reg_off_v, reg_off, offset_align);

    let use_stack = bx.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
    bx.cond_br(use_stack, on_stack, in_reg);

    bx.switch_to_block(in_reg);
    let top_type = bx.type_ptr();
    let top = bx.load(top_type, reg_top, dl.pointer_align().abi);

    let mut reg_addr = bx.ptradd(top, reg_off_v);
    if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
        let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
        reg_addr = bx.ptradd(reg_addr, offset);
    }
    let reg_type = layout.llvm_type(bx);
    let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
    bx.br(end);

    bx.switch_to_block(on_stack);
    let stack_value = emit_ptr_va_arg(
        bx,
        list,
        target_ty,
        PassMode::Direct,
        SlotSize::Bytes8,
        AllowHigherAlign::Yes,
        ForceRightAdjust::No,
    );
    bx.br(end);

    bx.switch_to_block(end);
    let val =
        bx.phi(layout.immediate_llvm_type(bx), &[reg_value, stack_value], &[in_reg, on_stack]);

    val
}

fn emit_powerpc_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

    let va_list_addr = list.immediate();

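    // On 32-bit PowerPC (SysV ABI) the `va_list` is a struct roughly like:
    //
    //     typedef struct __va_list_tag {
    //         unsigned char gpr;          // (0) GP registers consumed so far (r3..r10)
    //         unsigned char fpr;          // (1) FP registers consumed so far (f1..f8)
    //         unsigned short reserved;    // (2)
    //         void *overflow_arg_area;    // (4) next argument passed on the stack
    //         void *reg_save_area;        // (8) saved register area
    //     } va_list[1];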
    let layout = {
        let mut layout = bx.cx.layout_of(target_ty);

        while let Some((_, inner)) = layout.non_1zst_field(bx.cx) {
            layout = inner;
        }

        layout
    };

    let target = &bx.cx.tcx.sess.target;
    let is_soft_float_abi = target.abi == Abi::SoftFloat;
    assert!(!is_soft_float_abi);

    let is_indirect = false;

    let (is_i64, is_int, is_f64) = match layout.layout.backend_repr() {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            rustc_abi::Primitive::Int(integer, _) => (integer.size().bits() == 64, true, false),
            rustc_abi::Primitive::Float(float) => (false, false, float.size().bits() == 64),
            rustc_abi::Primitive::Pointer(_) => (false, true, false),
        },
        _ => unreachable!("all instances of VaArgSafe are represented as scalars"),
    };

    let num_regs_addr = if is_int || is_soft_float_abi {
        va_list_addr
    } else {
        bx.inbounds_ptradd(va_list_addr, bx.const_usize(1))
    };

    let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align);

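    // 64-bit integers (and doubles under soft-float) occupy an aligned register pair, so round
    // the register count up to an even number first.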
    if is_i64 || (is_f64 && is_soft_float_abi) {
        num_regs = bx.add(num_regs, bx.const_u8(1));
        num_regs = bx.and(num_regs, bx.const_u8(0b1111_1110));
    }

    let max_regs = 8u8;
    let use_regs = bx.icmp(IntPredicate::IntULT, num_regs, bx.const_u8(max_regs));
    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;

    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let in_mem = bx.append_sibling_block("va_arg.in_mem");
    let end = bx.append_sibling_block("va_arg.end");

    bx.cond_br(use_regs, in_reg, in_mem);

    let reg_addr = {
        bx.switch_to_block(in_reg);

        let reg_safe_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2 + 4));
        let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, ptr_align_abi);

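        // Floating-point arguments are stored after the eight 4-byte GPR slots in the register
        // save area.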
        if !is_int && !is_soft_float_abi {
            reg_addr = bx.inbounds_ptradd(reg_addr, bx.cx.const_usize(32))
        }

        let reg_size = if is_int || is_soft_float_abi { 4 } else { 8 };
        let reg_offset = bx.mul(num_regs, bx.cx().const_u8(reg_size));
        let reg_addr = bx.inbounds_ptradd(reg_addr, reg_offset);

        let reg_incr = if is_i64 || (is_f64 && is_soft_float_abi) { 2 } else { 1 };
        let new_num_regs = bx.add(num_regs, bx.cx.const_u8(reg_incr));
        bx.store(new_num_regs, num_regs_addr, dl.i8_align);

        bx.br(end);

        reg_addr
    };

    let mem_addr = {
        bx.switch_to_block(in_mem);

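        // Once an argument spills to the overflow area, later arguments of the same class must
        // also come from there, so saturate the register counter.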
        bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align);

        let overflow_area_align = Align::from_bytes(4).unwrap();

        let size = if !is_indirect {
            layout.layout.size.align_to(overflow_area_align)
        } else {
            dl.pointer_size()
        };

        let overflow_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2));
        let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, ptr_align_abi);

        if layout.layout.align.abi > overflow_area_align {
            overflow_area = round_pointer_up_to_alignment(
                bx,
                overflow_area,
                layout.layout.align.abi,
                bx.type_ptr(),
            );
        }

        let mem_addr = overflow_area;

        overflow_area = bx.inbounds_ptradd(overflow_area, bx.const_usize(size.bytes()));
        bx.store(overflow_area, overflow_area_ptr, ptr_align_abi);

        bx.br(end);

        mem_addr
    };

    bx.switch_to_block(end);
    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
    let val_type = layout.llvm_type(bx);
    let val_addr =
        if is_indirect { bx.load(bx.cx.type_ptr(), val_addr, ptr_align_abi) } else { val_addr };
    bx.load(val_type, val_addr, layout.align.abi)
}

fn emit_s390x_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

    let va_list_addr = list.immediate();

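    // The s390x ELF ABI defines `va_list` as:
    //
    //     typedef struct __va_list_tag {
    //         long __gpr;                // (0)  GP registers consumed so far
    //         long __fpr;                // (8)  FP registers consumed so far
    //         void *__overflow_arg_area; // (16) next argument passed on the stack
    //         void *__reg_save_area;     // (24) saved register area
    //     } va_list[1];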
    let i64_offset = 8;
    let ptr_offset = 8;
    let gpr = va_list_addr;
    let fpr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(i64_offset));
    let overflow_arg_area = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset));
    let reg_save_area =
        bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset + ptr_offset));

    let layout = bx.cx.layout_of(target_ty);

    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let in_mem = bx.append_sibling_block("va_arg.in_mem");
    let end = bx.append_sibling_block("va_arg.end");
    let ptr_align_abi = dl.pointer_align().abi;

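    // Arguments are right-aligned within 8-byte slots. Values larger than 8 bytes, or whose
    // size is not a power of two, are passed indirectly (as a pointer to a copy).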
    let target_ty_size = bx.cx.size_of(target_ty).bytes();
    let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
    let unpadded_size = if indirect { 8 } else { target_ty_size };
    let padded_size = 8;
    let padding = padded_size - unpadded_size;

    let gpr_type = indirect || !layout.is_single_fp_element(bx.cx);
    let (max_regs, reg_count, reg_save_index, reg_padding) =
        if gpr_type { (5, gpr, 2, padding) } else { (4, fpr, 16, 0) };

    let reg_count_v = bx.load(bx.type_i64(), reg_count, Align::from_bytes(8).unwrap());
    let use_regs = bx.icmp(IntPredicate::IntULT, reg_count_v, bx.const_u64(max_regs));
    bx.cond_br(use_regs, in_reg, in_mem);

    bx.switch_to_block(in_reg);

    let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, ptr_align_abi);
    let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
    let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
    let reg_addr = bx.ptradd(reg_ptr_v, reg_off);

    let new_reg_count_v = bx.add(reg_count_v, bx.const_u64(1));
    bx.store(new_reg_count_v, reg_count, Align::from_bytes(8).unwrap());
    bx.br(end);

    bx.switch_to_block(in_mem);

    let arg_ptr_v = bx.load(bx.type_ptr(), overflow_arg_area, ptr_align_abi);
    let arg_off = bx.const_u64(padding);
    let mem_addr = bx.ptradd(arg_ptr_v, arg_off);

    let arg_size = bx.cx().const_u64(padded_size);
    let new_arg_ptr_v = bx.inbounds_ptradd(arg_ptr_v, arg_size);
    bx.store(new_arg_ptr_v, overflow_arg_area, ptr_align_abi);
    bx.br(end);

    bx.switch_to_block(end);
    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
    let val_type = layout.llvm_type(bx);
    let val_addr =
        if indirect { bx.load(bx.cx.type_ptr(), val_addr, ptr_align_abi) } else { val_addr };
    bx.load(val_type, val_addr, layout.align.abi)
}

fn emit_x86_64_sysv64_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

    let va_list_addr = list.immediate();

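    // The System V AMD64 ABI defines `va_list` as:
    //
    //     typedef struct __va_list_tag {
    //         unsigned int gp_offset;  // (0)  byte offset into reg_save_area of the next GP reg
    //         unsigned int fp_offset;  // (4)  byte offset into reg_save_area of the next SSE reg
    //         void *overflow_arg_area; // (8)  next argument passed on the stack
    //         void *reg_save_area;     // (16) spilled GP registers followed by SSE registers
    //     } va_list[1];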
    let layout = {
        let mut layout = bx.cx.layout_of(target_ty);

        while let Some((_, inner)) = layout.non_1zst_field(bx.cx) {
            layout = inner;
        }

        layout
    };

    let mut num_gp_registers = 0;
    let mut num_fp_registers = 0;

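    // Classify the scalar(s) that make up the value: each INTEGER eightbyte needs one GP
    // register, each SSE-class scalar needs one 16-byte slot in the FP part of the save area.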
    let mut registers_for_primitive = |p| match p {
        Primitive::Int(integer, _is_signed) => {
            num_gp_registers += integer.size().bytes().div_ceil(8) as u32;
        }
        Primitive::Float(float) => {
            num_fp_registers += float.size().bytes().div_ceil(16) as u32;
        }
        Primitive::Pointer(_) => {
            num_gp_registers += 1;
        }
    };

    match layout.layout.backend_repr() {
        BackendRepr::Scalar(scalar) => {
            registers_for_primitive(scalar.primitive());
        }
        BackendRepr::ScalarPair(scalar1, scalar2) => {
            registers_for_primitive(scalar1.primitive());
            registers_for_primitive(scalar2.primitive());
        }
        BackendRepr::SimdVector { .. } | BackendRepr::ScalableVector { .. } => {
            unreachable!(
                "No x86-64 SysV va_arg implementation for {:?}",
                layout.layout.backend_repr()
            )
        }
        BackendRepr::Memory { .. } => {
            let mem_addr = x86_64_sysv64_va_arg_from_memory(bx, va_list_addr, layout);
            return bx.load(layout.llvm_type(bx), mem_addr, layout.align.abi);
        }
    };

    let unsigned_int_offset = 4;
    let ptr_offset = 8;
    let gp_offset_ptr = va_list_addr;
    let fp_offset_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(unsigned_int_offset));

    let gp_offset_v = bx.load(bx.type_i32(), gp_offset_ptr, Align::from_bytes(8).unwrap());
    let fp_offset_v = bx.load(bx.type_i32(), fp_offset_ptr, Align::from_bytes(4).unwrap());

    let mut use_regs = bx.const_bool(false);

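    // The register save area holds 6 GP registers (6 * 8 = 48 bytes) followed by 8 SSE
    // registers (8 * 16 = 128 bytes, ending at offset 176). The argument is taken from
    // registers only if enough registers of every required kind remain.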
    if num_gp_registers > 0 {
        let max_offset_val = 48u32 - num_gp_registers * 8;
        let fits_in_gp = bx.icmp(IntPredicate::IntULE, gp_offset_v, bx.const_u32(max_offset_val));
        use_regs = fits_in_gp;
    }

    if num_fp_registers > 0 {
        let max_offset_val = 176u32 - num_fp_registers * 16;
        let fits_in_fp = bx.icmp(IntPredicate::IntULE, fp_offset_v, bx.const_u32(max_offset_val));
        use_regs = if num_gp_registers > 0 { bx.and(use_regs, fits_in_fp) } else { fits_in_fp };
    }

    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let in_mem = bx.append_sibling_block("va_arg.in_mem");
    let end = bx.append_sibling_block("va_arg.end");

    bx.cond_br(use_regs, in_reg, in_mem);

    bx.switch_to_block(in_reg);

    let reg_save_area_ptr =
        bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * unsigned_int_offset + ptr_offset));
    let reg_save_area_v = bx.load(bx.type_ptr(), reg_save_area_ptr, dl.pointer_align().abi);

    let reg_addr = match layout.layout.backend_repr() {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Int(_, _) | Primitive::Pointer(_) => {
                let reg_addr = bx.inbounds_ptradd(reg_save_area_v, gp_offset_v);

                let gp_align = Align::from_bytes(8).unwrap();
                copy_to_temporary_if_more_aligned(bx, reg_addr, layout, gp_align)
            }
            Primitive::Float(_) => bx.inbounds_ptradd(reg_save_area_v, fp_offset_v),
        },
        BackendRepr::ScalarPair(scalar1, scalar2) => {
            let ty_lo = bx.cx().scalar_pair_element_backend_type(layout, 0, false);
            let ty_hi = bx.cx().scalar_pair_element_backend_type(layout, 1, false);

            let align_lo = layout.field(bx.cx, 0).layout.align().abi;
            let align_hi = layout.field(bx.cx, 1).layout.align().abi;

            match (scalar1.primitive(), scalar2.primitive()) {
                (Primitive::Float(_), Primitive::Float(_)) => {
                    let reg_lo_addr = bx.inbounds_ptradd(reg_save_area_v, fp_offset_v);
                    let reg_hi_addr = bx.inbounds_ptradd(reg_lo_addr, bx.const_i32(16));

                    let align = layout.layout.align().abi;
                    let tmp = bx.alloca(layout.layout.size(), align);

                    let reg_lo = bx.load(ty_lo, reg_lo_addr, align_lo);
                    let reg_hi = bx.load(ty_hi, reg_hi_addr, align_hi);

                    let offset = scalar1.size(bx.cx).align_to(align_hi).bytes();
                    let field0 = tmp;
                    let field1 = bx.inbounds_ptradd(tmp, bx.const_u32(offset as u32));

                    bx.store(reg_lo, field0, align);
                    bx.store(reg_hi, field1, align);

                    tmp
                }
                (Primitive::Float(_), _) | (_, Primitive::Float(_)) => {
                    let gp_addr = bx.inbounds_ptradd(reg_save_area_v, gp_offset_v);
                    let fp_addr = bx.inbounds_ptradd(reg_save_area_v, fp_offset_v);

                    let (reg_lo_addr, reg_hi_addr) = match scalar1.primitive() {
                        Primitive::Float(_) => (fp_addr, gp_addr),
                        Primitive::Int(_, _) | Primitive::Pointer(_) => (gp_addr, fp_addr),
                    };

                    let tmp = bx.alloca(layout.layout.size(), layout.layout.align().abi);

                    let reg_lo = bx.load(ty_lo, reg_lo_addr, align_lo);
                    let reg_hi = bx.load(ty_hi, reg_hi_addr, align_hi);

                    let offset = scalar1.size(bx.cx).align_to(align_hi).bytes();
                    let field0 = tmp;
                    let field1 = bx.inbounds_ptradd(tmp, bx.const_u32(offset as u32));

                    bx.store(reg_lo, field0, align_lo);
                    bx.store(reg_hi, field1, align_hi);

                    tmp
                }
                (_, _) => {
                    let reg_addr = bx.inbounds_ptradd(reg_save_area_v, gp_offset_v);

                    let gp_align = Align::from_bytes(8).unwrap();
                    copy_to_temporary_if_more_aligned(bx, reg_addr, layout, gp_align)
                }
            }
        }
        BackendRepr::SimdVector { .. }
        | BackendRepr::ScalableVector { .. }
        | BackendRepr::Memory { .. } => unreachable!(),
    };

    if num_gp_registers > 0 {
        let offset = bx.const_u32(num_gp_registers * 8);
        let sum = bx.add(gp_offset_v, offset);
        bx.store(sum, gp_offset_ptr, Align::from_bytes(8).unwrap());
    }

    if num_fp_registers > 0 {
        let offset = bx.const_u32(num_fp_registers * 16);
        let sum = bx.add(fp_offset_v, offset);
        bx.store(sum, fp_offset_ptr, Align::from_bytes(4).unwrap());
    }

    bx.br(end);

    bx.switch_to_block(in_mem);
    let mem_addr = x86_64_sysv64_va_arg_from_memory(bx, va_list_addr, layout);
    bx.br(end);

    bx.switch_to_block(end);

    let val_type = layout.llvm_type(bx);
    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);

    bx.load(val_type, val_addr, layout.align.abi)
}

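// If the type needs a larger alignment than the register save area slot guarantees, copy the
// value into a suitably aligned temporary and return a pointer to that instead.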
fn copy_to_temporary_if_more_aligned<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    reg_addr: &'ll Value,
    layout: TyAndLayout<'tcx, Ty<'tcx>>,
    src_align: Align,
) -> &'ll Value {
    if layout.layout.align.abi > src_align {
        let tmp = bx.alloca(layout.layout.size(), layout.layout.align().abi);
        bx.memcpy(
            tmp,
            layout.layout.align.abi,
            reg_addr,
            src_align,
            bx.const_u32(layout.layout.size().bytes() as u32),
            MemFlags::empty(),
            None,
        );
        tmp
    } else {
        reg_addr
    }
}

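// Fetch the argument from the overflow (stack) area and advance `overflow_arg_area` past it,
// keeping the pointer aligned to the next 8-byte boundary.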
fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    va_list_addr: &'ll Value,
    layout: TyAndLayout<'tcx, Ty<'tcx>>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();
    let ptr_align_abi = dl.data_layout().pointer_align().abi;

    let overflow_arg_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.const_usize(8));

    let overflow_arg_area_v = bx.load(bx.type_ptr(), overflow_arg_area_ptr, ptr_align_abi);
    if layout.layout.align.bytes() > 8 {
        unreachable!("all instances of VaArgSafe have an alignment <= 8");
    }

    let mem_addr = overflow_arg_area_v;

    let size_in_bytes = layout.layout.size().bytes();
    let offset = bx.const_i32(size_in_bytes.next_multiple_of(8) as i32);
    let overflow_arg_area = bx.inbounds_ptradd(overflow_arg_area_v, offset);
    bx.store(overflow_arg_area, overflow_arg_area_ptr, ptr_align_abi);

    mem_addr
}

fn emit_hexagon_va_arg_musl<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let va_list_addr = list.immediate();
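    // With musl, Hexagon uses a three-pointer `va_list` (clang names the fields
    // `__current_saved_reg_area_pointer`, `__saved_reg_area_end_pointer` and
    // `__overflow_area_pointer`): the current position in the spilled-register area, the end of
    // that area, and the next stack (overflow) argument.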
    let layout = bx.cx.layout_of(target_ty);
    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;
    let ptr_size = bx.tcx().data_layout.pointer_size().bytes();

    let maybe_reg = bx.append_sibling_block("va_arg.maybe_reg");
    let from_overflow = bx.append_sibling_block("va_arg.from_overflow");
    let end = bx.append_sibling_block("va_arg.end");

    let current_ptr_addr = va_list_addr;
    let end_ptr_addr = bx.inbounds_ptradd(va_list_addr, bx.const_usize(ptr_size));
    let overflow_ptr_addr = bx.inbounds_ptradd(va_list_addr, bx.const_usize(2 * ptr_size));

    let current_ptr = bx.load(bx.type_ptr(), current_ptr_addr, ptr_align_abi);
    let end_ptr = bx.load(bx.type_ptr(), end_ptr_addr, ptr_align_abi);
    let overflow_ptr = bx.load(bx.type_ptr(), overflow_ptr_addr, ptr_align_abi);

    let type_size_bits = bx.cx.size_of(target_ty).bits();
    let arg_align = if type_size_bits > 32 {
        Align::from_bytes(8).unwrap()
    } else {
        Align::from_bytes(4).unwrap()
    };
    let aligned_current = round_pointer_up_to_alignment(bx, current_ptr, arg_align, bx.type_ptr());

    let arg_size = if type_size_bits > 32 { 8 } else { 4 };
    let next_ptr = bx.inbounds_ptradd(aligned_current, bx.const_usize(arg_size));

    let fits_in_regs = bx.icmp(IntPredicate::IntULE, next_ptr, end_ptr);
    bx.cond_br(fits_in_regs, maybe_reg, from_overflow);

    bx.switch_to_block(maybe_reg);
    let reg_value_addr = aligned_current;
    bx.store(next_ptr, current_ptr_addr, ptr_align_abi);
    bx.br(end);

    bx.switch_to_block(from_overflow);

    let aligned_overflow =
        round_pointer_up_to_alignment(bx, overflow_ptr, arg_align, bx.type_ptr());

    let overflow_value_addr = aligned_overflow;
    let next_overflow = bx.inbounds_ptradd(aligned_overflow, bx.const_usize(arg_size));
    bx.store(next_overflow, overflow_ptr_addr, ptr_align_abi);

    bx.store(next_overflow, current_ptr_addr, ptr_align_abi);
    bx.br(end);

    bx.switch_to_block(end);
    let value_addr =
        bx.phi(bx.type_ptr(), &[reg_value_addr, overflow_value_addr], &[maybe_reg, from_overflow]);
    bx.load(layout.llvm_type(bx), value_addr, layout.align.abi)
}

fn emit_hexagon_va_arg_bare_metal<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let va_list_addr = list.immediate();
    let layout = bx.cx.layout_of(target_ty);
    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;

    let current_ptr = bx.load(bx.type_ptr(), va_list_addr, ptr_align_abi);

    let ty_align = layout.align.abi;
    let aligned_ptr = if ty_align.bytes() > 4 {
        debug_assert!(ty_align.bytes().is_power_of_two(), "Alignment is not power of 2!");
        round_pointer_up_to_alignment(bx, current_ptr, ty_align, bx.type_ptr())
    } else {
        current_ptr
    };

    let type_size = layout.size.bytes();
    let offset = type_size.next_multiple_of(4);
    let next_ptr = bx.inbounds_ptradd(aligned_ptr, bx.const_usize(offset));
    bx.store(next_ptr, va_list_addr, ptr_align_abi);

    bx.load(layout.llvm_type(bx), aligned_ptr, layout.align.abi)
}

fn emit_xtensa_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let va_list_addr = list.immediate();
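    // The Xtensa `va_list` is laid out as three 4-byte fields:
    //
    //     typedef struct __va_list_tag {
    //         int *__va_stk; // (0) stack (overflow) argument area
    //         int *__va_reg; // (4) register spill area
    //         int  __va_ndx; // (8) byte index of the next argument
    //     } va_list[1];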
    let layout = bx.cx.layout_of(target_ty);
    let from_stack = bx.append_sibling_block("va_arg.from_stack");
    let from_regsave = bx.append_sibling_block("va_arg.from_regsave");
    let end = bx.append_sibling_block("va_arg.end");
    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;

    let va_reg_offset = 4;
    let va_ndx_offset = va_reg_offset + 4;
    let offset_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(va_ndx_offset));

    let offset = bx.load(bx.type_i32(), offset_ptr, bx.tcx().data_layout.i32_align);
    let offset = round_up_to_alignment(bx, offset, layout.align.abi);

    let slot_size = layout.size.align_to(Align::from_bytes(4).unwrap()).bytes() as i32;

    let offset_next = bx.add(offset, bx.const_i32(slot_size));

    let regsave_size = bx.const_i32(24);
    let use_regsave = bx.icmp(IntPredicate::IntULE, offset_next, regsave_size);
    bx.cond_br(use_regsave, from_regsave, from_stack);

    bx.switch_to_block(from_regsave);
    bx.store(offset_next, offset_ptr, ptr_align_abi);

    let regsave_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(va_reg_offset));
    let regsave_area = bx.load(bx.type_ptr(), regsave_area_ptr, ptr_align_abi);
    let regsave_value_ptr = bx.inbounds_ptradd(regsave_area, offset);
    bx.br(end);

    bx.switch_to_block(from_stack);

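    // The register spill area holds at most six 4-byte words (24 bytes). Once an argument no
    // longer fits there, the index jumps to 32, which is where the first stack-passed argument
    // sits relative to `__va_stk`.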
    let stack_offset_start = bx.const_i32(32);
    let needs_correction = bx.icmp(IntPredicate::IntULE, offset, stack_offset_start);
    let offset_corrected = bx.select(needs_correction, stack_offset_start, offset);

    let offset_next_corrected = bx.add(offset_next, bx.const_i32(slot_size));
    bx.store(offset_next_corrected, offset_ptr, ptr_align_abi);

    let stack_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(0));
    let stack_area = bx.load(bx.type_ptr(), stack_area_ptr, ptr_align_abi);
    let stack_value_ptr = bx.inbounds_ptradd(stack_area, offset_corrected);
    bx.br(end);

    bx.switch_to_block(end);

    assert!(bx.tcx().sess.target.endian == Endian::Little);
    let value_ptr =
        bx.phi(bx.type_ptr(), &[regsave_value_ptr, stack_value_ptr], &[from_regsave, from_stack]);
    bx.load(layout.llvm_type(bx), value_ptr, layout.align.abi)
}

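// Lowers the `va_arg` operation for the current target. Targets without a dedicated
// implementation fall through to LLVM's native `va_arg` instruction at the end of the match.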
pub(super) fn emit_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    addr: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let target = &bx.cx.tcx.sess.target;

    match target.arch {
        Arch::X86 => emit_ptr_va_arg(
            bx,
            addr,
            target_ty,
            PassMode::Direct,
            SlotSize::Bytes4,
            if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
            ForceRightAdjust::No,
        ),
        Arch::AArch64 | Arch::Arm64EC if target.is_like_windows || target.is_like_darwin => {
            emit_ptr_va_arg(
                bx,
                addr,
                target_ty,
                PassMode::Direct,
                SlotSize::Bytes8,
                if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
                ForceRightAdjust::No,
            )
        }
        Arch::AArch64 => emit_aapcs_va_arg(bx, addr, target_ty),
        Arch::Arm => {
            assert!(bx.cx.size_of(target_ty).bytes() <= 16);

            emit_ptr_va_arg(
                bx,
                addr,
                target_ty,
                PassMode::Direct,
                SlotSize::Bytes4,
                AllowHigherAlign::Yes,
                ForceRightAdjust::No,
            )
        }
        Arch::S390x => emit_s390x_va_arg(bx, addr, target_ty),
        Arch::PowerPC => emit_powerpc_va_arg(bx, addr, target_ty),
        Arch::PowerPC64 => emit_ptr_va_arg(
            bx,
            addr,
            target_ty,
            PassMode::Direct,
            SlotSize::Bytes8,
            AllowHigherAlign::Yes,
            ForceRightAdjust::Yes,
        ),
        Arch::PowerPC64LE => emit_ptr_va_arg(
            bx,
            addr,
            target_ty,
            PassMode::Direct,
            SlotSize::Bytes8,
            AllowHigherAlign::Yes,
            ForceRightAdjust::No,
        ),
        Arch::X86_64 if target.is_like_windows => {
            let target_ty_size = bx.cx.size_of(target_ty).bytes();
            emit_ptr_va_arg(
                bx,
                addr,
                target_ty,
                if target_ty_size > 8 || !target_ty_size.is_power_of_two() {
                    PassMode::Indirect
                } else {
                    PassMode::Direct
                },
                SlotSize::Bytes8,
                AllowHigherAlign::No,
                ForceRightAdjust::No,
            )
        }
        Arch::X86_64 => emit_x86_64_sysv64_va_arg(bx, addr, target_ty),
        Arch::Xtensa => emit_xtensa_va_arg(bx, addr, target_ty),
        Arch::Hexagon => {
            if target.env == Env::Musl {
                emit_hexagon_va_arg_musl(bx, addr, target_ty)
            } else {
                emit_hexagon_va_arg_bare_metal(bx, addr, target_ty)
            }
        }
        _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),
    }
}