rustc_codegen_llvm/abi.rs

use std::cmp;

use libc::c_uint;
use rustc_abi::{
    ArmCall, BackendRepr, CanonAbi, HasDataLayout, InterruptKind, Primitive, Reg, RegKind, Size,
    X86Call,
};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
use rustc_codegen_ssa::traits::*;
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::{bug, ty};
use rustc_session::{Session, config};
use rustc_target::callconv::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode,
};
use rustc_target::spec::SanitizerSet;
use smallvec::SmallVec;

use crate::attributes::{self, llfn_attrs_from_instance};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, Attribute, AttributePlace, Type, Value};
use crate::llvm_util;
use crate::type_of::LayoutLlvmExt;

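/// Applies a set of `ArgAttributes` to an LLVM function declaration or call site
/// as the corresponding parameter/return-value attributes.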
trait ArgAttributesExt {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    );
}

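// Argument attributes that change how the argument is passed in the ABI; `get_attrs`
// emits these regardless of optimization level.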
const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
    [(ArgAttribute::InReg, llvm::AttributeKind::InReg)];

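// Argument attributes that only help LLVM optimize; `get_attrs` skips them when not
// optimizing (apart from `noundef` when MemorySanitizer is enabled).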
const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 6] = [
    (ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias),
    (ArgAttribute::CapturesAddress, llvm::AttributeKind::CapturesAddress),
    (ArgAttribute::NonNull, llvm::AttributeKind::NonNull),
    (ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly),
    (ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef),
    (ArgAttribute::CapturesReadOnly, llvm::AttributeKind::CapturesReadOnly),
];

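/// Lowers `this` to the list of LLVM attributes to attach to a single argument or return
/// value, taking the optimization level, enabled sanitizers, and LLVM version into account.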
fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> {
    let mut regular = this.regular;

    let mut attrs = SmallVec::new();

    // ABI-affecting attributes must always be applied
    for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
        if regular.contains(attr) {
            attrs.push(llattr.create_attr(cx.llcx));
        }
    }
    if let Some(align) = this.pointee_align {
        attrs.push(llvm::CreateAlignmentAttr(cx.llcx, align.bytes()));
    }
    match this.arg_ext {
        ArgExtension::None => {}
        ArgExtension::Zext => attrs.push(llvm::AttributeKind::ZExt.create_attr(cx.llcx)),
        ArgExtension::Sext => attrs.push(llvm::AttributeKind::SExt.create_attr(cx.llcx)),
    }

    // Only apply remaining attributes when optimizing
    if cx.sess().opts.optimize != config::OptLevel::No {
        let deref = this.pointee_size.bytes();
        if deref != 0 {
            if regular.contains(ArgAttribute::NonNull) {
                attrs.push(llvm::CreateDereferenceableAttr(cx.llcx, deref));
            } else {
                attrs.push(llvm::CreateDereferenceableOrNullAttr(cx.llcx, deref));
            }
            regular -= ArgAttribute::NonNull;
        }
        for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
            if regular.contains(attr) {
                // captures(...) is only available since LLVM 21.
                if (attr == ArgAttribute::CapturesReadOnly || attr == ArgAttribute::CapturesAddress)
                    && llvm_util::get_version() < (21, 0, 0)
                {
                    continue;
                }
                attrs.push(llattr.create_attr(cx.llcx));
            }
        }
    } else if cx.tcx.sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::MEMORY) {
        // If we're not optimising, *but* memory sanitizer is on, emit noundef, since it affects
        // memory sanitizer's behavior.

        if regular.contains(ArgAttribute::NoUndef) {
            attrs.push(llvm::AttributeKind::NoUndef.create_attr(cx.llcx));
        }
    }

    attrs
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
        let attrs = get_attrs(self, cx);
        attributes::apply_to_llfn(llfn, idx, &attrs);
    }

    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    ) {
        let attrs = get_attrs(self, cx);
        attributes::apply_to_callsite(callsite, idx, &attrs);
    }
}

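/// Conversion of an ABI `Reg` or `CastTarget` into the LLVM type actually used to pass it.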
pub(crate) trait LlvmType {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                16 => cx.type_f16(),
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                128 => cx.type_f128(),
                _ => bug!("unsupported float: {:?}", self),
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}

impl LlvmType for CastTarget {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let rest_count = if self.rest.total == Size::ZERO {
            0
        } else {
            assert_ne!(
                self.rest.unit.size,
                Size::ZERO,
                "total size {:?} cannot be divided into units of zero size",
                self.rest.total
            );
            if !self.rest.total.bytes().is_multiple_of(self.rest.unit.size.bytes()) {
                assert_eq!(self.rest.unit.kind, RegKind::Integer, "only int regs can be split");
            }
            self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes())
        };

        // Simplify to a single unit or an array if there's no prefix.
        // This produces the same layout, but using a simpler type.
        if self.prefix.iter().all(|x| x.is_none()) {
            // We can't do this if is_consecutive is set and the unit would get
            // split on the target. Currently, this is only relevant for i128
            // registers.
            if rest_count == 1 && (!self.rest.is_consecutive || self.rest.unit != Reg::i128()) {
                return rest_ll_unit;
            }

            return cx.type_array(rest_ll_unit, rest_count);
        }

        // Generate a struct type with the prefix and the "rest" arguments.
        let prefix_args =
            self.prefix.iter().flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)));
        let rest_args = (0..rest_count).map(|_| rest_ll_unit);
        let args: Vec<_> = prefix_args.chain(rest_args).collect();
        cx.type_struct(&args, false)
    }
}

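/// Helpers for storing a value that was passed or returned according to an `ArgAbi`
/// back into a place with the argument's Rust layout.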
trait ArgAbiExt<'ll, 'tcx> {
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used both for storing formal arguments into Rust variables
    /// and for storing the results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        match &self.mode {
            PassMode::Ignore => {}
            // Sized indirect arguments
            PassMode::Indirect { attrs, meta_attrs: None, on_stack: _ } => {
                let align = attrs.pointee_align.unwrap_or(self.layout.align.abi);
                OperandValue::Ref(PlaceValue::new_sized(val, align)).store(bx, dst);
            }
            // Unsized indirect arguments cannot be stored
            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                bug!("unsized `ArgAbi` cannot be stored");
            }
            PassMode::Cast { cast, pad_i32: _ } => {
                // The ABI mandates that the value is passed as a different struct representation.
                // Spill and reload it from the stack to convert from the ABI representation to
                // the Rust representation.
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                // Note that the ABI type may be either larger or smaller than the Rust type,
                // due to the presence or absence of trailing padding. For example:
                // - On some ABIs, the Rust layout { f64, f32, <f32 padding> } may omit padding
                //   when passed by value, making it smaller.
                // - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
                //   when passed by value, making it larger.
                let copy_bytes =
                    cmp::min(cast.unaligned_size(bx).bytes(), self.layout.size.bytes());
                // Allocate some scratch space...
                let llscratch = bx.alloca(scratch_size, scratch_align);
                bx.lifetime_start(llscratch, scratch_size);
                // ...store the value...
                rustc_codegen_ssa::mir::store_cast(bx, cast, val, llscratch, scratch_align);
                // ... and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.val.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(copy_bytes),
                    MemFlags::empty(),
                    None,
                );
                bx.lifetime_end(llscratch, scratch_size);
            }
            _ => {
                OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);
            }
        }
    }

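    /// Stores the LLVM function parameter(s) starting at `*idx` into `dst`, advancing
    /// `*idx` past every parameter consumed by this argument's `PassMode`.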
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                bug!("unsized `ArgAbi` cannot be stored");
            }
            PassMode::Direct(_)
            | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
            | PassMode::Cast { .. } => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}

impl<'ll, 'tcx> ArgAbiBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, Self::Value>,
    ) {
        arg_abi.store_fn_arg(self, idx, dst)
    }
    fn store_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        arg_abi.store(self, val, dst)
    }
}

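/// LLVM-specific extensions to `FnAbi`: computing the LLVM function type and calling
/// convention, and applying per-argument attributes to declarations and call sites.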
pub(crate) trait FnAbiLlvmExt<'ll, 'tcx> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self, cx: &CodegenCx<'ll, 'tcx>) -> llvm::CallConv;

    /// Apply attributes to a function declaration/definition.
    fn apply_attrs_llfn(
        &self,
        cx: &CodegenCx<'ll, 'tcx>,
        llfn: &'ll Value,
        instance: Option<ty::Instance<'tcx>>,
    );

    /// Apply attributes to a function call.
    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        // Ignore "extra" args from the call site for C variadic functions.
        // Only the "fixed" args are part of the LLVM function signature.
        let args =
            if self.c_variadic { &self.args[..self.fixed_count as usize] } else { &self.args };

        // This capacity calculation is approximate.
        let mut llargument_tys = Vec::with_capacity(
            self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 },
        );

        let llreturn_ty = match &self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
            PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
            PassMode::Indirect { .. } => {
                llargument_tys.push(cx.type_ptr());
                cx.type_void()
            }
        };

        for arg in args {
            // Note that the exact number of arguments pushed here is carefully synchronized with
            // code all over the place, both in the codegen_llvm and codegen_ssa crates. That's how
            // other code then knows which LLVM argument(s) correspond to the n-th Rust argument.
            let llarg_ty = match &arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => {
                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
                    // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
                    // guaranteeing that we generate ABI-compatible LLVM IR.
                    arg.layout.immediate_llvm_type(cx)
                }
                PassMode::Pair(..) => {
                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
                    // so for ScalarPair we can easily be sure that we are generating ABI-compatible
                    // LLVM IR.
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                    // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
                    // Any two ABI-compatible unsized types have the same metadata type and
                    // moreover the same metadata value leads to the same dynamic size and
                    // alignment, so this respects ABI compatibility.
                    let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => cx.type_ptr(),
                PassMode::Cast { cast, pad_i32 } => {
                    // add padding
                    if *pad_i32 {
                        llargument_tys.push(Reg::i32().llvm_type(cx));
                    }
                    // Compute the LLVM type we use for this function from the cast type.
                    // We assume here that ABI-compatible Rust types have the same cast type.
                    cast.llvm_type(cx)
                }
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }

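    // Function pointers are lowered as opaque pointers in the target's instruction
    // address space, regardless of the signature.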
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        cx.type_ptr_ext(cx.data_layout().instruction_address_space)
    }

    fn llvm_cconv(&self, cx: &CodegenCx<'ll, 'tcx>) -> llvm::CallConv {
        to_llvm_calling_convention(cx.tcx.sess, self.conv)
    }

    fn apply_attrs_llfn(
        &self,
        cx: &CodegenCx<'ll, 'tcx>,
        llfn: &'ll Value,
        instance: Option<ty::Instance<'tcx>>,
    ) {
        let mut func_attrs = SmallVec::<[_; 3]>::new();
        if self.ret.layout.is_uninhabited() {
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
        }
        match self.conv {
            CanonAbi::Interrupt(InterruptKind::RiscvMachine) => {
                func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", "machine"))
            }
            CanonAbi::Interrupt(InterruptKind::RiscvSupervisor) => {
                func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", "supervisor"))
            }
            CanonAbi::Arm(ArmCall::CCmseNonSecureEntry) => {
                func_attrs.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"))
            }
            _ => (),
        }
        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });

        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
            i += 1;
            i - 1
        };

        let apply_range_attr = |idx: AttributePlace, scalar: rustc_abi::Scalar| {
            if cx.sess().opts.optimize != config::OptLevel::No
                && matches!(scalar.primitive(), Primitive::Int(..))
                // If the value is a boolean, the range is 0..2 and that ultimately
                // becomes 0..0 when the type becomes i1, which would be rejected
                // by the LLVM verifier.
                && !scalar.is_bool()
                // LLVM also rejects full range.
                && !scalar.is_always_valid(cx)
            {
                attributes::apply_to_llfn(
                    llfn,
                    idx,
                    &[llvm::CreateRangeAttr(cx.llcx, scalar.size(cx), scalar.valid_range(cx))],
                );
            }
        };

        match &self.ret.mode {
            PassMode::Direct(attrs) => {
                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
                if let BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr {
                    apply_range_attr(llvm::AttributePlace::ReturnValue, scalar);
                }
            }
            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(attrs);
                let sret = llvm::CreateStructRetAttr(
                    cx.llcx,
                    cx.type_array(cx.type_i8(), self.ret.layout.size.bytes()),
                );
                attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
                if cx.sess().opts.optimize != config::OptLevel::No {
                    attributes::apply_to_llfn(
                        llfn,
                        llvm::AttributePlace::Argument(i),
                        &[
                            llvm::AttributeKind::Writable.create_attr(cx.llcx),
                            llvm::AttributeKind::DeadOnUnwind.create_attr(cx.llcx),
                        ],
                    );
                }
            }
            PassMode::Cast { cast, pad_i32: _ } => {
                cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            _ => {}
        }
        for arg in self.args.iter() {
            match &arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                    let i = apply(attrs);
                    let byval = llvm::CreateByValAttr(
                        cx.llcx,
                        cx.type_array(cx.type_i8(), arg.layout.size.bytes()),
                    );
                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                }
                PassMode::Direct(attrs) => {
                    let i = apply(attrs);
                    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                        apply_range_attr(llvm::AttributePlace::Argument(i), scalar);
                    }
                }
                PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                    let i = apply(attrs);
                    if cx.sess().opts.optimize != config::OptLevel::No
                        && llvm_util::get_version() >= (21, 0, 0)
                    {
                        attributes::apply_to_llfn(
                            llfn,
                            llvm::AttributePlace::Argument(i),
                            &[llvm::AttributeKind::DeadOnReturn.create_attr(cx.llcx)],
                        );
                    }
                }
                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
                    assert!(!on_stack);
                    apply(attrs);
                    apply(meta_attrs);
                }
                PassMode::Pair(a, b) => {
                    let i = apply(a);
                    let ii = apply(b);
                    if let BackendRepr::ScalarPair(scalar_a, scalar_b) = arg.layout.backend_repr {
                        apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a);
                        apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b);
                    }
                }
                PassMode::Cast { cast, pad_i32 } => {
                    if *pad_i32 {
                        apply(&ArgAttributes::new());
                    }
                    apply(&cast.attrs);
                }
            }
        }

        // If the declaration has an associated instance, compute extra attributes based on that.
        if let Some(instance) = instance {
            llfn_attrs_from_instance(
                cx,
                cx.tcx,
                llfn,
                &cx.tcx.codegen_instance_attrs(instance.def),
                Some(instance),
            );
        }
    }

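    // Largely mirrors `apply_attrs_llfn`: the ABI-affecting attributes applied here must
    // agree with the ones placed on the function declaration.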
    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
        let mut func_attrs = SmallVec::<[_; 2]>::new();
        if self.ret.layout.is_uninhabited() {
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(bx.cx.llcx));
        }
        attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &{ func_attrs });

        let mut i = 0;
        let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
            i += 1;
            i - 1
        };
        match &self.ret.mode {
            PassMode::Direct(attrs) => {
                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
            }
            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(bx.cx, attrs);
                let sret = llvm::CreateStructRetAttr(
                    bx.cx.llcx,
                    bx.cx.type_array(bx.cx.type_i8(), self.ret.layout.size.bytes()),
                );
                attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
            }
            PassMode::Cast { cast, pad_i32: _ } => {
                cast.attrs.apply_attrs_to_callsite(
                    llvm::AttributePlace::ReturnValue,
                    bx.cx,
                    callsite,
                );
            }
            _ => {}
        }
        for arg in self.args.iter() {
            match &arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                    let i = apply(bx.cx, attrs);
                    let byval = llvm::CreateByValAttr(
                        bx.cx.llcx,
                        bx.cx.type_array(bx.cx.type_i8(), arg.layout.size.bytes()),
                    );
                    attributes::apply_to_callsite(
                        callsite,
                        llvm::AttributePlace::Argument(i),
                        &[byval],
                    );
                }
                PassMode::Direct(attrs)
                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                    apply(bx.cx, attrs);
                }
                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
                    apply(bx.cx, attrs);
                    apply(bx.cx, meta_attrs);
                }
                PassMode::Pair(a, b) => {
                    apply(bx.cx, a);
                    apply(bx.cx, b);
                }
                PassMode::Cast { cast, pad_i32 } => {
                    if *pad_i32 {
                        apply(bx.cx, &ArgAttributes::new());
                    }
                    apply(bx.cx, &cast.attrs);
                }
            }
        }

        let cconv = self.llvm_cconv(&bx.cx);
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }

        if self.conv == CanonAbi::Arm(ArmCall::CCmseNonSecureCall) {
            // This will probably get ignored on all targets but those supporting the TrustZone-M
            // extension (thumbv8m targets).
            let cmse_nonsecure_call = llvm::CreateAttrString(bx.cx.llcx, "cmse_nonsecure_call");
            attributes::apply_to_callsite(
                callsite,
                llvm::AttributePlace::Function,
                &[cmse_nonsecure_call],
            );
        }

        // Some intrinsics require that an elementtype attribute (with the pointee type of a
        // pointer argument) is added to the callsite.
        let element_type_index = unsafe { llvm::LLVMRustGetElementTypeArgIndex(callsite) };
        if element_type_index >= 0 {
            let arg_ty = self.args[element_type_index as usize].layout.ty;
            let pointee_ty = arg_ty.builtin_deref(true).expect("Must be pointer argument");
            let element_type_attr = unsafe {
                llvm::LLVMRustCreateElementTypeAttr(bx.llcx, bx.layout_of(pointee_ty).llvm_type(bx))
            };
            attributes::apply_to_callsite(
                callsite,
                llvm::AttributePlace::Argument(element_type_index as u32),
                &[element_type_attr],
            );
        }
    }
}

impl AbiBuilderMethods for Builder<'_, '_, '_> {
    fn get_param(&mut self, index: usize) -> Self::Value {
        llvm::get_param(self.llfn(), index as c_uint)
    }
}

/// Determines the appropriate [`llvm::CallConv`] to use for a given function
/// ABI, for the current target.
pub(crate) fn to_llvm_calling_convention(sess: &Session, abi: CanonAbi) -> llvm::CallConv {
    match abi {
        CanonAbi::C | CanonAbi::Rust => llvm::CCallConv,
        CanonAbi::RustCold => llvm::PreserveMost,
        // Functions with this calling convention can only be called from assembly, but it is
        // possible to declare an `extern "custom"` block, so the backend still needs a calling
        // convention for declaring foreign functions.
        CanonAbi::Custom => llvm::CCallConv,
        CanonAbi::GpuKernel => {
            let arch = sess.target.arch.as_ref();
            if arch == "amdgpu" {
                llvm::AmdgpuKernel
            } else if arch == "nvptx64" {
                llvm::PtxKernel
            } else {
                panic!("Architecture {arch} does not support GpuKernel calling convention");
            }
        }
        CanonAbi::Interrupt(interrupt_kind) => match interrupt_kind {
            InterruptKind::Avr => llvm::AvrInterrupt,
            InterruptKind::AvrNonBlocking => llvm::AvrNonBlockingInterrupt,
            InterruptKind::Msp430 => llvm::Msp430Intr,
            InterruptKind::RiscvMachine | InterruptKind::RiscvSupervisor => llvm::CCallConv,
            InterruptKind::X86 => llvm::X86_Intr,
        },
        CanonAbi::Arm(arm_call) => match arm_call {
            ArmCall::Aapcs => llvm::ArmAapcsCallConv,
            ArmCall::CCmseNonSecureCall | ArmCall::CCmseNonSecureEntry => llvm::CCallConv,
        },
        CanonAbi::X86(x86_call) => match x86_call {
            X86Call::Fastcall => llvm::X86FastcallCallConv,
            X86Call::Stdcall => llvm::X86StdcallCallConv,
            X86Call::SysV64 => llvm::X86_64_SysV,
            X86Call::Thiscall => llvm::X86_ThisCall,
            X86Call::Vectorcall => llvm::X86_VectorCall,
            X86Call::Win64 => llvm::X86_64_Win64,
        },
    }
}