// rustc_codegen_llvm/attributes.rs

1//! Set and unset common attributes on LLVM values.
2use rustc_hir::attrs::{InlineAttr, InstructionSetAttr, OptimizeAttr, RtsanSetting};
3use rustc_hir::def_id::DefId;
4use rustc_hir::find_attr;
5use rustc_middle::middle::codegen_fn_attrs::{
6    CodegenFnAttrFlags, CodegenFnAttrs, PatchableFunctionEntry, SanitizerFnAttrs,
7};
8use rustc_middle::ty::{self, TyCtxt};
9use rustc_session::config::{BranchProtection, FunctionReturn, OptLevel, PAuthKey, PacRet};
10use rustc_symbol_mangling::mangle_internal_symbol;
11use rustc_target::spec::{Arch, FramePointer, SanitizerSet, StackProbeType, StackProtector};
12use smallvec::SmallVec;
13
14use crate::context::SimpleCx;
15use crate::errors::SanitizerMemtagRequiresMte;
16use crate::llvm::AttributePlace::Function;
17use crate::llvm::{
18    self, AllocKindFlags, Attribute, AttributeKind, AttributePlace, MemoryEffects, Value,
19};
20use crate::{Session, attributes, llvm_util};
21
22pub(crate) fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
23    if !attrs.is_empty() {
24        llvm::AddFunctionAttributes(llfn, idx, attrs);
25    }
26}
27
28pub(crate) fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
29    if !attrs.is_empty() {
30        llvm::AddCallSiteAttributes(callsite, idx, attrs);
31    }
32}
33
/// Returns `true` if `llfn` carries a string attribute with the given `name`.
pub(crate) fn has_string_attr(llfn: &Value, name: &str) -> bool {
    llvm::HasStringAttribute(llfn, name)
}
37
/// Removes the string attribute with the given `name` from `llfn`, if present.
pub(crate) fn remove_string_attr_from_llfn(llfn: &Value, name: &str) {
    llvm::RemoveStringAttrFromFn(llfn, name);
}
41
42/// Get LLVM attribute for the provided inline heuristic.
43pub(crate) fn inline_attr<'ll, 'tcx>(
44    cx: &SimpleCx<'ll>,
45    tcx: TyCtxt<'tcx>,
46    instance: ty::Instance<'tcx>,
47) -> Option<&'ll Attribute> {
48    // `optnone` requires `noinline`
49    let codegen_fn_attrs = tcx.codegen_fn_attrs(instance.def_id());
50    let inline = match (codegen_fn_attrs.inline, &codegen_fn_attrs.optimize) {
51        (_, OptimizeAttr::DoNotOptimize) => InlineAttr::Never,
52        (InlineAttr::None, _) if instance.def.requires_inline(tcx) => InlineAttr::Hint,
53        (inline, _) => inline,
54    };
55
56    if !tcx.sess.opts.unstable_opts.inline_llvm {
57        // disable LLVM inlining
58        return Some(AttributeKind::NoInline.create_attr(cx.llcx));
59    }
60    match inline {
61        InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
62        InlineAttr::Always | InlineAttr::Force { .. } => {
63            Some(AttributeKind::AlwaysInline.create_attr(cx.llcx))
64        }
65        InlineAttr::Never => {
66            if tcx.sess.target.arch != Arch::AmdGpu {
67                Some(AttributeKind::NoInline.create_attr(cx.llcx))
68            } else {
69                None
70            }
71        }
72        InlineAttr::None => None,
73    }
74}
75
76#[inline]
77fn patchable_function_entry_attrs<'ll>(
78    cx: &SimpleCx<'ll>,
79    sess: &Session,
80    attr: Option<PatchableFunctionEntry>,
81) -> SmallVec<[&'ll Attribute; 2]> {
82    let mut attrs = SmallVec::new();
83    let patchable_spec = attr.unwrap_or_else(|| {
84        PatchableFunctionEntry::from_config(sess.opts.unstable_opts.patchable_function_entry)
85    });
86    let entry = patchable_spec.entry();
87    let prefix = patchable_spec.prefix();
88    if entry > 0 {
89        attrs.push(llvm::CreateAttrStringValue(
90            cx.llcx,
91            "patchable-function-entry",
92            &::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{0}", entry))
    })format!("{}", entry),
93        ));
94    }
95    if prefix > 0 {
96        attrs.push(llvm::CreateAttrStringValue(
97            cx.llcx,
98            "patchable-function-prefix",
99            &::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{0}", prefix))
    })format!("{}", prefix),
100        ));
101    }
102    attrs
103}
104
/// Get LLVM sanitize attributes.
///
/// The attribute set is derived from the sanitizers enabled for the session,
/// minus those disabled on this particular function (`sanitizer_fn_attr.disabled`).
#[inline]
pub(crate) fn sanitize_attrs<'ll, 'tcx>(
    cx: &SimpleCx<'ll>,
    tcx: TyCtxt<'tcx>,
    sanitizer_fn_attr: SanitizerFnAttrs,
) -> SmallVec<[&'ll Attribute; 4]> {
    let mut attrs = SmallVec::new();
    // Session-enabled sanitizers with the per-function opt-outs removed.
    let enabled = tcx.sess.sanitizers() - sanitizer_fn_attr.disabled;
    if enabled.contains(SanitizerSet::ADDRESS) || enabled.contains(SanitizerSet::KERNELADDRESS) {
        attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::MEMORY) {
        attrs.push(llvm::AttributeKind::SanitizeMemory.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::THREAD) {
        attrs.push(llvm::AttributeKind::SanitizeThread.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::HWADDRESS) || enabled.contains(SanitizerSet::KERNELHWADDRESS)
    {
        attrs.push(llvm::AttributeKind::SanitizeHWAddress.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::SHADOWCALLSTACK) {
        attrs.push(llvm::AttributeKind::ShadowCallStack.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::MEMTAG) {
        // Check to make sure the mte target feature is actually enabled.
        // `rfind` so that the last mention of the feature in the list wins.
        let features = tcx.global_backend_features(());
        let mte_feature =
            features.iter().map(|s| &s[..]).rfind(|n| ["+mte", "-mte"].contains(&&n[..]));
        if let None | Some("-mte") = mte_feature {
            tcx.dcx().emit_err(SanitizerMemtagRequiresMte);
        }

        attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::SAFESTACK) {
        attrs.push(llvm::AttributeKind::SanitizeSafeStack.create_attr(cx.llcx));
    }
    // NOTE(review): this checks the session-wide set rather than `enabled`, so
    // per-function disables do not suppress the rtsan attribute here; the
    // function-level setting only selects which rtsan mode applies.
    if tcx.sess.sanitizers().contains(SanitizerSet::REALTIME) {
        match sanitizer_fn_attr.rtsan_setting {
            RtsanSetting::Nonblocking => {
                attrs.push(llvm::AttributeKind::SanitizeRealtimeNonblocking.create_attr(cx.llcx))
            }
            RtsanSetting::Blocking => {
                attrs.push(llvm::AttributeKind::SanitizeRealtimeBlocking.create_attr(cx.llcx))
            }
            // caller is the default, so no llvm attribute
            RtsanSetting::Caller => (),
        }
    }
    attrs
}
158
159/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
160#[inline]
161pub(crate) fn uwtable_attr(llcx: &llvm::Context, use_sync_unwind: Option<bool>) -> &Attribute {
162    // NOTE: We should determine if we even need async unwind tables, as they
163    // take have more overhead and if we can use sync unwind tables we
164    // probably should.
165    let async_unwind = !use_sync_unwind.unwrap_or(false);
166    llvm::CreateUWTableAttr(llcx, async_unwind)
167}
168
169pub(crate) fn frame_pointer_type_attr<'ll>(
170    cx: &SimpleCx<'ll>,
171    sess: &Session,
172) -> Option<&'ll Attribute> {
173    let mut fp = sess.target.frame_pointer;
174    let opts = &sess.opts;
175    // "mcount" function relies on stack pointer.
176    // See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
177    if opts.unstable_opts.instrument_mcount {
178        fp.ratchet(FramePointer::Always);
179    }
180    fp.ratchet(opts.cg.force_frame_pointers);
181    let attr_value = match fp {
182        FramePointer::Always => "all",
183        FramePointer::NonLeaf => "non-leaf",
184        FramePointer::MayOmit => return None,
185    };
186    Some(llvm::CreateAttrStringValue(cx.llcx, "frame-pointer", attr_value))
187}
188
189fn function_return_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
190    let function_return_attr = match sess.opts.unstable_opts.function_return {
191        FunctionReturn::Keep => return None,
192        FunctionReturn::ThunkExtern => AttributeKind::FnRetThunkExtern,
193    };
194
195    Some(function_return_attr.create_attr(cx.llcx))
196}
197
/// Tell LLVM what instrument function to insert.
///
/// Covers two independent instrumentation mechanisms:
/// `-Z instrument-mcount` (gprof-style entry hooks) and
/// `-Z instrument-xray` (XRay NOP-sled entry/exit tracing).
#[inline]
fn instrument_function_attr<'ll>(
    cx: &SimpleCx<'ll>,
    sess: &Session,
) -> SmallVec<[&'ll Attribute; 4]> {
    let mut attrs = SmallVec::new();
    if sess.opts.unstable_opts.instrument_mcount {
        // Similar to `clang -pg` behavior. Handled by the
        // `post-inline-ee-instrument` LLVM pass.

        // The function name varies on platforms.
        // See test/CodeGen/mcount.c in clang.
        let mcount_name = match &sess.target.llvm_mcount_intrinsic {
            Some(llvm_mcount_intrinsic) => llvm_mcount_intrinsic.as_ref(),
            None => sess.target.mcount.as_ref(),
        };

        attrs.push(llvm::CreateAttrStringValue(
            cx.llcx,
            "instrument-function-entry-inlined",
            mcount_name,
        ));
    }
    if let Some(options) = &sess.opts.unstable_opts.instrument_xray {
        // XRay instrumentation is similar to __cyg_profile_func_{enter,exit}.
        // Function prologue and epilogue are instrumented with NOP sleds,
        // a runtime library later replaces them with detours into tracing code.
        if options.always {
            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-always"));
        }
        if options.never {
            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-never"));
        }
        if options.ignore_loops {
            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-ignore-loops"));
        }
        // LLVM will not choose the default for us, but rather requires specific
        // threshold in absence of "xray-always". Use the same default as Clang.
        let threshold = options.instruction_threshold.unwrap_or(200);
        attrs.push(llvm::CreateAttrStringValue(
            cx.llcx,
            "xray-instruction-threshold",
            &threshold.to_string(),
        ));
        if options.skip_entry {
            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-entry"));
        }
        if options.skip_exit {
            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-exit"));
        }
    }
    attrs
}
252
253fn nojumptables_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
254    if sess.opts.cg.jump_tables {
255        return None;
256    }
257
258    Some(llvm::CreateAttrStringValue(cx.llcx, "no-jump-tables", "true"))
259}
260
/// Get the `probe-stack` LLVM attribute selecting how stack probes are
/// emitted for this target (inline assembly vs. a call to the internal
/// `__rust_probestack` symbol), or `None` when probes are disabled.
fn probestack_attr<'ll, 'tcx>(cx: &SimpleCx<'ll>, tcx: TyCtxt<'tcx>) -> Option<&'ll Attribute> {
    // Currently stack probes seem somewhat incompatible with the address
    // sanitizer and thread sanitizer. With asan we're already protected from
    // stack overflow anyway so we don't really need stack probes regardless.
    if tcx.sess.sanitizers().intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD) {
        return None;
    }

    // probestack doesn't play nice either with `-C profile-generate`.
    if tcx.sess.opts.cg.profile_generate.enabled() {
        return None;
    }

    let attr_value = match tcx.sess.target.stack_probes {
        StackProbeType::None => return None,
        // Request LLVM to generate the probes inline. If the given LLVM version does not support
        // this, no probe is generated at all (even if the attribute is specified).
        StackProbeType::Inline => "inline-asm",
        // Flag our internal `__rust_probestack` function as the stack probe symbol.
        // This is defined in the `compiler-builtins` crate for each architecture.
        StackProbeType::Call => &mangle_internal_symbol(tcx, "__rust_probestack"),
        // Pick from the two above based on the LLVM version.
        StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
            if llvm_util::get_version() < min_llvm_version_for_inline {
                &mangle_internal_symbol(tcx, "__rust_probestack")
            } else {
                "inline-asm"
            }
        }
    };
    Some(llvm::CreateAttrStringValue(cx.llcx, "probe-stack", attr_value))
}
293
294fn stackprotector_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
295    let sspattr = match sess.stack_protector() {
296        StackProtector::None => return None,
297        StackProtector::All => AttributeKind::StackProtectReq,
298        StackProtector::Strong => AttributeKind::StackProtectStrong,
299        StackProtector::Basic => AttributeKind::StackProtect,
300    };
301
302    Some(sspattr.create_attr(cx.llcx))
303}
304
305pub(crate) fn target_cpu_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> &'ll Attribute {
306    let target_cpu = llvm_util::target_cpu(sess);
307    llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu)
308}
309
310pub(crate) fn tune_cpu_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
311    llvm_util::tune_cpu(sess)
312        .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu))
313}
314
315/// Get the `target-features` LLVM attribute.
316pub(crate) fn target_features_attr<'ll, 'tcx>(
317    cx: &SimpleCx<'ll>,
318    tcx: TyCtxt<'tcx>,
319    function_features: Vec<String>,
320) -> Option<&'ll Attribute> {
321    let global_features = tcx.global_backend_features(()).iter().map(String::as_str);
322    let function_features = function_features.iter().map(String::as_str);
323    let target_features =
324        global_features.chain(function_features).intersperse(",").collect::<String>();
325    (!target_features.is_empty())
326        .then(|| llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features))
327}
328
329/// Get the `NonLazyBind` LLVM attribute,
330/// if the codegen options allow skipping the PLT.
331pub(crate) fn non_lazy_bind_attr<'ll>(
332    cx: &SimpleCx<'ll>,
333    sess: &Session,
334) -> Option<&'ll Attribute> {
335    // Don't generate calls through PLT if it's not necessary
336    if !sess.needs_plt() { Some(AttributeKind::NonLazyBind.create_attr(cx.llcx)) } else { None }
337}
338
339/// Get the default optimizations attrs for a function.
340#[inline]
341pub(crate) fn default_optimisation_attrs<'ll>(
342    cx: &SimpleCx<'ll>,
343    sess: &Session,
344) -> SmallVec<[&'ll Attribute; 2]> {
345    let mut attrs = SmallVec::new();
346    match sess.opts.optimize {
347        OptLevel::Size => {
348            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
349        }
350        OptLevel::SizeMin => {
351            attrs.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
352            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
353        }
354        _ => {}
355    }
356    attrs
357}
358
/// Create the `alloc-family` string attribute shared by all Rust allocator
/// entry points, so LLVM treats them as one allocation family.
fn create_alloc_family_attr(llcx: &llvm::Context) -> &llvm::Attribute {
    llvm::CreateAttrStringValue(llcx, "alloc-family", "__rust_alloc")
}
362
363/// Helper for `FnAbiLlvmExt::apply_attrs_llfn`:
364/// Composite function which sets LLVM attributes for function depending on its AST (`#[attribute]`)
365/// attributes.
366pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
367    cx: &SimpleCx<'ll>,
368    tcx: TyCtxt<'tcx>,
369    llfn: &'ll Value,
370    codegen_fn_attrs: &CodegenFnAttrs,
371    instance: Option<ty::Instance<'tcx>>,
372) {
373    let sess = tcx.sess;
374    let mut to_add = SmallVec::<[_; 16]>::new();
375
376    match codegen_fn_attrs.optimize {
377        OptimizeAttr::Default => {
378            to_add.extend(default_optimisation_attrs(cx, sess));
379        }
380        OptimizeAttr::DoNotOptimize => {
381            to_add.push(llvm::AttributeKind::OptimizeNone.create_attr(cx.llcx));
382        }
383        OptimizeAttr::Size => {
384            to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
385            to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
386        }
387        OptimizeAttr::Speed => {}
388    }
389
390    if sess.must_emit_unwind_tables() {
391        to_add.push(uwtable_attr(cx.llcx, sess.opts.unstable_opts.use_sync_unwind));
392    }
393
394    if sess.opts.unstable_opts.profile_sample_use.is_some() {
395        to_add.push(llvm::CreateAttrString(cx.llcx, "use-sample-profile"));
396    }
397
398    // FIXME: none of these functions interact with source level attributes.
399    to_add.extend(frame_pointer_type_attr(cx, sess));
400    to_add.extend(function_return_attr(cx, sess));
401    to_add.extend(instrument_function_attr(cx, sess));
402    to_add.extend(nojumptables_attr(cx, sess));
403    to_add.extend(probestack_attr(cx, tcx));
404    to_add.extend(stackprotector_attr(cx, sess));
405
406    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_BUILTINS) {
407        to_add.push(llvm::CreateAttrString(cx.llcx, "no-builtins"));
408    }
409
410    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::OFFLOAD_KERNEL) {
411        to_add.push(llvm::CreateAttrString(cx.llcx, "offload-kernel"))
412    }
413
414    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
415        to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
416    }
417    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
418        to_add.push(MemoryEffects::ReadOnly.create_attr(cx.llcx));
419    }
420    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
421        to_add.push(MemoryEffects::None.create_attr(cx.llcx));
422    }
423    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
424        // do nothing; a naked function is converted into an extern function
425        // and a global assembly block. LLVM's support for naked functions is
426        // not used.
427    } else {
428        // Do not set sanitizer attributes for naked functions.
429        to_add.extend(sanitize_attrs(cx, tcx, codegen_fn_attrs.sanitizers));
430
431        // For non-naked functions, set branch protection attributes on aarch64.
432        if let Some(BranchProtection { bti, pac_ret, gcs }) =
433            sess.opts.unstable_opts.branch_protection
434        {
435            if !(sess.target.arch == Arch::AArch64) {
    ::core::panicking::panic("assertion failed: sess.target.arch == Arch::AArch64")
};assert!(sess.target.arch == Arch::AArch64);
436            if bti {
437                to_add.push(llvm::CreateAttrString(cx.llcx, "branch-target-enforcement"));
438            }
439            if gcs {
440                to_add.push(llvm::CreateAttrString(cx.llcx, "guarded-control-stack"));
441            }
442            if let Some(PacRet { leaf, pc, key }) = pac_ret {
443                if pc {
444                    to_add.push(llvm::CreateAttrString(cx.llcx, "branch-protection-pauth-lr"));
445                }
446                to_add.push(llvm::CreateAttrStringValue(
447                    cx.llcx,
448                    "sign-return-address",
449                    if leaf { "all" } else { "non-leaf" },
450                ));
451                to_add.push(llvm::CreateAttrStringValue(
452                    cx.llcx,
453                    "sign-return-address-key",
454                    if key == PAuthKey::A { "a_key" } else { "b_key" },
455                ));
456            }
457        }
458    }
459    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
460        || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
461    {
462        to_add.push(create_alloc_family_attr(cx.llcx));
463        if let Some(instance) = instance
464            && let Some(name) =
465                {
    {
        'done:
            {
            for i in
                ::rustc_hir::attrs::HasAttrs::get_attrs(instance.def_id(),
                    &tcx) {
                #[allow(unused_imports)]
                use rustc_hir::attrs::AttributeKind::*;
                let i: &rustc_hir::Attribute = i;
                match i {
                    rustc_hir::Attribute::Parsed(RustcAllocatorZeroedVariant {
                        name }) => {
                        break 'done Some(name);
                    }
                    rustc_hir::Attribute::Unparsed(..) =>
                        {}
                        #[deny(unreachable_patterns)]
                        _ => {}
                }
            }
            None
        }
    }
}find_attr!(tcx, instance.def_id(), RustcAllocatorZeroedVariant {name} => name)
466        {
467            to_add.push(llvm::CreateAttrStringValue(
468                cx.llcx,
469                "alloc-variant-zeroed",
470                &mangle_internal_symbol(tcx, name.as_str()),
471            ));
472        }
473        // apply to argument place instead of function
474        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
475        attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
476        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
477        let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
478        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
479            flags |= AllocKindFlags::Uninitialized;
480        } else {
481            flags |= AllocKindFlags::Zeroed;
482        }
483        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
484        // apply to return place instead of function (unlike all other attributes applied in this
485        // function)
486        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
487        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
488    }
489    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
490        to_add.push(create_alloc_family_attr(cx.llcx));
491        to_add.push(llvm::CreateAllocKindAttr(
492            cx.llcx,
493            AllocKindFlags::Realloc | AllocKindFlags::Aligned,
494        ));
495        // applies to argument place instead of function place
496        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
497        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
498        // apply to argument place instead of function
499        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
500        attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
501        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
502        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
503        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
504    }
505    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
506        to_add.push(create_alloc_family_attr(cx.llcx));
507        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
508        // applies to argument place instead of function place
509        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
510        // "Does not capture provenance" means "if the function call stashes the pointer somewhere,
511        // accessing that pointer after the function returns is UB". That is definitely the case here since
512        // freeing will destroy the provenance.
513        let captures_addr = AttributeKind::CapturesAddress.create_attr(cx.llcx);
514        let attrs = &[allocated_pointer, captures_addr];
515        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), attrs);
516    }
517    if let Some(align) = codegen_fn_attrs.alignment {
518        llvm::set_alignment(llfn, align);
519    }
520    to_add.extend(patchable_function_entry_attrs(
521        cx,
522        sess,
523        codegen_fn_attrs.patchable_function_entry,
524    ));
525
526    // Always annotate functions with the target-cpu they are compiled for.
527    // Without this, ThinLTO won't inline Rust functions into Clang generated
528    // functions (because Clang annotates functions this way too).
529    to_add.push(target_cpu_attr(cx, sess));
530    // tune-cpu is only conveyed through the attribute for our purpose.
531    // The target doesn't care; the subtarget reads our attribute.
532    to_add.extend(tune_cpu_attr(cx, sess));
533
534    let function_features =
535        codegen_fn_attrs.target_features.iter().map(|f| f.name.as_str()).collect::<Vec<&str>>();
536
537    // Apply function attributes as per usual if there are no user defined
538    // target features otherwise this will get applied at the callsite.
539    if function_features.is_empty() {
540        if let Some(instance) = instance
541            && let Some(inline_attr) = inline_attr(cx, tcx, instance)
542        {
543            to_add.push(inline_attr);
544        }
545    }
546
547    let function_features = function_features
548        .iter()
549        // Convert to LLVMFeatures and filter out unavailable ones
550        .flat_map(|feat| llvm_util::to_llvm_features(sess, feat))
551        // Convert LLVMFeatures & dependencies to +<feats>s
552        .flat_map(|feat| feat.into_iter().map(|f| ::alloc::__export::must_use({ ::alloc::fmt::format(format_args!("+{0}", f)) })format!("+{f}")))
553        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
554            InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
555            InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
556        }))
557        .collect::<Vec<String>>();
558
559    if sess.target.is_like_wasm {
560        // If this function is an import from the environment but the wasm
561        // import has a specific module/name, apply them here.
562        if let Some(instance) = instance
563            && let Some(module) = wasm_import_module(tcx, instance.def_id())
564        {
565            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", module));
566
567            let name =
568                codegen_fn_attrs.symbol_name.unwrap_or_else(|| tcx.item_name(instance.def_id()));
569            let name = name.as_str();
570            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-name", name));
571        }
572    }
573
574    to_add.extend(target_features_attr(cx, tcx, function_features));
575
576    attributes::apply_to_llfn(llfn, Function, &to_add);
577}
578
/// Look up the wasm import module recorded for `id` in its crate's
/// wasm-import-module map, if any.
fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<&String> {
    tcx.wasm_import_module_map(id.krate).get(&id)
}