// rustc_codegen_llvm/attributes.rs
//! Set and unset common attributes on LLVM values.
use rustc_hir::attrs::{InlineAttr, InstructionSetAttr, OptimizeAttr, RtsanSetting};
use rustc_hir::def_id::DefId;
use rustc_hir::find_attr;
use rustc_middle::middle::codegen_fn_attrs::{
    CodegenFnAttrFlags, CodegenFnAttrs, PatchableFunctionEntry, SanitizerFnAttrs, TargetFeature,
};
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::config::{BranchProtection, FunctionReturn, OptLevel, PAuthKey, PacRet};
use rustc_span::sym;
use rustc_symbol_mangling::mangle_internal_symbol;
use rustc_target::spec::{Arch, FramePointer, SanitizerSet, StackProbeType, StackProtector};
use smallvec::SmallVec;

use crate::context::SimpleCx;
use crate::errors::{PackedStackBackchainNeedsSoftfloat, SanitizerMemtagRequiresMte};
use crate::llvm::AttributePlace::Function;
use crate::llvm::{
    self, AllocKindFlags, Attribute, AttributeKind, AttributePlace, MemoryEffects, Value,
};
use crate::{Session, attributes, llvm_util};
22
23pub(crate) fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
24    if !attrs.is_empty() {
25        llvm::AddFunctionAttributes(llfn, idx, attrs);
26    }
27}
28
29pub(crate) fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
30    if !attrs.is_empty() {
31        llvm::AddCallSiteAttributes(callsite, idx, attrs);
32    }
33}
34
35pub(crate) fn has_string_attr(llfn: &Value, name: &str) -> bool {
36    llvm::HasStringAttribute(llfn, name)
37}
38
39pub(crate) fn remove_string_attr_from_llfn(llfn: &Value, name: &str) {
40    llvm::RemoveStringAttrFromFn(llfn, name);
41}
42
43/// Get LLVM attribute for the provided inline heuristic.
44pub(crate) fn inline_attr<'ll, 'tcx>(
45    cx: &SimpleCx<'ll>,
46    tcx: TyCtxt<'tcx>,
47    instance: ty::Instance<'tcx>,
48) -> Option<&'ll Attribute> {
49    // `optnone` requires `noinline`
50    let codegen_fn_attrs = tcx.codegen_fn_attrs(instance.def_id());
51    let inline = match (codegen_fn_attrs.inline, &codegen_fn_attrs.optimize) {
52        (_, OptimizeAttr::DoNotOptimize) => InlineAttr::Never,
53        (InlineAttr::None, _) if instance.def.requires_inline(tcx) => InlineAttr::Hint,
54        (inline, _) => inline,
55    };
56
57    if !tcx.sess.opts.unstable_opts.inline_llvm {
58        // disable LLVM inlining
59        return Some(AttributeKind::NoInline.create_attr(cx.llcx));
60    }
61    match inline {
62        InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
63        InlineAttr::Always | InlineAttr::Force { .. } => {
64            Some(AttributeKind::AlwaysInline.create_attr(cx.llcx))
65        }
66        InlineAttr::Never => {
67            if tcx.sess.target.arch != Arch::AmdGpu {
68                Some(AttributeKind::NoInline.create_attr(cx.llcx))
69            } else {
70                None
71            }
72        }
73        InlineAttr::None => None,
74    }
75}
76
77#[inline]
78fn patchable_function_entry_attrs<'ll>(
79    cx: &SimpleCx<'ll>,
80    sess: &Session,
81    attr: Option<PatchableFunctionEntry>,
82) -> SmallVec<[&'ll Attribute; 2]> {
83    let mut attrs = SmallVec::new();
84    let patchable_spec = attr.unwrap_or_else(|| {
85        PatchableFunctionEntry::from_config(sess.opts.unstable_opts.patchable_function_entry)
86    });
87    let entry = patchable_spec.entry();
88    let prefix = patchable_spec.prefix();
89    if entry > 0 {
90        attrs.push(llvm::CreateAttrStringValue(
91            cx.llcx,
92            "patchable-function-entry",
93            &::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{0}", entry))
    })format!("{}", entry),
94        ));
95    }
96    if prefix > 0 {
97        attrs.push(llvm::CreateAttrStringValue(
98            cx.llcx,
99            "patchable-function-prefix",
100            &::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{0}", prefix))
    })format!("{}", prefix),
101        ));
102    }
103    attrs
104}
105
106/// Get LLVM sanitize attributes.
107#[inline]
108pub(crate) fn sanitize_attrs<'ll, 'tcx>(
109    cx: &SimpleCx<'ll>,
110    tcx: TyCtxt<'tcx>,
111    sanitizer_fn_attr: SanitizerFnAttrs,
112) -> SmallVec<[&'ll Attribute; 4]> {
113    let mut attrs = SmallVec::new();
114    let enabled = tcx.sess.sanitizers() - sanitizer_fn_attr.disabled;
115    if enabled.contains(SanitizerSet::ADDRESS) || enabled.contains(SanitizerSet::KERNELADDRESS) {
116        attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
117    }
118    if enabled.contains(SanitizerSet::MEMORY) {
119        attrs.push(llvm::AttributeKind::SanitizeMemory.create_attr(cx.llcx));
120    }
121    if enabled.contains(SanitizerSet::THREAD) {
122        attrs.push(llvm::AttributeKind::SanitizeThread.create_attr(cx.llcx));
123    }
124    if enabled.contains(SanitizerSet::HWADDRESS) || enabled.contains(SanitizerSet::KERNELHWADDRESS)
125    {
126        attrs.push(llvm::AttributeKind::SanitizeHWAddress.create_attr(cx.llcx));
127    }
128    if enabled.contains(SanitizerSet::SHADOWCALLSTACK) {
129        attrs.push(llvm::AttributeKind::ShadowCallStack.create_attr(cx.llcx));
130    }
131    if enabled.contains(SanitizerSet::MEMTAG) {
132        // Check to make sure the mte target feature is actually enabled.
133        let features = tcx.global_backend_features(());
134        let mte_feature =
135            features.iter().map(|s| &s[..]).rfind(|n| ["+mte", "-mte"].contains(&&n[..]));
136        if let None | Some("-mte") = mte_feature {
137            tcx.dcx().emit_err(SanitizerMemtagRequiresMte);
138        }
139
140        attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
141    }
142    if enabled.contains(SanitizerSet::SAFESTACK) {
143        attrs.push(llvm::AttributeKind::SanitizeSafeStack.create_attr(cx.llcx));
144    }
145    if tcx.sess.sanitizers().contains(SanitizerSet::REALTIME) {
146        match sanitizer_fn_attr.rtsan_setting {
147            RtsanSetting::Nonblocking => {
148                attrs.push(llvm::AttributeKind::SanitizeRealtimeNonblocking.create_attr(cx.llcx))
149            }
150            RtsanSetting::Blocking => {
151                attrs.push(llvm::AttributeKind::SanitizeRealtimeBlocking.create_attr(cx.llcx))
152            }
153            // caller is the default, so no llvm attribute
154            RtsanSetting::Caller => (),
155        }
156    }
157    attrs
158}
159
160/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
161#[inline]
162pub(crate) fn uwtable_attr(llcx: &llvm::Context, use_sync_unwind: Option<bool>) -> &Attribute {
163    // NOTE: We should determine if we even need async unwind tables, as they
164    // take have more overhead and if we can use sync unwind tables we
165    // probably should.
166    let async_unwind = !use_sync_unwind.unwrap_or(false);
167    llvm::CreateUWTableAttr(llcx, async_unwind)
168}
169
170pub(crate) fn frame_pointer_type_attr<'ll>(
171    cx: &SimpleCx<'ll>,
172    sess: &Session,
173) -> Option<&'ll Attribute> {
174    let mut fp = sess.target.frame_pointer;
175    let opts = &sess.opts;
176    // "mcount" function relies on stack pointer.
177    // See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
178    if opts.unstable_opts.instrument_mcount {
179        fp.ratchet(FramePointer::Always);
180    }
181    fp.ratchet(opts.cg.force_frame_pointers);
182    let attr_value = match fp {
183        FramePointer::Always => "all",
184        FramePointer::NonLeaf => "non-leaf",
185        FramePointer::MayOmit => return None,
186    };
187    Some(llvm::CreateAttrStringValue(cx.llcx, "frame-pointer", attr_value))
188}
189
190fn function_return_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
191    let function_return_attr = match sess.opts.unstable_opts.function_return {
192        FunctionReturn::Keep => return None,
193        FunctionReturn::ThunkExtern => AttributeKind::FnRetThunkExtern,
194    };
195
196    Some(function_return_attr.create_attr(cx.llcx))
197}
198
199/// Tell LLVM what instrument function to insert.
200#[inline]
201fn instrument_function_attr<'ll>(
202    cx: &SimpleCx<'ll>,
203    sess: &Session,
204) -> SmallVec<[&'ll Attribute; 4]> {
205    let mut attrs = SmallVec::new();
206    if sess.opts.unstable_opts.instrument_mcount {
207        // Similar to `clang -pg` behavior. Handled by the
208        // `post-inline-ee-instrument` LLVM pass.
209
210        // The function name varies on platforms.
211        // See test/CodeGen/mcount.c in clang.
212        let mcount_name = match &sess.target.llvm_mcount_intrinsic {
213            Some(llvm_mcount_intrinsic) => llvm_mcount_intrinsic.as_ref(),
214            None => sess.target.mcount.as_ref(),
215        };
216
217        attrs.push(llvm::CreateAttrStringValue(
218            cx.llcx,
219            "instrument-function-entry-inlined",
220            mcount_name,
221        ));
222    }
223    if let Some(options) = &sess.opts.unstable_opts.instrument_xray {
224        // XRay instrumentation is similar to __cyg_profile_func_{enter,exit}.
225        // Function prologue and epilogue are instrumented with NOP sleds,
226        // a runtime library later replaces them with detours into tracing code.
227        if options.always {
228            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-always"));
229        }
230        if options.never {
231            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-never"));
232        }
233        if options.ignore_loops {
234            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-ignore-loops"));
235        }
236        // LLVM will not choose the default for us, but rather requires specific
237        // threshold in absence of "xray-always". Use the same default as Clang.
238        let threshold = options.instruction_threshold.unwrap_or(200);
239        attrs.push(llvm::CreateAttrStringValue(
240            cx.llcx,
241            "xray-instruction-threshold",
242            &threshold.to_string(),
243        ));
244        if options.skip_entry {
245            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-entry"));
246        }
247        if options.skip_exit {
248            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-exit"));
249        }
250    }
251    attrs
252}
253
254fn nojumptables_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
255    if sess.opts.cg.jump_tables {
256        return None;
257    }
258
259    Some(llvm::CreateAttrStringValue(cx.llcx, "no-jump-tables", "true"))
260}
261
262fn probestack_attr<'ll, 'tcx>(cx: &SimpleCx<'ll>, tcx: TyCtxt<'tcx>) -> Option<&'ll Attribute> {
263    // Currently stack probes seem somewhat incompatible with the address
264    // sanitizer and thread sanitizer. With asan we're already protected from
265    // stack overflow anyway so we don't really need stack probes regardless.
266    if tcx.sess.sanitizers().intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD) {
267        return None;
268    }
269
270    // probestack doesn't play nice either with `-C profile-generate`.
271    if tcx.sess.opts.cg.profile_generate.enabled() {
272        return None;
273    }
274
275    let attr_value = match tcx.sess.target.stack_probes {
276        StackProbeType::None => return None,
277        // Request LLVM to generate the probes inline. If the given LLVM version does not support
278        // this, no probe is generated at all (even if the attribute is specified).
279        StackProbeType::Inline => "inline-asm",
280        // Flag our internal `__rust_probestack` function as the stack probe symbol.
281        // This is defined in the `compiler-builtins` crate for each architecture.
282        StackProbeType::Call => &mangle_internal_symbol(tcx, "__rust_probestack"),
283        // Pick from the two above based on the LLVM version.
284        StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
285            if llvm_util::get_version() < min_llvm_version_for_inline {
286                &mangle_internal_symbol(tcx, "__rust_probestack")
287            } else {
288                "inline-asm"
289            }
290        }
291    };
292    Some(llvm::CreateAttrStringValue(cx.llcx, "probe-stack", attr_value))
293}
294
295fn stackprotector_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
296    let sspattr = match sess.stack_protector() {
297        StackProtector::None => return None,
298        StackProtector::All => AttributeKind::StackProtectReq,
299        StackProtector::Strong => AttributeKind::StackProtectStrong,
300        StackProtector::Basic => AttributeKind::StackProtect,
301    };
302
303    Some(sspattr.create_attr(cx.llcx))
304}
305
306fn packed_stack_attr<'ll>(
307    cx: &SimpleCx<'ll>,
308    sess: &Session,
309    function_attributes: &Vec<TargetFeature>,
310) -> Option<&'ll Attribute> {
311    if sess.target.arch != Arch::S390x {
312        return None;
313    }
314    if !sess.opts.unstable_opts.packed_stack {
315        return None;
316    }
317
318    // The backchain and softfloat flags can be set via -Ctarget-features=...
319    // or via #[target_features(enable = ...)] so we have to check both possibilities
320    let have_backchain = sess.unstable_target_features.contains(&sym::backchain)
321        || function_attributes.iter().any(|feature| feature.name == sym::backchain);
322    let have_softfloat = sess.unstable_target_features.contains(&sym::soft_float)
323        || function_attributes.iter().any(|feature| feature.name == sym::soft_float);
324
325    // If both, backchain and packedstack, are enabled LLVM cannot generate valid function entry points
326    // with the default ABI. However if the softfloat flag is set LLVM will switch to the softfloat
327    // ABI, where this works.
328    if have_backchain && !have_softfloat {
329        sess.dcx().emit_err(PackedStackBackchainNeedsSoftfloat);
330        return None;
331    }
332
333    Some(llvm::CreateAttrString(cx.llcx, "packed-stack"))
334}
335
336pub(crate) fn target_cpu_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> &'ll Attribute {
337    let target_cpu = llvm_util::target_cpu(sess);
338    llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu)
339}
340
341pub(crate) fn tune_cpu_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
342    llvm_util::tune_cpu(sess)
343        .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu))
344}
345
346/// Get the `target-features` LLVM attribute.
347pub(crate) fn target_features_attr<'ll, 'tcx>(
348    cx: &SimpleCx<'ll>,
349    tcx: TyCtxt<'tcx>,
350    function_features: Vec<String>,
351) -> Option<&'ll Attribute> {
352    let global_features = tcx.global_backend_features(()).iter().map(String::as_str);
353    let function_features = function_features.iter().map(String::as_str);
354    let target_features =
355        global_features.chain(function_features).intersperse(",").collect::<String>();
356    (!target_features.is_empty())
357        .then(|| llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features))
358}
359
360/// Get the `NonLazyBind` LLVM attribute,
361/// if the codegen options allow skipping the PLT.
362pub(crate) fn non_lazy_bind_attr<'ll>(
363    cx: &SimpleCx<'ll>,
364    sess: &Session,
365) -> Option<&'ll Attribute> {
366    // Don't generate calls through PLT if it's not necessary
367    if !sess.needs_plt() { Some(AttributeKind::NonLazyBind.create_attr(cx.llcx)) } else { None }
368}
369
370/// Get the default optimizations attrs for a function.
371#[inline]
372pub(crate) fn default_optimisation_attrs<'ll>(
373    cx: &SimpleCx<'ll>,
374    sess: &Session,
375) -> SmallVec<[&'ll Attribute; 2]> {
376    let mut attrs = SmallVec::new();
377    match sess.opts.optimize {
378        OptLevel::Size => {
379            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
380        }
381        OptLevel::SizeMin => {
382            attrs.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
383            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
384        }
385        _ => {}
386    }
387    attrs
388}
389
390fn create_alloc_family_attr(llcx: &llvm::Context) -> &llvm::Attribute {
391    llvm::CreateAttrStringValue(llcx, "alloc-family", "__rust_alloc")
392}
393
394/// Helper for `FnAbiLlvmExt::apply_attrs_llfn`:
395/// Composite function which sets LLVM attributes for function depending on its AST (`#[attribute]`)
396/// attributes.
397pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
398    cx: &SimpleCx<'ll>,
399    tcx: TyCtxt<'tcx>,
400    llfn: &'ll Value,
401    codegen_fn_attrs: &CodegenFnAttrs,
402    instance: Option<ty::Instance<'tcx>>,
403) {
404    let sess = tcx.sess;
405    let mut to_add = SmallVec::<[_; 16]>::new();
406
407    match codegen_fn_attrs.optimize {
408        OptimizeAttr::Default => {
409            to_add.extend(default_optimisation_attrs(cx, sess));
410        }
411        OptimizeAttr::DoNotOptimize => {
412            to_add.push(llvm::AttributeKind::OptimizeNone.create_attr(cx.llcx));
413        }
414        OptimizeAttr::Size => {
415            to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
416            to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
417        }
418        OptimizeAttr::Speed => {}
419    }
420
421    if sess.must_emit_unwind_tables() {
422        to_add.push(uwtable_attr(cx.llcx, sess.opts.unstable_opts.use_sync_unwind));
423    }
424
425    if sess.opts.unstable_opts.profile_sample_use.is_some() {
426        to_add.push(llvm::CreateAttrString(cx.llcx, "use-sample-profile"));
427    }
428
429    // FIXME: none of these functions interact with source level attributes.
430    to_add.extend(frame_pointer_type_attr(cx, sess));
431    to_add.extend(function_return_attr(cx, sess));
432    to_add.extend(instrument_function_attr(cx, sess));
433    to_add.extend(nojumptables_attr(cx, sess));
434    to_add.extend(probestack_attr(cx, tcx));
435    to_add.extend(stackprotector_attr(cx, sess));
436
437    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_BUILTINS) {
438        to_add.push(llvm::CreateAttrString(cx.llcx, "no-builtins"));
439    }
440
441    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::OFFLOAD_KERNEL) {
442        to_add.push(llvm::CreateAttrString(cx.llcx, "offload-kernel"))
443    }
444
445    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
446        to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
447    }
448    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
449        to_add.push(MemoryEffects::ReadOnly.create_attr(cx.llcx));
450    }
451    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
452        to_add.push(MemoryEffects::None.create_attr(cx.llcx));
453    }
454    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
455        // do nothing; a naked function is converted into an extern function
456        // and a global assembly block. LLVM's support for naked functions is
457        // not used.
458    } else {
459        // Do not set sanitizer attributes for naked functions.
460        to_add.extend(sanitize_attrs(cx, tcx, codegen_fn_attrs.sanitizers));
461
462        // For non-naked functions, set branch protection attributes on aarch64.
463        if let Some(BranchProtection { bti, pac_ret, gcs }) =
464            sess.opts.unstable_opts.branch_protection
465        {
466            if !(sess.target.arch == Arch::AArch64) {
    ::core::panicking::panic("assertion failed: sess.target.arch == Arch::AArch64")
};assert!(sess.target.arch == Arch::AArch64);
467            if bti {
468                to_add.push(llvm::CreateAttrString(cx.llcx, "branch-target-enforcement"));
469            }
470            if gcs {
471                to_add.push(llvm::CreateAttrString(cx.llcx, "guarded-control-stack"));
472            }
473            if let Some(PacRet { leaf, pc, key }) = pac_ret {
474                if pc {
475                    to_add.push(llvm::CreateAttrString(cx.llcx, "branch-protection-pauth-lr"));
476                }
477                to_add.push(llvm::CreateAttrStringValue(
478                    cx.llcx,
479                    "sign-return-address",
480                    if leaf { "all" } else { "non-leaf" },
481                ));
482                to_add.push(llvm::CreateAttrStringValue(
483                    cx.llcx,
484                    "sign-return-address-key",
485                    if key == PAuthKey::A { "a_key" } else { "b_key" },
486                ));
487            }
488        }
489    }
490    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
491        || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
492    {
493        to_add.push(create_alloc_family_attr(cx.llcx));
494        if let Some(instance) = instance
495            && let Some(name) =
496                {
    {
        'done:
            {
            for i in
                ::rustc_hir::attrs::HasAttrs::get_attrs(instance.def_id(),
                    &tcx) {
                #[allow(unused_imports)]
                use rustc_hir::attrs::AttributeKind::*;
                let i: &rustc_hir::Attribute = i;
                match i {
                    rustc_hir::Attribute::Parsed(RustcAllocatorZeroedVariant {
                        name }) => {
                        break 'done Some(name);
                    }
                    rustc_hir::Attribute::Unparsed(..) =>
                        {}
                        #[deny(unreachable_patterns)]
                        _ => {}
                }
            }
            None
        }
    }
}find_attr!(tcx, instance.def_id(), RustcAllocatorZeroedVariant {name} => name)
497        {
498            to_add.push(llvm::CreateAttrStringValue(
499                cx.llcx,
500                "alloc-variant-zeroed",
501                &mangle_internal_symbol(tcx, name.as_str()),
502            ));
503        }
504        // apply to argument place instead of function
505        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
506        attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
507        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
508        let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
509        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
510            flags |= AllocKindFlags::Uninitialized;
511        } else {
512            flags |= AllocKindFlags::Zeroed;
513        }
514        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
515        // apply to return place instead of function (unlike all other attributes applied in this
516        // function)
517        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
518        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
519    }
520    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
521        to_add.push(create_alloc_family_attr(cx.llcx));
522        to_add.push(llvm::CreateAllocKindAttr(
523            cx.llcx,
524            AllocKindFlags::Realloc | AllocKindFlags::Aligned,
525        ));
526        // applies to argument place instead of function place
527        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
528        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
529        // apply to argument place instead of function
530        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
531        attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
532        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
533        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
534        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
535    }
536    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
537        to_add.push(create_alloc_family_attr(cx.llcx));
538        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
539        // applies to argument place instead of function place
540        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
541        // "Does not capture provenance" means "if the function call stashes the pointer somewhere,
542        // accessing that pointer after the function returns is UB". That is definitely the case here since
543        // freeing will destroy the provenance.
544        let captures_addr = AttributeKind::CapturesAddress.create_attr(cx.llcx);
545        let attrs = &[allocated_pointer, captures_addr];
546        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), attrs);
547    }
548    if let Some(align) = codegen_fn_attrs.alignment {
549        llvm::set_alignment(llfn, align);
550    }
551    if let Some(packed_stack) = packed_stack_attr(cx, sess, &codegen_fn_attrs.target_features) {
552        to_add.push(packed_stack);
553    }
554    to_add.extend(patchable_function_entry_attrs(
555        cx,
556        sess,
557        codegen_fn_attrs.patchable_function_entry,
558    ));
559
560    // Always annotate functions with the target-cpu they are compiled for.
561    // Without this, ThinLTO won't inline Rust functions into Clang generated
562    // functions (because Clang annotates functions this way too).
563    to_add.push(target_cpu_attr(cx, sess));
564    // tune-cpu is only conveyed through the attribute for our purpose.
565    // The target doesn't care; the subtarget reads our attribute.
566    to_add.extend(tune_cpu_attr(cx, sess));
567
568    let function_features =
569        codegen_fn_attrs.target_features.iter().map(|f| f.name.as_str()).collect::<Vec<&str>>();
570
571    // Apply function attributes as per usual if there are no user defined
572    // target features otherwise this will get applied at the callsite.
573    if function_features.is_empty() {
574        if let Some(instance) = instance
575            && let Some(inline_attr) = inline_attr(cx, tcx, instance)
576        {
577            to_add.push(inline_attr);
578        }
579    }
580
581    let function_features = function_features
582        .iter()
583        // Convert to LLVMFeatures and filter out unavailable ones
584        .flat_map(|feat| llvm_util::to_llvm_features(sess, feat))
585        // Convert LLVMFeatures & dependencies to +<feats>s
586        .flat_map(|feat| feat.into_iter().map(|f| ::alloc::__export::must_use({ ::alloc::fmt::format(format_args!("+{0}", f)) })format!("+{f}")))
587        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
588            InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
589            InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
590        }))
591        .collect::<Vec<String>>();
592
593    if sess.target.is_like_wasm {
594        // If this function is an import from the environment but the wasm
595        // import has a specific module/name, apply them here.
596        if let Some(instance) = instance
597            && let Some(module) = wasm_import_module(tcx, instance.def_id())
598        {
599            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", module));
600
601            let name =
602                codegen_fn_attrs.symbol_name.unwrap_or_else(|| tcx.item_name(instance.def_id()));
603            let name = name.as_str();
604            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-name", name));
605        }
606    }
607
608    to_add.extend(target_features_attr(cx, tcx, function_features));
609
610    attributes::apply_to_llfn(llfn, Function, &to_add);
611}
612
613fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<&String> {
614    tcx.wasm_import_module_map(id.krate).get(&id)
615}