rustc_codegen_llvm/
attributes.rs

//! Set and unset common attributes on LLVM values.
2use rustc_hir::attrs::{InlineAttr, InstructionSetAttr, OptimizeAttr, RtsanSetting};
3use rustc_hir::def_id::DefId;
4use rustc_middle::middle::codegen_fn_attrs::{
5    CodegenFnAttrFlags, CodegenFnAttrs, PatchableFunctionEntry, SanitizerFnAttrs,
6};
7use rustc_middle::ty::{self, TyCtxt};
8use rustc_session::config::{BranchProtection, FunctionReturn, OptLevel, PAuthKey, PacRet};
9use rustc_symbol_mangling::mangle_internal_symbol;
10use rustc_target::spec::{Arch, FramePointer, SanitizerSet, StackProbeType, StackProtector};
11use smallvec::SmallVec;
12
13use crate::context::SimpleCx;
14use crate::errors::SanitizerMemtagRequiresMte;
15use crate::llvm::AttributePlace::Function;
16use crate::llvm::{
17    self, AllocKindFlags, Attribute, AttributeKind, AttributePlace, MemoryEffects, Value,
18};
19use crate::{Session, attributes, llvm_util};
20
21pub(crate) fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
22    if !attrs.is_empty() {
23        llvm::AddFunctionAttributes(llfn, idx, attrs);
24    }
25}
26
27pub(crate) fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
28    if !attrs.is_empty() {
29        llvm::AddCallSiteAttributes(callsite, idx, attrs);
30    }
31}
32
33pub(crate) fn has_string_attr(llfn: &Value, name: &str) -> bool {
34    llvm::HasStringAttribute(llfn, name)
35}
36
37pub(crate) fn remove_string_attr_from_llfn(llfn: &Value, name: &str) {
38    llvm::RemoveStringAttrFromFn(llfn, name);
39}
40
41/// Get LLVM attribute for the provided inline heuristic.
42pub(crate) fn inline_attr<'ll, 'tcx>(
43    cx: &SimpleCx<'ll>,
44    tcx: TyCtxt<'tcx>,
45    instance: ty::Instance<'tcx>,
46) -> Option<&'ll Attribute> {
47    // `optnone` requires `noinline`
48    let codegen_fn_attrs = tcx.codegen_fn_attrs(instance.def_id());
49    let inline = match (codegen_fn_attrs.inline, &codegen_fn_attrs.optimize) {
50        (_, OptimizeAttr::DoNotOptimize) => InlineAttr::Never,
51        (InlineAttr::None, _) if instance.def.requires_inline(tcx) => InlineAttr::Hint,
52        (inline, _) => inline,
53    };
54
55    if !tcx.sess.opts.unstable_opts.inline_llvm {
56        // disable LLVM inlining
57        return Some(AttributeKind::NoInline.create_attr(cx.llcx));
58    }
59    match inline {
60        InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
61        InlineAttr::Always | InlineAttr::Force { .. } => {
62            Some(AttributeKind::AlwaysInline.create_attr(cx.llcx))
63        }
64        InlineAttr::Never => {
65            if tcx.sess.target.arch != Arch::AmdGpu {
66                Some(AttributeKind::NoInline.create_attr(cx.llcx))
67            } else {
68                None
69            }
70        }
71        InlineAttr::None => None,
72    }
73}
74
75#[inline]
76fn patchable_function_entry_attrs<'ll>(
77    cx: &SimpleCx<'ll>,
78    sess: &Session,
79    attr: Option<PatchableFunctionEntry>,
80) -> SmallVec<[&'ll Attribute; 2]> {
81    let mut attrs = SmallVec::new();
82    let patchable_spec = attr.unwrap_or_else(|| {
83        PatchableFunctionEntry::from_config(sess.opts.unstable_opts.patchable_function_entry)
84    });
85    let entry = patchable_spec.entry();
86    let prefix = patchable_spec.prefix();
87    if entry > 0 {
88        attrs.push(llvm::CreateAttrStringValue(
89            cx.llcx,
90            "patchable-function-entry",
91            &format!("{}", entry),
92        ));
93    }
94    if prefix > 0 {
95        attrs.push(llvm::CreateAttrStringValue(
96            cx.llcx,
97            "patchable-function-prefix",
98            &format!("{}", prefix),
99        ));
100    }
101    attrs
102}
103
104/// Get LLVM sanitize attributes.
105#[inline]
106pub(crate) fn sanitize_attrs<'ll, 'tcx>(
107    cx: &SimpleCx<'ll>,
108    tcx: TyCtxt<'tcx>,
109    sanitizer_fn_attr: SanitizerFnAttrs,
110) -> SmallVec<[&'ll Attribute; 4]> {
111    let mut attrs = SmallVec::new();
112    let enabled = tcx.sess.sanitizers() - sanitizer_fn_attr.disabled;
113    if enabled.contains(SanitizerSet::ADDRESS) || enabled.contains(SanitizerSet::KERNELADDRESS) {
114        attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
115    }
116    if enabled.contains(SanitizerSet::MEMORY) {
117        attrs.push(llvm::AttributeKind::SanitizeMemory.create_attr(cx.llcx));
118    }
119    if enabled.contains(SanitizerSet::THREAD) {
120        attrs.push(llvm::AttributeKind::SanitizeThread.create_attr(cx.llcx));
121    }
122    if enabled.contains(SanitizerSet::HWADDRESS) {
123        attrs.push(llvm::AttributeKind::SanitizeHWAddress.create_attr(cx.llcx));
124    }
125    if enabled.contains(SanitizerSet::SHADOWCALLSTACK) {
126        attrs.push(llvm::AttributeKind::ShadowCallStack.create_attr(cx.llcx));
127    }
128    if enabled.contains(SanitizerSet::MEMTAG) {
129        // Check to make sure the mte target feature is actually enabled.
130        let features = tcx.global_backend_features(());
131        let mte_feature =
132            features.iter().map(|s| &s[..]).rfind(|n| ["+mte", "-mte"].contains(&&n[..]));
133        if let None | Some("-mte") = mte_feature {
134            tcx.dcx().emit_err(SanitizerMemtagRequiresMte);
135        }
136
137        attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
138    }
139    if enabled.contains(SanitizerSet::SAFESTACK) {
140        attrs.push(llvm::AttributeKind::SanitizeSafeStack.create_attr(cx.llcx));
141    }
142    if tcx.sess.sanitizers().contains(SanitizerSet::REALTIME) {
143        match sanitizer_fn_attr.rtsan_setting {
144            RtsanSetting::Nonblocking => {
145                attrs.push(llvm::AttributeKind::SanitizeRealtimeNonblocking.create_attr(cx.llcx))
146            }
147            RtsanSetting::Blocking => {
148                attrs.push(llvm::AttributeKind::SanitizeRealtimeBlocking.create_attr(cx.llcx))
149            }
150            // caller is the default, so no llvm attribute
151            RtsanSetting::Caller => (),
152        }
153    }
154    attrs
155}
156
157/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
158#[inline]
159pub(crate) fn uwtable_attr(llcx: &llvm::Context, use_sync_unwind: Option<bool>) -> &Attribute {
160    // NOTE: We should determine if we even need async unwind tables, as they
161    // take have more overhead and if we can use sync unwind tables we
162    // probably should.
163    let async_unwind = !use_sync_unwind.unwrap_or(false);
164    llvm::CreateUWTableAttr(llcx, async_unwind)
165}
166
167pub(crate) fn frame_pointer_type_attr<'ll>(
168    cx: &SimpleCx<'ll>,
169    sess: &Session,
170) -> Option<&'ll Attribute> {
171    let mut fp = sess.target.frame_pointer;
172    let opts = &sess.opts;
173    // "mcount" function relies on stack pointer.
174    // See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
175    if opts.unstable_opts.instrument_mcount {
176        fp.ratchet(FramePointer::Always);
177    }
178    fp.ratchet(opts.cg.force_frame_pointers);
179    let attr_value = match fp {
180        FramePointer::Always => "all",
181        FramePointer::NonLeaf => "non-leaf",
182        FramePointer::MayOmit => return None,
183    };
184    Some(llvm::CreateAttrStringValue(cx.llcx, "frame-pointer", attr_value))
185}
186
187fn function_return_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
188    let function_return_attr = match sess.opts.unstable_opts.function_return {
189        FunctionReturn::Keep => return None,
190        FunctionReturn::ThunkExtern => AttributeKind::FnRetThunkExtern,
191    };
192
193    Some(function_return_attr.create_attr(cx.llcx))
194}
195
196/// Tell LLVM what instrument function to insert.
197#[inline]
198fn instrument_function_attr<'ll>(
199    cx: &SimpleCx<'ll>,
200    sess: &Session,
201) -> SmallVec<[&'ll Attribute; 4]> {
202    let mut attrs = SmallVec::new();
203    if sess.opts.unstable_opts.instrument_mcount {
204        // Similar to `clang -pg` behavior. Handled by the
205        // `post-inline-ee-instrument` LLVM pass.
206
207        // The function name varies on platforms.
208        // See test/CodeGen/mcount.c in clang.
209        let mcount_name = match &sess.target.llvm_mcount_intrinsic {
210            Some(llvm_mcount_intrinsic) => llvm_mcount_intrinsic.as_ref(),
211            None => sess.target.mcount.as_ref(),
212        };
213
214        attrs.push(llvm::CreateAttrStringValue(
215            cx.llcx,
216            "instrument-function-entry-inlined",
217            mcount_name,
218        ));
219    }
220    if let Some(options) = &sess.opts.unstable_opts.instrument_xray {
221        // XRay instrumentation is similar to __cyg_profile_func_{enter,exit}.
222        // Function prologue and epilogue are instrumented with NOP sleds,
223        // a runtime library later replaces them with detours into tracing code.
224        if options.always {
225            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-always"));
226        }
227        if options.never {
228            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-never"));
229        }
230        if options.ignore_loops {
231            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-ignore-loops"));
232        }
233        // LLVM will not choose the default for us, but rather requires specific
234        // threshold in absence of "xray-always". Use the same default as Clang.
235        let threshold = options.instruction_threshold.unwrap_or(200);
236        attrs.push(llvm::CreateAttrStringValue(
237            cx.llcx,
238            "xray-instruction-threshold",
239            &threshold.to_string(),
240        ));
241        if options.skip_entry {
242            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-entry"));
243        }
244        if options.skip_exit {
245            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-exit"));
246        }
247    }
248    attrs
249}
250
251fn nojumptables_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
252    if sess.opts.cg.jump_tables {
253        return None;
254    }
255
256    Some(llvm::CreateAttrStringValue(cx.llcx, "no-jump-tables", "true"))
257}
258
259fn probestack_attr<'ll, 'tcx>(cx: &SimpleCx<'ll>, tcx: TyCtxt<'tcx>) -> Option<&'ll Attribute> {
260    // Currently stack probes seem somewhat incompatible with the address
261    // sanitizer and thread sanitizer. With asan we're already protected from
262    // stack overflow anyway so we don't really need stack probes regardless.
263    if tcx.sess.sanitizers().intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD) {
264        return None;
265    }
266
267    // probestack doesn't play nice either with `-C profile-generate`.
268    if tcx.sess.opts.cg.profile_generate.enabled() {
269        return None;
270    }
271
272    let attr_value = match tcx.sess.target.stack_probes {
273        StackProbeType::None => return None,
274        // Request LLVM to generate the probes inline. If the given LLVM version does not support
275        // this, no probe is generated at all (even if the attribute is specified).
276        StackProbeType::Inline => "inline-asm",
277        // Flag our internal `__rust_probestack` function as the stack probe symbol.
278        // This is defined in the `compiler-builtins` crate for each architecture.
279        StackProbeType::Call => &mangle_internal_symbol(tcx, "__rust_probestack"),
280        // Pick from the two above based on the LLVM version.
281        StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
282            if llvm_util::get_version() < min_llvm_version_for_inline {
283                &mangle_internal_symbol(tcx, "__rust_probestack")
284            } else {
285                "inline-asm"
286            }
287        }
288    };
289    Some(llvm::CreateAttrStringValue(cx.llcx, "probe-stack", attr_value))
290}
291
292fn stackprotector_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
293    let sspattr = match sess.stack_protector() {
294        StackProtector::None => return None,
295        StackProtector::All => AttributeKind::StackProtectReq,
296        StackProtector::Strong => AttributeKind::StackProtectStrong,
297        StackProtector::Basic => AttributeKind::StackProtect,
298    };
299
300    Some(sspattr.create_attr(cx.llcx))
301}
302
303fn backchain_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
304    if sess.target.arch != Arch::S390x {
305        return None;
306    }
307
308    let requested_features = sess.opts.cg.target_feature.split(',');
309    let found_positive = requested_features.clone().any(|r| r == "+backchain");
310
311    if found_positive { Some(llvm::CreateAttrString(cx.llcx, "backchain")) } else { None }
312}
313
314pub(crate) fn target_cpu_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> &'ll Attribute {
315    let target_cpu = llvm_util::target_cpu(sess);
316    llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu)
317}
318
319pub(crate) fn tune_cpu_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> {
320    llvm_util::tune_cpu(sess)
321        .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu))
322}
323
324/// Get the `target-features` LLVM attribute.
325pub(crate) fn target_features_attr<'ll, 'tcx>(
326    cx: &SimpleCx<'ll>,
327    tcx: TyCtxt<'tcx>,
328    function_features: Vec<String>,
329) -> Option<&'ll Attribute> {
330    let global_features = tcx.global_backend_features(()).iter().map(String::as_str);
331    let function_features = function_features.iter().map(String::as_str);
332    let target_features =
333        global_features.chain(function_features).intersperse(",").collect::<String>();
334    (!target_features.is_empty())
335        .then(|| llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features))
336}
337
338/// Get the `NonLazyBind` LLVM attribute,
339/// if the codegen options allow skipping the PLT.
340pub(crate) fn non_lazy_bind_attr<'ll>(
341    cx: &SimpleCx<'ll>,
342    sess: &Session,
343) -> Option<&'ll Attribute> {
344    // Don't generate calls through PLT if it's not necessary
345    if !sess.needs_plt() { Some(AttributeKind::NonLazyBind.create_attr(cx.llcx)) } else { None }
346}
347
348/// Get the default optimizations attrs for a function.
349#[inline]
350pub(crate) fn default_optimisation_attrs<'ll>(
351    cx: &SimpleCx<'ll>,
352    sess: &Session,
353) -> SmallVec<[&'ll Attribute; 2]> {
354    let mut attrs = SmallVec::new();
355    match sess.opts.optimize {
356        OptLevel::Size => {
357            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
358        }
359        OptLevel::SizeMin => {
360            attrs.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
361            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
362        }
363        _ => {}
364    }
365    attrs
366}
367
368fn create_alloc_family_attr(llcx: &llvm::Context) -> &llvm::Attribute {
369    llvm::CreateAttrStringValue(llcx, "alloc-family", "__rust_alloc")
370}
371
372/// Helper for `FnAbi::apply_attrs_llfn`:
373/// Composite function which sets LLVM attributes for function depending on its AST (`#[attribute]`)
374/// attributes.
375pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
376    cx: &SimpleCx<'ll>,
377    tcx: TyCtxt<'tcx>,
378    llfn: &'ll Value,
379    codegen_fn_attrs: &CodegenFnAttrs,
380    instance: Option<ty::Instance<'tcx>>,
381) {
382    let sess = tcx.sess;
383    let mut to_add = SmallVec::<[_; 16]>::new();
384
385    match codegen_fn_attrs.optimize {
386        OptimizeAttr::Default => {
387            to_add.extend(default_optimisation_attrs(cx, sess));
388        }
389        OptimizeAttr::DoNotOptimize => {
390            to_add.push(llvm::AttributeKind::OptimizeNone.create_attr(cx.llcx));
391        }
392        OptimizeAttr::Size => {
393            to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
394            to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
395        }
396        OptimizeAttr::Speed => {}
397    }
398
399    if sess.must_emit_unwind_tables() {
400        to_add.push(uwtable_attr(cx.llcx, sess.opts.unstable_opts.use_sync_unwind));
401    }
402
403    if sess.opts.unstable_opts.profile_sample_use.is_some() {
404        to_add.push(llvm::CreateAttrString(cx.llcx, "use-sample-profile"));
405    }
406
407    // FIXME: none of these functions interact with source level attributes.
408    to_add.extend(frame_pointer_type_attr(cx, sess));
409    to_add.extend(function_return_attr(cx, sess));
410    to_add.extend(instrument_function_attr(cx, sess));
411    to_add.extend(nojumptables_attr(cx, sess));
412    to_add.extend(probestack_attr(cx, tcx));
413    to_add.extend(stackprotector_attr(cx, sess));
414
415    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_BUILTINS) {
416        to_add.push(llvm::CreateAttrString(cx.llcx, "no-builtins"));
417    }
418
419    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::OFFLOAD_KERNEL) {
420        to_add.push(llvm::CreateAttrString(cx.llcx, "offload-kernel"))
421    }
422
423    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
424        to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
425    }
426    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
427        to_add.push(MemoryEffects::ReadOnly.create_attr(cx.llcx));
428    }
429    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
430        to_add.push(MemoryEffects::None.create_attr(cx.llcx));
431    }
432    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
433        // do nothing; a naked function is converted into an extern function
434        // and a global assembly block. LLVM's support for naked functions is
435        // not used.
436    } else {
437        // Do not set sanitizer attributes for naked functions.
438        to_add.extend(sanitize_attrs(cx, tcx, codegen_fn_attrs.sanitizers));
439
440        // For non-naked functions, set branch protection attributes on aarch64.
441        if let Some(BranchProtection { bti, pac_ret, gcs }) =
442            sess.opts.unstable_opts.branch_protection
443        {
444            assert!(sess.target.arch == Arch::AArch64);
445            if bti {
446                to_add.push(llvm::CreateAttrString(cx.llcx, "branch-target-enforcement"));
447            }
448            if gcs {
449                to_add.push(llvm::CreateAttrString(cx.llcx, "guarded-control-stack"));
450            }
451            if let Some(PacRet { leaf, pc, key }) = pac_ret {
452                if pc {
453                    to_add.push(llvm::CreateAttrString(cx.llcx, "branch-protection-pauth-lr"));
454                }
455                to_add.push(llvm::CreateAttrStringValue(
456                    cx.llcx,
457                    "sign-return-address",
458                    if leaf { "all" } else { "non-leaf" },
459                ));
460                to_add.push(llvm::CreateAttrStringValue(
461                    cx.llcx,
462                    "sign-return-address-key",
463                    if key == PAuthKey::A { "a_key" } else { "b_key" },
464                ));
465            }
466        }
467    }
468    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
469        || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
470    {
471        to_add.push(create_alloc_family_attr(cx.llcx));
472        if let Some(instance) = instance
473            && let Some(zv) =
474                tcx.get_attr(instance.def_id(), rustc_span::sym::rustc_allocator_zeroed_variant)
475            && let Some(name) = zv.value_str()
476        {
477            to_add.push(llvm::CreateAttrStringValue(
478                cx.llcx,
479                "alloc-variant-zeroed",
480                &mangle_internal_symbol(tcx, name.as_str()),
481            ));
482        }
483        // apply to argument place instead of function
484        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
485        attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
486        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
487        let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
488        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
489            flags |= AllocKindFlags::Uninitialized;
490        } else {
491            flags |= AllocKindFlags::Zeroed;
492        }
493        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
494        // apply to return place instead of function (unlike all other attributes applied in this
495        // function)
496        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
497        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
498    }
499    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
500        to_add.push(create_alloc_family_attr(cx.llcx));
501        to_add.push(llvm::CreateAllocKindAttr(
502            cx.llcx,
503            AllocKindFlags::Realloc | AllocKindFlags::Aligned,
504        ));
505        // applies to argument place instead of function place
506        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
507        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
508        // apply to argument place instead of function
509        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
510        attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
511        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
512        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
513        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
514    }
515    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
516        to_add.push(create_alloc_family_attr(cx.llcx));
517        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
518        // applies to argument place instead of function place
519        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
520        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
521    }
522    if let Some(align) = codegen_fn_attrs.alignment {
523        llvm::set_alignment(llfn, align);
524    }
525    if let Some(backchain) = backchain_attr(cx, sess) {
526        to_add.push(backchain);
527    }
528    to_add.extend(patchable_function_entry_attrs(
529        cx,
530        sess,
531        codegen_fn_attrs.patchable_function_entry,
532    ));
533
534    // Always annotate functions with the target-cpu they are compiled for.
535    // Without this, ThinLTO won't inline Rust functions into Clang generated
536    // functions (because Clang annotates functions this way too).
537    to_add.push(target_cpu_attr(cx, sess));
538    // tune-cpu is only conveyed through the attribute for our purpose.
539    // The target doesn't care; the subtarget reads our attribute.
540    to_add.extend(tune_cpu_attr(cx, sess));
541
542    let function_features =
543        codegen_fn_attrs.target_features.iter().map(|f| f.name.as_str()).collect::<Vec<&str>>();
544
545    // Apply function attributes as per usual if there are no user defined
546    // target features otherwise this will get applied at the callsite.
547    if function_features.is_empty() {
548        if let Some(instance) = instance
549            && let Some(inline_attr) = inline_attr(cx, tcx, instance)
550        {
551            to_add.push(inline_attr);
552        }
553    }
554
555    let function_features = function_features
556        .iter()
557        // Convert to LLVMFeatures and filter out unavailable ones
558        .flat_map(|feat| llvm_util::to_llvm_features(sess, feat))
559        // Convert LLVMFeatures & dependencies to +<feats>s
560        .flat_map(|feat| feat.into_iter().map(|f| format!("+{f}")))
561        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
562            InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
563            InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
564        }))
565        .collect::<Vec<String>>();
566
567    if sess.target.is_like_wasm {
568        // If this function is an import from the environment but the wasm
569        // import has a specific module/name, apply them here.
570        if let Some(instance) = instance
571            && let Some(module) = wasm_import_module(tcx, instance.def_id())
572        {
573            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", module));
574
575            let name =
576                codegen_fn_attrs.symbol_name.unwrap_or_else(|| tcx.item_name(instance.def_id()));
577            let name = name.as_str();
578            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-name", name));
579        }
580    }
581
582    to_add.extend(target_features_attr(cx, tcx, function_features));
583
584    attributes::apply_to_llfn(llfn, Function, &to_add);
585}
586
587fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<&String> {
588    tcx.wasm_import_module_map(id.krate).get(&id)
589}