rustc_codegen_llvm/attributes.rs

1//! Set and unset common attributes on LLVM values.
2use rustc_codegen_ssa::traits::*;
3use rustc_hir::attrs::{InlineAttr, InstructionSetAttr, OptimizeAttr};
4use rustc_hir::def_id::DefId;
5use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, PatchableFunctionEntry};
6use rustc_middle::ty::{self, TyCtxt};
7use rustc_session::config::{BranchProtection, FunctionReturn, OptLevel, PAuthKey, PacRet};
8use rustc_symbol_mangling::mangle_internal_symbol;
9use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector};
10use smallvec::SmallVec;
11
12use crate::context::CodegenCx;
13use crate::errors::SanitizerMemtagRequiresMte;
14use crate::llvm::AttributePlace::Function;
15use crate::llvm::{self, AllocKindFlags, Attribute, AttributeKind, AttributePlace, MemoryEffects};
16use crate::value::Value;
17use crate::{attributes, llvm_util};
18
19pub(crate) fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
20    if !attrs.is_empty() {
21        llvm::AddFunctionAttributes(llfn, idx, attrs);
22    }
23}
24
25pub(crate) fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
26    if !attrs.is_empty() {
27        llvm::AddCallSiteAttributes(callsite, idx, attrs);
28    }
29}
30
/// Get LLVM attribute for the provided inline heuristic.
///
/// Returns `None` when no explicit inline attribute should be emitted, leaving
/// the decision entirely to LLVM's own heuristics.
pub(crate) fn inline_attr<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    instance: ty::Instance<'tcx>,
) -> Option<&'ll Attribute> {
    // `optnone` requires `noinline`
    let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
    let inline = match (codegen_fn_attrs.inline, &codegen_fn_attrs.optimize) {
        // `#[optimize(none)]` overrides any inline request: LLVM rejects
        // `optnone` combined with inlining hints, so force `Never` here.
        (_, OptimizeAttr::DoNotOptimize) => InlineAttr::Never,
        // No explicit attribute, but the instance kind itself wants inlining
        // (e.g. compiler-generated shims): upgrade to a hint.
        (InlineAttr::None, _) if instance.def.requires_inline(cx.tcx) => InlineAttr::Hint,
        (inline, _) => inline,
    };

    if !cx.tcx.sess.opts.unstable_opts.inline_llvm {
        // disable LLVM inlining
        return Some(AttributeKind::NoInline.create_attr(cx.llcx));
    }
    match inline {
        InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
        InlineAttr::Always | InlineAttr::Force { .. } => {
            Some(AttributeKind::AlwaysInline.create_attr(cx.llcx))
        }
        InlineAttr::Never => {
            // NOTE(review): `noinline` is deliberately skipped on amdgpu —
            // presumably that backend cannot tolerate it; confirm rationale.
            if cx.sess().target.arch != "amdgpu" {
                Some(AttributeKind::NoInline.create_attr(cx.llcx))
            } else {
                None
            }
        }
        InlineAttr::None => None,
    }
}
63
64#[inline]
65fn patchable_function_entry_attrs<'ll>(
66    cx: &CodegenCx<'ll, '_>,
67    attr: Option<PatchableFunctionEntry>,
68) -> SmallVec<[&'ll Attribute; 2]> {
69    let mut attrs = SmallVec::new();
70    let patchable_spec = attr.unwrap_or_else(|| {
71        PatchableFunctionEntry::from_config(cx.tcx.sess.opts.unstable_opts.patchable_function_entry)
72    });
73    let entry = patchable_spec.entry();
74    let prefix = patchable_spec.prefix();
75    if entry > 0 {
76        attrs.push(llvm::CreateAttrStringValue(
77            cx.llcx,
78            "patchable-function-entry",
79            &format!("{}", entry),
80        ));
81    }
82    if prefix > 0 {
83        attrs.push(llvm::CreateAttrStringValue(
84            cx.llcx,
85            "patchable-function-prefix",
86            &format!("{}", prefix),
87        ));
88    }
89    attrs
90}
91
92/// Get LLVM sanitize attributes.
93#[inline]
94pub(crate) fn sanitize_attrs<'ll>(
95    cx: &CodegenCx<'ll, '_>,
96    no_sanitize: SanitizerSet,
97) -> SmallVec<[&'ll Attribute; 4]> {
98    let mut attrs = SmallVec::new();
99    let enabled = cx.tcx.sess.opts.unstable_opts.sanitizer - no_sanitize;
100    if enabled.contains(SanitizerSet::ADDRESS) || enabled.contains(SanitizerSet::KERNELADDRESS) {
101        attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
102    }
103    if enabled.contains(SanitizerSet::MEMORY) {
104        attrs.push(llvm::AttributeKind::SanitizeMemory.create_attr(cx.llcx));
105    }
106    if enabled.contains(SanitizerSet::THREAD) {
107        attrs.push(llvm::AttributeKind::SanitizeThread.create_attr(cx.llcx));
108    }
109    if enabled.contains(SanitizerSet::HWADDRESS) {
110        attrs.push(llvm::AttributeKind::SanitizeHWAddress.create_attr(cx.llcx));
111    }
112    if enabled.contains(SanitizerSet::SHADOWCALLSTACK) {
113        attrs.push(llvm::AttributeKind::ShadowCallStack.create_attr(cx.llcx));
114    }
115    if enabled.contains(SanitizerSet::MEMTAG) {
116        // Check to make sure the mte target feature is actually enabled.
117        let features = cx.tcx.global_backend_features(());
118        let mte_feature =
119            features.iter().map(|s| &s[..]).rfind(|n| ["+mte", "-mte"].contains(&&n[..]));
120        if let None | Some("-mte") = mte_feature {
121            cx.tcx.dcx().emit_err(SanitizerMemtagRequiresMte);
122        }
123
124        attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
125    }
126    if enabled.contains(SanitizerSet::SAFESTACK) {
127        attrs.push(llvm::AttributeKind::SanitizeSafeStack.create_attr(cx.llcx));
128    }
129    attrs
130}
131
132/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
133#[inline]
134pub(crate) fn uwtable_attr(llcx: &llvm::Context, use_sync_unwind: Option<bool>) -> &Attribute {
135    // NOTE: We should determine if we even need async unwind tables, as they
136    // take have more overhead and if we can use sync unwind tables we
137    // probably should.
138    let async_unwind = !use_sync_unwind.unwrap_or(false);
139    llvm::CreateUWTableAttr(llcx, async_unwind)
140}
141
142pub(crate) fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
143    let mut fp = cx.sess().target.frame_pointer;
144    let opts = &cx.sess().opts;
145    // "mcount" function relies on stack pointer.
146    // See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
147    if opts.unstable_opts.instrument_mcount {
148        fp.ratchet(FramePointer::Always);
149    }
150    fp.ratchet(opts.cg.force_frame_pointers);
151    let attr_value = match fp {
152        FramePointer::Always => "all",
153        FramePointer::NonLeaf => "non-leaf",
154        FramePointer::MayOmit => return None,
155    };
156    Some(llvm::CreateAttrStringValue(cx.llcx, "frame-pointer", attr_value))
157}
158
159fn function_return_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
160    let function_return_attr = match cx.sess().opts.unstable_opts.function_return {
161        FunctionReturn::Keep => return None,
162        FunctionReturn::ThunkExtern => AttributeKind::FnRetThunkExtern,
163    };
164
165    Some(function_return_attr.create_attr(cx.llcx))
166}
167
/// Tell LLVM what instrument function to insert.
///
/// Covers two independent instrumentation mechanisms: `-Z instrument-mcount`
/// (gprof-style entry hooks) and `-Z instrument-xray` (NOP-sled based
/// tracing). Either, both, or neither may contribute attributes.
#[inline]
fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 4]> {
    let mut attrs = SmallVec::new();
    if cx.sess().opts.unstable_opts.instrument_mcount {
        // Similar to `clang -pg` behavior. Handled by the
        // `post-inline-ee-instrument` LLVM pass.

        // The function name varies on platforms.
        // See test/CodeGen/mcount.c in clang.
        let mcount_name = match &cx.sess().target.llvm_mcount_intrinsic {
            Some(llvm_mcount_intrinsic) => llvm_mcount_intrinsic.as_ref(),
            None => cx.sess().target.mcount.as_ref(),
        };

        attrs.push(llvm::CreateAttrStringValue(
            cx.llcx,
            "instrument-function-entry-inlined",
            mcount_name,
        ));
    }
    if let Some(options) = &cx.sess().opts.unstable_opts.instrument_xray {
        // XRay instrumentation is similar to __cyg_profile_func_{enter,exit}.
        // Function prologue and epilogue are instrumented with NOP sleds,
        // a runtime library later replaces them with detours into tracing code.
        if options.always {
            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-always"));
        }
        if options.never {
            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-never"));
        }
        if options.ignore_loops {
            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-ignore-loops"));
        }
        // LLVM will not choose the default for us, but rather requires specific
        // threshold in absence of "xray-always". Use the same default as Clang.
        let threshold = options.instruction_threshold.unwrap_or(200);
        attrs.push(llvm::CreateAttrStringValue(
            cx.llcx,
            "xray-instruction-threshold",
            &threshold.to_string(),
        ));
        if options.skip_entry {
            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-entry"));
        }
        if options.skip_exit {
            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-exit"));
        }
    }
    attrs
}
219
220fn nojumptables_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
221    if !cx.sess().opts.unstable_opts.no_jump_tables {
222        return None;
223    }
224
225    Some(llvm::CreateAttrStringValue(cx.llcx, "no-jump-tables", "true"))
226}
227
/// Compute the `probe-stack` attribute for this target/session, or `None`
/// when stack probes should not be emitted.
fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
    // Currently stack probes seem somewhat incompatible with the address
    // sanitizer and thread sanitizer. With asan we're already protected from
    // stack overflow anyway so we don't really need stack probes regardless.
    if cx
        .sess()
        .opts
        .unstable_opts
        .sanitizer
        .intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD)
    {
        return None;
    }

    // probestack doesn't play nice either with `-C profile-generate`.
    if cx.sess().opts.cg.profile_generate.enabled() {
        return None;
    }

    // The attribute value is either the magic "inline-asm" string, telling
    // LLVM to expand probes inline, or the name of a probe function to call.
    let attr_value = match cx.sess().target.stack_probes {
        StackProbeType::None => return None,
        // Request LLVM to generate the probes inline. If the given LLVM version does not support
        // this, no probe is generated at all (even if the attribute is specified).
        StackProbeType::Inline => "inline-asm",
        // Flag our internal `__rust_probestack` function as the stack probe symbol.
        // This is defined in the `compiler-builtins` crate for each architecture.
        StackProbeType::Call => &mangle_internal_symbol(cx.tcx, "__rust_probestack"),
        // Pick from the two above based on the LLVM version.
        StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
            if llvm_util::get_version() < min_llvm_version_for_inline {
                &mangle_internal_symbol(cx.tcx, "__rust_probestack")
            } else {
                "inline-asm"
            }
        }
    };
    Some(llvm::CreateAttrStringValue(cx.llcx, "probe-stack", attr_value))
}
266
267fn stackprotector_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
268    let sspattr = match cx.sess().stack_protector() {
269        StackProtector::None => return None,
270        StackProtector::All => AttributeKind::StackProtectReq,
271        StackProtector::Strong => AttributeKind::StackProtectStrong,
272        StackProtector::Basic => AttributeKind::StackProtect,
273    };
274
275    Some(sspattr.create_attr(cx.llcx))
276}
277
278fn backchain_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
279    if cx.sess().target.arch != "s390x" {
280        return None;
281    }
282
283    let requested_features = cx.sess().opts.cg.target_feature.split(',');
284    let found_positive = requested_features.clone().any(|r| r == "+backchain");
285
286    if found_positive { Some(llvm::CreateAttrString(cx.llcx, "backchain")) } else { None }
287}
288
289pub(crate) fn target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Attribute {
290    let target_cpu = llvm_util::target_cpu(cx.tcx.sess);
291    llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu)
292}
293
294pub(crate) fn tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
295    llvm_util::tune_cpu(cx.tcx.sess)
296        .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu))
297}
298
299/// Get the `target-features` LLVM attribute.
300pub(crate) fn target_features_attr<'ll>(
301    cx: &CodegenCx<'ll, '_>,
302    function_features: Vec<String>,
303) -> Option<&'ll Attribute> {
304    let global_features = cx.tcx.global_backend_features(()).iter().map(String::as_str);
305    let function_features = function_features.iter().map(String::as_str);
306    let target_features =
307        global_features.chain(function_features).intersperse(",").collect::<String>();
308    (!target_features.is_empty())
309        .then(|| llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features))
310}
311
312/// Get the `NonLazyBind` LLVM attribute,
313/// if the codegen options allow skipping the PLT.
314pub(crate) fn non_lazy_bind_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
315    // Don't generate calls through PLT if it's not necessary
316    if !cx.sess().needs_plt() {
317        Some(AttributeKind::NonLazyBind.create_attr(cx.llcx))
318    } else {
319        None
320    }
321}
322
323/// Get the default optimizations attrs for a function.
324#[inline]
325pub(crate) fn default_optimisation_attrs<'ll>(
326    cx: &CodegenCx<'ll, '_>,
327) -> SmallVec<[&'ll Attribute; 2]> {
328    let mut attrs = SmallVec::new();
329    match cx.sess().opts.optimize {
330        OptLevel::Size => {
331            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
332        }
333        OptLevel::SizeMin => {
334            attrs.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
335            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
336        }
337        _ => {}
338    }
339    attrs
340}
341
/// Build the `alloc-family` attribute shared by all Rust allocator entry
/// points, so LLVM can associate the alloc/realloc/free functions tagged
/// with it in this file.
fn create_alloc_family_attr(llcx: &llvm::Context) -> &llvm::Attribute {
    llvm::CreateAttrStringValue(llcx, "alloc-family", "__rust_alloc")
}
345
/// Helper for `FnAbi::apply_attrs_llfn`:
/// Composite function which sets LLVM attributes for function depending on its AST (`#[attribute]`)
/// attributes.
///
/// Most attributes are accumulated in `to_add` and applied to the function
/// place in one call at the end; the allocator-related blocks additionally
/// apply attributes directly to argument and return places as they go.
pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    llfn: &'ll Value,
    instance: ty::Instance<'tcx>,
) {
    let codegen_fn_attrs = cx.tcx.codegen_instance_attrs(instance.def);

    let mut to_add = SmallVec::<[_; 16]>::new();

    // Optimization level: per-function `#[optimize(..)]` wins over the
    // session-wide default.
    match codegen_fn_attrs.optimize {
        OptimizeAttr::Default => {
            to_add.extend(default_optimisation_attrs(cx));
        }
        OptimizeAttr::DoNotOptimize => {
            to_add.push(llvm::AttributeKind::OptimizeNone.create_attr(cx.llcx));
        }
        OptimizeAttr::Size => {
            to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
            to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
        }
        OptimizeAttr::Speed => {}
    }

    if cx.sess().must_emit_unwind_tables() {
        to_add.push(uwtable_attr(cx.llcx, cx.sess().opts.unstable_opts.use_sync_unwind));
    }

    if cx.sess().opts.unstable_opts.profile_sample_use.is_some() {
        to_add.push(llvm::CreateAttrString(cx.llcx, "use-sample-profile"));
    }

    // Session-level attributes that apply uniformly to every function.
    // FIXME: none of these functions interact with source level attributes.
    to_add.extend(frame_pointer_type_attr(cx));
    to_add.extend(function_return_attr(cx));
    to_add.extend(instrument_function_attr(cx));
    to_add.extend(nojumptables_attr(cx));
    to_add.extend(probestack_attr(cx));
    to_add.extend(stackprotector_attr(cx));

    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_BUILTINS) {
        to_add.push(llvm::CreateAttrString(cx.llcx, "no-builtins"));
    }

    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
        to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
        to_add.push(MemoryEffects::ReadOnly.create_attr(cx.llcx));
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
        to_add.push(MemoryEffects::None.create_attr(cx.llcx));
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
        // do nothing; a naked function is converted into an extern function
        // and a global assembly block. LLVM's support for naked functions is
        // not used.
    } else {
        // Do not set sanitizer attributes for naked functions.
        to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize));

        // For non-naked functions, set branch protection attributes on aarch64.
        if let Some(BranchProtection { bti, pac_ret, gcs }) =
            cx.sess().opts.unstable_opts.branch_protection
        {
            // Branch protection is only valid on aarch64; option parsing is
            // expected to have rejected it elsewhere.
            assert!(cx.sess().target.arch == "aarch64");
            if bti {
                to_add.push(llvm::CreateAttrString(cx.llcx, "branch-target-enforcement"));
            }
            if gcs {
                to_add.push(llvm::CreateAttrString(cx.llcx, "guarded-control-stack"));
            }
            if let Some(PacRet { leaf, pc, key }) = pac_ret {
                if pc {
                    to_add.push(llvm::CreateAttrString(cx.llcx, "branch-protection-pauth-lr"));
                }
                to_add.push(llvm::CreateAttrStringValue(
                    cx.llcx,
                    "sign-return-address",
                    if leaf { "all" } else { "non-leaf" },
                ));
                to_add.push(llvm::CreateAttrStringValue(
                    cx.llcx,
                    "sign-return-address-key",
                    if key == PAuthKey::A { "a_key" } else { "b_key" },
                ));
            }
        }
    }
    // Allocator entry points (`#[rustc_allocator]` / zeroed variant): mark
    // the function so LLVM can reason about allocation behavior.
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
        || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
    {
        to_add.push(create_alloc_family_attr(cx.llcx));
        if let Some(zv) =
            cx.tcx.get_attr(instance.def_id(), rustc_span::sym::rustc_allocator_zeroed_variant)
            && let Some(name) = zv.value_str()
        {
            to_add.push(llvm::CreateAttrStringValue(
                cx.llcx,
                "alloc-variant-zeroed",
                &mangle_internal_symbol(cx.tcx, name.as_str()),
            ));
        }
        // apply to argument place instead of function
        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
        let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
            flags |= AllocKindFlags::Uninitialized;
        } else {
            flags |= AllocKindFlags::Zeroed;
        }
        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
        // apply to return place instead of function (unlike all other attributes applied in this
        // function)
        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
        to_add.push(create_alloc_family_attr(cx.llcx));
        to_add.push(llvm::CreateAllocKindAttr(
            cx.llcx,
            AllocKindFlags::Realloc | AllocKindFlags::Aligned,
        ));
        // applies to argument place instead of function place
        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
        // apply to argument place instead of function
        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
        to_add.push(create_alloc_family_attr(cx.llcx));
        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
        // applies to argument place instead of function place
        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
    }
    // Explicit `#[repr(align)]`-style function alignment, set directly on
    // the LLVM value rather than via an attribute.
    if let Some(align) = codegen_fn_attrs.alignment {
        llvm::set_alignment(llfn, align);
    }
    if let Some(backchain) = backchain_attr(cx) {
        to_add.push(backchain);
    }
    to_add.extend(patchable_function_entry_attrs(cx, codegen_fn_attrs.patchable_function_entry));

    // Always annotate functions with the target-cpu they are compiled for.
    // Without this, ThinLTO won't inline Rust functions into Clang generated
    // functions (because Clang annotates functions this way too).
    to_add.push(target_cpu_attr(cx));
    // tune-cpu is only conveyed through the attribute for our purpose.
    // The target doesn't care; the subtarget reads our attribute.
    to_add.extend(tune_cpu_attr(cx));

    let function_features =
        codegen_fn_attrs.target_features.iter().map(|f| f.name.as_str()).collect::<Vec<&str>>();

    // Apply function attributes as per usual if there are no user defined
    // target features otherwise this will get applied at the callsite.
    if function_features.is_empty() {
        if let Some(inline_attr) = inline_attr(cx, instance) {
            to_add.push(inline_attr);
        }
    }

    // Translate the rustc feature names into LLVM's `+feat` spelling, and
    // fold in the thumb/arm instruction-set selection as a pseudo-feature.
    let function_features = function_features
        .iter()
        // Convert to LLVMFeatures and filter out unavailable ones
        .flat_map(|feat| llvm_util::to_llvm_features(cx.tcx.sess, feat))
        // Convert LLVMFeatures & dependencies to +<feats>s
        .flat_map(|feat| feat.into_iter().map(|f| format!("+{f}")))
        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
            InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
            InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
        }))
        .collect::<Vec<String>>();

    if cx.tcx.sess.target.is_like_wasm {
        // If this function is an import from the environment but the wasm
        // import has a specific module/name, apply them here.
        if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", module));

            let name =
                codegen_fn_attrs.symbol_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id()));
            let name = name.as_str();
            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-name", name));
        }
    }

    to_add.extend(target_features_attr(cx, function_features));

    // Finally apply everything accumulated above to the function place.
    attributes::apply_to_llfn(llfn, Function, &to_add);
}
546
/// Look up the wasm import module recorded for `id`, if any.
/// Presumably populated from `#[link(wasm_import_module = "...")]` on the
/// enclosing extern block — confirm against the query's provider.
fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<&String> {
    tcx.wasm_import_module_map(id.krate).get(&id)
}