// rustc_codegen_llvm/back/write.rs

use std::ffi::{CStr, CString};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::ptr::null_mut;
use std::sync::Arc;
use std::{fs, slice, str};

use libc::{c_char, c_int, c_void, size_t};
use rustc_codegen_ssa::back::link::ensure_removed;
use rustc_codegen_ssa::back::versioned_llvm_target;
use rustc_codegen_ssa::back::write::{
    BitcodeSection, CodegenContext, EmitObj, InlineAsmError, ModuleConfig,
    TargetMachineFactoryConfig, TargetMachineFactoryFn,
};
use rustc_codegen_ssa::base::wants_wasm_eh;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, ModuleKind};
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_errors::{DiagCtxtHandle, Level};
use rustc_fs_util::{link_or_copy, path_to_c_string};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_session::config::{
    self, Lto, OutputType, Passes, RemapPathScopeComponents, SplitDwarfKind, SwitchWithOptPath,
};
use rustc_span::{BytePos, InnerSpan, Pos, SpanData, SyntaxContext, sym};
use rustc_target::spec::{
    Arch, CodeModel, FloatAbi, RelocModel, SanitizerSet, SplitDebuginfo, TlsModel,
};
use tracing::{debug, trace};

use crate::back::lto::ThinBuffer;
use crate::back::owned_target_machine::OwnedTargetMachine;
use crate::back::profiling::{
    LlvmSelfProfiler, selfprofile_after_pass_callback, selfprofile_before_pass_callback,
};
use crate::common::AsCCharPtr;
use crate::errors::{
    CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
    WithLlvmError, WriteBytecode,
};
use crate::llvm::diagnostic::OptimizationDiagnosticKind::*;
use crate::llvm::{self, DiagnosticInfo};
use crate::type_::llvm_type_ptr;
use crate::{LlvmCodegenBackend, ModuleLlvm, SimpleCx, attributes, base, common, llvm_util};

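/// Emits a fatal diagnostic for an LLVM error, attaching the last error message
/// recorded by LLVM (if any) for extra context. This function never returns.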
pub(crate) fn llvm_err<'a>(dcx: DiagCtxtHandle<'_>, err: LlvmError<'a>) -> ! {
    match llvm::last_error() {
        Some(llvm_err) => dcx.emit_fatal(WithLlvmError(err, llvm_err)),
        None => dcx.emit_fatal(err),
    }
}

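/// Uses the target machine to emit module `m` to `output` as the given
/// `file_type` (object or assembly), optionally splitting DWARF out into
/// `dwo_output`, and records the artifact sizes for self-profiling.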
fn write_output_file<'ll>(
    dcx: DiagCtxtHandle<'_>,
    target: &'ll llvm::TargetMachine,
    no_builtins: bool,
    m: &'ll llvm::Module,
    output: &Path,
    dwo_output: Option<&Path>,
    file_type: llvm::FileType,
    self_profiler_ref: &SelfProfilerRef,
    verify_llvm_ir: bool,
) {
    debug!("write_output_file output={:?} dwo_output={:?}", output, dwo_output);
    let output_c = path_to_c_string(output);
    let dwo_output_c;
    let dwo_output_ptr = if let Some(dwo_output) = dwo_output {
        dwo_output_c = path_to_c_string(dwo_output);
        dwo_output_c.as_ptr()
    } else {
        std::ptr::null()
    };
    let result = unsafe {
        let pm = llvm::LLVMCreatePassManager();
        llvm::LLVMAddAnalysisPasses(target, pm);
        llvm::LLVMRustAddLibraryInfo(target, pm, m, no_builtins);
        llvm::LLVMRustWriteOutputFile(
            target,
            pm,
            m,
            output_c.as_ptr(),
            dwo_output_ptr,
            file_type,
            verify_llvm_ir,
        )
    };

    // Record artifact sizes for self-profiling.
    if result == llvm::LLVMRustResult::Success {
        let artifact_kind = match file_type {
            llvm::FileType::ObjectFile => "object_file",
            llvm::FileType::AssemblyFile => "assembly_file",
        };
        record_artifact_size(self_profiler_ref, artifact_kind, output);
        if let Some(dwo_file) = dwo_output {
            record_artifact_size(self_profiler_ref, "dwo_file", dwo_file);
        }
    }

    result.into_result().unwrap_or_else(|()| llvm_err(dcx, LlvmError::WriteOutput { path: output }))
}

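/// Creates a target machine used only for querying target information (for
/// example the supported target features), not for emitting machine code.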
pub(crate) fn create_informational_target_machine(
    sess: &Session,
    only_base_features: bool,
) -> OwnedTargetMachine {
    let config = TargetMachineFactoryConfig { split_dwarf_file: None, output_obj_file: None };
    // Can't use the query system here quite yet because this function is invoked before the
    // query system/tcx is set up.
    let features = llvm_util::global_llvm_features(sess, only_base_features);
    target_machine_factory(sess, config::OptLevel::No, &features)(config)
        .unwrap_or_else(|err| llvm_err(sess.dcx(), err))
}

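/// Creates the target machine used to compile the codegen unit `mod_name`,
/// with that CGU's split-DWARF and object-file output paths filled in.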
pub(crate) fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine {
    let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
        tcx.output_filenames(()).split_dwarf_path(
            tcx.sess.split_debuginfo(),
            tcx.sess.opts.unstable_opts.split_dwarf_kind,
            mod_name,
            tcx.sess.invocation_temp.as_deref(),
        )
    } else {
        None
    };

    let output_obj_file = Some(tcx.output_filenames(()).temp_path_for_cgu(
        OutputType::Object,
        mod_name,
        tcx.sess.invocation_temp.as_deref(),
    ));
    let config = TargetMachineFactoryConfig { split_dwarf_file, output_obj_file };

    target_machine_factory(
        tcx.sess,
        tcx.backend_optimization_level(()),
        tcx.global_backend_features(()),
    )(config)
    .unwrap_or_else(|err| llvm_err(tcx.dcx(), err))
}

fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) {
    use self::config::OptLevel::*;
    match cfg {
        No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
        Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone),
        More => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone),
        Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone),
        Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault),
        SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive),
    }
}

fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel {
    use config::OptLevel::*;
    match cfg {
        No => llvm::PassBuilderOptLevel::O0,
        Less => llvm::PassBuilderOptLevel::O1,
        More => llvm::PassBuilderOptLevel::O2,
        Aggressive => llvm::PassBuilderOptLevel::O3,
        Size => llvm::PassBuilderOptLevel::Os,
        SizeMin => llvm::PassBuilderOptLevel::Oz,
    }
}

fn to_llvm_relocation_model(relocation_model: RelocModel) -> llvm::RelocModel {
    match relocation_model {
        RelocModel::Static => llvm::RelocModel::Static,
        // LLVM doesn't have a PIE relocation model; it represents PIE as PIC with an extra
        // attribute.
        RelocModel::Pic | RelocModel::Pie => llvm::RelocModel::PIC,
        RelocModel::DynamicNoPic => llvm::RelocModel::DynamicNoPic,
        RelocModel::Ropi => llvm::RelocModel::ROPI,
        RelocModel::Rwpi => llvm::RelocModel::RWPI,
        RelocModel::RopiRwpi => llvm::RelocModel::ROPI_RWPI,
    }
}

pub(crate) fn to_llvm_code_model(code_model: Option<CodeModel>) -> llvm::CodeModel {
    match code_model {
        Some(CodeModel::Tiny) => llvm::CodeModel::Tiny,
        Some(CodeModel::Small) => llvm::CodeModel::Small,
        Some(CodeModel::Kernel) => llvm::CodeModel::Kernel,
        Some(CodeModel::Medium) => llvm::CodeModel::Medium,
        Some(CodeModel::Large) => llvm::CodeModel::Large,
        None => llvm::CodeModel::None,
    }
}

fn to_llvm_float_abi(float_abi: Option<FloatAbi>) -> llvm::FloatAbi {
    match float_abi {
        None => llvm::FloatAbi::Default,
        Some(FloatAbi::Soft) => llvm::FloatAbi::Soft,
        Some(FloatAbi::Hard) => llvm::FloatAbi::Hard,
    }
}

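/// Builds a closure that creates a fresh `OwnedTargetMachine` on demand. All
/// session-derived settings are captured up front, so the returned factory can
/// be invoked later without access to the `Session`.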
pub(crate) fn target_machine_factory(
    sess: &Session,
    optlvl: config::OptLevel,
    target_features: &[String],
) -> TargetMachineFactoryFn<LlvmCodegenBackend> {
    // Self-profile timer for creating a _factory_.
    let _prof_timer = sess.prof.generic_activity("target_machine_factory");

    let reloc_model = to_llvm_relocation_model(sess.relocation_model());

    let (opt_level, _) = to_llvm_opt_settings(optlvl);
    let float_abi = if sess.target.arch == Arch::Arm && sess.opts.cg.soft_float {
        llvm::FloatAbi::Soft
    } else {
        // `validate_commandline_args_with_session_available` has already warned about this being
        // ignored. Let's make sure LLVM doesn't suddenly start using this flag on more targets.
        to_llvm_float_abi(sess.target.llvm_floatabi)
    };

    let ffunction_sections =
        sess.opts.unstable_opts.function_sections.unwrap_or(sess.target.function_sections);
    let fdata_sections = ffunction_sections;
    let funique_section_names = !sess.opts.unstable_opts.no_unique_section_names;

    let code_model = to_llvm_code_model(sess.code_model());

    let mut singlethread = sess.target.singlethread;

    // On wasm targets, enabling the `atomics` feature means we are no longer
    // single-threaded, so we don't want LLVM to lower atomic operations to
    // single-threaded ones.
    if singlethread && sess.target.is_like_wasm && sess.target_features.contains(&sym::atomics) {
        singlethread = false;
    }

    let triple = SmallCStr::new(&versioned_llvm_target(sess));
    let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
    let features = CString::new(target_features.join(",")).unwrap();
    let abi = SmallCStr::new(&sess.target.llvm_abiname);
    let trap_unreachable =
        sess.opts.unstable_opts.trap_unreachable.unwrap_or(sess.target.trap_unreachable);
    let emit_stack_size_section = sess.opts.unstable_opts.emit_stack_sizes;

    let verbose_asm = sess.opts.unstable_opts.verbose_asm;
    let relax_elf_relocations =
        sess.opts.unstable_opts.relax_elf_relocations.unwrap_or(sess.target.relax_elf_relocations);

    let use_init_array =
        !sess.opts.unstable_opts.use_ctors_section.unwrap_or(sess.target.use_ctors_section);

    let path_mapping = sess.source_map().path_mapping().clone();

    let use_emulated_tls = matches!(sess.tls_model(), TlsModel::Emulated);

    let debuginfo_compression = match sess.opts.debuginfo_compression {
        config::DebugInfoCompression::None => llvm::CompressionKind::None,
        config::DebugInfoCompression::Zlib => {
            if llvm::LLVMRustLLVMHasZlibCompression() {
                llvm::CompressionKind::Zlib
            } else {
                sess.dcx().emit_warn(UnknownCompression { algorithm: "zlib" });
                llvm::CompressionKind::None
            }
        }
        config::DebugInfoCompression::Zstd => {
            if llvm::LLVMRustLLVMHasZstdCompression() {
                llvm::CompressionKind::Zstd
            } else {
                sess.dcx().emit_warn(UnknownCompression { algorithm: "zstd" });
                llvm::CompressionKind::None
            }
        }
    };

    let file_name_display_preference =
        sess.filename_display_preference(RemapPathScopeComponents::DEBUGINFO);

    let use_wasm_eh = wants_wasm_eh(sess);

    let prof = SelfProfilerRef::clone(&sess.prof);
    Arc::new(move |config: TargetMachineFactoryConfig| {
        // Self-profile timer for invoking a factory to create a target machine.
        let _prof_timer = prof.generic_activity("target_machine_factory_inner");

        let path_to_cstring_helper = |path: Option<PathBuf>| -> CString {
            let path = path.unwrap_or_default();
            let path = path_mapping
                .to_real_filename(path)
                .to_string_lossy(file_name_display_preference)
                .into_owned();
            CString::new(path).unwrap()
        };

        let split_dwarf_file = path_to_cstring_helper(config.split_dwarf_file);
        let output_obj_file = path_to_cstring_helper(config.output_obj_file);

        OwnedTargetMachine::new(
            &triple,
            &cpu,
            &features,
            &abi,
            code_model,
            reloc_model,
            opt_level,
            float_abi,
            ffunction_sections,
            fdata_sections,
            funique_section_names,
            trap_unreachable,
            singlethread,
            verbose_asm,
            emit_stack_size_section,
            relax_elf_relocations,
            use_init_array,
            &split_dwarf_file,
            &output_obj_file,
            debuginfo_compression,
            use_emulated_tls,
            use_wasm_eh,
        )
    })
}

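/// Writes the module's bitcode to a `{name}.bc` temp file when `-C save-temps`
/// is enabled; a no-op otherwise.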
pub(crate) fn save_temp_bitcode(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    module: &ModuleCodegen<ModuleLlvm>,
    name: &str,
) {
    if !cgcx.save_temps {
        return;
    }
    let ext = format!("{name}.bc");
    let path = cgcx.output_filenames.temp_path_ext_for_cgu(
        &ext,
        &module.name,
        cgcx.invocation_temp.as_deref(),
    );
    write_bitcode_to_file(module, &path)
}

fn write_bitcode_to_file(module: &ModuleCodegen<ModuleLlvm>, path: &Path) {
    unsafe {
        let path = path_to_c_string(path);
        let llmod = module.module_llvm.llmod();
        llvm::LLVMWriteBitcodeToFile(llmod, path.as_ptr());
    }
}

/// In what context is a diagnostic handler being attached to a codegen unit?
pub(crate) enum CodegenDiagnosticsStage {
    /// Prelink optimization stage.
    Opt,
    /// LTO/ThinLTO postlink optimization stage.
    LTO,
    /// Code generation.
    Codegen,
}

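/// RAII guard that installs rustc's diagnostic handler (including the
/// optimization-remark configuration) on an LLVM context and restores the
/// previous handler when dropped.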
pub(crate) struct DiagnosticHandlers<'a> {
    data: *mut (&'a CodegenContext<LlvmCodegenBackend>, DiagCtxtHandle<'a>),
    llcx: &'a llvm::Context,
    old_handler: Option<&'a llvm::DiagnosticHandler>,
}

impl<'a> DiagnosticHandlers<'a> {
    pub(crate) fn new(
        cgcx: &'a CodegenContext<LlvmCodegenBackend>,
        dcx: DiagCtxtHandle<'a>,
        llcx: &'a llvm::Context,
        module: &ModuleCodegen<ModuleLlvm>,
        stage: CodegenDiagnosticsStage,
    ) -> Self {
        let remark_passes_all: bool;
        let remark_passes: Vec<CString>;
        match &cgcx.remark {
            Passes::All => {
                remark_passes_all = true;
                remark_passes = Vec::new();
            }
            Passes::Some(passes) => {
                remark_passes_all = false;
                remark_passes =
                    passes.iter().map(|name| CString::new(name.as_str()).unwrap()).collect();
            }
        };
        let remark_passes: Vec<*const c_char> =
            remark_passes.iter().map(|name: &CString| name.as_ptr()).collect();
        let remark_file = cgcx
            .remark_dir
            .as_ref()
            // Use the .opt.yaml file suffix, which is supported by LLVM's opt-viewer.
            .map(|dir| {
                let stage_suffix = match stage {
                    CodegenDiagnosticsStage::Codegen => "codegen",
                    CodegenDiagnosticsStage::Opt => "opt",
                    CodegenDiagnosticsStage::LTO => "lto",
                };
                dir.join(format!("{}.{stage_suffix}.opt.yaml", module.name))
            })
            .and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok()));

        let pgo_available = cgcx.opts.cg.profile_use.is_some();
        let data = Box::into_raw(Box::new((cgcx, dcx)));
        unsafe {
            let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
            llvm::LLVMRustContextConfigureDiagnosticHandler(
                llcx,
                diagnostic_handler,
                data.cast(),
                remark_passes_all,
                remark_passes.as_ptr(),
                remark_passes.len(),
                // The `as_ref()` is important here, otherwise the `CString` will be dropped
                // too soon!
                remark_file.as_ref().map(|dir| dir.as_ptr()).unwrap_or(std::ptr::null()),
                pgo_available,
            );
            DiagnosticHandlers { data, llcx, old_handler }
        }
    }
}

impl<'a> Drop for DiagnosticHandlers<'a> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustContextSetDiagnosticHandler(self.llcx, self.old_handler);
            drop(Box::from_raw(self.data));
        }
    }
}

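/// Converts an LLVM inline-asm diagnostic into an `InlineAsmError`. The
/// `cookie` packs the span's `lo` byte position in its low 32 bits and `hi` in
/// its high 32 bits; a cookie of 0 means there is no usable span.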
fn report_inline_asm(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    msg: String,
    level: llvm::DiagnosticLevel,
    cookie: u64,
    source: Option<(String, Vec<InnerSpan>)>,
) -> InlineAsmError {
    // In LTO builds we may get srcloc values from other crates, which are invalid
    // since they use a different source map. To be safe we just suppress these
    // in LTO builds.
    let span = if cookie == 0 || matches!(cgcx.lto, Lto::Fat | Lto::Thin) {
        SpanData::default()
    } else {
        SpanData {
            lo: BytePos::from_u32(cookie as u32),
            hi: BytePos::from_u32((cookie >> 32) as u32),
            ctxt: SyntaxContext::root(),
            parent: None,
        }
    };
    let level = match level {
        llvm::DiagnosticLevel::Error => Level::Error,
        llvm::DiagnosticLevel::Warning => Level::Warning,
        llvm::DiagnosticLevel::Note | llvm::DiagnosticLevel::Remark => Level::Note,
    };
    let msg = msg.trim_prefix("error: ").to_string();
    InlineAsmError { span, msg, level, source }
}

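/// Callback registered on the LLVM context by [`DiagnosticHandlers::new`];
/// dispatches LLVM diagnostics (inline-asm errors, optimization remarks,
/// PGO/linker warnings) to rustc's diagnostics infrastructure.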
unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
    if user.is_null() {
        return;
    }
    let (cgcx, dcx) =
        unsafe { *(user as *const (&CodegenContext<LlvmCodegenBackend>, DiagCtxtHandle<'_>)) };

    match unsafe { llvm::diagnostic::Diagnostic::unpack(info) } {
        llvm::diagnostic::InlineAsm(inline) => {
            cgcx.diag_emitter.inline_asm_error(report_inline_asm(
                cgcx,
                inline.message,
                inline.level,
                inline.cookie,
                inline.source,
            ));
        }

        llvm::diagnostic::Optimization(opt) => {
            dcx.emit_note(FromLlvmOptimizationDiag {
                filename: &opt.filename,
                line: opt.line,
                column: opt.column,
                pass_name: &opt.pass_name,
                kind: match opt.kind {
                    OptimizationRemark => "success",
                    OptimizationMissed | OptimizationFailure => "missed",
                    OptimizationAnalysis
                    | OptimizationAnalysisFPCommute
                    | OptimizationAnalysisAliasing => "analysis",
                    OptimizationRemarkOther => "other",
                },
                message: &opt.message,
            });
        }
        llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
            let message = llvm::build_string(|s| unsafe {
                llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
            })
            .expect("non-UTF8 diagnostic");
            dcx.emit_warn(FromLlvmDiag { message });
        }
        llvm::diagnostic::Unsupported(diagnostic_ref) => {
            let message = llvm::build_string(|s| unsafe {
                llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
            })
            .expect("non-UTF8 diagnostic");
            dcx.emit_err(FromLlvmDiag { message });
        }
        llvm::diagnostic::UnknownDiagnostic(..) => {}
    }
}

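/// Returns the `default_%m.profraw` output path pattern for PGO
/// instrumentation, if profile generation is enabled.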
fn get_pgo_gen_path(config: &ModuleConfig) -> Option<CString> {
    match config.pgo_gen {
        SwitchWithOptPath::Enabled(ref opt_dir_path) => {
            let path = if let Some(dir_path) = opt_dir_path {
                dir_path.join("default_%m.profraw")
            } else {
                PathBuf::from("default_%m.profraw")
            };

            Some(CString::new(format!("{}", path.display())).unwrap())
        }
        SwitchWithOptPath::Disabled => None,
    }
}

fn get_pgo_use_path(config: &ModuleConfig) -> Option<CString> {
    config
        .pgo_use
        .as_ref()
        .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
}

fn get_pgo_sample_use_path(config: &ModuleConfig) -> Option<CString> {
    config
        .pgo_sample_use
        .as_ref()
        .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
}

fn get_instr_profile_output_path(config: &ModuleConfig) -> Option<CString> {
    config.instrument_coverage.then(|| c"default_%m_%p.profraw".to_owned())
}

// PreAD will run LLVM opts but disable size-increasing opts (vectorization, loop unrolling).
// DuringAD is the same as above, but also runs the Enzyme opt and autodiff passes.
// PostAD will run all opts, including size-increasing opts.
#[derive(Debug, Eq, PartialEq)]
pub(crate) enum AutodiffStage {
    PreAD,
    DuringAD,
    PostAD,
}

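/// Runs LLVM's optimization pipeline over the module, applying the sanitizer,
/// PGO, and autodiff-related configuration. When `thin_lto_buffer` is provided,
/// the pre-link bitcode is captured into it for later (Thin)LTO.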
pub(crate) unsafe fn llvm_optimize(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    dcx: DiagCtxtHandle<'_>,
    module: &ModuleCodegen<ModuleLlvm>,
    thin_lto_buffer: Option<&mut *mut llvm::ThinLTOBuffer>,
    config: &ModuleConfig,
    opt_level: config::OptLevel,
    opt_stage: llvm::OptStage,
    autodiff_stage: AutodiffStage,
) {
    // Enzyme:
    // The whole point of compiler-based AD is to differentiate optimized IR instead of
    // unoptimized source code. However, benchmarks show that optimizations increasing the code
    // size tend to reduce AD performance. Therefore we deactivate them before AD, then
    // differentiate the code, and finally re-optimize the module, now with all optimizations
    // available.
    // FIXME(ZuseZ4): In a future update we could figure out how to only optimize the individual
    // functions being differentiated.

    let consider_ad =
        cfg!(feature = "llvm_enzyme") && config.autodiff.contains(&config::AutoDiff::Enable);
    let run_enzyme = autodiff_stage == AutodiffStage::DuringAD;
    let print_before_enzyme = config.autodiff.contains(&config::AutoDiff::PrintModBefore);
    let print_after_enzyme = config.autodiff.contains(&config::AutoDiff::PrintModAfter);
    let print_passes = config.autodiff.contains(&config::AutoDiff::PrintPasses);
    let merge_functions;
    let unroll_loops;
    let vectorize_slp;
    let vectorize_loop;

    // When we build rustc with Enzyme/autodiff support, we want to postpone size-increasing
    // optimizations until after differentiation. Our pipeline is thus: (opt + enzyme), (full opt).
    // We therefore have two calls to llvm_optimize, if autodiff is used.
    //
    // We also must disable merge_functions, since autodiff placeholder/dummy bodies tend to be
    // identical. We run opts before AD, so there is a chance that LLVM will merge our dummies.
    // In that case, we lack some dummy bodies and can't replace them with the real AD code
    // anymore. We would then need to abort compilation. This was especially common in test cases.
    if consider_ad && autodiff_stage != AutodiffStage::PostAD {
        merge_functions = false;
        unroll_loops = false;
        vectorize_slp = false;
        vectorize_loop = false;
    } else {
        unroll_loops =
            opt_level != config::OptLevel::Size && opt_level != config::OptLevel::SizeMin;
        merge_functions = config.merge_functions;
        vectorize_slp = config.vectorize_slp;
        vectorize_loop = config.vectorize_loop;
    }
    trace!(?unroll_loops, ?vectorize_slp, ?vectorize_loop, ?run_enzyme);
    if thin_lto_buffer.is_some() {
        assert!(
            matches!(
                opt_stage,
                llvm::OptStage::PreLinkNoLTO
                    | llvm::OptStage::PreLinkFatLTO
                    | llvm::OptStage::PreLinkThinLTO
            ),
            "the bitcode for LTO can only be obtained at the pre-link stage"
        );
    }
    let pgo_gen_path = get_pgo_gen_path(config);
    let pgo_use_path = get_pgo_use_path(config);
    let pgo_sample_use_path = get_pgo_sample_use_path(config);
    let is_lto = opt_stage == llvm::OptStage::ThinLTO || opt_stage == llvm::OptStage::FatLTO;
    let instr_profile_output_path = get_instr_profile_output_path(config);
    let sanitize_dataflow_abilist: Vec<_> = config
        .sanitizer_dataflow_abilist
        .iter()
        .map(|file| CString::new(file.as_str()).unwrap())
        .collect();
    let sanitize_dataflow_abilist_ptrs: Vec<_> =
        sanitize_dataflow_abilist.iter().map(|file| file.as_ptr()).collect();
    // Sanitizer instrumentation is only inserted during the pre-link optimization stage.
    let sanitizer_options = if !is_lto {
        Some(llvm::SanitizerOptions {
            sanitize_address: config.sanitizer.contains(SanitizerSet::ADDRESS),
            sanitize_address_recover: config.sanitizer_recover.contains(SanitizerSet::ADDRESS),
            sanitize_cfi: config.sanitizer.contains(SanitizerSet::CFI),
            sanitize_dataflow: config.sanitizer.contains(SanitizerSet::DATAFLOW),
            sanitize_dataflow_abilist: sanitize_dataflow_abilist_ptrs.as_ptr(),
            sanitize_dataflow_abilist_len: sanitize_dataflow_abilist_ptrs.len(),
            sanitize_kcfi: config.sanitizer.contains(SanitizerSet::KCFI),
            sanitize_memory: config.sanitizer.contains(SanitizerSet::MEMORY),
            sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY),
            sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
            sanitize_realtime: config.sanitizer.contains(SanitizerSet::REALTIME),
            sanitize_thread: config.sanitizer.contains(SanitizerSet::THREAD),
            sanitize_hwaddress: config.sanitizer.contains(SanitizerSet::HWADDRESS),
            sanitize_hwaddress_recover: config.sanitizer_recover.contains(SanitizerSet::HWADDRESS),
            sanitize_kernel_address: config.sanitizer.contains(SanitizerSet::KERNELADDRESS),
            sanitize_kernel_address_recover: config
                .sanitizer_recover
                .contains(SanitizerSet::KERNELADDRESS),
        })
    } else {
        None
    };

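    /// Rewrites an offload kernel so that it takes an extra leading `ptr`
    /// argument (named `dyn_ptr`): a new function with the extended signature
    /// is created, the body and uses of the old function are transferred to
    /// it, and the old function is deleted so the new one can take its name.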
    fn handle_offload<'ll>(cx: &'ll SimpleCx<'_>, old_fn: &llvm::Value) {
        let old_fn_ty = cx.get_type_of_global(old_fn);
        let old_param_types = cx.func_params_types(old_fn_ty);
        let old_param_count = old_param_types.len();
        if old_param_count == 0 {
            return;
        }

        let first_param = llvm::get_param(old_fn, 0);
        let c_name = llvm::get_value_name(first_param);
        let first_arg_name = str::from_utf8(&c_name).unwrap();
        // We might call llvm_optimize (and thus this code) multiple times on the same IR,
        // but we shouldn't add this helper ptr multiple times.
        // FIXME(offload): This could break if the user names their first argument `dyn_ptr`.
        if first_arg_name == "dyn_ptr" {
            return;
        }

        // Create the new parameter list, with ptr as the first argument.
        let mut new_param_types = Vec::with_capacity(old_param_count + 1);
        new_param_types.push(cx.type_ptr());
        new_param_types.extend(old_param_types);

        // Create the new function type.
        let ret_ty = unsafe { llvm::LLVMGetReturnType(old_fn_ty) };
        let new_fn_ty = cx.type_func(&new_param_types, ret_ty);

        // Create the new function, with a temporary .offload name to avoid a name collision.
        let old_fn_name = String::from_utf8(llvm::get_value_name(old_fn)).unwrap();
        let new_fn_name = format!("{}.offload", &old_fn_name);
        let new_fn = cx.add_func(&new_fn_name, new_fn_ty);
        let a0 = llvm::get_param(new_fn, 0);
        llvm::set_value_name(a0, CString::new("dyn_ptr").unwrap().as_bytes());

        // Here we map the old arguments to the new arguments, with an offset of 1 to make sure
        // that we don't use the newly added `%dyn_ptr`.
        unsafe {
            llvm::LLVMRustOffloadMapper(old_fn, new_fn);
        }

        llvm::set_linkage(new_fn, llvm::get_linkage(old_fn));
        llvm::set_visibility(new_fn, llvm::get_visibility(old_fn));

        // Replace all uses of old_fn with new_fn (RAUW).
        unsafe {
            llvm::LLVMReplaceAllUsesWith(old_fn, new_fn);
        }
        let name = llvm::get_value_name(old_fn);
        unsafe {
            llvm::LLVMDeleteFunction(old_fn);
        }
        // Now we can re-use the old name, without a name collision.
        llvm::set_value_name(new_fn, &name);
    }

    if cgcx.target_is_like_gpu && config.offload.contains(&config::Offload::Enable) {
        let cx =
            SimpleCx::new(module.module_llvm.llmod(), module.module_llvm.llcx, cgcx.pointer_size);
        // For now we only support up to 10 kernels named kernel_0 ... kernel_9; a follow-up PR
        // is introducing a proper offload intrinsic to solve this limitation.
        for func in cx.get_functions() {
            let offload_kernel = "offload-kernel";
            if attributes::has_string_attr(func, offload_kernel) {
                handle_offload(&cx, func);
            }
            attributes::remove_string_attr_from_llfn(func, offload_kernel);
        }
    }

    let mut llvm_profiler = cgcx
        .prof
        .llvm_recording_enabled()
        .then(|| LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap()));

    let llvm_selfprofiler =
        llvm_profiler.as_mut().map(|s| s as *mut _ as *mut c_void).unwrap_or(std::ptr::null_mut());

    let extra_passes = if !is_lto { config.passes.join(",") } else { "".to_string() };

    let llvm_plugins = config.llvm_plugins.join(",");

    let result = unsafe {
        llvm::LLVMRustOptimize(
            module.module_llvm.llmod(),
            &*module.module_llvm.tm.raw(),
            to_pass_builder_opt_level(opt_level),
            opt_stage,
            cgcx.opts.cg.linker_plugin_lto.enabled(),
            config.no_prepopulate_passes,
            config.verify_llvm_ir,
            config.lint_llvm_ir,
            thin_lto_buffer,
            config.emit_thin_lto,
            config.emit_thin_lto_summary,
            merge_functions,
            unroll_loops,
            vectorize_slp,
            vectorize_loop,
            config.no_builtins,
            config.emit_lifetime_markers,
            run_enzyme,
            print_before_enzyme,
            print_after_enzyme,
            print_passes,
            sanitizer_options.as_ref(),
            pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
            pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
            config.instrument_coverage,
            instr_profile_output_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
            pgo_sample_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
            config.debug_info_for_profiling,
            llvm_selfprofiler,
            selfprofile_before_pass_callback,
            selfprofile_after_pass_callback,
            extra_passes.as_c_char_ptr(),
            extra_passes.len(),
            llvm_plugins.as_c_char_ptr(),
            llvm_plugins.len(),
        )
    };

    if cgcx.target_is_like_gpu && config.offload.contains(&config::Offload::Enable) {
        unsafe {
            llvm::LLVMRustBundleImages(module.module_llvm.llmod(), module.module_llvm.tm.raw());
        }
    }

    result.into_result().unwrap_or_else(|()| llvm_err(dcx, LlvmError::RunLlvmPasses))
}

// Not `unsafe` itself, but wraps unsafe LLVM FFI calls.
pub(crate) fn optimize(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    dcx: DiagCtxtHandle<'_>,
    module: &mut ModuleCodegen<ModuleLlvm>,
    config: &ModuleConfig,
) {
    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);

    let llcx = &*module.module_llvm.llcx;
    let _handlers = DiagnosticHandlers::new(cgcx, dcx, llcx, module, CodegenDiagnosticsStage::Opt);

    if config.emit_no_opt_bc {
        let out = cgcx.output_filenames.temp_path_ext_for_cgu(
            "no-opt.bc",
            &module.name,
            cgcx.invocation_temp.as_deref(),
        );
        write_bitcode_to_file(module, &out)
    }

    // FIXME(ZuseZ4): support SanitizeHWAddress and prevent illegal/unsupported opts

    if let Some(opt_level) = config.opt_level {
        let opt_stage = match cgcx.lto {
            Lto::Fat => llvm::OptStage::PreLinkFatLTO,
            Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
            _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
            _ => llvm::OptStage::PreLinkNoLTO,
        };

        // If we know that we will later run AD, then we disable vectorization and loop unrolling.
        // Otherwise we pretend AD is already done and run the normal opt pipeline (=PostAD).
        let consider_ad =
            cfg!(feature = "llvm_enzyme") && config.autodiff.contains(&config::AutoDiff::Enable);
        let autodiff_stage = if consider_ad { AutodiffStage::PreAD } else { AutodiffStage::PostAD };
        // The embedded bitcode is used to run LTO/ThinLTO.
        // The bitcode obtained during the `codegen` phase is no longer suitable for performing
        // LTO: it may have already undergone LTO due to ThinLocal, so we need to obtain the
        // embedded bitcode at this point.
        let mut thin_lto_buffer = if (module.kind == ModuleKind::Regular
            && config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full))
            || config.emit_thin_lto_summary
        {
            Some(null_mut())
        } else {
            None
        };
        unsafe {
            llvm_optimize(
                cgcx,
                dcx,
                module,
                thin_lto_buffer.as_mut(),
                config,
                opt_level,
                opt_stage,
                autodiff_stage,
            )
        };
        if let Some(thin_lto_buffer) = thin_lto_buffer {
            let thin_lto_buffer = unsafe { ThinBuffer::from_raw_ptr(thin_lto_buffer) };
            module.thin_lto_buffer = Some(thin_lto_buffer.data().to_vec());
            let bc_summary_out = cgcx.output_filenames.temp_path_for_cgu(
                OutputType::ThinLinkBitcode,
                &module.name,
                cgcx.invocation_temp.as_deref(),
            );
            if config.emit_thin_lto_summary
                && let Some(thin_link_bitcode_filename) = bc_summary_out.file_name()
            {
                let summary_data = thin_lto_buffer.thin_link_data();
                cgcx.prof.artifact_size(
                    "llvm_bitcode_summary",
                    thin_link_bitcode_filename.to_string_lossy(),
                    summary_data.len() as u64,
                );
                let _timer = cgcx.prof.generic_activity_with_arg(
                    "LLVM_module_codegen_emit_bitcode_summary",
                    &*module.name,
                );
                if let Err(err) = fs::write(&bc_summary_out, summary_data) {
                    dcx.emit_err(WriteBytecode { path: &bc_summary_out, err });
                }
            }
        }
    }
}

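/// Emits the final artifacts (bitcode, LLVM IR, assembly, object file) for a
/// codegen unit, as requested by `config`, and returns the resulting
/// `CompiledModule`.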
pub(crate) fn codegen(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    module: ModuleCodegen<ModuleLlvm>,
    config: &ModuleConfig,
) -> CompiledModule {
    let dcx = cgcx.create_dcx();
    let dcx = dcx.handle();

    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
    {
        let llmod = module.module_llvm.llmod();
        let llcx = &*module.module_llvm.llcx;
        let tm = &*module.module_llvm.tm;
        let _handlers =
            DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::Codegen);

        if cgcx.msvc_imps_needed {
            create_msvc_imps(cgcx, llcx, llmod);
        }

        // Note that if object files are just LLVM bitcode, we write the bitcode,
        // copy it to the .o file, and delete the bitcode afterwards if it wasn't
        // otherwise requested.

        let bc_out = cgcx.output_filenames.temp_path_for_cgu(
            OutputType::Bitcode,
            &module.name,
            cgcx.invocation_temp.as_deref(),
        );
        let obj_out = cgcx.output_filenames.temp_path_for_cgu(
            OutputType::Object,
            &module.name,
            cgcx.invocation_temp.as_deref(),
        );

        if config.bitcode_needed() {
            if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
                let thin = {
                    let _timer = cgcx.prof.generic_activity_with_arg(
                        "LLVM_module_codegen_make_bitcode",
                        &*module.name,
                    );
                    ThinBuffer::new(llmod, config.emit_thin_lto)
                };
                let data = thin.data();
                let _timer = cgcx
                    .prof
                    .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name);
                if let Some(bitcode_filename) = bc_out.file_name() {
                    cgcx.prof.artifact_size(
                        "llvm_bitcode",
                        bitcode_filename.to_string_lossy(),
                        data.len() as u64,
                    );
                }
                if let Err(err) = fs::write(&bc_out, data) {
                    dcx.emit_err(WriteBytecode { path: &bc_out, err });
                }
            }

            if config.embed_bitcode() && module.kind == ModuleKind::Regular {
                let _timer = cgcx
                    .prof
                    .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module.name);
                let thin_bc =
                    module.thin_lto_buffer.as_deref().expect("cannot find embedded bitcode");
                embed_bitcode(cgcx, llcx, llmod, &thin_bc);
            }
        }

        if config.emit_ir {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_ir", &*module.name);
            let out = cgcx.output_filenames.temp_path_for_cgu(
                OutputType::LlvmAssembly,
                &module.name,
                cgcx.invocation_temp.as_deref(),
            );
            let out_c = path_to_c_string(&out);

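            // Called by LLVM while printing the module as IR: demangles Rust
            // symbol names so the emitted `.ll` file is readable. Returns 0
            // (leaving the name untouched) if the input is not a Rust mangled
            // name or the output buffer is too small.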
            extern "C" fn demangle_callback(
                input_ptr: *const c_char,
                input_len: size_t,
                output_ptr: *mut c_char,
                output_len: size_t,
            ) -> size_t {
                let input =
                    unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) };

                let Ok(input) = str::from_utf8(input) else { return 0 };

                let output = unsafe {
                    slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
                };
                let mut cursor = io::Cursor::new(output);

                let Ok(demangled) = rustc_demangle::try_demangle(input) else { return 0 };

                if write!(cursor, "{demangled:#}").is_err() {
                    // This fails only if the provided buffer is not big enough.
                    return 0;
                }

                cursor.position() as size_t
            }

            let result =
                unsafe { llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback) };

            if result == llvm::LLVMRustResult::Success {
                record_artifact_size(&cgcx.prof, "llvm_ir", &out);
            }

            result
                .into_result()
                .unwrap_or_else(|()| llvm_err(dcx, LlvmError::WriteIr { path: &out }));
        }

        if config.emit_asm {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name);
            let path = cgcx.output_filenames.temp_path_for_cgu(
                OutputType::Assembly,
                &module.name,
                cgcx.invocation_temp.as_deref(),
            );

            // We can't use the same module for asm and object code output,
            // because that triggers various errors like invalid IR or broken
            // binaries. So we must clone the module to produce the asm output
            // if we are also producing object code.
            let llmod = if let EmitObj::ObjectCode(_) = config.emit_obj {
                llvm::LLVMCloneModule(llmod)
            } else {
                llmod
            };
            write_output_file(
                dcx,
                tm.raw(),
                config.no_builtins,
                llmod,
                &path,
                None,
                llvm::FileType::AssemblyFile,
                &cgcx.prof,
                config.verify_llvm_ir,
            );
        }

        match config.emit_obj {
            EmitObj::ObjectCode(_) => {
                let _timer = cgcx
                    .prof
                    .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name);

                let dwo_out = cgcx
                    .output_filenames
                    .temp_path_dwo_for_cgu(&module.name, cgcx.invocation_temp.as_deref());
                let dwo_out = match (cgcx.split_debuginfo, cgcx.split_dwarf_kind) {
                    // Don't change how DWARF is emitted when disabled.
                    (SplitDebuginfo::Off, _) => None,
                    // Don't provide a DWARF object path if split debuginfo is enabled but this is
                    // a platform that doesn't support Split DWARF.
                    _ if !cgcx.target_can_use_split_dwarf => None,
                    // Don't provide a DWARF object path in single mode; sections will be written
                    // into the object as normal but ignored by the linker.
                    (_, SplitDwarfKind::Single) => None,
                    // Emit (a subset of the) DWARF into a separate dwarf object file in split
                    // mode.
                    (_, SplitDwarfKind::Split) => Some(dwo_out.as_path()),
                };

                write_output_file(
                    dcx,
                    tm.raw(),
                    config.no_builtins,
                    llmod,
                    &obj_out,
                    dwo_out,
                    llvm::FileType::ObjectFile,
                    &cgcx.prof,
                    config.verify_llvm_ir,
                );
            }

            EmitObj::Bitcode => {
                debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
                if let Err(err) = link_or_copy(&bc_out, &obj_out) {
                    dcx.emit_err(CopyBitcode { err });
                }

                if !config.emit_bc {
                    debug!("removing_bitcode {:?}", bc_out);
                    ensure_removed(dcx, &bc_out);
                }
            }

            EmitObj::None => {}
        }

        record_llvm_cgu_instructions_stats(&cgcx.prof, llmod);
    }

    // `.dwo` files are only emitted if:
    //
    // - Object files are being emitted (i.e. bitcode-only or metadata-only compilations will not
    //   produce dwarf objects, even if otherwise enabled)
    // - The target supports Split DWARF
    // - Split debuginfo is enabled
    // - The Split DWARF kind is `split` (i.e. debuginfo is split into `.dwo` files, not different
    //   sections in the `.o` files).
    let dwarf_object_emitted = matches!(config.emit_obj, EmitObj::ObjectCode(_))
        && cgcx.target_can_use_split_dwarf
        && cgcx.split_debuginfo != SplitDebuginfo::Off
        && cgcx.split_dwarf_kind == SplitDwarfKind::Split;
    module.into_compiled_module(
        config.emit_obj != EmitObj::None,
        dwarf_object_emitted,
        config.emit_bc,
        config.emit_asm,
        config.emit_ir,
        &cgcx.output_filenames,
        cgcx.invocation_temp.as_deref(),
    )
}

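/// Renders `data` as a module-level inline-assembly `.section` directive with
/// the given flags, escaping bytes so the assembly stays valid UTF-8.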
fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> {
    let mut asm = format!(".section {section_name},\"{section_flags}\"\n").into_bytes();
    asm.extend_from_slice(b".ascii \"");
    asm.reserve(data.len());
    for &byte in data {
        if byte == b'\\' || byte == b'"' {
            asm.push(b'\\');
            asm.push(byte);
        } else if byte < 0x20 || byte >= 0x80 {
            // Avoid non-UTF-8 inline assembly. Use octal escape sequences, because they are
            // fixed-width, while hex escapes would consume the following characters.
            asm.push(b'\\');
            asm.push(b'0' + ((byte >> 6) & 0x7));
            asm.push(b'0' + ((byte >> 3) & 0x7));
            asm.push(b'0' + ((byte >> 0) & 0x7));
        } else {
            asm.push(byte);
        }
    }
    asm.extend_from_slice(b"\"\n");
    asm
}

pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static CStr {
    if cgcx.target_is_like_darwin {
        c"__LLVM,__bitcode"
    } else if cgcx.target_is_like_aix {
        c".ipa"
    } else {
        c".llvmbc"
    }
}

/// Embed the bitcode of an LLVM module for LTO in the LLVM module itself.
fn embed_bitcode(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    llcx: &llvm::Context,
    llmod: &llvm::Module,
    bitcode: &[u8],
) {
    // We're adding custom sections to the output object file, but we definitely
    // do not want these custom sections to make their way into the final linked
    // executable. The purpose of these custom sections is for tooling
    // surrounding object files to work with the LLVM IR, if necessary. For
    // example rustc's own LTO will look for LLVM IR inside of the object file
    // in these sections by default.
    //
    // Handling this differs a bit depending on the object file format used by
    // the backend, broken down into a few different categories:
    //
    // * Mach-O - this is for macOS. Inspecting the source code for the native
    //   linker here shows that the `.llvmbc` and `.llvmcmd` sections are
    //   automatically skipped by the linker. In that case there's nothing extra
    //   that we need to do here. We do need to make sure that the
    //   `__LLVM,__cmdline` section exists even though it is empty, as otherwise
    //   ld64 rejects the object file.
    //
    // * Wasm - the native LLD linker is hard-coded to skip `.llvmbc` and
    //   `.llvmcmd` sections, so there's nothing extra we need to do.
    //
    // * COFF - if we don't do anything the linker will by default copy all
    //   these sections to the output artifact, not what we want! To subvert
    //   this we want to flag the sections we inserted here as
    //   `IMAGE_SCN_LNK_REMOVE`.
    //
    // * ELF - this is very similar to COFF above. One difference is that these
    //   sections are removed from the output linked artifact when
    //   `--gc-sections` is passed, which we pass by default. If that flag isn't
    //   passed though then these sections will show up in the final output.
    //   Additionally the flag that we need to set here is `SHF_EXCLUDE`.
    //
    // * XCOFF - the AIX linker ignores content in the .ipa and .info sections
    //   if no auxiliary symbol is associated with them.
    //
    // Unfortunately, LLVM provides no way to set custom section flags. For ELF
    // and COFF we emit the sections using module-level inline assembly for that
    // reason (see issue #90326 for historical background).

    if cgcx.target_is_like_darwin
        || cgcx.target_is_like_aix
        || cgcx.target_arch == "wasm32"
        || cgcx.target_arch == "wasm64"
    {
        // We don't need custom section flags, so create LLVM globals.
        let llconst = common::bytes_in_context(llcx, bitcode);
        let llglobal = llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.module");
        llvm::set_initializer(llglobal, llconst);

        llvm::set_section(llglobal, bitcode_section_name(cgcx));
        llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
        llvm::LLVMSetGlobalConstant(llglobal, llvm::TRUE);

        let llconst = common::bytes_in_context(llcx, &[]);
        let llglobal = llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.cmdline");
        llvm::set_initializer(llglobal, llconst);
        let section = if cgcx.target_is_like_darwin {
            c"__LLVM,__cmdline"
        } else if cgcx.target_is_like_aix {
            c".info"
        } else {
            c".llvmcmd"
        };
        llvm::set_section(llglobal, section);
        llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
    } else {
        // We need custom section flags, so emit module-level inline assembly.
        let section_flags = if cgcx.is_pe_coff { "n" } else { "e" };
        let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode);
        llvm::append_module_inline_asm(llmod, &asm);
        let asm = create_section_with_flags_asm(".llvmcmd", section_flags, &[]);
        llvm::append_module_inline_asm(llmod, &asm);
    }
}

// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
// This is required to satisfy `dllimport` references to static data in .rlibs
// when using the MSVC linker. We do this only for data, as the linker can fix
// up code references on its own.
// See #26591, #27438.
fn create_msvc_imps(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    llcx: &llvm::Context,
    llmod: &llvm::Module,
) {
    if !cgcx.msvc_imps_needed {
        return;
    }
    // The x86 ABI seems to require that leading underscores are added to symbol
    // names, so we need an extra underscore on x86. There's also a leading
    // '\x01' here which disables LLVM's symbol mangling (e.g., no extra
    // underscores added in front).
    let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };

    let ptr_ty = llvm_type_ptr(llcx);
    let globals = base::iter_globals(llmod)
        .filter(|&val| {
            llvm::get_linkage(val) == llvm::Linkage::ExternalLinkage && !llvm::is_declaration(val)
        })
        .filter_map(|val| {
            // Exclude some symbols that we know are not Rust symbols.
            let name = llvm::get_value_name(val);
            if ignored(&name) { None } else { Some((val, name)) }
        })
        .map(move |(val, name)| {
            let mut imp_name = prefix.as_bytes().to_vec();
            imp_name.extend(name);
            let imp_name = CString::new(imp_name).unwrap();
            (imp_name, val)
        })
        .collect::<Vec<_>>();

    for (imp_name, val) in globals {
        let imp = llvm::add_global(llmod, ptr_ty, &imp_name);

        llvm::set_initializer(imp, val);
        llvm::set_linkage(imp, llvm::Linkage::ExternalLinkage);
    }

    // Use this function to exclude certain symbols from `__imp` generation.
    fn ignored(symbol_name: &[u8]) -> bool {
        // These are symbols generated by LLVM's profiling instrumentation.
        symbol_name.starts_with(b"__llvm_profile_")
    }
}

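/// Records the on-disk size of `path` as a self-profiling artifact-size event.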
fn record_artifact_size(
    self_profiler_ref: &SelfProfilerRef,
    artifact_kind: &'static str,
    path: &Path,
) {
    // Don't stat the file if we are not going to record its size.
    if !self_profiler_ref.enabled() {
        return;
    }

    if let Some(artifact_name) = path.file_name() {
        let file_size = std::fs::metadata(path).map(|m| m.len()).unwrap_or(0);
        self_profiler_ref.artifact_size(artifact_kind, artifact_name.to_string_lossy(), file_size);
    }
}

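/// Records the CGU's LLVM instruction count as a self-profiling artifact-size
/// event, parsed from LLVM's JSON module statistics.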
fn record_llvm_cgu_instructions_stats(prof: &SelfProfilerRef, llmod: &llvm::Module) {
    if !prof.enabled() {
        return;
    }

    let raw_stats =
        llvm::build_string(|s| unsafe { llvm::LLVMRustModuleInstructionStats(llmod, s) })
            .expect("cannot get module instruction stats");

    #[derive(serde::Deserialize)]
    struct InstructionsStats {
        module: String,
        total: u64,
    }

    let InstructionsStats { module, total } =
        serde_json::from_str(&raw_stats).expect("cannot parse llvm cgu instructions stats");
    prof.artifact_size("cgu_instructions", module, total);
}
1299}