rustc_codegen_llvm/back/lto.rs

use std::collections::BTreeMap;
use std::ffi::{CStr, CString};
use std::fs::File;
use std::path::{Path, PathBuf};
use std::ptr::NonNull;
use std::sync::Arc;
use std::{io, iter, slice};

use object::read::archive::ArchiveFile;
use object::{Object, ObjectSection};
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::memmap::Mmap;
use rustc_errors::{DiagCtxtHandle, FatalError};
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
use rustc_session::config::{self, Lto};
use tracing::{debug, info};

use crate::back::write::{
    self, CodegenDiagnosticsStage, DiagnosticHandlers, bitcode_section_name, save_temp_bitcode,
};
use crate::errors::{LlvmError, LtoBitcodeFromRlib};
use crate::llvm::AttributePlace::Function;
use crate::llvm::{self, build_string};
use crate::{LlvmCodegenBackend, ModuleLlvm, SimpleCx, attributes};

/// We keep track of the computed LTO cache keys from the previous
/// session to determine which CGUs we can reuse.
const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";

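/// Shared setup for fat and thin LTO: collects the exported symbols that must
/// survive internalization and, unless we're only doing local ThinLTO, loads
/// the bitcode of every Rust object file in the linked rlibs.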
fn prepare_lto(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    dcx: DiagCtxtHandle<'_>,
) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> {
    let mut symbols_below_threshold = exported_symbols_for_lto
        .iter()
        .map(|symbol| CString::new(symbol.to_owned()).unwrap())
        .collect::<Vec<CString>>();

    // __llvm_profile_counter_bias is pulled in at link time by an undefined reference to
    // __llvm_profile_runtime, therefore we won't know until link time if this symbol
    // should have default visibility.
    symbols_below_threshold.push(c"__llvm_profile_counter_bias".to_owned());

    // If we're performing LTO for the entire crate graph, then for each of our
    // upstream dependencies, find the corresponding rlib and load the bitcode
    // from the archive.
    //
    // We save off all the bitcode and LLVM module ids for later processing
    // with either fat or thin LTO.
    let mut upstream_modules = Vec::new();
    if cgcx.lto != Lto::ThinLocal {
        for path in each_linked_rlib_for_lto {
            let archive_data = unsafe {
                Mmap::map(std::fs::File::open(&path).expect("couldn't open rlib"))
                    .expect("couldn't map rlib")
            };
            let archive = ArchiveFile::parse(&*archive_data).expect("wanted an rlib");
            let obj_files = archive
                .members()
                .filter_map(|child| {
                    child.ok().and_then(|c| {
                        std::str::from_utf8(c.name()).ok().map(|name| (name.trim(), c))
                    })
                })
                .filter(|&(name, _)| looks_like_rust_object_file(name));
            for (name, child) in obj_files {
                info!("adding bitcode from {}", name);
                match get_bitcode_slice_from_object_data(
                    child.data(&*archive_data).expect("corrupt rlib"),
                    cgcx,
                ) {
                    Ok(data) => {
                        let module = SerializedModule::FromRlib(data.to_vec());
                        upstream_modules.push((module, CString::new(name).unwrap()));
                    }
                    Err(e) => {
                        dcx.emit_err(e);
                        return Err(FatalError);
                    }
                }
            }
        }
    }

    Ok((symbols_below_threshold, upstream_modules))
}

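/// Extracts the bitcode from one rlib member: either the member already is raw
/// bitcode (detected via the bitcode magic numbers), or it is an object file
/// whose embedded bitcode section we look up by name.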
fn get_bitcode_slice_from_object_data<'a>(
    obj: &'a [u8],
    cgcx: &CodegenContext<LlvmCodegenBackend>,
) -> Result<&'a [u8], LtoBitcodeFromRlib> {
    // We're about to assume the data here is an object file with sections, but if it's raw
    // LLVM bitcode that won't work. Fortunately, if that's what we have we can just return
    // the data directly, so we sniff the relevant magic strings here and return early.
    if obj.starts_with(b"\xDE\xC0\x17\x0B") || obj.starts_with(b"BC\xC0\xDE") {
        return Ok(obj);
    }
    // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment
    // name" which in the public API for sections gets treated as part of the section name, but
    // internally in MachOObjectFile.cpp gets treated separately.
    let section_name = bitcode_section_name(cgcx).to_str().unwrap().trim_start_matches("__LLVM,");

    let obj =
        object::File::parse(obj).map_err(|err| LtoBitcodeFromRlib { err: err.to_string() })?;

    let section = obj
        .section_by_name(section_name)
        .ok_or_else(|| LtoBitcodeFromRlib { err: format!("Can't find section {section_name}") })?;

    section.data().map_err(|err| LtoBitcodeFromRlib { err: err.to_string() })
}

/// Performs fat LTO by merging all modules into a single one and returning it
/// for further optimization.
pub(crate) fn run_fat(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
    let dcx = cgcx.create_dcx();
    let dcx = dcx.handle();
    let (symbols_below_threshold, upstream_modules) =
        prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx)?;
    let symbols_below_threshold =
        symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
    fat_lto(cgcx, dcx, modules, upstream_modules, &symbols_below_threshold)
}

/// Performs thin LTO by carrying out the necessary global analysis and
/// returning two lists, one of the modules that need optimization and another
/// for modules that can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    modules: Vec<(String, ThinBuffer)>,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
    let dcx = cgcx.create_dcx();
    let dcx = dcx.handle();
    let (symbols_below_threshold, upstream_modules) =
        prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx)?;
    let symbols_below_threshold =
        symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
    if cgcx.opts.cg.linker_plugin_lto.enabled() {
        unreachable!(
            "We should never reach this case if the LTO step \
                      is deferred to the linker"
        );
    }
    thin_lto(cgcx, dcx, modules, upstream_modules, cached_modules, &symbols_below_threshold)
}

pub(crate) fn prepare_thin(
    module: ModuleCodegen<ModuleLlvm>,
    emit_summary: bool,
) -> (String, ThinBuffer) {
    let name = module.name;
    let buffer = ThinBuffer::new(module.module_llvm.llmod(), true, emit_summary);
    (name, buffer)
}

fn fat_lto(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    dcx: DiagCtxtHandle<'_>,
    modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
    mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
    symbols_below_threshold: &[*const libc::c_char],
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
    let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module");
    info!("going for a fat lto");

    // Sort out all our lists of incoming modules into two lists.
    //
    // * `serialized_modules` (also an argument to this function) contains all
    //   modules that are serialized in-memory.
    // * `in_memory` contains modules which are already parsed and in-memory,
    //   such as from multi-CGU builds.
    let mut in_memory = Vec::new();
    for module in modules {
        match module {
            FatLtoInput::InMemory(m) => in_memory.push(m),
            FatLtoInput::Serialized { name, buffer } => {
                info!("pushing serialized module {:?}", name);
                serialized_modules.push((buffer, CString::new(name).unwrap()));
            }
        }
    }

    // Find the "costliest" module and merge everything into that codegen unit.
    // All the other modules will be serialized and reparsed into the new
    // context, so this hopefully avoids serializing and parsing the largest
    // codegen unit.
    //
    // Additionally use a regular module as the base here to ensure that various
    // file copy operations in the backend work correctly. The only other kind
    // of module here should be an allocator one, and if your crate is smaller
    // than the allocator module then the size doesn't really matter anyway.
    let costliest_module = in_memory
        .iter()
        .enumerate()
        .filter(|&(_, module)| module.kind == ModuleKind::Regular)
        .map(|(i, module)| {
            let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) };
            (cost, i)
        })
        .max();

    // If we found a costliest module, we're good to go. Otherwise all our
    // inputs were serialized, which could happen if, for example, all our
    // inputs were incrementally reread from the cache and we're just
    // re-executing the LTO passes. If that's the case deserialize the first
    // module and create a linker with it.
    let module: ModuleCodegen<ModuleLlvm> = match costliest_module {
        Some((_cost, i)) => in_memory.remove(i),
        None => {
            assert!(!serialized_modules.is_empty(), "must have at least one serialized module");
            let (buffer, name) = serialized_modules.remove(0);
            info!("no in-memory regular modules to choose from, parsing {:?}", name);
            let llvm_module = ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx)?;
            ModuleCodegen::new_regular(name.into_string().unwrap(), llvm_module)
        }
    };
    {
        let (llcx, llmod) = {
            let llvm = &module.module_llvm;
            (&llvm.llcx, llvm.llmod())
        };
        info!("using {:?} as a base module", module.name);

        // The linking steps below may produce errors and diagnostics within LLVM
        // which we'd like to handle and print, so set up our diagnostic handlers
        // (which get unregistered when they go out of scope below).
        let _handler =
            DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::LTO);

        // For all other modules we codegened we'll need to link them into our own
        // bitcode. All modules were codegened in their own LLVM context, however,
        // and we want to move everything to the same LLVM context. Currently the
        // way we know of to do that is to serialize them to a string and then parse
        // them later. Not great but hey, that's why it's "fat" LTO, right?
        for module in in_memory {
            let buffer = ModuleBuffer::new(module.module_llvm.llmod());
            let llmod_id = CString::new(&module.name[..]).unwrap();
            serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
        }
        // Sort the modules to ensure we produce deterministic results.
        serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1));

        // For all serialized bitcode files we parse them and link them in as we did
        // above; this is all mostly handled in C++.
        let mut linker = Linker::new(llmod);
        for (bc_decoded, name) in serialized_modules {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| {
                    recorder.record_arg(format!("{name:?}"))
                });
            info!("linking {:?}", name);
            let data = bc_decoded.data();
            linker.add(data).map_err(|()| write::llvm_err(dcx, LlvmError::LoadBitcode { name }))?;
        }
        drop(linker);
        save_temp_bitcode(cgcx, &module, "lto.input");

        // Internalize everything below threshold to help strip out more modules and such.
        unsafe {
            let ptr = symbols_below_threshold.as_ptr();
            llvm::LLVMRustRunRestrictionPass(
                llmod,
                ptr as *const *const libc::c_char,
                symbols_below_threshold.len() as libc::size_t,
            );
        }
        save_temp_bitcode(cgcx, &module, "lto.after-restriction");
    }

    Ok(module)
}

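/// RAII wrapper around LLVM's bitcode linker: `add` links serialized bitcode
/// into the wrapped module, and dropping the wrapper frees the native linker.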
pub(crate) struct Linker<'a>(&'a mut llvm::Linker<'a>);

impl<'a> Linker<'a> {
    pub(crate) fn new(llmod: &'a llvm::Module) -> Self {
        unsafe { Linker(llvm::LLVMRustLinkerNew(llmod)) }
    }

    pub(crate) fn add(&mut self, bytecode: &[u8]) -> Result<(), ()> {
        unsafe {
            if llvm::LLVMRustLinkerAdd(
                self.0,
                bytecode.as_ptr() as *const libc::c_char,
                bytecode.len(),
            ) {
                Ok(())
            } else {
                Err(())
            }
        }
    }
}

impl Drop for Linker<'_> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustLinkerFree(&mut *(self.0 as *mut _));
        }
    }
}

/// Prepare "thin" LTO to get run on these modules.
///
/// The general structure of ThinLTO is quite different from the structure of
/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
/// one giant LLVM module, and then we run more optimization passes over this
/// big module after internalizing most symbols. Thin LTO, on the other hand,
/// avoids this large bottleneck through more targeted optimization.
///
/// At a high level Thin LTO looks like:
///
///    1. Prepare a "summary" of each LLVM module in question which describes
///       the values inside, cost of the values, etc.
///    2. Merge the summaries of all modules in question into one "index"
///    3. Perform some global analysis on this index
///    4. For each module, use the index and analysis calculated previously to
///       perform local transformations on the module, for example inlining
///       small functions from other modules.
///    5. Run thin-specific optimization passes over each module, and then code
///       generate everything at the end.
///
/// The summary for each module is intended to be quite cheap, and the global
/// index is relatively cheap to create as well. As a result, the goal of
/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
/// situations. For example one cheap optimization is that we can parallelize
/// all codegen modules, easily making use of all the cores on a machine.
///
/// With all that in mind, this function is designed to specifically just
/// calculate the *index* for ThinLTO. This index will then be shared amongst
/// all of the `ThinModule` units returned below and destroyed once
/// they all go out of scope.
fn thin_lto(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    dcx: DiagCtxtHandle<'_>,
    modules: Vec<(String, ThinBuffer)>,
    serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
    symbols_below_threshold: &[*const libc::c_char],
) -> Result<(Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
    let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
    unsafe {
        info!("going for that thin, thin LTO");

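        // Work products from the previous session that are still valid
        // ("green" in incremental-compilation terms), keyed by CGU name.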
        let green_modules: FxHashMap<_, _> =
            cached_modules.iter().map(|(_, wp)| (wp.cgu_name.clone(), wp.clone())).collect();

        let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
        let mut thin_buffers = Vec::with_capacity(modules.len());
        let mut module_names = Vec::with_capacity(full_scope_len);
        let mut thin_modules = Vec::with_capacity(full_scope_len);

        for (i, (name, buffer)) in modules.into_iter().enumerate() {
            info!("local module: {} - {}", i, name);
            let cname = CString::new(name.as_bytes()).unwrap();
            thin_modules.push(llvm::ThinLTOModule {
                identifier: cname.as_ptr(),
                data: buffer.data().as_ptr(),
                len: buffer.data().len(),
            });
            thin_buffers.push(buffer);
            module_names.push(cname);
        }

        // FIXME: All upstream crates are deserialized internally in the
        //        function below to extract their summary and modules. Note that
        //        unlike the loop above we *must* decode and/or read something
        //        here as these are all just serialized files on disk. An
        //        improvement, however, to make here would be to store the
        //        module summary separately from the actual module itself. Right
        //        now this is stored in one large bitcode file, and the entire
        //        file is deflate-compressed. We could try to bypass some of the
        //        decompression by storing the index uncompressed and only
        //        lazily decompressing the bytecode if necessary.
        //
        //        Note that truly taking advantage of this optimization will
        //        likely be further down the road. We'd have to implement
        //        incremental ThinLTO first where we could actually avoid
        //        looking at upstream module contents entirely sometimes (the
        //        index, however, we must always read unconditionally).
        let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());

        let cached_modules =
            cached_modules.into_iter().map(|(sm, wp)| (sm, CString::new(wp.cgu_name).unwrap()));

        for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
            info!("upstream or cached module {:?}", name);
            thin_modules.push(llvm::ThinLTOModule {
                identifier: name.as_ptr(),
                data: module.data().as_ptr(),
                len: module.data().len(),
            });
            serialized.push(module);
            module_names.push(name);
        }

        // Sanity check
        assert_eq!(thin_modules.len(), module_names.len());

        // Delegate to the C++ bindings to create some data here. Once this is a
        // tried-and-true interface we may wish to try to upstream some of this
        // to LLVM itself; right now we reimplement a lot of what they do
        // upstream...
        let data = llvm::LLVMRustCreateThinLTOData(
            thin_modules.as_ptr(),
            thin_modules.len(),
            symbols_below_threshold.as_ptr(),
            symbols_below_threshold.len(),
        )
        .ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?;

        let data = ThinData(data);

        info!("thin LTO data created");

        let (key_map_path, prev_key_map, curr_key_map) = if let Some(ref incr_comp_session_dir) =
            cgcx.incr_comp_session_dir
        {
            let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
            // If the previous file was deleted, or we get an IO error
            // reading the file, then we'll just use `None` as the
            // prev_key_map, which will force the code to be recompiled.
            let prev =
                if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
            let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
            (Some(path), prev, curr)
        } else {
            // If we don't compile incrementally, we don't need to load the
            // import data from LLVM.
            assert!(green_modules.is_empty());
            let curr = ThinLTOKeysMap::default();
            (None, None, curr)
        };
        info!("thin LTO cache key map loaded");
        info!("prev_key_map: {:#?}", prev_key_map);
        info!("curr_key_map: {:#?}", curr_key_map);

        // Throw our data in an `Arc` as we'll be sharing it across threads. We
        // also put all memory referenced by the C++ data (buffers, ids, etc)
        // into the arc as well. After this we'll create a thin module
        // codegen per module in this data.
        let shared = Arc::new(ThinShared {
            data,
            thin_buffers,
            serialized_modules: serialized,
            module_names,
        });

        let mut copy_jobs = vec![];
        let mut opt_jobs = vec![];

        info!("checking which modules can be re-used and which have to be re-optimized.");
        for (module_index, module_name) in shared.module_names.iter().enumerate() {
            let module_name = module_name_to_str(module_name);
            if let (Some(prev_key_map), true) =
                (prev_key_map.as_ref(), green_modules.contains_key(module_name))
            {
                assert!(cgcx.incr_comp_session_dir.is_some());

                // If a module exists in both the current and the previous session,
                // and has the same LTO cache key in both sessions, then we can re-use it
                if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
                    let work_product = green_modules[module_name].clone();
                    copy_jobs.push(work_product);
                    info!(" - {}: re-used", module_name);
                    assert!(cgcx.incr_comp_session_dir.is_some());
                    continue;
                }
            }

            info!(" - {}: re-compiled", module_name);
            opt_jobs.push(ThinModule { shared: Arc::clone(&shared), idx: module_index });
        }

        // Save the current ThinLTO import information for the next compilation
        // session, overwriting the previous serialized data (if any).
        if let Some(path) = key_map_path
            && let Err(err) = curr_key_map.save_to_file(&path)
        {
            return Err(write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err }));
        }

        Ok((opt_jobs, copy_jobs))
    }
}

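/// Applies the user-requested autodiff-related settings to the global
/// Enzyme/LLVM configuration before the differentiation passes run.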
fn enable_autodiff_settings(ad: &[config::AutoDiff]) {
    for val in ad {
        // We intentionally don't use a wildcard, to not forget handling anything new.
        match val {
            config::AutoDiff::PrintPerf => {
                llvm::set_print_perf(true);
            }
            config::AutoDiff::PrintAA => {
                llvm::set_print_activity(true);
            }
            config::AutoDiff::PrintTA => {
                llvm::set_print_type(true);
            }
            config::AutoDiff::PrintTAFn(fun) => {
                llvm::set_print_type(true); // Enable general type printing
                llvm::set_print_type_fun(&fun); // Set specific function to analyze
            }
            config::AutoDiff::Inline => {
                llvm::set_inline(true);
            }
            config::AutoDiff::LooseTypes => {
                llvm::set_loose_types(true);
            }
            config::AutoDiff::PrintSteps => {
                llvm::set_print(true);
            }
            // We handle this in the PassWrapper.cpp
            config::AutoDiff::PrintPasses => {}
            // We handle this in the PassWrapper.cpp
            config::AutoDiff::PrintModBefore => {}
            // We handle this in the PassWrapper.cpp
            config::AutoDiff::PrintModAfter => {}
            // We handle this in the PassWrapper.cpp
            config::AutoDiff::PrintModFinal => {}
            // This is required and already checked
            config::AutoDiff::Enable => {}
            // We handle this below
            config::AutoDiff::NoPostopt => {}
        }
    }
    // This helps with handling enums for now.
    llvm::set_strict_aliasing(false);
    // FIXME(ZuseZ4): Test this, since it was added a long time ago.
    llvm::set_rust_rules(true);
}

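/// Runs the LTO-specific LLVM optimization pipeline over `module`, with `thin`
/// selecting between the ThinLTO and fat LTO pass managers. When autodiff or
/// GPU offload is enabled, the corresponding post-processing runs here too.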
pub(crate) fn run_pass_manager(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    dcx: DiagCtxtHandle<'_>,
    module: &mut ModuleCodegen<ModuleLlvm>,
    thin: bool,
) -> Result<(), FatalError> {
    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
    let config = cgcx.config(module.kind);

    // Now we have one massive module inside of llmod. Time to run the
    // LTO-specific optimization passes that LLVM provides.
    //
    // This code is based off the code found in llvm's LTO code generator:
    //      llvm/lib/LTO/LTOCodeGenerator.cpp
    debug!("running the pass manager");
    let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
    let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);

    // The PostAD behavior is the same as we would have if no autodiff was used.
    // It will run the default optimization pipeline. If AD is enabled we select
    // the DuringAD stage, which will disable vectorization and loop unrolling, and
    // schedule two autodiff optimization + differentiation passes.
    // We then run the llvm_optimize function a second time, to optimize the code
    // generated by the Enzyme differentiation pass.
    let enable_ad = config.autodiff.contains(&config::AutoDiff::Enable);
    let enable_gpu = config.offload.contains(&config::Offload::Enable);
    let stage = if thin {
        write::AutodiffStage::PreAD
    } else if enable_ad {
        write::AutodiffStage::DuringAD
    } else {
        write::AutodiffStage::PostAD
    };

    if enable_ad {
        enable_autodiff_settings(&config.autodiff);
    }

    unsafe {
        write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?;
    }

    if enable_gpu && !thin {
        let cx =
            SimpleCx::new(module.module_llvm.llmod(), &module.module_llvm.llcx, cgcx.pointer_size);
        crate::builder::gpu_offload::handle_gpu_code(cgcx, &cx);
    }

    if cfg!(llvm_enzyme) && enable_ad && !thin {
        let cx =
            SimpleCx::new(module.module_llvm.llmod(), &module.module_llvm.llcx, cgcx.pointer_size);

        for function in cx.get_functions() {
            let enzyme_marker = "enzyme_marker";
            if attributes::has_string_attr(function, enzyme_marker) {
                // Sanity check: Ensure 'noinline' is present before replacing it.
                assert!(
                    attributes::has_attr(function, Function, llvm::AttributeKind::NoInline),
                    "Expected __enzyme function to have 'noinline' before adding 'alwaysinline'"
                );

                attributes::remove_from_llfn(function, Function, llvm::AttributeKind::NoInline);
                attributes::remove_string_attr_from_llfn(function, enzyme_marker);

                assert!(
                    !attributes::has_string_attr(function, enzyme_marker),
                    "Expected function to not have 'enzyme_marker'"
                );

                let always_inline = llvm::AttributeKind::AlwaysInline.create_attr(cx.llcx);
                attributes::apply_to_llfn(function, Function, &[always_inline]);
            }
        }

        let opt_stage = llvm::OptStage::FatLTO;
        let stage = write::AutodiffStage::PostAD;
        if !config.autodiff.contains(&config::AutoDiff::NoPostopt) {
            unsafe {
                write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?;
            }
        }

        // This is the final IR, so people should be able to manually inspect
        // the optimized autodiff output.
        if config.autodiff.contains(&config::AutoDiff::PrintModFinal) {
            unsafe { llvm::LLVMDumpModule(module.module_llvm.llmod()) };
        }
    }

    debug!("lto done");
    Ok(())
}

pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer);

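// SAFETY: assumed sound because the wrapped buffer is owned by this value and
// is only ever read as a plain byte slice, with no thread-local LLVM state.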
unsafe impl Send for ModuleBuffer {}
unsafe impl Sync for ModuleBuffer {}

impl ModuleBuffer {
    pub(crate) fn new(m: &llvm::Module) -> ModuleBuffer {
        ModuleBuffer(unsafe { llvm::LLVMRustModuleBufferCreate(m) })
    }
}

impl ModuleBufferMethods for ModuleBuffer {
    fn data(&self) -> &[u8] {
        unsafe {
            let ptr = llvm::LLVMRustModuleBufferPtr(self.0);
            let len = llvm::LLVMRustModuleBufferLen(self.0);
            slice::from_raw_parts(ptr, len)
        }
    }
}

impl Drop for ModuleBuffer {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustModuleBufferFree(&mut *(self.0 as *mut _));
        }
    }
}

pub struct ThinData(&'static mut llvm::ThinLTOData);

unsafe impl Send for ThinData {}
unsafe impl Sync for ThinData {}

impl Drop for ThinData {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustFreeThinLTOData(&mut *(self.0 as *mut _));
        }
    }
}

pub struct ThinBuffer(&'static mut llvm::ThinLTOBuffer);

unsafe impl Send for ThinBuffer {}
unsafe impl Sync for ThinBuffer {}

impl ThinBuffer {
    pub(crate) fn new(m: &llvm::Module, is_thin: bool, emit_summary: bool) -> ThinBuffer {
        unsafe {
            let buffer = llvm::LLVMRustThinLTOBufferCreate(m, is_thin, emit_summary);
            ThinBuffer(buffer)
        }
    }

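    /// # Safety
    ///
    /// `ptr` must point to a valid, uniquely owned `ThinLTOBuffer` (the call
    /// panics on null); the returned `ThinBuffer` takes ownership and frees
    /// it on drop.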
    pub(crate) unsafe fn from_raw_ptr(ptr: *mut llvm::ThinLTOBuffer) -> ThinBuffer {
        let mut ptr = NonNull::new(ptr).unwrap();
        ThinBuffer(unsafe { ptr.as_mut() })
    }
}

impl ThinBufferMethods for ThinBuffer {
    fn data(&self) -> &[u8] {
        unsafe {
            let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _;
            let len = llvm::LLVMRustThinLTOBufferLen(self.0);
            slice::from_raw_parts(ptr, len)
        }
    }

    fn thin_link_data(&self) -> &[u8] {
        unsafe {
            let ptr = llvm::LLVMRustThinLTOBufferThinLinkDataPtr(self.0) as *const _;
            let len = llvm::LLVMRustThinLTOBufferThinLinkDataLen(self.0);
            slice::from_raw_parts(ptr, len)
        }
    }
}

impl Drop for ThinBuffer {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustThinLTOBufferFree(&mut *(self.0 as *mut _));
        }
    }
}

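/// Runs the per-module half of ThinLTO: parses the serialized module into a
/// fresh LLVM context, applies the rename/resolve/internalize/import steps
/// driven by the shared ThinLTO index, and then runs the thin LTO pass
/// manager over the result.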
pub(crate) fn optimize_thin_module(
    thin_module: ThinModule<LlvmCodegenBackend>,
    cgcx: &CodegenContext<LlvmCodegenBackend>,
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
    let dcx = cgcx.create_dcx();
    let dcx = dcx.handle();

    let module_name = &thin_module.shared.module_names[thin_module.idx];

    // Right now the implementation we've got only works over serialized
    // modules, so we create a fresh new LLVM context and parse the module
    // into that context. One day we may still have to do this for upstream
    // crates, but for locally codegened modules we may be able to reuse
    // the existing LLVM context and module.
    let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx)?;
    let mut module = ModuleCodegen::new_regular(thin_module.name(), module_llvm);
    // Given that the newly created module lacks a ThinLTO buffer for embedding,
    // we need to re-add it here.
    if cgcx.config(ModuleKind::Regular).embed_bitcode() {
        module.thin_lto_buffer = Some(thin_module.data().to_vec());
    }
    {
        let target = &*module.module_llvm.tm;
        let llmod = module.module_llvm.llmod();
        save_temp_bitcode(cgcx, &module, "thin-lto-input");

        // Up next comes the per-module local analyses that we do for Thin LTO.
        // Each of these functions is basically copied from the LLVM
        // implementation and then tailored to suit this implementation. Ideally
        // each of these would be supported by upstream LLVM but that's perhaps
        // a patch for another day!
        //
        // You can find some more comments about these functions in the LLVM
        // bindings we've got (currently `PassWrapper.cpp`)
        {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
            unsafe {
                llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target.raw())
            };
            save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
        }

        {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
            if unsafe { !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) }
            {
                return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
        }

        {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
            if unsafe { !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) }
            {
                return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
        }

        {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
            if unsafe {
                !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target.raw())
            } {
                return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
        }

        // Alright now that we've done everything related to the ThinLTO
        // analysis it's time to run some optimizations! Here we use the same
        // `run_pass_manager` as the "fat" LTO above except that we tell it to
        // populate a thin-specific pass manager, which presumably LLVM treats a
        // little differently.
        {
            info!("running thin lto passes over {}", module.name);
            run_pass_manager(cgcx, dcx, &mut module, true)?;
            save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
        }
    }
    Ok(module)
}

/// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys.
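///
/// On disk (see `save_to_file` and `load_from_file`) this is a plain text file
/// with one `<module name> <cache key>` pair per line.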
#[derive(Debug, Default)]
struct ThinLTOKeysMap {
    // key = llvm name of importing module, value = LLVM cache key
    keys: BTreeMap<String, String>,
}

impl ThinLTOKeysMap {
    fn save_to_file(&self, path: &Path) -> io::Result<()> {
        use std::io::Write;
        let mut writer = File::create_buffered(path)?;
        // The entries are loaded back into a map in `load_from_file()`, so
        // the order in which we write them to the file here does not matter.
        for (module, key) in &self.keys {
            writeln!(writer, "{module} {key}")?;
        }
        Ok(())
    }

    fn load_from_file(path: &Path) -> io::Result<Self> {
        use std::io::BufRead;
        let mut keys = BTreeMap::default();
        let file = File::open_buffered(path)?;
        for line in file.lines() {
            let line = line?;
            let mut split = line.split(' ');
            let module = split.next().unwrap();
            let key = split.next().unwrap();
            assert_eq!(split.next(), None, "Expected two space-separated values, found {line:?}");
            keys.insert(module.to_string(), key.to_string());
        }
        Ok(Self { keys })
    }

    fn from_thin_lto_modules(
        data: &ThinData,
        modules: &[llvm::ThinLTOModule],
        names: &[CString],
    ) -> Self {
        let keys = iter::zip(modules, names)
            .map(|(module, name)| {
                let key = build_string(|rust_str| unsafe {
                    llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
                })
                .expect("Invalid ThinLTO module key");
                (module_name_to_str(name).to_string(), key)
            })
            .collect();
        Self { keys }
    }
}

fn module_name_to_str(c_str: &CStr) -> &str {
    c_str.to_str().unwrap_or_else(|e| {
        bug!("Encountered non-utf8 LLVM module name `{}`: {}", c_str.to_string_lossy(), e)
    })
}

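/// Parses serialized bitcode back into an LLVM module within the given
/// context, turning malformed bitcode into a fatal error.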
pub(crate) fn parse_module<'a>(
    cx: &'a llvm::Context,
    name: &CStr,
    data: &[u8],
    dcx: DiagCtxtHandle<'_>,
) -> Result<&'a llvm::Module, FatalError> {
    unsafe {
        llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr())
            .ok_or_else(|| write::llvm_err(dcx, LlvmError::ParseBitcode))
    }
}