rustc_codegen_ssa/back/write.rs

use std::assert_matches::assert_matches;
use std::marker::PhantomData;
use std::panic::AssertUnwindSafe;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender, channel};
use std::{fs, io, mem, str, thread};

use rustc_abi::Size;
use rustc_ast::attr;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::jobserver::{self, Acquired};
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
use rustc_errors::emitter::Emitter;
use rustc_errors::translation::Translator;
use rustc_errors::{
    Diag, DiagArgMap, DiagCtxt, DiagMessage, ErrCode, FatalError, FatalErrorMarker, Level,
    MultiSpan, Style, Suggestions,
};
use rustc_fs_util::link_or_copy;
use rustc_incremental::{
    copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
};
use rustc_metadata::fs::copy_to_stdout;
use rustc_middle::bug;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_session::config::{
    self, CrateType, Lto, OutFileName, OutputFilenames, OutputType, Passes, SwitchWithOptPath,
};
use rustc_span::source_map::SourceMap;
use rustc_span::{FileName, InnerSpan, Span, SpanData, sym};
use rustc_target::spec::{MergeFunctions, SanitizerSet};
use tracing::debug;

use super::link::{self, ensure_removed};
use super::lto::{self, SerializedModule};
use crate::back::lto::check_lto_allowed;
use crate::errors::ErrorCreatingRemarkDir;
use crate::traits::*;
use crate::{
    CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
    errors,
};

const PRE_LTO_BC_EXT: &str = "pre-lto.bc";

/// What kind of object file to emit.
#[derive(Clone, Copy, PartialEq)]
pub enum EmitObj {
    // No object file.
    None,

    // Just uncompressed llvm bitcode. Provides easy compatibility with
    // emscripten's `emcc` compiler, when used as the linker.
    Bitcode,

    // Object code, possibly augmented with a bitcode section.
    ObjectCode(BitcodeSection),
}

/// What kind of llvm bitcode section to embed in an object file.
#[derive(Clone, Copy, PartialEq)]
pub enum BitcodeSection {
    // No bitcode section.
    None,

    // A full, uncompressed bitcode section.
    Full,
}

/// Module-specific configuration for `optimize_and_codegen`.
pub struct ModuleConfig {
    /// Names of additional optimization passes to run.
    pub passes: Vec<String>,
    /// Some(level) to optimize at a certain level, or None to run
    /// absolutely no optimizations (used for the allocator module).
    pub opt_level: Option<config::OptLevel>,

    pub pgo_gen: SwitchWithOptPath,
    pub pgo_use: Option<PathBuf>,
    pub pgo_sample_use: Option<PathBuf>,
    pub debug_info_for_profiling: bool,
    pub instrument_coverage: bool,

    pub sanitizer: SanitizerSet,
    pub sanitizer_recover: SanitizerSet,
    pub sanitizer_dataflow_abilist: Vec<String>,
    pub sanitizer_memory_track_origins: usize,

    // Flags indicating which outputs to produce.
    pub emit_pre_lto_bc: bool,
    pub emit_no_opt_bc: bool,
    pub emit_bc: bool,
    pub emit_ir: bool,
    pub emit_asm: bool,
    pub emit_obj: EmitObj,
    pub emit_thin_lto: bool,
    pub emit_thin_lto_summary: bool,

    // Miscellaneous flags. These are mostly copied from command-line
    // options.
    pub verify_llvm_ir: bool,
    pub lint_llvm_ir: bool,
    pub no_prepopulate_passes: bool,
    pub no_builtins: bool,
    pub vectorize_loop: bool,
    pub vectorize_slp: bool,
    pub merge_functions: bool,
    pub emit_lifetime_markers: bool,
    pub llvm_plugins: Vec<String>,
    pub autodiff: Vec<config::AutoDiff>,
    pub offload: Vec<config::Offload>,
}

impl ModuleConfig {
    fn new(kind: ModuleKind, tcx: TyCtxt<'_>, no_builtins: bool) -> ModuleConfig {
        // If it's a regular module, use `$regular`, otherwise use `$other`.
        // `$regular` and `$other` are evaluated lazily.
        macro_rules! if_regular {
            ($regular: expr, $other: expr) => {
                if let ModuleKind::Regular = kind { $regular } else { $other }
            };
        }

        let sess = tcx.sess;
        let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);

        let save_temps = sess.opts.cg.save_temps;

        let should_emit_obj = sess.opts.output_types.contains_key(&OutputType::Exe)
            || match kind {
                ModuleKind::Regular => sess.opts.output_types.contains_key(&OutputType::Object),
                ModuleKind::Allocator => false,
            };

        let emit_obj = if !should_emit_obj {
            EmitObj::None
        } else if sess.target.obj_is_bitcode
            || (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins)
        {
            // This case is selected if the target uses objects as bitcode, or
            // if linker plugin LTO is enabled. In the linker plugin LTO case
            // the assumption is that the final link-step will read the bitcode
            // and convert it to object code. This may be done by either the
            // native linker or rustc itself.
            //
            // Note, however, that the linker-plugin-lto requested here is
            // explicitly ignored for `#![no_builtins]` crates. These crates are
            // specifically ignored by rustc's LTO passes and wouldn't work if
            // loaded into the linker. These crates define symbols that LLVM
            // lowers intrinsics to, and these symbol dependencies aren't known
            // until after codegen. As a result any crate marked
            // `#![no_builtins]` is assumed to not participate in LTO and
            // instead goes on to generate object code.
            EmitObj::Bitcode
        } else if need_bitcode_in_object(tcx) {
            EmitObj::ObjectCode(BitcodeSection::Full)
        } else {
            EmitObj::ObjectCode(BitcodeSection::None)
        };

        ModuleConfig {
            passes: if_regular!(sess.opts.cg.passes.clone(), vec![]),

            opt_level: opt_level_and_size,

            pgo_gen: if_regular!(
                sess.opts.cg.profile_generate.clone(),
                SwitchWithOptPath::Disabled
            ),
            pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
            pgo_sample_use: if_regular!(sess.opts.unstable_opts.profile_sample_use.clone(), None),
            debug_info_for_profiling: sess.opts.unstable_opts.debug_info_for_profiling,
            instrument_coverage: if_regular!(sess.instrument_coverage(), false),

            sanitizer: if_regular!(sess.sanitizers(), SanitizerSet::empty()),
            sanitizer_dataflow_abilist: if_regular!(
                sess.opts.unstable_opts.sanitizer_dataflow_abilist.clone(),
                Vec::new()
            ),
            sanitizer_recover: if_regular!(
                sess.opts.unstable_opts.sanitizer_recover,
                SanitizerSet::empty()
            ),
            sanitizer_memory_track_origins: if_regular!(
                sess.opts.unstable_opts.sanitizer_memory_track_origins,
                0
            ),

            emit_pre_lto_bc: if_regular!(
                save_temps || need_pre_lto_bitcode_for_incr_comp(sess),
                false
            ),
            emit_no_opt_bc: if_regular!(save_temps, false),
            emit_bc: if_regular!(
                save_temps || sess.opts.output_types.contains_key(&OutputType::Bitcode),
                save_temps
            ),
            emit_ir: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::LlvmAssembly),
                false
            ),
            emit_asm: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::Assembly),
                false
            ),
            emit_obj,
            // thin lto summaries prevent fat lto, so do not emit them if fat
            // lto is requested. See PR #136840 for background information.
            emit_thin_lto: sess.opts.unstable_opts.emit_thin_lto && sess.lto() != Lto::Fat,
            emit_thin_lto_summary: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::ThinLinkBitcode),
                false
            ),

            verify_llvm_ir: sess.verify_llvm_ir(),
            lint_llvm_ir: sess.opts.unstable_opts.lint_llvm_ir,
            no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
            no_builtins: no_builtins || sess.target.no_builtins,

            // Copy what clang does by turning on loop vectorization at O2 and
            // slp vectorization at O3.
            vectorize_loop: !sess.opts.cg.no_vectorize_loops
                && (sess.opts.optimize == config::OptLevel::More
                    || sess.opts.optimize == config::OptLevel::Aggressive),
            vectorize_slp: !sess.opts.cg.no_vectorize_slp
                && sess.opts.optimize == config::OptLevel::Aggressive,

            // Some targets (namely, NVPTX) interact badly with the
            // MergeFunctions pass. This is because MergeFunctions can generate
            // new function calls which may interfere with the target calling
            // convention; e.g. for the NVPTX target, PTX kernels should not
            // call other PTX kernels. MergeFunctions can also be configured to
            // generate aliases instead, but aliases are not supported by some
            // backends (again, NVPTX). Therefore, allow targets to opt out of
            // the MergeFunctions pass, but otherwise keep the pass enabled (at
            // O2 and O3) since it can be useful for reducing code size.
            merge_functions: match sess
                .opts
                .unstable_opts
                .merge_functions
                .unwrap_or(sess.target.merge_functions)
            {
                MergeFunctions::Disabled => false,
                MergeFunctions::Trampolines | MergeFunctions::Aliases => {
                    use config::OptLevel::*;
                    match sess.opts.optimize {
                        Aggressive | More | SizeMin | Size => true,
                        Less | No => false,
                    }
                }
            },

            emit_lifetime_markers: sess.emit_lifetime_markers(),
            llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
            autodiff: if_regular!(sess.opts.unstable_opts.autodiff.clone(), vec![]),
            offload: if_regular!(sess.opts.unstable_opts.offload.clone(), vec![]),
        }
    }

    pub fn bitcode_needed(&self) -> bool {
        self.emit_bc
            || self.emit_thin_lto_summary
            || self.emit_obj == EmitObj::Bitcode
            || self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }

    pub fn embed_bitcode(&self) -> bool {
        self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }
}

/// Configuration passed to the function returned by the `target_machine_factory`.
pub struct TargetMachineFactoryConfig {
    /// Split DWARF is enabled in LLVM by checking that `TM.MCOptions.SplitDwarfFile` isn't empty,
    /// so the path to the dwarf object has to be provided when we create the target machine.
    /// This can be ignored by backends which do not need it for their Split DWARF support.
    pub split_dwarf_file: Option<PathBuf>,

    /// The name of the output object file. Used for setting OutputFilenames in target options
    /// so that LLVM can emit the CodeView S_OBJNAME record in PDB files.
    pub output_obj_file: Option<PathBuf>,
}

impl TargetMachineFactoryConfig {
    pub fn new(
        cgcx: &CodegenContext<impl WriteBackendMethods>,
        module_name: &str,
    ) -> TargetMachineFactoryConfig {
        let split_dwarf_file = if cgcx.target_can_use_split_dwarf {
            cgcx.output_filenames.split_dwarf_path(
                cgcx.split_debuginfo,
                cgcx.split_dwarf_kind,
                module_name,
                cgcx.invocation_temp.as_deref(),
            )
        } else {
            None
        };

        let output_obj_file = Some(cgcx.output_filenames.temp_path_for_cgu(
            OutputType::Object,
            module_name,
            cgcx.invocation_temp.as_deref(),
        ));
        TargetMachineFactoryConfig { split_dwarf_file, output_obj_file }
    }
}

pub type TargetMachineFactoryFn<B> = Arc<
    dyn Fn(
            TargetMachineFactoryConfig,
        ) -> Result<
            <B as WriteBackendMethods>::TargetMachine,
            <B as WriteBackendMethods>::TargetMachineError,
        > + Send
        + Sync,
>;

/// Additional resources used by optimize_and_codegen (not module specific)
#[derive(Clone)]
pub struct CodegenContext<B: WriteBackendMethods> {
    // Resources needed when running LTO
    pub prof: SelfProfilerRef,
    pub lto: Lto,
    pub save_temps: bool,
    pub fewer_names: bool,
    pub time_trace: bool,
    pub opts: Arc<config::Options>,
    pub crate_types: Vec<CrateType>,
    pub output_filenames: Arc<OutputFilenames>,
    pub invocation_temp: Option<String>,
    pub module_config: Arc<ModuleConfig>,
    pub allocator_config: Arc<ModuleConfig>,
    pub tm_factory: TargetMachineFactoryFn<B>,
    pub msvc_imps_needed: bool,
    pub is_pe_coff: bool,
    pub target_can_use_split_dwarf: bool,
    pub target_arch: String,
    pub target_is_like_darwin: bool,
    pub target_is_like_aix: bool,
    pub target_is_like_gpu: bool,
    pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
    pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
    pub pointer_size: Size,

    /// Emitter to use for diagnostics produced during codegen.
    pub diag_emitter: SharedEmitter,
    /// LLVM optimizations for which we want to print remarks.
    pub remark: Passes,
    /// Directory into which the LLVM optimization remarks should be written.
    /// If `None`, they will be written to stderr.
    pub remark_dir: Option<PathBuf>,
    /// The incremental compilation session directory, or `None` if we are not
    /// compiling incrementally.
    pub incr_comp_session_dir: Option<PathBuf>,
    /// `true` if the codegen should be run in parallel.
    ///
    /// Depends on [`ExtraBackendMethods::supports_parallel()`] and `-Zno_parallel_backend`.
    pub parallel: bool,
}

impl<B: WriteBackendMethods> CodegenContext<B> {
    pub fn create_dcx(&self) -> DiagCtxt {
        DiagCtxt::new(Box::new(self.diag_emitter.clone()))
    }
}

fn generate_thin_lto_work<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    needs_thin_lto: Vec<(String, B::ThinBuffer)>,
    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> Vec<(ThinLtoWorkItem<B>, u64)> {
    let _prof_timer = cgcx.prof.generic_activity("codegen_thin_generate_lto_work");

    let (lto_modules, copy_jobs) = B::run_thin_lto(
        cgcx,
        exported_symbols_for_lto,
        each_linked_rlib_for_lto,
        needs_thin_lto,
        import_only_modules,
    );
    lto_modules
        .into_iter()
        .map(|module| {
            let cost = module.cost();
            (ThinLtoWorkItem::ThinLto(module), cost)
        })
        .chain(copy_jobs.into_iter().map(|wp| {
            (
                ThinLtoWorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
                    name: wp.cgu_name.clone(),
                    source: wp,
                }),
                0, // copying is very cheap
            )
        }))
        .collect()
}
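
// Illustrative only (editor's addition, not part of the original source): the
// coordinator in `do_thin_lto` below keeps its work queue sorted ascending by
// cost via `binary_search_by_key`, then `pop()`s from the end, so the most
// expensive ThinLTO modules are started first. A minimal sketch of that
// scheduling policy on plain data; the test module name is hypothetical.
#[cfg(test)]
mod cost_queue_sketch {
    #[test]
    fn pop_yields_most_expensive_first() {
        let mut work_items: Vec<(&str, u64)> = vec![];
        for (name, cost) in [("a", 10), ("b", 30), ("c", 20)] {
            // Insert while keeping the vector sorted ascending by cost.
            let idx = work_items
                .binary_search_by_key(&cost, |&(_, cost)| cost)
                .unwrap_or_else(|e| e);
            work_items.insert(idx, (name, cost));
        }
        // `pop()` takes from the back: highest cost first.
        assert_eq!(work_items.pop(), Some(("b", 30)));
        assert_eq!(work_items.pop(), Some(("c", 20)));
        assert_eq!(work_items.pop(), Some(("a", 10)));
    }
}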

struct CompiledModules {
    modules: Vec<CompiledModule>,
    allocator_module: Option<CompiledModule>,
}

fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
    let sess = tcx.sess;
    sess.opts.cg.embed_bitcode
        && tcx.crate_types().contains(&CrateType::Rlib)
        && sess.opts.output_types.contains_key(&OutputType::Exe)
}

fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
    if sess.opts.incremental.is_none() {
        return false;
    }

    match sess.lto() {
        Lto::No => false,
        Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
    }
}

pub(crate) fn start_async_codegen<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    target_cpu: String,
    allocator_module: Option<ModuleCodegen<B::Module>>,
) -> OngoingCodegen<B> {
    let (coordinator_send, coordinator_receive) = channel();

    let crate_attrs = tcx.hir_attrs(rustc_hir::CRATE_HIR_ID);
    let no_builtins = attr::contains_name(crate_attrs, sym::no_builtins);

    let crate_info = CrateInfo::new(tcx, target_cpu);

    let regular_config = ModuleConfig::new(ModuleKind::Regular, tcx, no_builtins);
    let allocator_config = ModuleConfig::new(ModuleKind::Allocator, tcx, no_builtins);

    let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
    let (codegen_worker_send, codegen_worker_receive) = channel();

    let coordinator_thread = start_executing_work(
        backend.clone(),
        tcx,
        &crate_info,
        shared_emitter,
        codegen_worker_send,
        coordinator_receive,
        Arc::new(regular_config),
        Arc::new(allocator_config),
        allocator_module,
        coordinator_send.clone(),
    );

    OngoingCodegen {
        backend,
        crate_info,

        codegen_worker_receive,
        shared_emitter_main,
        coordinator: Coordinator {
            sender: coordinator_send,
            future: Some(coordinator_thread),
            phantom: PhantomData,
        },
        output_filenames: Arc::clone(tcx.output_filenames(())),
    }
}

fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
    sess: &Session,
    compiled_modules: &CompiledModules,
) -> FxIndexMap<WorkProductId, WorkProduct> {
    let mut work_products = FxIndexMap::default();

    if sess.opts.incremental.is_none() {
        return work_products;
    }

    let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");

    for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
        let mut files = Vec::new();
        if let Some(object_file_path) = &module.object {
            files.push((OutputType::Object.extension(), object_file_path.as_path()));
        }
        if let Some(dwarf_object_file_path) = &module.dwarf_object {
            files.push(("dwo", dwarf_object_file_path.as_path()));
        }
        if let Some(path) = &module.assembly {
            files.push((OutputType::Assembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.llvm_ir {
            files.push((OutputType::LlvmAssembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.bytecode {
            files.push((OutputType::Bitcode.extension(), path.as_path()));
        }
        if let Some((id, product)) = copy_cgu_workproduct_to_incr_comp_cache_dir(
            sess,
            &module.name,
            files.as_slice(),
            &module.links_from_incr_cache,
        ) {
            work_products.insert(id, product);
        }
    }

    work_products
}

fn produce_final_output_artifacts(
    sess: &Session,
    compiled_modules: &CompiledModules,
    crate_output: &OutputFilenames,
) {
    let mut user_wants_bitcode = false;
    let mut user_wants_objects = false;

    // Produce final compile outputs.
    let copy_gracefully = |from: &Path, to: &OutFileName| match to {
        OutFileName::Stdout if let Err(e) = copy_to_stdout(from) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, to.as_path(), e));
        }
        OutFileName::Real(path) if let Err(e) = fs::copy(from, path) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, path, e));
        }
        _ => {}
    };

    let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
        if let [module] = &compiled_modules.modules[..] {
            // 1) Only one codegen unit. In this case it's easy to copy
            //    `foo.0.x` to `foo.x`.
            let path = crate_output.temp_path_for_cgu(
                output_type,
                &module.name,
                sess.invocation_temp.as_deref(),
            );
            let output = crate_output.path(output_type);
            if !output_type.is_text_output() && output.is_tty() {
                sess.dcx()
                    .emit_err(errors::BinaryOutputToTty { shorthand: output_type.shorthand() });
            } else {
                copy_gracefully(&path, &output);
            }
            if !sess.opts.cg.save_temps && !keep_numbered {
                // The user just wants `foo.x`, not `foo.#module-name#.x`.
                ensure_removed(sess.dcx(), &path);
            }
        } else {
            if crate_output.outputs.contains_explicit_name(&output_type) {
                // 2) Multiple codegen units, with `--emit foo=some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx()
                    .emit_warn(errors::IgnoringEmitPath { extension: output_type.extension() });
            } else if crate_output.single_output_file.is_some() {
                // 3) Multiple codegen units, with `-o some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx().emit_warn(errors::IgnoringOutput { extension: output_type.extension() });
            } else {
                // 4) Multiple codegen units, but no explicit name. We
                //    just leave the `foo.0.x` files in place.
                // (We don't have to do any work in this case.)
            }
        }
    };

    // Flag to indicate whether the user explicitly requested bitcode.
    // Otherwise, we produced it only as a temporary output, and will need
    // to get rid of it.
    for output_type in crate_output.outputs.keys() {
        match *output_type {
            OutputType::Bitcode => {
                user_wants_bitcode = true;
                // Copy to .bc, but always keep the .0.bc. There is a later
                // check to figure out if we should delete .0.bc files, or keep
                // them for making an rlib.
                copy_if_one_unit(OutputType::Bitcode, true);
            }
            OutputType::ThinLinkBitcode => {
                copy_if_one_unit(OutputType::ThinLinkBitcode, false);
            }
            OutputType::LlvmAssembly => {
                copy_if_one_unit(OutputType::LlvmAssembly, false);
            }
            OutputType::Assembly => {
                copy_if_one_unit(OutputType::Assembly, false);
            }
            OutputType::Object => {
                user_wants_objects = true;
                copy_if_one_unit(OutputType::Object, true);
            }
            OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
        }
    }

    // Clean up unwanted temporary files.

    // We create the following files by default:
    //  - #crate#.#module-name#.bc
    //  - #crate#.#module-name#.o
    //  - #crate#.crate.metadata.bc
    //  - #crate#.crate.metadata.o
    //  - #crate#.o (linked from crate.##.o)
    //  - #crate#.bc (copied from crate.##.bc)
    // We may create additional files if requested by the user (through
    // `-C save-temps` or `--emit=` flags).

    if !sess.opts.cg.save_temps {
        // Remove the temporary .#module-name#.o objects. If the user didn't
        // explicitly request bitcode (with --emit=bc), and the bitcode is not
        // needed for building an rlib, then we must remove .#module-name#.bc as
        // well.

        // Specific rules for keeping .#module-name#.bc:
        //  - If the user requested bitcode (`user_wants_bitcode`), and
        //    codegen_units > 1, then keep it.
        //  - If the user requested bitcode but codegen_units == 1, then we
        //    can toss .#module-name#.bc because we copied it to .bc earlier.
        //  - If we're not building an rlib and the user didn't request
        //    bitcode, then delete .#module-name#.bc.
        // If you change how this works, also update back::link::link_rlib,
        // where .#module-name#.bc files are (maybe) deleted after making an
        // rlib.
        let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);

        let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units().as_usize() > 1;

        let keep_numbered_objects =
            needs_crate_object || (user_wants_objects && sess.codegen_units().as_usize() > 1);

        for module in compiled_modules.modules.iter() {
            if !keep_numbered_objects {
                if let Some(ref path) = module.object {
                    ensure_removed(sess.dcx(), path);
                }

                if let Some(ref path) = module.dwarf_object {
                    ensure_removed(sess.dcx(), path);
                }
            }

            if let Some(ref path) = module.bytecode {
                if !keep_numbered_bitcode {
                    ensure_removed(sess.dcx(), path);
                }
            }
        }

        if !user_wants_bitcode
            && let Some(ref allocator_module) = compiled_modules.allocator_module
            && let Some(ref path) = allocator_module.bytecode
        {
            ensure_removed(sess.dcx(), path);
        }
    }

    if sess.opts.json_artifact_notifications {
        if let [module] = &compiled_modules.modules[..] {
            module.for_each_output(|_path, ty| {
                if sess.opts.output_types.contains_key(&ty) {
                    let descr = ty.shorthand();
                    // For a single CGU, the file is renamed to drop the
                    // CGU-specific suffix, so we regenerate the path the same way.
                    let path = crate_output.path(ty);
                    sess.dcx().emit_artifact_notification(path.as_path(), descr);
                }
            });
        } else {
            for module in &compiled_modules.modules {
                module.for_each_output(|path, ty| {
                    if sess.opts.output_types.contains_key(&ty) {
                        let descr = ty.shorthand();
                        sess.dcx().emit_artifact_notification(&path, descr);
                    }
                });
            }
        }
    }

    // We leave the following files around by default:
    //  - #crate#.o
    //  - #crate#.crate.metadata.o
    //  - #crate#.bc
    // These are used in linking steps and will be cleaned up afterward.
}

pub(crate) enum WorkItem<B: WriteBackendMethods> {
    /// Optimize a newly codegened, totally unoptimized module.
    Optimize(ModuleCodegen<B::Module>),
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
}

enum ThinLtoWorkItem<B: WriteBackendMethods> {
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
    /// Performs thin-LTO on the given module.
    ThinLto(lto::ThinModule<B>),
}

// `pthread_setname()` on *nix ignores anything beyond the first 15
// bytes. Use short descriptions to maximize the space available for
// the module name.
#[cfg(not(windows))]
fn desc(short: &str, _long: &str, name: &str) -> String {
    // The short label is three bytes, and is followed by a space. That
    // leaves 11 bytes for the CGU name. How we obtain those 11 bytes
    // depends on the CGU name form.
    //
    // - Non-incremental, e.g. `regex.f10ba03eb5ec7975-cgu.0`: the part
    //   before the `-cgu.0` is the same for every CGU, so use the
    //   `cgu.0` part. The number suffix will be different for each
    //   CGU.
    //
    // - Incremental (normal), e.g. `2i52vvl2hco29us0`: use the whole
    //   name because each CGU will have a unique ASCII hash, and the
    //   first 11 bytes will be enough to identify it.
    //
    // - Incremental (with `-Zhuman-readable-cgu-names`), e.g.
    //   `regex.f10ba03eb5ec7975-re_builder.volatile`: use the whole
    //   name. The first 11 bytes won't be enough to uniquely identify
    //   it, but no obvious substring will, and this is a rarely used
    //   option so it doesn't matter much.
    //
    assert_eq!(short.len(), 3);
    let name = if let Some(index) = name.find("-cgu.") {
        &name[index + 1..] // +1 skips the leading '-'.
    } else {
        name
    };
    format!("{short} {name}")
}

// Windows has no thread name length limit, so use more descriptive names.
#[cfg(windows)]
fn desc(_short: &str, long: &str, name: &str) -> String {
    format!("{long} {name}")
}
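
// Illustrative only (editor's addition, not part of the original source): a
// minimal sketch of the truncation behavior described above, assuming the
// non-Windows `desc` variant. The test module name is hypothetical.
#[cfg(all(test, not(windows)))]
mod desc_sketch {
    use super::desc;

    #[test]
    fn cgu_names_are_shortened_for_thread_names() {
        // Non-incremental names share a common prefix, so only the
        // distinguishing `cgu.N` suffix is kept after the 3-byte label.
        assert_eq!(desc("opt", "optimize module", "regex.f10ba03eb5ec7975-cgu.0"), "opt cgu.0");
        // Incremental hash-style names are already unique and used whole.
        assert_eq!(desc("lto", "thin-LTO module", "2i52vvl2hco29us0"), "lto 2i52vvl2hco29us0");
    }
}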

impl<B: WriteBackendMethods> WorkItem<B> {
    /// Generate a short description of this work item suitable for use as a thread name.
    fn short_description(&self) -> String {
        match self {
            WorkItem::Optimize(m) => desc("opt", "optimize module", &m.name),
            WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for", &m.name),
        }
    }
}

impl<B: WriteBackendMethods> ThinLtoWorkItem<B> {
    /// Generate a short description of this work item suitable for use as a thread name.
    fn short_description(&self) -> String {
        match self {
            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
                desc("cpy", "copy LTO artifacts for", &m.name)
            }
            ThinLtoWorkItem::ThinLto(m) => desc("lto", "thin-LTO module", m.name()),
        }
    }
}

/// A result produced by the backend.
pub(crate) enum WorkItemResult<B: WriteBackendMethods> {
    /// The backend has finished compiling a CGU, nothing more required.
    Finished(CompiledModule),

    /// The backend has finished compiling a CGU, which now needs to go through
    /// fat LTO.
    NeedsFatLto(FatLtoInput<B>),

    /// The backend has finished compiling a CGU, which now needs to go through
    /// thin LTO.
    NeedsThinLto(String, B::ThinBuffer),
}

pub enum FatLtoInput<B: WriteBackendMethods> {
    Serialized { name: String, buffer: SerializedModule<B::ModuleBuffer> },
    InMemory(ModuleCodegen<B::Module>),
}

/// Actual LTO type we end up choosing based on multiple factors.
pub(crate) enum ComputedLtoType {
    No,
    Thin,
    Fat,
}

pub(crate) fn compute_per_cgu_lto_type(
    sess_lto: &Lto,
    opts: &config::Options,
    sess_crate_types: &[CrateType],
    module_kind: ModuleKind,
) -> ComputedLtoType {
    // If the linker does LTO, we don't have to do it. Note that we
    // keep doing full LTO, if it is requested, so as not to break the
    // assumption that the output will be a single module.
    let linker_does_lto = opts.cg.linker_plugin_lto.enabled();

    // When we're automatically doing ThinLTO for multi-codegen-unit
    // builds we don't actually want to LTO the allocator module if
    // it shows up. This is due to various linker shenanigans that
    // we'll encounter later.
    let is_allocator = module_kind == ModuleKind::Allocator;

    // We ignore a request for full crate graph LTO if the crate type
    // is only an rlib, as there is no full crate graph to process;
    // that'll happen later.
    //
    // This use case currently comes up primarily for targets that
    // require LTO, so the request for LTO is always unconditionally
    // passed down to the backend, but we don't actually want to do
    // anything about it yet until we've got a final product.
    let is_rlib = matches!(sess_crate_types, [CrateType::Rlib]);

    match sess_lto {
        Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
        Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
        Lto::Fat if !is_rlib => ComputedLtoType::Fat,
        _ => ComputedLtoType::No,
    }
}
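
// Illustrative only (editor's addition, not part of the original source): a
// sketch of the decision table above. It assumes `config::Options` implements
// `Default` (leaving `-Clinker-plugin-lto` disabled); the test module name is
// hypothetical.
#[cfg(test)]
mod lto_type_sketch {
    use super::*;

    #[test]
    fn rlib_and_allocator_cases_skip_lto() {
        let opts = config::Options::default();
        // A pure rlib defers a fat-LTO request: there is no full crate
        // graph to process yet.
        assert!(matches!(
            compute_per_cgu_lto_type(&Lto::Fat, &opts, &[CrateType::Rlib], ModuleKind::Regular),
            ComputedLtoType::No
        ));
        // The allocator module is never thin-LTO'd.
        assert!(matches!(
            compute_per_cgu_lto_type(&Lto::ThinLocal, &opts, &[], ModuleKind::Allocator),
            ComputedLtoType::No
        ));
        // A regular module under local ThinLTO does get thin LTO.
        assert!(matches!(
            compute_per_cgu_lto_type(&Lto::ThinLocal, &opts, &[], ModuleKind::Regular),
            ComputedLtoType::Thin
        ));
    }
}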

fn execute_optimize_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    mut module: ModuleCodegen<B::Module>,
) -> WorkItemResult<B> {
    let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*module.name);

    let dcx = cgcx.create_dcx();
    let dcx = dcx.handle();

    let module_config = match module.kind {
        ModuleKind::Regular => &cgcx.module_config,
        ModuleKind::Allocator => &cgcx.allocator_config,
    };

    B::optimize(cgcx, dcx, &mut module, module_config);

    // After we've done the initial round of optimizations we need to
    // decide whether to synchronously codegen this module or ship it
    // back to the coordinator thread for further LTO processing (which
    // has to wait for all the initial modules to be optimized).

    let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types, module.kind);

    // If we're doing some form of incremental LTO then we need to be sure to
    // save our module to disk first.
    let bitcode = if module_config.emit_pre_lto_bc {
        let filename = pre_lto_bitcode_filename(&module.name);
        cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
    } else {
        None
    };

    match lto_type {
        ComputedLtoType::No => {
            let module = B::codegen(cgcx, module, module_config);
            WorkItemResult::Finished(module)
        }
        ComputedLtoType::Thin => {
            let (name, thin_buffer) = B::prepare_thin(module);
            if let Some(path) = bitcode {
                fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
            }
            WorkItemResult::NeedsThinLto(name, thin_buffer)
        }
        ComputedLtoType::Fat => match bitcode {
            Some(path) => {
                let (name, buffer) = B::serialize_module(module);
                fs::write(&path, buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
                WorkItemResult::NeedsFatLto(FatLtoInput::Serialized {
                    name,
                    buffer: SerializedModule::Local(buffer),
                })
            }
            None => WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module)),
        },
    }
}

fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    module: CachedModuleCodegen,
) -> CompiledModule {
    let _timer = cgcx
        .prof
        .generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name);

    let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();

    let mut links_from_incr_cache = Vec::new();

    let mut load_from_incr_comp_dir = |output_path: PathBuf, saved_path: &str| {
        let source_file = in_incr_comp_dir(incr_comp_session_dir, saved_path);
        debug!(
            "copying preexisting module `{}` from {:?} to {}",
            module.name,
            source_file,
            output_path.display()
        );
        match link_or_copy(&source_file, &output_path) {
            Ok(_) => {
                links_from_incr_cache.push(source_file);
                Some(output_path)
            }
            Err(error) => {
                cgcx.create_dcx().handle().emit_err(errors::CopyPathBuf {
                    source_file,
                    output_path,
                    error,
                });
                None
            }
        }
    };

    let dwarf_object =
        module.source.saved_files.get("dwo").as_ref().and_then(|saved_dwarf_object_file| {
            let dwarf_obj_out = cgcx
                .output_filenames
                .split_dwarf_path(
                    cgcx.split_debuginfo,
                    cgcx.split_dwarf_kind,
                    &module.name,
                    cgcx.invocation_temp.as_deref(),
                )
                .expect(
                    "saved dwarf object in work product but `split_dwarf_path` returned `None`",
                );
            load_from_incr_comp_dir(dwarf_obj_out, saved_dwarf_object_file)
        });

    let mut load_from_incr_cache = |perform, output_type: OutputType| {
        if perform {
            let saved_file = module.source.saved_files.get(output_type.extension())?;
            let output_path = cgcx.output_filenames.temp_path_for_cgu(
                output_type,
                &module.name,
                cgcx.invocation_temp.as_deref(),
            );
            load_from_incr_comp_dir(output_path, &saved_file)
        } else {
            None
        }
    };

    let module_config = &cgcx.module_config;
    let should_emit_obj = module_config.emit_obj != EmitObj::None;
    let assembly = load_from_incr_cache(module_config.emit_asm, OutputType::Assembly);
    let llvm_ir = load_from_incr_cache(module_config.emit_ir, OutputType::LlvmAssembly);
    let bytecode = load_from_incr_cache(module_config.emit_bc, OutputType::Bitcode);
    let object = load_from_incr_cache(should_emit_obj, OutputType::Object);
    if should_emit_obj && object.is_none() {
        cgcx.create_dcx().handle().emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
    }

    CompiledModule {
        links_from_incr_cache,
        kind: ModuleKind::Regular,
        name: module.name,
        object,
        dwarf_object,
        bytecode,
        assembly,
        llvm_ir,
    }
}

fn do_fat_lto<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    mut needs_fat_lto: Vec<FatLtoInput<B>>,
    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> CompiledModule {
    let _timer = cgcx.prof.verbose_generic_activity("LLVM_fatlto");

    check_lto_allowed(&cgcx);

    for (module, wp) in import_only_modules {
        needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
    }

    let module = B::run_and_optimize_fat_lto(
        cgcx,
        exported_symbols_for_lto,
        each_linked_rlib_for_lto,
        needs_fat_lto,
    );
    B::codegen(cgcx, module, &cgcx.module_config)
}

fn do_thin_lto<'a, B: ExtraBackendMethods>(
    cgcx: &'a CodegenContext<B>,
    exported_symbols_for_lto: Arc<Vec<String>>,
    each_linked_rlib_for_lto: Vec<PathBuf>,
    needs_thin_lto: Vec<(String, <B as WriteBackendMethods>::ThinBuffer)>,
    lto_import_only_modules: Vec<(
        SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>,
        WorkProduct,
    )>,
) -> Vec<CompiledModule> {
    let _timer = cgcx.prof.verbose_generic_activity("LLVM_thinlto");

    check_lto_allowed(&cgcx);

    let (coordinator_send, coordinator_receive) = channel();

    // First up, convert our jobserver into a helper thread so we can use normal
    // mpsc channels to manage our messages and such. Once we've requested
    // tokens, they will arrive on `coordinator_receive` and be handled in the
    // main loop below.
    let coordinator_send2 = coordinator_send.clone();
    let helper = jobserver::client()
        .into_helper_thread(move |token| {
            drop(coordinator_send2.send(ThinLtoMessage::Token(token)));
        })
        .expect("failed to spawn helper thread");

    let mut work_items = vec![];

    // We have LTO work to do. Perform the serial work here of
    // figuring out what we're going to LTO and then push a
    // bunch of work items onto our queue to do LTO. This all
    // happens on the coordinator thread but it's very quick so
    // we don't worry about tokens.
    for (work, cost) in generate_thin_lto_work(
        cgcx,
        &exported_symbols_for_lto,
        &each_linked_rlib_for_lto,
        needs_thin_lto,
        lto_import_only_modules,
    ) {
        let insertion_index =
            work_items.binary_search_by_key(&cost, |&(_, cost)| cost).unwrap_or_else(|e| e);
        work_items.insert(insertion_index, (work, cost));
        if cgcx.parallel {
            helper.request_token();
        }
    }

    let mut codegen_aborted = None;

    // These are the Jobserver Tokens we currently hold. Does not include
    // the implicit Token the compiler process owns no matter what.
    let mut tokens = vec![];

    // Number of tokens currently in use (including the implicit token).
    let mut used_token_count = 0;

    let mut compiled_modules = vec![];

    // Run the message loop while there's still anything that needs message
    // processing. Note that as soon as codegen is aborted we simply want to
    // wait for all existing work to finish, so many of the conditions here
    // only apply if codegen hasn't been aborted as they represent pending
    // work to be done.
    loop {
        if codegen_aborted.is_none() {
            if used_token_count == 0 && work_items.is_empty() {
                // All codegen work is done.
                break;
            }

            // Spin up what work we can, only doing this while we've got available
            // parallelism slots and work left to spawn.
            while used_token_count < tokens.len() + 1
                && let Some((item, _)) = work_items.pop()
            {
                spawn_thin_lto_work(&cgcx, coordinator_send.clone(), item);
                used_token_count += 1;
            }
        } else {
            // Don't queue up any more work if codegen was aborted; we're
            // just waiting for our existing children to finish.
            if used_token_count == 0 {
                break;
            }
        }

        // Relinquish accidentally acquired extra tokens. Subtract 1 for the implicit token.
        tokens.truncate(used_token_count.saturating_sub(1));

        match coordinator_receive.recv().unwrap() {
            // Save the token locally and the next turn of the loop will use
            // this to spawn a new unit of work, or it may get dropped
            // immediately if we have no more work to spawn.
            ThinLtoMessage::Token(token) => match token {
                Ok(token) => {
                    tokens.push(token);
                }
                Err(e) => {
                    let msg = &format!("failed to acquire jobserver token: {e}");
                    cgcx.diag_emitter.fatal(msg);
                    codegen_aborted = Some(FatalError);
                }
            },

            ThinLtoMessage::WorkItem { result } => {
                // If a thread exits successfully then we drop a token associated
                // with that worker and update our `used_token_count` count.
                // We may later re-acquire a token to continue running more work.
                // We may also not actually drop a token here if the worker was
                // running with an "ephemeral token".
                used_token_count -= 1;

                match result {
                    Ok(compiled_module) => compiled_modules.push(compiled_module),
                    Err(Some(WorkerFatalError)) => {
                        // Like `CodegenAborted`, wait for remaining work to finish.
                        codegen_aborted = Some(FatalError);
                    }
                    Err(None) => {
                        // If the thread failed that means it panicked, so
                        // we abort immediately.
                        bug!("worker thread panicked");
                    }
                }
            }
        }
    }

    if let Some(codegen_aborted) = codegen_aborted {
        codegen_aborted.raise();
    }

    compiled_modules
}

fn execute_thin_lto_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    module: lto::ThinModule<B>,
) -> CompiledModule {
    let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", module.name());

    let module = B::optimize_thin(cgcx, module);
    B::codegen(cgcx, module, &cgcx.module_config)
}

/// Messages sent to the coordinator.
pub(crate) enum Message<B: WriteBackendMethods> {
    /// A jobserver token has become available. Sent from the jobserver helper
    /// thread.
    Token(io::Result<Acquired>),

    /// The backend has finished processing a work item for a codegen unit.
    /// Sent from a backend worker thread.
    WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>> },

    /// The frontend has finished generating something (backend IR or a
    /// post-LTO artifact) for a codegen unit, and it should be passed to the
    /// backend. Sent from the main thread.
    CodegenDone { llvm_work_item: WorkItem<B>, cost: u64 },

    /// Similar to `CodegenDone`, but for reusing a pre-LTO artifact.
    /// Sent from the main thread.
    AddImportOnlyModule {
        module_data: SerializedModule<B::ModuleBuffer>,
        work_product: WorkProduct,
    },

    /// The frontend has finished generating everything for all codegen units.
    /// Sent from the main thread.
    CodegenComplete,

    /// Some normal-ish compiler error occurred, and codegen should be wound
    /// down. Sent from the main thread.
    CodegenAborted,
}

/// Messages sent to the coordinator.
pub(crate) enum ThinLtoMessage {
    /// A jobserver token has become available. Sent from the jobserver helper
    /// thread.
    Token(io::Result<Acquired>),

    /// The backend has finished processing a work item for a codegen unit.
    /// Sent from a backend worker thread.
    WorkItem { result: Result<CompiledModule, Option<WorkerFatalError>> },
}

/// A message sent from the coordinator thread to the main thread telling it to
/// process another codegen unit.
pub struct CguMessage;

// A cut-down version of `rustc_errors::DiagInner` that impls `Send`, which
// can be used to send diagnostics from codegen threads to the main thread.
// It's missing the following fields from `rustc_errors::DiagInner`.
// - `span`: it doesn't impl `Send`.
// - `suggestions`: it doesn't impl `Send`, and isn't used for codegen
//   diagnostics.
// - `sort_span`: it doesn't impl `Send`.
// - `is_lint`: lints aren't relevant during codegen.
// - `emitted_at`: not used for codegen diagnostics.
struct Diagnostic {
    span: Vec<SpanData>,
    level: Level,
    messages: Vec<(DiagMessage, Style)>,
    code: Option<ErrCode>,
    children: Vec<Subdiagnostic>,
    args: DiagArgMap,
}

// A cut-down version of `rustc_errors::Subdiag` that impls `Send`. It's
// missing the following fields from `rustc_errors::Subdiag`.
// - `span`: it doesn't impl `Send`.
struct Subdiagnostic {
    level: Level,
    messages: Vec<(DiagMessage, Style)>,
}

#[derive(PartialEq, Clone, Copy, Debug)]
enum MainThreadState {
    /// Doing nothing.
    Idle,

    /// Doing codegen, i.e. MIR-to-LLVM-IR conversion.
    Codegenning,

    /// Idle, but lending the compiler process's Token to an LLVM thread so it can do useful work.
    Lending,
}

fn start_executing_work<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    crate_info: &CrateInfo,
    shared_emitter: SharedEmitter,
    codegen_worker_send: Sender<CguMessage>,
    coordinator_receive: Receiver<Message<B>>,
    regular_config: Arc<ModuleConfig>,
    allocator_config: Arc<ModuleConfig>,
    allocator_module: Option<ModuleCodegen<B::Module>>,
    coordinator_send: Sender<Message<B>>,
) -> thread::JoinHandle<Result<CompiledModules, ()>> {
    let sess = tcx.sess;

    let mut each_linked_rlib_for_lto = Vec::new();
    let mut each_linked_rlib_file_for_lto = Vec::new();
    drop(link::each_linked_rlib(crate_info, None, &mut |cnum, path| {
        if link::ignored_for_lto(sess, crate_info, cnum) {
            return;
        }
        each_linked_rlib_for_lto.push(cnum);
        each_linked_rlib_file_for_lto.push(path.to_path_buf());
    }));

    // Compute the set of symbols we need to retain when doing LTO (if we need to)
    let exported_symbols_for_lto =
        Arc::new(lto::exported_symbols_for_lto(tcx, &each_linked_rlib_for_lto));

    // First up, convert our jobserver into a helper thread so we can use normal
    // mpsc channels to manage our messages and such. Once we've requested
    // tokens, they will arrive on `coordinator_receive` and be handled in the
    // main loop below.
    let coordinator_send2 = coordinator_send.clone();
    let helper = jobserver::client()
        .into_helper_thread(move |token| {
            drop(coordinator_send2.send(Message::Token::<B>(token)));
        })
        .expect("failed to spawn helper thread");

    let ol = tcx.backend_optimization_level(());
    let backend_features = tcx.global_backend_features(());

    let remark_dir = if let Some(ref dir) = sess.opts.unstable_opts.remark_dir {
        let result = fs::create_dir_all(dir).and_then(|_| dir.canonicalize());
        match result {
            Ok(dir) => Some(dir),
            Err(error) => sess.dcx().emit_fatal(ErrorCreatingRemarkDir { error }),
        }
    } else {
        None
    };

    let cgcx = CodegenContext::<B> {
        crate_types: tcx.crate_types().to_vec(),
        lto: sess.lto(),
        fewer_names: sess.fewer_names(),
        save_temps: sess.opts.cg.save_temps,
        time_trace: sess.opts.unstable_opts.llvm_time_trace,
        opts: Arc::new(sess.opts.clone()),
        prof: sess.prof.clone(),
        remark: sess.opts.cg.remark.clone(),
        remark_dir,
        incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
        diag_emitter: shared_emitter.clone(),
        output_filenames: Arc::clone(tcx.output_filenames(())),
        module_config: regular_config,
        allocator_config,
        tm_factory: backend.target_machine_factory(tcx.sess, ol, backend_features),
        msvc_imps_needed: msvc_imps_needed(tcx),
        is_pe_coff: tcx.sess.target.is_like_windows,
        target_can_use_split_dwarf: tcx.sess.target_can_use_split_dwarf(),
        target_arch: tcx.sess.target.arch.to_string(),
        target_is_like_darwin: tcx.sess.target.is_like_darwin,
        target_is_like_aix: tcx.sess.target.is_like_aix,
        target_is_like_gpu: tcx.sess.target.is_like_gpu,
        split_debuginfo: tcx.sess.split_debuginfo(),
        split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
        parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend,
        pointer_size: tcx.data_layout.pointer_size(),
        invocation_temp: sess.invocation_temp.clone(),
    };

    // This is the "main loop" of parallel work happening for parallel codegen.
    // It's here that we manage parallelism, schedule work, and work with
    // messages coming from clients.
    //
    // There are a few environmental pre-conditions that shape how the system
    // is set up:
    //
    // - Error reporting can only happen on the main thread because that's the
    //   only place where we have access to the compiler `Session`.
    // - LLVM work can be done on any thread.
    // - Codegen can only happen on the main thread.
    // - Each thread doing substantial work must be in possession of a `Token`
    //   from the `Jobserver`.
    // - The compiler process always holds one `Token`. Any additional `Tokens`
    //   have to be requested from the `Jobserver`.
    //
    // Error Reporting
    // ===============
    // The error reporting restriction is handled separately from the rest: We
    // set up a `SharedEmitter` that holds an open channel to the main thread.
    // When an error occurs on any thread, the shared emitter will send the
    // error message to the receiver main thread (`SharedEmitterMain`). The
    // main thread will periodically query this error message queue and emit
    // any error messages it has received. It might even abort compilation if
    // it has received a fatal error. In this case we rely on all other threads
    // being torn down automatically with the main thread.
    // Since the main thread will often be busy doing codegen work, error
    // reporting will be somewhat delayed, since the message queue can only be
    // checked in between two work packages.
    //
    // Work Processing Infrastructure
    // ==============================
    // The work processing infrastructure knows three major actors:
    //
    // - the coordinator thread,
    // - the main thread, and
    // - LLVM worker threads
    //
    // The coordinator thread is running a message loop. It instructs the main
    // thread about what work to do when, and it will spawn off LLVM worker
    // threads as open LLVM WorkItems become available.
    //
    // The job of the main thread is to codegen CGUs into LLVM work packages
    // (since the main thread is the only thread that can do this). The main
    // thread will block until it receives a message from the coordinator, upon
    // which it will codegen one CGU, send it to the coordinator and block
    // again. This way the coordinator can control what the main thread is
    // doing.
    //
    // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
    // available, it will spawn off a new LLVM worker thread and let it process
1373    // a WorkItem. When an LLVM worker thread is done with its WorkItem,
1374    // it will just shut down, which also frees all resources associated with
1375    // the given LLVM module, and sends a message to the coordinator that the
1376    // WorkItem has been completed.
1377    //
1378    // Work Scheduling
1379    // ===============
1380    // The scheduler's goal is to minimize the time it takes to complete all
1381    // work there is. However, we also want to keep memory consumption as
1382    // low as possible. These two goals are at odds with each other: If memory
1383    // consumption were not an issue, we could just let the main thread produce
1384    // LLVM WorkItems at full speed, ensuring maximal utilization of
1385    // Tokens/LLVM worker threads. However, since codegen is usually faster
1386    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
1387    // WorkItem potentially holds on to a substantial amount of memory.
1388    //
1389    // So the actual goal is to always produce just enough LLVM WorkItems so
1390    // as not to starve our LLVM worker threads. That means, once we have enough
1391    // WorkItems in our queue, we can block the main thread, so it does not
1392    // produce more until we need them.
1393    //
1394    // Doing LLVM Work on the Main Thread
1395    // ----------------------------------
1396    // Since the main thread owns the compiler process's implicit `Token`, it is
1397    // wasteful to keep it blocked without doing any work. Therefore, what we do
1398    // in this case is: We spawn off an additional LLVM worker thread that helps
1399    // reduce the queue. The work it is doing corresponds to the implicit
1400    // `Token`. The coordinator will mark the main thread as being busy with
1401    // LLVM work. (The actual work happens on another OS thread but we just care
1402    // about `Tokens`, not actual threads).
1403    //
1404    // When any LLVM worker thread finishes while the main thread is marked as
1405    // "busy with LLVM work", we can do a little switcheroo: We give the Token
1406    // of the just finished thread to the LLVM worker thread that is working on
1407    // behalf of the main thread's implicit Token, thus freeing up the main
1408    // thread again. The coordinator can then again decide what the main thread
1409    // should do. This allows the coordinator to make decisions at more points
1410    // in time.
1411    //
1412    // Striking a Balance between Throughput and Memory Consumption
1413    // ------------------------------------------------------------
1414    // Since our two goals, (1) use as many Tokens as possible and (2) keep
1415    // memory consumption as low as possible, are in conflict with each other,
1416    // we have to find a trade-off between them. Right now, the goal is to keep
1417    // all workers busy, which means that no worker should find the queue empty
1418    // when it is ready to start.
1419    // How do we achieve this? Good question :) We actually never know how
1420    // many `Tokens` are potentially available so it's hard to say how much to
1421    // fill up the queue before switching the main thread to LLVM work. Also we
1422    // currently don't have a means to estimate how long a running LLVM worker
1423    // will still be busy with its current WorkItem. However, we know the
1424    // maximal count of available Tokens that makes sense (=the number of CPU
1425    // cores), so we can take a conservative guess. The heuristic we use here
1426    // is implemented in the `queue_full_enough()` function.
1427    //
1428    // Some Background on Jobservers
1429    // -----------------------------
1430    // It's also worth touching on the management of parallelism here. We don't
1431    // want to just spawn a thread per work item because while that's optimal
1432    // parallelism it may overload a system with too many threads or violate our
1433    // configuration for the maximum amount of CPU to use for this process. To
1434    // manage this we use the `jobserver` crate.
1435    //
1436    // Job servers are an artifact of GNU make and are used to manage
1437    // parallelism between processes. A jobserver is basically a glorified IPC
1438    // semaphore. Whenever we want to run some work we acquire the semaphore,
1439    // and whenever we're done with that work we release the semaphore. In this
1440    // manner we can ensure that the maximum number of parallel workers is
1441    // capped at any one point in time.
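        //
        // As a minimal illustration (rustc itself goes through the wrappers in
        // `rustc_data_structures::jobserver` rather than using the crate
        // directly), the `jobserver` crate's pattern is roughly:
        //
        //     let client = jobserver::Client::new(4).unwrap(); // or from_env()
        //     let token = client.acquire().unwrap(); // blocks until a slot is free
        //     // ... do one unit of parallel work ...
        //     drop(token); // dropping the token releases the slot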
1442    //
1443    // LTO and the coordinator thread
1444    // ------------------------------
1445    //
1446    // The final job the coordinator thread is responsible for is managing LTO
1447    // and how that works. When LTO is requested, we collect all
1448    // optimized LLVM modules into a local vector on the coordinator. Once all
1449    // modules have been codegened and optimized we hand this to the `lto`
1450    // module for further optimization. The `lto` module will return a list
1451    // of more modules to work on, which the coordinator will continue to spawn
1452    // work for.
1453    //
1454    // Each LLVM module is automatically sent back to the coordinator for LTO if
1455    // necessary. There are already optimizations in place to avoid sending work
1456    // back to the coordinator if LTO isn't requested.
1457    return B::spawn_named_thread(cgcx.time_trace, "coordinator".to_string(), move || {
1458        // This is where we collect codegen units that have gone all the way
1459        // through codegen and LLVM.
1460        let mut compiled_modules = vec![];
1461        let mut needs_fat_lto = Vec::new();
1462        let mut needs_thin_lto = Vec::new();
1463        let mut lto_import_only_modules = Vec::new();
1464
1465        /// Possible state transitions:
1466        /// - Ongoing -> Completed
1467        /// - Ongoing -> Aborted
1468        /// - Completed -> Aborted
1469        #[derive(Debug, PartialEq)]
1470        enum CodegenState {
1471            Ongoing,
1472            Completed,
1473            Aborted,
1474        }
1475        use CodegenState::*;
1476        let mut codegen_state = Ongoing;
1477
1478        // This is the queue of LLVM work items that still need processing.
1479        let mut work_items = Vec::<(WorkItem<B>, u64)>::new();
1480
1481        // These are the Jobserver Tokens we currently hold. Does not include
1482        // the implicit Token the compiler process owns no matter what.
1483        let mut tokens = Vec::new();
1484
1485        let mut main_thread_state = MainThreadState::Idle;
1486
1487        // How many LLVM worker threads are running while holding a Token. This
1488        // *excludes* any that the main thread is lending a Token to.
1489        let mut running_with_own_token = 0;
1490
1491        // How many LLVM worker threads are running in total. This *includes*
1492        // any that the main thread is lending a Token to.
1493        let running_with_any_token = |main_thread_state, running_with_own_token| {
1494            running_with_own_token
1495                + if main_thread_state == MainThreadState::Lending { 1 } else { 0 }
1496        };
1497
1498        let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;
1499
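            // The allocator module, if any, is optimized eagerly right here on
            // the coordinator thread, before the main message loop starts. If it
            // turns out to need LTO instead, it simply joins the LTO queues below.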
1500        let compiled_allocator_module = allocator_module.and_then(|allocator_module| {
1501            match execute_optimize_work_item(&cgcx, allocator_module) {
1502                WorkItemResult::Finished(compiled_module) => return Some(compiled_module),
1503                WorkItemResult::NeedsFatLto(fat_lto_input) => needs_fat_lto.push(fat_lto_input),
1504                WorkItemResult::NeedsThinLto(name, thin_buffer) => {
1505                    needs_thin_lto.push((name, thin_buffer))
1506                }
1507            }
1508            None
1509        });
1510
1511        // Run the message loop while there's still anything that needs message
1512        // processing. Note that as soon as codegen is aborted we simply want to
1513        // wait for all existing work to finish, so many of the conditions here
1514        // only apply if codegen hasn't been aborted as they represent pending
1515        // work to be done.
1516        loop {
1517            // While there are still CGUs to be codegened, the coordinator has
1518            // to decide how to utilize the compiler process's implicit Token:
1519            // for codegenning more CGUs or for running them through LLVM.
1520            if codegen_state == Ongoing {
1521                if main_thread_state == MainThreadState::Idle {
1522                    // Compute the number of workers that will be running once we've taken as many
1523                    // items from the work queue as we can, plus one for the main thread. It's not
1524                    // critically important that we use this instead of just
1525                    // `running_with_own_token`, but it prevents the `queue_full_enough` heuristic
1526                    // from fluctuating just because a worker finished up and we decreased the
1527                    // `running_with_own_token` count, even though we're just going to increase it
1528                    // right after this when we put a new worker to work.
1529                    let extra_tokens = tokens.len().checked_sub(running_with_own_token).unwrap();
1530                    let additional_running = std::cmp::min(extra_tokens, work_items.len());
1531                    let anticipated_running = running_with_own_token + additional_running + 1;
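                    // For example: 3 workers running, 4 tokens held, and 5 items
                    // queued gives extra_tokens = 1, additional_running =
                    // min(1, 5) = 1, and anticipated_running = 3 + 1 + 1 = 5.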
1532
1533                    if !queue_full_enough(work_items.len(), anticipated_running) {
1534                        // The queue is not full enough, process more codegen units:
1535                        if codegen_worker_send.send(CguMessage).is_err() {
1536                            panic!("Could not send CguMessage to main thread")
1537                        }
1538                        main_thread_state = MainThreadState::Codegenning;
1539                    } else {
1540                        // The queue is full enough to not let the worker
1541                        // threads starve. Use the implicit Token to do some
1542                        // LLVM work too.
1543                        let (item, _) =
1544                            work_items.pop().expect("queue empty - queue_full_enough() broken?");
1545                        main_thread_state = MainThreadState::Lending;
1546                        spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
1547                    }
1548                }
1549            } else if codegen_state == Completed {
1550                if running_with_any_token(main_thread_state, running_with_own_token) == 0
1551                    && work_items.is_empty()
1552                {
1553                    // All codegen work is done.
1554                    break;
1555                }
1556
1557                // In this branch, we know that everything has been codegened,
1558                // so it's just a matter of determining whether the implicit
1559                // Token is free to use for LLVM work.
1560                match main_thread_state {
1561                    MainThreadState::Idle => {
1562                        if let Some((item, _)) = work_items.pop() {
1563                            main_thread_state = MainThreadState::Lending;
1564                            spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
1565                        } else {
1566                            // There is no unstarted work, so let the main thread
1567                            // take over for a running worker. Otherwise the
1568                            // implicit token would just go to waste.
1569                            // We reduce `running_with_own_token` by one. The
1570                            // `tokens.truncate()` below will take care of
1571                            // giving the Token back.
1572                            assert!(running_with_own_token > 0);
1573                            running_with_own_token -= 1;
1574                            main_thread_state = MainThreadState::Lending;
1575                        }
1576                    }
1577                    MainThreadState::Codegenning => bug!(
1578                        "codegen worker should not be codegenning after \
1579                         codegen was already completed"
1580                    ),
1581                    MainThreadState::Lending => {
1582                        // Already making good use of that token
1583                    }
1584                }
1585            } else {
1586                // Don't queue up any more work if codegen was aborted, we're
1587                // just waiting for our existing children to finish.
1588                assert!(codegen_state == Aborted);
1589                if running_with_any_token(main_thread_state, running_with_own_token) == 0 {
1590                    break;
1591                }
1592            }
1593
1594            // Spin up what work we can, only doing this while we've got available
1595            // parallelism slots and work left to spawn.
1596            if codegen_state != Aborted {
1597                while running_with_own_token < tokens.len()
1598                    && let Some((item, _)) = work_items.pop()
1599                {
1600                    spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
1601                    running_with_own_token += 1;
1602                }
1603            }
1604
1605            // Relinquish accidentally acquired extra tokens.
1606            tokens.truncate(running_with_own_token);
1607
1608            match coordinator_receive.recv().unwrap() {
1609                // Save the token locally and the next turn of the loop will use
1610                // this to spawn a new unit of work, or it may get dropped
1611                // immediately if we have no more work to spawn.
1612                Message::Token(token) => {
1613                    match token {
1614                        Ok(token) => {
1615                            tokens.push(token);
1616
1617                            if main_thread_state == MainThreadState::Lending {
1618                                // If the main thread token is used for LLVM work
1619                                // at the moment, we turn that thread into a regular
1620                                // LLVM worker thread, so the main thread is free
1621                                // to react to codegen demand.
1622                                main_thread_state = MainThreadState::Idle;
1623                                running_with_own_token += 1;
1624                            }
1625                        }
1626                        Err(e) => {
1627                            let msg = &format!("failed to acquire jobserver token: {e}");
1628                            shared_emitter.fatal(msg);
1629                            codegen_state = Aborted;
1630                        }
1631                    }
1632                }
1633
1634                Message::CodegenDone { llvm_work_item, cost } => {
1635                    // We keep the queue sorted by estimated processing cost,
1636                    // so that more expensive items are processed earlier. This
1637                    // is good for throughput as it gives the main thread more
1638                    // time to fill up the queue, and it avoids leaving
1639                    // expensive items until the end.
1640                    // Note, however, that this is not ideal for memory
1641                    // consumption, as LLVM module sizes are not evenly
1642                    // distributed.
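                    // Ties are fine: for items of equal cost, the found index
                    // and the insertion point are interchangeable.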
1643                    let insertion_index = work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
1644                    let insertion_index = match insertion_index {
1645                        Ok(idx) | Err(idx) => idx,
1646                    };
1647                    work_items.insert(insertion_index, (llvm_work_item, cost));
1648
1649                    if cgcx.parallel {
1650                        helper.request_token();
1651                    }
1652                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
1653                    main_thread_state = MainThreadState::Idle;
1654                }
1655
1656                Message::CodegenComplete => {
1657                    if codegen_state != Aborted {
1658                        codegen_state = Completed;
1659                    }
1660                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
1661                    main_thread_state = MainThreadState::Idle;
1662                }
1663
1664                // Codegen being aborted means that some normal-ish compiler
1665                // error occurred. In this situation we want
1666                // to exit as soon as possible, but we want to make sure all
1667                // existing work has finished. Flag codegen as being done, and
1668                // then conditions above will ensure no more work is spawned but
1669                // we'll keep executing this loop until `running_with_own_token`
1670                // hits 0.
1671                Message::CodegenAborted => {
1672                    codegen_state = Aborted;
1673                }
1674
1675                Message::WorkItem { result } => {
1676                    // If a thread exits successfully then we drop a token associated
1677                    // with that worker and update our `running_with_own_token` count.
1678                    // We may later re-acquire a token to continue running more work.
1679                    // We may also not actually drop a token here if the worker was
1680                    // running with an "ephemeral token".
1681                    if main_thread_state == MainThreadState::Lending {
1682                        main_thread_state = MainThreadState::Idle;
1683                    } else {
1684                        running_with_own_token -= 1;
1685                    }
1686
1687                    match result {
1688                        Ok(WorkItemResult::Finished(compiled_module)) => {
1689                            compiled_modules.push(compiled_module);
1690                        }
1691                        Ok(WorkItemResult::NeedsFatLto(fat_lto_input)) => {
1692                            assert!(needs_thin_lto.is_empty());
1693                            needs_fat_lto.push(fat_lto_input);
1694                        }
1695                        Ok(WorkItemResult::NeedsThinLto(name, thin_buffer)) => {
1696                            assert!(needs_fat_lto.is_empty());
1697                            needs_thin_lto.push((name, thin_buffer));
1698                        }
1699                        Err(Some(WorkerFatalError)) => {
1700                            // Like `CodegenAborted`, wait for remaining work to finish.
1701                            codegen_state = Aborted;
1702                        }
1703                        Err(None) => {
1704                            // If the thread failed that means it panicked, so
1705                            // we abort immediately.
1706                            bug!("worker thread panicked");
1707                        }
1708                    }
1709                }
1710
1711                Message::AddImportOnlyModule { module_data, work_product } => {
1712                    assert_eq!(codegen_state, Ongoing);
1713                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
1714                    lto_import_only_modules.push((module_data, work_product));
1715                    main_thread_state = MainThreadState::Idle;
1716                }
1717            }
1718        }
1719
1720        // Drop to print timings
1721        drop(llvm_start_time);
1722
1723        if codegen_state == Aborted {
1724            return Err(());
1725        }
1726
1727        drop(codegen_state);
1728        drop(tokens);
1729        drop(helper);
1730        assert!(work_items.is_empty());
1731
1732        if !needs_fat_lto.is_empty() {
1733            assert!(compiled_modules.is_empty());
1734            assert!(needs_thin_lto.is_empty());
1735
1736            // This uses the implicit token
1737            let module = do_fat_lto(
1738                &cgcx,
1739                &exported_symbols_for_lto,
1740                &each_linked_rlib_file_for_lto,
1741                needs_fat_lto,
1742                lto_import_only_modules,
1743            );
1744            compiled_modules.push(module);
1745        } else if !needs_thin_lto.is_empty() || !lto_import_only_modules.is_empty() {
1746            assert!(compiled_modules.is_empty());
1747            assert!(needs_fat_lto.is_empty());
1748
1749            compiled_modules.extend(do_thin_lto(
1750                &cgcx,
1751                exported_symbols_for_lto,
1752                each_linked_rlib_file_for_lto,
1753                needs_thin_lto,
1754                lto_import_only_modules,
1755            ));
1756        }
1757
1758        // Regardless of what order these modules completed in, report them to
1759        // the backend in the same order every time to ensure that we're handing
1760        // out deterministic results.
1761        compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));
1762
1763        Ok(CompiledModules {
1764            modules: compiled_modules,
1765            allocator_module: compiled_allocator_module,
1766        })
1767    })
1768    .expect("failed to spawn coordinator thread");
1769
1770    // A heuristic that determines if we have enough LLVM WorkItems in the
1771    // queue so that the main thread can do LLVM work instead of codegen
1772    fn queue_full_enough(items_in_queue: usize, workers_running: usize) -> bool {
1773        // This heuristic scales ahead-of-time codegen according to available
1774        // concurrency, as measured by `workers_running`. The idea is that the
1775        // more concurrency we have available, the more demand there will be for
1776        // work items, and the fuller the queue should be kept to meet demand.
1777        // An important property of this approach is that we codegen ahead of
1778        // time only as much as necessary, so as to keep fewer LLVM modules in
1779        // memory at once, thereby reducing memory consumption.
1780        //
1781        // When the number of workers running is less than the max concurrency
1782        // available to us, this heuristic can cause us to instruct the main
1783        // thread to work on an LLVM item (that is, tell it to "LLVM") instead
1784        // of codegen, even though it seems like it *should* be codegenning so
1785        // that we can create more work items and spawn more LLVM workers.
1786        //
1787        // But this is not a problem. When the main thread is told to LLVM,
1788        // according to this heuristic and how work is scheduled, there is
1789        // always at least one item in the queue, and therefore at least one
1790        // pending jobserver token request. If there *is* more concurrency
1791        // available, we will immediately receive a token, which will upgrade
1792        // the main thread's LLVM worker to a real one (conceptually), and free
1793        // up the main thread to codegen if necessary. On the other hand, if
1794        // there isn't more concurrency, then the main thread working on an LLVM
1795        // item is appropriate, as long as the queue is full enough for demand.
1796        //
1797        // Speaking of which, how full should we keep the queue? Probably less
1798        // full than you'd think. A lot has to go wrong for the queue not to be
1799        // full enough and for that to have a negative effect on compile times.
1800        //
1801        // Workers are unlikely to finish at exactly the same time, so when one
1802        // finishes and takes another work item off the queue, we often have
1803        // ample time to codegen at that point before the next worker finishes.
1804        // But suppose that codegen takes so long that the workers exhaust the
1805        // queue, and we have one or more workers that have nothing to work on.
1806        // Well, it might not be so bad. Of all the LLVM modules we create and
1807        // optimize, one has to finish last. It's not necessarily the case that
1808        // by losing some concurrency for a moment, we delay the point at which
1809        // that last LLVM module is finished and the rest of compilation can
1810        // proceed. Also, when we can't take advantage of some concurrency, we
1811        // give tokens back to the job server. That enables some other rustc to
1812        // potentially make use of the available concurrency. That could even
1813        // *decrease* overall compile time if we're lucky. But yes, if no other
1814        // rustc can make use of the concurrency, then we've squandered it.
1815        //
1816        // However, keeping the queue full is also beneficial when we have a
1817        // surge in available concurrency. Then items can be taken from the
1818        // queue immediately, without having to wait for codegen.
1819        //
1820        // So, the heuristic below tries to keep one item in the queue for every
1821        // four running workers. Based on limited benchmarking, this appears to
1822        // be more than sufficient to avoid increasing compilation times.
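        // The subtraction form below computes ceil(workers_running / 4): e.g.
        // 8 workers -> 8 - 6 = 2 queued items, 5 workers -> 5 - 3 = 2.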
1823        let quarter_of_workers = workers_running - 3 * workers_running / 4;
1824        items_in_queue > 0 && items_in_queue >= quarter_of_workers
1825    }
1826}
1827
1828/// Sent in place of `FatalError`, which is explicitly not `Send`.
1829#[must_use]
1830pub(crate) struct WorkerFatalError;
1831
1832fn spawn_work<'a, B: ExtraBackendMethods>(
1833    cgcx: &'a CodegenContext<B>,
1834    coordinator_send: Sender<Message<B>>,
1835    llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
1836    work: WorkItem<B>,
1837) {
1838    if llvm_start_time.is_none() {
1839        *llvm_start_time = Some(cgcx.prof.verbose_generic_activity("LLVM_passes"));
1840    }
1841
1842    let cgcx = cgcx.clone();
1843
1844    B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
1845        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
1846            WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, m),
1847            WorkItem::CopyPostLtoArtifacts(m) => {
1848                WorkItemResult::Finished(execute_copy_from_cache_work_item(&cgcx, m))
1849            }
1850        }));
1851
1852        let msg = match result {
1853            Ok(result) => Message::WorkItem::<B> { result: Ok(result) },
1854
1855        // We ignore any `FatalError` coming out of the work item execution, as a
1856            // diagnostic was already sent off to the main thread - just surface
1857            // that there was an error in this worker.
1858            Err(err) if err.is::<FatalErrorMarker>() => {
1859                Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) }
1860            }
1861
1862            Err(_) => Message::WorkItem::<B> { result: Err(None) },
1863        };
1864        drop(coordinator_send.send(msg));
1865    })
1866    .expect("failed to spawn work thread");
1867}
1868
1869fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>(
1870    cgcx: &'a CodegenContext<B>,
1871    coordinator_send: Sender<ThinLtoMessage>,
1872    work: ThinLtoWorkItem<B>,
1873) {
1874    let cgcx = cgcx.clone();
1875
1876    B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
1877        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
1878            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => execute_copy_from_cache_work_item(&cgcx, m),
1879            ThinLtoWorkItem::ThinLto(m) => execute_thin_lto_work_item(&cgcx, m),
1880        }));
1881
1882        let msg = match result {
1883            Ok(result) => ThinLtoMessage::WorkItem { result: Ok(result) },
1884
1885        // We ignore any `FatalError` coming out of the work item execution, as a
1886            // diagnostic was already sent off to the main thread - just surface
1887            // that there was an error in this worker.
1888            Err(err) if err.is::<FatalErrorMarker>() => {
1889                ThinLtoMessage::WorkItem { result: Err(Some(WorkerFatalError)) }
1890            }
1891
1892            Err(_) => ThinLtoMessage::WorkItem { result: Err(None) },
1893        };
1894        drop(coordinator_send.send(msg));
1895    })
1896    .expect("failed to spawn work thread");
1897}
1898
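/// Messages sent from codegen/LLVM worker threads to the main thread, which
/// owns the `Session` and is therefore the only thread that may emit
/// diagnostics.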
1899enum SharedEmitterMessage {
1900    Diagnostic(Diagnostic),
1901    InlineAsmError(InlineAsmError),
1902    Fatal(String),
1903}
1904
1905pub struct InlineAsmError {
1906    pub span: SpanData,
1907    pub msg: String,
1908    pub level: Level,
1909    pub source: Option<(String, Vec<InnerSpan>)>,
1910}
1911
1912#[derive(Clone)]
1913pub struct SharedEmitter {
1914    sender: Sender<SharedEmitterMessage>,
1915}
1916
1917pub struct SharedEmitterMain {
1918    receiver: Receiver<SharedEmitterMessage>,
1919}
1920
1921impl SharedEmitter {
1922    fn new() -> (SharedEmitter, SharedEmitterMain) {
1923        let (sender, receiver) = channel();
1924
1925        (SharedEmitter { sender }, SharedEmitterMain { receiver })
1926    }
1927
1928    pub fn inline_asm_error(&self, err: InlineAsmError) {
1929        drop(self.sender.send(SharedEmitterMessage::InlineAsmError(err)));
1930    }
1931
1932    fn fatal(&self, msg: &str) {
1933        drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
1934    }
1935}
1936
1937impl Emitter for SharedEmitter {
1938    fn emit_diagnostic(
1939        &mut self,
1940        mut diag: rustc_errors::DiagInner,
1941        _registry: &rustc_errors::registry::Registry,
1942    ) {
1943        // Check that we aren't missing anything interesting when converting to
1944        // the cut-down local `Diagnostic`.
1945        assert!(!diag.span.has_span_labels());
1946        assert_eq!(diag.suggestions, Suggestions::Enabled(vec![]));
1947        assert_eq!(diag.sort_span, rustc_span::DUMMY_SP);
1948        assert_eq!(diag.is_lint, None);
1949        // No sensible check for `diag.emitted_at`.
1950
1951        let args = mem::replace(&mut diag.args, DiagArgMap::default());
1952        drop(
1953            self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
1954                span: diag.span.primary_spans().iter().map(|span| span.data()).collect::<Vec<_>>(),
1955                level: diag.level(),
1956                messages: diag.messages,
1957                code: diag.code,
1958                children: diag
1959                    .children
1960                    .into_iter()
1961                    .map(|child| Subdiagnostic { level: child.level, messages: child.messages })
1962                    .collect(),
1963                args,
1964            })),
1965        );
1966    }
1967
1968    fn source_map(&self) -> Option<&SourceMap> {
1969        None
1970    }
1971
1972    fn translator(&self) -> &Translator {
1973        panic!("shared emitter attempted to translate a diagnostic");
1974    }
1975}
1976
1977impl SharedEmitterMain {
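    /// Drains the messages that worker threads have sent to the shared
    /// emitter and emits them on the main thread. If `blocking` is true, keeps
    /// waiting until the channel is closed (i.e. all senders are gone);
    /// otherwise returns as soon as the queue is empty.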
1978    fn check(&self, sess: &Session, blocking: bool) {
1979        loop {
1980            let message = if blocking {
1981                match self.receiver.recv() {
1982                    Ok(message) => Ok(message),
1983                    Err(_) => Err(()),
1984                }
1985            } else {
1986                match self.receiver.try_recv() {
1987                    Ok(message) => Ok(message),
1988                    Err(_) => Err(()),
1989                }
1990            };
1991
1992            match message {
1993                Ok(SharedEmitterMessage::Diagnostic(diag)) => {
1994                    // The diagnostic has been received on the main thread.
1995                    // Convert it back to a full `DiagInner` and emit.
1996                    let dcx = sess.dcx();
1997                    let mut d =
1998                        rustc_errors::DiagInner::new_with_messages(diag.level, diag.messages);
1999                    d.span = MultiSpan::from_spans(
2000                        diag.span.into_iter().map(|span| span.span()).collect(),
2001                    );
2002                    d.code = diag.code; // may be `None`, that's ok
2003                    d.children = diag
2004                        .children
2005                        .into_iter()
2006                        .map(|sub| rustc_errors::Subdiag {
2007                            level: sub.level,
2008                            messages: sub.messages,
2009                            span: MultiSpan::new(),
2010                        })
2011                        .collect();
2012                    d.args = diag.args;
2013                    dcx.emit_diagnostic(d);
2014                    sess.dcx().abort_if_errors();
2015                }
2016                Ok(SharedEmitterMessage::InlineAsmError(inner)) => {
2017                    assert_matches!(inner.level, Level::Error | Level::Warning | Level::Note);
2018                    let mut err = Diag::<()>::new(sess.dcx(), inner.level, inner.msg);
2019                    if !inner.span.is_dummy() {
2020                        err.span(inner.span.span());
2021                    }
2022
2023                    // Point to the generated assembly if it is available.
2024                    if let Some((buffer, spans)) = inner.source {
2025                        let source = sess
2026                            .source_map()
2027                            .new_source_file(FileName::inline_asm_source_code(&buffer), buffer);
2028                        let spans: Vec<_> = spans
2029                            .iter()
2030                            .map(|sp| {
2031                                Span::with_root_ctxt(
2032                                    source.normalized_byte_pos(sp.start as u32),
2033                                    source.normalized_byte_pos(sp.end as u32),
2034                                )
2035                            })
2036                            .collect();
2037                        err.span_note(spans, "instantiated into assembly here");
2038                    }
2039
2040                    err.emit();
2041                }
2042                Ok(SharedEmitterMessage::Fatal(msg)) => {
2043                    sess.dcx().fatal(msg);
2044                }
2045                Err(_) => {
2046                    break;
2047                }
2048            }
2049        }
2050    }
2051}
2052
2053pub struct Coordinator<B: ExtraBackendMethods> {
2054    sender: Sender<Message<B>>,
2055    future: Option<thread::JoinHandle<Result<CompiledModules, ()>>>,
2056    // Only used for the Message type.
2057    phantom: PhantomData<B>,
2058}
2059
2060impl<B: ExtraBackendMethods> Coordinator<B> {
2061    fn join(mut self) -> std::thread::Result<Result<CompiledModules, ()>> {
2062        self.future.take().unwrap().join()
2063    }
2064}
2065
2066impl<B: ExtraBackendMethods> Drop for Coordinator<B> {
2067    fn drop(&mut self) {
2068        if let Some(future) = self.future.take() {
2069            // If we haven't joined yet, signal to the coordinator that it should spawn no more
2070            // work, and wait for worker threads to finish.
2071            drop(self.sender.send(Message::CodegenAborted::<B>));
2072            drop(future.join());
2073        }
2074    }
2075}
2076
2077pub struct OngoingCodegen<B: ExtraBackendMethods> {
2078    pub backend: B,
2079    pub crate_info: CrateInfo,
2080    pub output_filenames: Arc<OutputFilenames>,
2081    // Field order below matters: dropping `coordinator` first terminates the coordinator
2082    // thread before the two fields below close the channels it uses. See `Coordinator`'s
2083    // `Drop` implementation for more info.
2084    pub coordinator: Coordinator<B>,
2085    pub codegen_worker_receive: Receiver<CguMessage>,
2086    pub shared_emitter_main: SharedEmitterMain,
2087}
2088
2089impl<B: ExtraBackendMethods> OngoingCodegen<B> {
2090    pub fn join(self, sess: &Session) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
2091        self.shared_emitter_main.check(sess, true);
2092        let compiled_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
2093            Ok(Ok(compiled_modules)) => compiled_modules,
2094            Ok(Err(())) => {
2095                sess.dcx().abort_if_errors();
2096                panic!("expected abort due to worker thread errors")
2097            }
2098            Err(_) => {
2099                bug!("panic during codegen/LLVM phase");
2100            }
2101        });
2102
2103        sess.dcx().abort_if_errors();
2104
2105        let work_products =
2106            copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
2107        produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);
2108
2109        // FIXME: time_llvm_passes support - does this use a global context or
2110        // something?
2111        if sess.codegen_units().as_usize() == 1 && sess.opts.unstable_opts.time_llvm_passes {
2112            self.backend.print_pass_timings()
2113        }
2114
2115        if sess.print_llvm_stats() {
2116            self.backend.print_statistics()
2117        }
2118
2119        (
2120            CodegenResults {
2121                crate_info: self.crate_info,
2122
2123                modules: compiled_modules.modules,
2124                allocator_module: compiled_modules.allocator_module,
2125            },
2126            work_products,
2127        )
2128    }
2129
2130    pub(crate) fn codegen_finished(&self, tcx: TyCtxt<'_>) {
2131        self.wait_for_signal_to_codegen_item();
2132        self.check_for_errors(tcx.sess);
2133        drop(self.coordinator.sender.send(Message::CodegenComplete::<B>));
2134    }
2135
2136    pub(crate) fn check_for_errors(&self, sess: &Session) {
2137        self.shared_emitter_main.check(sess, false);
2138    }
2139
2140    pub(crate) fn wait_for_signal_to_codegen_item(&self) {
2141        match self.codegen_worker_receive.recv() {
2142            Ok(CguMessage) => {
2143                // Ok to proceed.
2144            }
2145            Err(_) => {
2146                // One of the LLVM threads must have panicked, fall through so
2147                // error handling can be reached.
2148            }
2149        }
2150    }
2151}
2152
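/// Hands a freshly codegened module over to the coordinator. `cost` is the
/// caller's estimate of how expensive optimizing the module will be; the
/// coordinator keeps its work queue sorted by cost (see the
/// `Message::CodegenDone` handling above) so expensive modules start early.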
2153pub(crate) fn submit_codegened_module_to_llvm<B: ExtraBackendMethods>(
2154    coordinator: &Coordinator<B>,
2155    module: ModuleCodegen<B::Module>,
2156    cost: u64,
2157) {
2158    let llvm_work_item = WorkItem::Optimize(module);
2159    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost }));
2160}
2161
2162pub(crate) fn submit_post_lto_module_to_llvm<B: ExtraBackendMethods>(
2163    coordinator: &Coordinator<B>,
2164    module: CachedModuleCodegen,
2165) {
2166    let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
2167    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost: 0 }));
2168}
2169
2170pub(crate) fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
2171    tcx: TyCtxt<'_>,
2172    coordinator: &Coordinator<B>,
2173    module: CachedModuleCodegen,
2174) {
2175    let filename = pre_lto_bitcode_filename(&module.name);
2176    let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename);
2177    let file = fs::File::open(&bc_path)
2178        .unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));
2179
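    // SAFETY: the mapped file is a bitcode artifact written during a previous
    // incremental session; we assume (rather than check) that nothing modifies
    // it while the mapping is alive.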
2180    let mmap = unsafe {
2181        Mmap::map(file).unwrap_or_else(|e| {
2182            panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
2183        })
2184    };
2185    // Schedule the module to be loaded
2186    drop(coordinator.sender.send(Message::AddImportOnlyModule::<B> {
2187        module_data: SerializedModule::FromUncompressedFile(mmap),
2188        work_product: module.source,
2189    }));
2190}
2191
2192fn pre_lto_bitcode_filename(module_name: &str) -> String {
2193    format!("{module_name}.{PRE_LTO_BC_EXT}")
2194}
2195
2196fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
2197    // This should never be true (because it's not supported). If it is true,
2198    // something is wrong with command-line arg validation.
2199    assert!(
2200        !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
2201            && tcx.sess.target.is_like_windows
2202            && tcx.sess.opts.cg.prefer_dynamic)
2203    );
2204
2205    // We need to generate `__imp_` symbols if we are generating an rlib or we include one
2206    // indirectly from ThinLTO. In theory these are not needed as ThinLTO could resolve
2207    // these, but it currently does not do so.
2208    let can_have_static_objects =
2209        tcx.sess.lto() == Lto::Thin || tcx.crate_types().contains(&CrateType::Rlib);
2210
2211    tcx.sess.target.is_like_windows &&
2212    can_have_static_objects &&
2213    // ThinLTO can't handle this workaround in all cases, so we don't
2214    // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
2215    // dynamic linking when linker plugin LTO is enabled.
2216    !tcx.sess.opts.cg.linker_plugin_lto.enabled()
2217}