
rustc_codegen_ssa/back/write.rs

use std::marker::PhantomData;
use std::panic::AssertUnwindSafe;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender, channel};
use std::{fs, io, mem, str, thread};

use rustc_abi::Size;
use rustc_data_structures::assert_matches;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::jobserver::{self, Acquired};
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
use rustc_errors::emitter::Emitter;
use rustc_errors::translation::Translator;
use rustc_errors::{
    Diag, DiagArgMap, DiagCtxt, DiagCtxtHandle, DiagMessage, ErrCode, FatalError, FatalErrorMarker,
    Level, MultiSpan, Style, Suggestions, catch_fatal_errors,
};
use rustc_fs_util::link_or_copy;
use rustc_hir::attrs::AttributeKind;
use rustc_hir::find_attr;
use rustc_incremental::{
    copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
};
use rustc_macros::{Decodable, Encodable};
use rustc_metadata::fs::copy_to_stdout;
use rustc_middle::bug;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_session::config::{
    self, CrateType, Lto, OptLevel, OutFileName, OutputFilenames, OutputType, Passes,
    SwitchWithOptPath,
};
use rustc_span::source_map::SourceMap;
use rustc_span::{FileName, InnerSpan, Span, SpanData};
use rustc_target::spec::{MergeFunctions, SanitizerSet};
use tracing::debug;

use super::link::{self, ensure_removed};
use super::lto::{self, SerializedModule};
use crate::back::lto::check_lto_allowed;
use crate::errors::ErrorCreatingRemarkDir;
use crate::traits::*;
use crate::{
    CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
    errors,
};

const PRE_LTO_BC_EXT: &str = "pre-lto.bc";

/// What kind of object file to emit.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable)]
pub enum EmitObj {
    // No object file.
    None,

    // Just uncompressed llvm bitcode. Provides easy compatibility with
    // emscripten's ecc compiler, when used as the linker.
    Bitcode,

    // Object code, possibly augmented with a bitcode section.
    ObjectCode(BitcodeSection),
}

/// What kind of llvm bitcode section to embed in an object file.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable)]
pub enum BitcodeSection {
    // No bitcode section.
    None,

    // A full, uncompressed bitcode section.
    Full,
}

/// Module-specific configuration for `optimize_and_codegen`.
#[derive(Encodable, Decodable)]
pub struct ModuleConfig {
    /// Names of additional optimization passes to run.
    pub passes: Vec<String>,
    /// Some(level) to optimize at a certain level, or None to run
    /// absolutely no optimizations (used for the allocator module).
    pub opt_level: Option<config::OptLevel>,

    pub pgo_gen: SwitchWithOptPath,
    pub pgo_use: Option<PathBuf>,
    pub pgo_sample_use: Option<PathBuf>,
    pub debug_info_for_profiling: bool,
    pub instrument_coverage: bool,

    pub sanitizer: SanitizerSet,
    pub sanitizer_recover: SanitizerSet,
    pub sanitizer_dataflow_abilist: Vec<String>,
    pub sanitizer_memory_track_origins: usize,

    // Flags indicating which outputs to produce.
    pub emit_pre_lto_bc: bool,
    pub emit_no_opt_bc: bool,
    pub emit_bc: bool,
    pub emit_ir: bool,
    pub emit_asm: bool,
    pub emit_obj: EmitObj,
    pub emit_thin_lto: bool,
    pub emit_thin_lto_summary: bool,

    // Miscellaneous flags. These are mostly copied from command-line
    // options.
    pub verify_llvm_ir: bool,
    pub lint_llvm_ir: bool,
    pub no_prepopulate_passes: bool,
    pub no_builtins: bool,
    pub vectorize_loop: bool,
    pub vectorize_slp: bool,
    pub merge_functions: bool,
    pub emit_lifetime_markers: bool,
    pub llvm_plugins: Vec<String>,
    pub autodiff: Vec<config::AutoDiff>,
    pub offload: Vec<config::Offload>,
}

impl ModuleConfig {
    fn new(kind: ModuleKind, tcx: TyCtxt<'_>, no_builtins: bool) -> ModuleConfig {
        // If it's a regular module, use `$regular`, otherwise use `$other`.
        // `$regular` and `$other` are evaluated lazily.
        macro_rules! if_regular {
            ($regular: expr, $other: expr) => {
                if let ModuleKind::Regular = kind { $regular } else { $other }
            };
        }

        let sess = tcx.sess;
        let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);

        let save_temps = sess.opts.cg.save_temps;

        let should_emit_obj = sess.opts.output_types.contains_key(&OutputType::Exe)
            || match kind {
                ModuleKind::Regular => sess.opts.output_types.contains_key(&OutputType::Object),
                ModuleKind::Allocator => false,
            };

        let emit_obj = if !should_emit_obj {
            EmitObj::None
        } else if sess.target.obj_is_bitcode
            || (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins)
        {
            // This case is selected if the target uses objects as bitcode, or
            // if linker plugin LTO is enabled. In the linker plugin LTO case
            // the assumption is that the final link-step will read the bitcode
            // and convert it to object code. This may be done by either the
            // native linker or rustc itself.
            //
            // Note, however, that the linker-plugin-lto requested here is
            // explicitly ignored for `#![no_builtins]` crates. These crates are
            // specifically ignored by rustc's LTO passes and wouldn't work if
            // loaded into the linker. These crates define symbols that LLVM
            // lowers intrinsics to, and these symbol dependencies aren't known
            // until after codegen. As a result any crate marked
            // `#![no_builtins]` is assumed to not participate in LTO and
            // instead goes on to generate object code.
            EmitObj::Bitcode
        } else if need_bitcode_in_object(tcx) {
            EmitObj::ObjectCode(BitcodeSection::Full)
        } else {
            EmitObj::ObjectCode(BitcodeSection::None)
        };

        ModuleConfig {
            passes: if_regular!(sess.opts.cg.passes.clone(), vec![]),

            opt_level: opt_level_and_size,

            pgo_gen: if_regular!(
                sess.opts.cg.profile_generate.clone(),
                SwitchWithOptPath::Disabled
            ),
            pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
            pgo_sample_use: if_regular!(sess.opts.unstable_opts.profile_sample_use.clone(), None),
            debug_info_for_profiling: sess.opts.unstable_opts.debug_info_for_profiling,
            instrument_coverage: if_regular!(sess.instrument_coverage(), false),

            sanitizer: if_regular!(sess.sanitizers(), SanitizerSet::empty()),
            sanitizer_dataflow_abilist: if_regular!(
                sess.opts.unstable_opts.sanitizer_dataflow_abilist.clone(),
                Vec::new()
            ),
            sanitizer_recover: if_regular!(
                sess.opts.unstable_opts.sanitizer_recover,
                SanitizerSet::empty()
            ),
            sanitizer_memory_track_origins: if_regular!(
                sess.opts.unstable_opts.sanitizer_memory_track_origins,
                0
            ),

            emit_pre_lto_bc: if_regular!(
                save_temps || need_pre_lto_bitcode_for_incr_comp(sess),
                false
            ),
            emit_no_opt_bc: if_regular!(save_temps, false),
            emit_bc: if_regular!(
                save_temps || sess.opts.output_types.contains_key(&OutputType::Bitcode),
                save_temps
            ),
            emit_ir: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::LlvmAssembly),
                false
            ),
            emit_asm: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::Assembly),
                false
            ),
            emit_obj,
            // thin lto summaries prevent fat lto, so do not emit them if fat
            // lto is requested. See PR #136840 for background information.
            emit_thin_lto: sess.opts.unstable_opts.emit_thin_lto && sess.lto() != Lto::Fat,
            emit_thin_lto_summary: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::ThinLinkBitcode),
                false
            ),

            verify_llvm_ir: sess.verify_llvm_ir(),
            lint_llvm_ir: sess.opts.unstable_opts.lint_llvm_ir,
            no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
            no_builtins: no_builtins || sess.target.no_builtins,

            // Copy what clang does by turning on loop vectorization at O2 and
            // slp vectorization at O3.
            vectorize_loop: !sess.opts.cg.no_vectorize_loops
                && (sess.opts.optimize == config::OptLevel::More
                    || sess.opts.optimize == config::OptLevel::Aggressive),
            vectorize_slp: !sess.opts.cg.no_vectorize_slp
                && sess.opts.optimize == config::OptLevel::Aggressive,

            // Some targets (namely, NVPTX) interact badly with the
            // MergeFunctions pass. This is because MergeFunctions can generate
            // new function calls which may interfere with the target calling
            // convention; e.g. for the NVPTX target, PTX kernels should not
            // call other PTX kernels. MergeFunctions can also be configured to
            // generate aliases instead, but aliases are not supported by some
            // backends (again, NVPTX). Therefore, allow targets to opt out of
            // the MergeFunctions pass, but otherwise keep the pass enabled (at
            // O2 and O3) since it can be useful for reducing code size.
            merge_functions: match sess
                .opts
                .unstable_opts
                .merge_functions
                .unwrap_or(sess.target.merge_functions)
            {
                MergeFunctions::Disabled => false,
                MergeFunctions::Trampolines | MergeFunctions::Aliases => {
                    use config::OptLevel::*;
                    match sess.opts.optimize {
                        Aggressive | More | SizeMin | Size => true,
                        Less | No => false,
                    }
                }
            },

            emit_lifetime_markers: sess.emit_lifetime_markers(),
            llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
            autodiff: if_regular!(sess.opts.unstable_opts.autodiff.clone(), vec![]),
            offload: if_regular!(sess.opts.unstable_opts.offload.clone(), vec![]),
        }
    }

    pub fn bitcode_needed(&self) -> bool {
        self.emit_bc
            || self.emit_thin_lto_summary
            || self.emit_obj == EmitObj::Bitcode
            || self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }

    pub fn embed_bitcode(&self) -> bool {
        self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }
}
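// An illustrative reading of the flags above (assuming the target does not
// use bitcode as its object format and linker-plugin LTO is off): when the
// output types include an executable, the crate types include an rlib, and
// `-Cembed-bitcode` is enabled, `need_bitcode_in_object` (defined below)
// yields `emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)`, so both
// `bitcode_needed()` and `embed_bitcode()` return true.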

/// Configuration passed to the function returned by the `target_machine_factory`.
pub struct TargetMachineFactoryConfig {
    /// Split DWARF is enabled in LLVM by checking that `TM.MCOptions.SplitDwarfFile` isn't empty,
    /// so the path to the dwarf object has to be provided when we create the target machine.
    /// This can be ignored by backends which do not need it for their Split DWARF support.
    pub split_dwarf_file: Option<PathBuf>,

    /// The name of the output object file. Used for setting `OutputFilenames` in target options
    /// so that LLVM can emit the CodeView `S_OBJNAME` record in PDB files.
    pub output_obj_file: Option<PathBuf>,
}

impl TargetMachineFactoryConfig {
    pub fn new(cgcx: &CodegenContext, module_name: &str) -> TargetMachineFactoryConfig {
        let split_dwarf_file = if cgcx.target_can_use_split_dwarf {
            cgcx.output_filenames.split_dwarf_path(
                cgcx.split_debuginfo,
                cgcx.split_dwarf_kind,
                module_name,
                cgcx.invocation_temp.as_deref(),
            )
        } else {
            None
        };

        let output_obj_file = Some(cgcx.output_filenames.temp_path_for_cgu(
            OutputType::Object,
            module_name,
            cgcx.invocation_temp.as_deref(),
        ));
        TargetMachineFactoryConfig { split_dwarf_file, output_obj_file }
    }
}

pub type TargetMachineFactoryFn<B> = Arc<
    dyn Fn(
            DiagCtxtHandle<'_>,
            TargetMachineFactoryConfig,
        ) -> <B as WriteBackendMethods>::TargetMachine
        + Send
        + Sync,
>;

/// Additional resources used by optimize_and_codegen (not module specific)
#[derive(Clone, Encodable, Decodable)]
pub struct CodegenContext {
    // Resources needed when running LTO
    pub lto: Lto,
    pub use_linker_plugin_lto: bool,
    pub dylib_lto: bool,
    pub prefer_dynamic: bool,
    pub save_temps: bool,
    pub fewer_names: bool,
    pub time_trace: bool,
    pub crate_types: Vec<CrateType>,
    pub output_filenames: Arc<OutputFilenames>,
    pub invocation_temp: Option<String>,
    pub module_config: Arc<ModuleConfig>,
    pub opt_level: OptLevel,
    pub backend_features: Vec<String>,
    pub msvc_imps_needed: bool,
    pub is_pe_coff: bool,
    pub target_can_use_split_dwarf: bool,
    pub target_arch: String,
    pub target_is_like_darwin: bool,
    pub target_is_like_aix: bool,
    pub target_is_like_gpu: bool,
    pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
    pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
    pub pointer_size: Size,

    /// LLVM optimizations for which we want to print remarks.
    pub remark: Passes,
    /// Directory into which the LLVM optimization remarks should be written.
    /// If `None`, they will be written to stderr.
    pub remark_dir: Option<PathBuf>,
    /// The incremental compilation session directory, or `None` if we are not
    /// compiling incrementally.
    pub incr_comp_session_dir: Option<PathBuf>,
    /// `true` if the codegen should be run in parallel.
    ///
    /// Depends on [`ExtraBackendMethods::supports_parallel()`] and `-Zno_parallel_backend`.
    pub parallel: bool,
}

fn generate_thin_lto_work<B: ExtraBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    dcx: DiagCtxtHandle<'_>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    needs_thin_lto: Vec<(String, B::ThinBuffer)>,
    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> Vec<(ThinLtoWorkItem<B>, u64)> {
    let _prof_timer = prof.generic_activity("codegen_thin_generate_lto_work");

    let (lto_modules, copy_jobs) = B::run_thin_lto(
        cgcx,
        prof,
        dcx,
        exported_symbols_for_lto,
        each_linked_rlib_for_lto,
        needs_thin_lto,
        import_only_modules,
    );
    lto_modules
        .into_iter()
        .map(|module| {
            let cost = module.cost();
            (ThinLtoWorkItem::ThinLto(module), cost)
        })
        .chain(copy_jobs.into_iter().map(|wp| {
            (
                ThinLtoWorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
                    name: wp.cgu_name.clone(),
                    source: wp,
                }),
                0, // copying is very cheap
            )
        }))
        .collect()
}
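// The `u64` in each returned pair is a relative cost hint: real thin-LTO work
// is weighted by `module.cost()`, while copying cached post-LTO artifacts is
// marked free. Presumably the coordinator uses these hints to schedule the
// expensive modules first, but that logic lives outside this function.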

pub struct CompiledModules {
    pub modules: Vec<CompiledModule>,
    pub allocator_module: Option<CompiledModule>,
}

enum MaybeLtoModules<B: WriteBackendMethods> {
    NoLto {
        modules: Vec<CompiledModule>,
        allocator_module: Option<CompiledModule>,
    },
    FatLto {
        cgcx: CodegenContext,
        exported_symbols_for_lto: Arc<Vec<String>>,
        each_linked_rlib_file_for_lto: Vec<PathBuf>,
        needs_fat_lto: Vec<FatLtoInput<B>>,
        lto_import_only_modules:
            Vec<(SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>, WorkProduct)>,
    },
    ThinLto {
        cgcx: CodegenContext,
        exported_symbols_for_lto: Arc<Vec<String>>,
        each_linked_rlib_file_for_lto: Vec<PathBuf>,
        needs_thin_lto: Vec<(String, <B as WriteBackendMethods>::ThinBuffer)>,
        lto_import_only_modules:
            Vec<(SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>, WorkProduct)>,
    },
}

fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
    let sess = tcx.sess;
    sess.opts.cg.embed_bitcode
        && tcx.crate_types().contains(&CrateType::Rlib)
        && sess.opts.output_types.contains_key(&OutputType::Exe)
}

fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
    if sess.opts.incremental.is_none() {
        return false;
    }

    match sess.lto() {
        Lto::No => false,
        Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
    }
}
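// In other words: pre-LTO bitcode is only worth saving when the build is both
// incremental and uses some form of LTO (presumably so a later incremental
// session can feed the saved `.pre-lto.bc` module back into LTO; see
// `PRE_LTO_BC_EXT` above).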

pub(crate) fn start_async_codegen<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    target_cpu: String,
    allocator_module: Option<ModuleCodegen<B::Module>>,
) -> OngoingCodegen<B> {
    let (coordinator_send, coordinator_receive) = channel();

    let crate_attrs = tcx.hir_attrs(rustc_hir::CRATE_HIR_ID);
    let no_builtins = find_attr!(crate_attrs, AttributeKind::NoBuiltins);

    let crate_info = CrateInfo::new(tcx, target_cpu);

    let regular_config = ModuleConfig::new(ModuleKind::Regular, tcx, no_builtins);
    let allocator_config = ModuleConfig::new(ModuleKind::Allocator, tcx, no_builtins);

    let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
    let (codegen_worker_send, codegen_worker_receive) = channel();

    let coordinator_thread = start_executing_work(
        backend.clone(),
        tcx,
        &crate_info,
        shared_emitter,
        codegen_worker_send,
        coordinator_receive,
        Arc::new(regular_config),
        Arc::new(allocator_config),
        allocator_module,
        coordinator_send.clone(),
    );

    OngoingCodegen {
        backend,
        crate_info,

        codegen_worker_receive,
        shared_emitter_main,
        coordinator: Coordinator {
            sender: coordinator_send,
            future: Some(coordinator_thread),
            phantom: PhantomData,
        },
        output_filenames: Arc::clone(tcx.output_filenames(())),
    }
}

fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
    sess: &Session,
    compiled_modules: &CompiledModules,
) -> FxIndexMap<WorkProductId, WorkProduct> {
    let mut work_products = FxIndexMap::default();

    if sess.opts.incremental.is_none() {
        return work_products;
    }

    let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");

    for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
        let mut files = Vec::new();
        if let Some(object_file_path) = &module.object {
            files.push((OutputType::Object.extension(), object_file_path.as_path()));
        }
        if let Some(dwarf_object_file_path) = &module.dwarf_object {
            files.push(("dwo", dwarf_object_file_path.as_path()));
        }
        if let Some(path) = &module.assembly {
            files.push((OutputType::Assembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.llvm_ir {
            files.push((OutputType::LlvmAssembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.bytecode {
            files.push((OutputType::Bitcode.extension(), path.as_path()));
        }
        if let Some((id, product)) = copy_cgu_workproduct_to_incr_comp_cache_dir(
            sess,
            &module.name,
            files.as_slice(),
            &module.links_from_incr_cache,
        ) {
            work_products.insert(id, product);
        }
    }

    work_products
}

pub fn produce_final_output_artifacts(
    sess: &Session,
    compiled_modules: &CompiledModules,
    crate_output: &OutputFilenames,
) {
    let mut user_wants_bitcode = false;
    let mut user_wants_objects = false;

    // Produce final compile outputs.
    let copy_gracefully = |from: &Path, to: &OutFileName| match to {
        OutFileName::Stdout if let Err(e) = copy_to_stdout(from) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, to.as_path(), e));
        }
        OutFileName::Real(path) if let Err(e) = fs::copy(from, path) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, path, e));
        }
        _ => {}
    };

    let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
        if let [module] = &compiled_modules.modules[..] {
            // 1) Only one codegen unit. In this case it's no difficulty
            //    to copy `foo.0.x` to `foo.x`.
            let path = crate_output.temp_path_for_cgu(
                output_type,
                &module.name,
                sess.invocation_temp.as_deref(),
            );
            let output = crate_output.path(output_type);
            if !output_type.is_text_output() && output.is_tty() {
                sess.dcx()
                    .emit_err(errors::BinaryOutputToTty { shorthand: output_type.shorthand() });
            } else {
                copy_gracefully(&path, &output);
            }
            if !sess.opts.cg.save_temps && !keep_numbered {
                // The user just wants `foo.x`, not `foo.#module-name#.x`.
                ensure_removed(sess.dcx(), &path);
            }
        } else {
            if crate_output.outputs.contains_explicit_name(&output_type) {
                // 2) Multiple codegen units, with `--emit foo=some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx()
                    .emit_warn(errors::IgnoringEmitPath { extension: output_type.extension() });
            } else if crate_output.single_output_file.is_some() {
                // 3) Multiple codegen units, with `-o some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx().emit_warn(errors::IgnoringOutput { extension: output_type.extension() });
            } else {
                // 4) Multiple codegen units, but no explicit name. We
                //    just leave the `foo.0.x` files in place.
                // (We don't have to do any work in this case.)
            }
        }
    };

    // Flag to indicate whether the user explicitly requested bitcode.
    // Otherwise, we produced it only as a temporary output, and will need
    // to get rid of it.
    for output_type in crate_output.outputs.keys() {
        match *output_type {
            OutputType::Bitcode => {
                user_wants_bitcode = true;
                // Copy to .bc, but always keep the .0.bc. There is a later
                // check to figure out if we should delete .0.bc files, or keep
                // them for making an rlib.
                copy_if_one_unit(OutputType::Bitcode, true);
            }
            OutputType::ThinLinkBitcode => {
                copy_if_one_unit(OutputType::ThinLinkBitcode, false);
            }
            OutputType::LlvmAssembly => {
                copy_if_one_unit(OutputType::LlvmAssembly, false);
            }
            OutputType::Assembly => {
                copy_if_one_unit(OutputType::Assembly, false);
            }
            OutputType::Object => {
                user_wants_objects = true;
                copy_if_one_unit(OutputType::Object, true);
            }
            OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
        }
    }

    // Clean up unwanted temporary files.

    // We create the following files by default:
    //  - #crate#.#module-name#.bc
    //  - #crate#.#module-name#.o
    //  - #crate#.crate.metadata.bc
    //  - #crate#.crate.metadata.o
    //  - #crate#.o (linked from crate.##.o)
    //  - #crate#.bc (copied from crate.##.bc)
    // We may create additional files if requested by the user (through
    // `-C save-temps` or `--emit=` flags).

    if !sess.opts.cg.save_temps {
        // Remove the temporary .#module-name#.o objects. If the user didn't
        // explicitly request bitcode (with --emit=bc), and the bitcode is not
        // needed for building an rlib, then we must remove .#module-name#.bc as
        // well.

        // Specific rules for keeping .#module-name#.bc:
        //  - If the user requested bitcode (`user_wants_bitcode`), and
        //    codegen_units > 1, then keep it.
        //  - If the user requested bitcode but codegen_units == 1, then we
        //    can toss .#module-name#.bc because we copied it to .bc earlier.
        //  - If we're not building an rlib and the user didn't request
        //    bitcode, then delete .#module-name#.bc.
        // If you change how this works, also update back::link::link_rlib,
        // where .#module-name#.bc files are (maybe) deleted after making an
        // rlib.
        let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);

        let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units().as_usize() > 1;

        let keep_numbered_objects =
            needs_crate_object || (user_wants_objects && sess.codegen_units().as_usize() > 1);

        for module in compiled_modules.modules.iter() {
            if !keep_numbered_objects {
                if let Some(ref path) = module.object {
                    ensure_removed(sess.dcx(), path);
                }

                if let Some(ref path) = module.dwarf_object {
                    ensure_removed(sess.dcx(), path);
                }
            }

            if let Some(ref path) = module.bytecode {
                if !keep_numbered_bitcode {
                    ensure_removed(sess.dcx(), path);
                }
            }
        }

        if !user_wants_bitcode
            && let Some(ref allocator_module) = compiled_modules.allocator_module
            && let Some(ref path) = allocator_module.bytecode
        {
            ensure_removed(sess.dcx(), path);
        }
    }

    if sess.opts.json_artifact_notifications {
        if let [module] = &compiled_modules.modules[..] {
            module.for_each_output(|_path, ty| {
                if sess.opts.output_types.contains_key(&ty) {
                    let descr = ty.shorthand();
                    // For a single CGU, the output file is renamed to drop the
                    // CGU-specific suffix, so we regenerate the path the same way.
                    let path = crate_output.path(ty);
                    sess.dcx().emit_artifact_notification(path.as_path(), descr);
                }
            });
        } else {
            for module in &compiled_modules.modules {
                module.for_each_output(|path, ty| {
                    if sess.opts.output_types.contains_key(&ty) {
                        let descr = ty.shorthand();
                        sess.dcx().emit_artifact_notification(&path, descr);
                    }
                });
            }
        }
    }

    // We leave the following files around by default:
    //  - #crate#.o
    //  - #crate#.crate.metadata.o
    //  - #crate#.bc
    // These are used in linking steps and will be cleaned up afterward.
}

pub(crate) enum WorkItem<B: WriteBackendMethods> {
    /// Optimize a newly codegened, totally unoptimized module.
    Optimize(ModuleCodegen<B::Module>),
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
}

enum ThinLtoWorkItem<B: WriteBackendMethods> {
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
    /// Performs thin-LTO on the given module.
    ThinLto(lto::ThinModule<B>),
}

// `pthread_setname()` on *nix ignores anything beyond the first 15
// bytes. Use short descriptions to maximize the space available for
// the module name.
#[cfg(not(windows))]
fn desc(short: &str, _long: &str, name: &str) -> String {
    // The short label is three bytes, and is followed by a space. That
    // leaves 11 bytes for the CGU name. How we obtain those 11 bytes
    // depends on the CGU name form.
    //
    // - Non-incremental, e.g. `regex.f10ba03eb5ec7975-cgu.0`: the part
    //   before the `-cgu.0` is the same for every CGU, so use the
    //   `cgu.0` part. The number suffix will be different for each
    //   CGU.
    //
    // - Incremental (normal), e.g. `2i52vvl2hco29us0`: use the whole
    //   name because each CGU will have a unique ASCII hash, and the
    //   first 11 bytes will be enough to identify it.
    //
    // - Incremental (with `-Zhuman-readable-cgu-names`), e.g.
    //   `regex.f10ba03eb5ec7975-re_builder.volatile`: use the whole
    //   name. The first 11 bytes won't be enough to uniquely identify
    //   it, but no obvious substring will, and this is a rarely used
    //   option so it doesn't matter much.
    //
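    // As an illustrative example, `desc("opt", "optimize module",
    // "regex.f10ba03eb5ec7975-cgu.0")` returns `"opt cgu.0"`, which fits
    // within the 15-byte limit.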
755    assert_eq!(short.len(), 3);
756    let name = if let Some(index) = name.find("-cgu.") {
757        &name[index + 1..] // +1 skips the leading '-'.
758    } else {
759        name
760    };
761    format!("{short} {name}")
762}
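// Illustrative example (hypothetical CGU name, not part of the original source):
// for the non-incremental CGU `regex.f10ba03eb5ec7975-cgu.0`, the `-cgu.` search
// above strips the crate-wide prefix, so:
//
//     desc("opt", "optimize module", "regex.f10ba03eb5ec7975-cgu.0")
//         == "opt cgu.0" // 9 bytes, well within the 15-byte pthread limit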
763
764// Windows has no thread name length limit, so use more descriptive names.
765#[cfg(windows)]
766fn desc(_short: &str, long: &str, name: &str) -> String {
767    format!("{long} {name}")
768}
769
770impl<B: WriteBackendMethods> WorkItem<B> {
771    /// Generate a short description of this work item suitable for use as a thread name.
772    fn short_description(&self) -> String {
773        match self {
774            WorkItem::Optimize(m) => desc("opt", "optimize module", &m.name),
775            WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for", &m.name),
776        }
777    }
778}
779
780impl<B: WriteBackendMethods> ThinLtoWorkItem<B> {
781    /// Generate a short description of this work item suitable for use as a thread name.
782    fn short_description(&self) -> String {
783        match self {
784            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
785                desc("cpy", "copy LTO artifacts for", &m.name)
786            }
787            ThinLtoWorkItem::ThinLto(m) => desc("lto", "thin-LTO module", m.name()),
788        }
789    }
790}
791
792/// A result produced by the backend.
793pub(crate) enum WorkItemResult<B: WriteBackendMethods> {
794    /// The backend has finished compiling a CGU, nothing more required.
795    Finished(CompiledModule),
796
797    /// The backend has finished compiling a CGU, which now needs to go through
798    /// fat LTO.
799    NeedsFatLto(FatLtoInput<B>),
800
801    /// The backend has finished compiling a CGU, which now needs to go through
802    /// thin LTO.
803    NeedsThinLto(String, B::ThinBuffer),
804}
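// Illustrative flow (as handled by the coordinator loop further below):
// `Finished` results are collected directly into the compiled-modules list,
// while `NeedsFatLto`/`NeedsThinLto` results are accumulated until codegen
// completes and are eventually fed to `do_fat_lto`/`do_thin_lto`.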
805
806pub enum FatLtoInput<B: WriteBackendMethods> {
807    Serialized { name: String, buffer: SerializedModule<B::ModuleBuffer> },
808    InMemory(ModuleCodegen<B::Module>),
809}
810
811/// Actual LTO type we end up choosing based on multiple factors.
812pub(crate) enum ComputedLtoType {
813    No,
814    Thin,
815    Fat,
816}
817
818pub(crate) fn compute_per_cgu_lto_type(
819    sess_lto: &Lto,
820    linker_does_lto: bool,
821    sess_crate_types: &[CrateType],
822) -> ComputedLtoType {
823    // If the linker does LTO, we don't have to do it. Note that we
824    // keep doing full LTO, if it is requested, so as not to break the
825    // assumption that the output will be a single module.
826
827    // We ignore a request for full crate graph LTO if the crate type
828    // is only an rlib, as there is no full crate graph to process yet;
829    // that will happen later.
830    //
831    // This use case currently comes up primarily for targets that
832    // require LTO so the request for LTO is always unconditionally
833    // passed down to the backend, but we don't actually want to do
834    // anything about it yet until we've got a final product.
835    let is_rlib = matches!(sess_crate_types, [CrateType::Rlib]);
836
837    match sess_lto {
838        Lto::ThinLocal if !linker_does_lto => ComputedLtoType::Thin,
839        Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
840        Lto::Fat if !is_rlib => ComputedLtoType::Fat,
841        _ => ComputedLtoType::No,
842    }
843}
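// Illustrative outcomes of the match above (hypothetical inputs):
//
//     compute_per_cgu_lto_type(&Lto::Thin, false, &[CrateType::Executable]); // => Thin
//     compute_per_cgu_lto_type(&Lto::Thin, true,  &[CrateType::Executable]); // => No (linker does it)
//     compute_per_cgu_lto_type(&Lto::Fat,  false, &[CrateType::Rlib]);       // => No (deferred past the rlib)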
844
845fn execute_optimize_work_item<B: ExtraBackendMethods>(
846    cgcx: &CodegenContext,
847    prof: &SelfProfilerRef,
848    shared_emitter: SharedEmitter,
849    mut module: ModuleCodegen<B::Module>,
850) -> WorkItemResult<B> {
851    let _timer = prof.generic_activity_with_arg("codegen_module_optimize", &*module.name);
852
853    B::optimize(cgcx, prof, &shared_emitter, &mut module, &cgcx.module_config);
854
855    // After we've done the initial round of optimizations we need to
856    // decide whether to synchronously codegen this module or ship it
857    // back to the coordinator thread for further LTO processing (which
858    // has to wait for all the initial modules to be optimized).
859
860    let lto_type =
861        compute_per_cgu_lto_type(&cgcx.lto, cgcx.use_linker_plugin_lto, &cgcx.crate_types);
862
863    // If we're doing some form of incremental LTO then we need to be sure to
864    // save our module to disk first.
865    let bitcode = if cgcx.module_config.emit_pre_lto_bc {
866        let filename = pre_lto_bitcode_filename(&module.name);
867        cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
868    } else {
869        None
870    };
871
872    match lto_type {
873        ComputedLtoType::No => {
874            let module = B::codegen(cgcx, &prof, &shared_emitter, module, &cgcx.module_config);
875            WorkItemResult::Finished(module)
876        }
877        ComputedLtoType::Thin => {
878            let (name, thin_buffer) = B::prepare_thin(module);
879            if let Some(path) = bitcode {
880                fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
881                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
882                });
883            }
884            WorkItemResult::NeedsThinLto(name, thin_buffer)
885        }
886        ComputedLtoType::Fat => match bitcode {
887            Some(path) => {
888                let (name, buffer) = B::serialize_module(module);
889                fs::write(&path, buffer.data()).unwrap_or_else(|e| {
890                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
891                });
892                WorkItemResult::NeedsFatLto(FatLtoInput::Serialized {
893                    name,
894                    buffer: SerializedModule::Local(buffer),
895                })
896            }
897            None => WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module)),
898        },
899    }
900}
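// Illustrative path (assuming `pre_lto_bitcode_filename` appends the
// `pre-lto.bc` extension to the module name): for a module named
// `foo.1234-cgu.0` compiled with `-Cincremental=<dir>`, the bitcode above
// would land at `<incr-comp-session-dir>/foo.1234-cgu.0.pre-lto.bc`.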
901
902fn execute_copy_from_cache_work_item(
903    cgcx: &CodegenContext,
904    prof: &SelfProfilerRef,
905    shared_emitter: SharedEmitter,
906    module: CachedModuleCodegen,
907) -> CompiledModule {
908    let _timer =
909        prof.generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name);
910
911    let dcx = DiagCtxt::new(Box::new(shared_emitter));
912    let dcx = dcx.handle();
913
914    let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
915
916    let mut links_from_incr_cache = Vec::new();
917
918    let mut load_from_incr_comp_dir = |output_path: PathBuf, saved_path: &str| {
919        let source_file = in_incr_comp_dir(incr_comp_session_dir, saved_path);
920        debug!(
921            "copying preexisting module `{}` from {:?} to {}",
922            module.name,
923            source_file,
924            output_path.display()
925        );
926        match link_or_copy(&source_file, &output_path) {
927            Ok(_) => {
928                links_from_incr_cache.push(source_file);
929                Some(output_path)
930            }
931            Err(error) => {
932                dcx.emit_err(errors::CopyPathBuf { source_file, output_path, error });
933                None
934            }
935        }
936    };
937
938    let dwarf_object =
939        module.source.saved_files.get("dwo").as_ref().and_then(|saved_dwarf_object_file| {
940            let dwarf_obj_out = cgcx
941                .output_filenames
942                .split_dwarf_path(
943                    cgcx.split_debuginfo,
944                    cgcx.split_dwarf_kind,
945                    &module.name,
946                    cgcx.invocation_temp.as_deref(),
947                )
948                .expect(
949                    "saved dwarf object in work product but `split_dwarf_path` returned `None`",
950                );
951            load_from_incr_comp_dir(dwarf_obj_out, saved_dwarf_object_file)
952        });
953
954    let mut load_from_incr_cache = |perform, output_type: OutputType| {
955        if perform {
956            let saved_file = module.source.saved_files.get(output_type.extension())?;
957            let output_path = cgcx.output_filenames.temp_path_for_cgu(
958                output_type,
959                &module.name,
960                cgcx.invocation_temp.as_deref(),
961            );
962            load_from_incr_comp_dir(output_path, &saved_file)
963        } else {
964            None
965        }
966    };
967
968    let module_config = &cgcx.module_config;
969    let should_emit_obj = module_config.emit_obj != EmitObj::None;
970    let assembly = load_from_incr_cache(module_config.emit_asm, OutputType::Assembly);
971    let llvm_ir = load_from_incr_cache(module_config.emit_ir, OutputType::LlvmAssembly);
972    let bytecode = load_from_incr_cache(module_config.emit_bc, OutputType::Bitcode);
973    let object = load_from_incr_cache(should_emit_obj, OutputType::Object);
974    if should_emit_obj && object.is_none() {
975        dcx.emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
976    }
977
978    CompiledModule {
979        links_from_incr_cache,
980        kind: ModuleKind::Regular,
981        name: module.name,
982        object,
983        dwarf_object,
984        bytecode,
985        assembly,
986        llvm_ir,
987    }
988}
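// Illustrative shape of `saved_files` (hypothetical entries): the map is keyed
// by output-type extension, e.g. { "o": "foo.1234-cgu.0.o", "dwo": "foo.1234-cgu.0.dwo" },
// so each `load_from_incr_cache` call above looks up the extension and then
// links or copies the saved file out of the incremental session directory.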
989
990fn do_fat_lto<B: ExtraBackendMethods>(
991    cgcx: &CodegenContext,
992    prof: &SelfProfilerRef,
993    shared_emitter: SharedEmitter,
994    tm_factory: TargetMachineFactoryFn<B>,
995    exported_symbols_for_lto: &[String],
996    each_linked_rlib_for_lto: &[PathBuf],
997    mut needs_fat_lto: Vec<FatLtoInput<B>>,
998    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
999) -> CompiledModule {
1000    let _timer = prof.verbose_generic_activity("LLVM_fatlto");
1001
1002    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
1003    let dcx = dcx.handle();
1004
1005    check_lto_allowed(&cgcx, dcx);
1006
1007    for (module, wp) in import_only_modules {
1008        needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
1009    }
1010
1011    let module = B::run_and_optimize_fat_lto(
1012        cgcx,
1013        prof,
1014        &shared_emitter,
1015        tm_factory,
1016        exported_symbols_for_lto,
1017        each_linked_rlib_for_lto,
1018        needs_fat_lto,
1019    );
1020    B::codegen(cgcx, prof, &shared_emitter, module, &cgcx.module_config)
1021}
1022
1023fn do_thin_lto<B: ExtraBackendMethods>(
1024    cgcx: &CodegenContext,
1025    prof: &SelfProfilerRef,
1026    shared_emitter: SharedEmitter,
1027    tm_factory: TargetMachineFactoryFn<B>,
1028    exported_symbols_for_lto: Arc<Vec<String>>,
1029    each_linked_rlib_for_lto: Vec<PathBuf>,
1030    needs_thin_lto: Vec<(String, <B as WriteBackendMethods>::ThinBuffer)>,
1031    lto_import_only_modules: Vec<(
1032        SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>,
1033        WorkProduct,
1034    )>,
1035) -> Vec<CompiledModule> {
1036    let _timer = prof.verbose_generic_activity("LLVM_thinlto");
1037
1038    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
1039    let dcx = dcx.handle();
1040
1041    check_lto_allowed(&cgcx, dcx);
1042
1043    let (coordinator_send, coordinator_receive) = channel();
1044
1045    // First up, convert our jobserver into a helper thread so we can use normal
1046    // mpsc channels to manage our messages and such.
1047    // Once we've requested tokens, they will arrive on
1048    // `coordinator_receive`, and the main loop below will
1049    // manage them.
1050    let coordinator_send2 = coordinator_send.clone();
1051    let helper = jobserver::client()
1052        .into_helper_thread(move |token| {
1053            drop(coordinator_send2.send(ThinLtoMessage::Token(token)));
1054        })
1055        .expect("failed to spawn helper thread");
1056
1057    let mut work_items = vec![];
1058
1059    // We have LTO work to do. Perform the serial work here of
1060    // figuring out what we're going to LTO and then push a
1061    // bunch of work items onto our queue to do LTO. This all
1062    // happens on the coordinator thread but it's very quick so
1063    // we don't worry about tokens.
1064    for (work, cost) in generate_thin_lto_work::<B>(
1065        cgcx,
1066        prof,
1067        dcx,
1068        &exported_symbols_for_lto,
1069        &each_linked_rlib_for_lto,
1070        needs_thin_lto,
1071        lto_import_only_modules,
1072    ) {
1073        let insertion_index =
1074            work_items.binary_search_by_key(&cost, |&(_, cost)| cost).unwrap_or_else(|e| e);
1075        work_items.insert(insertion_index, (work, cost));
1076        if cgcx.parallel {
1077            helper.request_token();
1078        }
1079    }
1080
1081    let mut codegen_aborted = None;
1082
1083    // These are the Jobserver Tokens we currently hold. Does not include
1084    // the implicit Token the compiler process owns no matter what.
1085    let mut tokens = vec![];
1086
1087    // Number of tokens that are in use (including the implicit token).
1088    let mut used_token_count = 0;
1089
1090    let mut compiled_modules = vec![];
1091
1092    // Run the message loop while there's still anything that needs message
1093    // processing. Note that as soon as codegen is aborted we simply want to
1094    // wait for all existing work to finish, so many of the conditions here
1095    // only apply if codegen hasn't been aborted as they represent pending
1096    // work to be done.
1097    loop {
1098        if codegen_aborted.is_none() {
1099            if used_token_count == 0 && work_items.is_empty() {
1100                // All codegen work is done.
1101                break;
1102            }
1103
1104            // Spin up what work we can, only doing this while we've got available
1105            // parallelism slots and work left to spawn.
1106            while used_token_count < tokens.len() + 1
1107                && let Some((item, _)) = work_items.pop()
1108            {
1109                spawn_thin_lto_work(
1110                    &cgcx,
1111                    prof,
1112                    shared_emitter.clone(),
1113                    Arc::clone(&tm_factory),
1114                    coordinator_send.clone(),
1115                    item,
1116                );
1117                used_token_count += 1;
1118            }
1119        } else {
1120            // Don't queue up any more work if codegen was aborted, we're
1121            // just waiting for our existing children to finish.
1122            if used_token_count == 0 {
1123                break;
1124            }
1125        }
1126
1127        // Relinquish accidentally acquired extra tokens. Subtract 1 for the implicit token.
1128        tokens.truncate(used_token_count.saturating_sub(1));
1129
1130        match coordinator_receive.recv().unwrap() {
1131            // Save the token locally and the next turn of the loop will use
1132            // this to spawn a new unit of work, or it may get dropped
1133            // immediately if we have no more work to spawn.
1134            ThinLtoMessage::Token(token) => match token {
1135                Ok(token) => {
1136                    tokens.push(token);
1137                }
1138                Err(e) => {
1139                    let msg = &format!("failed to acquire jobserver token: {e}");
1140                    shared_emitter.fatal(msg);
1141                    codegen_aborted = Some(FatalError);
1142                }
1143            },
1144
1145            ThinLtoMessage::WorkItem { result } => {
1146                // If a thread exits successfully then we drop a token associated
1147                // with that worker and update our `used_token_count` count.
1148                // We may later re-acquire a token to continue running more work.
1149                // We may also not actually drop a token here if the worker was
1150                // running with an "ephemeral token".
1151                used_token_count -= 1;
1152
1153                match result {
1154                    Ok(compiled_module) => compiled_modules.push(compiled_module),
1155                    Err(Some(WorkerFatalError)) => {
1156                        // Like `CodegenAborted`, wait for remaining work to finish.
1157                        codegen_aborted = Some(FatalError);
1158                    }
1159                    Err(None) => {
1160                        // If the thread failed that means it panicked, so
1161                        // we abort immediately.
1162                        bug!("worker thread panicked");
1163                    }
1164                }
1165            }
1166        }
1167    }
1168
1169    if let Some(codegen_aborted) = codegen_aborted {
1170        codegen_aborted.raise();
1171    }
1172
1173    compiled_modules
1174}
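// Illustrative token accounting for the loop above (hypothetical run with
// three work items on a parallel backend), where the `+ 1` is the implicit
// token the process always owns:
//
//     tokens = [],       used_token_count = 0 -> 1  // first item spawns immediately
//     tokens = [t1],     used_token_count = 1 -> 2  // after one Token message
//     tokens = [t1, t2], used_token_count = 2 -> 3  // all three items running
//
// Each completed WorkItem message then decrements `used_token_count`, and
// `tokens.truncate()` hands any surplus tokens back to the jobserver.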
1175
1176fn execute_thin_lto_work_item<B: ExtraBackendMethods>(
1177    cgcx: &CodegenContext,
1178    prof: &SelfProfilerRef,
1179    shared_emitter: SharedEmitter,
1180    tm_factory: TargetMachineFactoryFn<B>,
1181    module: lto::ThinModule<B>,
1182) -> CompiledModule {
1183    let _timer = prof.generic_activity_with_arg("codegen_module_perform_lto", module.name());
1184
1185    let module = B::optimize_thin(cgcx, prof, &shared_emitter, tm_factory, module);
1186    B::codegen(cgcx, prof, &shared_emitter, module, &cgcx.module_config)
1187}
1188
1189/// Messages sent to the coordinator.
1190pub(crate) enum Message<B: WriteBackendMethods> {
1191    /// A jobserver token has become available. Sent from the jobserver helper
1192    /// thread.
1193    Token(io::Result<Acquired>),
1194
1195    /// The backend has finished processing a work item for a codegen unit.
1196    /// Sent from a backend worker thread.
1197    WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>> },
1198
1199    /// The frontend has finished generating something (backend IR or a
1200    /// post-LTO artifact) for a codegen unit, and it should be passed to the
1201    /// backend. Sent from the main thread.
1202    CodegenDone { llvm_work_item: WorkItem<B>, cost: u64 },
1203
1204    /// Similar to `CodegenDone`, but for reusing a pre-LTO artifact.
1205    /// Sent from the main thread.
1206    AddImportOnlyModule {
1207        module_data: SerializedModule<B::ModuleBuffer>,
1208        work_product: WorkProduct,
1209    },
1210
1211    /// The frontend has finished generating everything for all codegen units.
1212    /// Sent from the main thread.
1213    CodegenComplete,
1214
1215    /// Some normal-ish compiler error occurred, and codegen should be wound
1216    /// down. Sent from the main thread.
1217    CodegenAborted,
1218}
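// Illustrative message sequence for a small two-CGU, non-LTO build
// (hypothetical interleaving): the main thread sends `CodegenDone` per CGU and
// `CodegenComplete` at the end, the helper thread interleaves `Token` grants,
// and each worker reports one `WorkItem` result:
//
//     CodegenDone -> Token(Ok(_)) -> CodegenDone -> CodegenComplete
//         -> WorkItem { result: Ok(_) } -> WorkItem { result: Ok(_) }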
1219
1220/// Messages sent to the thin-LTO coordinator.
1221pub(crate) enum ThinLtoMessage {
1222    /// A jobserver token has become available. Sent from the jobserver helper
1223    /// thread.
1224    Token(io::Result<Acquired>),
1225
1226    /// The backend has finished processing a work item for a codegen unit.
1227    /// Sent from a backend worker thread.
1228    WorkItem { result: Result<CompiledModule, Option<WorkerFatalError>> },
1229}
1230
1231/// A message sent from the coordinator thread to the main thread telling it to
1232/// process another codegen unit.
1233pub struct CguMessage;
1234
1235// A cut-down version of `rustc_errors::DiagInner` that impls `Send`, which
1236// can be used to send diagnostics from codegen threads to the main thread.
1237// It's missing the following fields from `rustc_errors::DiagInner`.
1238// - `span`: it doesn't impl `Send`.
1239// - `suggestions`: it doesn't impl `Send`, and isn't used for codegen
1240//   diagnostics.
1241// - `sort_span`: it doesn't impl `Send`.
1242// - `is_lint`: lints aren't relevant during codegen.
1243// - `emitted_at`: not used for codegen diagnostics.
1244struct Diagnostic {
1245    span: Vec<SpanData>,
1246    level: Level,
1247    messages: Vec<(DiagMessage, Style)>,
1248    code: Option<ErrCode>,
1249    children: Vec<Subdiagnostic>,
1250    args: DiagArgMap,
1251}
1252
1253// A cut-down version of `rustc_errors::Subdiag` that impls `Send`. It's
1254// missing the following fields from `rustc_errors::Subdiag`.
1255// - `span`: it doesn't impl `Send`.
1256struct Subdiagnostic {
1257    level: Level,
1258    messages: Vec<(DiagMessage, Style)>,
1259}
1260
1261#[derive(PartialEq, Clone, Copy, Debug)]
1262enum MainThreadState {
1263    /// Doing nothing.
1264    Idle,
1265
1266    /// Doing codegen, i.e. MIR-to-LLVM-IR conversion.
1267    Codegenning,
1268
1269    /// Idle, but lending the compiler process's Token to an LLVM thread so it can do useful work.
1270    Lending,
1271}
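// Illustrative transitions, as driven by the coordinator loop below:
//
//     Idle -> Codegenning   // coordinator sends a CguMessage
//     Codegenning -> Idle   // main thread answers with CodegenDone/CodegenComplete
//     Idle -> Lending       // implicit token loaned to an LLVM worker
//     Lending -> Idle       // a fresh token upgrades the worker, or its item completes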
1272
1273fn start_executing_work<B: ExtraBackendMethods>(
1274    backend: B,
1275    tcx: TyCtxt<'_>,
1276    crate_info: &CrateInfo,
1277    shared_emitter: SharedEmitter,
1278    codegen_worker_send: Sender<CguMessage>,
1279    coordinator_receive: Receiver<Message<B>>,
1280    regular_config: Arc<ModuleConfig>,
1281    allocator_config: Arc<ModuleConfig>,
1282    mut allocator_module: Option<ModuleCodegen<B::Module>>,
1283    coordinator_send: Sender<Message<B>>,
1284) -> thread::JoinHandle<Result<MaybeLtoModules<B>, ()>> {
1285    let sess = tcx.sess;
1286    let prof = sess.prof.clone();
1287
1288    let mut each_linked_rlib_for_lto = Vec::new();
1289    let mut each_linked_rlib_file_for_lto = Vec::new();
1290    drop(link::each_linked_rlib(crate_info, None, &mut |cnum, path| {
1291        if link::ignored_for_lto(sess, crate_info, cnum) {
1292            return;
1293        }
1294        each_linked_rlib_for_lto.push(cnum);
1295        each_linked_rlib_file_for_lto.push(path.to_path_buf());
1296    }));
1297
1298    // Compute the set of symbols we need to retain when doing LTO (if we need to)
1299    let exported_symbols_for_lto =
1300        Arc::new(lto::exported_symbols_for_lto(tcx, &each_linked_rlib_for_lto));
1301
1302    // First up, convert our jobserver into a helper thread so we can use normal
1303    // mpsc channels to manage our messages and such.
1304    // Once we've requested tokens, they will arrive on
1305    // `coordinator_receive`, and the main loop below will
1306    // manage them.
1307    let coordinator_send2 = coordinator_send.clone();
1308    let helper = jobserver::client()
1309        .into_helper_thread(move |token| {
1310            drop(coordinator_send2.send(Message::Token::<B>(token)));
1311        })
1312        .expect("failed to spawn helper thread");
1313
1314    let opt_level = tcx.backend_optimization_level(());
1315    let backend_features = tcx.global_backend_features(()).clone();
1316    let tm_factory = backend.target_machine_factory(tcx.sess, opt_level, &backend_features);
1317
1318    let remark_dir = if let Some(ref dir) = sess.opts.unstable_opts.remark_dir {
1319        let result = fs::create_dir_all(dir).and_then(|_| dir.canonicalize());
1320        match result {
1321            Ok(dir) => Some(dir),
1322            Err(error) => sess.dcx().emit_fatal(ErrorCreatingRemarkDir { error }),
1323        }
1324    } else {
1325        None
1326    };
1327
1328    let cgcx = CodegenContext {
1329        crate_types: tcx.crate_types().to_vec(),
1330        lto: sess.lto(),
1331        use_linker_plugin_lto: sess.opts.cg.linker_plugin_lto.enabled(),
1332        dylib_lto: sess.opts.unstable_opts.dylib_lto,
1333        prefer_dynamic: sess.opts.cg.prefer_dynamic,
1334        fewer_names: sess.fewer_names(),
1335        save_temps: sess.opts.cg.save_temps,
1336        time_trace: sess.opts.unstable_opts.llvm_time_trace,
1337        remark: sess.opts.cg.remark.clone(),
1338        remark_dir,
1339        incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
1340        output_filenames: Arc::clone(tcx.output_filenames(())),
1341        module_config: regular_config,
1342        opt_level,
1343        backend_features,
1344        msvc_imps_needed: msvc_imps_needed(tcx),
1345        is_pe_coff: tcx.sess.target.is_like_windows,
1346        target_can_use_split_dwarf: tcx.sess.target_can_use_split_dwarf(),
1347        target_arch: tcx.sess.target.arch.to_string(),
1348        target_is_like_darwin: tcx.sess.target.is_like_darwin,
1349        target_is_like_aix: tcx.sess.target.is_like_aix,
1350        target_is_like_gpu: tcx.sess.target.is_like_gpu,
1351        split_debuginfo: tcx.sess.split_debuginfo(),
1352        split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
1353        parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend,
1354        pointer_size: tcx.data_layout.pointer_size(),
1355        invocation_temp: sess.invocation_temp.clone(),
1356    };
1357
1358    // This is the "main loop" of parallel work happening for parallel codegen.
1359    // It's here that we manage parallelism, schedule work, and work with
1360    // messages coming from clients.
1361    //
1362    // There are a few environmental pre-conditions that shape how the system
1363    // is set up:
1364    //
1365    // - Error reporting can only happen on the main thread because that's the
1366    //   only place where we have access to the compiler `Session`.
1367    // - LLVM work can be done on any thread.
1368    // - Codegen can only happen on the main thread.
1369    // - Each thread doing substantial work must be in possession of a `Token`
1370    //   from the `Jobserver`.
1371    // - The compiler process always holds one `Token`. Any additional `Tokens`
1372    //   have to be requested from the `Jobserver`.
1373    //
1374    // Error Reporting
1375    // ===============
1376    // The error reporting restriction is handled separately from the rest: We
1377    // set up a `SharedEmitter` that holds an open channel to the main thread.
1378    // When an error occurs on any thread, the shared emitter will send the
1379    // error message to the receiver main thread (`SharedEmitterMain`). The
1380    // main thread will periodically query this error message queue and emit
1381    // any error messages it has received. It might even abort compilation if
1382    // it has received a fatal error. In this case we rely on all other threads
1383    // being torn down automatically with the main thread.
1384    // Since the main thread will often be busy doing codegen work, error
1385    // reporting will be somewhat delayed, since the message queue can only be
1386    // checked in between two work packages.
1387    //
1388    // Work Processing Infrastructure
1389    // ==============================
1390    // The work processing infrastructure knows three major actors:
1391    //
1392    // - the coordinator thread,
1393    // - the main thread, and
1394    // - LLVM worker threads
1395    //
1396    // The coordinator thread is running a message loop. It instructs the main
1397    // thread about what work to do when, and it will spawn off LLVM worker
1398    // threads as open LLVM WorkItems become available.
1399    //
1400    // The job of the main thread is to codegen CGUs into LLVM work packages
1401    // (since the main thread is the only thread that can do this). The main
1402    // thread will block until it receives a message from the coordinator, upon
1403    // which it will codegen one CGU, send it to the coordinator and block
1404    // again. This way the coordinator can control what the main thread is
1405    // doing.
1406    //
1407    // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
1408    // available, it will spawn off a new LLVM worker thread and let it process
1409    // a WorkItem. When an LLVM worker thread is done with its WorkItem,
1410    // it will just shut down, which also frees all resources associated with
1411    // the given LLVM module, and sends a message to the coordinator that the
1412    // WorkItem has been completed.
1413    //
1414    // Work Scheduling
1415    // ===============
1416    // The scheduler's goal is to minimize the time it takes to complete all
1417    // work there is; however, we also want to keep memory consumption low
1418    // if possible. These two goals are at odds with each other: If memory
1419    // consumption were not an issue, we could just let the main thread produce
1420    // LLVM WorkItems at full speed, assuring maximal utilization of
1421    // Tokens/LLVM worker threads. However, since codegen is usually faster
1422    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
1423    // WorkItem potentially holds on to a substantial amount of memory.
1424    //
1425    // So the actual goal is to always produce just enough LLVM WorkItems as
1426    // not to starve our LLVM worker threads. That means, once we have enough
1427    // WorkItems in our queue, we can block the main thread, so it does not
1428    // produce more until we need them.
1429    //
1430    // Doing LLVM Work on the Main Thread
1431    // ----------------------------------
1432    // Since the main thread owns the compiler process's implicit `Token`, it is
1433    // wasteful to keep it blocked without doing any work. Therefore, what we do
1434    // in this case is: We spawn off an additional LLVM worker thread that helps
1435    // reduce the queue. The work it is doing corresponds to the implicit
1436    // `Token`. The coordinator will mark the main thread as being busy with
1437    // LLVM work. (The actual work happens on another OS thread but we just care
1438    // about `Tokens`, not actual threads).
1439    //
1440    // When any LLVM worker thread finishes while the main thread is marked as
1441    // "busy with LLVM work", we can do a little switcheroo: We give the Token
1442    // of the just finished thread to the LLVM worker thread that is working on
1443    // behalf of the main thread's implicit Token, thus freeing up the main
1444    // thread again. The coordinator can then again decide what the main thread
1445    // should do. This allows the coordinator to make decisions at more points
1446    // in time.
1447    //
1448    // Striking a Balance between Throughput and Memory Consumption
1449    // ------------------------------------------------------------
1450    // Since our two goals, (1) use as many Tokens as possible and (2) keep
1451    // memory consumption as low as possible, are in conflict with each other,
1452    // we have to find a trade off between them. Right now, the goal is to keep
1453    // all workers busy, which means that no worker should find the queue empty
1454    // when it is ready to start.
1455    // How do we achieve this? Good question :) We actually never know how
1456    // many `Tokens` are potentially available so it's hard to say how much to
1457    // fill up the queue before switching the main thread to LLVM work. Also we
1458    // currently don't have a means to estimate how long a running LLVM worker
1459    // will still be busy with its current WorkItem. However, we know the
1460    // maximal count of available Tokens that makes sense (=the number of CPU
1461    // cores), so we can take a conservative guess. The heuristic we use here
1462    // is implemented in the `queue_full_enough()` function.
1463    //
1464    // Some Background on Jobservers
1465    // -----------------------------
1466    // It's worth also touching on the management of parallelism here. We don't
1467    // want to just spawn a thread per work item because, while that maximizes
1468    // parallelism, it may overload the system with too many threads or violate our
1469    // configuration for the maximum amount of CPU to use for this process. To
1470    // manage this we use the `jobserver` crate.
1471    //
1472    // Job servers are an artifact of GNU make and are used to manage
1473    // parallelism between processes. A jobserver is, basically, a glorified IPC
1474    // semaphore. Whenever we want to run some work we acquire the semaphore,
1475    // and whenever we're done with that work we release the semaphore. In this
1476    // manner we can ensure that the maximum number of parallel workers is
1477    // capped at any one point in time.
1478    //
1479    // LTO and the coordinator thread
1480    // ------------------------------
1481    //
1482    // The final job the coordinator thread is responsible for is managing LTO
1483    // and how that works. When LTO is requested what we'll do is collect all
1484    // optimized LLVM modules into a local vector on the coordinator. Once all
1485    // modules have been codegened and optimized we hand this to the `lto`
1486    // module for further optimization. The `lto` module will return back a list
1487    // of more modules to work on, which the coordinator will continue to spawn
1488    // work for.
1489    //
1490    // Each LLVM module is automatically sent back to the coordinator for LTO if
1491    // necessary. There are already optimizations in place to avoid sending work
1492    // back to the coordinator if LTO isn't requested.
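    // A minimal sketch of the token lifecycle this loop implements (assuming the
    // usual `jobserver` crate semantics, where dropping an `Acquired` token
    // releases it back to the jobserver):
    //
    //     helper.request_token();                  // queued work wants parallelism
    //     /* helper thread calls back */
    //     Message::Token(Ok(acquired))             // ...which lands here as a message,
    //     tokens.push(acquired);                   // is stored, and lets work spawn
    //     tokens.truncate(running_with_own_token); // surplus tokens are dropped,
    //                                              // i.e. returned to the jobserver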
1493    return B::spawn_named_thread(cgcx.time_trace, "coordinator".to_string(), move || {
1494        // This is where we collect codegen units that have gone all the way
1495        // through codegen and LLVM.
1496        let mut compiled_modules = vec![];
1497        let mut needs_fat_lto = Vec::new();
1498        let mut needs_thin_lto = Vec::new();
1499        let mut lto_import_only_modules = Vec::new();
1500
1501        /// Possible state transitions:
1502        /// - Ongoing -> Completed
1503        /// - Ongoing -> Aborted
1504        /// - Completed -> Aborted
1505        #[derive(Debug, PartialEq)]
1506        enum CodegenState {
1507            Ongoing,
1508            Completed,
1509            Aborted,
1510        }
1511        use CodegenState::*;
1512        let mut codegen_state = Ongoing;
1513
1514        // This is the queue of LLVM work items that still need processing.
1515        let mut work_items = Vec::<(WorkItem<B>, u64)>::new();
1516
1517        // These are the Jobserver Tokens we currently hold. Does not include
1518        // the implicit Token the compiler process owns no matter what.
1519        let mut tokens = Vec::new();
1520
1521        let mut main_thread_state = MainThreadState::Idle;
1522
1523        // How many LLVM worker threads are running while holding a Token. This
1524        // *excludes* any that the main thread is lending a Token to.
1525        let mut running_with_own_token = 0;
1526
1527        // How many LLVM worker threads are running in total. This *includes*
1528        // any that the main thread is lending a Token to.
1529        let running_with_any_token = |main_thread_state, running_with_own_token| {
1530            running_with_own_token
1531                + if main_thread_state == MainThreadState::Lending { 1 } else { 0 }
1532        };
1533
1534        let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;
1535
1536        if let Some(allocator_module) = &mut allocator_module {
1537            B::optimize(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config);
1538        }
1539
1540        // Run the message loop while there's still anything that needs message
1541        // processing. Note that as soon as codegen is aborted we simply want to
1542        // wait for all existing work to finish, so many of the conditions here
1543        // only apply if codegen hasn't been aborted as they represent pending
1544        // work to be done.
1545        loop {
1546            // While there are still CGUs to be codegened, the coordinator has
1547            // to decide how to utilize the compiler process's implicit Token:
1548            // for codegenning more CGUs or for running them through LLVM.
1549            if codegen_state == Ongoing {
1550                if main_thread_state == MainThreadState::Idle {
1551                    // Compute the number of workers that will be running once we've taken as many
1552                    // items from the work queue as we can, plus one for the main thread. It's not
1553                    // critically important that we use this instead of just
1554                    // `running_with_own_token`, but it prevents the `queue_full_enough` heuristic
1555                    // from fluctuating just because a worker finished up and we decreased the
1556                    // `running_with_own_token` count, even though we're just going to increase it
1557                    // right after this when we put a new worker to work.
1558                    let extra_tokens = tokens.len().checked_sub(running_with_own_token).unwrap();
1559                    let additional_running = std::cmp::min(extra_tokens, work_items.len());
1560                    let anticipated_running = running_with_own_token + additional_running + 1;
1561
1562                    if !queue_full_enough(work_items.len(), anticipated_running) {
1563                        // The queue is not full enough, process more codegen units:
1564                        if codegen_worker_send.send(CguMessage).is_err() {
1565                            panic!("Could not send CguMessage to main thread")
1566                        }
1567                        main_thread_state = MainThreadState::Codegenning;
1568                    } else {
1569                        // The queue is full enough to not let the worker
1570                        // threads starve. Use the implicit Token to do some
1571                        // LLVM work too.
1572                        let (item, _) =
1573                            work_items.pop().expect("queue empty - queue_full_enough() broken?");
1574                        main_thread_state = MainThreadState::Lending;
1575                        spawn_work(
1576                            &cgcx,
1577                            &prof,
1578                            shared_emitter.clone(),
1579                            coordinator_send.clone(),
1580                            &mut llvm_start_time,
1581                            item,
1582                        );
1583                    }
1584                }
1585            } else if codegen_state == Completed {
1586                if running_with_any_token(main_thread_state, running_with_own_token) == 0
1587                    && work_items.is_empty()
1588                {
1589                    // All codegen work is done.
1590                    break;
1591                }
1592
1593                // In this branch, we know that everything has been codegened,
1594                // so it's just a matter of determining whether the implicit
1595                // Token is free to use for LLVM work.
1596                match main_thread_state {
1597                    MainThreadState::Idle => {
1598                        if let Some((item, _)) = work_items.pop() {
1599                            main_thread_state = MainThreadState::Lending;
1600                            spawn_work(
1601                                &cgcx,
1602                                &prof,
1603                                shared_emitter.clone(),
1604                                coordinator_send.clone(),
1605                                &mut llvm_start_time,
1606                                item,
1607                            );
1608                        } else {
1609                            // There is no unstarted work, so let the main thread
1610                            // take over for a running worker. Otherwise the
1611                            // implicit token would just go to waste.
1612                            // We reduce the `running` counter by one. The
1613                            // `tokens.truncate()` below will take care of
1614                            // giving the Token back.
1615                            assert!(running_with_own_token > 0);
1616                            running_with_own_token -= 1;
1617                            main_thread_state = MainThreadState::Lending;
1618                        }
1619                    }
1620                    MainThreadState::Codegenning => bug!(
1621                        "codegen worker should not be codegenning after \
1622                              codegen was already completed"
1623                    ),
1624                    MainThreadState::Lending => {
1625                        // Already making good use of that token
1626                    }
1627                }
1628            } else {
1629                // Don't queue up any more work if codegen was aborted, we're
1630                // just waiting for our existing children to finish.
1631                assert!(codegen_state == Aborted);
1632                if running_with_any_token(main_thread_state, running_with_own_token) == 0 {
1633                    break;
1634                }
1635            }
1636
1637            // Spin up what work we can, only doing this while we've got available
1638            // parallelism slots and work left to spawn.
1639            if codegen_state != Aborted {
1640                while running_with_own_token < tokens.len()
1641                    && let Some((item, _)) = work_items.pop()
1642                {
1643                    spawn_work(
1644                        &cgcx,
1645                        &prof,
1646                        shared_emitter.clone(),
1647                        coordinator_send.clone(),
1648                        &mut llvm_start_time,
1649                        item,
1650                    );
1651                    running_with_own_token += 1;
1652                }
1653            }
1654
1655            // Relinquish accidentally acquired extra tokens.
1656            tokens.truncate(running_with_own_token);
1657
1658            match coordinator_receive.recv().unwrap() {
1659                // Save the token locally and the next turn of the loop will use
1660                // this to spawn a new unit of work, or it may get dropped
1661                // immediately if we have no more work to spawn.
1662                Message::Token(token) => {
1663                    match token {
1664                        Ok(token) => {
1665                            tokens.push(token);
1666
1667                            if main_thread_state == MainThreadState::Lending {
1668                                // If the main thread token is used for LLVM work
1669                                // at the moment, we turn that thread into a regular
1670                                // LLVM worker thread, so the main thread is free
1671                                // to react to codegen demand.
1672                                main_thread_state = MainThreadState::Idle;
1673                                running_with_own_token += 1;
1674                            }
1675                        }
1676                        Err(e) => {
1677                            let msg = &format!("failed to acquire jobserver token: {e}");
1678                            shared_emitter.fatal(msg);
1679                            codegen_state = Aborted;
1680                        }
1681                    }
1682                }
1683
1684                Message::CodegenDone { llvm_work_item, cost } => {
1685                    // We keep the queue sorted by estimated processing cost,
1686                    // so that more expensive items are processed earlier. This
1687                    // is good for throughput as it gives the main thread more
1688                    // time to fill up the queue and it avoids scheduling
1689                    // expensive items to the end.
1690                    // Note, however, that this is not ideal for memory
1691                    // consumption, as LLVM module sizes are not evenly
1692                    // distributed.
1693                    let insertion_index = work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
1694                    let insertion_index = match insertion_index {
1695                        Ok(idx) | Err(idx) => idx,
1696                    };
1697                    work_items.insert(insertion_index, (llvm_work_item, cost));
1698
1699                    if cgcx.parallel {
1700                        helper.request_token();
1701                    }
1702                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
1703                    main_thread_state = MainThreadState::Idle;
1704                }
1705
1706                Message::CodegenComplete => {
1707                    if codegen_state != Aborted {
1708                        codegen_state = Completed;
1709                    }
1710                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
1711                    main_thread_state = MainThreadState::Idle;
1712                }
1713
1714                // If codegen is aborted that means translation was aborted due
1715                // to some normal-ish compiler error. In this situation we want
1716                // to exit as soon as possible, but we want to make sure all
1717                // existing work has finished. Flag codegen as being done, and
1718                // then conditions above will ensure no more work is spawned but
1719                // we'll keep executing this loop until `running_with_own_token`
1720                // hits 0.
1721                Message::CodegenAborted => {
1722                    codegen_state = Aborted;
1723                }
1724
1725                Message::WorkItem { result } => {
1726                    // If a thread exits successfully then we drop a token associated
1727                    // with that worker and update our `running_with_own_token` count.
1728                    // We may later re-acquire a token to continue running more work.
1729                    // We may also not actually drop a token here if the worker was
1730                    // running with an "ephemeral token".
1731                    if main_thread_state == MainThreadState::Lending {
1732                        main_thread_state = MainThreadState::Idle;
1733                    } else {
1734                        running_with_own_token -= 1;
1735                    }
1736
1737                    match result {
1738                        Ok(WorkItemResult::Finished(compiled_module)) => {
1739                            compiled_modules.push(compiled_module);
1740                        }
1741                        Ok(WorkItemResult::NeedsFatLto(fat_lto_input)) => {
1742                            assert!(needs_thin_lto.is_empty());
1743                            needs_fat_lto.push(fat_lto_input);
1744                        }
1745                        Ok(WorkItemResult::NeedsThinLto(name, thin_buffer)) => {
1746                            assert!(needs_fat_lto.is_empty());
1747                            needs_thin_lto.push((name, thin_buffer));
1748                        }
1749                        Err(Some(WorkerFatalError)) => {
1750                            // Like `CodegenAborted`, wait for remaining work to finish.
1751                            codegen_state = Aborted;
1752                        }
1753                        Err(None) => {
1754                            // If the thread failed that means it panicked, so
1755                            // we abort immediately.
1756                            bug!("worker thread panicked");
1757                        }
1758                    }
1759                }
1760
1761                Message::AddImportOnlyModule { module_data, work_product } => {
1762                    assert_eq!(codegen_state, Ongoing);
1763                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
1764                    lto_import_only_modules.push((module_data, work_product));
1765                    main_thread_state = MainThreadState::Idle;
1766                }
1767            }
1768        }
1769
1770        // Drop to print timings
1771        drop(llvm_start_time);
1772
1773        if codegen_state == Aborted {
1774            return Err(());
1775        }
1776
1777        drop(codegen_state);
1778        drop(tokens);
1779        drop(helper);
1780        assert!(work_items.is_empty());
1781
1782        if !needs_fat_lto.is_empty() {
1783            assert!(compiled_modules.is_empty());
1784            assert!(needs_thin_lto.is_empty());
1785
1786            if let Some(allocator_module) = allocator_module.take() {
1787                needs_fat_lto.push(FatLtoInput::InMemory(allocator_module));
1788            }
1789
1790            return Ok(MaybeLtoModules::FatLto {
1791                cgcx,
1792                exported_symbols_for_lto,
1793                each_linked_rlib_file_for_lto,
1794                needs_fat_lto,
1795                lto_import_only_modules,
1796            });
1797        } else if !needs_thin_lto.is_empty() || !lto_import_only_modules.is_empty() {
1798            assert!(compiled_modules.is_empty());
1799            assert!(needs_fat_lto.is_empty());
1800
1801            if cgcx.lto == Lto::ThinLocal {
1802                compiled_modules.extend(do_thin_lto::<B>(
1803                    &cgcx,
1804                    &prof,
1805                    shared_emitter.clone(),
1806                    tm_factory,
1807                    exported_symbols_for_lto,
1808                    each_linked_rlib_file_for_lto,
1809                    needs_thin_lto,
1810                    lto_import_only_modules,
1811                ));
1812            } else {
1813                if let Some(allocator_module) = allocator_module.take() {
1814                    let (name, thin_buffer) = B::prepare_thin(allocator_module);
1815                    needs_thin_lto.push((name, thin_buffer));
1816                }
1817
1818                return Ok(MaybeLtoModules::ThinLto {
1819                    cgcx,
1820                    exported_symbols_for_lto,
1821                    each_linked_rlib_file_for_lto,
1822                    needs_thin_lto,
1823                    lto_import_only_modules,
1824                });
1825            }
1826        }
1827
1828        Ok(MaybeLtoModules::NoLto {
1829            modules: compiled_modules,
1830            allocator_module: allocator_module.map(|allocator_module| {
1831                B::codegen(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config)
1832            }),
1833        })
1834    })
1835    .expect("failed to spawn coordinator thread");
1836
1837    // A heuristic that determines if we have enough LLVM WorkItems in the
1838    // queue so that the main thread can do LLVM work instead of codegen
1839    fn queue_full_enough(items_in_queue: usize, workers_running: usize) -> bool {
1840        // This heuristic scales ahead-of-time codegen according to available
1841        // concurrency, as measured by `workers_running`. The idea is that the
1842        // more concurrency we have available, the more demand there will be for
1843        // work items, and the fuller the queue should be kept to meet demand.
1844        // An important property of this approach is that we codegen ahead of
1845        // time only as much as necessary, so as to keep fewer LLVM modules in
1846        // memory at once, thereby reducing memory consumption.
1847        //
1848        // When the number of workers running is less than the max concurrency
1849        // available to us, this heuristic can cause us to instruct the main
1850        // thread to work on an LLVM item (that is, tell it to "LLVM") instead
1851        // of codegen, even though it seems like it *should* be codegenning so
1852        // that we can create more work items and spawn more LLVM workers.
1853        //
1854        // But this is not a problem. When the main thread is told to LLVM,
1855        // according to this heuristic and how work is scheduled, there is
1856        // always at least one item in the queue, and therefore at least one
1857        // pending jobserver token request. If there *is* more concurrency
1858        // available, we will immediately receive a token, which will upgrade
1859        // the main thread's LLVM worker to a real one (conceptually), and free
1860        // up the main thread to codegen if necessary. On the other hand, if
1861        // there isn't more concurrency, then the main thread working on an LLVM
1862        // item is appropriate, as long as the queue is full enough for demand.
1863        //
1864        // Speaking of which, how full should we keep the queue? Probably less
1865        // full than you'd think. A lot has to go wrong for the queue not to be
1866        // full enough and for that to have a negative effect on compile times.
1867        //
1868        // Workers are unlikely to finish at exactly the same time, so when one
1869        // finishes and takes another work item off the queue, we often have
1870        // ample time to codegen at that point before the next worker finishes.
1871        // But suppose that codegen takes so long that the workers exhaust the
1872        // queue, and we have one or more workers that have nothing to work on.
1873        // Well, it might not be so bad. Of all the LLVM modules we create and
1874        // optimize, one has to finish last. It's not necessarily the case that
1875        // by losing some concurrency for a moment, we delay the point at which
1876        // that last LLVM module is finished and the rest of compilation can
1877        // proceed. Also, when we can't take advantage of some concurrency, we
1878        // give tokens back to the job server. That enables some other rustc to
1879        // potentially make use of the available concurrency. That could even
1880        // *decrease* overall compile time if we're lucky. But yes, if no other
1881        // rustc can make use of the concurrency, then we've squandered it.
1882        //
1883        // However, keeping the queue full is also beneficial when we have a
1884        // surge in available concurrency. Then items can be taken from the
1885        // queue immediately, without having to wait for codegen.
1886        //
1887        // So, the heuristic below tries to keep one item in the queue for every
1888        // four running workers. Based on limited benchmarking, this appears to
1889        // be more than sufficient to avoid increasing compilation times.
1890        let quarter_of_workers = workers_running - 3 * workers_running / 4;
1891        items_in_queue > 0 && items_in_queue >= quarter_of_workers
1892    }
1893}
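// A minimal, self-contained sketch (module and test names here are
// illustrative, not part of this file) checking the arithmetic in
// `queue_full_enough` above: `w - 3 * w / 4` is the integer ceiling of
// `w / 4`, so the heuristic asks for roughly one queued item per four
// running workers.
#[cfg(test)]
mod queue_full_enough_sketch {
    fn quarter_of_workers(workers_running: usize) -> usize {
        workers_running - 3 * workers_running / 4
    }

    #[test]
    fn matches_ceiling_division() {
        // e.g. 5 workers -> 2 queued items, 8 workers -> 2, 16 workers -> 4.
        for w in 0..100 {
            assert_eq!(quarter_of_workers(w), w.div_ceil(4));
        }
    }
}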
1894
1895/// `FatalError` is explicitly not `Send`.
1896#[must_use]
1897pub(crate) struct WorkerFatalError;
1898
1899fn spawn_work<'a, B: ExtraBackendMethods>(
1900    cgcx: &CodegenContext,
1901    prof: &'a SelfProfilerRef,
1902    shared_emitter: SharedEmitter,
1903    coordinator_send: Sender<Message<B>>,
1904    llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
1905    work: WorkItem<B>,
1906) {
1907    if llvm_start_time.is_none() {
1908        *llvm_start_time = Some(prof.verbose_generic_activity("LLVM_passes"));
1909    }
1910
1911    let cgcx = cgcx.clone();
1912    let prof = prof.clone();
1913
1914    B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
1915        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
1916            WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, &prof, shared_emitter, m),
1917            WorkItem::CopyPostLtoArtifacts(m) => WorkItemResult::Finished(
1918                execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m),
1919            ),
1920        }));
1921
1922        let msg = match result {
1923            Ok(result) => Message::WorkItem::<B> { result: Ok(result) },
1924
1925            // We ignore any `FatalError` coming out of `execute_work_item`, as a
1926            // diagnostic was already sent off to the main thread - just surface
1927            // that there was an error in this worker.
1928            Err(err) if err.is::<FatalErrorMarker>() => {
1929                Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) }
1930            }
1931
1932            Err(_) => Message::WorkItem::<B> { result: Err(None) },
1933        };
1934        drop(coordinator_send.send(msg));
1935    })
1936    .expect("failed to spawn work thread");
1937}
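// A minimal sketch of the worker pattern used in `spawn_work` above (and in
// `spawn_thin_lto_work` below), with hypothetical names (`Outcome`): run the
// work under `catch_unwind`, classify the result, and report it over a
// channel. A failed `send` only means the coordinator is gone, so it is
// deliberately ignored.
fn run_worker_sketch() {
    use std::panic::{AssertUnwindSafe, catch_unwind};
    use std::sync::mpsc::channel;
    use std::thread;

    enum Outcome {
        Finished(u32),
        Panicked, // corresponds to the `Err(None)` case above
    }

    let (tx, rx) = channel();
    thread::spawn(move || {
        let result = catch_unwind(AssertUnwindSafe(|| 2 + 2));
        let msg = match result {
            Ok(v) => Outcome::Finished(v),
            Err(_) => Outcome::Panicked,
        };
        drop(tx.send(msg)); // like `drop(coordinator_send.send(msg))` above
    });
    match rx.recv() {
        Ok(Outcome::Finished(v)) => println!("worker finished with {v}"),
        Ok(Outcome::Panicked) | Err(_) => eprintln!("worker failed"),
    }
}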
1938
1939fn spawn_thin_lto_work<B: ExtraBackendMethods>(
1940    cgcx: &CodegenContext,
1941    prof: &SelfProfilerRef,
1942    shared_emitter: SharedEmitter,
1943    tm_factory: TargetMachineFactoryFn<B>,
1944    coordinator_send: Sender<ThinLtoMessage>,
1945    work: ThinLtoWorkItem<B>,
1946) {
1947    let cgcx = cgcx.clone();
1948    let prof = prof.clone();
1949
1950    B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
1951        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
1952            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
1953                execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m)
1954            }
1955            ThinLtoWorkItem::ThinLto(m) => {
1956                execute_thin_lto_work_item(&cgcx, &prof, shared_emitter, tm_factory, m)
1957            }
1958        }));
1959
1960        let msg = match result {
1961            Ok(result) => ThinLtoMessage::WorkItem { result: Ok(result) },
1962
1963            // We ignore any `FatalError` coming out of `execute_work_item`, as a
1964            // diagnostic was already sent off to the main thread - just surface
1965            // that there was an error in this worker.
1966            Err(err) if err.is::<FatalErrorMarker>() => {
1967                ThinLtoMessage::WorkItem { result: Err(Some(WorkerFatalError)) }
1968            }
1969
1970            Err(_) => ThinLtoMessage::WorkItem { result: Err(None) },
1971        };
1972        drop(coordinator_send.send(msg));
1973    })
1974    .expect("failed to spawn work thread");
1975}
1976
1977enum SharedEmitterMessage {
1978    Diagnostic(Diagnostic),
1979    InlineAsmError(InlineAsmError),
1980    Fatal(String),
1981}
1982
1983pub struct InlineAsmError {
1984    pub span: SpanData,
1985    pub msg: String,
1986    pub level: Level,
1987    pub source: Option<(String, Vec<InnerSpan>)>,
1988}
1989
1990#[derive(Clone)]
1991pub struct SharedEmitter {
1992    sender: Sender<SharedEmitterMessage>,
1993}
1994
1995pub struct SharedEmitterMain {
1996    receiver: Receiver<SharedEmitterMessage>,
1997}
1998
1999impl SharedEmitter {
2000    fn new() -> (SharedEmitter, SharedEmitterMain) {
2001        let (sender, receiver) = channel();
2002
2003        (SharedEmitter { sender }, SharedEmitterMain { receiver })
2004    }
2005
2006    pub fn inline_asm_error(&self, err: InlineAsmError) {
2007        drop(self.sender.send(SharedEmitterMessage::InlineAsmError(err)));
2008    }
2009
2010    fn fatal(&self, msg: &str) {
2011        drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
2012    }
2013}
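// A minimal sketch of the split above, using a plain `String` payload in
// place of `SharedEmitterMessage`: the `Sender` half is cheaply cloned into
// worker threads, while the single `Receiver` stays with the main thread,
// which alone talks to the real diagnostics context.
fn shared_emitter_sketch() {
    use std::sync::mpsc::channel;
    use std::thread;

    let (sender, receiver) = channel::<String>();
    let worker = {
        let sender = sender.clone();
        thread::spawn(move || {
            // Workers only serialize and send; they never emit directly.
            drop(sender.send("error: something went wrong".to_string()));
        })
    };
    worker.join().unwrap();
    drop(sender); // close the channel so the drain below terminates

    // Mirrors `SharedEmitterMain::check` in blocking mode: drain until all
    // senders are gone.
    while let Ok(msg) = receiver.recv() {
        eprintln!("main thread emits: {msg}");
    }
}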
2014
2015impl Emitter for SharedEmitter {
2016    fn emit_diagnostic(&mut self, mut diag: rustc_errors::DiagInner) {
2017        // Check that we aren't missing anything interesting when converting to
2018        // the cut-down local `DiagInner`.
2019        assert!(!diag.span.has_span_labels());
2020        assert_eq!(diag.suggestions, Suggestions::Enabled(vec![]));
2021        assert_eq!(diag.sort_span, rustc_span::DUMMY_SP);
2022        assert_eq!(diag.is_lint, None);
2023        // No sensible check for `diag.emitted_at`.
2024
2025        let args = mem::replace(&mut diag.args, DiagArgMap::default());
2026        drop(
2027            self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
2028                span: diag.span.primary_spans().iter().map(|span| span.data()).collect::<Vec<_>>(),
2029                level: diag.level(),
2030                messages: diag.messages,
2031                code: diag.code,
2032                children: diag
2033                    .children
2034                    .into_iter()
2035                    .map(|child| Subdiagnostic { level: child.level, messages: child.messages })
2036                    .collect(),
2037                args,
2038            })),
2039        );
2040    }
2041
2042    fn source_map(&self) -> Option<&SourceMap> {
2043        None
2044    }
2045
2046    fn translator(&self) -> &Translator {
2047        panic!("shared emitter attempted to translate a diagnostic");
2048    }
2049}
2050
2051impl SharedEmitterMain {
2052    fn check(&self, sess: &Session, blocking: bool) {
2053        loop {
2054            let message = if blocking {
2055                match self.receiver.recv() {
2056                    Ok(message) => Ok(message),
2057                    Err(_) => Err(()),
2058                }
2059            } else {
2060                match self.receiver.try_recv() {
2061                    Ok(message) => Ok(message),
2062                    Err(_) => Err(()),
2063                }
2064            };
2065
2066            match message {
2067                Ok(SharedEmitterMessage::Diagnostic(diag)) => {
2068                    // The diagnostic has been received on the main thread.
2069                    // Convert it back to a full `Diagnostic` and emit.
2070                    let dcx = sess.dcx();
2071                    let mut d =
2072                        rustc_errors::DiagInner::new_with_messages(diag.level, diag.messages);
2073                    d.span = MultiSpan::from_spans(
2074                        diag.span.into_iter().map(|span| span.span()).collect(),
2075                    );
2076                    d.code = diag.code; // may be `None`, that's ok
2077                    d.children = diag
2078                        .children
2079                        .into_iter()
2080                        .map(|sub| rustc_errors::Subdiag {
2081                            level: sub.level,
2082                            messages: sub.messages,
2083                            span: MultiSpan::new(),
2084                        })
2085                        .collect();
2086                    d.args = diag.args;
2087                    dcx.emit_diagnostic(d);
2088                    sess.dcx().abort_if_errors();
2089                }
2090                Ok(SharedEmitterMessage::InlineAsmError(inner)) => {
2091                    assert_matches!(inner.level, Level::Error | Level::Warning | Level::Note);
2092                    let mut err = Diag::<()>::new(sess.dcx(), inner.level, inner.msg);
2093                    if !inner.span.is_dummy() {
2094                        err.span(inner.span.span());
2095                    }
2096
2097                    // Point to the generated assembly if it is available.
2098                    if let Some((buffer, spans)) = inner.source {
2099                        let source = sess
2100                            .source_map()
2101                            .new_source_file(FileName::inline_asm_source_code(&buffer), buffer);
2102                        let spans: Vec<_> = spans
2103                            .iter()
2104                            .map(|sp| {
2105                                Span::with_root_ctxt(
2106                                    source.normalized_byte_pos(sp.start as u32),
2107                                    source.normalized_byte_pos(sp.end as u32),
2108                                )
2109                            })
2110                            .collect();
2111                        err.span_note(spans, "instantiated into assembly here");
2112                    }
2113
2114                    err.emit();
2115                }
2116                Ok(SharedEmitterMessage::Fatal(msg)) => {
2117                    sess.dcx().fatal(msg);
2118                }
2119                Err(_) => {
2120                    break;
2121                }
2122            }
2123        }
2124    }
2125}
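// A minimal sketch (hypothetical function name) of the blocking/non-blocking
// split in `check` above: `recv` parks until a message arrives or the channel
// disconnects, `try_recv` returns immediately, and both outcomes are folded
// into the same `Result` shape before being handled.
fn drain_sketch(blocking: bool, receiver: &std::sync::mpsc::Receiver<String>) {
    loop {
        let message = if blocking {
            receiver.recv().map_err(|_| ())
        } else {
            receiver.try_recv().map_err(|_| ())
        };
        match message {
            Ok(msg) => println!("handle {msg}"),
            // Disconnected, or (in the non-blocking case) momentarily empty.
            Err(()) => break,
        }
    }
}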
2126
2127pub struct Coordinator<B: ExtraBackendMethods> {
2128    sender: Sender<Message<B>>,
2129    future: Option<thread::JoinHandle<Result<MaybeLtoModules<B>, ()>>>,
2130    // Only used for the Message type.
2131    phantom: PhantomData<B>,
2132}
2133
2134impl<B: ExtraBackendMethods> Coordinator<B> {
2135    fn join(mut self) -> std::thread::Result<Result<MaybeLtoModules<B>, ()>> {
2136        self.future.take().unwrap().join()
2137    }
2138}
2139
2140impl<B: ExtraBackendMethods> Drop for Coordinator<B> {
2141    fn drop(&mut self) {
2142        if let Some(future) = self.future.take() {
2143            // If we haven't joined yet, signal to the coordinator that it should spawn no more
2144            // work, and wait for worker threads to finish.
2145            drop(self.sender.send(Message::CodegenAborted::<B>));
2146            drop(future.join());
2147        }
2148    }
2149}
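// A minimal sketch (hypothetical `StopOnDrop` type) of the RAII idea in
// `Drop for Coordinator` above: if the handle is dropped without an explicit
// join, signal the background thread to stop and then wait for it, so no
// detached worker outlives the handle.
struct StopOnDrop {
    stop: std::sync::mpsc::Sender<()>,
    handle: Option<std::thread::JoinHandle<()>>,
}

impl Drop for StopOnDrop {
    fn drop(&mut self) {
        if let Some(handle) = self.handle.take() {
            drop(self.stop.send(())); // best-effort, like `Message::CodegenAborted`
            drop(handle.join()); // ignore the result, like `drop(future.join())` above
        }
    }
}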
2150
2151pub struct OngoingCodegen<B: ExtraBackendMethods> {
2152    pub backend: B,
2153    pub crate_info: CrateInfo,
2154    pub output_filenames: Arc<OutputFilenames>,
2155    // The field order below is deliberate: `coordinator` must drop first, terminating the
2156    // coordinator thread before the two fields below it drop and prematurely close channels
2157    // the coordinator thread still uses. See `Coordinator`'s `Drop` impl and the sketch below.
2158    pub coordinator: Coordinator<B>,
2159    pub codegen_worker_receive: Receiver<CguMessage>,
2160    pub shared_emitter_main: SharedEmitterMain,
2161}
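// Rust drops struct fields in declaration order, which is what the field
// ordering in `OngoingCodegen` above relies on. A minimal demonstration with
// hypothetical types:
struct Loud(&'static str);

impl Drop for Loud {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

#[allow(dead_code)]
struct Ordered {
    coordinator: Loud, // dropped first: shuts the coordinator thread down...
    receiver: Loud,    // ...while the channels it uses are still alive
}

// Dropping `Ordered { coordinator: Loud("coordinator"), receiver: Loud("receiver") }`
// prints "dropping coordinator" before "dropping receiver".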
2162
2163impl<B: ExtraBackendMethods> OngoingCodegen<B> {
2164    pub fn join(self, sess: &Session) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
2165        self.shared_emitter_main.check(sess, true);
2166
2167        let maybe_lto_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
2168            Ok(Ok(maybe_lto_modules)) => maybe_lto_modules,
2169            Ok(Err(())) => {
2170                sess.dcx().abort_if_errors();
2171                panic!("expected abort due to worker thread errors")
2172            }
2173            Err(_) => {
2174                bug!("panic during codegen/LLVM phase");
2175            }
2176        });
2177
2178        sess.dcx().abort_if_errors();
2179
2180        let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
2181
2182        // Catch fatal errors to ensure shared_emitter_main.check() can emit the actual diagnostics
2183        let compiled_modules = catch_fatal_errors(|| match maybe_lto_modules {
2184            MaybeLtoModules::NoLto { modules, allocator_module } => {
2185                drop(shared_emitter);
2186                CompiledModules { modules, allocator_module }
2187            }
2188            MaybeLtoModules::FatLto {
2189                cgcx,
2190                exported_symbols_for_lto,
2191                each_linked_rlib_file_for_lto,
2192                needs_fat_lto,
2193                lto_import_only_modules,
2194            } => {
2195                let tm_factory = self.backend.target_machine_factory(
2196                    sess,
2197                    cgcx.opt_level,
2198                    &cgcx.backend_features,
2199                );
2200
2201                CompiledModules {
2202                    modules: vec![do_fat_lto(
2203                        &cgcx,
2204                        &sess.prof,
2205                        shared_emitter,
2206                        tm_factory,
2207                        &exported_symbols_for_lto,
2208                        &each_linked_rlib_file_for_lto,
2209                        needs_fat_lto,
2210                        lto_import_only_modules,
2211                    )],
2212                    allocator_module: None,
2213                }
2214            }
2215            MaybeLtoModules::ThinLto {
2216                cgcx,
2217                exported_symbols_for_lto,
2218                each_linked_rlib_file_for_lto,
2219                needs_thin_lto,
2220                lto_import_only_modules,
2221            } => {
2222                let tm_factory = self.backend.target_machine_factory(
2223                    sess,
2224                    cgcx.opt_level,
2225                    &cgcx.backend_features,
2226                );
2227
2228                CompiledModules {
2229                    modules: do_thin_lto::<B>(
2230                        &cgcx,
2231                        &sess.prof,
2232                        shared_emitter,
2233                        tm_factory,
2234                        exported_symbols_for_lto,
2235                        each_linked_rlib_file_for_lto,
2236                        needs_thin_lto,
2237                        lto_import_only_modules,
2238                    ),
2239                    allocator_module: None,
2240                }
2241            }
2242        });
2243
2244        shared_emitter_main.check(sess, true);
2245
2246        sess.dcx().abort_if_errors();
2247
2248        let mut compiled_modules =
2249            compiled_modules.expect("fatal error emitted but not sent to SharedEmitter");
2250
2251        // Regardless of what order these modules completed in, report them to
2252        // the backend in the same order every time to ensure that we're handing
2253        // out deterministic results.
2254        compiled_modules.modules.sort_by(|a, b| a.name.cmp(&b.name));
2255
2256        let work_products =
2257            copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
2258        produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);
2259
2260        // FIXME: time_llvm_passes support - does this use a global context or
2261        // something?
2262        if sess.codegen_units().as_usize() == 1 && sess.opts.unstable_opts.time_llvm_passes {
2263            self.backend.print_pass_timings()
2264        }
2265
2266        if sess.print_llvm_stats() {
2267            self.backend.print_statistics()
2268        }
2269
2270        (
2271            CodegenResults {
2272                crate_info: self.crate_info,
2273
2274                modules: compiled_modules.modules,
2275                allocator_module: compiled_modules.allocator_module,
2276            },
2277            work_products,
2278        )
2279    }
2280
2281    pub(crate) fn codegen_finished(&self, tcx: TyCtxt<'_>) {
2282        self.wait_for_signal_to_codegen_item();
2283        self.check_for_errors(tcx.sess);
2284        drop(self.coordinator.sender.send(Message::CodegenComplete::<B>));
2285    }
2286
2287    pub(crate) fn check_for_errors(&self, sess: &Session) {
2288        self.shared_emitter_main.check(sess, false);
2289    }
2290
2291    pub(crate) fn wait_for_signal_to_codegen_item(&self) {
2292        match self.codegen_worker_receive.recv() {
2293            Ok(CguMessage) => {
2294                // Ok to proceed.
2295            }
2296            Err(_) => {
2297                // One of the LLVM threads must have panicked, fall through so
2298                // error handling can be reached.
2299            }
2300        }
2301    }
2302}
2303
2304pub(crate) fn submit_codegened_module_to_llvm<B: ExtraBackendMethods>(
2305    coordinator: &Coordinator<B>,
2306    module: ModuleCodegen<B::Module>,
2307    cost: u64,
2308) {
2309    let llvm_work_item = WorkItem::Optimize(module);
2310    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost }));
2311}
2312
2313pub(crate) fn submit_post_lto_module_to_llvm<B: ExtraBackendMethods>(
2314    coordinator: &Coordinator<B>,
2315    module: CachedModuleCodegen,
2316) {
2317    let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
2318    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost: 0 }));
2319}
2320
2321pub(crate) fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
2322    tcx: TyCtxt<'_>,
2323    coordinator: &Coordinator<B>,
2324    module: CachedModuleCodegen,
2325) {
2326    let filename = pre_lto_bitcode_filename(&module.name);
2327    let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename);
2328    let file = fs::File::open(&bc_path)
2329        .unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));
2330
2331    let mmap = unsafe {
2332        Mmap::map(file).unwrap_or_else(|e| {
2333            panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
2334        })
2335    };
2336    // Schedule the module to be loaded
2337    drop(coordinator.sender.send(Message::AddImportOnlyModule::<B> {
2338        module_data: SerializedModule::FromUncompressedFile(mmap),
2339        work_product: module.source,
2340    }));
2341}
2342
2343fn pre_lto_bitcode_filename(module_name: &str) -> String {
2344    format!("{module_name}.{PRE_LTO_BC_EXT}")
2345}
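// For example, a module named "my_crate.0" (an illustrative name) maps to
// "my_crate.0.pre-lto.bc"; `submit_pre_lto_module_to_llvm` above reopens
// exactly this file from the incremental compilation directory via
// `in_incr_comp_dir_sess`.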
2346
2347fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
2348    // This should never be true (because it's not supported). If it is true,
2349    // something is wrong with command-line argument validation.
2350    assert!(
2351        !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
2352            && tcx.sess.target.is_like_windows
2353            && tcx.sess.opts.cg.prefer_dynamic)
2354    );
2355
2356    // We need to generate `__imp_` symbols if we are generating an rlib, or if we include
2357    // one indirectly via ThinLTO. In theory these are not needed, as ThinLTO could resolve
2358    // them itself, but it currently does not do so.
2359    let can_have_static_objects =
2360        tcx.sess.lto() == Lto::Thin || tcx.crate_types().contains(&CrateType::Rlib);
2361
2362    tcx.sess.target.is_like_windows &&
2363    can_have_static_objects &&
2364    // ThinLTO can't handle this workaround in all cases, so we don't
2365    // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
2366    // dynamic linking when linker plugin LTO is enabled.
2367    !tcx.sess.opts.cg.linker_plugin_lto.enabled()
2368}