use std::marker::PhantomData;
use std::panic::AssertUnwindSafe;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender, channel};
use std::{fs, io, mem, str, thread};

use rustc_abi::Size;
use rustc_data_structures::assert_matches;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::jobserver::{self, Acquired};
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
use rustc_errors::emitter::Emitter;
use rustc_errors::translation::Translator;
use rustc_errors::{
    Diag, DiagArgMap, DiagCtxt, DiagCtxtHandle, DiagMessage, ErrCode, FatalError, FatalErrorMarker,
    Level, MultiSpan, Style, Suggestions, catch_fatal_errors,
};
use rustc_fs_util::link_or_copy;
use rustc_hir::attrs::AttributeKind;
use rustc_hir::find_attr;
use rustc_incremental::{
    copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
};
use rustc_macros::{Decodable, Encodable};
use rustc_metadata::fs::copy_to_stdout;
use rustc_middle::bug;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_session::config::{
    self, CrateType, Lto, OptLevel, OutFileName, OutputFilenames, OutputType, Passes,
    SwitchWithOptPath,
};
use rustc_span::source_map::SourceMap;
use rustc_span::{FileName, InnerSpan, Span, SpanData};
use rustc_target::spec::{MergeFunctions, SanitizerSet};
use tracing::debug;

use super::link::{self, ensure_removed};
use super::lto::{self, SerializedModule};
use crate::back::lto::check_lto_allowed;
use crate::errors::ErrorCreatingRemarkDir;
use crate::traits::*;
use crate::{
    CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
    errors,
};

const PRE_LTO_BC_EXT: &str = "pre-lto.bc";

/// What kind of object file to emit.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable)]
pub enum EmitObj {
    // No object file.
    None,

    // Just uncompressed LLVM bitcode. Provides easy compatibility with
    // emscripten's emcc compiler, when used as the linker.
    Bitcode,

    // Object code, possibly augmented with a bitcode section.
    ObjectCode(BitcodeSection),
}

/// What kind of LLVM bitcode section to embed in an object file.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable)]
pub enum BitcodeSection {
    // No bitcode section.
    None,

    // A full, uncompressed bitcode section.
    Full,
}

/// Module-specific configuration for `optimize_and_codegen`.
#[derive(Encodable, Decodable)]
pub struct ModuleConfig {
    /// Names of additional optimization passes to run.
    pub passes: Vec<String>,
    /// Some(level) to optimize at a certain level, or None to run
    /// absolutely no optimizations (used for the allocator module).
    pub opt_level: Option<config::OptLevel>,

    pub pgo_gen: SwitchWithOptPath,
    pub pgo_use: Option<PathBuf>,
    pub pgo_sample_use: Option<PathBuf>,
    pub debug_info_for_profiling: bool,
    pub instrument_coverage: bool,

    pub sanitizer: SanitizerSet,
    pub sanitizer_recover: SanitizerSet,
    pub sanitizer_dataflow_abilist: Vec<String>,
    pub sanitizer_memory_track_origins: usize,

    // Flags indicating which outputs to produce.
    pub emit_pre_lto_bc: bool,
    pub emit_no_opt_bc: bool,
    pub emit_bc: bool,
    pub emit_ir: bool,
    pub emit_asm: bool,
    pub emit_obj: EmitObj,
    pub emit_thin_lto: bool,
    pub emit_thin_lto_summary: bool,

    // Miscellaneous flags. These are mostly copied from command-line
    // options.
    pub verify_llvm_ir: bool,
    pub lint_llvm_ir: bool,
    pub no_prepopulate_passes: bool,
    pub no_builtins: bool,
    pub vectorize_loop: bool,
    pub vectorize_slp: bool,
    pub merge_functions: bool,
    pub emit_lifetime_markers: bool,
    pub llvm_plugins: Vec<String>,
    pub autodiff: Vec<config::AutoDiff>,
    pub offload: Vec<config::Offload>,
}

impl ModuleConfig {
    fn new(kind: ModuleKind, tcx: TyCtxt<'_>, no_builtins: bool) -> ModuleConfig {
        // If it's a regular module, use `$regular`, otherwise use `$other`.
        // `$regular` and `$other` are evaluated lazily.
        macro_rules! if_regular {
            ($regular: expr, $other: expr) => {
                if let ModuleKind::Regular = kind { $regular } else { $other }
            };
        }

        let sess = tcx.sess;
        let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);

        let save_temps = sess.opts.cg.save_temps;

        let should_emit_obj = sess.opts.output_types.contains_key(&OutputType::Exe)
            || match kind {
                ModuleKind::Regular => sess.opts.output_types.contains_key(&OutputType::Object),
                ModuleKind::Allocator => false,
            };

        let emit_obj = if !should_emit_obj {
            EmitObj::None
        } else if sess.target.obj_is_bitcode
            || (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins)
        {
            // This case is selected if the target uses objects as bitcode, or
            // if linker plugin LTO is enabled. In the linker plugin LTO case
            // the assumption is that the final link-step will read the bitcode
            // and convert it to object code. This may be done by either the
            // native linker or rustc itself.
            //
            // Note, however, that the linker-plugin-lto requested here is
            // explicitly ignored for `#![no_builtins]` crates. These crates are
            // specifically ignored by rustc's LTO passes and wouldn't work if
            // loaded into the linker. These crates define symbols that LLVM
            // lowers intrinsics to, and these symbol dependencies aren't known
            // until after codegen. As a result any crate marked
            // `#![no_builtins]` is assumed to not participate in LTO and
            // instead goes on to generate object code.
            EmitObj::Bitcode
        } else if need_bitcode_in_object(tcx) {
            EmitObj::ObjectCode(BitcodeSection::Full)
        } else {
            EmitObj::ObjectCode(BitcodeSection::None)
        };

        ModuleConfig {
            passes: if_regular!(sess.opts.cg.passes.clone(), vec![]),

            opt_level: opt_level_and_size,

            pgo_gen: if_regular!(
                sess.opts.cg.profile_generate.clone(),
                SwitchWithOptPath::Disabled
            ),
            pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
            pgo_sample_use: if_regular!(sess.opts.unstable_opts.profile_sample_use.clone(), None),
            debug_info_for_profiling: sess.opts.unstable_opts.debug_info_for_profiling,
            instrument_coverage: if_regular!(sess.instrument_coverage(), false),

            sanitizer: if_regular!(sess.sanitizers(), SanitizerSet::empty()),
            sanitizer_dataflow_abilist: if_regular!(
                sess.opts.unstable_opts.sanitizer_dataflow_abilist.clone(),
                Vec::new()
            ),
            sanitizer_recover: if_regular!(
                sess.opts.unstable_opts.sanitizer_recover,
                SanitizerSet::empty()
            ),
            sanitizer_memory_track_origins: if_regular!(
                sess.opts.unstable_opts.sanitizer_memory_track_origins,
                0
            ),

            emit_pre_lto_bc: if_regular!(
                save_temps || need_pre_lto_bitcode_for_incr_comp(sess),
                false
            ),
            emit_no_opt_bc: if_regular!(save_temps, false),
            emit_bc: if_regular!(
                save_temps || sess.opts.output_types.contains_key(&OutputType::Bitcode),
                save_temps
            ),
            emit_ir: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::LlvmAssembly),
                false
            ),
            emit_asm: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::Assembly),
                false
            ),
            emit_obj,
            // thin lto summaries prevent fat lto, so do not emit them if fat
            // lto is requested. See PR #136840 for background information.
            emit_thin_lto: sess.opts.unstable_opts.emit_thin_lto && sess.lto() != Lto::Fat,
            emit_thin_lto_summary: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::ThinLinkBitcode),
                false
            ),

            verify_llvm_ir: sess.verify_llvm_ir(),
            lint_llvm_ir: sess.opts.unstable_opts.lint_llvm_ir,
            no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
            no_builtins: no_builtins || sess.target.no_builtins,

            // Copy what clang does by turning on loop vectorization at O2 and
            // slp vectorization at O3.
            vectorize_loop: !sess.opts.cg.no_vectorize_loops
                && (sess.opts.optimize == config::OptLevel::More
                    || sess.opts.optimize == config::OptLevel::Aggressive),
            vectorize_slp: !sess.opts.cg.no_vectorize_slp
                && sess.opts.optimize == config::OptLevel::Aggressive,

            // Some targets (namely, NVPTX) interact badly with the
            // MergeFunctions pass. This is because MergeFunctions can generate
            // new function calls which may interfere with the target calling
            // convention; e.g. for the NVPTX target, PTX kernels should not
            // call other PTX kernels. MergeFunctions can also be configured to
            // generate aliases instead, but aliases are not supported by some
            // backends (again, NVPTX). Therefore, allow targets to opt out of
            // the MergeFunctions pass, but otherwise keep the pass enabled (at
            // O2 and O3) since it can be useful for reducing code size.
            merge_functions: match sess
                .opts
                .unstable_opts
                .merge_functions
                .unwrap_or(sess.target.merge_functions)
            {
                MergeFunctions::Disabled => false,
                MergeFunctions::Trampolines | MergeFunctions::Aliases => {
                    use config::OptLevel::*;
                    match sess.opts.optimize {
                        Aggressive | More | SizeMin | Size => true,
                        Less | No => false,
                    }
                }
            },

            emit_lifetime_markers: sess.emit_lifetime_markers(),
            llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
            autodiff: if_regular!(sess.opts.unstable_opts.autodiff.clone(), vec![]),
            offload: if_regular!(sess.opts.unstable_opts.offload.clone(), vec![]),
        }
    }

    pub fn bitcode_needed(&self) -> bool {
        self.emit_bc
            || self.emit_thin_lto_summary
            || self.emit_obj == EmitObj::Bitcode
            || self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }

    pub fn embed_bitcode(&self) -> bool {
        self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }
}
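
// Illustrative sketch, not part of the original source: which `emit_obj`
// values imply embedding bitcode in the object file, independent of the
// other flags consulted by `bitcode_needed` above.
#[cfg(test)]
mod emit_obj_bitcode_examples {
    use super::{BitcodeSection, EmitObj};

    #[test]
    fn only_full_section_object_code_embeds_bitcode() {
        // Mirrors the comparison in `embed_bitcode`.
        let embeds = |e: EmitObj| e == EmitObj::ObjectCode(BitcodeSection::Full);
        assert!(embeds(EmitObj::ObjectCode(BitcodeSection::Full)));
        // Pure bitcode output is emitted as-is, not embedded in an object.
        assert!(!embeds(EmitObj::Bitcode));
        assert!(!embeds(EmitObj::ObjectCode(BitcodeSection::None)));
    }
}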

/// Configuration passed to the function returned by the `target_machine_factory`.
pub struct TargetMachineFactoryConfig {
    /// Split DWARF is enabled in LLVM by checking that `TM.MCOptions.SplitDwarfFile` isn't empty,
    /// so the path to the dwarf object has to be provided when we create the target machine.
    /// This can be ignored by backends which do not need it for their Split DWARF support.
    pub split_dwarf_file: Option<PathBuf>,

    /// The name of the output object file. Used for setting `OutputFilenames` in target options
    /// so that LLVM can emit the CodeView S_OBJNAME record in PDB files.
    pub output_obj_file: Option<PathBuf>,
}

impl TargetMachineFactoryConfig {
    pub fn new(cgcx: &CodegenContext, module_name: &str) -> TargetMachineFactoryConfig {
        let split_dwarf_file = if cgcx.target_can_use_split_dwarf {
            cgcx.output_filenames.split_dwarf_path(
                cgcx.split_debuginfo,
                cgcx.split_dwarf_kind,
                module_name,
                cgcx.invocation_temp.as_deref(),
            )
        } else {
            None
        };

        let output_obj_file = Some(cgcx.output_filenames.temp_path_for_cgu(
            OutputType::Object,
            module_name,
            cgcx.invocation_temp.as_deref(),
        ));
        TargetMachineFactoryConfig { split_dwarf_file, output_obj_file }
    }
}

pub type TargetMachineFactoryFn<B> = Arc<
    dyn Fn(
            DiagCtxtHandle<'_>,
            TargetMachineFactoryConfig,
        ) -> <B as WriteBackendMethods>::TargetMachine
        + Send
        + Sync,
>;

/// Additional resources used by `optimize_and_codegen` (not module specific).
#[derive(Clone, Encodable, Decodable)]
pub struct CodegenContext {
    // Resources needed when running LTO
    pub lto: Lto,
    pub use_linker_plugin_lto: bool,
    pub dylib_lto: bool,
    pub prefer_dynamic: bool,
    pub save_temps: bool,
    pub fewer_names: bool,
    pub time_trace: bool,
    pub crate_types: Vec<CrateType>,
    pub output_filenames: Arc<OutputFilenames>,
    pub invocation_temp: Option<String>,
    pub module_config: Arc<ModuleConfig>,
    pub opt_level: OptLevel,
    pub backend_features: Vec<String>,
    pub msvc_imps_needed: bool,
    pub is_pe_coff: bool,
    pub target_can_use_split_dwarf: bool,
    pub target_arch: String,
    pub target_is_like_darwin: bool,
    pub target_is_like_aix: bool,
    pub target_is_like_gpu: bool,
    pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
    pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
    pub pointer_size: Size,

    /// LLVM optimizations for which we want to print remarks.
    pub remark: Passes,
    /// Directory into which the LLVM optimization remarks should be written.
    /// If `None`, they will be written to stderr.
    pub remark_dir: Option<PathBuf>,
    /// The incremental compilation session directory, or `None` if we are not
    /// compiling incrementally.
    pub incr_comp_session_dir: Option<PathBuf>,
    /// `true` if the codegen should be run in parallel.
    ///
    /// Depends on [`ExtraBackendMethods::supports_parallel()`] and `-Zno_parallel_backend`.
    pub parallel: bool,
}

fn generate_thin_lto_work<B: ExtraBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    dcx: DiagCtxtHandle<'_>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    needs_thin_lto: Vec<(String, B::ThinBuffer)>,
    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> Vec<(ThinLtoWorkItem<B>, u64)> {
    let _prof_timer = prof.generic_activity("codegen_thin_generate_lto_work");

    let (lto_modules, copy_jobs) = B::run_thin_lto(
        cgcx,
        prof,
        dcx,
        exported_symbols_for_lto,
        each_linked_rlib_for_lto,
        needs_thin_lto,
        import_only_modules,
    );
    lto_modules
        .into_iter()
        .map(|module| {
            let cost = module.cost();
            (ThinLtoWorkItem::ThinLto(module), cost)
        })
        .chain(copy_jobs.into_iter().map(|wp| {
            (
                ThinLtoWorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
                    name: wp.cgu_name.clone(),
                    source: wp,
                }),
                0, // copying is very cheap
            )
        }))
        .collect()
}
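
// Illustrative sketch, not part of the original source: `do_thin_lto` below
// keeps the `(work, cost)` pairs produced above in a vector sorted by
// ascending cost and schedules with `pop()`, so the costliest module starts
// first. A minimal model of that ordering, with plain strings standing in
// for `ThinLtoWorkItem`s:
#[cfg(test)]
mod thin_lto_cost_queue_model {
    #[test]
    fn costliest_work_is_popped_first() {
        let mut work_items: Vec<(&str, u64)> = vec![];
        // Hypothetical costs: copy jobs are 0 (see above), real modules
        // carry their `module.cost()`.
        for (work, cost) in [("copy", 0), ("big-module", 90), ("small-module", 10)] {
            let insertion_index =
                work_items.binary_search_by_key(&cost, |&(_, cost)| cost).unwrap_or_else(|e| e);
            work_items.insert(insertion_index, (work, cost));
        }
        // `pop()` takes from the back of the ascending-sorted vector.
        assert_eq!(work_items.pop(), Some(("big-module", 90)));
    }
}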

pub struct CompiledModules {
    pub modules: Vec<CompiledModule>,
    pub allocator_module: Option<CompiledModule>,
}

enum MaybeLtoModules<B: WriteBackendMethods> {
    NoLto {
        modules: Vec<CompiledModule>,
        allocator_module: Option<CompiledModule>,
    },
    FatLto {
        cgcx: CodegenContext,
        exported_symbols_for_lto: Arc<Vec<String>>,
        each_linked_rlib_file_for_lto: Vec<PathBuf>,
        needs_fat_lto: Vec<FatLtoInput<B>>,
        lto_import_only_modules:
            Vec<(SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>, WorkProduct)>,
    },
    ThinLto {
        cgcx: CodegenContext,
        exported_symbols_for_lto: Arc<Vec<String>>,
        each_linked_rlib_file_for_lto: Vec<PathBuf>,
        needs_thin_lto: Vec<(String, <B as WriteBackendMethods>::ThinBuffer)>,
        lto_import_only_modules:
            Vec<(SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>, WorkProduct)>,
    },
}

fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
    let sess = tcx.sess;
    sess.opts.cg.embed_bitcode
        && tcx.crate_types().contains(&CrateType::Rlib)
        && sess.opts.output_types.contains_key(&OutputType::Exe)
}

fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
    if sess.opts.incremental.is_none() {
        return false;
    }

    match sess.lto() {
        Lto::No => false,
        Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
    }
}

pub(crate) fn start_async_codegen<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    target_cpu: String,
    allocator_module: Option<ModuleCodegen<B::Module>>,
) -> OngoingCodegen<B> {
    let (coordinator_send, coordinator_receive) = channel();

    let crate_attrs = tcx.hir_attrs(rustc_hir::CRATE_HIR_ID);
    let no_builtins = find_attr!(crate_attrs, AttributeKind::NoBuiltins);

    let crate_info = CrateInfo::new(tcx, target_cpu);

    let regular_config = ModuleConfig::new(ModuleKind::Regular, tcx, no_builtins);
    let allocator_config = ModuleConfig::new(ModuleKind::Allocator, tcx, no_builtins);

    let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
    let (codegen_worker_send, codegen_worker_receive) = channel();

    let coordinator_thread = start_executing_work(
        backend.clone(),
        tcx,
        &crate_info,
        shared_emitter,
        codegen_worker_send,
        coordinator_receive,
        Arc::new(regular_config),
        Arc::new(allocator_config),
        allocator_module,
        coordinator_send.clone(),
    );

    OngoingCodegen {
        backend,
        crate_info,

        codegen_worker_receive,
        shared_emitter_main,
        coordinator: Coordinator {
            sender: coordinator_send,
            future: Some(coordinator_thread),
            phantom: PhantomData,
        },
        output_filenames: Arc::clone(tcx.output_filenames(())),
    }
}

fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
    sess: &Session,
    compiled_modules: &CompiledModules,
) -> FxIndexMap<WorkProductId, WorkProduct> {
    let mut work_products = FxIndexMap::default();

    if sess.opts.incremental.is_none() {
        return work_products;
    }

    let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");

    for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
        let mut files = Vec::new();
        if let Some(object_file_path) = &module.object {
            files.push((OutputType::Object.extension(), object_file_path.as_path()));
        }
        if let Some(dwarf_object_file_path) = &module.dwarf_object {
            files.push(("dwo", dwarf_object_file_path.as_path()));
        }
        if let Some(path) = &module.assembly {
            files.push((OutputType::Assembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.llvm_ir {
            files.push((OutputType::LlvmAssembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.bytecode {
            files.push((OutputType::Bitcode.extension(), path.as_path()));
        }
        if let Some((id, product)) = copy_cgu_workproduct_to_incr_comp_cache_dir(
            sess,
            &module.name,
            files.as_slice(),
            &module.links_from_incr_cache,
        ) {
            work_products.insert(id, product);
        }
    }

    work_products
}

pub fn produce_final_output_artifacts(
    sess: &Session,
    compiled_modules: &CompiledModules,
    crate_output: &OutputFilenames,
) {
    let mut user_wants_bitcode = false;
    let mut user_wants_objects = false;

    // Produce final compile outputs.
    let copy_gracefully = |from: &Path, to: &OutFileName| match to {
        OutFileName::Stdout if let Err(e) = copy_to_stdout(from) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, to.as_path(), e));
        }
        OutFileName::Real(path) if let Err(e) = fs::copy(from, path) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, path, e));
        }
        _ => {}
    };

    let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
        if let [module] = &compiled_modules.modules[..] {
            // 1) Only one codegen unit. In this case it's no difficulty
            //    to copy `foo.0.x` to `foo.x`.
            let path = crate_output.temp_path_for_cgu(
                output_type,
                &module.name,
                sess.invocation_temp.as_deref(),
            );
            let output = crate_output.path(output_type);
            if !output_type.is_text_output() && output.is_tty() {
                sess.dcx()
                    .emit_err(errors::BinaryOutputToTty { shorthand: output_type.shorthand() });
            } else {
                copy_gracefully(&path, &output);
            }
            if !sess.opts.cg.save_temps && !keep_numbered {
                // The user just wants `foo.x`, not `foo.#module-name#.x`.
                ensure_removed(sess.dcx(), &path);
            }
        } else {
            if crate_output.outputs.contains_explicit_name(&output_type) {
                // 2) Multiple codegen units, with `--emit foo=some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx()
                    .emit_warn(errors::IgnoringEmitPath { extension: output_type.extension() });
            } else if crate_output.single_output_file.is_some() {
                // 3) Multiple codegen units, with `-o some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx().emit_warn(errors::IgnoringOutput { extension: output_type.extension() });
            } else {
                // 4) Multiple codegen units, but no explicit name. We
                //    just leave the `foo.0.x` files in place.
                //    (We don't have to do any work in this case.)
            }
        }
    };

    // Flag to indicate whether the user explicitly requested bitcode.
    // Otherwise, we produced it only as a temporary output, and will need
    // to get rid of it.
    for output_type in crate_output.outputs.keys() {
        match *output_type {
            OutputType::Bitcode => {
                user_wants_bitcode = true;
                // Copy to .bc, but always keep the .0.bc. There is a later
                // check to figure out if we should delete .0.bc files, or keep
                // them for making an rlib.
                copy_if_one_unit(OutputType::Bitcode, true);
            }
            OutputType::ThinLinkBitcode => {
                copy_if_one_unit(OutputType::ThinLinkBitcode, false);
            }
            OutputType::LlvmAssembly => {
                copy_if_one_unit(OutputType::LlvmAssembly, false);
            }
            OutputType::Assembly => {
                copy_if_one_unit(OutputType::Assembly, false);
            }
            OutputType::Object => {
                user_wants_objects = true;
                copy_if_one_unit(OutputType::Object, true);
            }
            OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
        }
    }

    // Clean up unwanted temporary files.

    // We create the following files by default:
    //  - #crate#.#module-name#.bc
    //  - #crate#.#module-name#.o
    //  - #crate#.crate.metadata.bc
    //  - #crate#.crate.metadata.o
    //  - #crate#.o (linked from crate.##.o)
    //  - #crate#.bc (copied from crate.##.bc)
    // We may create additional files if requested by the user (through
    // `-C save-temps` or `--emit=` flags).

    if !sess.opts.cg.save_temps {
        // Remove the temporary .#module-name#.o objects. If the user didn't
        // explicitly request bitcode (with --emit=bc), and the bitcode is not
        // needed for building an rlib, then we must remove .#module-name#.bc as
        // well.

        // Specific rules for keeping .#module-name#.bc:
        //  - If the user requested bitcode (`user_wants_bitcode`), and
        //    codegen_units > 1, then keep it.
        //  - If the user requested bitcode but codegen_units == 1, then we
        //    can toss .#module-name#.bc because we copied it to .bc earlier.
        //  - If we're not building an rlib and the user didn't request
        //    bitcode, then delete .#module-name#.bc.
        // If you change how this works, also update back::link::link_rlib,
        // where .#module-name#.bc files are (maybe) deleted after making an
        // rlib.
        let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);

        let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units().as_usize() > 1;

        let keep_numbered_objects =
            needs_crate_object || (user_wants_objects && sess.codegen_units().as_usize() > 1);

        for module in compiled_modules.modules.iter() {
            if !keep_numbered_objects {
                if let Some(ref path) = module.object {
                    ensure_removed(sess.dcx(), path);
                }

                if let Some(ref path) = module.dwarf_object {
                    ensure_removed(sess.dcx(), path);
                }
            }

            if let Some(ref path) = module.bytecode {
                if !keep_numbered_bitcode {
                    ensure_removed(sess.dcx(), path);
                }
            }
        }

        if !user_wants_bitcode
            && let Some(ref allocator_module) = compiled_modules.allocator_module
            && let Some(ref path) = allocator_module.bytecode
        {
            ensure_removed(sess.dcx(), path);
        }
    }

    if sess.opts.json_artifact_notifications {
        if let [module] = &compiled_modules.modules[..] {
            module.for_each_output(|_path, ty| {
                if sess.opts.output_types.contains_key(&ty) {
                    let descr = ty.shorthand();
                    // For a single CGU, the file is renamed to drop the
                    // CGU-specific suffix, so regenerate the final path the
                    // same way.
                    let path = crate_output.path(ty);
                    sess.dcx().emit_artifact_notification(path.as_path(), descr);
                }
            });
        } else {
            for module in &compiled_modules.modules {
                module.for_each_output(|path, ty| {
                    if sess.opts.output_types.contains_key(&ty) {
                        let descr = ty.shorthand();
                        sess.dcx().emit_artifact_notification(&path, descr);
                    }
                });
            }
        }
    }

    // We leave the following files around by default:
    //  - #crate#.o
    //  - #crate#.crate.metadata.o
    //  - #crate#.bc
    // These are used in linking steps and will be cleaned up afterward.
}
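
// Illustrative sketch, not part of the original source: the keep/delete rule
// above for numbered bitcode files, spelled out on hypothetical flag values.
#[cfg(test)]
mod numbered_bitcode_rule_examples {
    #[test]
    fn numbered_bitcode_survives_only_multi_cgu_bitcode_requests() {
        for (user_wants_bitcode, codegen_units, expect_keep) in
            [(true, 16, true), (true, 1, false), (false, 16, false)]
        {
            // Mirrors `keep_numbered_bitcode` in the function above.
            let keep = user_wants_bitcode && codegen_units > 1;
            assert_eq!(keep, expect_keep);
        }
    }
}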

pub(crate) enum WorkItem<B: WriteBackendMethods> {
    /// Optimize a newly codegened, totally unoptimized module.
    Optimize(ModuleCodegen<B::Module>),
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
}

enum ThinLtoWorkItem<B: WriteBackendMethods> {
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
    /// Performs thin LTO on the given module.
    ThinLto(lto::ThinModule<B>),
}

// `pthread_setname()` on *nix ignores anything beyond the first 15
// bytes. Use short descriptions to maximize the space available for
// the module name.
#[cfg(not(windows))]
fn desc(short: &str, _long: &str, name: &str) -> String {
    // The short label is three bytes, and is followed by a space. That
    // leaves 11 bytes for the CGU name. How we obtain those 11 bytes
    // depends on the CGU name form.
    //
    // - Non-incremental, e.g. `regex.f10ba03eb5ec7975-cgu.0`: the part
    //   before the `-cgu.0` is the same for every CGU, so use the
    //   `cgu.0` part. The number suffix will be different for each
    //   CGU.
    //
    // - Incremental (normal), e.g. `2i52vvl2hco29us0`: use the whole
    //   name because each CGU will have a unique ASCII hash, and the
    //   first 11 bytes will be enough to identify it.
    //
    // - Incremental (with `-Zhuman-readable-cgu-names`), e.g.
    //   `regex.f10ba03eb5ec7975-re_builder.volatile`: use the whole
    //   name. The first 11 bytes won't be enough to uniquely identify
    //   it, but no obvious substring will, and this is a rarely used
    //   option so it doesn't matter much.
    //
    assert_eq!(short.len(), 3);
    let name = if let Some(index) = name.find("-cgu.") {
        &name[index + 1..] // +1 skips the leading '-'.
    } else {
        name
    };
    format!("{short} {name}")
}
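
// Illustrative sketch, not part of the original source: what the shortening
// above produces for the two common CGU name forms described in the comment.
#[cfg(all(test, not(windows)))]
mod desc_thread_name_examples {
    use super::desc;

    #[test]
    fn shortens_only_non_incremental_cgu_names() {
        // Non-incremental names keep only the distinguishing `cgu.N` suffix.
        assert_eq!(desc("opt", "optimize module", "regex.f10ba03eb5ec7975-cgu.0"), "opt cgu.0");
        // Incremental hash names have no `-cgu.` part and are used whole.
        assert_eq!(desc("lto", "thin-LTO module", "2i52vvl2hco29us0"), "lto 2i52vvl2hco29us0");
    }
}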

// Windows has no thread name length limit, so use more descriptive names.
#[cfg(windows)]
fn desc(_short: &str, long: &str, name: &str) -> String {
    format!("{long} {name}")
}

impl<B: WriteBackendMethods> WorkItem<B> {
    /// Generate a short description of this work item suitable for use as a thread name.
    fn short_description(&self) -> String {
        match self {
            WorkItem::Optimize(m) => desc("opt", "optimize module", &m.name),
            WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for", &m.name),
        }
    }
}

impl<B: WriteBackendMethods> ThinLtoWorkItem<B> {
    /// Generate a short description of this work item suitable for use as a thread name.
    fn short_description(&self) -> String {
        match self {
            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
                desc("cpy", "copy LTO artifacts for", &m.name)
            }
            ThinLtoWorkItem::ThinLto(m) => desc("lto", "thin-LTO module", m.name()),
        }
    }
}

/// A result produced by the backend.
pub(crate) enum WorkItemResult<B: WriteBackendMethods> {
    /// The backend has finished compiling a CGU, nothing more required.
    Finished(CompiledModule),

    /// The backend has finished compiling a CGU, which now needs to go through
    /// fat LTO.
    NeedsFatLto(FatLtoInput<B>),

    /// The backend has finished compiling a CGU, which now needs to go through
    /// thin LTO.
    NeedsThinLto(String, B::ThinBuffer),
}

pub enum FatLtoInput<B: WriteBackendMethods> {
    Serialized { name: String, buffer: SerializedModule<B::ModuleBuffer> },
    InMemory(ModuleCodegen<B::Module>),
}

/// The LTO type we actually end up choosing, based on multiple factors.
pub(crate) enum ComputedLtoType {
    No,
    Thin,
    Fat,
}

pub(crate) fn compute_per_cgu_lto_type(
    sess_lto: &Lto,
    linker_does_lto: bool,
    sess_crate_types: &[CrateType],
) -> ComputedLtoType {
    // If the linker does LTO, we don't have to do it. Note that we
    // keep doing full LTO, if it is requested, so as not to break the
    // assumption that the output will be a single module.

    // We ignore a request for full crate graph LTO if the crate type
    // is only an rlib, as there is no full crate graph to process;
    // that'll happen later.
    //
    // This use case currently comes up primarily for targets that
    // require LTO, so the request for LTO is always unconditionally
    // passed down to the backend, but we don't actually want to do
    // anything about it yet until we've got a final product.
    let is_rlib = matches!(sess_crate_types, [CrateType::Rlib]);

    match sess_lto {
        Lto::ThinLocal if !linker_does_lto => ComputedLtoType::Thin,
        Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
        Lto::Fat if !is_rlib => ComputedLtoType::Fat,
        _ => ComputedLtoType::No,
    }
}
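
// Illustrative sketch, not part of the original source: how the decision
// above plays out for a few configurations. `CrateType::Executable` is just
// one convenient non-rlib crate type.
#[cfg(test)]
mod per_cgu_lto_type_examples {
    use super::*;

    #[test]
    fn rlib_only_and_linker_lto_defer_to_later_steps() {
        // Fat LTO over a pure-rlib crate graph is deferred until a final
        // product is linked.
        assert!(matches!(
            compute_per_cgu_lto_type(&Lto::Fat, false, &[CrateType::Rlib]),
            ComputedLtoType::No
        ));
        // If the linker performs LTO itself, thin LTO is skipped here.
        assert!(matches!(
            compute_per_cgu_lto_type(&Lto::Thin, true, &[CrateType::Executable]),
            ComputedLtoType::No
        ));
        // Otherwise a thin LTO request is honored per CGU.
        assert!(matches!(
            compute_per_cgu_lto_type(&Lto::Thin, false, &[CrateType::Executable]),
            ComputedLtoType::Thin
        ));
    }
}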

fn execute_optimize_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    mut module: ModuleCodegen<B::Module>,
) -> WorkItemResult<B> {
    let _timer = prof.generic_activity_with_arg("codegen_module_optimize", &*module.name);

    B::optimize(cgcx, prof, &shared_emitter, &mut module, &cgcx.module_config);

    // After we've done the initial round of optimizations we need to
    // decide whether to synchronously codegen this module or ship it
    // back to the coordinator thread for further LTO processing (which
    // has to wait for all the initial modules to be optimized).

    let lto_type =
        compute_per_cgu_lto_type(&cgcx.lto, cgcx.use_linker_plugin_lto, &cgcx.crate_types);

    // If we're doing some form of incremental LTO then we need to be sure to
    // save our module to disk first.
    let bitcode = if cgcx.module_config.emit_pre_lto_bc {
        let filename = pre_lto_bitcode_filename(&module.name);
        cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
    } else {
        None
    };

    match lto_type {
        ComputedLtoType::No => {
            let module = B::codegen(cgcx, &prof, &shared_emitter, module, &cgcx.module_config);
            WorkItemResult::Finished(module)
        }
        ComputedLtoType::Thin => {
            let (name, thin_buffer) = B::prepare_thin(module);
            if let Some(path) = bitcode {
                fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
            }
            WorkItemResult::NeedsThinLto(name, thin_buffer)
        }
        ComputedLtoType::Fat => match bitcode {
            Some(path) => {
                let (name, buffer) = B::serialize_module(module);
                fs::write(&path, buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
                WorkItemResult::NeedsFatLto(FatLtoInput::Serialized {
                    name,
                    buffer: SerializedModule::Local(buffer),
                })
            }
            None => WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module)),
        },
    }
}

fn execute_copy_from_cache_work_item(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    module: CachedModuleCodegen,
) -> CompiledModule {
    let _timer =
        prof.generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name);

    let dcx = DiagCtxt::new(Box::new(shared_emitter));
    let dcx = dcx.handle();

    let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();

    let mut links_from_incr_cache = Vec::new();

    let mut load_from_incr_comp_dir = |output_path: PathBuf, saved_path: &str| {
        let source_file = in_incr_comp_dir(incr_comp_session_dir, saved_path);
        debug!(
            "copying preexisting module `{}` from {:?} to {}",
            module.name,
            source_file,
            output_path.display()
        );
        match link_or_copy(&source_file, &output_path) {
            Ok(_) => {
                links_from_incr_cache.push(source_file);
                Some(output_path)
            }
            Err(error) => {
                dcx.emit_err(errors::CopyPathBuf { source_file, output_path, error });
                None
            }
        }
    };

    let dwarf_object =
        module.source.saved_files.get("dwo").as_ref().and_then(|saved_dwarf_object_file| {
            let dwarf_obj_out = cgcx
                .output_filenames
                .split_dwarf_path(
                    cgcx.split_debuginfo,
                    cgcx.split_dwarf_kind,
                    &module.name,
                    cgcx.invocation_temp.as_deref(),
                )
                .expect(
                    "saved dwarf object in work product but `split_dwarf_path` returned `None`",
                );
            load_from_incr_comp_dir(dwarf_obj_out, saved_dwarf_object_file)
        });

    let mut load_from_incr_cache = |perform, output_type: OutputType| {
        if perform {
            let saved_file = module.source.saved_files.get(output_type.extension())?;
            let output_path = cgcx.output_filenames.temp_path_for_cgu(
                output_type,
                &module.name,
                cgcx.invocation_temp.as_deref(),
            );
            load_from_incr_comp_dir(output_path, &saved_file)
        } else {
            None
        }
    };

    let module_config = &cgcx.module_config;
    let should_emit_obj = module_config.emit_obj != EmitObj::None;
    let assembly = load_from_incr_cache(module_config.emit_asm, OutputType::Assembly);
    let llvm_ir = load_from_incr_cache(module_config.emit_ir, OutputType::LlvmAssembly);
    let bytecode = load_from_incr_cache(module_config.emit_bc, OutputType::Bitcode);
    let object = load_from_incr_cache(should_emit_obj, OutputType::Object);
    if should_emit_obj && object.is_none() {
        dcx.emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
    }

    CompiledModule {
        links_from_incr_cache,
        kind: ModuleKind::Regular,
        name: module.name,
        object,
        dwarf_object,
        bytecode,
        assembly,
        llvm_ir,
    }
}

fn do_fat_lto<B: ExtraBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    tm_factory: TargetMachineFactoryFn<B>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    mut needs_fat_lto: Vec<FatLtoInput<B>>,
    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> CompiledModule {
    let _timer = prof.verbose_generic_activity("LLVM_fatlto");

    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
    let dcx = dcx.handle();

    check_lto_allowed(&cgcx, dcx);

    for (module, wp) in import_only_modules {
        needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
    }

    let module = B::run_and_optimize_fat_lto(
        cgcx,
        prof,
        &shared_emitter,
        tm_factory,
        exported_symbols_for_lto,
        each_linked_rlib_for_lto,
        needs_fat_lto,
    );
    B::codegen(cgcx, prof, &shared_emitter, module, &cgcx.module_config)
}
10221023fn do_thin_lto<B: ExtraBackendMethods>(
1024 cgcx: &CodegenContext,
1025 prof: &SelfProfilerRef,
1026 shared_emitter: SharedEmitter,
1027 tm_factory: TargetMachineFactoryFn<B>,
1028 exported_symbols_for_lto: Arc<Vec<String>>,
1029 each_linked_rlib_for_lto: Vec<PathBuf>,
1030 needs_thin_lto: Vec<(String, <B as WriteBackendMethods>::ThinBuffer)>,
1031 lto_import_only_modules: Vec<(
1032SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>,
1033WorkProduct,
1034 )>,
1035) -> Vec<CompiledModule> {
1036let _timer = prof.verbose_generic_activity("LLVM_thinlto");
10371038let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
1039let dcx = dcx.handle();
10401041check_lto_allowed(&cgcx, dcx);
10421043let (coordinator_send, coordinator_receive) = channel();
10441045// First up, convert our jobserver into a helper thread so we can use normal
1046 // mpsc channels to manage our messages and such.
1047 // After we've requested tokens then we'll, when we can,
1048 // get tokens on `coordinator_receive` which will
1049 // get managed in the main loop below.
1050let coordinator_send2 = coordinator_send.clone();
1051let helper = jobserver::client()
1052 .into_helper_thread(move |token| {
1053drop(coordinator_send2.send(ThinLtoMessage::Token(token)));
1054 })
1055 .expect("failed to spawn helper thread");
10561057let mut work_items = ::alloc::vec::Vec::new()vec![];
10581059// We have LTO work to do. Perform the serial work here of
1060 // figuring out what we're going to LTO and then push a
1061 // bunch of work items onto our queue to do LTO. This all
1062 // happens on the coordinator thread but it's very quick so
1063 // we don't worry about tokens.
    for (work, cost) in generate_thin_lto_work::<B>(
        cgcx,
        prof,
        dcx,
        &exported_symbols_for_lto,
        &each_linked_rlib_for_lto,
        needs_thin_lto,
        lto_import_only_modules,
    ) {
        let insertion_index =
            work_items.binary_search_by_key(&cost, |&(_, cost)| cost).unwrap_or_else(|e| e);
        work_items.insert(insertion_index, (work, cost));
        if cgcx.parallel {
            helper.request_token();
        }
    }

    let mut codegen_aborted = None;

    // These are the Jobserver Tokens we currently hold. Does not include
    // the implicit Token the compiler process owns no matter what.
    let mut tokens = vec![];

    // Number of tokens currently in use (including the implicit token).
    let mut used_token_count = 0;

    let mut compiled_modules = vec![];

    // Run the message loop while there's still anything that needs message
    // processing. Note that as soon as codegen is aborted we simply want to
    // wait for all existing work to finish, so many of the conditions here
    // only apply if codegen hasn't been aborted as they represent pending
    // work to be done.
    loop {
        if codegen_aborted.is_none() {
            if used_token_count == 0 && work_items.is_empty() {
                // All codegen work is done.
                break;
            }

            // Spin up what work we can, only doing this while we've got available
            // parallelism slots and work left to spawn.
            while used_token_count < tokens.len() + 1
                && let Some((item, _)) = work_items.pop()
            {
                spawn_thin_lto_work(
                    &cgcx,
                    prof,
                    shared_emitter.clone(),
                    Arc::clone(&tm_factory),
                    coordinator_send.clone(),
                    item,
                );
                used_token_count += 1;
            }
        } else {
            // Don't queue up any more work if codegen was aborted, we're
            // just waiting for our existing children to finish.
            if used_token_count == 0 {
                break;
            }
        }

        // Relinquish accidentally acquired extra tokens. Subtract 1 for the implicit token.
        tokens.truncate(used_token_count.saturating_sub(1));
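        // E.g. with `used_token_count == 3` (the implicit token plus two
        // jobserver tokens in use), we keep at most two `Acquired` tokens and
        // release any extras back to the jobserver by dropping them.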

        match coordinator_receive.recv().unwrap() {
            // Save the token locally and the next turn of the loop will use
            // this to spawn a new unit of work, or it may get dropped
            // immediately if we have no more work to spawn.
            ThinLtoMessage::Token(token) => match token {
                Ok(token) => {
                    tokens.push(token);
                }
                Err(e) => {
                    let msg = &format!("failed to acquire jobserver token: {e}");
                    shared_emitter.fatal(msg);
                    codegen_aborted = Some(FatalError);
                }
            },

            ThinLtoMessage::WorkItem { result } => {
                // When a worker exits successfully we drop the token associated
                // with it and decrement `used_token_count`. We may later
                // re-acquire a token to continue running more work. We may also
                // not actually drop a token here if the worker was running with
                // an "ephemeral token".
                used_token_count -= 1;

                match result {
                    Ok(compiled_module) => compiled_modules.push(compiled_module),
                    Err(Some(WorkerFatalError)) => {
                        // Like `CodegenAborted`, wait for remaining work to finish.
                        codegen_aborted = Some(FatalError);
                    }
                    Err(None) => {
                        // If the thread failed that means it panicked, so
                        // we abort immediately.
                        bug!("worker thread panicked");
                    }
                }
            }
        }
    }

    if let Some(codegen_aborted) = codegen_aborted {
        codegen_aborted.raise();
    }

    compiled_modules
}

fn execute_thin_lto_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    tm_factory: TargetMachineFactoryFn<B>,
    module: lto::ThinModule<B>,
) -> CompiledModule {
    let _timer = prof.generic_activity_with_arg("codegen_module_perform_lto", module.name());

    let module = B::optimize_thin(cgcx, prof, &shared_emitter, tm_factory, module);
    B::codegen(cgcx, prof, &shared_emitter, module, &cgcx.module_config)
}

/// Messages sent to the coordinator.
pub(crate) enum Message<B: WriteBackendMethods> {
    /// A jobserver token has become available. Sent from the jobserver helper
    /// thread.
    Token(io::Result<Acquired>),

    /// The backend has finished processing a work item for a codegen unit.
    /// Sent from a backend worker thread.
    WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>> },

    /// The frontend has finished generating something (backend IR or a
    /// post-LTO artifact) for a codegen unit, and it should be passed to the
    /// backend. Sent from the main thread.
    CodegenDone { llvm_work_item: WorkItem<B>, cost: u64 },

    /// Similar to `CodegenDone`, but for reusing a pre-LTO artifact.
    /// Sent from the main thread.
    AddImportOnlyModule {
        module_data: SerializedModule<B::ModuleBuffer>,
        work_product: WorkProduct,
    },

    /// The frontend has finished generating everything for all codegen units.
    /// Sent from the main thread.
    CodegenComplete,

    /// Some normal-ish compiler error occurred, and codegen should be wound
    /// down. Sent from the main thread.
    CodegenAborted,
}

/// Messages sent to the thin LTO coordinator loop in `do_thin_lto`.
pub(crate) enum ThinLtoMessage {
    /// A jobserver token has become available. Sent from the jobserver helper
    /// thread.
    Token(io::Result<Acquired>),

    /// The backend has finished processing a work item for a codegen unit.
    /// Sent from a backend worker thread.
    WorkItem { result: Result<CompiledModule, Option<WorkerFatalError>> },
}

/// A message sent from the coordinator thread to the main thread telling it to
/// process another codegen unit.
pub struct CguMessage;

// A cut-down version of `rustc_errors::DiagInner` that impls `Send`, which
// can be used to send diagnostics from codegen threads to the main thread.
// It's missing the following fields from `rustc_errors::DiagInner`.
// - `span`: it doesn't impl `Send`.
// - `suggestions`: it doesn't impl `Send`, and isn't used for codegen
//   diagnostics.
// - `sort_span`: it doesn't impl `Send`.
// - `is_lint`: lints aren't relevant during codegen.
// - `emitted_at`: not used for codegen diagnostics.
struct Diagnostic {
    span: Vec<SpanData>,
    level: Level,
    messages: Vec<(DiagMessage, Style)>,
    code: Option<ErrCode>,
    children: Vec<Subdiagnostic>,
    args: DiagArgMap,
}

// A cut-down version of `rustc_errors::Subdiag` that impls `Send`. It's
// missing the following fields from `rustc_errors::Subdiag`.
// - `span`: it doesn't impl `Send`.
struct Subdiagnostic {
    level: Level,
    messages: Vec<(DiagMessage, Style)>,
}
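
/// What the main thread is doing, from the coordinator's point of view.
/// Transitions driven by the message loop in `start_executing_work`:
/// - Idle -> Codegenning (a `CguMessage` is sent to the main thread)
/// - Idle -> Lending (the implicit Token is lent out for LLVM work)
/// - Codegenning -> Idle (on `CodegenDone`, `CodegenComplete`, or
///   `AddImportOnlyModule`)
/// - Lending -> Idle (a fresh Token arrives or the lent-to worker finishes)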
#[derive(PartialEq, Clone, Copy, Debug)]
enum MainThreadState {
    /// Doing nothing.
    Idle,

    /// Doing codegen, i.e. MIR-to-LLVM-IR conversion.
    Codegenning,

    /// Idle, but lending the compiler process's Token to an LLVM thread so it can do useful work.
    Lending,
}

fn start_executing_work<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    crate_info: &CrateInfo,
    shared_emitter: SharedEmitter,
    codegen_worker_send: Sender<CguMessage>,
    coordinator_receive: Receiver<Message<B>>,
    regular_config: Arc<ModuleConfig>,
    allocator_config: Arc<ModuleConfig>,
    mut allocator_module: Option<ModuleCodegen<B::Module>>,
    coordinator_send: Sender<Message<B>>,
) -> thread::JoinHandle<Result<MaybeLtoModules<B>, ()>> {
    let sess = tcx.sess;
    let prof = sess.prof.clone();

    let mut each_linked_rlib_for_lto = Vec::new();
    let mut each_linked_rlib_file_for_lto = Vec::new();
    drop(link::each_linked_rlib(crate_info, None, &mut |cnum, path| {
        if link::ignored_for_lto(sess, crate_info, cnum) {
            return;
        }
        each_linked_rlib_for_lto.push(cnum);
        each_linked_rlib_file_for_lto.push(path.to_path_buf());
    }));

    // Compute the set of symbols we need to retain when doing LTO (if we need to).
    let exported_symbols_for_lto =
        Arc::new(lto::exported_symbols_for_lto(tcx, &each_linked_rlib_for_lto));

    // First up, convert our jobserver into a helper thread so we can use
    // normal mpsc channels to manage our messages. Once tokens have been
    // requested, they arrive on `coordinator_receive` and are managed in the
    // main loop below.
    let coordinator_send2 = coordinator_send.clone();
    let helper = jobserver::client()
        .into_helper_thread(move |token| {
            drop(coordinator_send2.send(Message::Token::<B>(token)));
        })
        .expect("failed to spawn helper thread");

    let opt_level = tcx.backend_optimization_level(());
    let backend_features = tcx.global_backend_features(()).clone();
    let tm_factory = backend.target_machine_factory(tcx.sess, opt_level, &backend_features);

    let remark_dir = if let Some(ref dir) = sess.opts.unstable_opts.remark_dir {
        let result = fs::create_dir_all(dir).and_then(|_| dir.canonicalize());
        match result {
            Ok(dir) => Some(dir),
            Err(error) => sess.dcx().emit_fatal(ErrorCreatingRemarkDir { error }),
        }
    } else {
        None
    };

    let cgcx = CodegenContext {
        crate_types: tcx.crate_types().to_vec(),
        lto: sess.lto(),
        use_linker_plugin_lto: sess.opts.cg.linker_plugin_lto.enabled(),
        dylib_lto: sess.opts.unstable_opts.dylib_lto,
        prefer_dynamic: sess.opts.cg.prefer_dynamic,
        fewer_names: sess.fewer_names(),
        save_temps: sess.opts.cg.save_temps,
        time_trace: sess.opts.unstable_opts.llvm_time_trace,
        remark: sess.opts.cg.remark.clone(),
        remark_dir,
        incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
        output_filenames: Arc::clone(tcx.output_filenames(())),
        module_config: regular_config,
        opt_level,
        backend_features,
        msvc_imps_needed: msvc_imps_needed(tcx),
        is_pe_coff: tcx.sess.target.is_like_windows,
        target_can_use_split_dwarf: tcx.sess.target_can_use_split_dwarf(),
        target_arch: tcx.sess.target.arch.to_string(),
        target_is_like_darwin: tcx.sess.target.is_like_darwin,
        target_is_like_aix: tcx.sess.target.is_like_aix,
        target_is_like_gpu: tcx.sess.target.is_like_gpu,
        split_debuginfo: tcx.sess.split_debuginfo(),
        split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
        parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend,
        pointer_size: tcx.data_layout.pointer_size(),
        invocation_temp: sess.invocation_temp.clone(),
    };

    // This is the "main loop" of parallel work happening for parallel codegen.
    // It's here that we manage parallelism, schedule work, and work with
    // messages coming from clients.
    //
    // There are a few environmental pre-conditions that shape how the system
    // is set up:
    //
    // - Error reporting can only happen on the main thread because that's the
    //   only place where we have access to the compiler `Session`.
    // - LLVM work can be done on any thread.
    // - Codegen can only happen on the main thread.
    // - Each thread doing substantial work must be in possession of a `Token`
    //   from the `Jobserver`.
    // - The compiler process always holds one `Token`. Any additional `Tokens`
    //   have to be requested from the `Jobserver`.
    //
    // Error Reporting
    // ===============
    // The error reporting restriction is handled separately from the rest: We
    // set up a `SharedEmitter` that holds an open channel to the main thread.
    // When an error occurs on any thread, the shared emitter will send the
    // error message to the receiver main thread (`SharedEmitterMain`). The
    // main thread will periodically query this error message queue and emit
    // any error messages it has received. It might even abort compilation if
    // it has received a fatal error. In this case we rely on all other threads
    // being torn down automatically with the main thread.
    // Since the main thread will often be busy doing codegen work, error
    // reporting will be somewhat delayed, since the message queue can only be
    // checked in between two work packages.
    //
    // Work Processing Infrastructure
    // ==============================
    // The work processing infrastructure knows three major actors:
    //
    // - the coordinator thread,
    // - the main thread, and
    // - LLVM worker threads
    //
    // The coordinator thread is running a message loop. It instructs the main
    // thread about what work to do when, and it will spawn off LLVM worker
    // threads as open LLVM WorkItems become available.
    //
    // The job of the main thread is to codegen CGUs into LLVM work packages
    // (since the main thread is the only thread that can do this). The main
    // thread will block until it receives a message from the coordinator, upon
    // which it will codegen one CGU, send it to the coordinator and block
    // again. This way the coordinator can control what the main thread is
    // doing.
    //
    // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
    // available, it will spawn off a new LLVM worker thread and let it process
    // a WorkItem. When an LLVM worker thread is done with its WorkItem,
    // it will just shut down, which also frees all resources associated with
    // the given LLVM module, and sends a message to the coordinator that the
    // WorkItem has been completed.
    //
    // Work Scheduling
    // ===============
    // The scheduler's goal is to minimize the time it takes to complete all
    // the work there is; however, we also want to keep memory consumption as
    // low as possible. These two goals are at odds with each other: If memory
    // consumption were not an issue, we could just let the main thread produce
    // LLVM WorkItems at full speed, assuring maximal utilization of
    // Tokens/LLVM worker threads. However, since codegen is usually faster
    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
    // WorkItem potentially holds on to a substantial amount of memory.
    //
    // So the actual goal is to always produce just enough LLVM WorkItems so as
    // not to starve our LLVM worker threads. That means, once we have enough
    // WorkItems in our queue, we can block the main thread, so it does not
    // produce more until we need them.
    //
    // Doing LLVM Work on the Main Thread
    // ----------------------------------
    // Since the main thread owns the compiler process's implicit `Token`, it is
    // wasteful to keep it blocked without doing any work. Therefore, what we do
    // in this case is: We spawn off an additional LLVM worker thread that helps
    // reduce the queue. The work it is doing corresponds to the implicit
    // `Token`. The coordinator will mark the main thread as being busy with
    // LLVM work. (The actual work happens on another OS thread but we just care
    // about `Tokens`, not actual threads).
    //
    // When any LLVM worker thread finishes while the main thread is marked as
    // "busy with LLVM work", we can do a little switcheroo: We give the Token
    // of the just finished thread to the LLVM worker thread that is working on
    // behalf of the main thread's implicit Token, thus freeing up the main
    // thread again. The coordinator can then again decide what the main thread
    // should do. This allows the coordinator to make decisions at more points
    // in time.
    //
    // Striking a Balance between Throughput and Memory Consumption
    // ------------------------------------------------------------
    // Since our two goals, (1) use as many Tokens as possible and (2) keep
    // memory consumption as low as possible, are in conflict with each other,
    // we have to find a trade off between them. Right now, the goal is to keep
    // all workers busy, which means that no worker should find the queue empty
    // when it is ready to start.
    // How do we achieve this? Good question :) We actually never know how
    // many `Tokens` are potentially available so it's hard to say how much to
    // fill up the queue before switching the main thread to LLVM work. Also we
    // currently don't have a means to estimate how long a running LLVM worker
    // will still be busy with its current WorkItem. However, we know the
    // maximal count of available Tokens that makes sense (=the number of CPU
    // cores), so we can take a conservative guess. The heuristic we use here
    // is implemented in the `queue_full_enough()` function.
    //
    // Some Background on Jobservers
    // -----------------------------
    // It's worth also touching on the management of parallelism here. We don't
    // want to just spawn a thread per work item because while that's optimal
    // parallelism it may overload a system with too many threads or violate our
    // configuration for the maximum amount of CPU to use for this process. To
    // manage this we use the `jobserver` crate.
    //
    // Job servers are an artifact of GNU make and are used to manage
    // parallelism between processes. A jobserver is basically a glorified IPC
    // semaphore. Whenever we want to run some work we acquire the semaphore,
    // and whenever we're done with that work we release the semaphore. In this
    // manner we can ensure that the maximum number of parallel workers is
    // capped at any one point in time.
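    //
    // As a rough sketch (an illustration of the pattern, not how this file
    // wires things up; here acquisition goes through the helper thread created
    // above), the acquire/work/release cycle with the `jobserver` crate looks
    // like:
    //
    //     let token = jobserver::client().acquire()?; // blocks until a slot frees up
    //     do_some_llvm_work(); // hypothetical stand-in for a work item
    //     drop(token); // releases the slot back to the jobserver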
    //
    // LTO and the coordinator thread
    // ------------------------------
    //
    // The final job the coordinator thread is responsible for is managing LTO
    // and how that works. When LTO is requested what we'll do is collect all
    // optimized LLVM modules into a local vector on the coordinator. Once all
    // modules have been codegened and optimized we hand this to the `lto`
    // module for further optimization. The `lto` module will return back a list
    // of more modules to work on, which the coordinator will continue to spawn
    // work for.
    //
    // Each LLVM module is automatically sent back to the coordinator for LTO if
    // necessary. There are already optimizations in place to avoid sending work
    // back to the coordinator if LTO isn't requested.
    return B::spawn_named_thread(cgcx.time_trace, "coordinator".to_string(), move || {
        // This is where we collect codegen units that have gone all the way
        // through codegen and LLVM.
        let mut compiled_modules = vec![];
        let mut needs_fat_lto = Vec::new();
        let mut needs_thin_lto = Vec::new();
        let mut lto_import_only_modules = Vec::new();

        /// Possible state transitions:
        /// - Ongoing -> Completed
        /// - Ongoing -> Aborted
        /// - Completed -> Aborted
        #[derive(Debug, PartialEq)]
        enum CodegenState {
            Ongoing,
            Completed,
            Aborted,
        }
        use CodegenState::*;
        let mut codegen_state = Ongoing;

        // This is the queue of LLVM work items that still need processing.
        let mut work_items = Vec::<(WorkItem<B>, u64)>::new();

        // These are the Jobserver Tokens we currently hold. Does not include
        // the implicit Token the compiler process owns no matter what.
        let mut tokens = Vec::new();

        let mut main_thread_state = MainThreadState::Idle;

        // How many LLVM worker threads are running while holding a Token. This
        // *excludes* any that the main thread is lending a Token to.
        let mut running_with_own_token = 0;

        // How many LLVM worker threads are running in total. This *includes*
        // any that the main thread is lending a Token to.
        let running_with_any_token = |main_thread_state, running_with_own_token| {
            running_with_own_token
                + if main_thread_state == MainThreadState::Lending { 1 } else { 0 }
        };

        let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;

        if let Some(allocator_module) = &mut allocator_module {
            B::optimize(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config);
        }

        // Run the message loop while there's still anything that needs message
        // processing. Note that as soon as codegen is aborted we simply want to
        // wait for all existing work to finish, so many of the conditions here
        // only apply if codegen hasn't been aborted as they represent pending
        // work to be done.
        loop {
            // While there are still CGUs to be codegened, the coordinator has
            // to decide how to utilize the compiler process's implicit Token:
            // For codegenning more CGUs or for running them through LLVM.
            if codegen_state == Ongoing {
                if main_thread_state == MainThreadState::Idle {
                    // Compute the number of workers that will be running once we've taken as many
                    // items from the work queue as we can, plus one for the main thread. It's not
                    // critically important that we use this instead of just
                    // `running_with_own_token`, but it prevents the `queue_full_enough` heuristic
                    // from fluctuating just because a worker finished up and we decreased the
                    // `running_with_own_token` count, even though we're just going to increase it
                    // right after this when we put a new worker to work.
                    let extra_tokens = tokens.len().checked_sub(running_with_own_token).unwrap();
                    let additional_running = std::cmp::min(extra_tokens, work_items.len());
                    let anticipated_running = running_with_own_token + additional_running + 1;

                    if !queue_full_enough(work_items.len(), anticipated_running) {
                        // The queue is not full enough, process more codegen units:
                        if codegen_worker_send.send(CguMessage).is_err() {
                            panic!("Could not send CguMessage to main thread")
                        }
                        main_thread_state = MainThreadState::Codegenning;
                    } else {
                        // The queue is full enough to not let the worker
                        // threads starve. Use the implicit Token to do some
                        // LLVM work too.
                        let (item, _) =
                            work_items.pop().expect("queue empty - queue_full_enough() broken?");
                        main_thread_state = MainThreadState::Lending;
                        spawn_work(
                            &cgcx,
                            &prof,
                            shared_emitter.clone(),
                            coordinator_send.clone(),
                            &mut llvm_start_time,
                            item,
                        );
                    }
                }
            } else if codegen_state == Completed {
                if running_with_any_token(main_thread_state, running_with_own_token) == 0
                    && work_items.is_empty()
                {
                    // All codegen work is done.
                    break;
                }

                // In this branch, we know that everything has been codegened,
                // so it's just a matter of determining whether the implicit
                // Token is free to use for LLVM work.
                match main_thread_state {
                    MainThreadState::Idle => {
                        if let Some((item, _)) = work_items.pop() {
                            main_thread_state = MainThreadState::Lending;
                            spawn_work(
                                &cgcx,
                                &prof,
                                shared_emitter.clone(),
                                coordinator_send.clone(),
                                &mut llvm_start_time,
                                item,
                            );
                        } else {
                            // There is no unstarted work, so let the main thread
                            // take over for a running worker. Otherwise the
                            // implicit token would just go to waste.
                            // We reduce the `running` counter by one. The
                            // `tokens.truncate()` below will take care of
                            // giving the Token back.
                            assert!(running_with_own_token > 0);
                            running_with_own_token -= 1;
                            main_thread_state = MainThreadState::Lending;
                        }
                    }
                    MainThreadState::Codegenning => bug!(
                        "codegen worker should not be codegenning after \
                         codegen was already completed"
                    ),
                    MainThreadState::Lending => {
                        // Already making good use of that token
                    }
                }
            } else {
                // Don't queue up any more work if codegen was aborted, we're
                // just waiting for our existing children to finish.
                assert!(codegen_state == Aborted);
                if running_with_any_token(main_thread_state, running_with_own_token) == 0 {
                    break;
                }
            }

            // Spin up what work we can, only doing this while we've got available
            // parallelism slots and work left to spawn.
            if codegen_state != Aborted {
                while running_with_own_token < tokens.len()
                    && let Some((item, _)) = work_items.pop()
                {
                    spawn_work(
                        &cgcx,
                        &prof,
                        shared_emitter.clone(),
                        coordinator_send.clone(),
                        &mut llvm_start_time,
                        item,
                    );
                    running_with_own_token += 1;
                }
            }

            // Relinquish accidentally acquired extra tokens.
            tokens.truncate(running_with_own_token);

            match coordinator_receive.recv().unwrap() {
                // Save the token locally and the next turn of the loop will use
                // this to spawn a new unit of work, or it may get dropped
                // immediately if we have no more work to spawn.
                Message::Token(token) => {
                    match token {
                        Ok(token) => {
                            tokens.push(token);

                            if main_thread_state == MainThreadState::Lending {
                                // If the main thread token is used for LLVM work
                                // at the moment, we turn that thread into a regular
                                // LLVM worker thread, so the main thread is free
                                // to react to codegen demand.
                                main_thread_state = MainThreadState::Idle;
                                running_with_own_token += 1;
                            }
                        }
                        Err(e) => {
                            let msg = &format!("failed to acquire jobserver token: {e}");
                            shared_emitter.fatal(msg);
                            codegen_state = Aborted;
                        }
                    }
                }

                Message::CodegenDone { llvm_work_item, cost } => {
                    // We keep the queue sorted by estimated processing cost,
                    // so that more expensive items are processed earlier. This
                    // is good for throughput as it gives the main thread more
                    // time to fill up the queue and it avoids scheduling
                    // expensive items to the end.
                    // Note, however, that this is not ideal for memory
                    // consumption, as LLVM module sizes are not evenly
                    // distributed.
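                    //
                    // For example, inserting an item with cost 5 into a queue
                    // whose costs are [2, 4, 7] lands at index 2 (the `Err`
                    // position from the binary search), giving [2, 4, 5, 7];
                    // `pop()` then always takes the most expensive remaining
                    // item from the end.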
                    let insertion_index = work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
                    let insertion_index = match insertion_index {
                        Ok(idx) | Err(idx) => idx,
                    };
                    work_items.insert(insertion_index, (llvm_work_item, cost));

                    if cgcx.parallel {
                        helper.request_token();
                    }
                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
                    main_thread_state = MainThreadState::Idle;
                }

                Message::CodegenComplete => {
                    if codegen_state != Aborted {
                        codegen_state = Completed;
                    }
                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
                    main_thread_state = MainThreadState::Idle;
                }

                // If codegen is aborted that means translation was aborted due
                // to some normal-ish compiler error. In this situation we want
                // to exit as soon as possible, but we want to make sure all
                // existing work has finished. Flag codegen as being done, and
                // then conditions above will ensure no more work is spawned but
                // we'll keep executing this loop until `running_with_own_token`
                // hits 0.
                Message::CodegenAborted => {
                    codegen_state = Aborted;
                }

                Message::WorkItem { result } => {
                    // If a thread exits successfully then we drop a token associated
                    // with that worker and update our `running_with_own_token` count.
                    // We may later re-acquire a token to continue running more work.
                    // We may also not actually drop a token here if the worker was
                    // running with an "ephemeral token".
                    if main_thread_state == MainThreadState::Lending {
                        main_thread_state = MainThreadState::Idle;
                    } else {
                        running_with_own_token -= 1;
                    }

                    match result {
                        Ok(WorkItemResult::Finished(compiled_module)) => {
                            compiled_modules.push(compiled_module);
                        }
                        Ok(WorkItemResult::NeedsFatLto(fat_lto_input)) => {
                            assert!(needs_thin_lto.is_empty());
                            needs_fat_lto.push(fat_lto_input);
                        }
                        Ok(WorkItemResult::NeedsThinLto(name, thin_buffer)) => {
                            assert!(needs_fat_lto.is_empty());
                            needs_thin_lto.push((name, thin_buffer));
                        }
                        Err(Some(WorkerFatalError)) => {
                            // Like `CodegenAborted`, wait for remaining work to finish.
                            codegen_state = Aborted;
                        }
                        Err(None) => {
                            // If the thread failed that means it panicked, so
                            // we abort immediately.
                            bug!("worker thread panicked");
                        }
                    }
                }

                Message::AddImportOnlyModule { module_data, work_product } => {
                    assert_eq!(codegen_state, Ongoing);
                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
                    lto_import_only_modules.push((module_data, work_product));
                    main_thread_state = MainThreadState::Idle;
                }
            }
        }

        // Drop to print timings.
        drop(llvm_start_time);

        if codegen_state == Aborted {
            return Err(());
        }

        drop(codegen_state);
        drop(tokens);
        drop(helper);
        assert!(work_items.is_empty());

        if !needs_fat_lto.is_empty() {
            assert!(compiled_modules.is_empty());
            assert!(needs_thin_lto.is_empty());

            if let Some(allocator_module) = allocator_module.take() {
                needs_fat_lto.push(FatLtoInput::InMemory(allocator_module));
            }

            return Ok(MaybeLtoModules::FatLto {
                cgcx,
                exported_symbols_for_lto,
                each_linked_rlib_file_for_lto,
                needs_fat_lto,
                lto_import_only_modules,
            });
        } else if !needs_thin_lto.is_empty() || !lto_import_only_modules.is_empty() {
            assert!(compiled_modules.is_empty());
            assert!(needs_fat_lto.is_empty());

            if cgcx.lto == Lto::ThinLocal {
                compiled_modules.extend(do_thin_lto::<B>(
                    &cgcx,
                    &prof,
                    shared_emitter.clone(),
                    tm_factory,
                    exported_symbols_for_lto,
                    each_linked_rlib_file_for_lto,
                    needs_thin_lto,
                    lto_import_only_modules,
                ));
            } else {
                if let Some(allocator_module) = allocator_module.take() {
                    let (name, thin_buffer) = B::prepare_thin(allocator_module);
                    needs_thin_lto.push((name, thin_buffer));
                }

                return Ok(MaybeLtoModules::ThinLto {
                    cgcx,
                    exported_symbols_for_lto,
                    each_linked_rlib_file_for_lto,
                    needs_thin_lto,
                    lto_import_only_modules,
                });
            }
        }

        Ok(MaybeLtoModules::NoLto {
            modules: compiled_modules,
            allocator_module: allocator_module.map(|allocator_module| {
                B::codegen(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config)
            }),
        })
    })
    .expect("failed to spawn coordinator thread");

    // A heuristic that determines if we have enough LLVM WorkItems in the
    // queue so that the main thread can do LLVM work instead of codegen.
    fn queue_full_enough(items_in_queue: usize, workers_running: usize) -> bool {
        // This heuristic scales ahead-of-time codegen according to available
        // concurrency, as measured by `workers_running`. The idea is that the
        // more concurrency we have available, the more demand there will be for
        // work items, and the fuller the queue should be kept to meet demand.
        // An important property of this approach is that we codegen ahead of
        // time only as much as necessary, so as to keep fewer LLVM modules in
        // memory at once, thereby reducing memory consumption.
        //
        // When the number of workers running is less than the max concurrency
        // available to us, this heuristic can cause us to instruct the main
        // thread to work on an LLVM item (that is, tell it to "LLVM") instead
        // of codegen, even though it seems like it *should* be codegenning so
        // that we can create more work items and spawn more LLVM workers.
        //
        // But this is not a problem. When the main thread is told to LLVM,
        // according to this heuristic and how work is scheduled, there is
        // always at least one item in the queue, and therefore at least one
        // pending jobserver token request. If there *is* more concurrency
        // available, we will immediately receive a token, which will upgrade
        // the main thread's LLVM worker to a real one (conceptually), and free
        // up the main thread to codegen if necessary. On the other hand, if
        // there isn't more concurrency, then the main thread working on an LLVM
        // item is appropriate, as long as the queue is full enough for demand.
        //
        // Speaking of which, how full should we keep the queue? Probably less
        // full than you'd think. A lot has to go wrong for the queue not to be
        // full enough and for that to have a negative effect on compile times.
        //
        // Workers are unlikely to finish at exactly the same time, so when one
        // finishes and takes another work item off the queue, we often have
        // ample time to codegen at that point before the next worker finishes.
        // But suppose that codegen takes so long that the workers exhaust the
        // queue, and we have one or more workers that have nothing to work on.
        // Well, it might not be so bad. Of all the LLVM modules we create and
        // optimize, one has to finish last. It's not necessarily the case that
        // by losing some concurrency for a moment, we delay the point at which
        // that last LLVM module is finished and the rest of compilation can
        // proceed. Also, when we can't take advantage of some concurrency, we
        // give tokens back to the job server. That enables some other rustc to
        // potentially make use of the available concurrency. That could even
        // *decrease* overall compile time if we're lucky. But yes, if no other
        // rustc can make use of the concurrency, then we've squandered it.
        //
        // However, keeping the queue full is also beneficial when we have a
        // surge in available concurrency. Then items can be taken from the
        // queue immediately, without having to wait for codegen.
        //
        // So, the heuristic below tries to keep one item in the queue for every
        // four running workers. Based on limited benchmarking, this appears to
        // be more than sufficient to avoid increasing compilation times.
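        //
        // Worked through with the formula below: for `workers_running = 8`,
        // `quarter_of_workers = 8 - 3 * 8 / 4 = 2` (integer division), so the
        // queue counts as full enough once it holds two items; for
        // `workers_running = 1`, `quarter_of_workers = 1`, so a single queued
        // item suffices.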
        let quarter_of_workers = workers_running - 3 * workers_running / 4;
        items_in_queue > 0 && items_in_queue >= quarter_of_workers
    }
}

/// `FatalError` is explicitly not `Send`.
#[must_use]
pub(crate) struct WorkerFatalError;

fn spawn_work<'a, B: ExtraBackendMethods>(
    cgcx: &CodegenContext,
    prof: &'a SelfProfilerRef,
    shared_emitter: SharedEmitter,
    coordinator_send: Sender<Message<B>>,
    llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
    work: WorkItem<B>,
) {
    if llvm_start_time.is_none() {
        *llvm_start_time = Some(prof.verbose_generic_activity("LLVM_passes"));
    }

    let cgcx = cgcx.clone();
    let prof = prof.clone();

    B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
            WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, &prof, shared_emitter, m),
            WorkItem::CopyPostLtoArtifacts(m) => WorkItemResult::Finished(
                execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m),
            ),
        }));

        let msg = match result {
            Ok(result) => Message::WorkItem::<B> { result: Ok(result) },

            // We ignore any `FatalError` coming out of `execute_work_item`, as a
            // diagnostic was already sent off to the main thread - just surface
            // that there was an error in this worker.
            Err(err) if err.is::<FatalErrorMarker>() => {
                Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) }
            }

            Err(_) => Message::WorkItem::<B> { result: Err(None) },
        };
        drop(coordinator_send.send(msg));
    })
    .expect("failed to spawn work thread");
}

fn spawn_thin_lto_work<B: ExtraBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    tm_factory: TargetMachineFactoryFn<B>,
    coordinator_send: Sender<ThinLtoMessage>,
    work: ThinLtoWorkItem<B>,
) {
    let cgcx = cgcx.clone();
    let prof = prof.clone();

    B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
                execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m)
            }
            ThinLtoWorkItem::ThinLto(m) => {
                execute_thin_lto_work_item(&cgcx, &prof, shared_emitter, tm_factory, m)
            }
        }));

        let msg = match result {
            Ok(result) => ThinLtoMessage::WorkItem { result: Ok(result) },

            // We ignore any `FatalError` coming out of `execute_work_item`, as a
            // diagnostic was already sent off to the main thread - just surface
            // that there was an error in this worker.
            Err(err) if err.is::<FatalErrorMarker>() => {
                ThinLtoMessage::WorkItem { result: Err(Some(WorkerFatalError)) }
            }

            Err(_) => ThinLtoMessage::WorkItem { result: Err(None) },
        };
        drop(coordinator_send.send(msg));
    })
    .expect("failed to spawn work thread");
}

enum SharedEmitterMessage {
    Diagnostic(Diagnostic),
    InlineAsmError(InlineAsmError),
    Fatal(String),
}

pub struct InlineAsmError {
    pub span: SpanData,
    pub msg: String,
    pub level: Level,
    pub source: Option<(String, Vec<InnerSpan>)>,
}

#[derive(Clone)]
pub struct SharedEmitter {
    sender: Sender<SharedEmitterMessage>,
}

pub struct SharedEmitterMain {
    receiver: Receiver<SharedEmitterMessage>,
}

impl SharedEmitter {
    fn new() -> (SharedEmitter, SharedEmitterMain) {
        let (sender, receiver) = channel();

        (SharedEmitter { sender }, SharedEmitterMain { receiver })
    }

    pub fn inline_asm_error(&self, err: InlineAsmError) {
        drop(self.sender.send(SharedEmitterMessage::InlineAsmError(err)));
    }

    fn fatal(&self, msg: &str) {
        drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
    }
}

impl Emitter for SharedEmitter {
    fn emit_diagnostic(&mut self, mut diag: rustc_errors::DiagInner) {
        // Check that we aren't missing anything interesting when converting to
        // the cut-down local `DiagInner`.
        assert!(!diag.span.has_span_labels());
        assert_eq!(diag.suggestions, Suggestions::Enabled(vec![]));
        assert_eq!(diag.sort_span, rustc_span::DUMMY_SP);
        assert_eq!(diag.is_lint, None);
        // No sensible check for `diag.emitted_at`.

        let args = mem::replace(&mut diag.args, DiagArgMap::default());
        drop(
            self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
                span: diag.span.primary_spans().iter().map(|span| span.data()).collect::<Vec<_>>(),
                level: diag.level(),
                messages: diag.messages,
                code: diag.code,
                children: diag
                    .children
                    .into_iter()
                    .map(|child| Subdiagnostic { level: child.level, messages: child.messages })
                    .collect(),
                args,
            })),
        );
    }

    fn source_map(&self) -> Option<&SourceMap> {
        None
    }

    fn translator(&self) -> &Translator {
        panic!("shared emitter attempted to translate a diagnostic");
    }
}

impl SharedEmitterMain {
    fn check(&self, sess: &Session, blocking: bool) {
        loop {
            let message = if blocking {
                match self.receiver.recv() {
                    Ok(message) => Ok(message),
                    Err(_) => Err(()),
                }
            } else {
                match self.receiver.try_recv() {
                    Ok(message) => Ok(message),
                    Err(_) => Err(()),
                }
            };

            match message {
                Ok(SharedEmitterMessage::Diagnostic(diag)) => {
                    // The diagnostic has been received on the main thread.
                    // Convert it back to a full `DiagInner` and emit.
                    let dcx = sess.dcx();
                    let mut d =
                        rustc_errors::DiagInner::new_with_messages(diag.level, diag.messages);
                    d.span = MultiSpan::from_spans(
                        diag.span.into_iter().map(|span| span.span()).collect(),
                    );
                    d.code = diag.code; // may be `None`, that's ok
                    d.children = diag
                        .children
                        .into_iter()
                        .map(|sub| rustc_errors::Subdiag {
                            level: sub.level,
                            messages: sub.messages,
                            span: MultiSpan::new(),
                        })
                        .collect();
                    d.args = diag.args;
                    dcx.emit_diagnostic(d);
                    sess.dcx().abort_if_errors();
                }
                Ok(SharedEmitterMessage::InlineAsmError(inner)) => {
                    assert_matches!(inner.level, Level::Error | Level::Warning | Level::Note);
                    let mut err = Diag::<()>::new(sess.dcx(), inner.level, inner.msg);
                    if !inner.span.is_dummy() {
                        err.span(inner.span.span());
                    }

                    // Point to the generated assembly if it is available.
                    if let Some((buffer, spans)) = inner.source {
                        let source = sess
                            .source_map()
                            .new_source_file(FileName::inline_asm_source_code(&buffer), buffer);
                        let spans: Vec<_> = spans
                            .iter()
                            .map(|sp| {
                                Span::with_root_ctxt(
                                    source.normalized_byte_pos(sp.start as u32),
                                    source.normalized_byte_pos(sp.end as u32),
                                )
                            })
                            .collect();
                        err.span_note(spans, "instantiated into assembly here");
                    }

                    err.emit();
                }
                Ok(SharedEmitterMessage::Fatal(msg)) => {
                    sess.dcx().fatal(msg);
                }
                Err(_) => {
                    break;
                }
            }
        }
    }
}

pub struct Coordinator<B: ExtraBackendMethods> {
    sender: Sender<Message<B>>,
    future: Option<thread::JoinHandle<Result<MaybeLtoModules<B>, ()>>>,
    // Only used for the Message type.
    phantom: PhantomData<B>,
}

impl<B: ExtraBackendMethods> Coordinator<B> {
    fn join(mut self) -> std::thread::Result<Result<MaybeLtoModules<B>, ()>> {
        self.future.take().unwrap().join()
    }
}

impl<B: ExtraBackendMethods> Drop for Coordinator<B> {
    fn drop(&mut self) {
        if let Some(future) = self.future.take() {
            // If we haven't joined yet, signal to the coordinator that it should spawn no more
            // work, and wait for worker threads to finish.
            drop(self.sender.send(Message::CodegenAborted::<B>));
            drop(future.join());
        }
    }
}

pub struct OngoingCodegen<B: ExtraBackendMethods> {
    pub backend: B,
    pub crate_info: CrateInfo,
    pub output_filenames: Arc<OutputFilenames>,
    // Field order below matters: struct fields are dropped in declaration
    // order, so placing `coordinator` first terminates the coordinator thread
    // before the two fields below drop and prematurely close channels used by
    // the coordinator thread. See `Coordinator`'s `Drop` implementation for
    // more info.
    pub coordinator: Coordinator<B>,
    pub codegen_worker_receive: Receiver<CguMessage>,
    pub shared_emitter_main: SharedEmitterMain,
}

impl<B: ExtraBackendMethods> OngoingCodegen<B> {
    pub fn join(self, sess: &Session) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
        self.shared_emitter_main.check(sess, true);

        let maybe_lto_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
            Ok(Ok(maybe_lto_modules)) => maybe_lto_modules,
            Ok(Err(())) => {
                sess.dcx().abort_if_errors();
                panic!("expected abort due to worker thread errors")
            }
            Err(_) => {
                bug!("panic during codegen/LLVM phase");
            }
        });

        sess.dcx().abort_if_errors();

        let (shared_emitter, shared_emitter_main) = SharedEmitter::new();

        // Catch fatal errors to ensure shared_emitter_main.check() can emit the actual diagnostics
        let compiled_modules = catch_fatal_errors(|| match maybe_lto_modules {
            MaybeLtoModules::NoLto { modules, allocator_module } => {
                drop(shared_emitter);
                CompiledModules { modules, allocator_module }
            }
            MaybeLtoModules::FatLto {
                cgcx,
                exported_symbols_for_lto,
                each_linked_rlib_file_for_lto,
                needs_fat_lto,
                lto_import_only_modules,
            } => {
                let tm_factory = self.backend.target_machine_factory(
                    sess,
                    cgcx.opt_level,
                    &cgcx.backend_features,
                );

                CompiledModules {
                    modules: vec![do_fat_lto(
                        &cgcx,
                        &sess.prof,
                        shared_emitter,
                        tm_factory,
                        &exported_symbols_for_lto,
                        &each_linked_rlib_file_for_lto,
                        needs_fat_lto,
                        lto_import_only_modules,
                    )],
                    allocator_module: None,
                }
            }
            MaybeLtoModules::ThinLto {
                cgcx,
                exported_symbols_for_lto,
                each_linked_rlib_file_for_lto,
                needs_thin_lto,
                lto_import_only_modules,
            } => {
                let tm_factory = self.backend.target_machine_factory(
                    sess,
                    cgcx.opt_level,
                    &cgcx.backend_features,
                );

                CompiledModules {
                    modules: do_thin_lto::<B>(
                        &cgcx,
                        &sess.prof,
                        shared_emitter,
                        tm_factory,
                        exported_symbols_for_lto,
                        each_linked_rlib_file_for_lto,
                        needs_thin_lto,
                        lto_import_only_modules,
                    ),
                    allocator_module: None,
                }
            }
        });

        shared_emitter_main.check(sess, true);

        sess.dcx().abort_if_errors();

        let mut compiled_modules =
            compiled_modules.expect("fatal error emitted but not sent to SharedEmitter");

        // Regardless of what order these modules completed in, report them to
        // the backend in the same order every time to ensure that we're handing
        // out deterministic results.
        compiled_modules.modules.sort_by(|a, b| a.name.cmp(&b.name));

        let work_products =
            copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
        produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);

        // FIXME: time_llvm_passes support - does this use a global context or
        // something?
        if sess.codegen_units().as_usize() == 1 && sess.opts.unstable_opts.time_llvm_passes {
            self.backend.print_pass_timings()
        }

        if sess.print_llvm_stats() {
            self.backend.print_statistics()
        }

        (
            CodegenResults {
                crate_info: self.crate_info,

                modules: compiled_modules.modules,
                allocator_module: compiled_modules.allocator_module,
            },
            work_products,
        )
    }

    pub(crate) fn codegen_finished(&self, tcx: TyCtxt<'_>) {
        self.wait_for_signal_to_codegen_item();
        self.check_for_errors(tcx.sess);
        drop(self.coordinator.sender.send(Message::CodegenComplete::<B>));
    }

    pub(crate) fn check_for_errors(&self, sess: &Session) {
        self.shared_emitter_main.check(sess, false);
    }

    pub(crate) fn wait_for_signal_to_codegen_item(&self) {
        match self.codegen_worker_receive.recv() {
            Ok(CguMessage) => {
                // Ok to proceed.
            }
            Err(_) => {
                // One of the LLVM threads must have panicked, fall through so
                // error handling can be reached.
            }
        }
    }
}

pub(crate) fn submit_codegened_module_to_llvm<B: ExtraBackendMethods>(
    coordinator: &Coordinator<B>,
    module: ModuleCodegen<B::Module>,
    cost: u64,
) {
    let llvm_work_item = WorkItem::Optimize(module);
    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost }));
}

pub(crate) fn submit_post_lto_module_to_llvm<B: ExtraBackendMethods>(
    coordinator: &Coordinator<B>,
    module: CachedModuleCodegen,
) {
    let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost: 0 }));
}

pub(crate) fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
    tcx: TyCtxt<'_>,
    coordinator: &Coordinator<B>,
    module: CachedModuleCodegen,
) {
    let filename = pre_lto_bitcode_filename(&module.name);
    let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename);
    let file = fs::File::open(&bc_path)
        .unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));
    let mmap = unsafe {
        Mmap::map(file).unwrap_or_else(|e| {
            panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
        })
    };
    // Schedule the module to be loaded.
    drop(coordinator.sender.send(Message::AddImportOnlyModule::<B> {
        module_data: SerializedModule::FromUncompressedFile(mmap),
        work_product: module.source,
    }));
}

fn pre_lto_bitcode_filename(module_name: &str) -> String {
    format!("{module_name}.{PRE_LTO_BC_EXT}")
}

fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
    // This combination should never be true (because it's not supported). If it
    // is, something is wrong with command-line arg validation.
    assert!(
        !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
            && tcx.sess.target.is_like_windows
            && tcx.sess.opts.cg.prefer_dynamic)
    );

    // We need to generate `_imp__` symbols if we are generating an rlib or we include one
    // indirectly from ThinLTO. In theory these are not needed as ThinLTO could resolve
    // these, but it currently does not do so.
    let can_have_static_objects =
        tcx.sess.lto() == Lto::Thin || tcx.crate_types().contains(&CrateType::Rlib);

    tcx.sess.target.is_like_windows &&
        can_have_static_objects &&
        // ThinLTO can't handle this workaround in all cases, so we don't
        // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
        // dynamic linking when linker plugin LTO is enabled.
        !tcx.sess.opts.cg.linker_plugin_lto.enabled()
}