use std::marker::PhantomData;
use std::panic::AssertUnwindSafe;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender, channel};
use std::{assert_matches, fs, io, mem, str, thread};

use rustc_abi::Size;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::jobserver::{self, Acquired};
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
use rustc_errors::emitter::Emitter;
use rustc_errors::{
    Diag, DiagArgMap, DiagCtxt, DiagCtxtHandle, DiagMessage, ErrCode, FatalError, FatalErrorMarker,
    Level, MultiSpan, Style, Suggestions, catch_fatal_errors,
};
use rustc_fs_util::link_or_copy;
use rustc_hir::find_attr;
use rustc_incremental::{
    copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
};
use rustc_macros::{Decodable, Encodable};
use rustc_metadata::fs::copy_to_stdout;
use rustc_middle::bug;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_session::config::{
    self, CrateType, Lto, OptLevel, OutFileName, OutputFilenames, OutputType, Passes,
    SwitchWithOptPath,
};
use rustc_span::source_map::SourceMap;
use rustc_span::{FileName, InnerSpan, Span, SpanData};
use rustc_target::spec::{MergeFunctions, SanitizerSet};
use tracing::debug;

use super::link::{self, ensure_removed};
use super::lto::{self, SerializedModule};
use crate::back::lto::check_lto_allowed;
use crate::errors::ErrorCreatingRemarkDir;
use crate::traits::*;
use crate::{
    CachedModuleCodegen, CompiledModule, CompiledModules, CrateInfo, ModuleCodegen, ModuleKind,
    errors,
};

const PRE_LTO_BC_EXT: &str = "pre-lto.bc";

/// What kind of object file to emit.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable)]
pub enum EmitObj {
    // No object file.
    None,

    // Just uncompressed llvm bitcode. Provides easy compatibility with
    // emscripten's ecc compiler, when used as the linker.
    Bitcode,

    // Object code, possibly augmented with a bitcode section.
    ObjectCode(BitcodeSection),
}

/// What kind of llvm bitcode section to embed in an object file.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable)]
pub enum BitcodeSection {
    // No bitcode section.
    None,

    // A full, uncompressed bitcode section.
    Full,
}

/// Module-specific configuration for `optimize_and_codegen`.
#[derive(Encodable, Decodable)]
pub struct ModuleConfig {
    /// Names of additional optimization passes to run.
    pub passes: Vec<String>,
    /// Some(level) to optimize at a certain level, or None to run
    /// absolutely no optimizations (used for the allocator module).
    pub opt_level: Option<config::OptLevel>,

    pub pgo_gen: SwitchWithOptPath,
    pub pgo_use: Option<PathBuf>,
    pub pgo_sample_use: Option<PathBuf>,
    pub debug_info_for_profiling: bool,
    pub instrument_coverage: bool,

    pub sanitizer: SanitizerSet,
    pub sanitizer_recover: SanitizerSet,
    pub sanitizer_dataflow_abilist: Vec<String>,
    pub sanitizer_memory_track_origins: usize,

    // Flags indicating which outputs to produce.
    pub emit_pre_lto_bc: bool,
    pub emit_bc: bool,
    pub emit_ir: bool,
    pub emit_asm: bool,
    pub emit_obj: EmitObj,
    pub emit_thin_lto_summary: bool,

    // Miscellaneous flags. These are mostly copied from command-line
    // options.
    pub verify_llvm_ir: bool,
    pub lint_llvm_ir: bool,
    pub no_prepopulate_passes: bool,
    pub no_builtins: bool,
    pub vectorize_loop: bool,
    pub vectorize_slp: bool,
    pub merge_functions: bool,
    pub emit_lifetime_markers: bool,
    pub llvm_plugins: Vec<String>,
    pub autodiff: Vec<config::AutoDiff>,
    pub offload: Vec<config::Offload>,
}

impl ModuleConfig {
    fn new(kind: ModuleKind, tcx: TyCtxt<'_>, no_builtins: bool) -> ModuleConfig {
        // If it's a regular module, use `$regular`, otherwise use `$other`.
        // `$regular` and `$other` are evaluated lazily.
        macro_rules! if_regular {
            ($regular: expr, $other: expr) => {
                if let ModuleKind::Regular = kind { $regular } else { $other }
            };
        }

        let sess = tcx.sess;
        let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);

        let save_temps = sess.opts.cg.save_temps;

        let should_emit_obj = sess.opts.output_types.contains_key(&OutputType::Exe)
            || match kind {
                ModuleKind::Regular => sess.opts.output_types.contains_key(&OutputType::Object),
                ModuleKind::Allocator => false,
            };

        let emit_obj = if !should_emit_obj {
            EmitObj::None
        } else if sess.target.obj_is_bitcode
            || (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins)
        {
            // This case is selected if the target uses objects as bitcode, or
            // if linker plugin LTO is enabled. In the linker plugin LTO case
            // the assumption is that the final link-step will read the bitcode
            // and convert it to object code. This may be done by either the
            // native linker or rustc itself.
            //
            // Note, however, that the linker-plugin-lto requested here is
            // explicitly ignored for `#![no_builtins]` crates. These crates are
            // specifically ignored by rustc's LTO passes and wouldn't work if
            // loaded into the linker. These crates define symbols that LLVM
            // lowers intrinsics to, and these symbol dependencies aren't known
            // until after codegen. As a result any crate marked
            // `#![no_builtins]` is assumed to not participate in LTO and
            // instead goes on to generate object code.
            EmitObj::Bitcode
        } else if need_bitcode_in_object(tcx) {
            EmitObj::ObjectCode(BitcodeSection::Full)
        } else {
            EmitObj::ObjectCode(BitcodeSection::None)
        };

        ModuleConfig {
            passes: if_regular!(sess.opts.cg.passes.clone(), vec![]),

            opt_level: opt_level_and_size,

            pgo_gen: if_regular!(
                sess.opts.cg.profile_generate.clone(),
                SwitchWithOptPath::Disabled
            ),
            pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
            pgo_sample_use: if_regular!(sess.opts.unstable_opts.profile_sample_use.clone(), None),
            debug_info_for_profiling: sess.opts.unstable_opts.debug_info_for_profiling,
            instrument_coverage: if_regular!(sess.instrument_coverage(), false),

            sanitizer: if_regular!(sess.sanitizers(), SanitizerSet::empty()),
            sanitizer_dataflow_abilist: if_regular!(
                sess.opts.unstable_opts.sanitizer_dataflow_abilist.clone(),
                Vec::new()
            ),
            sanitizer_recover: if_regular!(
                sess.opts.unstable_opts.sanitizer_recover,
                SanitizerSet::empty()
            ),
            sanitizer_memory_track_origins: if_regular!(
                sess.opts.unstable_opts.sanitizer_memory_track_origins,
                0
            ),

            emit_pre_lto_bc: if_regular!(
                save_temps || need_pre_lto_bitcode_for_incr_comp(sess),
                false
            ),
            emit_bc: if_regular!(
                save_temps || sess.opts.output_types.contains_key(&OutputType::Bitcode),
                save_temps
            ),
            emit_ir: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::LlvmAssembly),
                false
            ),
            emit_asm: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::Assembly),
                false
            ),
            emit_obj,
            emit_thin_lto_summary: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::ThinLinkBitcode),
                false
            ),

            verify_llvm_ir: sess.verify_llvm_ir(),
            lint_llvm_ir: sess.opts.unstable_opts.lint_llvm_ir,
            no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
            no_builtins: no_builtins || sess.target.no_builtins,

            // Copy what clang does by turning on loop vectorization at O2 and
            // slp vectorization at O3.
            vectorize_loop: !sess.opts.cg.no_vectorize_loops
                && (sess.opts.optimize == config::OptLevel::More
                    || sess.opts.optimize == config::OptLevel::Aggressive),
            vectorize_slp: !sess.opts.cg.no_vectorize_slp
                && sess.opts.optimize == config::OptLevel::Aggressive,

            // Some targets (namely, NVPTX) interact badly with the
            // MergeFunctions pass. This is because MergeFunctions can generate
            // new function calls which may interfere with the target calling
            // convention; e.g. for the NVPTX target, PTX kernels should not
            // call other PTX kernels. MergeFunctions can also be configured to
            // generate aliases instead, but aliases are not supported by some
            // backends (again, NVPTX). Therefore, allow targets to opt out of
            // the MergeFunctions pass, but otherwise keep the pass enabled (at
            // O2 and O3) since it can be useful for reducing code size.
            merge_functions: match sess
                .opts
                .unstable_opts
                .merge_functions
                .unwrap_or(sess.target.merge_functions)
            {
                MergeFunctions::Disabled => false,
                MergeFunctions::Trampolines | MergeFunctions::Aliases => {
                    use config::OptLevel::*;
                    match sess.opts.optimize {
                        Aggressive | More | SizeMin | Size => true,
                        Less | No => false,
                    }
                }
            },

            emit_lifetime_markers: sess.emit_lifetime_markers(),
            llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
            autodiff: if_regular!(sess.opts.unstable_opts.autodiff.clone(), vec![]),
            offload: if_regular!(sess.opts.unstable_opts.offload.clone(), vec![]),
        }
    }

    pub fn bitcode_needed(&self) -> bool {
        self.emit_bc
            || self.emit_thin_lto_summary
            || self.emit_obj == EmitObj::Bitcode
            || self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }

    pub fn embed_bitcode(&self) -> bool {
        self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }
}

/// Configuration passed to the function returned by the `target_machine_factory`.
pub struct TargetMachineFactoryConfig {
    /// Split DWARF is enabled in LLVM by checking that `TM.MCOptions.SplitDwarfFile` isn't empty,
    /// so the path to the dwarf object has to be provided when we create the target machine.
    /// This can be ignored by backends which do not need it for their Split DWARF support.
    pub split_dwarf_file: Option<PathBuf>,

    /// The name of the output object file. Used for setting OutputFilenames in target options
    /// so that LLVM can emit the CodeView S_OBJNAME record in pdb files
    pub output_obj_file: Option<PathBuf>,
}

impl TargetMachineFactoryConfig {
    pub fn new(cgcx: &CodegenContext, module_name: &str) -> TargetMachineFactoryConfig {
        let split_dwarf_file = if cgcx.target_can_use_split_dwarf {
            cgcx.output_filenames.split_dwarf_path(
                cgcx.split_debuginfo,
                cgcx.split_dwarf_kind,
                module_name,
                cgcx.invocation_temp.as_deref(),
            )
        } else {
            None
        };

        let output_obj_file = Some(cgcx.output_filenames.temp_path_for_cgu(
            OutputType::Object,
            module_name,
            cgcx.invocation_temp.as_deref(),
        ));
        TargetMachineFactoryConfig { split_dwarf_file, output_obj_file }
    }
}

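// A thread-safe (`Send + Sync`) factory closure: each worker presumably calls
// it to build a backend `TargetMachine` for the module it is processing; how
// the result is used is up to the concrete `WriteBackendMethods` impl.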
pub type TargetMachineFactoryFn<B> = Arc<
    dyn Fn(
            DiagCtxtHandle<'_>,
            TargetMachineFactoryConfig,
        ) -> <B as WriteBackendMethods>::TargetMachine
        + Send
        + Sync,
>;

/// Additional resources used by optimize_and_codegen (not module specific)
#[derive(Clone, Encodable, Decodable)]
pub struct CodegenContext {
    // Resources needed when running LTO
    pub lto: Lto,
    pub use_linker_plugin_lto: bool,
    pub dylib_lto: bool,
    pub prefer_dynamic: bool,
    pub save_temps: bool,
    pub fewer_names: bool,
    pub time_trace: bool,
    pub crate_types: Vec<CrateType>,
    pub output_filenames: Arc<OutputFilenames>,
    pub invocation_temp: Option<String>,
    pub module_config: Arc<ModuleConfig>,
    pub opt_level: OptLevel,
    pub backend_features: Vec<String>,
    pub msvc_imps_needed: bool,
    pub is_pe_coff: bool,
    pub target_can_use_split_dwarf: bool,
    pub target_arch: String,
    pub target_is_like_darwin: bool,
    pub target_is_like_aix: bool,
    pub target_is_like_gpu: bool,
    pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
    pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
    pub pointer_size: Size,

    /// LLVM optimizations for which we want to print remarks.
    pub remark: Passes,
    /// Directory into which the LLVM optimization remarks should be written.
    /// If `None`, they will be written to stderr.
    pub remark_dir: Option<PathBuf>,
    /// The incremental compilation session directory, or None if we are not
    /// compiling incrementally
    pub incr_comp_session_dir: Option<PathBuf>,
    /// `true` if the codegen should be run in parallel.
    ///
    /// Depends on [`ExtraBackendMethods::supports_parallel()`] and `-Zno_parallel_backend`.
    pub parallel: bool,
}

fn generate_thin_lto_work<B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    dcx: DiagCtxtHandle<'_>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    needs_thin_lto: Vec<(String, B::ModuleBuffer)>,
    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> Vec<(ThinLtoWorkItem<B>, u64)> {
    let _prof_timer = prof.generic_activity("codegen_thin_generate_lto_work");

    let (lto_modules, copy_jobs) = B::run_thin_lto(
        cgcx,
        prof,
        dcx,
        exported_symbols_for_lto,
        each_linked_rlib_for_lto,
        needs_thin_lto,
        import_only_modules,
    );
    lto_modules
        .into_iter()
        .map(|module| {
            let cost = module.cost();
            (ThinLtoWorkItem::ThinLto(module), cost)
        })
        .chain(copy_jobs.into_iter().map(|wp| {
            (
                ThinLtoWorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
                    name: wp.cgu_name.clone(),
                    source: wp,
                }),
                0, // copying is very cheap
            )
        }))
        .collect()
}

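/// Outcome of the per-CGU codegen phase: either the modules are already fully
/// compiled (no LTO), or we have gathered the inputs for a fat- or thin-LTO
/// step that still has to run.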
enum MaybeLtoModules<B: WriteBackendMethods> {
    NoLto(CompiledModules),
    FatLto {
        cgcx: CodegenContext,
        exported_symbols_for_lto: Arc<Vec<String>>,
        each_linked_rlib_file_for_lto: Vec<PathBuf>,
        needs_fat_lto: Vec<FatLtoInput<B>>,
        lto_import_only_modules:
            Vec<(SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>, WorkProduct)>,
    },
    ThinLto {
        cgcx: CodegenContext,
        exported_symbols_for_lto: Arc<Vec<String>>,
        each_linked_rlib_file_for_lto: Vec<PathBuf>,
        needs_thin_lto: Vec<(String, <B as WriteBackendMethods>::ModuleBuffer)>,
        lto_import_only_modules:
            Vec<(SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>, WorkProduct)>,
    },
}

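/// Whether bitcode should be embedded in the emitted object files: only when
/// `-Cembed-bitcode` is enabled, an rlib is among the crate types, and an
/// executable output was also requested.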
fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
    let sess = tcx.sess;
    sess.opts.cg.embed_bitcode
        && tcx.crate_types().contains(&CrateType::Rlib)
        && sess.opts.output_types.contains_key(&OutputType::Exe)
}

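/// Whether pre-LTO bitcode needs to be written out: only when compiling
/// incrementally with some form of LTO enabled, presumably so that cached
/// modules can be fed back into a later LTO step.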
fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
    if sess.opts.incremental.is_none() {
        return false;
    }

    match sess.lto() {
        Lto::No => false,
        Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
    }
}

pub(crate) fn start_async_codegen<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    crate_info: &CrateInfo,
    allocator_module: Option<ModuleCodegen<B::Module>>,
) -> OngoingCodegen<B> {
    let (coordinator_send, coordinator_receive) = channel();
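    // The receiving half goes to the coordinator thread started below; the
    // sending half is cloned so that the main thread (and, through it, worker
    // threads) can post messages back to the coordinator.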

    let no_builtins = find_attr!(tcx, crate, NoBuiltins);

    let regular_config = ModuleConfig::new(ModuleKind::Regular, tcx, no_builtins);
    let allocator_config = ModuleConfig::new(ModuleKind::Allocator, tcx, no_builtins);

    let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
    let (codegen_worker_send, codegen_worker_receive) = channel();

    let coordinator_thread = start_executing_work(
        backend.clone(),
        tcx,
        crate_info,
        shared_emitter,
        codegen_worker_send,
        coordinator_receive,
        Arc::new(regular_config),
        Arc::new(allocator_config),
        allocator_module,
        coordinator_send.clone(),
    );

    OngoingCodegen {
        backend,

        codegen_worker_receive,
        shared_emitter_main,
        coordinator: Coordinator {
            sender: coordinator_send,
            future: Some(coordinator_thread),
            phantom: PhantomData,
        },
        output_filenames: Arc::clone(tcx.output_filenames(())),
    }
}

fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
    sess: &Session,
    compiled_modules: &CompiledModules,
) -> FxIndexMap<WorkProductId, WorkProduct> {
    let mut work_products = FxIndexMap::default();

    if sess.opts.incremental.is_none() {
        return work_products;
    }

    let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");

    for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
        let mut files = Vec::new();
        if let Some(object_file_path) = &module.object {
            files.push((OutputType::Object.extension(), object_file_path.as_path()));
        }
        if let Some(dwarf_object_file_path) = &module.dwarf_object {
            files.push(("dwo", dwarf_object_file_path.as_path()));
        }
        if let Some(path) = &module.assembly {
            files.push((OutputType::Assembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.llvm_ir {
            files.push((OutputType::LlvmAssembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.bytecode {
            files.push((OutputType::Bitcode.extension(), path.as_path()));
        }
        if let Some((id, product)) = copy_cgu_workproduct_to_incr_comp_cache_dir(
            sess,
            &module.name,
            files.as_slice(),
            &module.links_from_incr_cache,
        ) {
            work_products.insert(id, product);
        }
    }

    work_products
}

pub fn produce_final_output_artifacts(
    sess: &Session,
    compiled_modules: &CompiledModules,
    crate_output: &OutputFilenames,
) {
    let mut user_wants_bitcode = false;
    let mut user_wants_objects = false;

    // Produce final compile outputs.
    let copy_gracefully = |from: &Path, to: &OutFileName| match to {
        OutFileName::Stdout if let Err(e) = copy_to_stdout(from) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, to.as_path(), e));
        }
        OutFileName::Real(path) if let Err(e) = fs::copy(from, path) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, path, e));
        }
        _ => {}
    };

    let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
        if let [module] = &compiled_modules.modules[..] {
            // 1) Only one codegen unit. In this case it's no difficulty
            //    to copy `foo.0.x` to `foo.x`.
            let path = crate_output.temp_path_for_cgu(
                output_type,
                &module.name,
                sess.invocation_temp.as_deref(),
            );
            let output = crate_output.path(output_type);
            if !output_type.is_text_output() && output.is_tty() {
                sess.dcx()
                    .emit_err(errors::BinaryOutputToTty { shorthand: output_type.shorthand() });
            } else {
                copy_gracefully(&path, &output);
            }
            if !sess.opts.cg.save_temps && !keep_numbered {
                // The user just wants `foo.x`, not `foo.#module-name#.x`.
                ensure_removed(sess.dcx(), &path);
            }
        } else {
            if crate_output.outputs.contains_explicit_name(&output_type) {
                // 2) Multiple codegen units, with `--emit foo=some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx()
                    .emit_warn(errors::IgnoringEmitPath { extension: output_type.extension() });
            } else if crate_output.single_output_file.is_some() {
                // 3) Multiple codegen units, with `-o some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx().emit_warn(errors::IgnoringOutput { extension: output_type.extension() });
            } else {
                // 4) Multiple codegen units, but no explicit name. We
                //    just leave the `foo.0.x` files in place.
                //    (We don't have to do any work in this case.)
            }
        }
    };

    // Flag to indicate whether the user explicitly requested bitcode.
    // Otherwise, we produced it only as a temporary output, and will need
    // to get rid of it.
    for output_type in crate_output.outputs.keys() {
        match *output_type {
            OutputType::Bitcode => {
                user_wants_bitcode = true;
                // Copy to .bc, but always keep the .0.bc. There is a later
                // check to figure out if we should delete .0.bc files, or keep
                // them for making an rlib.
                copy_if_one_unit(OutputType::Bitcode, true);
            }
            OutputType::ThinLinkBitcode => {
                copy_if_one_unit(OutputType::ThinLinkBitcode, false);
            }
            OutputType::LlvmAssembly => {
                copy_if_one_unit(OutputType::LlvmAssembly, false);
            }
            OutputType::Assembly => {
                copy_if_one_unit(OutputType::Assembly, false);
            }
            OutputType::Object => {
                user_wants_objects = true;
                copy_if_one_unit(OutputType::Object, true);
            }
            OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
        }
    }

    // Clean up unwanted temporary files.

    // We create the following files by default:
    // - #crate#.#module-name#.bc
    // - #crate#.#module-name#.o
    // - #crate#.crate.metadata.bc
    // - #crate#.crate.metadata.o
    // - #crate#.o (linked from crate.##.o)
    // - #crate#.bc (copied from crate.##.bc)
    // We may create additional files if requested by the user (through
    // `-C save-temps` or `--emit=` flags).

    if !sess.opts.cg.save_temps {
        // Remove the temporary .#module-name#.o objects. If the user didn't
        // explicitly request bitcode (with --emit=bc), and the bitcode is not
        // needed for building an rlib, then we must remove .#module-name#.bc as
        // well.

        // Specific rules for keeping .#module-name#.bc:
        // - If the user requested bitcode (`user_wants_bitcode`), and
        //   codegen_units > 1, then keep it.
        // - If the user requested bitcode but codegen_units == 1, then we
        //   can toss .#module-name#.bc because we copied it to .bc earlier.
        // - If we're not building an rlib and the user didn't request
        //   bitcode, then delete .#module-name#.bc.
        // If you change how this works, also update back::link::link_rlib,
        // where .#module-name#.bc files are (maybe) deleted after making an
        // rlib.
        let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);

        let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units().as_usize() > 1;

        let keep_numbered_objects =
            needs_crate_object || (user_wants_objects && sess.codegen_units().as_usize() > 1);

        for module in compiled_modules.modules.iter() {
            if !keep_numbered_objects {
                if let Some(ref path) = module.object {
                    ensure_removed(sess.dcx(), path);
                }

                if let Some(ref path) = module.dwarf_object {
                    ensure_removed(sess.dcx(), path);
                }
            }

            if let Some(ref path) = module.bytecode {
                if !keep_numbered_bitcode {
                    ensure_removed(sess.dcx(), path);
                }
            }
        }

        if !user_wants_bitcode
            && let Some(ref allocator_module) = compiled_modules.allocator_module
            && let Some(ref path) = allocator_module.bytecode
        {
            ensure_removed(sess.dcx(), path);
        }
    }

    if sess.opts.json_artifact_notifications {
        if let [module] = &compiled_modules.modules[..] {
            module.for_each_output(|_path, ty| {
                if sess.opts.output_types.contains_key(&ty) {
                    let descr = ty.shorthand();
                    // For a single CGU the file is renamed to drop the
                    // CGU-specific suffix, so we regenerate the path the same way.
                    let path = crate_output.path(ty);
                    sess.dcx().emit_artifact_notification(path.as_path(), descr);
                }
            });
        } else {
            for module in &compiled_modules.modules {
                module.for_each_output(|path, ty| {
                    if sess.opts.output_types.contains_key(&ty) {
                        let descr = ty.shorthand();
                        sess.dcx().emit_artifact_notification(&path, descr);
                    }
                });
            }
        }
    }

    // We leave the following files around by default:
    // - #crate#.o
    // - #crate#.crate.metadata.o
    // - #crate#.bc
    // These are used in linking steps and will be cleaned up afterward.
}

pub(crate) enum WorkItem<B: WriteBackendMethods> {
    /// Optimize a newly codegened, totally unoptimized module.
    Optimize(ModuleCodegen<B::Module>),
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
}

enum ThinLtoWorkItem<B: WriteBackendMethods> {
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
    /// Performs thin-LTO on the given module.
    ThinLto(lto::ThinModule<B>),
}

// `pthread_setname()` on *nix ignores anything beyond the first 15
// bytes. Use short descriptions to maximize the space available for
// the module name.
#[cfg(not(windows))]
fn desc(short: &str, _long: &str, name: &str) -> String {
    // The short label is three bytes, and is followed by a space. That
    // leaves 11 bytes for the CGU name. How we obtain those 11 bytes
    // depends on the CGU name form.
    //
    // - Non-incremental, e.g. `regex.f10ba03eb5ec7975-cgu.0`: the part
    //   before the `-cgu.0` is the same for every CGU, so use the
    //   `cgu.0` part. The number suffix will be different for each
    //   CGU.
    //
    // - Incremental (normal), e.g. `2i52vvl2hco29us0`: use the whole
    //   name because each CGU will have a unique ASCII hash, and the
    //   first 11 bytes will be enough to identify it.
    //
    // - Incremental (with `-Zhuman-readable-cgu-names`), e.g.
    //   `regex.f10ba03eb5ec7975-re_builder.volatile`: use the whole
    //   name. The first 11 bytes won't be enough to uniquely identify
    //   it, but no obvious substring will, and this is a rarely used
    //   option so it doesn't matter much.
    //
    assert_eq!(short.len(), 3);
    let name = if let Some(index) = name.find("-cgu.") {
        &name[index + 1..] // +1 skips the leading '-'.
    } else {
        name
    };
    format!("{short} {name}")
}

// Windows has no thread name length limit, so use more descriptive names.
#[cfg(windows)]
fn desc(_short: &str, long: &str, name: &str) -> String {
    format!("{long} {name}")
}

impl<B: WriteBackendMethods> WorkItem<B> {
    /// Generate a short description of this work item suitable for use as a thread name.
    fn short_description(&self) -> String {
        match self {
            WorkItem::Optimize(m) => desc("opt", "optimize module", &m.name),
            WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for", &m.name),
        }
    }
}

impl<B: WriteBackendMethods> ThinLtoWorkItem<B> {
    /// Generate a short description of this work item suitable for use as a thread name.
    fn short_description(&self) -> String {
        match self {
            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
                desc("cpy", "copy LTO artifacts for", &m.name)
            }
            ThinLtoWorkItem::ThinLto(m) => desc("lto", "thin-LTO module", m.name()),
        }
    }
}

/// A result produced by the backend.
pub(crate) enum WorkItemResult<B: WriteBackendMethods> {
    /// The backend has finished compiling a CGU, nothing more required.
    Finished(CompiledModule),

    /// The backend has finished compiling a CGU, which now needs to go through
    /// fat LTO.
    NeedsFatLto(FatLtoInput<B>),

    /// The backend has finished compiling a CGU, which now needs to go through
    /// thin LTO.
    NeedsThinLto(String, B::ModuleBuffer),
}

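/// A module headed for fat LTO, either still as an in-memory backend module or
/// already serialized into a buffer (e.g. when re-imported from the
/// incremental cache or written out as pre-LTO bitcode).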
pub enum FatLtoInput<B: WriteBackendMethods> {
    Serialized { name: String, buffer: SerializedModule<B::ModuleBuffer> },
    InMemory(ModuleCodegen<B::Module>),
}

/// Actual LTO type we end up choosing based on multiple factors.
pub(crate) enum ComputedLtoType {
    No,
    Thin,
    Fat,
}

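// For example, `-Clto=thin` while building only an rlib resolves to
// `ComputedLtoType::No` here (the real LTO run happens later, when the final
// product is linked), whereas the same flag for a binary yields `Thin`.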
pub(crate) fn compute_per_cgu_lto_type(
    sess_lto: &Lto,
    linker_does_lto: bool,
    sess_crate_types: &[CrateType],
) -> ComputedLtoType {
    // If the linker does LTO, we don't have to do it. Note that we
    // keep doing full LTO, if it is requested, as not to break the
    // assumption that the output will be a single module.

    // We ignore a request for full crate graph LTO if the crate type
    // is only an rlib, as there is no full crate graph to process,
    // that'll happen later.
    //
    // This use case currently comes up primarily for targets that
    // require LTO so the request for LTO is always unconditionally
    // passed down to the backend, but we don't actually want to do
    // anything about it yet until we've got a final product.
    let is_rlib = matches!(sess_crate_types, [CrateType::Rlib]);

    match sess_lto {
        Lto::ThinLocal if !linker_does_lto => ComputedLtoType::Thin,
        Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
        Lto::Fat if !is_rlib => ComputedLtoType::Fat,
        _ => ComputedLtoType::No,
    }
}

fn execute_optimize_work_item<B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    mut module: ModuleCodegen<B::Module>,
) -> WorkItemResult<B> {
    let _timer = prof.generic_activity_with_arg("codegen_module_optimize", &*module.name);

    B::optimize(cgcx, prof, &shared_emitter, &mut module, &cgcx.module_config);

    // After we've done the initial round of optimizations we need to
    // decide whether to synchronously codegen this module or ship it
    // back to the coordinator thread for further LTO processing (which
    // has to wait for all the initial modules to be optimized).

    let lto_type =
        compute_per_cgu_lto_type(&cgcx.lto, cgcx.use_linker_plugin_lto, &cgcx.crate_types);

    // If we're doing some form of incremental LTO then we need to be sure to
    // save our module to disk first.
    let bitcode = if cgcx.module_config.emit_pre_lto_bc {
        let filename = pre_lto_bitcode_filename(&module.name);
        cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
    } else {
        None
    };

    match lto_type {
        ComputedLtoType::No => {
            let module = B::codegen(cgcx, &prof, &shared_emitter, module, &cgcx.module_config);
            WorkItemResult::Finished(module)
        }
        ComputedLtoType::Thin => {
            let thin_buffer = B::serialize_module(module.module_llvm, true);
            if let Some(path) = bitcode {
                fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
            }
            WorkItemResult::NeedsThinLto(module.name, thin_buffer)
        }
        ComputedLtoType::Fat => match bitcode {
            Some(path) => {
                let buffer = B::serialize_module(module.module_llvm, false);
                fs::write(&path, buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
                WorkItemResult::NeedsFatLto(FatLtoInput::Serialized {
                    name: module.name,
                    buffer: SerializedModule::Local(buffer),
                })
            }
            None => WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module)),
        },
    }
}

fn execute_copy_from_cache_work_item(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    module: CachedModuleCodegen,
) -> CompiledModule {
    let _timer =
        prof.generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name);

    let dcx = DiagCtxt::new(Box::new(shared_emitter));
    let dcx = dcx.handle();

    let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();

    let mut links_from_incr_cache = Vec::new();

    let mut load_from_incr_comp_dir = |output_path: PathBuf, saved_path: &str| {
        let source_file = in_incr_comp_dir(incr_comp_session_dir, saved_path);
        debug!(
            "copying preexisting module `{}` from {:?} to {}",
            module.name,
            source_file,
            output_path.display()
        );
        match link_or_copy(&source_file, &output_path) {
            Ok(_) => {
                links_from_incr_cache.push(source_file);
                Some(output_path)
            }
            Err(error) => {
                dcx.emit_err(errors::CopyPathBuf { source_file, output_path, error });
                None
            }
        }
    };

    let dwarf_object =
        module.source.saved_files.get("dwo").as_ref().and_then(|saved_dwarf_object_file| {
            let dwarf_obj_out = cgcx
                .output_filenames
                .split_dwarf_path(
                    cgcx.split_debuginfo,
                    cgcx.split_dwarf_kind,
                    &module.name,
                    cgcx.invocation_temp.as_deref(),
                )
                .expect(
                    "saved dwarf object in work product but `split_dwarf_path` returned `None`",
                );
            load_from_incr_comp_dir(dwarf_obj_out, saved_dwarf_object_file)
        });

    let mut load_from_incr_cache = |perform, output_type: OutputType| {
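        // `?` bails out with `None` when the incremental cache has no saved
        // file of this output type.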
        if perform {
            let saved_file = module.source.saved_files.get(output_type.extension())?;
            let output_path = cgcx.output_filenames.temp_path_for_cgu(
                output_type,
                &module.name,
                cgcx.invocation_temp.as_deref(),
            );
            load_from_incr_comp_dir(output_path, &saved_file)
        } else {
            None
        }
    };

    let module_config = &cgcx.module_config;
    let should_emit_obj = module_config.emit_obj != EmitObj::None;
    let assembly = load_from_incr_cache(module_config.emit_asm, OutputType::Assembly);
    let llvm_ir = load_from_incr_cache(module_config.emit_ir, OutputType::LlvmAssembly);
    let bytecode = load_from_incr_cache(module_config.emit_bc, OutputType::Bitcode);
    let object = load_from_incr_cache(should_emit_obj, OutputType::Object);
    if should_emit_obj && object.is_none() {
        dcx.emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
    }

    CompiledModule {
        links_from_incr_cache,
        kind: ModuleKind::Regular,
        name: module.name,
        object,
        dwarf_object,
        bytecode,
        assembly,
        llvm_ir,
    }
}

fn do_fat_lto<B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    tm_factory: TargetMachineFactoryFn<B>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    mut needs_fat_lto: Vec<FatLtoInput<B>>,
    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> CompiledModule {
    let _timer = prof.verbose_generic_activity("LLVM_fatlto");

    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
    let dcx = dcx.handle();

    check_lto_allowed(&cgcx, dcx);

    for (module, wp) in import_only_modules {
        needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
    }

    B::optimize_and_codegen_fat_lto(
        cgcx,
        prof,
        &shared_emitter,
        tm_factory,
        exported_symbols_for_lto,
        each_linked_rlib_for_lto,
        needs_fat_lto,
    )
}

fn do_thin_lto<B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    tm_factory: TargetMachineFactoryFn<B>,
    exported_symbols_for_lto: Arc<Vec<String>>,
    each_linked_rlib_for_lto: Vec<PathBuf>,
    needs_thin_lto: Vec<(String, <B as WriteBackendMethods>::ModuleBuffer)>,
    lto_import_only_modules: Vec<(
        SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>,
        WorkProduct,
    )>,
) -> Vec<CompiledModule> {
    let _timer = prof.verbose_generic_activity("LLVM_thinlto");

    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
    let dcx = dcx.handle();

    check_lto_allowed(&cgcx, dcx);

    let (coordinator_send, coordinator_receive) = channel();

    // First up, convert our jobserver into a helper thread so we can use
    // normal mpsc channels to manage our messages and such. Once we've
    // requested tokens, they arrive on `coordinator_receive` and are managed
    // in the main loop below.
    let coordinator_send2 = coordinator_send.clone();
    let helper = jobserver::client()
        .into_helper_thread(move |token| {
            drop(coordinator_send2.send(ThinLtoMessage::Token(token)));
        })
        .expect("failed to spawn helper thread");

    let mut work_items = vec![];

    // We have LTO work to do. Perform the serial work here of
    // figuring out what we're going to LTO and then push a
    // bunch of work items onto our queue to do LTO. This all
    // happens on the coordinator thread but it's very quick so
    // we don't worry about tokens.
    for (work, cost) in generate_thin_lto_work::<B>(
        cgcx,
        prof,
        dcx,
        &exported_symbols_for_lto,
        &each_linked_rlib_for_lto,
        needs_thin_lto,
        lto_import_only_modules,
    ) {
        let insertion_index =
            work_items.binary_search_by_key(&cost, |&(_, cost)| cost).unwrap_or_else(|e| e);
        work_items.insert(insertion_index, (work, cost));
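        // The queue stays sorted by ascending cost, so `pop()` in the loop
        // below always starts the most expensive remaining item first.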
        if cgcx.parallel {
            helper.request_token();
        }
    }

    let mut codegen_aborted = None;

    // These are the Jobserver Tokens we currently hold. Does not include
    // the implicit Token the compiler process owns no matter what.
    let mut tokens = vec![];

    // Amount of tokens that are used (including the implicit token).
    let mut used_token_count = 0;

    let mut compiled_modules = vec![];

    // Run the message loop while there's still anything that needs message
    // processing. Note that as soon as codegen is aborted we simply want to
    // wait for all existing work to finish, so many of the conditions here
    // only apply if codegen hasn't been aborted as they represent pending
    // work to be done.
    loop {
        if codegen_aborted.is_none() {
            if used_token_count == 0 && work_items.is_empty() {
                // All codegen work is done.
                break;
            }

            // Spin up what work we can, only doing this while we've got available
            // parallelism slots and work left to spawn.
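            // (`tokens.len() + 1` counts the implicit token that the compiler
            // process always owns.)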
1084while used_token_count < tokens.len() + 1
1085&& let Some((item, _)) = work_items.pop()
1086 {
1087 spawn_thin_lto_work(
1088&cgcx,
1089 prof,
1090 shared_emitter.clone(),
1091 Arc::clone(&tm_factory),
1092 coordinator_send.clone(),
1093 item,
1094 );
1095 used_token_count += 1;
1096 }
1097 } else {
1098// Don't queue up any more work if codegen was aborted, we're
1099 // just waiting for our existing children to finish.
1100if used_token_count == 0 {
1101break;
1102 }
1103 }
11041105// Relinquish accidentally acquired extra tokens. Subtract 1 for the implicit token.
1106tokens.truncate(used_token_count.saturating_sub(1));
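        // For example, with three work items in flight we keep two explicit
        // tokens; the third item runs on the compiler process's implicit token.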

        match coordinator_receive.recv().unwrap() {
            // Save the token locally and the next turn of the loop will use
            // this to spawn a new unit of work, or it may get dropped
            // immediately if we have no more work to spawn.
            ThinLtoMessage::Token(token) => match token {
                Ok(token) => {
                    tokens.push(token);
                }
                Err(e) => {
                    let msg = &format!("failed to acquire jobserver token: {e}");
                    shared_emitter.fatal(msg);
                    codegen_aborted = Some(FatalError);
                }
            },

            ThinLtoMessage::WorkItem { result } => {
                // If a thread exits successfully then we drop a token associated
                // with that worker and update our `used_token_count` count.
                // We may later re-acquire a token to continue running more work.
                // We may also not actually drop a token here if the worker was
                // running with an "ephemeral token".
                used_token_count -= 1;

                match result {
                    Ok(compiled_module) => compiled_modules.push(compiled_module),
                    Err(Some(WorkerFatalError)) => {
                        // Like `CodegenAborted`, wait for remaining work to finish.
                        codegen_aborted = Some(FatalError);
                    }
                    Err(None) => {
                        // If the thread failed that means it panicked, so
                        // we abort immediately.
                        bug!("worker thread panicked");
                    }
                }
            }
        }
    }

    if let Some(codegen_aborted) = codegen_aborted {
        codegen_aborted.raise();
    }

    compiled_modules
}

fn execute_thin_lto_work_item<B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    tm_factory: TargetMachineFactoryFn<B>,
    module: lto::ThinModule<B>,
) -> CompiledModule {
    let _timer = prof.generic_activity_with_arg("codegen_module_perform_lto", module.name());

    B::optimize_and_codegen_thin(cgcx, prof, &shared_emitter, tm_factory, module)
}

/// Messages sent to the coordinator.
pub(crate) enum Message<B: WriteBackendMethods> {
    /// A jobserver token has become available. Sent from the jobserver helper
    /// thread.
    Token(io::Result<Acquired>),

    /// The backend has finished processing a work item for a codegen unit.
    /// Sent from a backend worker thread.
    WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>> },

    /// The frontend has finished generating something (backend IR or a
    /// post-LTO artifact) for a codegen unit, and it should be passed to the
    /// backend. Sent from the main thread.
    CodegenDone { llvm_work_item: WorkItem<B>, cost: u64 },

    /// Similar to `CodegenDone`, but for reusing a pre-LTO artifact.
    /// Sent from the main thread.
    AddImportOnlyModule {
        module_data: SerializedModule<B::ModuleBuffer>,
        work_product: WorkProduct,
    },

    /// The frontend has finished generating everything for all codegen units.
    /// Sent from the main thread.
    CodegenComplete,

    /// Some normal-ish compiler error occurred, and codegen should be wound
    /// down. Sent from the main thread.
    CodegenAborted,
}

/// Messages sent to the ThinLTO coordinator loop in `do_thin_lto`.
pub(crate) enum ThinLtoMessage {
    /// A jobserver token has become available. Sent from the jobserver helper
    /// thread.
    Token(io::Result<Acquired>),

    /// The backend has finished processing a work item for a codegen unit.
    /// Sent from a backend worker thread.
    WorkItem { result: Result<CompiledModule, Option<WorkerFatalError>> },
}

/// A message sent from the coordinator thread to the main thread telling it to
/// process another codegen unit.
pub struct CguMessage;

// A cut-down version of `rustc_errors::DiagInner` that impls `Send`, which
// can be used to send diagnostics from codegen threads to the main thread.
// It's missing the following fields from `rustc_errors::DiagInner`.
// - `span`: it doesn't impl `Send`.
// - `suggestions`: it doesn't impl `Send`, and isn't used for codegen
//   diagnostics.
// - `sort_span`: it doesn't impl `Send`.
// - `is_lint`: lints aren't relevant during codegen.
// - `emitted_at`: not used for codegen diagnostics.
struct Diagnostic {
    span: Vec<SpanData>,
    level: Level,
    messages: Vec<(DiagMessage, Style)>,
    code: Option<ErrCode>,
    children: Vec<Subdiagnostic>,
    args: DiagArgMap,
}

// A cut-down version of `rustc_errors::Subdiag` that impls `Send`. It's
// missing the following fields from `rustc_errors::Subdiag`.
// - `span`: it doesn't impl `Send`.
struct Subdiagnostic {
    level: Level,
    messages: Vec<(DiagMessage, Style)>,
}

#[derive(PartialEq, Clone, Copy, Debug)]
enum MainThreadState {
    /// Doing nothing.
    Idle,

    /// Doing codegen, i.e. MIR-to-LLVM-IR conversion.
    Codegenning,

    /// Idle, but lending the compiler process's Token to an LLVM thread so it can do useful work.
    Lending,
}
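// `MainThreadState` transitions observed in `start_executing_work`:
// `Idle -> Codegenning -> Idle` when the coordinator asks the main thread to
// codegen a CGU, and `Idle -> Lending -> Idle` when the main thread's implicit
// Token is lent to an LLVM worker instead.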

fn start_executing_work<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    crate_info: &CrateInfo,
    shared_emitter: SharedEmitter,
    codegen_worker_send: Sender<CguMessage>,
    coordinator_receive: Receiver<Message<B>>,
    regular_config: Arc<ModuleConfig>,
    allocator_config: Arc<ModuleConfig>,
    mut allocator_module: Option<ModuleCodegen<B::Module>>,
    coordinator_send: Sender<Message<B>>,
) -> thread::JoinHandle<Result<MaybeLtoModules<B>, ()>> {
    let sess = tcx.sess;
    let prof = sess.prof.clone();

    let mut each_linked_rlib_for_lto = Vec::new();
    let mut each_linked_rlib_file_for_lto = Vec::new();
    drop(link::each_linked_rlib(crate_info, None, &mut |cnum, path| {
        if link::ignored_for_lto(sess, crate_info, cnum) {
            return;
        }
        each_linked_rlib_for_lto.push(cnum);
        each_linked_rlib_file_for_lto.push(path.to_path_buf());
    }));

    // Compute the set of symbols we need to retain when doing LTO (if we need to)
    let exported_symbols_for_lto =
        Arc::new(lto::exported_symbols_for_lto(tcx, &each_linked_rlib_for_lto));

    // First up, convert our jobserver into a helper thread so we can use normal
    // mpsc channels to manage our messages and such.
    // Once tokens have been requested, they will arrive on `coordinator_receive`
    // and get managed in the main loop below.
    let coordinator_send2 = coordinator_send.clone();
    let helper = jobserver::client()
        .into_helper_thread(move |token| {
            drop(coordinator_send2.send(Message::Token::<B>(token)));
        })
        .expect("failed to spawn helper thread");

    let opt_level = tcx.backend_optimization_level(());
    let backend_features = tcx.global_backend_features(()).clone();
    let tm_factory = backend.target_machine_factory(tcx.sess, opt_level, &backend_features);

    let remark_dir = if let Some(ref dir) = sess.opts.unstable_opts.remark_dir {
        let result = fs::create_dir_all(dir).and_then(|_| dir.canonicalize());
        match result {
            Ok(dir) => Some(dir),
            Err(error) => sess.dcx().emit_fatal(ErrorCreatingRemarkDir { error }),
        }
    } else {
        None
    };

    let cgcx = CodegenContext {
        crate_types: tcx.crate_types().to_vec(),
        lto: sess.lto(),
        use_linker_plugin_lto: sess.opts.cg.linker_plugin_lto.enabled(),
        dylib_lto: sess.opts.unstable_opts.dylib_lto,
        prefer_dynamic: sess.opts.cg.prefer_dynamic,
        fewer_names: sess.fewer_names(),
        save_temps: sess.opts.cg.save_temps,
        time_trace: sess.opts.unstable_opts.llvm_time_trace,
        remark: sess.opts.cg.remark.clone(),
        remark_dir,
        incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
        output_filenames: Arc::clone(tcx.output_filenames(())),
        module_config: regular_config,
        opt_level,
        backend_features,
        msvc_imps_needed: msvc_imps_needed(tcx),
        is_pe_coff: tcx.sess.target.is_like_windows,
        target_can_use_split_dwarf: tcx.sess.target_can_use_split_dwarf(),
        target_arch: tcx.sess.target.arch.to_string(),
        target_is_like_darwin: tcx.sess.target.is_like_darwin,
        target_is_like_aix: tcx.sess.target.is_like_aix,
        target_is_like_gpu: tcx.sess.target.is_like_gpu,
        split_debuginfo: tcx.sess.split_debuginfo(),
        split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
        parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend,
        pointer_size: tcx.data_layout.pointer_size(),
        invocation_temp: sess.invocation_temp.clone(),
    };

    // This is the "main loop" of parallel work happening for parallel codegen.
    // It's here that we manage parallelism, schedule work, and work with
    // messages coming from clients.
    //
    // There are a few environmental pre-conditions that shape how the system
    // is set up:
    //
    // - Error reporting can only happen on the main thread because that's the
    //   only place where we have access to the compiler `Session`.
    // - LLVM work can be done on any thread.
    // - Codegen can only happen on the main thread.
    // - Each thread doing substantial work must be in possession of a `Token`
    //   from the `Jobserver`.
    // - The compiler process always holds one `Token`. Any additional `Tokens`
    //   have to be requested from the `Jobserver`.
    //
    // Error Reporting
    // ===============
    // The error reporting restriction is handled separately from the rest: We
    // set up a `SharedEmitter` that holds an open channel to the main thread.
    // When an error occurs on any thread, the shared emitter will send the
    // error message to the receiver on the main thread (`SharedEmitterMain`).
    // The main thread will periodically query this error message queue and
    // emit any error messages it has received. It might even abort compilation
    // if it has received a fatal error. In this case we rely on all other
    // threads being torn down automatically with the main thread.
    // Since the main thread will often be busy doing codegen work, error
    // reporting will be somewhat delayed, since the message queue can only be
    // checked in between two work packages.
    //
    // Work Processing Infrastructure
    // ==============================
    // The work processing infrastructure knows three major actors:
    //
    // - the coordinator thread,
    // - the main thread, and
    // - LLVM worker threads
    //
    // The coordinator thread is running a message loop. It instructs the main
    // thread about what work to do when, and it will spawn off LLVM worker
    // threads as open LLVM WorkItems become available.
    //
    // The job of the main thread is to codegen CGUs into LLVM work packages
    // (since the main thread is the only thread that can do this). The main
    // thread will block until it receives a message from the coordinator, upon
    // which it will codegen one CGU, send it to the coordinator and block
    // again. This way the coordinator can control what the main thread is
    // doing.
    //
    // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
    // available, it will spawn off a new LLVM worker thread and let it process
    // a WorkItem. When an LLVM worker thread is done with its WorkItem,
    // it will just shut down, which also frees all resources associated with
    // the given LLVM module, and sends a message to the coordinator that the
    // WorkItem has been completed.
    //
    // Work Scheduling
    // ===============
    // The scheduler's goal is to minimize the time it takes to complete all
    // the work there is; however, we also want to keep memory consumption low
    // if possible. These two goals are at odds with each other: If memory
    // consumption were not an issue, we could just let the main thread produce
    // LLVM WorkItems at full speed, assuring maximal utilization of
    // Tokens/LLVM worker threads. However, since codegen is usually faster
    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
    // WorkItem potentially holds on to a substantial amount of memory.
    //
    // So the actual goal is to always produce just enough LLVM WorkItems so as
    // not to starve our LLVM worker threads. That means, once we have enough
    // WorkItems in our queue, we can block the main thread, so it does not
    // produce more until we need them.
    //
    // Doing LLVM Work on the Main Thread
    // ----------------------------------
    // Since the main thread owns the compiler process's implicit `Token`, it is
    // wasteful to keep it blocked without doing any work. Therefore, what we do
    // in this case is: We spawn off an additional LLVM worker thread that helps
    // reduce the queue. The work it is doing corresponds to the implicit
    // `Token`. The coordinator will mark the main thread as being busy with
    // LLVM work. (The actual work happens on another OS thread but we just care
    // about `Tokens`, not actual threads).
    //
    // When any LLVM worker thread finishes while the main thread is marked as
    // "busy with LLVM work", we can do a little switcheroo: We give the Token
    // of the just finished thread to the LLVM worker thread that is working on
    // behalf of the main thread's implicit Token, thus freeing up the main
    // thread again. The coordinator can then again decide what the main thread
    // should do. This allows the coordinator to make decisions at more points
    // in time.
    //
    // Striking a Balance between Throughput and Memory Consumption
    // ------------------------------------------------------------
    // Since our two goals, (1) use as many Tokens as possible and (2) keep
    // memory consumption as low as possible, are in conflict with each other,
    // we have to find a trade-off between them. Right now, the goal is to keep
    // all workers busy, which means that no worker should find the queue empty
    // when it is ready to start.
    // How do we achieve this? Good question :) We actually never know how
    // many `Tokens` are potentially available so it's hard to say how much to
    // fill up the queue before switching the main thread to LLVM work. Also we
    // currently don't have a means to estimate how long a running LLVM worker
    // will still be busy with its current WorkItem. However, we know the
    // maximal count of available Tokens that makes sense (=the number of CPU
    // cores), so we can take a conservative guess. The heuristic we use here
    // is implemented in the `queue_full_enough()` function.
    //
    // Some Background on Jobservers
    // -----------------------------
    // It's worth also touching on the management of parallelism here. We don't
    // want to just spawn a thread per work item because while that's optimal
    // parallelism it may overload a system with too many threads or violate our
    // configuration for the maximum amount of CPU to use for this process. To
    // manage this we use the `jobserver` crate.
    //
    // Job servers are an artifact of GNU make and are used to manage
    // parallelism between processes. A jobserver is basically a glorified IPC
    // semaphore. Whenever we want to run some work we acquire the semaphore,
    // and whenever we're done with that work we release the semaphore. In this
    // manner we can ensure that the maximum number of parallel workers is
    // capped at any one point in time.
    //
    // LTO and the coordinator thread
    // ------------------------------
    //
    // The final job the coordinator thread is responsible for is managing LTO
    // and how that works. When LTO is requested what we'll do is collect all
    // optimized LLVM modules into a local vector on the coordinator. Once all
    // modules have been codegened and optimized we hand this to the `lto`
    // module for further optimization. The `lto` module will return a list
    // of more modules to work on, which the coordinator will continue to spawn
    // work for.
    //
    // Each LLVM module is automatically sent back to the coordinator for LTO if
    // necessary. There are already optimizations in place to avoid sending work
    // back to the coordinator if LTO isn't requested.
    let f = move || {
        let _profiler = if cgcx.time_trace { B::thread_profiler() } else { Box::new(()) };

        // This is where we collect codegen units that have gone all the way
        // through codegen and LLVM.
        let mut compiled_modules = vec![];
        let mut needs_fat_lto = Vec::new();
        let mut needs_thin_lto = Vec::new();
        let mut lto_import_only_modules = Vec::new();

        /// Possible state transitions:
        /// - Ongoing -> Completed
        /// - Ongoing -> Aborted
        /// - Completed -> Aborted
        #[derive(Debug, PartialEq)]
        enum CodegenState {
            Ongoing,
            Completed,
            Aborted,
        }
        use CodegenState::*;
        let mut codegen_state = Ongoing;

        // This is the queue of LLVM work items that still need processing.
        let mut work_items = Vec::<(WorkItem<B>, u64)>::new();

        // These are the Jobserver Tokens we currently hold. Does not include
        // the implicit Token the compiler process owns no matter what.
        let mut tokens = Vec::new();

        let mut main_thread_state = MainThreadState::Idle;

        // How many LLVM worker threads are running while holding a Token. This
        // *excludes* any that the main thread is lending a Token to.
        let mut running_with_own_token = 0;

        // How many LLVM worker threads are running in total. This *includes*
        // any that the main thread is lending a Token to.
        let running_with_any_token = |main_thread_state, running_with_own_token| {
            running_with_own_token
                + if main_thread_state == MainThreadState::Lending { 1 } else { 0 }
        };

        let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;

        if let Some(allocator_module) = &mut allocator_module {
            B::optimize(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config);
        }

        // Run the message loop while there's still anything that needs message
        // processing. Note that as soon as codegen is aborted we simply want to
        // wait for all existing work to finish, so many of the conditions here
        // only apply if codegen hasn't been aborted as they represent pending
        // work to be done.
        loop {
            // While there are still CGUs to be codegened, the coordinator has
            // to decide how to utilize the compiler process's implicit Token:
            // for codegenning more CGUs or for running them through LLVM.
            if codegen_state == Ongoing {
                if main_thread_state == MainThreadState::Idle {
                    // Compute the number of workers that will be running once we've taken as many
                    // items from the work queue as we can, plus one for the main thread. It's not
                    // critically important that we use this instead of just
                    // `running_with_own_token`, but it prevents the `queue_full_enough` heuristic
                    // from fluctuating just because a worker finished up and we decreased the
                    // `running_with_own_token` count, even though we're just going to increase it
                    // right after this when we put a new worker to work.
                    let extra_tokens = tokens.len().checked_sub(running_with_own_token).unwrap();
                    let additional_running = std::cmp::min(extra_tokens, work_items.len());
                    let anticipated_running = running_with_own_token + additional_running + 1;
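                    // For example: 2 workers holding tokens, 5 tokens held in
                    // total, and 2 queued items gives `2 + min(3, 2) + 1 = 5`.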

                    if !queue_full_enough(work_items.len(), anticipated_running) {
                        // The queue is not full enough, process more codegen units:
                        if codegen_worker_send.send(CguMessage).is_err() {
                            panic!("Could not send CguMessage to main thread")
                        }
                        main_thread_state = MainThreadState::Codegenning;
                    } else {
                        // The queue is full enough to not let the worker
                        // threads starve. Use the implicit Token to do some
                        // LLVM work too.
                        let (item, _) =
                            work_items.pop().expect("queue empty - queue_full_enough() broken?");
                        main_thread_state = MainThreadState::Lending;
                        spawn_work(
                            &cgcx,
                            &prof,
                            shared_emitter.clone(),
                            coordinator_send.clone(),
                            &mut llvm_start_time,
                            item,
                        );
                    }
                }
            } else if codegen_state == Completed {
                if running_with_any_token(main_thread_state, running_with_own_token) == 0
                    && work_items.is_empty()
                {
                    // All codegen work is done.
                    break;
                }

                // In this branch, we know that everything has been codegened,
                // so it's just a matter of determining whether the implicit
                // Token is free to use for LLVM work.
                match main_thread_state {
                    MainThreadState::Idle => {
                        if let Some((item, _)) = work_items.pop() {
                            main_thread_state = MainThreadState::Lending;
                            spawn_work(
                                &cgcx,
                                &prof,
                                shared_emitter.clone(),
                                coordinator_send.clone(),
                                &mut llvm_start_time,
                                item,
                            );
                        } else {
                            // There is no unstarted work, so let the main thread
                            // take over for a running worker. Otherwise the
                            // implicit token would just go to waste.
                            // We reduce the `running` counter by one. The
                            // `tokens.truncate()` below will take care of
                            // giving the Token back.
                            assert!(running_with_own_token > 0);
                            running_with_own_token -= 1;
                            main_thread_state = MainThreadState::Lending;
                        }
                    }
                    MainThreadState::Codegenning => bug!(
                        "codegen worker should not be codegenning after \
                         codegen was already completed"
                    ),
                    MainThreadState::Lending => {
                        // Already making good use of that token
                    }
                }
            } else {
                // Don't queue up any more work if codegen was aborted, we're
                // just waiting for our existing children to finish.
                assert!(codegen_state == Aborted);
                if running_with_any_token(main_thread_state, running_with_own_token) == 0 {
                    break;
                }
            }

            // Spin up what work we can, only doing this while we've got available
            // parallelism slots and work left to spawn.
            if codegen_state != Aborted {
                while running_with_own_token < tokens.len()
                    && let Some((item, _)) = work_items.pop()
                {
                    spawn_work(
                        &cgcx,
                        &prof,
                        shared_emitter.clone(),
                        coordinator_send.clone(),
                        &mut llvm_start_time,
                        item,
                    );
                    running_with_own_token += 1;
                }
            }

            // Relinquish accidentally acquired extra tokens.
            tokens.truncate(running_with_own_token);
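            // Unlike the ThinLTO loop above, there is no `- 1` here:
            // `running_with_own_token` already excludes the worker that borrows
            // the implicit token (tracked via `main_thread_state` instead).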

            match coordinator_receive.recv().unwrap() {
                // Save the token locally and the next turn of the loop will use
                // this to spawn a new unit of work, or it may get dropped
                // immediately if we have no more work to spawn.
                Message::Token(token) => {
                    match token {
                        Ok(token) => {
                            tokens.push(token);

                            if main_thread_state == MainThreadState::Lending {
                                // If the main thread token is used for LLVM work
                                // at the moment, we turn that thread into a regular
                                // LLVM worker thread, so the main thread is free
                                // to react to codegen demand.
                                main_thread_state = MainThreadState::Idle;
                                running_with_own_token += 1;
                            }
                        }
                        Err(e) => {
                            let msg = &format!("failed to acquire jobserver token: {e}");
                            shared_emitter.fatal(msg);
                            codegen_state = Aborted;
                        }
                    }
                }

                Message::CodegenDone { llvm_work_item, cost } => {
                    // We keep the queue sorted by estimated processing cost,
                    // so that more expensive items are processed earlier. This
                    // is good for throughput as it gives the main thread more
                    // time to fill up the queue and it avoids scheduling
                    // expensive items to the end.
                    // Note, however, that this is not ideal for memory
                    // consumption, as LLVM module sizes are not evenly
                    // distributed.
                    let insertion_index = work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
                    let insertion_index = match insertion_index {
                        Ok(idx) | Err(idx) => idx,
                    };
                    work_items.insert(insertion_index, (llvm_work_item, cost));

                    if cgcx.parallel {
                        helper.request_token();
                    }
                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
                    main_thread_state = MainThreadState::Idle;
                }

                Message::CodegenComplete => {
                    if codegen_state != Aborted {
                        codegen_state = Completed;
                    }
                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
                    main_thread_state = MainThreadState::Idle;
                }

                // If codegen is aborted that means translation was aborted due
                // to some normal-ish compiler error. In this situation we want
                // to exit as soon as possible, but we want to make sure all
                // existing work has finished. Flag codegen as being done, and
                // then conditions above will ensure no more work is spawned but
                // we'll keep executing this loop until `running_with_own_token`
                // hits 0.
                Message::CodegenAborted => {
                    codegen_state = Aborted;
                }

                Message::WorkItem { result } => {
                    // If a thread exits successfully then we drop a token associated
                    // with that worker and update our `running_with_own_token` count.
                    // We may later re-acquire a token to continue running more work.
                    // We may also not actually drop a token here if the worker was
                    // running with an "ephemeral token".
                    if main_thread_state == MainThreadState::Lending {
                        main_thread_state = MainThreadState::Idle;
                    } else {
                        running_with_own_token -= 1;
                    }

                    match result {
                        Ok(WorkItemResult::Finished(compiled_module)) => {
                            compiled_modules.push(compiled_module);
                        }
                        Ok(WorkItemResult::NeedsFatLto(fat_lto_input)) => {
                            assert!(needs_thin_lto.is_empty());
                            needs_fat_lto.push(fat_lto_input);
                        }
                        Ok(WorkItemResult::NeedsThinLto(name, thin_buffer)) => {
                            assert!(needs_fat_lto.is_empty());
                            needs_thin_lto.push((name, thin_buffer));
                        }
                        Err(Some(WorkerFatalError)) => {
                            // Like `CodegenAborted`, wait for remaining work to finish.
                            codegen_state = Aborted;
                        }
                        Err(None) => {
                            // If the thread failed that means it panicked, so
                            // we abort immediately.
                            bug!("worker thread panicked");
                        }
                    }
                }

                Message::AddImportOnlyModule { module_data, work_product } => {
                    assert_eq!(codegen_state, Ongoing);
                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
                    lto_import_only_modules.push((module_data, work_product));
                    main_thread_state = MainThreadState::Idle;
                }
            }
        }

        // Drop to print timings
        drop(llvm_start_time);

        if codegen_state == Aborted {
            return Err(());
        }

        drop(codegen_state);
        drop(tokens);
        drop(helper);
        assert!(work_items.is_empty());

        if !needs_fat_lto.is_empty() {
            assert!(compiled_modules.is_empty());
            assert!(needs_thin_lto.is_empty());

            if let Some(allocator_module) = allocator_module.take() {
                needs_fat_lto.push(FatLtoInput::InMemory(allocator_module));
            }

            return Ok(MaybeLtoModules::FatLto {
                cgcx,
                exported_symbols_for_lto,
                each_linked_rlib_file_for_lto,
                needs_fat_lto,
                lto_import_only_modules,
            });
        } else if !needs_thin_lto.is_empty() || !lto_import_only_modules.is_empty() {
            assert!(compiled_modules.is_empty());
            assert!(needs_fat_lto.is_empty());

            if cgcx.lto == Lto::ThinLocal {
                compiled_modules.extend(do_thin_lto::<B>(
                    &cgcx,
                    &prof,
                    shared_emitter.clone(),
                    tm_factory,
                    exported_symbols_for_lto,
                    each_linked_rlib_file_for_lto,
                    needs_thin_lto,
                    lto_import_only_modules,
                ));
            } else {
                if let Some(allocator_module) = allocator_module.take() {
                    let thin_buffer = B::serialize_module(allocator_module.module_llvm, true);
                    needs_thin_lto.push((allocator_module.name, thin_buffer));
                }

                return Ok(MaybeLtoModules::ThinLto {
                    cgcx,
                    exported_symbols_for_lto,
                    each_linked_rlib_file_for_lto,
                    needs_thin_lto,
                    lto_import_only_modules,
                });
            }
        }

        Ok(MaybeLtoModules::NoLto(CompiledModules {
            modules: compiled_modules,
            allocator_module: allocator_module.map(|allocator_module| {
                B::codegen(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config)
            }),
        }))
    };
    return std::thread::Builder::new()
        .name("coordinator".to_owned())
        .spawn(f)
        .expect("failed to spawn coordinator thread");

    // A heuristic that determines if we have enough LLVM WorkItems in the
    // queue so that the main thread can do LLVM work instead of codegen.
    fn queue_full_enough(items_in_queue: usize, workers_running: usize) -> bool {
        // This heuristic scales ahead-of-time codegen according to available
        // concurrency, as measured by `workers_running`. The idea is that the
        // more concurrency we have available, the more demand there will be for
        // work items, and the fuller the queue should be kept to meet demand.
        // An important property of this approach is that we codegen ahead of
        // time only as much as necessary, so as to keep fewer LLVM modules in
        // memory at once, thereby reducing memory consumption.
        //
        // When the number of workers running is less than the max concurrency
        // available to us, this heuristic can cause us to instruct the main
        // thread to work on an LLVM item (that is, tell it to "LLVM") instead
        // of codegen, even though it seems like it *should* be codegenning so
        // that we can create more work items and spawn more LLVM workers.
        //
        // But this is not a problem. When the main thread is told to LLVM,
        // according to this heuristic and how work is scheduled, there is
        // always at least one item in the queue, and therefore at least one
        // pending jobserver token request. If there *is* more concurrency
        // available, we will immediately receive a token, which will upgrade
        // the main thread's LLVM worker to a real one (conceptually), and free
        // up the main thread to codegen if necessary. On the other hand, if
        // there isn't more concurrency, then the main thread working on an LLVM
        // item is appropriate, as long as the queue is full enough for demand.
        //
        // Speaking of which, how full should we keep the queue? Probably less
        // full than you'd think. A lot has to go wrong for the queue not to be
        // full enough and for that to have a negative effect on compile times.
        //
        // Workers are unlikely to finish at exactly the same time, so when one
        // finishes and takes another work item off the queue, we often have
        // ample time to codegen at that point before the next worker finishes.
        // But suppose that codegen takes so long that the workers exhaust the
        // queue, and we have one or more workers that have nothing to work on.
        // Well, it might not be so bad. Of all the LLVM modules we create and
        // optimize, one has to finish last. It's not necessarily the case that
        // by losing some concurrency for a moment, we delay the point at which
        // that last LLVM module is finished and the rest of compilation can
        // proceed. Also, when we can't take advantage of some concurrency, we
        // give tokens back to the job server. That enables some other rustc to
        // potentially make use of the available concurrency. That could even
        // *decrease* overall compile time if we're lucky. But yes, if no other
        // rustc can make use of the concurrency, then we've squandered it.
        //
        // However, keeping the queue full is also beneficial when we have a
        // surge in available concurrency. Then items can be taken from the
        // queue immediately, without having to wait for codegen.
        //
        // So, the heuristic below tries to keep one item in the queue for every
        // four running workers. Based on limited benchmarking, this appears to
        // be more than sufficient to avoid increasing compilation times.
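        // `workers_running - 3 * workers_running / 4` is integer arithmetic for
        // `ceil(workers_running / 4)`, e.g. 4 -> 1, 5 -> 2, 8 -> 2, 9 -> 3.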
        let quarter_of_workers = workers_running - 3 * workers_running / 4;
        items_in_queue > 0 && items_in_queue >= quarter_of_workers
    }
}

/// `FatalError` is explicitly not `Send`.
#[must_use]
pub(crate) struct WorkerFatalError;

fn spawn_work<'a, B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &'a SelfProfilerRef,
    shared_emitter: SharedEmitter,
    coordinator_send: Sender<Message<B>>,
    llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
    work: WorkItem<B>,
) {
    if llvm_start_time.is_none() {
        *llvm_start_time = Some(prof.verbose_generic_activity("LLVM_passes"));
    }

    let cgcx = cgcx.clone();
    let prof = prof.clone();

    let name = work.short_description();
    let f = move || {
        let _profiler = if cgcx.time_trace { B::thread_profiler() } else { Box::new(()) };
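
        // `catch_unwind` requires an `UnwindSafe` closure; `AssertUnwindSafe` is
        // acceptable here because on panic we only report the failure back to
        // the coordinator and never touch the captured state again.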
        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
            WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, &prof, shared_emitter, m),
            WorkItem::CopyPostLtoArtifacts(m) => WorkItemResult::Finished(
                execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m),
            ),
        }));

        let msg = match result {
            Ok(result) => Message::WorkItem::<B> { result: Ok(result) },

            // We ignore any `FatalError` coming out of `execute_work_item`, as a
            // diagnostic was already sent off to the main thread - just surface
            // that there was an error in this worker.
            Err(err) if err.is::<FatalErrorMarker>() => {
                Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) }
            }

            Err(_) => Message::WorkItem::<B> { result: Err(None) },
        };
        drop(coordinator_send.send(msg));
    };
    std::thread::Builder::new().name(name).spawn(f).expect("failed to spawn work thread");
}

fn spawn_thin_lto_work<B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    tm_factory: TargetMachineFactoryFn<B>,
    coordinator_send: Sender<ThinLtoMessage>,
    work: ThinLtoWorkItem<B>,
) {
    let cgcx = cgcx.clone();
    let prof = prof.clone();

    let name = work.short_description();
    let f = move || {
        let _profiler = if cgcx.time_trace { B::thread_profiler() } else { Box::new(()) };

        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
                execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m)
            }
            ThinLtoWorkItem::ThinLto(m) => {
                execute_thin_lto_work_item(&cgcx, &prof, shared_emitter, tm_factory, m)
            }
        }));

        let msg = match result {
            Ok(result) => ThinLtoMessage::WorkItem { result: Ok(result) },

            // We ignore any `FatalError` coming out of `execute_work_item`, as a
            // diagnostic was already sent off to the main thread - just surface
            // that there was an error in this worker.
            Err(err) if err.is::<FatalErrorMarker>() => {
                ThinLtoMessage::WorkItem { result: Err(Some(WorkerFatalError)) }
            }

            Err(_) => ThinLtoMessage::WorkItem { result: Err(None) },
        };
        drop(coordinator_send.send(msg));
    };
    std::thread::Builder::new().name(name).spawn(f).expect("failed to spawn work thread");
}

enum SharedEmitterMessage {
    Diagnostic(Diagnostic),
    InlineAsmError(InlineAsmError),
    Fatal(String),
}

pub struct InlineAsmError {
    pub span: SpanData,
    pub msg: String,
    pub level: Level,
    pub source: Option<(String, Vec<InnerSpan>)>,
}

#[derive(Clone)]
pub struct SharedEmitter {
    sender: Sender<SharedEmitterMessage>,
}

pub struct SharedEmitterMain {
    receiver: Receiver<SharedEmitterMessage>,
}

impl SharedEmitter {
    fn new() -> (SharedEmitter, SharedEmitterMain) {
        let (sender, receiver) = channel();

        (SharedEmitter { sender }, SharedEmitterMain { receiver })
    }

    pub fn inline_asm_error(&self, err: InlineAsmError) {
        drop(self.sender.send(SharedEmitterMessage::InlineAsmError(err)));
    }

    fn fatal(&self, msg: &str) {
        drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
    }
}

impl Emitter for SharedEmitter {
    fn emit_diagnostic(&mut self, mut diag: rustc_errors::DiagInner) {
        // Check that we aren't missing anything interesting when converting to
        // the cut-down local `DiagInner`.
        assert!(!diag.span.has_span_labels());
        assert_eq!(diag.suggestions, Suggestions::Enabled(vec![]));
        assert_eq!(diag.sort_span, rustc_span::DUMMY_SP);
        assert_eq!(diag.is_lint, None);
        // No sensible check for `diag.emitted_at`.

        let args = mem::replace(&mut diag.args, DiagArgMap::default());
        drop(
            self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
                span: diag.span.primary_spans().iter().map(|span| span.data()).collect::<Vec<_>>(),
                level: diag.level(),
                messages: diag.messages,
                code: diag.code,
                children: diag
                    .children
                    .into_iter()
                    .map(|child| Subdiagnostic { level: child.level, messages: child.messages })
                    .collect(),
                args,
            })),
        );
    }

    fn source_map(&self) -> Option<&SourceMap> {
        None
    }
}

impl SharedEmitterMain {
    fn check(&self, sess: &Session, blocking: bool) {
        loop {
            let message = if blocking {
                match self.receiver.recv() {
                    Ok(message) => Ok(message),
                    Err(_) => Err(()),
                }
            } else {
                match self.receiver.try_recv() {
                    Ok(message) => Ok(message),
                    Err(_) => Err(()),
                }
            };

            match message {
                Ok(SharedEmitterMessage::Diagnostic(diag)) => {
                    // The diagnostic has been received on the main thread.
                    // Convert it back to a full `Diagnostic` and emit.
                    let dcx = sess.dcx();
                    let mut d =
                        rustc_errors::DiagInner::new_with_messages(diag.level, diag.messages);
                    d.span = MultiSpan::from_spans(
                        diag.span.into_iter().map(|span| span.span()).collect(),
                    );
                    d.code = diag.code; // may be `None`, that's ok
                    d.children = diag
                        .children
                        .into_iter()
                        .map(|sub| rustc_errors::Subdiag {
                            level: sub.level,
                            messages: sub.messages,
                            span: MultiSpan::new(),
                        })
                        .collect();
                    d.args = diag.args;
                    dcx.emit_diagnostic(d);
                    sess.dcx().abort_if_errors();
                }
                Ok(SharedEmitterMessage::InlineAsmError(inner)) => {
                    assert_matches!(inner.level, Level::Error | Level::Warning | Level::Note);
                    let mut err = Diag::<()>::new(sess.dcx(), inner.level, inner.msg);
                    if !inner.span.is_dummy() {
                        err.span(inner.span.span());
                    }

                    // Point to the generated assembly if it is available.
                    if let Some((buffer, spans)) = inner.source {
                        let source = sess
                            .source_map()
                            .new_source_file(FileName::inline_asm_source_code(&buffer), buffer);
                        let spans: Vec<_> = spans
                            .iter()
                            .map(|sp| {
                                Span::with_root_ctxt(
                                    source.normalized_byte_pos(sp.start as u32),
                                    source.normalized_byte_pos(sp.end as u32),
                                )
                            })
                            .collect();
                        err.span_note(spans, "instantiated into assembly here");
                    }

                    err.emit();
                }
                Ok(SharedEmitterMessage::Fatal(msg)) => {
                    sess.dcx().fatal(msg);
                }
                Err(_) => {
                    break;
                }
            }
        }
    }
}

pub struct Coordinator<B: WriteBackendMethods> {
    sender: Sender<Message<B>>,
    future: Option<thread::JoinHandle<Result<MaybeLtoModules<B>, ()>>>,
    // Only used for the Message type.
    phantom: PhantomData<B>,
}

impl<B: WriteBackendMethods> Coordinator<B> {
    fn join(mut self) -> std::thread::Result<Result<MaybeLtoModules<B>, ()>> {
        self.future.take().unwrap().join()
    }
}

impl<B: WriteBackendMethods> Drop for Coordinator<B> {
    fn drop(&mut self) {
        if let Some(future) = self.future.take() {
            // If we haven't joined yet, signal to the coordinator that it should spawn no more
            // work, and wait for worker threads to finish.
            drop(self.sender.send(Message::CodegenAborted::<B>));
            drop(future.join());
        }
    }
}

pub struct OngoingCodegen<B: WriteBackendMethods> {
    pub backend: B,
    pub output_filenames: Arc<OutputFilenames>,
    // The field order below matters: the coordinator thread must be terminated
    // before the two fields below it are dropped, as they would otherwise
    // prematurely close channels used by the coordinator thread. See
    // `Coordinator`'s `Drop` implementation for more info.
    pub coordinator: Coordinator<B>,
    pub codegen_worker_receive: Receiver<CguMessage>,
    pub shared_emitter_main: SharedEmitterMain,
}

impl<B: WriteBackendMethods> OngoingCodegen<B> {
    pub fn join(self, sess: &Session) -> (CompiledModules, FxIndexMap<WorkProductId, WorkProduct>) {
        self.shared_emitter_main.check(sess, true);

        let maybe_lto_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
            Ok(Ok(maybe_lto_modules)) => maybe_lto_modules,
            Ok(Err(())) => {
                sess.dcx().abort_if_errors();
                panic!("expected abort due to worker thread errors")
            }
            Err(_) => {
                bug!("panic during codegen/LLVM phase");
            }
        });

        sess.dcx().abort_if_errors();

        let (shared_emitter, shared_emitter_main) = SharedEmitter::new();

        // Catch fatal errors to ensure shared_emitter_main.check() can emit the actual diagnostics
        let compiled_modules = catch_fatal_errors(|| match maybe_lto_modules {
            MaybeLtoModules::NoLto(compiled_modules) => {
                drop(shared_emitter);
                compiled_modules
            }
            MaybeLtoModules::FatLto {
                cgcx,
                exported_symbols_for_lto,
                each_linked_rlib_file_for_lto,
                needs_fat_lto,
                lto_import_only_modules,
            } => {
                let tm_factory = self.backend.target_machine_factory(
                    sess,
                    cgcx.opt_level,
                    &cgcx.backend_features,
                );

                CompiledModules {
                    modules: vec![do_fat_lto(
                        &cgcx,
                        &sess.prof,
                        shared_emitter,
                        tm_factory,
                        &exported_symbols_for_lto,
                        &each_linked_rlib_file_for_lto,
                        needs_fat_lto,
                        lto_import_only_modules,
                    )],
                    allocator_module: None,
                }
            }
            MaybeLtoModules::ThinLto {
                cgcx,
                exported_symbols_for_lto,
                each_linked_rlib_file_for_lto,
                needs_thin_lto,
                lto_import_only_modules,
            } => {
                let tm_factory = self.backend.target_machine_factory(
                    sess,
                    cgcx.opt_level,
                    &cgcx.backend_features,
                );

                CompiledModules {
                    modules: do_thin_lto::<B>(
                        &cgcx,
                        &sess.prof,
                        shared_emitter,
                        tm_factory,
                        exported_symbols_for_lto,
                        each_linked_rlib_file_for_lto,
                        needs_thin_lto,
                        lto_import_only_modules,
                    ),
                    allocator_module: None,
                }
            }
        });

        shared_emitter_main.check(sess, true);

        sess.dcx().abort_if_errors();

        let mut compiled_modules =
            compiled_modules.expect("fatal error emitted but not sent to SharedEmitter");

        // Regardless of what order these modules completed in, report them to
        // the backend in the same order every time to ensure that we're handing
        // out deterministic results.
        compiled_modules.modules.sort_by(|a, b| a.name.cmp(&b.name));

        let work_products =
            copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
        produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);

        (compiled_modules, work_products)
    }

    pub(crate) fn codegen_finished(&self, tcx: TyCtxt<'_>) {
        self.wait_for_signal_to_codegen_item();
        self.check_for_errors(tcx.sess);
        drop(self.coordinator.sender.send(Message::CodegenComplete::<B>));
    }

    pub(crate) fn check_for_errors(&self, sess: &Session) {
        self.shared_emitter_main.check(sess, false);
    }

    pub(crate) fn wait_for_signal_to_codegen_item(&self) {
        match self.codegen_worker_receive.recv() {
            Ok(CguMessage) => {
                // Ok to proceed.
            }
            Err(_) => {
                // One of the LLVM threads must have panicked, fall through so
                // error handling can be reached.
            }
        }
    }
}

pub(crate) fn submit_codegened_module_to_llvm<B: WriteBackendMethods>(
    coordinator: &Coordinator<B>,
    module: ModuleCodegen<B::Module>,
    cost: u64,
) {
    let llvm_work_item = WorkItem::Optimize(module);
    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost }));
}

pub(crate) fn submit_post_lto_module_to_llvm<B: WriteBackendMethods>(
    coordinator: &Coordinator<B>,
    module: CachedModuleCodegen,
) {
    let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost: 0 }));
}

pub(crate) fn submit_pre_lto_module_to_llvm<B: WriteBackendMethods>(
    tcx: TyCtxt<'_>,
    coordinator: &Coordinator<B>,
    module: CachedModuleCodegen,
) {
    let filename = pre_lto_bitcode_filename(&module.name);
    let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename);
    let file = fs::File::open(&bc_path)
        .unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));

    let mmap = unsafe {
        Mmap::map(file).unwrap_or_else(|e| {
            panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
        })
    };
    // Schedule the module to be loaded
    drop(coordinator.sender.send(Message::AddImportOnlyModule::<B> {
        module_data: SerializedModule::FromUncompressedFile(mmap),
        work_product: module.source,
    }));
}
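
/// For example, a module named `foo` maps to `foo.pre-lto.bc`
/// (see `PRE_LTO_BC_EXT`).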
fn pre_lto_bitcode_filename(module_name: &str) -> String {
    format!("{module_name}.{PRE_LTO_BC_EXT}")
}

fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
    // This should never be true (because it's not supported). If it is true,
    // something is wrong with command-line arg validation.
    assert!(
        !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
            && tcx.sess.target.is_like_windows
            && tcx.sess.opts.cg.prefer_dynamic)
    );

    // We need to generate `_imp__` symbols if we are generating an rlib or we include one
    // indirectly from ThinLTO. In theory these are not needed as ThinLTO could resolve
    // these, but it currently does not do so.
    let can_have_static_objects =
        tcx.sess.lto() == Lto::Thin || tcx.crate_types().contains(&CrateType::Rlib);

    tcx.sess.target.is_like_windows &&
        can_have_static_objects &&
        // ThinLTO can't handle this workaround in all cases, so we don't
        // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
        // dynamic linking when linker plugin LTO is enabled.
        !tcx.sess.opts.cg.linker_plugin_lto.enabled()
}