cargo/core/compiler/job_queue/job_state.rs
//! See [`JobState`].

use std::{cell::Cell, io::Write, marker, sync::Arc};

use cargo_util::ProcessBuilder;

use crate::core::compiler::build_runner::OutputFile;
use crate::core::compiler::future_incompat::FutureBreakageItem;
use crate::util::Queue;
use crate::CargoResult;

use super::{Artifact, DiagDedupe, Job, JobId, Message};

/// A `JobState` is constructed by `JobQueue::run` and passed to `Job::run`. It includes everything
/// necessary to communicate between the main thread and the execution of the job.
///
/// The job may execute on either a dedicated thread or the main thread. If the job executes on the
/// main thread, the `output` field must be set to prevent a deadlock.
pub struct JobState<'a, 'gctx> {
    /// Channel back to the main thread to coordinate messages and such.
    ///
    /// When the `output` field is `Some`, care must be taken to avoid calling `push_bounded` on
    /// the message queue to prevent a deadlock.
    messages: Arc<Queue<Message>>,

    /// Normally output is sent to the job queue with backpressure. When the job is fresh,
    /// however, we need to display the output immediately to prevent a deadlock, as the
    /// output messages are processed on the same thread they are sent from. `output`
    /// defines where to write the output in this case.
    ///
    /// Currently the [`Shell`] inside [`GlobalContext`] is wrapped in a `RefCell` and thus can't
    /// be passed between threads. This means that it isn't possible for multiple output messages
    /// to be interleaved. In the future, it may be wrapped in a `Mutex` instead. In that case
    /// interleaving would still be prevented, as the lock would be held for the whole printing
    /// of an output message.
    ///
    /// [`Shell`]: crate::core::Shell
    /// [`GlobalContext`]: crate::GlobalContext
    output: Option<&'a DiagDedupe<'gctx>>,

    /// The job id that this state is associated with, used when sending
    /// messages back to the main thread.
    id: JobId,

    /// Whether or not we're expected to have a call to `rmeta_produced`. Once
    /// that method is called, this is set to `false` to prevent sending a
    /// duplicate message later on.
    rmeta_required: Cell<bool>,

    // Historical versions of Cargo made use of the `'a` argument here, so to
    // leave the door open to future refactorings, keep it here.
    _marker: marker::PhantomData<&'a ()>,
}

impl<'a, 'gctx> JobState<'a, 'gctx> {
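    /// Creates a `JobState` for the job with the given `id`.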
    pub(super) fn new(
        id: JobId,
        messages: Arc<Queue<Message>>,
        output: Option<&'a DiagDedupe<'gctx>>,
        rmeta_required: bool,
    ) -> Self {
        Self {
            id,
            messages,
            output,
            rmeta_required: Cell::new(rmeta_required),
            _marker: marker::PhantomData,
        }
    }

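    /// Tells the main thread that this job is running `cmd`. See [`Message::Run`].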
    pub fn running(&self, cmd: &ProcessBuilder) {
        self.messages.push(Message::Run(self.id, cmd.to_string()));
    }

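    /// Sends the build plan information for this job (module name, command, and output
    /// filenames) back to the main thread. See [`Message::BuildPlanMsg`].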
    pub fn build_plan(
        &self,
        module_name: String,
        cmd: ProcessBuilder,
        filenames: Arc<Vec<OutputFile>>,
    ) {
        self.messages
            .push(Message::BuildPlanMsg(module_name, cmd, filenames));
    }

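    /// Forwards a line of stdout produced by the job. If `output` is set, the line is written
    /// directly to the shell; otherwise it is sent to the main thread with backpressure.
    /// See [`Message::Stdout`].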
    pub fn stdout(&self, stdout: String) -> CargoResult<()> {
        if let Some(dedupe) = self.output {
            writeln!(dedupe.gctx.shell().out(), "{}", stdout)?;
        } else {
            self.messages.push_bounded(Message::Stdout(stdout));
        }
        Ok(())
    }

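    /// Forwards stderr produced by the job. If `output` is set, the bytes are printed to the
    /// shell with their ANSI escape codes preserved; otherwise they are sent to the main
    /// thread with backpressure. See [`Message::Stderr`].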
    pub fn stderr(&self, stderr: String) -> CargoResult<()> {
        if let Some(dedupe) = self.output {
            let mut shell = dedupe.gctx.shell();
            shell.print_ansi_stderr(stderr.as_bytes())?;
            shell.err().write_all(b"\n")?;
        } else {
            self.messages.push_bounded(Message::Stderr(stderr));
        }
        Ok(())
    }

    /// See [`Message::Diagnostic`] and [`Message::WarningCount`].
    pub fn emit_diag(&self, level: &str, diag: String, fixable: bool) -> CargoResult<()> {
        if let Some(dedupe) = self.output {
            let emitted = dedupe.emit_diag(&diag)?;
            if level == "warning" {
                self.messages.push(Message::WarningCount {
                    id: self.id,
                    emitted,
                    fixable,
                });
            }
        } else {
            self.messages.push_bounded(Message::Diagnostic {
                id: self.id,
                level: level.to_string(),
                diag,
                fixable,
            });
        }
        Ok(())
    }

    /// See [`Message::Warning`].
    pub fn warning(&self, warning: String) -> CargoResult<()> {
        self.messages.push_bounded(Message::Warning {
            id: self.id,
            warning,
        });
        Ok(())
    }

    /// Signals to the coordinator thread that the rmeta file for an rlib has
    /// been produced. This is only called for some rmeta builds when required,
    /// and can be called at any time before a job ends. It should only be
    /// called once, because a metadata file can only be produced once!
    pub fn rmeta_produced(&self) {
        self.rmeta_required.set(false);
        self.messages
            .push(Message::Finish(self.id, Artifact::Metadata, Ok(())));
    }

    /// Drives a [`Job`] to finish. This ensures that a [`Message::Finish`] is
    /// sent even if our job panics.
    pub(super) fn run_to_finish(self, job: Job) {
        let mut sender = FinishOnDrop {
            messages: &self.messages,
            id: self.id,
            result: None,
        };
        sender.result = Some(job.run(&self));

        // If `rmeta_required` wasn't consumed but it was set
        // previously, then we either have:
        //
        // 1. The `job` didn't do anything because it was "fresh".
        // 2. The `job` returned an error and didn't reach the point where
        //    it called `rmeta_produced`.
        // 3. We forgot to call `rmeta_produced` and there's a bug in Cargo.
        //
        // Ruling out the third, the other two are pretty common. For 2
        // we'll just naturally abort the compilation operation, but for 1
        // we need to make sure that the metadata is flagged as produced, so
        // we send a synthetic message here.
        if self.rmeta_required.get() && sender.result.as_ref().unwrap().is_ok() {
            self.messages
                .push(Message::Finish(self.id, Artifact::Metadata, Ok(())));
        }

        // Use a helper struct with a `Drop` implementation to guarantee
        // that a `Finish` message is sent even if our job panics. We
        // shouldn't panic unless there's a bug in Cargo, so we just need
        // to make sure nothing hangs by accident.
        struct FinishOnDrop<'a> {
            messages: &'a Queue<Message>,
            id: JobId,
            result: Option<CargoResult<()>>,
        }

        impl Drop for FinishOnDrop<'_> {
            fn drop(&mut self) {
                let result = self
                    .result
                    .take()
                    .unwrap_or_else(|| Err(anyhow::format_err!("worker panicked")));
                self.messages
                    .push(Message::Finish(self.id, Artifact::All, result));
            }
        }
    }

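    /// Sends the future-incompatibility `report` generated by this job back to the main
    /// thread. See [`Message::FutureIncompatReport`].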
    pub fn future_incompat_report(&self, report: Vec<FutureBreakageItem>) {
        self.messages
            .push(Message::FutureIncompatReport(self.id, report));
    }
}