//! Renders the JSON output of libtest into a human-readable form, mirroring libtest's own
//! output format as closely as possible.

use std::io::{BufRead, BufReader, Read, Write};
use std::process::ChildStdout;
use std::time::Duration;

use termcolor::{Color, ColorSpec, WriteColor};

use crate::core::builder::Builder;
use crate::utils::exec::BootstrapCommand;

const TERSE_TESTS_PER_LINE: usize = 88;

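/// Adds the `--` separator (if not already present) and the flags that put libtest into
/// JSON output mode, then runs the tests via [`try_run_tests`].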
pub(crate) fn add_flags_and_try_run_tests(
    builder: &Builder<'_>,
    cmd: &mut BootstrapCommand,
) -> bool {
    if !cmd.get_args().any(|arg| arg == "--") {
        cmd.arg("--");
    }
    cmd.args(["-Z", "unstable-options", "--format", "json"]);

    try_run_tests(builder, cmd, false)
}

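/// Runs the test command and reports whether it succeeded. On failure, either exits
/// immediately (when `fail_fast` is enabled) or records the command as a delayed failure.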
pub(crate) fn try_run_tests(
    builder: &Builder<'_>,
    cmd: &mut BootstrapCommand,
    stream: bool,
) -> bool {
    if run_tests(builder, cmd, stream) {
        return true;
    }

    if builder.fail_fast {
        crate::exit!(1);
    }

    builder.config.exec_ctx().add_to_delay_failure(format!("{cmd:?}"));

    false
}

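/// Spawns the test command with its stdout captured and feeds the output through a
/// [`Renderer`], either streaming it verbatim or rendering the parsed JSON messages.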
fn run_tests(builder: &Builder<'_>, cmd: &mut BootstrapCommand, stream: bool) -> bool {
    builder.verbose(|| println!("running: {cmd:?}"));

    let Some(mut streaming_command) = cmd.stream_capture_stdout(&builder.config.exec_ctx) else {
        return true;
    };

    let renderer = Renderer::new(streaming_command.stdout.take().unwrap(), builder);
    if stream {
        renderer.stream_all();
    } else {
        renderer.render_all();
    }

    let status = streaming_command.wait(&builder.config.exec_ctx).unwrap();
    if !status.success() && builder.is_verbose() {
        println!(
            "\n\ncommand did not execute successfully: {cmd:?}\n\
             expected success, got: {status}",
        );
    }

    status.success()
}

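/// Incrementally parses the JSON messages emitted by libtest on stdout and renders them as
/// human-readable output, tracking progress and failures across the suite.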
struct Renderer<'a> {
    stdout: BufReader<ChildStdout>,
    failures: Vec<TestOutcome>,
    benches: Vec<BenchOutcome>,
    builder: &'a Builder<'a>,
    tests_count: Option<usize>,
    executed_tests: usize,
    /// Number of tests that were skipped because they were already up to date.
    up_to_date_tests: usize,
    ignored_tests: usize,
    terse_tests_in_line: usize,
    /// The last progress percentage logged on CI, advanced in steps of 0.10.
    ci_latest_logged_percentage: f64,
}

impl<'a> Renderer<'a> {
    fn new(stdout: ChildStdout, builder: &'a Builder<'a>) -> Self {
        Self {
            stdout: BufReader::new(stdout),
            benches: Vec::new(),
            failures: Vec::new(),
            builder,
            tests_count: None,
            executed_tests: 0,
            up_to_date_tests: 0,
            ignored_tests: 0,
            terse_tests_in_line: 0,
            ci_latest_logged_percentage: 0.0,
        }
    }

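    /// Reads the runner's stdout line by line, rendering every line that parses as a JSON
    /// message and forwarding anything else verbatim.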
    fn render_all(mut self) {
        let mut line = Vec::new();
        loop {
            line.clear();
            match self.stdout.read_until(b'\n', &mut line) {
                Ok(_) => {}
                Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
                Err(err) => panic!("failed to read output of test runner: {err}"),
            }
            if line.is_empty() {
                break;
            }

            match serde_json::from_slice(&line) {
                Ok(parsed) => self.render_message(parsed),
                Err(_err) => {
                    // Not a JSON message: forward the raw line to our stdout unchanged.
                    let mut stdout = std::io::stdout();
                    stdout.write_all(&line).unwrap();
                    let _ = stdout.flush();
                }
            }
        }

        if self.up_to_date_tests > 0 {
            let n = self.up_to_date_tests;
            let s = if n > 1 { "s" } else { "" };
            println!("help: ignored {n} up-to-date test{s}; use `--force-rerun` to prevent this\n");
        }
    }

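    /// Copies the runner's stdout to our stdout byte by byte, without interpreting it.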
    fn stream_all(mut self) {
        let mut buffer = [0; 1];
        loop {
            match self.stdout.read(&mut buffer) {
                Ok(0) => break,
                Ok(_) => {
                    let mut stdout = std::io::stdout();
                    stdout.write_all(&buffer).unwrap();
                    let _ = stdout.flush();
                }
                Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
                Err(err) => panic!("failed to read output of test runner: {err}"),
            }
        }
    }

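    /// Records the outcome of a single test and renders it in the verbose, CI, or terse
    /// style, depending on the current configuration.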
    fn render_test_outcome(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        self.executed_tests += 1;

        if let Outcome::Ignored { reason } = outcome {
            self.ignored_tests += 1;
            if reason == Some("up-to-date") {
                self.up_to_date_tests += 1;
            }
        }

        #[cfg(feature = "build-metrics")]
        self.builder.metrics.record_test(
            &test.name,
            match outcome {
                Outcome::Ok | Outcome::BenchOk => build_helper::metrics::TestOutcome::Passed,
                Outcome::Failed => build_helper::metrics::TestOutcome::Failed,
                Outcome::Ignored { reason } => build_helper::metrics::TestOutcome::Ignored {
                    ignore_reason: reason.map(|s| s.to_string()),
                },
            },
            self.builder,
        );

        if self.builder.config.verbose_tests {
            self.render_test_outcome_verbose(outcome, test);
        } else if self.builder.config.is_running_on_ci {
            self.render_test_outcome_ci(outcome, test);
        } else {
            self.render_test_outcome_terse(outcome, test);
        }
    }

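    /// Renders `test <name> ... <outcome>` on its own line, including the execution time
    /// when it is available.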
    fn render_test_outcome_verbose(&self, outcome: Outcome<'_>, test: &TestOutcome) {
        print!("test {} ... ", test.name);
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        if let Some(exec_time) = test.exec_time {
            print!(" ({exec_time:.2?})");
        }
        println!();
    }

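    /// Renders one character per test, wrapping after `TERSE_TESTS_PER_LINE` tests and
    /// appending an `executed/total` progress count to each completed line.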
    fn render_test_outcome_terse(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        if self.terse_tests_in_line != 0
            && self.terse_tests_in_line.is_multiple_of(TERSE_TESTS_PER_LINE)
        {
            if let Some(total) = self.tests_count {
                let total = total.to_string();
                let executed = format!("{:>width$}", self.executed_tests - 1, width = total.len());
                print!(" {executed}/{total}");
            }
            println!();
            self.terse_tests_in_line = 0;
        }

        self.terse_tests_in_line += 1;
        self.builder.colored_stdout(|stdout| outcome.write_short(stdout, &test.name)).unwrap();
        let _ = std::io::stdout().flush();
    }

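    /// On CI, logs a summary line roughly every 10% of executed tests instead of printing
    /// one character per test, to keep the logs compact.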
    fn render_test_outcome_ci(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        if let Some(total) = self.tests_count {
            let percent = self.executed_tests as f64 / total as f64;

            if self.ci_latest_logged_percentage + 0.10 < percent {
                let total = total.to_string();
                let executed = format!("{:>width$}", self.executed_tests, width = total.len());
                let pretty_percent = format!("{:.0}%", percent * 100.0);
                let passed_tests = self.executed_tests - (self.failures.len() + self.ignored_tests);
                println!(
                    "{:<4} -- {executed}/{total}, {:>total_indent$} passed, {} failed, {} ignored",
                    pretty_percent,
                    passed_tests,
                    self.failures.len(),
                    self.ignored_tests,
                    total_indent = total.len()
                );
                self.ci_latest_logged_percentage += 0.10;
            }
        }

        self.builder.colored_stdout(|stdout| outcome.write_ci(stdout, &test.name)).unwrap();
        let _ = std::io::stdout().flush();
    }

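    /// Prints the summary for a finished suite: per-test failure output, benchmark results,
    /// and the final pass/fail/ignore counts.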
    fn render_suite_outcome(&self, outcome: Outcome<'_>, suite: &SuiteOutcome) {
        // The terse output doesn't end its last line with a newline, so add one here.
        if !self.builder.config.verbose_tests {
            println!();
        }

        if !self.failures.is_empty() {
            println!("\nfailures:\n");
            for failure in &self.failures {
                if failure.stdout.is_some() || failure.message.is_some() {
                    println!("---- {} stdout ----", failure.name);
                    if let Some(stdout) = &failure.stdout {
                        print!("{stdout}");
                        // Mark a missing trailing newline explicitly, so the next line of
                        // our own output doesn't get glued to the captured stdout.
                        if !stdout.ends_with('\n') {
                            println!("\n\\ (no newline at end of output)");
                        }
                    }
                    println!("---- {} stdout end ----", failure.name);
                    if let Some(message) = &failure.message {
                        println!("NOTE: {message}");
                    }
                }
            }

            println!("\nfailures:");
            for failure in &self.failures {
                println!(" {}", failure.name);
            }
        }

        if !self.benches.is_empty() {
            println!("\nbenchmarks:");

            let mut rows = Vec::new();
            for bench in &self.benches {
                rows.push((
                    &bench.name,
                    format!("{:.2?}ns/iter", bench.median),
                    format!("+/- {:.2?}", bench.deviation),
                ));
            }

            let max_0 = rows.iter().map(|r| r.0.len()).max().unwrap_or(0);
            let max_1 = rows.iter().map(|r| r.1.len()).max().unwrap_or(0);
            let max_2 = rows.iter().map(|r| r.2.len()).max().unwrap_or(0);
            for row in &rows {
                println!(" {:<max_0$} {:>max_1$} {:>max_2$}", row.0, row.1, row.2);
            }
        }

        print!("\ntest result: ");
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        println!(
            ". {} passed; {} failed; {} ignored; {} measured; {} filtered out{time}\n",
            suite.passed,
            suite.failed,
            suite.ignored,
            suite.measured,
            suite.filtered_out,
            time = match suite.exec_time {
                Some(t) => format!("; finished in {:.2?}", Duration::from_secs_f64(t)),
                None => String::new(),
            }
        );
    }

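    /// Dispatches one parsed libtest JSON message to the appropriate rendering logic.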
    fn render_message(&mut self, message: Message) {
        match message {
            Message::Suite(SuiteMessage::Started { test_count }) => {
                println!("\nrunning {test_count} tests");
                self.benches = vec![];
                self.failures = vec![];
                self.ignored_tests = 0;
                self.executed_tests = 0;
                self.terse_tests_in_line = 0;
                self.tests_count = Some(test_count);
            }
            Message::Suite(SuiteMessage::Ok(outcome)) => {
                self.render_suite_outcome(Outcome::Ok, &outcome);
            }
            Message::Suite(SuiteMessage::Failed(outcome)) => {
                self.render_suite_outcome(Outcome::Failed, &outcome);
            }
            Message::Bench(outcome) => {
                // Benchmarks are rendered through the same path as tests, so build a
                // synthetic test outcome carrying just the benchmark's name.
                let fake_test_outcome = TestOutcome {
                    name: outcome.name.clone(),
                    exec_time: None,
                    stdout: None,
                    message: None,
                };
                self.render_test_outcome(Outcome::BenchOk, &fake_test_outcome);
                self.benches.push(outcome);
            }
            Message::Test(TestMessage::Ok(outcome)) => {
                self.render_test_outcome(Outcome::Ok, &outcome);
            }
            Message::Test(TestMessage::Ignored(outcome)) => {
                self.render_test_outcome(
                    Outcome::Ignored { reason: outcome.message.as_deref() },
                    &outcome,
                );
            }
            Message::Test(TestMessage::Failed(outcome)) => {
                self.render_test_outcome(Outcome::Failed, &outcome);
                self.failures.push(outcome);
            }
            Message::Test(TestMessage::Timeout { name }) => {
                println!("test {name} has been running for a long time");
            }
            Message::Test(TestMessage::Started) => {}
        }
    }
}

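/// The rendered outcome of a single test, benchmark, or whole suite.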
enum Outcome<'a> {
    Ok,
    BenchOk,
    Failed,
    Ignored { reason: Option<&'a str> },
}

impl Outcome<'_> {
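    /// Writes the single-character terse representation of this outcome.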
    fn write_short(&self, writer: &mut dyn WriteColor, name: &str) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, ".")?;
            }
            Outcome::BenchOk => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "b")?;
            }
            Outcome::Failed => {
                // Print failures on their own line, including the test name, so a failure
                // is visible without waiting for the rest of the suite to finish.
                writeln!(writer)?;
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                writeln!(writer, "{name} ... F")?;
            }
            Outcome::Ignored { .. } => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "i")?;
            }
        }
        writer.reset()
    }

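    /// Writes the full-word representation of this outcome (e.g. `ok` or `FAILED`).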
    fn write_long(&self, writer: &mut dyn WriteColor) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, "ok")?;
            }
            Outcome::BenchOk => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "benchmarked")?;
            }
            Outcome::Failed => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                write!(writer, "FAILED")?;
            }
            Outcome::Ignored { reason } => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "ignored")?;
                if let Some(reason) = reason {
                    write!(writer, ", {reason}")?;
                }
            }
        }
        writer.reset()
    }

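    /// Writes the CI representation: silent for everything except failures.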
    fn write_ci(&self, writer: &mut dyn WriteColor, name: &str) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok | Outcome::BenchOk | Outcome::Ignored { .. } => {}
            Outcome::Failed => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                writeln!(writer, " {name} ... FAILED")?;
            }
        }
        writer.reset()
    }
}

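/// A single JSON message emitted by libtest on stdout in `--format json` mode.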
#[derive(serde_derive::Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum Message {
    Suite(SuiteMessage),
    Test(TestMessage),
    Bench(BenchOutcome),
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum SuiteMessage {
    Ok(SuiteOutcome),
    Failed(SuiteOutcome),
    Started { test_count: usize },
}

#[derive(serde_derive::Deserialize)]
struct SuiteOutcome {
    passed: usize,
    failed: usize,
    ignored: usize,
    measured: usize,
    filtered_out: usize,
    /// Wall time of the suite in seconds, when libtest reported it.
    exec_time: Option<f64>,
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum TestMessage {
    Ok(TestOutcome),
    Failed(TestOutcome),
    Ignored(TestOutcome),
    Timeout { name: String },
    Started,
}

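/// A benchmark result; `median` is rendered as ns/iter, with `deviation` as the +/- spread.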
#[derive(serde_derive::Deserialize)]
struct BenchOutcome {
    name: String,
    median: f64,
    deviation: f64,
}

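/// Data attached to test events; also constructed synthetically to render benchmark results.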
#[derive(serde_derive::Deserialize)]
struct TestOutcome {
    name: String,
    exec_time: Option<f64>,
    stdout: Option<String>,
    message: Option<String>,
}