bootstrap/utils/render_tests.rs

//! This module renders the JSON output of libtest into a human-readable form, trying to be as
//! similar to libtest's native output as possible.
//!
//! This is needed because we need to use libtest in JSON mode to extract granular information
//! about the executed tests. Doing so suppresses the human-readable output, and (unlike Cargo
//! and rustc) libtest doesn't include the rendered human-readable output as a JSON field. We had
//! to reimplement all the rendering logic in this module because of that.
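//!
//! For reference, the stream we parse consists of self-describing JSON messages, one per line,
//! roughly of this shape (see the `Message` types at the bottom of this module for the exact
//! fields we rely on):
//!
//! ```text
//! { "type": "suite", "event": "started", "test_count": 2 }
//! { "type": "test", "event": "started", "name": "tests::example" }
//! { "type": "test", "event": "ok", "name": "tests::example" }
//! { "type": "test", "event": "failed", "name": "tests::other", "stdout": "..." }
//! { "type": "suite", "event": "failed", "passed": 1, "failed": 1, ... }
//! ```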

use std::io::{BufRead, BufReader, Read, Write};
use std::process::{ChildStdout, Stdio};
use std::time::Duration;

use termcolor::{Color, ColorSpec, WriteColor};

use crate::core::builder::Builder;
use crate::utils::exec::BootstrapCommand;

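/// Number of terse (single-character) test results printed per line before the running
/// `executed/total` count is appended and a new line is started.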
const TERSE_TESTS_PER_LINE: usize = 88;

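/// Adds the flags needed to run libtest in JSON mode (inserting the `--` separator first if the
/// command doesn't already contain one), then runs the tests and renders their output through
/// [`Renderer`]. Returns whether all tests passed.
///
/// A minimal sketch of a call site (the command construction below is hypothetical; real callers
/// build `cmd` from their own test binary or tool):
///
/// ```ignore
/// // Hypothetical command; real callers obtain the test binary from the build tree.
/// let mut cmd = BootstrapCommand::new("path/to/test-binary");
/// cmd.arg("--test-threads=1");
/// if !add_flags_and_try_run_tests(builder, &mut cmd) {
///     eprintln!("some tests failed");
/// }
/// ```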
pub(crate) fn add_flags_and_try_run_tests(
    builder: &Builder<'_>,
    cmd: &mut BootstrapCommand,
) -> bool {
    if !cmd.get_args().any(|arg| arg == "--") {
        cmd.arg("--");
    }
    cmd.args(["-Z", "unstable-options", "--format", "json"]);

    try_run_tests(builder, cmd, false)
}

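/// Runs the test command and reports whether it succeeded.
///
/// In dry-run mode the command is only marked as executed. On failure, bootstrap either exits
/// immediately (when fail-fast is enabled) or records the command as a delayed failure and
/// returns `false`.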
pub(crate) fn try_run_tests(
    builder: &Builder<'_>,
    cmd: &mut BootstrapCommand,
    stream: bool,
) -> bool {
    if builder.config.dry_run() {
        cmd.mark_as_executed();
        return true;
    }

    if !run_tests(builder, cmd, stream) {
        if builder.fail_fast {
            crate::exit!(1);
        } else {
            let mut failures = builder.delayed_failures.borrow_mut();
            failures.push(format!("{cmd:?}"));
            false
        }
    } else {
        true
    }
}

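/// Spawns the command with a piped stdout and forwards its output through [`Renderer`], either
/// streaming it verbatim or re-rendering the JSON messages. Returns whether the child exited
/// successfully.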
fn run_tests(builder: &Builder<'_>, cmd: &mut BootstrapCommand, stream: bool) -> bool {
    let cmd = cmd.as_command_mut();
    cmd.stdout(Stdio::piped());

    builder.verbose(|| println!("running: {cmd:?}"));

    let mut process = cmd.spawn().unwrap();

    // This runs until the stdout of the child is closed, which means the child exited. We don't
    // run this on another thread since the builder is not Sync.
    let renderer = Renderer::new(process.stdout.take().unwrap(), builder);
    if stream {
        renderer.stream_all();
    } else {
        renderer.render_all();
    }

    let result = process.wait_with_output().unwrap();
    if !result.status.success() && builder.is_verbose() {
        println!(
            "\n\ncommand did not execute successfully: {cmd:?}\n\
             expected success, got: {}",
            result.status
        );
    }

    result.status.success()
}

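/// Incrementally parses the JSON messages produced by libtest on the child's stdout and
/// re-renders them in a format close to libtest's own human-readable output.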
struct Renderer<'a> {
    stdout: BufReader<ChildStdout>,
    failures: Vec<TestOutcome>,
    benches: Vec<BenchOutcome>,
    builder: &'a Builder<'a>,
    tests_count: Option<usize>,
    executed_tests: usize,
    /// Number of tests that were skipped due to already being up-to-date
    /// (i.e. no relevant changes occurred since they last ran).
    up_to_date_tests: usize,
    ignored_tests: usize,
    terse_tests_in_line: usize,
    ci_latest_logged_percentage: f64,
}

impl<'a> Renderer<'a> {
    fn new(stdout: ChildStdout, builder: &'a Builder<'a>) -> Self {
        Self {
            stdout: BufReader::new(stdout),
            benches: Vec::new(),
            failures: Vec::new(),
            builder,
            tests_count: None,
            executed_tests: 0,
            up_to_date_tests: 0,
            ignored_tests: 0,
            terse_tests_in_line: 0,
            ci_latest_logged_percentage: 0.0,
        }
    }

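    /// Reads the child's stdout line by line, rendering each JSON message as it arrives and
    /// forwarding any non-JSON output (e.g. from `--nocapture`) verbatim.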
    fn render_all(mut self) {
        let mut line = Vec::new();
        loop {
            line.clear();
            match self.stdout.read_until(b'\n', &mut line) {
                Ok(_) => {}
                Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
                Err(err) => panic!("failed to read output of test runner: {err}"),
            }
            if line.is_empty() {
                break;
            }

            match serde_json::from_slice(&line) {
                Ok(parsed) => self.render_message(parsed),
                Err(_err) => {
                    // Handle non-JSON output, for example when --nocapture is passed.
                    let mut stdout = std::io::stdout();
                    stdout.write_all(&line).unwrap();
                    let _ = stdout.flush();
                }
            }
        }

        if self.up_to_date_tests > 0 {
            let n = self.up_to_date_tests;
            let s = if n > 1 { "s" } else { "" };
            println!("help: ignored {n} up-to-date test{s}; use `--force-rerun` to prevent this\n");
        }
    }

    /// Streams the child's stdout to our stdout verbatim, one character at a time.
    fn stream_all(mut self) {
        let mut buffer = [0; 1];
        loop {
            match self.stdout.read(&mut buffer) {
                Ok(0) => break,
                Ok(_) => {
                    let mut stdout = std::io::stdout();
                    stdout.write_all(&buffer).unwrap();
                    let _ = stdout.flush();
                }
                Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
                Err(err) => panic!("failed to read output of test runner: {err}"),
            }
        }
    }

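    /// Records a single test result (updating the counters and, when enabled, the build metrics)
    /// and prints it in verbose, CI, or terse form depending on the configuration.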
    fn render_test_outcome(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        self.executed_tests += 1;

        if let Outcome::Ignored { reason } = outcome {
            self.ignored_tests += 1;
            // Keep this in sync with the "up-to-date" ignore message inserted by compiletest.
            if reason == Some("up-to-date") {
                self.up_to_date_tests += 1;
            }
        }

        #[cfg(feature = "build-metrics")]
        self.builder.metrics.record_test(
            &test.name,
            match outcome {
                Outcome::Ok | Outcome::BenchOk => build_helper::metrics::TestOutcome::Passed,
                Outcome::Failed => build_helper::metrics::TestOutcome::Failed,
                Outcome::Ignored { reason } => build_helper::metrics::TestOutcome::Ignored {
                    ignore_reason: reason.map(|s| s.to_string()),
                },
            },
            self.builder,
        );

        if self.builder.config.verbose_tests {
            self.render_test_outcome_verbose(outcome, test);
        } else if self.builder.config.is_running_on_ci {
            self.render_test_outcome_ci(outcome, test);
        } else {
            self.render_test_outcome_terse(outcome, test);
        }
    }

    fn render_test_outcome_verbose(&self, outcome: Outcome<'_>, test: &TestOutcome) {
        print!("test {} ... ", test.name);
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        if let Some(exec_time) = test.exec_time {
            print!(" ({exec_time:.2?})");
        }
        println!();
    }

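    /// Prints one character per test, wrapping after [`TERSE_TESTS_PER_LINE`] results and
    /// appending a running `executed/total` count at the end of each full line.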
    fn render_test_outcome_terse(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        if self.terse_tests_in_line != 0 && self.terse_tests_in_line % TERSE_TESTS_PER_LINE == 0 {
            if let Some(total) = self.tests_count {
                let total = total.to_string();
                let executed = format!("{:>width$}", self.executed_tests - 1, width = total.len());
                print!(" {executed}/{total}");
            }
            println!();
            self.terse_tests_in_line = 0;
        }

        self.terse_tests_in_line += 1;
        self.builder.colored_stdout(|stdout| outcome.write_short(stdout, &test.name)).unwrap();
        let _ = std::io::stdout().flush();
    }

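    /// CI-friendly output: instead of one character per test, logs a summary line roughly every
    /// 10% of progress and prints the names of failed tests as they happen.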
    fn render_test_outcome_ci(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        if let Some(total) = self.tests_count {
            let percent = self.executed_tests as f64 / total as f64;

            if self.ci_latest_logged_percentage + 0.10 < percent {
                let total = total.to_string();
                let executed = format!("{:>width$}", self.executed_tests, width = total.len());
                let pretty_percent = format!("{:.0}%", percent * 100.0);
                let passed_tests = self.executed_tests - (self.failures.len() + self.ignored_tests);
                println!(
                    "{:<4} -- {executed}/{total}, {:>total_indent$} passed, {} failed, {} ignored",
                    pretty_percent,
                    passed_tests,
                    self.failures.len(),
                    self.ignored_tests,
                    total_indent = total.len()
                );
                self.ci_latest_logged_percentage += 0.10;
            }
        }

        self.builder.colored_stdout(|stdout| outcome.write_ci(stdout, &test.name)).unwrap();
        let _ = std::io::stdout().flush();
    }

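    /// Prints the end-of-suite report: the captured output of failed tests, the list of failure
    /// names, a table of benchmark results, and the final `test result:` summary line.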
    fn render_suite_outcome(&self, outcome: Outcome<'_>, suite: &SuiteOutcome) {
        // The terse output doesn't end with a newline, so we need to add it ourselves.
        if !self.builder.config.verbose_tests {
            println!();
        }

        if !self.failures.is_empty() {
            println!("\nfailures:\n");
            for failure in &self.failures {
                if failure.stdout.is_some() || failure.message.is_some() {
                    println!("---- {} stdout ----", failure.name);
                    if let Some(stdout) = &failure.stdout {
                        println!("{stdout}");
                    }
                    if let Some(message) = &failure.message {
                        println!("NOTE: {message}");
                    }
                }
            }

            println!("\nfailures:");
            for failure in &self.failures {
                println!("    {}", failure.name);
            }
        }

        if !self.benches.is_empty() {
            println!("\nbenchmarks:");

            let mut rows = Vec::new();
            for bench in &self.benches {
                rows.push((
                    &bench.name,
                    format!("{:.2?}ns/iter", bench.median),
                    format!("+/- {:.2?}", bench.deviation),
                ));
            }

            let max_0 = rows.iter().map(|r| r.0.len()).max().unwrap_or(0);
            let max_1 = rows.iter().map(|r| r.1.len()).max().unwrap_or(0);
            let max_2 = rows.iter().map(|r| r.2.len()).max().unwrap_or(0);
            for row in &rows {
                println!("    {:<max_0$} {:>max_1$} {:>max_2$}", row.0, row.1, row.2);
            }
        }

        print!("\ntest result: ");
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        println!(
            ". {} passed; {} failed; {} ignored; {} measured; {} filtered out{time}\n",
            suite.passed,
            suite.failed,
            suite.ignored,
            suite.measured,
            suite.filtered_out,
            time = match suite.exec_time {
                Some(t) => format!("; finished in {:.2?}", Duration::from_secs_f64(t)),
                None => String::new(),
            }
        );
    }

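    /// Dispatches a single parsed JSON message to the appropriate rendering logic.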
    fn render_message(&mut self, message: Message) {
        match message {
            Message::Suite(SuiteMessage::Started { test_count }) => {
                println!("\nrunning {test_count} tests");
                self.benches = vec![];
                self.failures = vec![];
                self.ignored_tests = 0;
                self.executed_tests = 0;
                self.terse_tests_in_line = 0;
                self.tests_count = Some(test_count);
            }
            Message::Suite(SuiteMessage::Ok(outcome)) => {
                self.render_suite_outcome(Outcome::Ok, &outcome);
            }
            Message::Suite(SuiteMessage::Failed(outcome)) => {
                self.render_suite_outcome(Outcome::Failed, &outcome);
            }
            Message::Bench(outcome) => {
                // The benchmark formatting doesn't replicate libtest's output 1:1, mostly because
                // libtest's formatting is broken in terse mode, which is the default used by our
                // monorepo. We use a different format instead: successful benchmarks are simply
                // shown as "benchmarked"/"b", and the details are printed at the bottom like
                // failures.
                let fake_test_outcome = TestOutcome {
                    name: outcome.name.clone(),
                    exec_time: None,
                    stdout: None,
                    message: None,
                };
                self.render_test_outcome(Outcome::BenchOk, &fake_test_outcome);
                self.benches.push(outcome);
            }
            Message::Test(TestMessage::Ok(outcome)) => {
                self.render_test_outcome(Outcome::Ok, &outcome);
            }
            Message::Test(TestMessage::Ignored(outcome)) => {
                self.render_test_outcome(
                    Outcome::Ignored { reason: outcome.message.as_deref() },
                    &outcome,
                );
            }
            Message::Test(TestMessage::Failed(outcome)) => {
                self.render_test_outcome(Outcome::Failed, &outcome);
                self.failures.push(outcome);
            }
            Message::Test(TestMessage::Timeout { name }) => {
                println!("test {name} has been running for a long time");
            }
            Message::Test(TestMessage::Started) => {} // Not useful
        }
    }
}

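/// The result of a single test (or of a whole suite), used to pick the colored marker rendered
/// for it in terse, verbose, and CI output.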
enum Outcome<'a> {
    Ok,
    BenchOk,
    Failed,
    Ignored { reason: Option<&'a str> },
}

impl Outcome<'_> {
    fn write_short(&self, writer: &mut dyn WriteColor, name: &str) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, ".")?;
            }
            Outcome::BenchOk => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "b")?;
            }
            Outcome::Failed => {
                // Put failed tests on their own line and include the test name, so that it's faster
                // to see which test failed without having to wait for them all to run.
                writeln!(writer)?;
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                writeln!(writer, "{name} ... F")?;
            }
            Outcome::Ignored { .. } => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "i")?;
            }
        }
        writer.reset()
    }

    fn write_long(&self, writer: &mut dyn WriteColor) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, "ok")?;
            }
            Outcome::BenchOk => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "benchmarked")?;
            }
            Outcome::Failed => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                write!(writer, "FAILED")?;
            }
            Outcome::Ignored { reason } => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "ignored")?;
                if let Some(reason) = reason {
                    write!(writer, ", {reason}")?;
                }
            }
        }
        writer.reset()
    }

    fn write_ci(&self, writer: &mut dyn WriteColor, name: &str) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok | Outcome::BenchOk | Outcome::Ignored { .. } => {}
            Outcome::Failed => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                writeln!(writer, "   {name} ... FAILED")?;
            }
        }
        writer.reset()
    }
}

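/// The self-describing JSON messages emitted by libtest, deserialized based on their `type` and
/// `event` tags. Unknown fields are ignored by serde, so these types only capture what the
/// renderer needs.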
#[derive(serde_derive::Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum Message {
    Suite(SuiteMessage),
    Test(TestMessage),
    Bench(BenchOutcome),
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum SuiteMessage {
    Ok(SuiteOutcome),
    Failed(SuiteOutcome),
    Started { test_count: usize },
}

#[derive(serde_derive::Deserialize)]
struct SuiteOutcome {
    passed: usize,
    failed: usize,
    ignored: usize,
    measured: usize,
    filtered_out: usize,
    /// The time it took to execute this test suite, or `None` if time measurement was not possible
    /// (e.g. due to running on wasm).
    exec_time: Option<f64>,
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum TestMessage {
    Ok(TestOutcome),
    Failed(TestOutcome),
    Ignored(TestOutcome),
    Timeout { name: String },
    Started,
}

#[derive(serde_derive::Deserialize)]
struct BenchOutcome {
    name: String,
    median: f64,
    deviation: f64,
}

#[derive(serde_derive::Deserialize)]
struct TestOutcome {
    name: String,
    exec_time: Option<f64>,
    stdout: Option<String>,
    message: Option<String>,
}