//! Re-renders the machine-readable (JSON) output of the test runner into a
//! human-readable form, while collecting the information bootstrap needs about
//! individual test results.

use std::io::{BufRead, BufReader, Read, Write};
use std::process::ChildStdout;
use std::time::Duration;

use termcolor::{Color, ColorSpec, WriteColor};

use crate::core::builder::Builder;
use crate::utils::exec::BootstrapCommand;

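/// How many single-character test results are printed per line in the terse output.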
const TERSE_TESTS_PER_LINE: usize = 88;

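/// Adds the flags needed to get machine-readable (JSON) output from the test binary
/// (inserting a `--` separator if the caller has not already provided one) and then
/// runs the tests via `try_run_tests`.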
pub(crate) fn add_flags_and_try_run_tests(
    builder: &Builder<'_>,
    cmd: &mut BootstrapCommand,
) -> bool {
    if !cmd.get_args().any(|arg| arg == "--") {
        cmd.arg("--");
    }
    cmd.args(["-Z", "unstable-options", "--format", "json"]);

    try_run_tests(builder, cmd, false)
}

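/// Runs the test command and returns whether it succeeded. On failure, the build
/// either aborts immediately (when fail-fast is enabled) or records the command so
/// the failure can be reported at the end of the run.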
pub(crate) fn try_run_tests(
    builder: &Builder<'_>,
    cmd: &mut BootstrapCommand,
    stream: bool,
) -> bool {
    if run_tests(builder, cmd, stream) {
        return true;
    }

    if builder.fail_fast {
        crate::exit!(1);
    }

    builder.config.exec_ctx().add_to_delay_failure(format!("{cmd:?}"));

    false
}

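/// Spawns the test command with its stdout captured and renders that output, either
/// by streaming it verbatim or by parsing and re-rendering the JSON messages.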
fn run_tests(builder: &Builder<'_>, cmd: &mut BootstrapCommand, stream: bool) -> bool {
    builder.do_if_verbose(|| println!("running: {cmd:?}"));

    let Some(mut streaming_command) = cmd.stream_capture_stdout(&builder.config.exec_ctx) else {
        return true;
    };

    let renderer = Renderer::new(streaming_command.stdout.take().unwrap(), builder);
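    // Either forward the captured output verbatim or parse it as JSON test messages
    // and re-render it in a human-readable form.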
    if stream {
        renderer.stream_all();
    } else {
        renderer.render_all();
    }

    let status = streaming_command.wait(&builder.config.exec_ctx).unwrap();
    if !status.success() && builder.is_verbose() {
        println!(
            "\n\ncommand did not execute successfully: {cmd:?}\n\
            expected success, got: {status}",
        );
    }

    status.success()
}

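/// Incrementally parses the JSON messages produced by a test run and renders them,
/// keeping track of failures, benchmark results, and progress counters along the way.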
struct Renderer<'a> {
    stdout: BufReader<ChildStdout>,
    failures: Vec<TestOutcome>,
    benches: Vec<BenchOutcome>,
    builder: &'a Builder<'a>,
    tests_count: Option<usize>,
    executed_tests: usize,
    up_to_date_tests: usize,
    ignored_tests: usize,
    terse_tests_in_line: usize,
    ci_latest_logged_percentage: f64,
}

impl<'a> Renderer<'a> {
    fn new(stdout: ChildStdout, builder: &'a Builder<'a>) -> Self {
        Self {
            stdout: BufReader::new(stdout),
            benches: Vec::new(),
            failures: Vec::new(),
            builder,
            tests_count: None,
            executed_tests: 0,
            up_to_date_tests: 0,
            ignored_tests: 0,
            terse_tests_in_line: 0,
            ci_latest_logged_percentage: 0.0,
        }
    }

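    /// Reads the captured stdout line by line, rendering every line that parses as a
    /// JSON test message and forwarding everything else to our stdout unchanged.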
    fn render_all(mut self) {
        let mut line = Vec::new();
        loop {
            line.clear();
            match self.stdout.read_until(b'\n', &mut line) {
                Ok(_) => {}
                Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
                Err(err) => panic!("failed to read output of test runner: {err}"),
            }
            if line.is_empty() {
                break;
            }

            match serde_json::from_slice(&line) {
                Ok(parsed) => self.render_message(parsed),
                Err(_err) => {
                    let mut stdout = std::io::stdout();
                    stdout.write_all(&line).unwrap();
                    let _ = stdout.flush();
                }
            }
        }

        if self.up_to_date_tests > 0 {
            let n = self.up_to_date_tests;
            let s = if n > 1 { "s" } else { "" };
            println!("help: ignored {n} up-to-date test{s}; use `--force-rerun` to prevent this\n");
        }
    }

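    /// Copies the captured stdout to our stdout byte by byte, without interpreting it.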
    fn stream_all(mut self) {
        let mut buffer = [0; 1];
        loop {
            match self.stdout.read(&mut buffer) {
                Ok(0) => break,
                Ok(_) => {
                    let mut stdout = std::io::stdout();
                    stdout.write_all(&buffer).unwrap();
                    let _ = stdout.flush();
                }
                Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
                Err(err) => panic!("failed to read output of test runner: {err}"),
            }
        }
    }

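    /// Records the outcome of a single test and renders it in verbose, CI, or terse
    /// style depending on the current configuration.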
    fn render_test_outcome(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        self.executed_tests += 1;

        if let Outcome::Ignored { reason } = outcome {
            self.ignored_tests += 1;
            if reason == Some("up-to-date") {
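                // Tests skipped because a previous result is still current report the
                // "up-to-date" ignore reason; a `--force-rerun` hint is printed for
                // them at the end of the run.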
                self.up_to_date_tests += 1;
            }
        }

        #[cfg(feature = "build-metrics")]
        self.builder.metrics.record_test(
            &test.name,
            match outcome {
                Outcome::Ok | Outcome::BenchOk => build_helper::metrics::TestOutcome::Passed,
                Outcome::Failed => build_helper::metrics::TestOutcome::Failed,
                Outcome::Ignored { reason } => build_helper::metrics::TestOutcome::Ignored {
                    ignore_reason: reason.map(|s| s.to_string()),
                },
            },
            self.builder,
        );

        if self.builder.config.verbose_tests {
            self.render_test_outcome_verbose(outcome, test);
        } else if self.builder.config.is_running_on_ci {
            self.render_test_outcome_ci(outcome, test);
        } else {
            self.render_test_outcome_terse(outcome, test);
        }
    }

    fn render_test_outcome_verbose(&self, outcome: Outcome<'_>, test: &TestOutcome) {
        print!("test {} ... ", test.name);
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        if let Some(exec_time) = test.exec_time {
            print!(" ({exec_time:.2?})");
        }
        println!();
    }

    fn render_test_outcome_terse(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        if self.terse_tests_in_line != 0
            && self.terse_tests_in_line.is_multiple_of(TERSE_TESTS_PER_LINE)
        {
            if let Some(total) = self.tests_count {
                let total = total.to_string();
                let executed = format!("{:>width$}", self.executed_tests - 1, width = total.len());
                print!(" {executed}/{total}");
            }
            println!();
            self.terse_tests_in_line = 0;
        }

        self.terse_tests_in_line += 1;
        self.builder.colored_stdout(|stdout| outcome.write_short(stdout, &test.name)).unwrap();
        let _ = std::io::stdout().flush();
    }

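    /// On CI, per-test dots are not printed; instead a progress summary is logged
    /// roughly every 10% of executed tests, plus the name of every test that fails.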
    fn render_test_outcome_ci(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        if let Some(total) = self.tests_count {
            let percent = self.executed_tests as f64 / total as f64;

            if self.ci_latest_logged_percentage + 0.10 < percent {
                let total = total.to_string();
                let executed = format!("{:>width$}", self.executed_tests, width = total.len());
                let pretty_percent = format!("{:.0}%", percent * 100.0);
                let passed_tests = self.executed_tests - (self.failures.len() + self.ignored_tests);
                println!(
                    "{:<4} -- {executed}/{total}, {:>total_indent$} passed, {} failed, {} ignored",
                    pretty_percent,
                    passed_tests,
                    self.failures.len(),
                    self.ignored_tests,
                    total_indent = total.len()
                );
                self.ci_latest_logged_percentage += 0.10;
            }
        }

        self.builder.colored_stdout(|stdout| outcome.write_ci(stdout, &test.name)).unwrap();
        let _ = std::io::stdout().flush();
    }

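    /// Prints the end-of-suite summary: the captured output and names of failed tests,
    /// benchmark results, and the final pass/fail/ignore counts.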
    fn render_suite_outcome(&self, outcome: Outcome<'_>, suite: &SuiteOutcome) {
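        // The terse and CI renderers leave the current line unfinished, so end it
        // before printing the summary.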
        if !self.builder.config.verbose_tests {
            println!();
        }

        if !self.failures.is_empty() {
            println!("\nfailures:\n");
            for failure in &self.failures {
                if failure.stdout.is_some() || failure.message.is_some() {
                    println!("---- {} stdout ----", failure.name);
                    if let Some(stdout) = &failure.stdout {
                        print!("{stdout}");
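                        // Make it explicit when the captured output did not end with a
                        // trailing newline.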
                        if !stdout.ends_with('\n') {
                            println!("\n\\ (no newline at end of output)");
                        }
                    }
                    println!("---- {} stdout end ----", failure.name);
                    if let Some(message) = &failure.message {
                        println!("NOTE: {message}");
                    }
                }
            }

            println!("\nfailures:");
            for failure in &self.failures {
                println!(" {}", failure.name);
            }
        }

        if !self.benches.is_empty() {
            println!("\nbenchmarks:");

            let mut rows = Vec::new();
            for bench in &self.benches {
                rows.push((
                    &bench.name,
                    format!("{:.2?}ns/iter", bench.median),
                    format!("+/- {:.2?}", bench.deviation),
                ));
            }

            let max_0 = rows.iter().map(|r| r.0.len()).max().unwrap_or(0);
            let max_1 = rows.iter().map(|r| r.1.len()).max().unwrap_or(0);
            let max_2 = rows.iter().map(|r| r.2.len()).max().unwrap_or(0);
            for row in &rows {
                println!(" {:<max_0$} {:>max_1$} {:>max_2$}", row.0, row.1, row.2);
            }
        }

        print!("\ntest result: ");
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        println!(
            ". {} passed; {} failed; {} ignored; {} measured; {} filtered out{time}\n",
            suite.passed,
            suite.failed,
            suite.ignored,
            suite.measured,
            suite.filtered_out,
            time = match suite.exec_time {
                Some(t) => format!("; finished in {:.2?}", Duration::from_secs_f64(t)),
                None => String::new(),
            }
        );
    }

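    /// Renders the timing report emitted after a merged-doctests run.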
    fn render_report(&self, report: &Report) {
        let &Report { total_time, compilation_time } = report;
        println!(
            "all doctests ran in {total_time:.2}s; merged doctests compilation took {compilation_time:.2}s"
        );
    }

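    /// Dispatches a parsed JSON message to the appropriate rendering method.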
    fn render_message(&mut self, message: Message) {
        match message {
            Message::Suite(SuiteMessage::Started { test_count }) => {
                println!("\nrunning {test_count} tests");
                self.benches = vec![];
                self.failures = vec![];
                self.ignored_tests = 0;
                self.executed_tests = 0;
                self.terse_tests_in_line = 0;
                self.tests_count = Some(test_count);
            }
            Message::Suite(SuiteMessage::Ok(outcome)) => {
                self.render_suite_outcome(Outcome::Ok, &outcome);
            }
            Message::Suite(SuiteMessage::Failed(outcome)) => {
                self.render_suite_outcome(Outcome::Failed, &outcome);
            }
            Message::Report(report) => {
                self.render_report(&report);
            }
            Message::Bench(outcome) => {
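                // Benchmarks are rendered through the same code path as tests, so a
                // synthetic TestOutcome is built for them; the full benchmark result is
                // kept separately in `self.benches`.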
                let fake_test_outcome = TestOutcome {
                    name: outcome.name.clone(),
                    exec_time: None,
                    stdout: None,
                    message: None,
                };
                self.render_test_outcome(Outcome::BenchOk, &fake_test_outcome);
                self.benches.push(outcome);
            }
            Message::Test(TestMessage::Ok(outcome)) => {
                self.render_test_outcome(Outcome::Ok, &outcome);
            }
            Message::Test(TestMessage::Ignored(outcome)) => {
                self.render_test_outcome(
                    Outcome::Ignored { reason: outcome.message.as_deref() },
                    &outcome,
                );
            }
            Message::Test(TestMessage::Failed(outcome)) => {
                self.render_test_outcome(Outcome::Failed, &outcome);
                self.failures.push(outcome);
            }
            Message::Test(TestMessage::Timeout { name }) => {
                println!("test {name} has been running for a long time");
            }
            Message::Test(TestMessage::Started) => {}
        }
    }
}

enum Outcome<'a> {
    Ok,
    BenchOk,
    Failed,
    Ignored { reason: Option<&'a str> },
}

impl Outcome<'_> {
    fn write_short(&self, writer: &mut dyn WriteColor, name: &str) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, ".")?;
            }
            Outcome::BenchOk => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "b")?;
            }
            Outcome::Failed => {
                writeln!(writer)?;
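                // A failed test breaks out of the row of dots: it is printed on its own
                // line together with the test name so it is easy to spot.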
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                writeln!(writer, "{name} ... F")?;
            }
            Outcome::Ignored { .. } => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "i")?;
            }
        }
        writer.reset()
    }

    fn write_long(&self, writer: &mut dyn WriteColor) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, "ok")?;
            }
            Outcome::BenchOk => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "benchmarked")?;
            }
            Outcome::Failed => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                write!(writer, "FAILED")?;
            }
            Outcome::Ignored { reason } => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "ignored")?;
                if let Some(reason) = reason {
                    write!(writer, ", {reason}")?;
                }
            }
        }
        writer.reset()
    }

    fn write_ci(&self, writer: &mut dyn WriteColor, name: &str) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok | Outcome::BenchOk | Outcome::Ignored { .. } => {}
            Outcome::Failed => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                writeln!(writer, " {name} ... FAILED")?;
            }
        }
        writer.reset()
    }
}

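// The types below mirror the JSON messages emitted by the test runner when it is
// invoked with `--format json`.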
#[derive(serde_derive::Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum Message {
    Suite(SuiteMessage),
    Test(TestMessage),
    Bench(BenchOutcome),
    Report(Report),
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum SuiteMessage {
    Ok(SuiteOutcome),
    Failed(SuiteOutcome),
    Started { test_count: usize },
}

#[derive(serde_derive::Deserialize)]
struct SuiteOutcome {
    passed: usize,
    failed: usize,
    ignored: usize,
    measured: usize,
    filtered_out: usize,
    exec_time: Option<f64>,
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum TestMessage {
    Ok(TestOutcome),
    Failed(TestOutcome),
    Ignored(TestOutcome),
    Timeout { name: String },
    Started,
}

#[derive(serde_derive::Deserialize)]
struct BenchOutcome {
    name: String,
    median: f64,
    deviation: f64,
}

#[derive(serde_derive::Deserialize)]
struct TestOutcome {
    name: String,
    exec_time: Option<f64>,
    stdout: Option<String>,
    message: Option<String>,
}

#[derive(serde_derive::Deserialize)]
struct Report {
    total_time: f64,
    compilation_time: f64,
}