//! Renders the JSON output of libtest-based test runners into a human-readable
//! form that mimics libtest's native output, while still letting bootstrap
//! observe each individual test result.

use std::io::{BufRead, BufReader, Read, Write};
use std::process::{ChildStdout, Stdio};
use std::time::Duration;

use build_helper::ci::CiEnv;
use termcolor::{Color, ColorSpec, WriteColor};

use crate::core::builder::Builder;
use crate::utils::exec::BootstrapCommand;

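/// Number of one-character test results printed per line in terse mode before
/// the running `executed/total` counter is appended and the line wraps.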
const TERSE_TESTS_PER_LINE: usize = 88;

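/// Appends `-- -Z unstable-options --format json` to `cmd` (adding the `--`
/// separator only if it isn't already present) so the test runner emits
/// machine-readable JSON, then runs it through [`try_run_tests`].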
pub(crate) fn add_flags_and_try_run_tests(
    builder: &Builder<'_>,
    cmd: &mut BootstrapCommand,
) -> bool {
    if !cmd.get_args().any(|arg| arg == "--") {
        cmd.arg("--");
    }
    cmd.args(["-Z", "unstable-options", "--format", "json"]);

    try_run_tests(builder, cmd, false)
}

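/// Runs the test command, honoring dry-run and the fail-fast setting: on
/// failure it either exits bootstrap immediately or records the command as a
/// delayed failure to report at the end of the build.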
pub(crate) fn try_run_tests(
    builder: &Builder<'_>,
    cmd: &mut BootstrapCommand,
    stream: bool,
) -> bool {
    if builder.config.dry_run() {
        cmd.mark_as_executed();
        return true;
    }

    if !run_tests(builder, cmd, stream) {
        if builder.fail_fast {
            crate::exit!(1);
        } else {
            let mut failures = builder.delayed_failures.borrow_mut();
            failures.push(format!("{cmd:?}"));
            false
        }
    } else {
        true
    }
}

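/// Spawns the test command with a piped stdout and hands the output to a
/// [`Renderer`], either streaming it verbatim or re-rendering the JSON
/// messages into human-readable output.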
fn run_tests(builder: &Builder<'_>, cmd: &mut BootstrapCommand, stream: bool) -> bool {
    let cmd = cmd.as_command_mut();
    cmd.stdout(Stdio::piped());

    builder.verbose(|| println!("running: {cmd:?}"));

    let mut process = cmd.spawn().unwrap();

    let renderer = Renderer::new(process.stdout.take().unwrap(), builder);
    if stream {
        renderer.stream_all();
    } else {
        renderer.render_all();
    }

    let result = process.wait_with_output().unwrap();
    if !result.status.success() && builder.is_verbose() {
        println!(
            "\n\ncommand did not execute successfully: {cmd:?}\n\
             expected success, got: {}",
            result.status
        );
    }

    result.status.success()
}

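/// Parses the JSON messages emitted by the test runner and renders them in a
/// format that mimics libtest's own human-readable output.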
struct Renderer<'a> {
    stdout: BufReader<ChildStdout>,
    failures: Vec<TestOutcome>,
    benches: Vec<BenchOutcome>,
    builder: &'a Builder<'a>,
    tests_count: Option<usize>,
    executed_tests: usize,
    /// Number of tests that were skipped because they were already up-to-date
    /// and didn't need to be rerun.
    up_to_date_tests: usize,
    ignored_tests: usize,
    terse_tests_in_line: usize,
    ci_latest_logged_percentage: f64,
}

impl<'a> Renderer<'a> {
    fn new(stdout: ChildStdout, builder: &'a Builder<'a>) -> Self {
        Self {
            stdout: BufReader::new(stdout),
            benches: Vec::new(),
            failures: Vec::new(),
            builder,
            tests_count: None,
            executed_tests: 0,
            up_to_date_tests: 0,
            ignored_tests: 0,
            terse_tests_in_line: 0,
            ci_latest_logged_percentage: 0.0,
        }
    }

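    /// Reads the test runner's stdout line by line, rendering each JSON
    /// message; lines that fail to parse as JSON are passed through verbatim.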
    fn render_all(mut self) {
        let mut line = Vec::new();
        loop {
            line.clear();
            match self.stdout.read_until(b'\n', &mut line) {
                Ok(_) => {}
                Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
                Err(err) => panic!("failed to read output of test runner: {err}"),
            }
            if line.is_empty() {
                break;
            }

            match serde_json::from_slice(&line) {
                Ok(parsed) => self.render_message(parsed),
                Err(_err) => {
                    // Not a JSON message: pass the line through unchanged, as
                    // test binaries can also print directly to stdout.
                    let mut stdout = std::io::stdout();
                    stdout.write_all(&line).unwrap();
                    let _ = stdout.flush();
                }
            }
        }

        if self.up_to_date_tests > 0 {
            let n = self.up_to_date_tests;
            let s = if n > 1 { "s" } else { "" };
            println!("help: ignored {n} up-to-date test{s}; use `--force-rerun` to prevent this\n");
        }
    }

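    /// Forwards the test runner's stdout byte by byte without any rendering.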
    fn stream_all(mut self) {
        let mut buffer = [0; 1];
        loop {
            match self.stdout.read(&mut buffer) {
                Ok(0) => break,
                Ok(_) => {
                    let mut stdout = std::io::stdout();
                    stdout.write_all(&buffer).unwrap();
                    let _ = stdout.flush();
                }
                Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
                Err(err) => panic!("failed to read output of test runner: {err}"),
            }
        }
    }

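    /// Records a single test result (for build metrics and the ignored and
    /// up-to-date counters) and dispatches to the verbose, CI, or terse renderer.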
    fn render_test_outcome(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        self.executed_tests += 1;

        if let Outcome::Ignored { reason } = outcome {
            self.ignored_tests += 1;
            // This string must stay in sync with the "up-to-date" ignore reason
            // emitted by the test runner for cached tests.
            if reason == Some("up-to-date") {
                self.up_to_date_tests += 1;
            }
        }

        #[cfg(feature = "build-metrics")]
        self.builder.metrics.record_test(
            &test.name,
            match outcome {
                Outcome::Ok | Outcome::BenchOk => build_helper::metrics::TestOutcome::Passed,
                Outcome::Failed => build_helper::metrics::TestOutcome::Failed,
                Outcome::Ignored { reason } => build_helper::metrics::TestOutcome::Ignored {
                    ignore_reason: reason.map(|s| s.to_string()),
                },
            },
            self.builder,
        );

        if self.builder.config.verbose_tests {
            self.render_test_outcome_verbose(outcome, test);
        } else if CiEnv::is_ci() {
            self.render_test_outcome_ci(outcome, test);
        } else {
            self.render_test_outcome_terse(outcome, test);
        }
    }

    fn render_test_outcome_verbose(&self, outcome: Outcome<'_>, test: &TestOutcome) {
        print!("test {} ... ", test.name);
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        if let Some(exec_time) = test.exec_time {
            print!(" ({exec_time:.2?})");
        }
        println!();
    }

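    /// Prints a one-character result, wrapping to a new line (suffixed with an
    /// `executed/total` counter) every [`TERSE_TESTS_PER_LINE`] results.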
    fn render_test_outcome_terse(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        if self.terse_tests_in_line != 0 && self.terse_tests_in_line % TERSE_TESTS_PER_LINE == 0 {
            if let Some(total) = self.tests_count {
                let total = total.to_string();
                let executed = format!("{:>width$}", self.executed_tests - 1, width = total.len());
                print!(" {executed}/{total}");
            }
            println!();
            self.terse_tests_in_line = 0;
        }

        self.terse_tests_in_line += 1;
        self.builder.colored_stdout(|stdout| outcome.write_short(stdout, &test.name)).unwrap();
        let _ = std::io::stdout().flush();
    }

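    /// CI logs are not interactive, so instead of a per-test character this
    /// prints a progress summary line roughly every 10% of executed tests,
    /// plus the name of any test that fails.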
    fn render_test_outcome_ci(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        if let Some(total) = self.tests_count {
            let percent = self.executed_tests as f64 / total as f64;

            if self.ci_latest_logged_percentage + 0.10 < percent {
                let total = total.to_string();
                let executed = format!("{:>width$}", self.executed_tests, width = total.len());
                let pretty_percent = format!("{:.0}%", percent * 100.0);
                let passed_tests = self.executed_tests - (self.failures.len() + self.ignored_tests);
                println!(
                    "{:<4} -- {executed}/{total}, {:>total_indent$} passed, {} failed, {} ignored",
                    pretty_percent,
                    passed_tests,
                    self.failures.len(),
                    self.ignored_tests,
                    total_indent = total.len()
                );
                self.ci_latest_logged_percentage += 0.10;
            }
        }

        self.builder.colored_stdout(|stdout| outcome.write_ci(stdout, &test.name)).unwrap();
        let _ = std::io::stdout().flush();
    }

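    /// Prints the end-of-suite report: the stdout and names of failed tests, a
    /// table of benchmark results, and the final pass/fail/ignored summary.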
    fn render_suite_outcome(&self, outcome: Outcome<'_>, suite: &SuiteOutcome) {
        // The terse output doesn't end its last line with a newline, so add it here.
        if !self.builder.config.verbose_tests {
            println!();
        }

        if !self.failures.is_empty() {
            println!("\nfailures:\n");
            for failure in &self.failures {
                if failure.stdout.is_some() || failure.message.is_some() {
                    println!("---- {} stdout ----", failure.name);
                    if let Some(stdout) = &failure.stdout {
                        println!("{stdout}");
                    }
                    if let Some(message) = &failure.message {
                        println!("NOTE: {message}");
                    }
                }
            }

            println!("\nfailures:");
            for failure in &self.failures {
                println!(" {}", failure.name);
            }
        }

        if !self.benches.is_empty() {
            println!("\nbenchmarks:");

            let mut rows = Vec::new();
            for bench in &self.benches {
                rows.push((
                    &bench.name,
                    format!("{:.2?}ns/iter", bench.median),
                    format!("+/- {:.2?}", bench.deviation),
                ));
            }

            let max_0 = rows.iter().map(|r| r.0.len()).max().unwrap_or(0);
            let max_1 = rows.iter().map(|r| r.1.len()).max().unwrap_or(0);
            let max_2 = rows.iter().map(|r| r.2.len()).max().unwrap_or(0);
            for row in &rows {
                println!(" {:<max_0$} {:>max_1$} {:>max_2$}", row.0, row.1, row.2);
            }
        }

        print!("\ntest result: ");
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        println!(
            ". {} passed; {} failed; {} ignored; {} measured; {} filtered out{time}\n",
            suite.passed,
            suite.failed,
            suite.ignored,
            suite.measured,
            suite.filtered_out,
            time = match suite.exec_time {
                Some(t) => format!("; finished in {:.2?}", Duration::from_secs_f64(t)),
                None => String::new(),
            }
        );
    }

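    /// Dispatches a parsed JSON message to the appropriate rendering logic,
    /// resetting the per-suite counters whenever a new suite starts.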
    fn render_message(&mut self, message: Message) {
        match message {
            Message::Suite(SuiteMessage::Started { test_count }) => {
                println!("\nrunning {test_count} tests");
                self.benches = vec![];
                self.failures = vec![];
                self.ignored_tests = 0;
                self.executed_tests = 0;
                self.terse_tests_in_line = 0;
                self.tests_count = Some(test_count);
            }
            Message::Suite(SuiteMessage::Ok(outcome)) => {
                self.render_suite_outcome(Outcome::Ok, &outcome);
            }
            Message::Suite(SuiteMessage::Failed(outcome)) => {
                self.render_suite_outcome(Outcome::Failed, &outcome);
            }
            Message::Bench(outcome) => {
                // Benchmarks are rendered here as a passing test; their median
                // and deviation are collected and printed in a table at the end
                // of the suite instead.
                let fake_test_outcome = TestOutcome {
                    name: outcome.name.clone(),
                    exec_time: None,
                    stdout: None,
                    message: None,
                };
                self.render_test_outcome(Outcome::BenchOk, &fake_test_outcome);
                self.benches.push(outcome);
            }
            Message::Test(TestMessage::Ok(outcome)) => {
                self.render_test_outcome(Outcome::Ok, &outcome);
            }
            Message::Test(TestMessage::Ignored(outcome)) => {
                self.render_test_outcome(
                    Outcome::Ignored { reason: outcome.message.as_deref() },
                    &outcome,
                );
            }
            Message::Test(TestMessage::Failed(outcome)) => {
                self.render_test_outcome(Outcome::Failed, &outcome);
                self.failures.push(outcome);
            }
            Message::Test(TestMessage::Timeout { name }) => {
                println!("test {name} has been running for a long time");
            }
            Message::Test(TestMessage::Started) => {}
        }
    }
}

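/// Rendering-friendly summary of a single test (or suite) result, used to pick
/// the color and symbol or text to print.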
enum Outcome<'a> {
    Ok,
    BenchOk,
    Failed,
    Ignored { reason: Option<&'a str> },
}

impl Outcome<'_> {
    fn write_short(&self, writer: &mut dyn WriteColor, name: &str) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, ".")?;
            }
            Outcome::BenchOk => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "b")?;
            }
            Outcome::Failed => {
                // Put failed tests on their own line with the test name, so the
                // failure is visible without waiting for the whole run to finish.
                writeln!(writer)?;
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                writeln!(writer, "{name} ... F")?;
            }
            Outcome::Ignored { .. } => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "i")?;
            }
        }
        writer.reset()
    }

    fn write_long(&self, writer: &mut dyn WriteColor) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, "ok")?;
            }
            Outcome::BenchOk => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "benchmarked")?;
            }
            Outcome::Failed => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                write!(writer, "FAILED")?;
            }
            Outcome::Ignored { reason } => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "ignored")?;
                if let Some(reason) = reason {
                    write!(writer, ", {reason}")?;
                }
            }
        }
        writer.reset()
    }

    fn write_ci(&self, writer: &mut dyn WriteColor, name: &str) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok | Outcome::BenchOk | Outcome::Ignored { .. } => {}
            Outcome::Failed => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                writeln!(writer, " {name} ... FAILED")?;
            }
        }
        writer.reset()
    }
}

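/// A single JSON message emitted by the test runner. The lines below are
/// illustrative, based on libtest's (unstable) JSON output; the structs here
/// only declare the fields the renderer needs, and serde ignores the rest:
///
/// ```json
/// { "type": "suite", "event": "started", "test_count": 2 }
/// { "type": "test", "event": "started", "name": "foo" }
/// { "type": "test", "event": "ok", "name": "foo" }
/// { "type": "suite", "event": "ok", "passed": 2, "failed": 0, "ignored": 0, "measured": 0, "filtered_out": 0, "exec_time": 0.21 }
/// ```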
#[derive(serde_derive::Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum Message {
    Suite(SuiteMessage),
    Test(TestMessage),
    Bench(BenchOutcome),
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum SuiteMessage {
    Ok(SuiteOutcome),
    Failed(SuiteOutcome),
    Started { test_count: usize },
}

#[derive(serde_derive::Deserialize)]
struct SuiteOutcome {
    passed: usize,
    failed: usize,
    ignored: usize,
    measured: usize,
    filtered_out: usize,
    /// The time it took to execute this suite, in seconds, or `None` if it
    /// was not reported.
    exec_time: Option<f64>,
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum TestMessage {
    Ok(TestOutcome),
    Failed(TestOutcome),
    Ignored(TestOutcome),
    Timeout { name: String },
    Started,
}

#[derive(serde_derive::Deserialize)]
struct BenchOutcome {
    name: String,
    median: f64,
    deviation: f64,
}

#[derive(serde_derive::Deserialize)]
struct TestOutcome {
    name: String,
    exec_time: Option<f64>,
    stdout: Option<String>,
    message: Option<String>,
}