// test/lib.rs

//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more
//! details.
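//!
//! A minimal sketch of that attribute-based usage (the function and numbers
//! below are illustrative, not part of this crate):
//!
//! ```rust,ignore
//! #![feature(test)]
//! extern crate test;
//!
//! use test::{Bencher, black_box};
//!
//! #[bench]
//! fn bench_sum(b: &mut Bencher) {
//!     // `black_box` keeps the optimizer from deleting the benchmarked work.
//!     b.iter(|| (0..1_000u64).map(black_box).sum::<u64>());
//! }
//! ```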

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.

#![unstable(feature = "test", issue = "50297")]
#![doc(test(attr(deny(warnings))))]
#![doc(rust_logo)]
#![feature(rustdoc_internals)]
#![feature(file_buffered)]
#![feature(internal_output_capture)]
#![feature(staged_api)]
#![feature(process_exitcode_internals)]
#![feature(panic_can_unwind)]
#![feature(test)]
#![feature(thread_spawn_hook)]
#![allow(internal_features)]
#![warn(rustdoc::unescaped_backticks)]
#![warn(unreachable_pub)]

pub use cli::TestOpts;

pub use self::ColorConfig::*;
pub use self::bench::{Bencher, black_box};
pub use self::console::run_tests_console;
pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic};
pub use self::types::TestName::*;
pub use self::types::*;

// Module to be used by rustc to compile tests in libtest
pub mod test {
    pub use crate::bench::Bencher;
    pub use crate::cli::{TestOpts, parse_opts};
    pub use crate::helpers::metrics::{Metric, MetricMap};
    pub use crate::options::{Options, RunIgnored, RunStrategy, ShouldPanic};
    pub use crate::test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
    pub use crate::time::{TestExecTime, TestTimeOptions};
    pub use crate::types::{
        DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
        TestDescAndFn, TestId, TestName, TestType,
    };
    pub use crate::{assert_test_result, filter_tests, run_test, test_main, test_main_static};
}

use std::collections::VecDeque;
use std::io::prelude::Write;
use std::mem::ManuallyDrop;
use std::panic::{self, AssertUnwindSafe, PanicHookInfo, catch_unwind};
use std::process::{self, Command, Termination};
use std::sync::mpsc::{Sender, channel};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use std::{env, io, thread};

pub mod bench;
mod cli;
mod console;
mod event;
mod formatters;
mod helpers;
mod options;
pub mod stats;
mod term;
mod test_result;
mod time;
mod types;

#[cfg(test)]
mod tests;

use core::any::Any;

use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::shuffle::{get_shuffle_seed, shuffle_tests};
use options::RunStrategy;
use test_result::*;
use time::TestExecTime;

// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;

const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
const SECONDARY_TEST_BENCH_BENCHMARKS_VAR: &str = "__RUST_TEST_BENCH_BENCHMARKS";
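
// Illustrative shape of the secondary invocation driven by these variables
// (the binary path and test name below are made up): the primary process
// re-runs its own executable with the test to execute named in the
// environment, e.g.
//
//     __RUST_TEST_INVOKE=mymod::my_test ./target/debug/deps/mycrate-0123abcd
//
// and, when benchmarking, additionally sets __RUST_TEST_BENCH_BENCHMARKS=1.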

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
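//
// A hedged sketch of direct use (the test vector would normally be generated
// by `rustc --test`; an empty vector is used here purely for illustration):
//
//     fn main() {
//         let args: Vec<String> = std::env::args().collect();
//         let tests: Vec<test::TestDescAndFn> = Vec::new();
//         test::test_main(&args, tests, None);
//     }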
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
    let mut opts = match cli::parse_opts(args) {
        Some(Ok(o)) => o,
        Some(Err(msg)) => {
            eprintln!("error: {msg}");
            process::exit(ERROR_EXIT_CODE);
        }
        None => return,
    };
    if let Some(options) = options {
        opts.options = options;
    }
    if opts.list {
        if let Err(e) = console::list_tests_console(&opts, tests) {
            eprintln!("error: io error when listing tests: {e:?}");
            process::exit(ERROR_EXIT_CODE);
        }
    } else {
        if !opts.nocapture {
            // If we encounter a non-unwinding panic, flush any captured output from the current test,
            // and stop capturing output to ensure that the non-unwinding panic message is visible.
            // We also acquire the locks for both output streams to prevent output from other threads
            // from interleaving with the panic message or appearing after it.
            let builtin_panic_hook = panic::take_hook();
            let hook = Box::new({
                move |info: &'_ PanicHookInfo<'_>| {
                    if !info.can_unwind() {
                        std::mem::forget(std::io::stderr().lock());
                        let mut stdout = ManuallyDrop::new(std::io::stdout().lock());
                        if let Some(captured) = io::set_output_capture(None) {
                            if let Ok(data) = captured.lock() {
                                let _ = stdout.write_all(&data);
                                let _ = stdout.flush();
                            }
                        }
                    }
                    builtin_panic_hook(info);
                }
            });
            panic::set_hook(hook);
            // Use a thread spawning hook to make new threads inherit output capturing.
            std::thread::add_spawn_hook(|_| {
                // Get and clone the output capture of the current thread.
                let output_capture = io::set_output_capture(None);
                io::set_output_capture(output_capture.clone());
                // Set the output capture of the new thread.
                || {
                    io::set_output_capture(output_capture);
                }
            });
        }
        let res = console::run_tests_console(&opts, tests);
        // Prevent Valgrind from reporting reachable blocks in users' unit tests.
        drop(panic::take_hook());
        match res {
            Ok(true) => {}
            Ok(false) => process::exit(ERROR_EXIT_CODE),
            Err(e) => {
                eprintln!("error: io error when running tests: {e:?}");
                process::exit(ERROR_EXIT_CODE);
            }
        }
    }
}

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=unwind.
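///
/// A rough sketch of the generated entry point (the static names below are
/// illustrative; the real harness emits one static per `#[test]` item):
///
/// ```rust,ignore
/// fn main() {
///     test::test_main_static(&[&TEST_FOO, &TEST_BAR]);
/// }
/// ```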
pub fn test_main_static(tests: &[&TestDescAndFn]) {
    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, None)
}

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
    // If we're being run in SpawnedSecondary mode, run the test here.
    // run_test_in_spawned_subprocess will then exit the process.
    if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
        unsafe {
            env::remove_var(SECONDARY_TEST_INVOKER_VAR);
        }

        // Convert benchmarks to tests if we're not benchmarking.
        let mut tests = tests.iter().map(make_owned_test).collect::<Vec<_>>();
        if env::var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR).is_ok() {
            unsafe {
                env::remove_var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR);
            }
        } else {
            tests = convert_benchmarks_to_tests(tests);
        };

        let test = tests
            .into_iter()
            .find(|test| test.desc.name.as_slice() == name)
            .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{name}'"));
        let TestDescAndFn { desc, testfn } = test;
        match testfn.into_runnable() {
            Runnable::Test(runnable_test) => {
                if runnable_test.is_dynamic() {
                    panic!("only static tests are supported");
                }
                run_test_in_spawned_subprocess(desc, runnable_test);
            }
            Runnable::Bench(_) => {
                panic!("benchmarks should not be executed in child processes")
            }
        }
    }

    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}

/// Clones static values for putting into a dynamic vector, which `test_main()`
/// needs in order to hand ownership of tests to parallel test runners.
///
/// This will panic when fed any dynamic tests, because they cannot be cloned.
fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
    match test.testfn {
        StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
        StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
        _ => panic!("non-static tests passed to test::test_main_static"),
    }
}

/// Invoked when unit tests terminate. Returns `Result::Err` if the test is
/// considered a failure. By default, invokes `report()` and checks for a `0`
/// result.
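///
/// A minimal sketch of the contract (illustrative; `()` reports success, while
/// a non-zero `ExitCode` is the failing case):
///
/// ```rust,ignore
/// assert!(assert_test_result(()).is_ok());
/// assert!(assert_test_result(std::process::ExitCode::from(3)).is_err());
/// ```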
pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
    let code = result.report().to_i32();
    if code == 0 {
        Ok(())
    } else {
        Err(format!(
            "the test returned a termination value with a non-zero status code \
             ({code}) which indicates a failure"
        ))
    }
}

struct FilteredTests {
    tests: Vec<(TestId, TestDescAndFn)>,
    benches: Vec<(TestId, TestDescAndFn)>,
    next_id: usize,
}

impl FilteredTests {
    fn add_bench(&mut self, desc: TestDesc, testfn: TestFn) {
        let test = TestDescAndFn { desc, testfn };
        self.benches.push((TestId(self.next_id), test));
        self.next_id += 1;
    }
    fn add_test(&mut self, desc: TestDesc, testfn: TestFn) {
        let test = TestDescAndFn { desc, testfn };
        self.tests.push((TestId(self.next_id), test));
        self.next_id += 1;
    }
    fn total_len(&self) -> usize {
        self.tests.len() + self.benches.len()
    }
}

pub fn run_tests<F>(
    opts: &TestOpts,
    tests: Vec<TestDescAndFn>,
    mut notify_about_test_event: F,
) -> io::Result<()>
where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    use std::collections::HashMap;
    use std::hash::{BuildHasherDefault, DefaultHasher};
    use std::sync::mpsc::RecvTimeoutError;

    struct RunningTest {
        join_handle: Option<thread::JoinHandle<()>>,
    }

    impl RunningTest {
        fn join(self, completed_test: &mut CompletedTest) {
            if let Some(join_handle) = self.join_handle {
                if let Err(_) = join_handle.join() {
                    if let TrOk = completed_test.result {
                        completed_test.result =
                            TrFailedMsg("panicked after reporting success".to_string());
                    }
                }
            }
        }
    }

    // Use a deterministic hasher
    type TestMap = HashMap<TestId, RunningTest, BuildHasherDefault<DefaultHasher>>;

    struct TimeoutEntry {
        id: TestId,
        desc: TestDesc,
        timeout: Instant,
    }

    let tests_len = tests.len();

    let mut filtered = FilteredTests { tests: Vec::new(), benches: Vec::new(), next_id: 0 };

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    for test in filtered_tests {
        let mut desc = test.desc;
        desc.name = desc.name.with_padding(test.testfn.padding());

        match test.testfn {
            DynBenchFn(_) | StaticBenchFn(_) => {
                filtered.add_bench(desc, test.testfn);
            }
            testfn => {
                filtered.add_test(desc, testfn);
            }
        };
    }

    let filtered_out = tests_len - filtered.total_len();
    let event = TestEvent::TeFilteredOut(filtered_out);
    notify_about_test_event(event)?;

    let shuffle_seed = get_shuffle_seed(opts);

    let event = TestEvent::TeFiltered(filtered.total_len(), shuffle_seed);
    notify_about_test_event(event)?;

    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

    let mut remaining = filtered.tests;
    if let Some(shuffle_seed) = shuffle_seed {
        shuffle_tests(shuffle_seed, &mut remaining);
    }
    // Store the tests in a VecDeque so we can efficiently remove the first element to run the
    // tests in the order they were passed (unless shuffled).
    let mut remaining = VecDeque::from(remaining);
    let mut pending = 0;

    let (tx, rx) = channel::<CompletedTest>();
    let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
        RunStrategy::SpawnPrimary
    } else {
        RunStrategy::InProcess
    };

    let mut running_tests: TestMap = HashMap::default();
    let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();

    fn get_timed_out_tests(
        running_tests: &TestMap,
        timeout_queue: &mut VecDeque<TimeoutEntry>,
    ) -> Vec<TestDesc> {
        let now = Instant::now();
        let mut timed_out = Vec::new();
        while let Some(timeout_entry) = timeout_queue.front() {
            if now < timeout_entry.timeout {
                break;
            }
            let timeout_entry = timeout_queue.pop_front().unwrap();
            if running_tests.contains_key(&timeout_entry.id) {
                timed_out.push(timeout_entry.desc);
            }
        }
        timed_out
    }

    fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
        timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout, .. }| {
            let now = Instant::now();
            if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
        })
    }

    if concurrency == 1 {
        while !remaining.is_empty() {
            let (id, test) = remaining.pop_front().unwrap();
            let event = TestEvent::TeWait(test.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;

            if fail_fast {
                return Ok(());
            }
        }
    } else {
        while pending > 0 || !remaining.is_empty() {
            while pending < concurrency && !remaining.is_empty() {
                let (id, test) = remaining.pop_front().unwrap();
                let timeout = time::get_default_test_timeout();
                let desc = test.desc.clone();

                let event = TestEvent::TeWait(desc.clone());
                notify_about_test_event(event)?; // here no pad
                let join_handle =
                    run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
                running_tests.insert(id, RunningTest { join_handle });
                timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
                pending += 1;
            }

            let mut res;
            loop {
                if let Some(timeout) = calc_timeout(&timeout_queue) {
                    res = rx.recv_timeout(timeout);
                    for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
                        let event = TestEvent::TeTimeout(test);
                        notify_about_test_event(event)?;
                    }

                    match res {
                        Err(RecvTimeoutError::Timeout) => {
                            // Result is not yet ready, continue waiting.
                        }
                        _ => {
                            // We've got a result, stop the loop.
                            break;
                        }
                    }
                } else {
                    res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                    break;
                }
            }

            let mut completed_test = res.unwrap();
            let running_test = running_tests.remove(&completed_test.id).unwrap();
            running_test.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
            pending -= 1;

            if fail_fast {
                // Prevent remaining test threads from panicking
                std::mem::forget(rx);
                return Ok(());
            }
        }
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        for (id, b) in filtered.benches {
            let event = TestEvent::TeWait(b.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, false, id, b, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    }
    Ok(())
}

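// A hedged sketch of the name-matching rule the function below applies (the
// test and filter names are made up for illustration):
//
//     // filter_exact == false: substring match
//     assert!("math::test_addition".contains("addition"));
//     // filter_exact == true: the full name must be equal
//     assert!("math::test_addition" != "addition");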
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;
    let matches_filter = |test: &TestDescAndFn, filter: &str| {
        let test_name = test.desc.name.as_slice();

        match opts.filter_exact {
            true => test_name == filter,
            false => test_name.contains(filter),
        }
    };

    // Remove tests that don't match the test filter
    if !opts.filters.is_empty() {
        filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter)));
    }

    // Skip tests that match any of the skip filters
    if !opts.skip.is_empty() {
        filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
    }

    // Excludes #[should_panic] tests
    if opts.exclude_should_panic {
        filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
    }

    // maybe unignore tests
    match opts.run_ignored {
        RunIgnored::Yes => {
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::Only => {
            filtered.retain(|test| test.desc.ignore);
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::No => {}
    }

    filtered
}

pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    // convert benchmarks to tests, if we're not benchmarking them
    tests
        .into_iter()
        .map(|x| {
            let testfn = match x.testfn {
                DynBenchFn(benchfn) => DynBenchAsTestFn(benchfn),
                StaticBenchFn(benchfn) => StaticBenchAsTestFn(benchfn),
                f => f,
            };
            TestDescAndFn { desc: x.desc, testfn }
        })
        .collect()
}

pub fn run_test(
    opts: &TestOpts,
    force_ignore: bool,
    id: TestId,
    test: TestDescAndFn,
    strategy: RunStrategy,
    monitor_ch: Sender<CompletedTest>,
) -> Option<thread::JoinHandle<()>> {
    let TestDescAndFn { desc, testfn } = test;

    // Emscripten can catch panics but other wasm targets cannot
    let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
        && (cfg!(target_family = "wasm") || cfg!(target_os = "zkvm"))
        && !cfg!(target_os = "emscripten");

    if force_ignore || desc.ignore || ignore_because_no_process_support {
        let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
        monitor_ch.send(message).unwrap();
        return None;
    }

    match testfn.into_runnable() {
        Runnable::Test(runnable_test) => {
            if runnable_test.is_dynamic() {
                match strategy {
                    RunStrategy::InProcess => (),
                    _ => panic!("Cannot run dynamic test fn out-of-process"),
                };
            }

            let name = desc.name.clone();
            let nocapture = opts.nocapture;
            let time_options = opts.time_options;
            let bench_benchmarks = opts.bench_benchmarks;

            let runtest = move || match strategy {
                RunStrategy::InProcess => run_test_in_process(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    runnable_test,
                    monitor_ch,
                    time_options,
                ),
                RunStrategy::SpawnPrimary => spawn_test_subprocess(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    monitor_ch,
                    time_options,
                    bench_benchmarks,
                ),
            };

            // If the platform is single-threaded we're just going to run
            // the test synchronously, regardless of the concurrency
            // level.
            let supports_threads = !cfg!(target_os = "emscripten")
                && !cfg!(target_family = "wasm")
                && !cfg!(target_os = "zkvm");
            if supports_threads {
                let cfg = thread::Builder::new().name(name.as_slice().to_owned());
                let mut runtest = Arc::new(Mutex::new(Some(runtest)));
                let runtest2 = runtest.clone();
                match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) {
                    Ok(handle) => Some(handle),
                    Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
                        // `ErrorKind::WouldBlock` means hitting the thread limit on some
                        // platforms, so run the test synchronously here instead.
                        Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()();
                        None
                    }
                    Err(e) => panic!("failed to spawn thread to run test: {e}"),
                }
            } else {
                runtest();
                None
            }
        }
        Runnable::Bench(runnable_bench) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            runnable_bench.run(id, &desc, &monitor_ch, opts.nocapture);
            None
        }
    }
}

/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace<T, F: FnOnce() -> T>(f: F) -> T {
    let result = f();

    // prevent this frame from being tail-call optimised away
    black_box(result)
}

fn run_test_in_process(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    runnable_test: RunnableTest,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    // Buffer for capturing standard I/O
    let data = Arc::new(Mutex::new(Vec::new()));

    if !nocapture {
        io::set_output_capture(Some(data.clone()));
    }

    let start = report_time.then(Instant::now);
    let result = fold_err(catch_unwind(AssertUnwindSafe(|| runnable_test.run())));
    let exec_time = start.map(|start| {
        let duration = start.elapsed();
        TestExecTime(duration)
    });

    io::set_output_capture(None);

    let test_result = match result {
        Ok(()) => calc_result(&desc, Ok(()), time_opts.as_ref(), exec_time.as_ref()),
        Err(e) => calc_result(&desc, Err(e.as_ref()), time_opts.as_ref(), exec_time.as_ref()),
    };
    let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec();
    let message = CompletedTest::new(id, desc, test_result, exec_time, stdout);
    monitor_ch.send(message).unwrap();
}

fn fold_err<T, E>(
    result: Result<Result<T, E>, Box<dyn Any + Send>>,
) -> Result<T, Box<dyn Any + Send>>
where
    E: Send + 'static,
{
    match result {
        Ok(Err(e)) => Err(Box::new(e)),
        Ok(Ok(v)) => Ok(v),
        Err(e) => Err(e),
    }
}

fn spawn_test_subprocess(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
    bench_benchmarks: bool,
) {
    let (result, test_output, exec_time) = (|| {
        let args = env::args().collect::<Vec<_>>();
        let current_exe = &args[0];

        let mut command = Command::new(current_exe);
        command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice());
        if bench_benchmarks {
            command.env(SECONDARY_TEST_BENCH_BENCHMARKS_VAR, "1");
        }
        if nocapture {
            command.stdout(process::Stdio::inherit());
            command.stderr(process::Stdio::inherit());
        }

        let start = report_time.then(Instant::now);
        let output = match command.output() {
            Ok(out) => out,
            Err(e) => {
                let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e);
                return (TrFailed, err.into_bytes(), None);
            }
        };
        let exec_time = start.map(|start| {
            let duration = start.elapsed();
            TestExecTime(duration)
        });

        let std::process::Output { stdout, stderr, status } = output;
        let mut test_output = stdout;
        formatters::write_stderr_delimiter(&mut test_output, &desc.name);
        test_output.extend_from_slice(&stderr);

        let result =
            get_result_from_exit_code(&desc, status, time_opts.as_ref(), exec_time.as_ref());
        (result, test_output, exec_time)
    })();

    let message = CompletedTest::new(id, desc, result, exec_time, test_output);
    monitor_ch.send(message).unwrap();
}

fn run_test_in_spawned_subprocess(desc: TestDesc, runnable_test: RunnableTest) -> ! {
    let builtin_panic_hook = panic::take_hook();
    let record_result = Arc::new(move |panic_info: Option<&'_ PanicHookInfo<'_>>| {
        let test_result = match panic_info {
            Some(info) => calc_result(&desc, Err(info.payload()), None, None),
            None => calc_result(&desc, Ok(()), None, None),
        };

        // We don't support serializing TrFailedMsg, so just
        // print the message out to stderr.
        if let TrFailedMsg(msg) = &test_result {
            eprintln!("{msg}");
        }

        if let Some(info) = panic_info {
            builtin_panic_hook(info);
        }

        if let TrOk = test_result {
            process::exit(test_result::TR_OK);
        } else {
            process::abort();
        }
    });
    let record_result2 = record_result.clone();
    panic::set_hook(Box::new(move |info| record_result2(Some(info))));
    if let Err(message) = runnable_test.run() {
        panic!("{}", message);
    }
    record_result(None);
    unreachable!("panic=abort callback should have exited the process")
}