libtest_with/
lib.rs

//! Write your own tests and benchmarks that look and behave like built-in tests!
//!
//! This is a simple and small test harness that mimics the original `libtest`
//! (used by `cargo test`/`rustc --test`). That means: all output looks pretty
//! much like `cargo test` and most CLI arguments are understood and used. With
//! that plumbing work out of the way, your test runner can focus on the actual
//! testing.
//!
//! For a small real world example, see [`examples/tidy.rs`][1].
//!
//! [1]: https://github.com/LukasKalbertodt/libtest-mimic/blob/master/examples/tidy.rs
//!
//! # Usage
//!
//! To use this, you most likely want to add a manual `[[test]]` section to
//! `Cargo.toml` and set `harness = false`. For example:
//!
//! ```toml
//! [[test]]
//! name = "mytest"
//! path = "tests/mytest.rs"
//! harness = false
//! ```
//!
//! And in `tests/mytest.rs` you would call [`run`] in the `main` function:
//!
//! ```no_run
//! use libtest_with::{Arguments, Trial};
//!
//! // Parse command line arguments
//! let args = Arguments::from_args();
//!
//! // Create a list of tests and/or benchmarks (in this case: two dummy tests).
//! let tests = vec![
//!     Trial::test("succeeding_test", move || Ok(())),
//!     Trial::test("failing_test", move || Err("Woops".into())),
//! ];
//!
//! // Run all tests and exit the application appropriately.
//! libtest_with::run(&args, tests).exit();
//! ```
//!
//! Instead of returning `Ok` or `Err` directly, you want to actually perform
//! your tests, of course. See [`Trial::test`] for more information on how to
//! define a test. You can list all your tests manually, but in many cases it
//! is useful to generate one test per file in a directory, for example, as
//! sketched below.
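//!
//! A minimal sketch of generating one trial per file, assuming a
//! `tests/cases/` directory and a hypothetical `check_file` function:
//!
//! ```no_run
//! use libtest_with::{Failed, Trial};
//!
//! fn collect_trials() -> Result<Vec<Trial>, Box<dyn std::error::Error>> {
//!     let mut trials = Vec::new();
//!     for entry in std::fs::read_dir("tests/cases")? {
//!         let path = entry?.path();
//!         let name = path.display().to_string();
//!         trials.push(Trial::test(name, move || check_file(&path)));
//!     }
//!     Ok(trials)
//! }
//!
//! fn check_file(path: &std::path::Path) -> Result<(), Failed> {
//!     // Hypothetical per-file check; always passes in this sketch.
//!     let _ = path;
//!     Ok(())
//! }
//! ```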
//!
//! You can then run `cargo test --test mytest` to run it. To see the CLI
//! arguments supported by this crate, run `cargo test --test mytest -- -h`.
//!
//!
//! # Known limitations and differences to the official test harness
//!
//! `libtest-mimic` works on a best-effort basis: it tries to be as close to
//! `libtest` as possible, but there are differences for a variety of reasons.
//! For example, some rarely used features might not be implemented, some
//! features are extremely difficult to implement, and removing minor,
//! unimportant differences is just not worth the hassle.
//!
//! Some of the notable differences:
//!
//! - Output capture and `--nocapture`: simply not supported. The official
//!   `libtest` uses internal `std` functions to temporarily redirect output.
//!   `libtest-mimic` cannot use those. See [this issue][capture] for more
//!   information.
//! - `--format=junit`
//! - Also see [#13](https://github.com/LukasKalbertodt/libtest-mimic/issues/13)
//!
//! [capture]: https://github.com/LukasKalbertodt/libtest-mimic/issues/9

#![forbid(unsafe_code)]

use std::{
    borrow::Cow,
    fmt,
    process::{self, ExitCode},
    sync::{mpsc, Mutex},
    thread,
    time::Instant,
};

mod args;
mod printer;

use printer::Printer;

pub use crate::args::{Arguments, ColorSetting, FormatSetting};

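/// Prefix that a test runner can put at the start of a [`Failed`] message to
/// have the trial reported as ignored at runtime instead of failed. Anything
/// after the prefix is used as the ignore reason.
///
/// A minimal sketch:
///
/// ```no_run
/// use libtest_with::{Trial, RUNTIME_IGNORE_PREFIX};
///
/// let trial = Trial::test("needs_network", || {
///     // Hypothetical runtime check; report the trial as ignored instead of failed.
///     Err(format!("{}network not available", RUNTIME_IGNORE_PREFIX).into())
/// });
/// ```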
pub static RUNTIME_IGNORE_PREFIX: &str = "rt-ignored: ";

#[cfg(feature = "http")]
pub use reqwest;
#[cfg(feature = "icmp")]
pub use ping;

#[cfg(feature = "resource")]
pub use sysinfo;
#[cfg(feature = "resource")]
pub use byte_unit;
#[cfg(feature = "resource")]
pub use num_cpus;
#[cfg(feature = "executable")]
pub use which;
#[cfg(all(feature = "user", not(target_os = "windows")))]
pub use uzers;
#[cfg(feature = "timezone")]
pub use chrono;

/// A single test or benchmark.
///
/// The original `libtest` often calls benchmarks "tests", which is a bit
/// confusing. So in this library, it is called "trial".
///
/// A trial is created via [`Trial::test`] or [`Trial::bench`]. The trial's
/// `name` is printed and used for filtering. The `runner` is called when the
/// test/benchmark is executed to determine its outcome. If `runner` panics,
/// the trial is considered "failed". If you need the behavior of
/// `#[should_panic]` you need to catch the panic yourself. You likely want to
/// compare the panic payload to an expected value anyway.
pub struct Trial {
    runner: Box<dyn FnOnce(bool) -> Outcome + Send>,
    info: TestInfo,
}

impl Trial {
    /// Creates a (non-benchmark) test with the given name and runner.
    ///
    /// The runner returning `Ok(())` is interpreted as the test passing. If the
    /// runner returns `Err(_)`, the test is considered failed.
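    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use libtest_with::Trial;
    ///
    /// let trial = Trial::test("arithmetic", || {
    ///     if 1 + 1 == 2 {
    ///         Ok(())
    ///     } else {
    ///         Err("1 + 1 did not equal 2".into())
    ///     }
    /// });
    /// assert_eq!(trial.name(), "arithmetic");
    /// ```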
    pub fn test<R>(name: impl Into<String>, runner: R) -> Self
    where
        R: FnOnce() -> Result<(), Failed> + Send + 'static,
    {
        Self {
            runner: Box::new(move |_test_mode| match runner() {
                Ok(()) => Outcome::Passed,
                Err(failed) => {
                    if let Some(msg) = failed.message() {
                        if msg.starts_with(RUNTIME_IGNORE_PREFIX) {
                            // Anything after the prefix is the ignore reason.
                            let ignore_message = if msg.len() > RUNTIME_IGNORE_PREFIX.len() {
                                Some(msg[RUNTIME_IGNORE_PREFIX.len()..].into())
                            } else {
                                None
                            };
                            return Outcome::Ignored(ignore_message);
                        }
                    }
                    Outcome::Failed(failed)
                }
            }),
            info: TestInfo {
                name: name.into(),
                kind: String::new(),
                is_ignored: false,
                ignore_message: None,
                is_bench: false,
            },
        }
    }

    /// Creates a benchmark with the given name and runner.
    ///
    /// If the runner's parameter `test_mode` is `true`, the runner function
    /// should run all code just once, without measuring, just to make sure it
    /// does not panic. If the parameter is `false`, it should perform the
    /// actual benchmark. If `test_mode` is `true` you may return `Ok(None)`,
    /// but if it's `false`, you have to return a `Measurement`, or else the
    /// benchmark is considered a failure.
    ///
    /// `test_mode` is `true` if neither `--bench` nor `--test` are set, and
    /// `false` when `--bench` is set. If `--test` is set, benchmarks are not
    /// run at all; both flags cannot be set at the same time.
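    ///
    /// A minimal sketch (the timing loop is illustrative only):
    ///
    /// ```
    /// use std::time::Instant;
    /// use libtest_with::{Measurement, Trial};
    ///
    /// let bench = Trial::bench("sum_0_to_999", |test_mode| {
    ///     if test_mode {
    ///         // Just check that the code runs without panicking.
    ///         let _ = (0u64..1_000).sum::<u64>();
    ///         return Ok(None);
    ///     }
    ///     let start = Instant::now();
    ///     let _ = (0u64..1_000).sum::<u64>();
    ///     let avg = start.elapsed().as_nanos() as u64;
    ///     Ok(Some(Measurement { avg, variance: 0 }))
    /// });
    /// assert!(bench.is_bench());
    /// ```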
    pub fn bench<R>(name: impl Into<String>, runner: R) -> Self
    where
        R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send + 'static,
    {
        Self {
            runner: Box::new(move |test_mode| match runner(test_mode) {
                Err(failed) => Outcome::Failed(failed),
                Ok(_) if test_mode => Outcome::Passed,
                Ok(Some(measurement)) => Outcome::Measured(measurement),
                Ok(None) => {
                    Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into())
                }
            }),
            info: TestInfo {
                name: name.into(),
                kind: String::new(),
                is_ignored: false,
                ignore_message: None,
                is_bench: true,
            },
        }
    }

    /// Sets the "kind" of this test/benchmark. If this string is not
    /// empty, it is printed in brackets before the test name (e.g.
    /// `test [my-kind] test_name`). (Default: *empty*)
    ///
    /// This is the only extension to the original libtest.
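    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use libtest_with::Trial;
    ///
    /// let trial = Trial::test("cargo_toml_is_tidy", || Ok(()))
    ///     .with_kind("tidy");
    /// assert_eq!(trial.kind(), "tidy");
    /// ```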
    pub fn with_kind(self, kind: impl Into<String>) -> Self {
        Self {
            info: TestInfo {
                kind: kind.into(),
                ..self.info
            },
            ..self
        }
    }

    /// Sets whether or not this test is considered "ignored". (Default: `false`)
    ///
    /// With the built-in test suite, you can annotate tests with `#[ignore]` to
    /// not execute them by default (for example because they take a long time
    /// or require a special environment). If the `--ignored` flag is set,
    /// ignored tests are executed, too. The optional `ignore_message` is
    /// attached to the trial and reported as the reason for ignoring it.
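    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use libtest_with::Trial;
    ///
    /// let trial = Trial::test("slow_test", || Ok(()))
    ///     .with_ignored_flag(true, Some("takes several minutes".into()));
    /// assert!(trial.has_ignored_flag());
    /// ```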
    pub fn with_ignored_flag(self, is_ignored: bool, ignore_message: Option<String>) -> Self {
        Self {
            info: TestInfo {
                is_ignored,
                ignore_message,
                ..self.info
            },
            ..self
        }
    }

    /// Returns the name of this trial.
    pub fn name(&self) -> &str {
        &self.info.name
    }

    /// Returns the kind of this trial. If you have not set a kind, this is an
    /// empty string.
    pub fn kind(&self) -> &str {
        &self.info.kind
    }

    /// Returns whether this trial has been marked as *ignored*.
    pub fn has_ignored_flag(&self) -> bool {
        self.info.is_ignored
    }

    /// Returns `true` iff this trial is a test (as opposed to a benchmark).
    pub fn is_test(&self) -> bool {
        !self.info.is_bench
    }

    /// Returns `true` iff this trial is a benchmark (as opposed to a test).
    pub fn is_bench(&self) -> bool {
        self.info.is_bench
    }
}

impl fmt::Debug for Trial {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        struct OpaqueRunner;
        impl fmt::Debug for OpaqueRunner {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.write_str("<runner>")
            }
        }

        f.debug_struct("Test")
            .field("runner", &OpaqueRunner)
            .field("name", &self.info.name)
            .field("kind", &self.info.kind)
            .field("is_ignored", &self.info.is_ignored)
            .field("is_bench", &self.info.is_bench)
            .finish()
    }
}

#[derive(Debug)]
struct TestInfo {
    name: String,
    kind: String,
    is_ignored: bool,
    ignore_message: Option<String>,
    is_bench: bool,
}

impl TestInfo {
    fn test_name_with_kind(&self) -> Cow<'_, str> {
        if self.kind.is_empty() {
            Cow::Borrowed(&self.name)
        } else {
            Cow::Owned(format!("[{}] {}", self.kind, self.name))
        }
    }
}

/// Output of a benchmark.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Measurement {
    /// Average time in ns.
    pub avg: u64,

    /// Variance in ns.
    pub variance: u64,
}

/// Indicates that a test/benchmark has failed. Optionally carries a message.
///
/// You usually want to use the `From` impl of this type, which allows you to
/// convert any `T: fmt::Display` (e.g. `String`, `&str`, ...) into `Failed`.
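///
/// A minimal sketch:
///
/// ```
/// use libtest_with::Failed;
///
/// let failed: Failed = "disk is full".into();
/// assert_eq!(failed.message(), Some("disk is full"));
/// ```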
#[derive(Debug, Clone)]
pub struct Failed {
    msg: Option<String>,
}

impl Failed {
    /// Creates an instance without message.
    pub fn without_message() -> Self {
        Self { msg: None }
    }

    /// Returns the message of this instance.
    pub fn message(&self) -> Option<&str> {
        self.msg.as_deref()
    }
}

impl<M: std::fmt::Display> From<M> for Failed {
    fn from(msg: M) -> Self {
        Self {
            msg: Some(msg.to_string()),
        }
    }
}

/// The outcome of performing a test/benchmark.
#[derive(Debug, Clone)]
enum Outcome {
    /// The test passed.
    Passed,

    /// The test or benchmark failed.
    Failed(Failed),

    /// The test or benchmark was ignored.
    Ignored(Option<String>),

    /// The benchmark was successfully run.
    Measured(Measurement),
}

/// Contains information about the entire test run. It is returned by [`run`].
///
/// This type is marked as `#[must_use]`. Usually, you just call
/// [`exit()`][Conclusion::exit] on the result of `run` to exit the application
/// with the correct exit code. But you can also store this value and inspect
/// its data.
#[derive(Clone, Debug, PartialEq, Eq)]
#[must_use = "Call `exit()` or `exit_if_failed()` to set the correct return code"]
pub struct Conclusion {
    /// Number of tests and benchmarks that were filtered out (either by the
    /// filter-in pattern or by `--skip` arguments).
    pub num_filtered_out: u64,

    /// Number of passed tests.
    pub num_passed: u64,

    /// Number of failed tests and benchmarks.
    pub num_failed: u64,

    /// Number of ignored tests and benchmarks.
    pub num_ignored: u64,

    /// Number of benchmarks that successfully ran.
    pub num_measured: u64,
}

impl Conclusion {
    /// Returns an exit code that can be returned from `main` to signal
    /// success/failure to the calling process.
    pub fn exit_code(&self) -> ExitCode {
        if self.has_failed() {
            ExitCode::from(101)
        } else {
            ExitCode::SUCCESS
        }
    }

    /// Returns whether there have been any failures.
    pub fn has_failed(&self) -> bool {
        self.num_failed > 0
    }

    /// Exits the application with an appropriate error code (0 if all tests
    /// have passed, 101 if there have been failures). This uses
    /// [`process::exit`], meaning that destructors are not run. Consider
    /// using [`Self::exit_code`] instead for proper program cleanup.
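    ///
    /// If you want destructors to run, a `main` returning
    /// [`std::process::ExitCode`] can use [`Self::exit_code`] instead. A
    /// minimal sketch:
    ///
    /// ```no_run
    /// use std::process::ExitCode;
    /// use libtest_with::{Arguments, Trial};
    ///
    /// fn main() -> ExitCode {
    ///     let args = Arguments::from_args();
    ///     let tests: Vec<Trial> = Vec::new();
    ///     libtest_with::run(&args, tests).exit_code()
    /// }
    /// ```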
    pub fn exit(&self) -> ! {
        self.exit_if_failed();
        process::exit(0);
    }

    /// Exits the application with error code 101 if there were any failures.
    /// Otherwise, returns normally. This uses [`process::exit`], meaning that
    /// destructors are not run. Consider using [`Self::exit_code`] instead for
    /// proper program cleanup.
    pub fn exit_if_failed(&self) {
        if self.has_failed() {
            process::exit(101)
        }
    }

    fn empty() -> Self {
        Self {
            num_filtered_out: 0,
            num_passed: 0,
            num_failed: 0,
            num_ignored: 0,
            num_measured: 0,
        }
    }
}

impl Arguments {
    /// Returns `true` if the given test should be ignored.
    fn is_ignored(&self, test: &Trial) -> bool {
        (test.info.is_ignored && !self.ignored && !self.include_ignored)
            || (test.info.is_bench && self.test)
            || (!test.info.is_bench && self.bench)
    }

    fn is_filtered_out(&self, test: &Trial) -> bool {
        let test_name = test.name();
        // Match against the full test name, including the kind. This upholds the invariant that if
        // --list prints out:
        //
        // <some string>: test
        //
        // then "--exact <some string>" runs exactly that test.
        let test_name_with_kind = test.info.test_name_with_kind();

        // If a filter was specified, apply it.
        if let Some(filter) = &self.filter {
            match self.exact {
                // For exact matches, we want to match against either the test name (to maintain
                // backwards compatibility with older versions of libtest-mimic), or the test kind
                // (technically more correct with respect to matching against the output of --list).
                true if test_name != filter && &test_name_with_kind != filter => return true,
                false if !test_name_with_kind.contains(filter) => return true,
                _ => {}
            };
        }

        // If any skip patterns were specified, test against all of them.
        for skip_filter in &self.skip {
            match self.exact {
                // For exact matches, we want to match against either the test name (to maintain
                // backwards compatibility with older versions of libtest-mimic), or the test kind
                // (technically more correct with respect to matching against the output of --list).
                true if test_name == skip_filter || &test_name_with_kind == skip_filter => {
                    return true
                }
                false if test_name_with_kind.contains(skip_filter) => return true,
                _ => {}
            }
        }

        if self.ignored && !test.info.is_ignored {
            return true;
        }

        false
    }
}

/// Runs all given trials (tests & benchmarks).
///
/// This is the central function of this crate. It provides the framework for
/// the testing harness. It does all the printing and housekeeping.
///
/// The returned value contains some useful statistics; see [`Conclusion`] for
/// more information. If `--list` was specified, a list is printed and a dummy
/// `Conclusion` is returned.
pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
    let start_instant = Instant::now();
    let mut conclusion = Conclusion::empty();

    // Apply filtering
    if args.filter.is_some() || !args.skip.is_empty() || args.ignored {
        let len_before = tests.len() as u64;
        tests.retain(|test| !args.is_filtered_out(test));
        conclusion.num_filtered_out = len_before - tests.len() as u64;
    }
    let tests = tests;

    // Create printer which is used for all output.
    let mut printer = printer::Printer::new(args, &tests);

    // If `--list` is specified, just print the list and return.
    if args.list {
        printer.print_list(&tests, args.ignored);
        return Conclusion::empty();
    }

    // Print number of tests
    printer.print_title(tests.len() as u64);

    let mut failed_tests = Vec::new();
    let mut handle_outcome = |outcome: Outcome, test: TestInfo, printer: &mut Printer| {
        printer.print_single_outcome(&test, &outcome);

        // Handle outcome
        match outcome {
            Outcome::Passed => conclusion.num_passed += 1,
            Outcome::Failed(failed) => {
                failed_tests.push((test, failed.msg));
                conclusion.num_failed += 1;
            }
            Outcome::Ignored(_) => conclusion.num_ignored += 1,
            Outcome::Measured(_) => conclusion.num_measured += 1,
        }
    };

    // Execute all tests.
    let test_mode = !args.bench;

    let num_threads = platform_defaults_to_one_thread()
        .then_some(1)
        .or(args.test_threads)
        .or_else(|| std::thread::available_parallelism().ok().map(Into::into))
        .unwrap_or(1);

    if num_threads == 1 {
        // Run tests sequentially in the main thread.
        for test in tests {
            // Print `test foo    ...`, run the test, then print the outcome in
            // the same line.
            printer.print_test(&test.info);
            let outcome = if args.is_ignored(&test) {
                Outcome::Ignored(None)
            } else {
                run_single(test.runner, test_mode)
            };
            handle_outcome(outcome, test.info, &mut printer);
        }
    } else {
        // Run tests in a thread pool.
        let (sender, receiver) = mpsc::channel();

        let num_tests = tests.len();
        // TODO: this should use an mpmc channel, once that's stabilized in std.
        let iter = Mutex::new(tests.into_iter());
        thread::scope(|scope| {
            // Start worker threads
            for _ in 0..num_threads {
                scope.spawn(|| {
                    loop {
                        // Get next test to process from the iterator.
                        let Some(trial) = iter.lock().unwrap().next() else {
                            break;
                        };

                        let payload = if args.is_ignored(&trial) {
                            // libtest-with uses the `Ignored` outcome, which can carry an optional reason.
                            (Outcome::Ignored(None), trial.info)
                        } else {
                            let outcome = run_single(trial.runner, test_mode);
                            (outcome, trial.info)
                        };

                        // It's fine to ignore the result of sending. If the
                        // receiver has hung up, everything will wind down soon
                        // anyway.
                        let _ = sender.send(payload);
                    }
                });
            }

            // Print results of tests that already finished.
            for (outcome, test_info) in receiver.iter().take(num_tests) {
                // In multithreaded mode, we only print the start of the line
                // after the test ran, as otherwise it would lead to terribly
                // interleaved output.
                printer.print_test(&test_info);
                handle_outcome(outcome, test_info, &mut printer);
            }
        });
    }

    // Print failures if there were any, and the final summary.
    if !failed_tests.is_empty() {
        printer.print_failures(&failed_tests);
    }

    printer.print_summary(&conclusion, start_instant.elapsed());

    conclusion
}

/// Returns whether the current host platform should default to a single
/// thread rather than a thread pool. Some platforms, such as WebAssembly,
/// don't have native support for threading at this time.
fn platform_defaults_to_one_thread() -> bool {
    cfg!(target_family = "wasm")
}

/// Runs the given runner, catching any panics and treating them as a failed test.
fn run_single(runner: Box<dyn FnOnce(bool) -> Outcome + Send>, test_mode: bool) -> Outcome {
    use std::panic::{catch_unwind, AssertUnwindSafe};

    catch_unwind(AssertUnwindSafe(move || runner(test_mode))).unwrap_or_else(|e| {
        // The `panic` information is just an `Any` object representing the
        // value the panic was invoked with. For most panics (which use
        // `panic!` like `println!`), this is either `&str` or `String`.
        let payload = e
            .downcast_ref::<String>()
            .map(|s| s.as_str())
            .or(e.downcast_ref::<&str>().map(|s| *s));

        let msg = match payload {
            Some(payload) => format!("test panicked: {payload}"),
            None => "test panicked".to_string(),
        };
        Outcome::Failed(msg.into())
    })
}