nextest_runner/reporter/events.rs
1// Copyright (c) The nextest Contributors
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4//! Events for the reporter.
5//!
6//! These types form the interface between the test runner and the test
7//! reporter. The root structure for all events is [`TestEvent`].
8
9use super::{FinalStatusLevel, StatusLevel, TestOutputDisplay};
10use crate::{
11 config::{
12 elements::{LeakTimeoutResult, SlowTimeoutResult},
13 scripts::ScriptId,
14 },
15 list::{TestInstanceId, TestList},
16 runner::{StressCondition, StressCount},
17 test_output::ChildExecutionOutput,
18};
19use chrono::{DateTime, FixedOffset};
20use nextest_metadata::MismatchReason;
21use quick_junit::ReportUuid;
22use std::{collections::BTreeMap, fmt, num::NonZero, process::ExitStatus, time::Duration};
23
/// A reporter event.
///
/// Either a periodic tick or an actual test event.
#[derive(Clone, Debug)]
pub enum ReporterEvent<'a> {
    /// A periodic tick.
    Tick,

    /// A test event.
    ///
    /// (Boxed, presumably to keep the enum itself small — confirm before
    /// relying on this.)
    Test(Box<TestEvent<'a>>),
}
/// A test event.
///
/// Events are produced by a [`TestRunner`](crate::runner::TestRunner) and
/// consumed by a [`Reporter`](crate::reporter::Reporter).
#[derive(Clone, Debug)]
pub struct TestEvent<'a> {
    /// The time at which the event was generated, including the offset from UTC.
    pub timestamp: DateTime<FixedOffset>,

    /// The amount of time elapsed since the start of the test run.
    pub elapsed: Duration,

    /// The kind of test event this is.
    pub kind: TestEventKind<'a>,
}
48
/// The kind of test event this is.
///
/// Forms part of [`TestEvent`].
///
/// Variants that concern a single test or setup script carry an optional
/// `stress_index`, which is set when a stress test is being run.
#[derive(Clone, Debug)]
pub enum TestEventKind<'a> {
    /// The test run started.
    RunStarted {
        /// The list of tests that will be run.
        ///
        /// The methods on the test list indicate the number of tests that will be run.
        test_list: &'a TestList<'a>,

        /// The UUID for this run.
        run_id: ReportUuid,

        /// The nextest profile chosen for this run.
        profile_name: String,

        /// The command-line arguments for the process.
        cli_args: Vec<String>,

        /// The stress condition for this run, if any.
        stress_condition: Option<StressCondition>,
    },

    /// When running stress tests serially, a sub-run started.
    StressSubRunStarted {
        /// The amount of progress completed so far.
        progress: StressProgress,
    },

    /// A setup script started.
    SetupScriptStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// True if some output from the setup script is being passed through.
        no_capture: bool,
    },

    /// A setup script was slow.
    SetupScriptSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// The amount of time elapsed since the start of execution.
        elapsed: Duration,

        /// True if the script has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A setup script completed execution.
    SetupScriptFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// Whether the JUnit report should store success output for this script.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this script.
        junit_store_failure_output: bool,

        /// True if some output from the setup script was passed through.
        no_capture: bool,

        /// The execution status of the setup script.
        run_status: SetupScriptExecuteStatus,
    },

    // TODO: add events for BinaryStarted and BinaryFinished? May want a slightly different way to
    // do things, maybe a couple of reporter traits (one for the run as a whole and one for each
    // binary).
    /// A test started running.
    TestStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was started.
        test_instance: TestInstanceId<'a>,

        /// Current run statistics so far.
        current_stats: RunStats,

        /// The number of tests currently running, including this one.
        running: usize,

        /// The command line that will be used to run this test.
        command_line: Vec<String>,
    },

    /// A test was slower than a configured soft timeout.
    TestSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was slow.
        test_instance: TestInstanceId<'a>,

        /// Retry data.
        retry_data: RetryData,

        /// The amount of time that has elapsed since the beginning of the test.
        elapsed: Duration,

        /// True if the test has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A test attempt failed and will be retried in the future.
    ///
    /// This event does not occur on the final run of a failing test.
    TestAttemptFailedWillRetry {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstanceId<'a>,

        /// The status of this attempt to run the test. Will never be success.
        run_status: ExecuteStatus,

        /// The delay before the next attempt to run the test.
        delay_before_next_attempt: Duration,

        /// Whether failure outputs are printed out.
        failure_output: TestOutputDisplay,

        /// The current number of running tests.
        running: usize,
    },

    /// A retry has started.
    TestRetryStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstanceId<'a>,

        /// Data related to retries.
        retry_data: RetryData,

        /// The current number of running tests.
        running: usize,

        /// The command line that will be used to run this test.
        command_line: Vec<String>,
    },

    /// A test finished running.
    TestFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that finished running.
        test_instance: TestInstanceId<'a>,

        /// Test setting for success output.
        success_output: TestOutputDisplay,

        /// Test setting for failure output.
        failure_output: TestOutputDisplay,

        /// Whether the JUnit report should store success output for this test.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this test.
        junit_store_failure_output: bool,

        /// Information about all the runs for this test.
        run_statuses: ExecutionStatuses,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests that are currently running, excluding this one.
        running: usize,
    },

    /// A test was skipped.
    TestSkipped {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was skipped.
        test_instance: TestInstanceId<'a>,

        /// The reason this test was skipped.
        reason: MismatchReason,
    },

    /// An information request was received.
    InfoStarted {
        /// The number of tasks currently running. This is the same as the
        /// number of expected responses.
        total: usize,

        /// Statistics for the run.
        run_stats: RunStats,
    },

    /// Information about a script or test was received.
    InfoResponse {
        /// The index of the response, starting from 0.
        index: usize,

        /// The total number of responses expected.
        total: usize,

        /// The response itself.
        response: InfoResponse<'a>,
    },

    /// An information request was completed.
    InfoFinished {
        /// The number of responses that were not received. In most cases, this
        /// is 0.
        missing: usize,
    },

    /// `Enter` was pressed. Either a newline or a progress bar snapshot needs
    /// to be printed.
    InputEnter {
        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests running.
        running: usize,
    },

    /// A cancellation notice was received.
    RunBeginCancel {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A forcible kill was requested due to receiving a signal.
    RunBeginKill {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A SIGTSTP event was received and the run was paused.
    RunPaused {
        /// The number of setup scripts running.
        setup_scripts_running: usize,

        /// The number of tests currently running.
        running: usize,
    },

    /// A SIGCONT event was received and the run is being continued.
    RunContinued {
        /// The number of setup scripts that will be started up again.
        setup_scripts_running: usize,

        /// The number of tests that will be started up again.
        running: usize,
    },

    /// When running stress tests serially, a sub-run finished.
    StressSubRunFinished {
        /// The amount of progress completed so far.
        progress: StressProgress,

        /// The amount of time it took for this sub-run to complete.
        sub_elapsed: Duration,

        /// Statistics for the sub-run.
        sub_stats: RunStats,
    },

    /// The test run finished.
    RunFinished {
        /// The unique ID for this run.
        run_id: ReportUuid,

        /// The time at which the run was started.
        start_time: DateTime<FixedOffset>,

        /// The amount of time it took for the tests to run.
        elapsed: Duration,

        /// Statistics for the run, or overall statistics for stress tests.
        run_stats: RunFinishedStats,
    },
}
392
/// Progress for a stress test.
///
/// Whether the stress run is bounded by a number of sub-runs or by a total
/// duration.
#[derive(Clone, Debug)]
pub enum StressProgress {
    /// This is a count-based stress run.
    Count {
        /// The total number of stress runs.
        total: StressCount,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },

    /// This is a time-based stress run.
    Time {
        /// The total time for the stress run.
        total: Duration,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },
}
420
421impl StressProgress {
422 /// Returns the remaining amount of work if the progress indicates there's
423 /// still more to do, otherwise `None`.
424 pub fn remaining(&self) -> Option<StressRemaining> {
425 match self {
426 Self::Count {
427 total: StressCount::Count(total),
428 elapsed: _,
429 completed,
430 } => total
431 .get()
432 .checked_sub(*completed)
433 .and_then(|remaining| NonZero::try_from(remaining).ok())
434 .map(StressRemaining::Count),
435 Self::Count {
436 total: StressCount::Infinite,
437 ..
438 } => Some(StressRemaining::Infinite),
439 Self::Time {
440 total,
441 elapsed,
442 completed: _,
443 } => total.checked_sub(*elapsed).map(StressRemaining::Time),
444 }
445 }
446
447 /// Returns a unique ID for this stress sub-run, consisting of the run ID and stress index.
448 pub fn unique_id(&self, run_id: ReportUuid) -> String {
449 let stress_current = match self {
450 Self::Count { completed, .. } | Self::Time { completed, .. } => *completed,
451 };
452 format!("{}:@stress-{}", run_id, stress_current)
453 }
454}
455
/// For a stress test, the amount of time or number of stress runs remaining.
///
/// Returned by [`StressProgress::remaining`].
#[derive(Clone, Debug)]
pub enum StressRemaining {
    /// The number of stress runs remaining, guaranteed to be non-zero.
    Count(NonZero<u32>),

    /// Infinite number of stress runs remaining.
    Infinite,

    /// The amount of time remaining.
    Time(Duration),
}
468
/// The index of the current stress run.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct StressIndex {
    /// The 0-indexed index.
    pub current: u32,

    /// The total number of stress runs, if that is available.
    ///
    /// `None` for unbounded (e.g. time-based or infinite) stress runs.
    pub total: Option<NonZero<u32>>,
}
478
/// Statistics for a completed test run or stress run.
#[derive(Clone, Debug)]
pub enum RunFinishedStats {
    /// A single test run was completed.
    Single(RunStats),

    /// A stress run was completed.
    Stress(StressRunStats),
}
488
489impl RunFinishedStats {
490 /// For a single run, returns a summary of statistics as an enum. For a
491 /// stress run, returns a summary for the last sub-run.
492 pub fn final_stats(&self) -> FinalRunStats {
493 match self {
494 Self::Single(stats) => stats.summarize_final(),
495 Self::Stress(stats) => stats.last_final_stats,
496 }
497 }
498}
499
/// Statistics for a test run.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
pub struct RunStats {
    /// The total number of tests that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `finished_count` at the end.
    pub initial_run_count: usize,

    /// The total number of tests that finished running.
    pub finished_count: usize,

    /// The total number of setup scripts that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than
    /// `setup_scripts_finished_count` at the end.
    pub setup_scripts_initial_count: usize,

    /// The total number of setup scripts that finished running.
    pub setup_scripts_finished_count: usize,

    /// The number of setup scripts that passed.
    pub setup_scripts_passed: usize,

    /// The number of setup scripts that failed.
    pub setup_scripts_failed: usize,

    /// The number of setup scripts that encountered an execution failure.
    pub setup_scripts_exec_failed: usize,

    /// The number of setup scripts that timed out.
    pub setup_scripts_timed_out: usize,

    /// The number of tests that passed. Includes `passed_slow`, `passed_timed_out`, `flaky` and `leaky`.
    pub passed: usize,

    /// The number of slow tests that passed.
    pub passed_slow: usize,

    /// The number of timed out tests that passed.
    pub passed_timed_out: usize,

    /// The number of tests that passed on retry.
    pub flaky: usize,

    /// The number of tests that failed. Includes `leaky_failed`.
    pub failed: usize,

    /// The number of failed tests that were slow.
    pub failed_slow: usize,

    /// The number of timed out tests that failed.
    pub failed_timed_out: usize,

    /// The number of tests that passed but leaked handles.
    pub leaky: usize,

    /// The number of tests that otherwise passed, but leaked handles and were
    /// treated as failed as a result.
    pub leaky_failed: usize,

    /// The number of tests that encountered an execution failure.
    pub exec_failed: usize,

    /// The number of tests that were skipped.
    pub skipped: usize,

    /// If the run is cancelled, the reason the cancellation is happening.
    pub cancel_reason: Option<CancelReason>,
}
568
impl RunStats {
    /// Returns true if there are any failures recorded in the stats.
    pub fn has_failures(&self) -> bool {
        self.failed_setup_script_count() > 0 || self.failed_count() > 0
    }

    /// Returns count of setup scripts that did not pass.
    pub fn failed_setup_script_count(&self) -> usize {
        self.setup_scripts_failed + self.setup_scripts_exec_failed + self.setup_scripts_timed_out
    }

    /// Returns count of tests that did not pass.
    pub fn failed_count(&self) -> usize {
        self.failed + self.exec_failed + self.failed_timed_out
    }

    /// Summarizes the stats as an enum at the end of a test run.
    pub fn summarize_final(&self) -> FinalRunStats {
        // Check for failures first. The order of setup scripts vs tests should
        // not be important, though we don't assert that here.
        if self.failed_setup_script_count() > 0 {
            // Is this related to a cancellation other than one directly caused
            // by the failure?
            //
            // (This comparison relies on `CancelReason`'s `Ord` impl, and on
            // `None` ordering below any `Some` value.)
            if self.cancel_reason > Some(CancelReason::TestFailure) {
                FinalRunStats::Cancelled {
                    reason: self.cancel_reason,
                    kind: RunStatsFailureKind::SetupScript,
                }
            } else {
                FinalRunStats::Failed(RunStatsFailureKind::SetupScript)
            }
        } else if self.setup_scripts_initial_count > self.setup_scripts_finished_count {
            // Not all setup scripts finished, so the run was interrupted
            // during setup.
            FinalRunStats::Cancelled {
                reason: self.cancel_reason,
                kind: RunStatsFailureKind::SetupScript,
            }
        } else if self.failed_count() > 0 {
            let kind = RunStatsFailureKind::Test {
                initial_run_count: self.initial_run_count,
                not_run: self.initial_run_count.saturating_sub(self.finished_count),
            };

            // Is this related to a cancellation other than one directly caused
            // by the failure?
            if self.cancel_reason > Some(CancelReason::TestFailure) {
                FinalRunStats::Cancelled {
                    reason: self.cancel_reason,
                    kind,
                }
            } else {
                FinalRunStats::Failed(kind)
            }
        } else if self.initial_run_count > self.finished_count {
            FinalRunStats::Cancelled {
                reason: self.cancel_reason,
                kind: RunStatsFailureKind::Test {
                    initial_run_count: self.initial_run_count,
                    not_run: self.initial_run_count.saturating_sub(self.finished_count),
                },
            }
        } else if self.finished_count == 0 {
            FinalRunStats::NoTestsRun
        } else {
            FinalRunStats::Success
        }
    }

    /// Records a finished setup script, updating the pass/fail/exec-fail/
    /// timeout counters based on its execution result.
    pub(crate) fn on_setup_script_finished(&mut self, status: &SetupScriptExecuteStatus) {
        self.setup_scripts_finished_count += 1;

        match status.result {
            ExecutionResult::Pass
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                self.setup_scripts_passed += 1;
            }
            ExecutionResult::Fail { .. }
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                self.setup_scripts_failed += 1;
            }
            ExecutionResult::ExecFail => {
                self.setup_scripts_exec_failed += 1;
            }
            // Timed out setup scripts are always treated as failures
            ExecutionResult::Timeout { .. } => {
                self.setup_scripts_timed_out += 1;
            }
        }
    }

    /// Records a finished test (including all of its retries), updating the
    /// pass/fail/flaky/leaky counters based on the final attempt's result.
    pub(crate) fn on_test_finished(&mut self, run_statuses: &ExecutionStatuses) {
        self.finished_count += 1;
        // run_statuses is guaranteed to have at least one element.
        // * If the last element is success, treat it as success (and possibly flaky).
        // * If the last element is a failure, use it to determine fail/exec fail.
        // Note that this is different from what Maven Surefire does (use the first failure):
        // https://maven.apache.org/surefire/maven-surefire-plugin/examples/rerun-failing-tests.html
        //
        // This is not likely to matter much in practice since failures are likely to be of the
        // same type.
        let last_status = run_statuses.last_status();
        match last_status.result {
            ExecutionResult::Pass => {
                self.passed += 1;
                if last_status.is_slow {
                    self.passed_slow += 1;
                }
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                self.passed += 1;
                self.leaky += 1;
                if last_status.is_slow {
                    self.passed_slow += 1;
                }
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                self.failed += 1;
                self.leaky_failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResult::Fail { .. } => {
                self.failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResult::Timeout {
                result: SlowTimeoutResult::Pass,
            } => {
                self.passed += 1;
                self.passed_timed_out += 1;
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Timeout {
                result: SlowTimeoutResult::Fail,
            } => {
                self.failed_timed_out += 1;
            }
            ExecutionResult::ExecFail => self.exec_failed += 1,
        }
    }
}
728
/// A type summarizing the possible outcomes of a test run.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FinalRunStats {
    /// The test run was successful, or is successful so far.
    Success,

    /// The test run was successful, or is successful so far, but no tests were selected to run.
    NoTestsRun,

    /// The test run was cancelled.
    Cancelled {
        /// The reason for cancellation, if available.
        ///
        /// This should generally be available, but may be None if some tests
        /// that were selected to run were not executed.
        reason: Option<CancelReason>,

        /// The kind of failure that occurred.
        kind: RunStatsFailureKind,
    },

    /// At least one test failed.
    Failed(RunStatsFailureKind),
}
753
/// Statistics for a stress run.
#[derive(Clone, Debug)]
pub struct StressRunStats {
    /// The number of stress runs completed.
    pub completed: StressIndex,

    /// The number of stress runs that succeeded.
    pub success_count: u32,

    /// The number of stress runs that failed.
    pub failed_count: u32,

    /// The last stress run's `FinalRunStats`.
    pub last_final_stats: FinalRunStats,
}
769
770impl StressRunStats {
771 /// Summarizes the stats as an enum at the end of a test run.
772 pub fn summarize_final(&self) -> StressFinalRunStats {
773 if self.failed_count > 0 {
774 StressFinalRunStats::Failed
775 } else if matches!(self.last_final_stats, FinalRunStats::Cancelled { .. }) {
776 StressFinalRunStats::Cancelled
777 } else if matches!(self.last_final_stats, FinalRunStats::NoTestsRun) {
778 StressFinalRunStats::NoTestsRun
779 } else {
780 StressFinalRunStats::Success
781 }
782 }
783}
784
/// A summary of final statistics for a stress run.
///
/// Returned by [`StressRunStats::summarize_final`].
//
// Derives added for consistency with `FinalRunStats`: public types should be
// `Debug`, and the variants are plain data so `Copy`/`Eq` are free.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum StressFinalRunStats {
    /// The stress run was successful.
    Success,

    /// No tests were run.
    NoTestsRun,

    /// The stress run was cancelled.
    Cancelled,

    /// At least one stress run failed.
    Failed,
}
799
/// A type summarizing the step at which a test run failed.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RunStatsFailureKind {
    /// The run was interrupted during setup script execution.
    SetupScript,

    /// The run was interrupted during test execution.
    Test {
        /// The total number of tests scheduled.
        initial_run_count: usize,

        /// The number of tests not run, or for a currently-executing test the number queued up to
        /// run.
        not_run: usize,
    },
}
816
/// Information about executions of a test, including retries.
#[derive(Clone, Debug)]
pub struct ExecutionStatuses {
    /// This is guaranteed to be non-empty.
    statuses: Vec<ExecuteStatus>,
}
823
824#[expect(clippy::len_without_is_empty)] // RunStatuses is never empty
825impl ExecutionStatuses {
826 pub(crate) fn new(statuses: Vec<ExecuteStatus>) -> Self {
827 Self { statuses }
828 }
829
830 /// Returns the last execution status.
831 ///
832 /// This status is typically used as the final result.
833 pub fn last_status(&self) -> &ExecuteStatus {
834 self.statuses
835 .last()
836 .expect("execution statuses is non-empty")
837 }
838
839 /// Iterates over all the statuses.
840 pub fn iter(&self) -> impl DoubleEndedIterator<Item = &'_ ExecuteStatus> + '_ {
841 self.statuses.iter()
842 }
843
844 /// Returns the number of times the test was executed.
845 pub fn len(&self) -> usize {
846 self.statuses.len()
847 }
848
849 /// Returns a description of self.
850 pub fn describe(&self) -> ExecutionDescription<'_> {
851 let last_status = self.last_status();
852 if last_status.result.is_success() {
853 if self.statuses.len() > 1 {
854 ExecutionDescription::Flaky {
855 last_status,
856 prior_statuses: &self.statuses[..self.statuses.len() - 1],
857 }
858 } else {
859 ExecutionDescription::Success {
860 single_status: last_status,
861 }
862 }
863 } else {
864 let first_status = self
865 .statuses
866 .first()
867 .expect("execution statuses is non-empty");
868 let retries = &self.statuses[1..];
869 ExecutionDescription::Failure {
870 first_status,
871 last_status,
872 retries,
873 }
874 }
875 }
876}
877
/// A description of test executions obtained from `ExecuteStatuses`.
///
/// This can be used to quickly determine whether a test passed, failed or was flaky.
#[derive(Copy, Clone, Debug)]
pub enum ExecutionDescription<'a> {
    /// The test was run once and was successful.
    Success {
        /// The status of the test.
        single_status: &'a ExecuteStatus,
    },

    /// The test was run more than once. The final result was successful.
    Flaky {
        /// The last, successful status.
        last_status: &'a ExecuteStatus,

        /// Previous statuses, none of which are successes.
        prior_statuses: &'a [ExecuteStatus],
    },

    /// The test was run once, or possibly multiple times. All runs failed.
    Failure {
        /// The first, failing status.
        first_status: &'a ExecuteStatus,

        /// The last, failing status. Same as the first status if no retries were performed.
        last_status: &'a ExecuteStatus,

        /// Any retries that were performed. All of these runs failed.
        ///
        /// May be empty.
        retries: &'a [ExecuteStatus],
    },
}
912
impl<'a> ExecutionDescription<'a> {
    /// Returns the status level for this `ExecutionDescription`.
    pub fn status_level(&self) -> StatusLevel {
        match self {
            // A successful run can only end in Pass, a passing leak, or a
            // passing timeout; anything else is a bug upstream.
            ExecutionDescription::Success { single_status } => match single_status.result {
                ExecutionResult::Leak {
                    result: LeakTimeoutResult::Pass,
                } => StatusLevel::Leak,
                ExecutionResult::Pass => StatusLevel::Pass,
                ExecutionResult::Timeout {
                    result: SlowTimeoutResult::Pass,
                } => StatusLevel::Slow,
                other => unreachable!(
                    "Success only permits Pass, Leak Pass, or Timeout Pass, found {other:?}"
                ),
            },
            // A flaky test implies that we print out retry information for it.
            ExecutionDescription::Flaky { .. } => StatusLevel::Retry,
            ExecutionDescription::Failure { .. } => StatusLevel::Fail,
        }
    }

    /// Returns the final status level for this `ExecutionDescription`.
    pub fn final_status_level(&self) -> FinalStatusLevel {
        match self {
            ExecutionDescription::Success { single_status, .. } => {
                // Slow is higher priority than leaky, so return slow first here.
                if single_status.is_slow {
                    FinalStatusLevel::Slow
                } else {
                    match single_status.result {
                        ExecutionResult::Pass => FinalStatusLevel::Pass,
                        ExecutionResult::Leak {
                            result: LeakTimeoutResult::Pass,
                        } => FinalStatusLevel::Leak,
                        // Timeout with Pass should return Slow, but this case
                        // shouldn't be reached because is_slow is true for
                        // timeout scenarios. Handle it for completeness.
                        ExecutionResult::Timeout {
                            result: SlowTimeoutResult::Pass,
                        } => FinalStatusLevel::Slow,
                        other => unreachable!(
                            "Success only permits Pass, Leak Pass, or Timeout Pass, found {other:?}"
                        ),
                    }
                }
            }
            // A flaky test implies that we print out retry information for it.
            ExecutionDescription::Flaky { .. } => FinalStatusLevel::Flaky,
            ExecutionDescription::Failure { .. } => FinalStatusLevel::Fail,
        }
    }

    /// Returns the last run status.
    pub fn last_status(&self) -> &'a ExecuteStatus {
        match self {
            // For Success, the single status is also the last status.
            ExecutionDescription::Success {
                single_status: last_status,
            }
            | ExecutionDescription::Flaky { last_status, .. }
            | ExecutionDescription::Failure { last_status, .. } => last_status,
        }
    }
}
977
/// Information about a single execution of a test.
#[derive(Clone, Debug)]
pub struct ExecuteStatus {
    /// Retry-related data.
    pub retry_data: RetryData,
    /// The stdout and stderr output for this test.
    pub output: ChildExecutionOutput,
    /// The execution result for this test: pass, fail or execution error.
    pub result: ExecutionResult,
    /// The time at which the test started.
    pub start_time: DateTime<FixedOffset>,
    /// The time it took for the test to run.
    pub time_taken: Duration,
    /// Whether this test counts as slow.
    pub is_slow: bool,
    /// The delay will be non-zero if this is a retry and delay was specified.
    pub delay_before_start: Duration,
}
996
/// Information about the execution of a setup script.
#[derive(Clone, Debug)]
pub struct SetupScriptExecuteStatus {
    /// Output for this setup script.
    pub output: ChildExecutionOutput,

    /// The execution result for this setup script: pass, fail or execution error.
    pub result: ExecutionResult,

    /// The time at which the script started.
    pub start_time: DateTime<FixedOffset>,

    /// The time it took for the script to run.
    pub time_taken: Duration,

    /// Whether this script counts as slow.
    pub is_slow: bool,

    /// The map of environment variables that were set by this script.
    ///
    /// `None` if an error occurred while running the script or reading the
    /// environment map.
    pub env_map: Option<SetupScriptEnvMap>,
}
1021
/// A map of environment variables set by a setup script.
///
/// Part of [`SetupScriptExecuteStatus`].
#[derive(Clone, Debug)]
pub struct SetupScriptEnvMap {
    /// The map of environment variables set by the script.
    pub env_map: BTreeMap<String, String>,
}
1030
/// Data related to retries for a test.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct RetryData {
    /// The current attempt. In the range `[1, total_attempts]`.
    pub attempt: u32,

    /// The total number of times this test can be run. Equal to `1 + retries`.
    pub total_attempts: u32,
}
1040
1041impl RetryData {
1042 /// Returns true if there are no more attempts after this.
1043 pub fn is_last_attempt(&self) -> bool {
1044 self.attempt >= self.total_attempts
1045 }
1046}
1047
/// Whether a test passed, failed or an error occurred while executing the test.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ExecutionResult {
    /// The test passed.
    Pass,
    /// The test passed but leaked handles. This usually indicates that
    /// a subprocess that inherited standard IO was created, but it didn't shut down when
    /// the test finished.
    Leak {
        /// Whether this leak was treated as a failure.
        ///
        /// Note the difference between `Fail { leaked: true, .. }` and `Leak {
        /// result: LeakTimeoutResult::Fail }`. In the former case, the test
        /// failed and also leaked handles. In the latter case, the test passed
        /// but leaked handles, and configuration indicated that this is a
        /// failure.
        result: LeakTimeoutResult,
    },
    /// The test failed.
    Fail {
        /// The abort status of the test, if any (for example, the signal on Unix).
        failure_status: FailureStatus,

        /// Whether a test leaked handles. If set to true, this usually indicates that
        /// a subprocess that inherited standard IO was created, but it didn't shut down when
        /// the test failed.
        leaked: bool,
    },
    /// An error occurred while executing the test.
    ExecFail,
    /// The test was terminated due to a timeout.
    Timeout {
        /// Whether this timeout was treated as a failure.
        result: SlowTimeoutResult,
    },
}
1083
1084impl ExecutionResult {
1085 /// Returns true if the test was successful.
1086 pub fn is_success(self) -> bool {
1087 match self {
1088 ExecutionResult::Pass
1089 | ExecutionResult::Timeout {
1090 result: SlowTimeoutResult::Pass,
1091 }
1092 | ExecutionResult::Leak {
1093 result: LeakTimeoutResult::Pass,
1094 } => true,
1095 ExecutionResult::Leak {
1096 result: LeakTimeoutResult::Fail,
1097 }
1098 | ExecutionResult::Fail { .. }
1099 | ExecutionResult::ExecFail
1100 | ExecutionResult::Timeout {
1101 result: SlowTimeoutResult::Fail,
1102 } => false,
1103 }
1104 }
1105
1106 /// Returns true if this result represents a test that was terminated by nextest
1107 /// (as opposed to failing naturally).
1108 ///
1109 /// This is used to suppress output spam when immediate termination is active.
1110 ///
1111 /// TODO: This is a heuristic that checks if the test was terminated by SIGTERM (Unix) or
1112 /// job object (Windows). In an edge case, a test could send SIGTERM to itself, which would
1113 /// incorrectly be detected as a nextest-initiated termination. A more robust solution would
1114 /// track which tests were explicitly sent termination signals by nextest.
1115 pub fn is_termination_failure(&self) -> bool {
1116 match self {
1117 #[cfg(unix)]
1118 ExecutionResult::Fail {
1119 failure_status: FailureStatus::Abort(AbortStatus::UnixSignal(libc::SIGTERM)),
1120 ..
1121 } => true,
1122 #[cfg(windows)]
1123 ExecutionResult::Fail {
1124 failure_status: FailureStatus::Abort(AbortStatus::JobObject),
1125 ..
1126 } => true,
1127 _ => false,
1128 }
1129 }
1130
1131 /// Returns a static string representation of the result.
1132 pub fn as_static_str(&self) -> &'static str {
1133 match self {
1134 ExecutionResult::Pass => "pass",
1135 ExecutionResult::Leak { .. } => "leak",
1136 ExecutionResult::Fail { .. } => "fail",
1137 ExecutionResult::ExecFail => "exec-fail",
1138 ExecutionResult::Timeout { .. } => "timeout",
1139 }
1140 }
1141}
1142
/// Failure status: either an exit code or an abort status.
///
/// Part of [`ExecutionResult::Fail`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FailureStatus {
    /// The test exited with a non-zero exit code.
    ExitCode(i32),

    /// The test aborted (see [`AbortStatus`] for the platform-specific forms).
    Abort(AbortStatus),
}
1152
1153impl FailureStatus {
1154 /// Extract the failure status from an `ExitStatus`.
1155 pub fn extract(exit_status: ExitStatus) -> Self {
1156 if let Some(abort_status) = AbortStatus::extract(exit_status) {
1157 FailureStatus::Abort(abort_status)
1158 } else {
1159 FailureStatus::ExitCode(
1160 exit_status
1161 .code()
1162 .expect("if abort_status is None, then code must be present"),
1163 )
1164 }
1165 }
1166}
1167
/// A regular exit code or Windows NT abort status for a test.
///
/// Returned as part of the [`ExecutionResult::Fail`] variant.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum AbortStatus {
    /// The test was aborted due to a signal on Unix.
    ///
    /// The payload is the raw signal number.
    #[cfg(unix)]
    UnixSignal(i32),

    /// The test was determined to have aborted because the high bit was set on Windows.
    #[cfg(windows)]
    WindowsNtStatus(windows_sys::Win32::Foundation::NTSTATUS),

    /// The test was terminated via job object on Windows.
    ///
    /// No NT status value is available in this case.
    #[cfg(windows)]
    JobObject,
}
1185
impl AbortStatus {
    /// Extract the abort status from an [`ExitStatus`].
    ///
    /// Returns `None` if the process did not abort (i.e. it exited with a
    /// regular exit code).
    pub fn extract(exit_status: ExitStatus) -> Option<Self> {
        cfg_if::cfg_if! {
            if #[cfg(unix)] {
                // On Unix, extract the signal if it's found.
                use std::os::unix::process::ExitStatusExt;
                exit_status.signal().map(AbortStatus::UnixSignal)
            } else if #[cfg(windows)] {
                // On Windows, a negative exit code corresponds to an NTSTATUS
                // failure value (the high bit is set).
                exit_status.code().and_then(|code| {
                    (code < 0).then_some(AbortStatus::WindowsNtStatus(code))
                })
            } else {
                // Other platforms have no notion of an abort status.
                None
            }
        }
    }
}
1204
1205impl fmt::Debug for AbortStatus {
1206 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1207 match self {
1208 #[cfg(unix)]
1209 AbortStatus::UnixSignal(signal) => write!(f, "UnixSignal({signal})"),
1210 #[cfg(windows)]
1211 AbortStatus::WindowsNtStatus(status) => write!(f, "WindowsNtStatus({status:x})"),
1212 #[cfg(windows)]
1213 AbortStatus::JobObject => write!(f, "JobObject"),
1214 }
1215 }
1216}
1217
// Note: the order here matters -- it indicates severity of cancellation
/// The reason why a test run is being cancelled.
///
/// `Ord` is derived, so a later (more severe) reason compares greater than an
/// earlier one; keep new variants in severity order.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum CancelReason {
    /// A setup script failed.
    SetupScriptFailure,

    /// A test failed and --no-fail-fast wasn't specified.
    TestFailure,

    /// An error occurred while reporting results.
    ReportError,

    /// The global timeout was exceeded.
    GlobalTimeout,

    /// A test failed and fail-fast with immediate termination was specified.
    TestFailureImmediate,

    /// A termination signal (on Unix, SIGTERM or SIGHUP) was received.
    Signal,

    /// An interrupt (on Unix, Ctrl-C) was received.
    Interrupt,

    /// A second signal was received, and the run is being forcibly killed.
    SecondSignal,
}
1247
1248impl CancelReason {
1249 pub(crate) fn to_static_str(self) -> &'static str {
1250 match self {
1251 CancelReason::SetupScriptFailure => "setup script failure",
1252 CancelReason::TestFailure => "test failure",
1253 CancelReason::ReportError => "reporting error",
1254 CancelReason::GlobalTimeout => "global timeout",
1255 CancelReason::TestFailureImmediate => "test failure",
1256 CancelReason::Signal => "signal",
1257 CancelReason::Interrupt => "interrupt",
1258 CancelReason::SecondSignal => "second signal",
1259 }
1260 }
1261}
/// The kind of unit of work that nextest is executing.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitKind {
    /// A test.
    Test,

    /// A script (e.g. a setup script).
    Script,
}

impl UnitKind {
    pub(crate) const WAITING_ON_TEST_MESSAGE: &str = "waiting on test process";
    pub(crate) const WAITING_ON_SCRIPT_MESSAGE: &str = "waiting on script process";

    pub(crate) const EXECUTING_TEST_MESSAGE: &str = "executing test";
    pub(crate) const EXECUTING_SCRIPT_MESSAGE: &str = "executing script";

    /// Message shown while waiting for this kind of unit's process to exit.
    pub(crate) fn waiting_on_message(&self) -> &'static str {
        if matches!(self, UnitKind::Test) {
            Self::WAITING_ON_TEST_MESSAGE
        } else {
            Self::WAITING_ON_SCRIPT_MESSAGE
        }
    }

    /// Message shown while this kind of unit is executing.
    pub(crate) fn executing_message(&self) -> &'static str {
        if matches!(self, UnitKind::Test) {
            Self::EXECUTING_TEST_MESSAGE
        } else {
            Self::EXECUTING_SCRIPT_MESSAGE
        }
    }
}
1293
/// A response to an information request.
///
/// One variant per kind of unit nextest runs (see [`UnitKind`]).
#[derive(Clone, Debug)]
pub enum InfoResponse<'a> {
    /// A setup script's response.
    SetupScript(SetupScriptInfoResponse<'a>),

    /// A test's response.
    Test(TestInfoResponse<'a>),
}
1303
/// A setup script's response to an information request.
#[derive(Clone, Debug)]
pub struct SetupScriptInfoResponse<'a> {
    /// The stress index of the setup script.
    ///
    /// NOTE(review): presumably `None` when the run is not a stress run --
    /// confirm against the producer in the runner.
    pub stress_index: Option<StressIndex>,

    /// The identifier of the setup script instance.
    pub script_id: ScriptId,

    /// The program to run.
    pub program: String,

    /// The list of arguments to the program.
    pub args: &'a [String],

    /// The state of the setup script.
    pub state: UnitState,

    /// Output obtained from the setup script.
    pub output: ChildExecutionOutput,
}
1325
/// A test's response to an information request.
#[derive(Clone, Debug)]
pub struct TestInfoResponse<'a> {
    /// The stress index of the test.
    ///
    /// NOTE(review): presumably `None` when the run is not a stress run --
    /// confirm against the producer in the runner.
    pub stress_index: Option<StressIndex>,

    /// The test instance that the information is about.
    pub test_instance: TestInstanceId<'a>,

    /// Information about retries.
    pub retry_data: RetryData,

    /// The state of the test.
    pub state: UnitState,

    /// Output obtained from the test.
    pub output: ChildExecutionOutput,
}
1344
/// The current state of a test or script process: running, exiting, or
/// terminating.
///
/// Part of information response requests.
#[derive(Clone, Debug)]
pub enum UnitState {
    /// The unit is currently running.
    Running {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit has been running.
        time_taken: Duration,

        /// `Some` if the test is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// The test has finished running, and is currently in the process of
    /// exiting.
    Exiting {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,

        /// The tentative execution result before leaked status is determined.
        ///
        /// None means that the exit status could not be read, and should be
        /// treated as a failure.
        tentative_result: Option<ExecutionResult>,

        /// How long has been spent waiting for the process to exit.
        waiting_duration: Duration,

        /// How much longer nextest will wait until the test is marked leaky.
        remaining: Duration,
    },

    /// The child process is being terminated by nextest.
    ///
    /// Details of the termination (reason, method, grace period) live in
    /// [`UnitTerminatingState`].
    Terminating(UnitTerminatingState),

    /// The unit has finished running and the process has exited.
    Exited {
        /// The result of executing the unit.
        result: ExecutionResult,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// A delay is being waited out before the next attempt of the test is
    /// started. (Only relevant for tests.)
    DelayBeforeNextAttempt {
        /// The previous execution result.
        previous_result: ExecutionResult,

        /// Whether the previous attempt was marked as slow.
        previous_slow: bool,

        /// How long has been spent waiting so far.
        waiting_duration: Duration,

        /// How much longer nextest will wait until retrying the test.
        remaining: Duration,
    },
}
1422
1423impl UnitState {
1424 /// Returns true if the state has a valid output attached to it.
1425 pub fn has_valid_output(&self) -> bool {
1426 match self {
1427 UnitState::Running { .. }
1428 | UnitState::Exiting { .. }
1429 | UnitState::Terminating(_)
1430 | UnitState::Exited { .. } => true,
1431 UnitState::DelayBeforeNextAttempt { .. } => false,
1432 }
1433 }
1434}
1435
/// The current terminating state of a test or script process.
///
/// Part of [`UnitState::Terminating`].
#[derive(Clone, Debug)]
pub struct UnitTerminatingState {
    /// The process ID.
    pub pid: u32,

    /// The amount of time the unit ran for.
    pub time_taken: Duration,

    /// The reason for the termination.
    pub reason: UnitTerminateReason,

    /// The method by which the process is being terminated.
    pub method: UnitTerminateMethod,

    /// How long has been spent waiting for the process to exit.
    pub waiting_duration: Duration,

    /// How much longer nextest will wait until a kill command is sent to the process.
    pub remaining: Duration,
}
1459
/// The reason for a script or test being forcibly terminated by nextest.
///
/// Part of information response requests.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateReason {
    /// The unit is being terminated due to a test timeout being hit.
    Timeout,

    /// The unit is being terminated due to nextest receiving a signal.
    Signal,

    /// The unit is being terminated due to an interrupt (i.e. Ctrl-C).
    Interrupt,
}

impl fmt::Display for UnitTerminateReason {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // A single lowercase word per reason, suitable for inline display.
        let reason_str = match self {
            UnitTerminateReason::Timeout => "timeout",
            UnitTerminateReason::Signal => "signal",
            UnitTerminateReason::Interrupt => "interrupt",
        };
        f.write_str(reason_str)
    }
}
1484
/// The way in which a script or test is being forcibly terminated by nextest.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateMethod {
    /// The unit is being terminated by sending a signal.
    #[cfg(unix)]
    Signal(UnitTerminateSignal),

    /// The unit is being terminated by terminating the Windows job object.
    #[cfg(windows)]
    JobObject,

    /// The unit is being waited on to exit. A termination signal will be sent
    /// if it doesn't exit within the grace period.
    ///
    /// On Windows, this occurs when nextest receives Ctrl-C. In that case, it
    /// is assumed that tests will also receive Ctrl-C and exit on their own. If
    /// tests do not exit within the grace period configured for them, their
    /// corresponding job objects will be terminated.
    #[cfg(windows)]
    Wait,

    /// A fake method used for testing.
    ///
    /// Only compiled into test builds.
    #[cfg(test)]
    Fake,
}
1510
#[cfg(unix)]
/// The signal that is or was sent to terminate a script or test.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitTerminateSignal {
    /// The unit is being terminated by sending a SIGINT.
    Interrupt,

    /// The unit is being terminated by sending a SIGTERM signal.
    Term,

    /// The unit is being terminated by sending a SIGHUP signal.
    Hangup,

    /// The unit is being terminated by sending a SIGQUIT signal.
    Quit,

    /// The unit is being terminated by sending a SIGKILL signal.
    Kill,
}

#[cfg(unix)]
impl fmt::Display for UnitTerminateSignal {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Conventional uppercase signal names.
        let name = match self {
            UnitTerminateSignal::Interrupt => "SIGINT",
            UnitTerminateSignal::Term => "SIGTERM",
            UnitTerminateSignal::Hangup => "SIGHUP",
            UnitTerminateSignal::Quit => "SIGQUIT",
            UnitTerminateSignal::Kill => "SIGKILL",
        };
        f.write_str(name)
    }
}
1543
#[cfg(test)]
mod tests {
    use super::*;

    // Exercises `RunStats::summarize_final` across the success, cancelled,
    // failed, and no-tests-run outcomes.
    #[test]
    fn test_is_success() {
        // No tests and no setup scripts at all.
        assert_eq!(
            RunStats::default().summarize_final(),
            FinalRunStats::NoTestsRun,
            "empty run => no tests run"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "initial run count = final run count => success"
        );
        // Fewer finished than started tests is reported as a cancellation.
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 41,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled {
                reason: None,
                kind: RunStatsFailureKind::Test {
                    initial_run_count: 42,
                    not_run: 1
                }
            },
            "initial run count > final run count => cancelled"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "exec failed => failure"
        );
        // NOTE(review): the trailing format arguments below duplicate the two
        // values already being compared; they add no information on failure
        // and could be removed.
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                failed_timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "timed out => failure {:?} {:?}",
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                failed_timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                skipped: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "skipped => not considered a failure"
        );

        // Setup-script outcomes: an unfinished script counts as a
        // cancellation, not a plain failure.
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled {
                reason: None,
                kind: RunStatsFailureKind::SetupScript,
            },
            "setup script failed => failure"
        );

        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script exec failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script timed out => failure"
        );
        // Scripts all passing without any tests still reports NoTestsRun.
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_passed: 2,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::NoTestsRun,
            "setup scripts passed => success, but no tests run"
        );
    }
}