use super::{
base::BaseApp,
filter::TestBuildFilter,
value_enums::{
FinalStatusLevelOpt, MessageFormat, NoTestsBehaviorOpt, ShowProgressOpt, StatusLevelOpt,
TestOutputDisplayOpt,
},
};
use crate::{
ExpectedError, Result,
dispatch::helpers::{build_filtersets, final_stats_to_error, resolve_user_config},
output::OutputWriter,
reuse_build::ReuseBuildOpts,
};
use camino::Utf8Path;
use clap::{Args, builder::BoolishValueParser};
use nextest_filtering::{FiltersetKind, ParseContext};
use nextest_runner::{
cargo_config::EnvironmentMap,
config::{
core::ConfigExperimental,
elements::{MaxFail, RetryPolicy, TestThreads},
},
helpers::plural,
input::InputHandlerKind,
list::{BinaryList, TestExecuteContext, TestList},
record::{
ComputedRerunInfo, PortableRecording, RecordOpts, RecordReader, RecordRetentionPolicy,
RecordSession, RecordSessionConfig, RerunRootInfo, RunIdOrRecordingSelector, RunIdSelector,
RunStore, STORE_FORMAT_VERSION, Styles as RecordStyles, records_state_dir,
},
reporter::{
MaxProgressRunning, ReporterBuilder, ShowProgress, ShowTerminalProgress, TestOutputDisplay,
events::{FinalRunStats, RunStats},
structured,
},
run_mode::NextestRunMode,
runner::{
DebuggerCommand, Interceptor, StressCondition, StressCount, TestRunnerBuilder,
TracerCommand, VersionEnvVars, configure_handle_inheritance,
},
signal::SignalHandlerKind,
test_filter::TestFilter,
test_output::CaptureStrategy,
user_config::{UserConfigExperimental, elements::UiConfig},
};
use quick_junit::ReportUuid;
use std::{collections::{BTreeMap, BTreeSet}, io::IsTerminal, sync::Arc, time::Duration};
use tracing::{debug, info, warn};
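
/// Command-line options for running tests.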
#[derive(Debug, Args)]
pub(crate) struct RunOpts {
#[clap(flatten)]
pub(crate) cargo_options: crate::cargo_cli::CargoOptions,
#[arg(
long,
short = 'R',
value_name = "RUN_ID_OR_RECORDING",
alias = "run-id",
help_heading = "Filter options"
)]
pub(crate) rerun: Option<RunIdOrRecordingSelector>,
#[clap(flatten)]
pub(crate) build_filter: TestBuildFilter,
#[clap(flatten)]
pub(crate) runner_opts: TestRunnerOpts,
#[arg(
long,
name = "no-capture",
alias = "nocapture",
help_heading = "Runner options",
display_order = 100
)]
pub(crate) no_capture: bool,
#[clap(flatten)]
pub(crate) reporter_opts: ReporterOpts,
#[clap(flatten)]
pub(crate) reuse_build: ReuseBuildOpts,
}
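
/// Command-line options for running benchmarks.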
#[derive(Debug, Args)]
pub(crate) struct BenchOpts {
#[clap(flatten)]
pub(crate) cargo_options: crate::cargo_cli::CargoOptions,
#[clap(flatten)]
pub(crate) build_filter: TestBuildFilter,
#[clap(flatten)]
pub(crate) runner_opts: BenchRunnerOpts,
#[arg(
long,
name = "no-capture",
alias = "nocapture",
help_heading = "Runner options",
display_order = 100
)]
pub(crate) no_capture: bool,
#[clap(flatten)]
pub(crate) reporter_opts: BenchReporterOpts,
}
#[derive(Debug, Default, Args)]
#[command(next_help_heading = "Runner options")]
pub struct TestRunnerOpts {
#[arg(long, name = "no-run")]
pub(crate) no_run: bool,
#[arg(
long,
short = 'j',
visible_alias = "jobs",
value_name = "N",
env = "NEXTEST_TEST_THREADS",
allow_negative_numbers = true
)]
test_threads: Option<TestThreads>,
#[arg(long, env = "NEXTEST_RETRIES", value_name = "N")]
retries: Option<u32>,
#[arg(
long,
visible_alias = "ff",
name = "fail-fast",
// TODO: It would be nice to warn rather than error if fail-fast is used
// with no-run, so that this matches the other options like
// test-threads. But there seem to be issues with that: clap 4.5 doesn't
// appear to like `Option<bool>` very much. With `ArgAction::SetTrue` it
// always sets the value to false or true rather than leaving it unset.
conflicts_with = "no-run"
)]
fail_fast: bool,
#[arg(
long,
visible_alias = "nff",
name = "no-fail-fast",
conflicts_with = "no-run",
overrides_with = "fail-fast"
)]
no_fail_fast: bool,
#[arg(
long,
name = "max-fail",
value_name = "N[:MODE]",
conflicts_with_all = &["no-run", "fail-fast", "no-fail-fast"],
)]
max_fail: Option<MaxFail>,
#[clap(flatten)]
pub(crate) interceptor: InterceptorOpt,
#[arg(long, value_enum, value_name = "ACTION", env = "NEXTEST_NO_TESTS")]
pub(crate) no_tests: Option<NoTestsBehaviorOpt>,
#[clap(flatten)]
pub(crate) stress: StressOptions,
}
impl TestRunnerOpts {
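    /// Converts these options into a `TestRunnerBuilder`, warning about
    /// options that will be ignored. Returns `None` if `--no-run` was
    /// passed.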
pub(crate) fn to_builder(&self, cap_strat: CaptureStrategy) -> Option<TestRunnerBuilder> {
if self.test_threads.is_some()
&& let Some(reasons) =
no_run_no_capture_reasons(self.no_run, cap_strat == CaptureStrategy::None)
{
warn!("ignoring --test-threads because {reasons}");
}
if self.retries.is_some() && self.no_run {
warn!("ignoring --retries because --no-run is specified");
}
if self.no_tests.is_some() && self.no_run {
warn!("ignoring --no-tests because --no-run is specified");
}
if self.no_run {
return None;
}
let mut builder = TestRunnerBuilder::default();
builder.set_capture_strategy(cap_strat);
if let Some(retries) = self.retries {
builder.set_retries(RetryPolicy::new_without_delay(retries));
}
if let Some(max_fail) = self.max_fail {
builder.set_max_fail(max_fail);
            debug!(?max_fail, "set max fail");
} else if self.no_fail_fast {
builder.set_max_fail(MaxFail::from_fail_fast(false));
debug!("set max fail via from_fail_fast(false)");
} else if self.fail_fast {
builder.set_max_fail(MaxFail::from_fail_fast(true));
debug!("set max fail via from_fail_fast(true)");
}
if let Some(test_threads) = self.test_threads {
builder.set_test_threads(test_threads);
}
if let Some(condition) = self.stress.condition.as_ref() {
builder.set_stress_condition(condition.stress_condition());
}
builder.set_interceptor(self.interceptor.to_interceptor());
Some(builder)
}
}
#[derive(Debug, Default, Args)]
#[command(next_help_heading = "Runner options")]
pub(crate) struct BenchRunnerOpts {
#[arg(long, name = "no-run")]
pub(crate) no_run: bool,
#[arg(
long,
visible_alias = "ff",
name = "fail-fast",
// TODO: It would be nice to warn rather than error if fail-fast is used
// with no-run, so that this matches the other options like
// test-threads. But there seem to be issues with that: clap 4.5 doesn't
// appear to like `Option<bool>` very much. With `ArgAction::SetTrue` it
// always sets the value to false or true rather than leaving it unset.
conflicts_with = "no-run"
)]
fail_fast: bool,
#[arg(
long,
visible_alias = "nff",
name = "no-fail-fast",
conflicts_with = "no-run",
overrides_with = "fail-fast"
)]
no_fail_fast: bool,
#[arg(
long,
name = "max-fail",
value_name = "N",
conflicts_with_all = &["no-run", "fail-fast", "no-fail-fast"],
)]
max_fail: Option<MaxFail>,
#[arg(long, value_enum, value_name = "ACTION", env = "NEXTEST_NO_TESTS")]
pub(crate) no_tests: Option<NoTestsBehaviorOpt>,
#[clap(flatten)]
pub(crate) stress: StressOptions,
#[clap(flatten)]
pub(crate) interceptor: InterceptorOpt,
}
impl BenchRunnerOpts {
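    /// Converts these options into a `TestRunnerBuilder` for benchmarks.
    /// Returns `None` if `--no-run` was passed; benchmarks always run on a
    /// single test thread.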
pub(crate) fn to_builder(&self, cap_strat: CaptureStrategy) -> Option<TestRunnerBuilder> {
if self.no_tests.is_some() && self.no_run {
warn!("ignoring --no-tests because --no-run is specified");
}
if self.no_run {
return None;
}
let mut builder = TestRunnerBuilder::default();
builder.set_capture_strategy(cap_strat);
if let Some(max_fail) = self.max_fail {
builder.set_max_fail(max_fail);
            debug!(?max_fail, "set max fail");
} else if self.no_fail_fast {
builder.set_max_fail(MaxFail::from_fail_fast(false));
debug!("set max fail via from_fail_fast(false)");
} else if self.fail_fast {
builder.set_max_fail(MaxFail::from_fail_fast(true));
debug!("set max fail via from_fail_fast(true)");
}
builder.set_test_threads(TestThreads::Count(1));
if let Some(condition) = self.stress.condition.as_ref() {
builder.set_stress_condition(condition.stress_condition());
}
builder.set_interceptor(self.interceptor.to_interceptor());
Some(builder)
}
}
#[derive(Debug, Default, Args)]
#[group(id = "interceptor", multiple = false)]
pub(crate) struct InterceptorOpt {
#[arg(long, value_name = "DEBUGGER", conflicts_with_all = ["stress_condition", "no-run"])]
pub(crate) debugger: Option<DebuggerCommand>,
#[arg(long, value_name = "TRACER", conflicts_with_all = ["stress_condition", "no-run"])]
pub(crate) tracer: Option<TracerCommand>,
}
impl InterceptorOpt {
pub(crate) fn is_active(&self) -> bool {
self.debugger.is_some() || self.tracer.is_some()
}
pub(crate) fn to_interceptor(&self) -> Interceptor {
match (&self.debugger, &self.tracer) {
(Some(debugger), None) => Interceptor::Debugger(debugger.clone()),
(None, Some(tracer)) => Interceptor::Tracer(tracer.clone()),
(None, None) => Interceptor::None,
(Some(_), Some(_)) => {
unreachable!("clap group ensures debugger and tracer are mutually exclusive")
}
}
}
}
#[derive(Debug, Default, Args)]
#[command(next_help_heading = "Stress testing options")]
pub(crate) struct StressOptions {
#[clap(flatten)]
pub(crate) condition: Option<StressConditionOpt>,
}
#[derive(Clone, Debug, Default, Args)]
#[group(id = "stress_condition", multiple = false)]
pub(crate) struct StressConditionOpt {
#[arg(long, value_name = "COUNT")]
stress_count: Option<StressCount>,
#[arg(long, value_name = "DURATION", value_parser = non_zero_duration)]
stress_duration: Option<Duration>,
}
impl StressConditionOpt {
fn stress_condition(&self) -> StressCondition {
if let Some(count) = self.stress_count {
StressCondition::Count(count)
} else if let Some(duration) = self.stress_duration {
StressCondition::Duration(duration)
} else {
unreachable!(
"if StressOptions::condition is Some, \
one of these should be set"
)
}
}
}
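
/// Clap value parser for `--stress-duration`: accepts a `humantime`-style
/// duration and rejects zero.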
fn non_zero_duration(input: &str) -> std::result::Result<Duration, String> {
let duration = humantime::parse_duration(input).map_err(|error| error.to_string())?;
if duration.is_zero() {
Err("duration must be non-zero".to_string())
} else {
Ok(duration)
}
}
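
/// Returns a human-readable reason for ignoring runner options when
/// `--no-run` and/or `--no-capture` is set, or `None` if neither applies.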
fn no_run_no_capture_reasons(no_run: bool, no_capture: bool) -> Option<&'static str> {
match (no_run, no_capture) {
(true, true) => Some("--no-run and --no-capture are specified"),
(true, false) => Some("--no-run is specified"),
(false, true) => Some("--no-capture is specified"),
(false, false) => None,
}
}
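
/// Validates test counts when a debugger or tracer interceptor is active:
/// interceptors require exactly one test to run, so zero or multiple
/// matching tests is an error. Shared by the test and benchmark execution
/// paths below.
fn check_interceptor_test_count(
    interceptor: &InterceptorOpt,
    test_list: &TestList<'_>,
    mode: NextestRunMode,
) -> Result<(), ExpectedError> {
    if !interceptor.is_active() {
        return Ok(());
    }
    let test_count = test_list.run_count();
    if test_count == 0 {
        if let Some(debugger) = &interceptor.debugger {
            Err(ExpectedError::DebuggerNoTests {
                debugger: debugger.clone(),
                mode,
            })
        } else if let Some(tracer) = &interceptor.tracer {
            Err(ExpectedError::TracerNoTests {
                tracer: tracer.clone(),
                mode,
            })
        } else {
            unreachable!("interceptor is active but neither debugger nor tracer is set");
        }
    } else if test_count > 1 {
        // Include a sample of up to 8 matching tests in the error message.
        let test_instances: Vec<_> = test_list
            .iter_tests()
            .filter(|test| test.test_info.filter_match.is_match())
            .take(8)
            .map(|test| test.id().to_owned())
            .collect();
        if let Some(debugger) = &interceptor.debugger {
            Err(ExpectedError::DebuggerTooManyTests {
                debugger: debugger.clone(),
                mode,
                test_count,
                test_instances,
            })
        } else if let Some(tracer) = &interceptor.tracer {
            Err(ExpectedError::TracerTooManyTests {
                tracer: tracer.clone(),
                mode,
                test_count,
                test_instances,
            })
        } else {
            unreachable!("interceptor is active but neither debugger nor tracer is set");
        }
    } else {
        Ok(())
    }
}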
#[derive(Debug, Default, Args)]
pub(crate) struct ReporterCommonOpts {
#[arg(
long,
value_enum,
value_name = "WHEN",
env = "NEXTEST_FAILURE_OUTPUT",
help_heading = "Reporter options"
)]
pub(crate) failure_output: Option<TestOutputDisplayOpt>,
#[arg(
long,
value_enum,
value_name = "WHEN",
env = "NEXTEST_SUCCESS_OUTPUT",
help_heading = "Reporter options"
)]
pub(crate) success_output: Option<TestOutputDisplayOpt>,
#[arg(
long,
value_enum,
value_name = "LEVEL",
env = "NEXTEST_STATUS_LEVEL",
help_heading = "Reporter options"
)]
pub(crate) status_level: Option<StatusLevelOpt>,
#[arg(
long,
value_enum,
value_name = "LEVEL",
env = "NEXTEST_FINAL_STATUS_LEVEL",
help_heading = "Reporter options"
)]
pub(crate) final_status_level: Option<FinalStatusLevelOpt>,
#[arg(
long,
env = "NEXTEST_NO_OUTPUT_INDENT",
value_parser = BoolishValueParser::new(),
help_heading = "Reporter options"
)]
pub(crate) no_output_indent: bool,
}
impl ReporterCommonOpts {
pub(crate) fn apply_to_builder(&self, builder: &mut ReporterBuilder, resolved_ui: &UiConfig) {
let no_output_indent = self.no_output_indent || !resolved_ui.output_indent;
if let Some(failure_output) = self.failure_output {
builder.set_failure_output(failure_output.into());
}
if let Some(success_output) = self.success_output {
builder.set_success_output(success_output.into());
}
if let Some(status_level) = self.status_level {
builder.set_status_level(status_level.into());
}
if let Some(final_status_level) = self.final_status_level {
builder.set_final_status_level(final_status_level.into());
}
builder.set_no_output_indent(no_output_indent);
}
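
    /// Like `apply_to_builder`, but for the replay reporter: with
    /// `no_capture` set, failure and success output default to
    /// `TestOutputDisplay::Immediate` and output indenting is disabled.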
pub(crate) fn apply_to_replay_builder(
&self,
builder: &mut nextest_runner::record::ReplayReporterBuilder,
resolved_ui: &UiConfig,
no_capture: bool,
) {
let no_output_indent = self.no_output_indent || no_capture || !resolved_ui.output_indent;
if let Some(failure_output) = self.failure_output {
builder.set_failure_output(failure_output.into());
} else if no_capture {
builder.set_failure_output(TestOutputDisplay::Immediate);
}
if let Some(success_output) = self.success_output {
builder.set_success_output(success_output.into());
} else if no_capture {
builder.set_success_output(TestOutputDisplay::Immediate);
}
if let Some(status_level) = self.status_level {
builder.set_status_level(status_level.into());
}
if let Some(final_status_level) = self.final_status_level {
builder.set_final_status_level(final_status_level.into());
}
builder.set_no_output_indent(no_output_indent);
}
}
#[derive(Debug, Default, Args)]
#[command(next_help_heading = "Reporter options")]
pub(crate) struct ReporterOpts {
#[command(flatten)]
pub(crate) common: ReporterCommonOpts,
#[arg(long, env = "NEXTEST_SHOW_PROGRESS")]
show_progress: Option<ShowProgressOpt>,
#[arg(long, env = "NEXTEST_HIDE_PROGRESS_BAR", value_parser = BoolishValueParser::new())]
hide_progress_bar: bool,
#[arg(long, env = "NEXTEST_NO_INPUT_HANDLER", value_parser = BoolishValueParser::new())]
pub(crate) no_input_handler: bool,
#[arg(
long = "max-progress-running",
value_name = "N",
env = "NEXTEST_MAX_PROGRESS_RUNNING"
)]
max_progress_running: Option<MaxProgressRunning>,
#[arg(
long,
name = "message-format",
value_enum,
value_name = "FORMAT",
env = "NEXTEST_MESSAGE_FORMAT"
)]
pub(crate) message_format: Option<MessageFormat>,
#[arg(
long,
requires = "message-format",
value_name = "VERSION",
env = "NEXTEST_MESSAGE_FORMAT_VERSION"
)]
pub(crate) message_format_version: Option<String>,
}
impl ReporterOpts {
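    /// Builds a `ReporterBuilder`, warning about reporter options that are
    /// ignored under `--no-run` or uncaptured output, and resolving progress
    /// settings against the resolved user config.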
pub(crate) fn to_builder(
&self,
no_run: bool,
no_capture: bool,
should_colorize: bool,
resolved_ui: &UiConfig,
) -> ReporterBuilder {
if no_run && no_capture {
warn!("ignoring --no-capture because --no-run is specified");
}
let reasons = no_run_no_capture_reasons(no_run, no_capture);
if self.common.failure_output.is_some()
&& let Some(reasons) = reasons
{
warn!("ignoring --failure-output because {}", reasons);
}
if self.common.success_output.is_some()
&& let Some(reasons) = reasons
{
warn!("ignoring --success-output because {}", reasons);
}
if self.common.status_level.is_some() && no_run {
warn!("ignoring --status-level because --no-run is specified");
}
if self.common.final_status_level.is_some() && no_run {
warn!("ignoring --final-status-level because --no-run is specified");
}
if self.message_format.is_some() && no_run {
warn!("ignoring --message-format because --no-run is specified");
}
if self.message_format_version.is_some() && no_run {
warn!("ignoring --message-format-version because --no-run is specified");
}
let show_progress = match (self.show_progress, self.hide_progress_bar) {
(Some(show_progress), true) => {
warn!("ignoring --hide-progress-bar because --show-progress is specified");
show_progress.into()
}
(Some(show_progress), false) => show_progress.into(),
(None, true) => ShowProgress::None,
(None, false) => resolved_ui.show_progress.into(),
};
let max_progress_running = self
.max_progress_running
.unwrap_or(resolved_ui.max_progress_running);
let no_output_indent = self.common.no_output_indent || !resolved_ui.output_indent;
debug!(
?show_progress,
?max_progress_running,
?no_output_indent,
"resolved reporter UI settings"
);
let mut builder = ReporterBuilder::default();
builder.set_no_capture(no_capture);
builder.set_colorize(should_colorize);
self.common.apply_to_builder(&mut builder, resolved_ui);
builder.set_show_progress(show_progress);
builder.set_max_progress_running(max_progress_running);
builder
}
}
#[derive(Debug, Default, Args)]
#[command(next_help_heading = "Reporter options")]
pub(crate) struct BenchReporterOpts {
#[arg(long, env = "NEXTEST_SHOW_PROGRESS")]
show_progress: Option<ShowProgressOpt>,
#[arg(long, env = "NEXTEST_NO_INPUT_HANDLER", value_parser = BoolishValueParser::new())]
pub(crate) no_input_handler: bool,
}
impl BenchReporterOpts {
pub(crate) fn to_builder(
&self,
should_colorize: bool,
resolved_ui: &UiConfig,
) -> ReporterBuilder {
let mut builder = ReporterBuilder::default();
builder.set_no_capture(true);
builder.set_colorize(should_colorize);
        let show_progress = self
            .show_progress
            .map(ShowProgress::from)
            .unwrap_or_else(|| resolved_ui.show_progress.into());
builder.set_show_progress(show_progress);
builder
}
}
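
/// Warns if the obsolete `NEXTEST_EXPERIMENTAL_FILTER_EXPR` environment
/// variable is still set: filtersets are no longer experimental.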
fn check_experimental_filtering(_output: crate::output::OutputContext) {
const EXPERIMENTAL_ENV: &str = "NEXTEST_EXPERIMENTAL_FILTER_EXPR";
if std::env::var(EXPERIMENTAL_ENV).is_ok() {
warn!(
"filtersets are no longer experimental: NEXTEST_EXPERIMENTAL_FILTER_EXPR does not need to be set"
);
}
}
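
/// Captures the subset of the current process environment worth persisting
/// in a recording.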
fn capture_env_vars_for_recording() -> BTreeMap<String, String> {
filter_env_vars_for_recording(std::env::vars())
}
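
/// Keeps `NEXTEST_*` and `CARGO_*` variables and drops any variable ending
/// in `_TOKEN` (such as `CARGO_REGISTRY_TOKEN`), presumably to keep
/// credentials out of recordings.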
pub(super) fn filter_env_vars_for_recording(
vars: impl Iterator<Item = (String, String)>,
) -> BTreeMap<String, String> {
vars.filter(|(key, _)| {
(key.starts_with("NEXTEST_") || key.starts_with("CARGO_")) && !key.ends_with("_TOKEN")
})
.collect()
}
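
/// Shared state for the run and bench commands: the base app plus the
/// build/test filter.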
pub(crate) struct App {
pub(crate) base: BaseApp,
pub(crate) build_filter: TestBuildFilter,
}
impl App {
pub(crate) fn new(base: BaseApp, build_filter: TestBuildFilter) -> Result<Self> {
check_experimental_filtering(base.output);
Ok(Self { base, build_filter })
}
pub(crate) fn build_test_list(
&self,
ctx: &TestExecuteContext<'_>,
binary_list: Arc<BinaryList>,
test_filter: &TestFilter,
profile: &nextest_runner::config::core::EvaluatableProfile<'_>,
) -> Result<TestList<'_>> {
let env = EnvironmentMap::new(&self.base.cargo_configs);
self.build_filter.compute_test_list(
ctx,
self.base.graph(),
self.base.workspace_root.clone(),
binary_list,
test_filter,
env,
profile,
&self.base.reuse_build,
)
}
pub(crate) fn exec_run(
&self,
no_capture: bool,
rerun: Option<&RunIdOrRecordingSelector>,
runner_opts: &TestRunnerOpts,
reporter_opts: &ReporterOpts,
cli_args: Vec<String>,
output_writer: &mut OutputWriter,
) -> Result<()> {
let pcx = ParseContext::new(self.base.graph());
        let (version_only_config, config) = self.base.load_config(&pcx, &BTreeSet::new())?;
let profile = self.base.load_profile(&config)?;
let mut structured_reporter = structured::StructuredReporter::new();
let message_format = reporter_opts.message_format.unwrap_or_default();
match message_format {
MessageFormat::Human => {}
MessageFormat::LibtestJson | MessageFormat::LibtestJsonPlus => {
const EXPERIMENTAL_ENV: &str = "NEXTEST_EXPERIMENTAL_LIBTEST_JSON";
if std::env::var(EXPERIMENTAL_ENV).as_deref() != Ok("1") {
return Err(ExpectedError::ExperimentalFeatureNotEnabled {
name: "libtest JSON output",
var_name: EXPERIMENTAL_ENV,
});
}
let libtest = structured::LibtestReporter::new(
reporter_opts.message_format_version.as_deref(),
if matches!(message_format, MessageFormat::LibtestJsonPlus) {
structured::EmitNextestObject::Yes
} else {
structured::EmitNextestObject::No
},
)?;
structured_reporter.set_libtest(libtest);
}
};
let cap_strat = if no_capture || runner_opts.interceptor.is_active() {
CaptureStrategy::None
} else if matches!(message_format, MessageFormat::Human) {
CaptureStrategy::Split
} else {
CaptureStrategy::Combined
};
let should_colorize = self
.base
.output
.color
.should_colorize(supports_color::Stream::Stderr);
let resolved_user_config = resolve_user_config(
&self.base.build_platforms.host.platform,
self.base.early_args.user_config_location(),
)?;
if rerun.is_some()
&& !resolved_user_config.is_experimental_enabled(UserConfigExperimental::Record)
{
return Err(ExpectedError::ExperimentalFeatureNotEnabled {
name: "rerunning tests (-R/--rerun)",
var_name: UserConfigExperimental::Record.env_var(),
});
}
let runner_builder = runner_opts.to_builder(cap_strat);
let mut reporter_builder = reporter_opts.to_builder(
runner_opts.no_run,
no_capture || runner_opts.interceptor.is_active(),
should_colorize,
&resolved_user_config.ui,
);
reporter_builder.set_verbose(self.base.output.verbose);
let filter_exprs =
build_filtersets(&pcx, &self.build_filter.filterset, FiltersetKind::Test)?;
let mut test_filter = self
.build_filter
.make_test_filter(NextestRunMode::Test, filter_exprs)?;
let (rerun_state, expected_outstanding) = match rerun {
Some(RunIdOrRecordingSelector::RunId(selector)) => {
let (rerun_state, outstanding_tests) = self.resolve_rerun(selector)?;
let expected = outstanding_tests.expected_test_ids();
test_filter.set_outstanding_tests(outstanding_tests);
(Some(rerun_state), Some(expected))
}
Some(RunIdOrRecordingSelector::RecordingPath(path)) => {
let (rerun_state, outstanding_tests) = self.resolve_rerun_from_archive(path)?;
let expected = outstanding_tests.expected_test_ids();
test_filter.set_outstanding_tests(outstanding_tests);
(Some(rerun_state), Some(expected))
}
None => (None, None),
};
let rerun_build_scope = rerun_state
.as_ref()
.map(|s| s.root_info.build_scope_args.as_slice());
let binary_list = self
.base
.build_binary_list_with_rerun("test", rerun_build_scope)?;
        let build_platforms = binary_list.rust_build_meta.build_platforms.clone();
        let double_spawn = self.base.load_double_spawn();
        let target_runner = self.base.load_runner(&build_platforms);
        let profile = profile.apply_build_platforms(&build_platforms);
let ctx = TestExecuteContext {
profile_name: profile.name(),
double_spawn,
target_runner,
};
let test_list = self.build_test_list(&ctx, binary_list, &test_filter, &profile)?;
        check_interceptor_test_count(
            &runner_opts.interceptor,
            &test_list,
            NextestRunMode::Test,
        )?;
let output = output_writer.reporter_output();
let signal_handler = if runner_opts.interceptor.debugger.is_some() {
SignalHandlerKind::DebuggerMode
} else {
SignalHandlerKind::Standard
};
let input_handler =
if reporter_opts.no_input_handler || runner_opts.interceptor.debugger.is_some() {
InputHandlerKind::Noop
} else if resolved_user_config.ui.input_handler {
InputHandlerKind::Standard
} else {
InputHandlerKind::Noop
};
let Some(mut runner_builder) = runner_builder else {
return Ok(());
};
if let Some(expected) = expected_outstanding {
runner_builder.set_expected_outstanding(expected);
}
let nextest_version_config = version_only_config.nextest_version();
runner_builder.set_version_env_vars(VersionEnvVars {
current_version: self.base.current_version.clone(),
required_version: nextest_version_config.required.version().cloned(),
recommended_version: nextest_version_config.recommended.version().cloned(),
});
let cli_args_for_recording = cli_args.clone();
let runner = runner_builder.build(
&test_list,
&profile,
cli_args,
signal_handler,
input_handler,
double_spawn.clone(),
target_runner.clone(),
)?;
let (recording_session, run_id_unique_prefix) = if resolved_user_config
.is_experimental_enabled(UserConfigExperimental::Record)
&& resolved_user_config.record.enabled
{
let env_vars_for_recording = capture_env_vars_for_recording();
let outstanding_tests = test_filter.into_rerun_info();
let rerun_info = if let Some(outstanding) = outstanding_tests {
let rerun_state =
rerun_state.expect("rerun_state is Some iff outstanding_tests is Some");
Some(outstanding.into_rerun_info(rerun_state.parent_run_id, rerun_state.root_info))
} else {
None
};
let config = RecordSessionConfig {
workspace_root: &self.base.workspace_root,
run_id: runner.run_id(),
nextest_version: self.base.current_version.clone(),
started_at: runner.started_at().fixed_offset(),
cli_args: cli_args_for_recording,
build_scope_args: self.base.build_scope_args(),
env_vars: env_vars_for_recording,
max_output_size: resolved_user_config.record.max_output_size,
rerun_info,
};
match RecordSession::setup(config) {
Ok(setup) => {
let record = structured::RecordReporter::new(setup.recorder);
let opts = RecordOpts::new(test_list.mode());
record.write_meta(
self.base.cargo_metadata_json.clone(),
test_list.to_summary(),
opts,
);
structured_reporter.set_record(record);
(Some(setup.session), Some(setup.run_id_unique_prefix))
}
Err(err) => match err.disabled_error() {
Some(reason) => {
warn!("recording disabled: {reason}");
(None, None)
}
None => return Err(ExpectedError::RecordSessionSetupError { err }),
},
}
} else {
(None, None)
};
let show_term_progress = ShowTerminalProgress::from_cargo_configs(
&self.base.cargo_configs,
std::io::stderr().is_terminal(),
);
let mut reporter = reporter_builder.build(
&test_list,
&profile,
show_term_progress,
output,
structured_reporter,
);
if let Some(prefix) = run_id_unique_prefix {
reporter.set_run_id_unique_prefix(prefix);
}
configure_handle_inheritance(no_capture)?;
let run_stats = runner.try_execute(|event| reporter.report_event(event))?;
let reporter_stats = reporter.finish();
let outstanding_not_seen_count = reporter_stats
.run_finished
.and_then(|rf| rf.outstanding_not_seen_count);
let rerun_available = recording_session.is_some();
let result = final_result(
NextestRunMode::Test,
run_stats,
runner_opts.no_tests,
outstanding_not_seen_count,
rerun_available,
);
let exit_code = result.as_ref().err().map_or(0, |e| e.process_exit_code());
if let Some(session) = recording_session {
let policy = RecordRetentionPolicy::from(&resolved_user_config.record);
let mut styles = RecordStyles::default();
if should_colorize {
styles.colorize();
}
session
.finalize(
reporter_stats.recording_sizes,
reporter_stats.run_finished,
exit_code,
&policy,
)
.log(&styles);
}
self.base
.check_version_config_final(version_only_config.nextest_version())?;
result
}
pub(crate) fn exec_bench(
&self,
runner_opts: &BenchRunnerOpts,
reporter_opts: &BenchReporterOpts,
cli_args: Vec<String>,
output_writer: &mut OutputWriter,
) -> Result<()> {
let pcx = ParseContext::new(self.base.graph());
let (version_only_config, config) = self.base.load_config(
&pcx,
&[ConfigExperimental::Benchmarks].into_iter().collect(),
)?;
let profile = self.base.load_profile(&config)?;
let mut structured_reporter = structured::StructuredReporter::new();
let cap_strat = CaptureStrategy::None;
let should_colorize = self
.base
.output
.color
.should_colorize(supports_color::Stream::Stderr);
let resolved_user_config = resolve_user_config(
&self.base.build_platforms.host.platform,
self.base.early_args.user_config_location(),
)?;
let runner_builder = runner_opts.to_builder(cap_strat);
let mut reporter_builder =
reporter_opts.to_builder(should_colorize, &resolved_user_config.ui);
reporter_builder.set_verbose(self.base.output.verbose);
let filter_exprs =
build_filtersets(&pcx, &self.build_filter.filterset, FiltersetKind::Test)?;
let test_filter = self
.build_filter
.make_test_filter(NextestRunMode::Benchmark, filter_exprs)?;
let binary_list = self.base.build_binary_list("bench")?;
        let build_platforms = binary_list.rust_build_meta.build_platforms.clone();
        let double_spawn = self.base.load_double_spawn();
        let target_runner = self.base.load_runner(&build_platforms);
        let profile = profile.apply_build_platforms(&build_platforms);
let ctx = TestExecuteContext {
profile_name: profile.name(),
double_spawn,
target_runner,
};
let test_list = self.build_test_list(&ctx, binary_list, &test_filter, &profile)?;
        check_interceptor_test_count(
            &runner_opts.interceptor,
            &test_list,
            NextestRunMode::Benchmark,
        )?;
let output = output_writer.reporter_output();
let signal_handler = if runner_opts.interceptor.debugger.is_some() {
SignalHandlerKind::DebuggerMode
} else {
SignalHandlerKind::Standard
};
let input_handler =
if reporter_opts.no_input_handler || runner_opts.interceptor.debugger.is_some() {
InputHandlerKind::Noop
} else if resolved_user_config.ui.input_handler {
InputHandlerKind::Standard
} else {
InputHandlerKind::Noop
};
let Some(mut runner_builder) = runner_builder else {
return Ok(());
};
let nextest_version_config = version_only_config.nextest_version();
runner_builder.set_version_env_vars(VersionEnvVars {
current_version: self.base.current_version.clone(),
required_version: nextest_version_config.required.version().cloned(),
recommended_version: nextest_version_config.recommended.version().cloned(),
});
let cli_args_for_recording = cli_args.clone();
let runner = runner_builder.build(
&test_list,
&profile,
cli_args,
signal_handler,
input_handler,
double_spawn.clone(),
target_runner.clone(),
)?;
let (recording_session, run_id_unique_prefix) = if resolved_user_config
.is_experimental_enabled(UserConfigExperimental::Record)
&& resolved_user_config.record.enabled
{
let env_vars_for_recording = capture_env_vars_for_recording();
let config = RecordSessionConfig {
workspace_root: &self.base.workspace_root,
run_id: runner.run_id(),
nextest_version: self.base.current_version.clone(),
started_at: runner.started_at().fixed_offset(),
cli_args: cli_args_for_recording,
build_scope_args: self.base.build_scope_args(),
env_vars: env_vars_for_recording,
max_output_size: resolved_user_config.record.max_output_size,
rerun_info: None,
};
match RecordSession::setup(config) {
Ok(setup) => {
let record = structured::RecordReporter::new(setup.recorder);
let opts = RecordOpts::new(test_list.mode());
record.write_meta(
self.base.cargo_metadata_json.clone(),
test_list.to_summary(),
opts,
);
structured_reporter.set_record(record);
(Some(setup.session), Some(setup.run_id_unique_prefix))
}
Err(err) => match err.disabled_error() {
Some(reason) => {
warn!("recording disabled: {reason}");
(None, None)
}
None => return Err(ExpectedError::RecordSessionSetupError { err }),
},
}
} else {
(None, None)
};
let show_term_progress = ShowTerminalProgress::from_cargo_configs(
&self.base.cargo_configs,
std::io::stderr().is_terminal(),
);
let mut reporter = reporter_builder.build(
&test_list,
&profile,
show_term_progress,
output,
structured_reporter,
);
if let Some(prefix) = run_id_unique_prefix {
reporter.set_run_id_unique_prefix(prefix);
}
configure_handle_inheritance(true)?;
let run_stats = runner.try_execute(|event| reporter.report_event(event))?;
let reporter_stats = reporter.finish();
let rerun_available = recording_session.is_some();
let result = final_result(
NextestRunMode::Benchmark,
run_stats,
runner_opts.no_tests,
None,
rerun_available,
);
let exit_code = result.as_ref().err().map_or(0, |e| e.process_exit_code());
if let Some(session) = recording_session {
let policy = RecordRetentionPolicy::from(&resolved_user_config.record);
let mut styles = RecordStyles::default();
if should_colorize {
styles.colorize();
}
session
.finalize(
reporter_stats.recording_sizes,
reporter_stats.run_finished,
exit_code,
&policy,
)
.log(&styles);
}
self.base
.check_version_config_final(version_only_config.nextest_version())?;
result
}
fn resolve_rerun(
&self,
run_id_selector: &RunIdSelector,
) -> Result<(RerunState, ComputedRerunInfo), ExpectedError> {
let state_dir = records_state_dir(&self.base.workspace_root)
.map_err(|err| ExpectedError::RecordStateDirNotFound { err })?;
let store =
RunStore::new(&state_dir).map_err(|err| ExpectedError::RecordSetupError { err })?;
let snapshot = store
.lock_shared()
.map_err(|err| ExpectedError::RecordSetupError { err })?
.into_snapshot();
let resolved = snapshot
.resolve_run_id(run_id_selector)
.map_err(|err| ExpectedError::RunIdResolutionError { err })?;
let parent_run_id = resolved.run_id;
let run_info = snapshot
.runs()
.iter()
.find(|r| r.run_id == parent_run_id)
.expect("resolved run ID must be in the snapshot");
if let Err(incompatibility) = run_info
.store_format_version
.check_readable_by(STORE_FORMAT_VERSION)
{
return Err(ExpectedError::StoreVersionIncompatible {
run_id: parent_run_id,
incompatibility,
});
}
let run_dir = snapshot.runs_dir().run_dir(parent_run_id);
let mut reader =
RecordReader::open(&run_dir).map_err(|err| ExpectedError::RecordReadError { err })?;
let (outstanding_tests, root_info) = ComputedRerunInfo::compute(&mut reader)
.map_err(|err| ExpectedError::RecordReadError { err })?;
let root_info = root_info.unwrap_or_else(|| {
RerunRootInfo::new(parent_run_id, run_info.build_scope_args.clone())
});
Ok((
RerunState {
parent_run_id,
root_info,
},
outstanding_tests,
))
}
fn resolve_rerun_from_archive(
&self,
archive_path: &Utf8Path,
) -> Result<(RerunState, ComputedRerunInfo), ExpectedError> {
let mut archive = PortableRecording::open(archive_path)
.map_err(|err| ExpectedError::PortableRecordingReadError { err })?;
let run_info = archive.run_info();
let parent_run_id = run_info.run_id;
let (outstanding_tests, root_info) = ComputedRerunInfo::compute_from_archive(&mut archive)
.map_err(|err| ExpectedError::RecordReadError { err })?;
let root_info = root_info.unwrap_or_else(|| {
RerunRootInfo::new(parent_run_id, run_info.build_scope_args.clone())
});
Ok((
RerunState {
parent_run_id,
root_info,
},
outstanding_tests,
))
}
}
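
/// State resolved from a rerun selector: the parent run's ID and the root
/// run info used for build scoping and for the new run's rerun metadata.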
#[derive(Debug)]
struct RerunState {
parent_run_id: ReportUuid,
root_info: RerunRootInfo,
}
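
/// Translates final run statistics into the command's result. Precedence:
/// the `--no-tests` behavior when nothing ran, then errors derived from the
/// final stats, then outstanding-test accounting for reruns.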
fn final_result(
mode: NextestRunMode,
run_stats: RunStats,
no_tests: Option<NoTestsBehaviorOpt>,
outstanding_not_seen_count: Option<usize>,
rerun_available: bool,
) -> Result<(), ExpectedError> {
let final_stats = run_stats.summarize_final();
let is_rerun = outstanding_not_seen_count.is_some();
if matches!(final_stats, FinalRunStats::NoTestsRun) {
match no_tests {
Some(NoTestsBehaviorOpt::Pass) => return Ok(()),
Some(NoTestsBehaviorOpt::Warn) => {
warn!("no {} to run", plural::tests_plural(mode));
return Ok(());
}
Some(NoTestsBehaviorOpt::Fail) => {
return Err(ExpectedError::NoTestsRun {
mode,
is_default: false,
});
}
Some(NoTestsBehaviorOpt::Auto) => {
if !is_rerun {
return Err(ExpectedError::NoTestsRun {
mode,
is_default: false,
});
}
}
None => {
if !is_rerun {
return Err(ExpectedError::NoTestsRun {
mode,
is_default: true,
});
}
}
}
    } else if let Some(err) = final_stats_to_error(final_stats, mode, rerun_available) {
        return Err(err);
    }
match outstanding_not_seen_count {
Some(0) => {
info!("no outstanding tests remain");
Ok(())
}
Some(count) => Err(ExpectedError::RerunTestsOutstanding { count }),
None => Ok(()),
}
}
#[cfg(test)]
mod tests {
use super::*;
use nextest_runner::reporter::events::RunStats;
fn make_run_stats(initial_run_count: usize, finished_count: usize, passed: usize) -> RunStats {
RunStats {
initial_run_count,
finished_count,
passed,
..Default::default()
}
}
#[test]
fn test_final_result() {
let stats = make_run_stats(0, 0, 0);
let result = final_result(
NextestRunMode::Test,
stats,
Some(NoTestsBehaviorOpt::Pass),
None,
false,
);
assert!(result.is_ok(), "--no-tests=pass should succeed");
let stats = make_run_stats(0, 0, 0);
let result = final_result(
NextestRunMode::Test,
stats,
Some(NoTestsBehaviorOpt::Warn),
None,
false,
);
assert!(result.is_ok(), "--no-tests=warn should succeed");
let stats = make_run_stats(0, 0, 0);
let result = final_result(
NextestRunMode::Test,
stats,
Some(NoTestsBehaviorOpt::Fail),
None,
false,
);
assert!(
matches!(
result,
Err(ExpectedError::NoTestsRun {
is_default: false,
..
})
),
"--no-tests=fail should fail"
);
let stats = make_run_stats(0, 0, 0);
let result = final_result(
NextestRunMode::Test,
stats,
Some(NoTestsBehaviorOpt::Auto),
None,
false,
);
assert!(
matches!(
result,
Err(ExpectedError::NoTestsRun {
is_default: false,
..
})
),
"--no-tests=auto (not rerun) should fail"
);
let stats = make_run_stats(0, 0, 0);
let result = final_result(
NextestRunMode::Test,
stats,
Some(NoTestsBehaviorOpt::Auto),
Some(5),
false,
);
assert!(
matches!(
result,
Err(ExpectedError::RerunTestsOutstanding { count: 5 })
),
"--no-tests=auto (rerun with outstanding) should return RerunTestsOutstanding"
);
let stats = make_run_stats(0, 0, 0);
let result = final_result(
NextestRunMode::Test,
stats,
Some(NoTestsBehaviorOpt::Auto),
Some(0),
false,
);
assert!(
result.is_ok(),
"--no-tests=auto (rerun, no outstanding) should succeed"
);
let stats = make_run_stats(0, 0, 0);
let result = final_result(NextestRunMode::Test, stats, None, None, false);
assert!(
matches!(
result,
Err(ExpectedError::NoTestsRun {
is_default: true,
..
})
),
"default (not rerun) should fail with is_default: true"
);
let stats = make_run_stats(0, 0, 0);
let result = final_result(NextestRunMode::Test, stats, None, Some(3), false);
assert!(
matches!(
result,
Err(ExpectedError::RerunTestsOutstanding { count: 3 })
),
"default (rerun with outstanding) should return RerunTestsOutstanding"
);
let stats = make_run_stats(5, 5, 5);
let result = final_result(NextestRunMode::Test, stats, None, None, false);
assert!(
result.is_ok(),
"all tests passed (not rerun) should succeed"
);
let stats = make_run_stats(5, 5, 5);
let result = final_result(NextestRunMode::Test, stats, None, Some(0), false);
assert!(
result.is_ok(),
"all tests passed (rerun, no outstanding) should succeed"
);
let stats = make_run_stats(5, 5, 5);
let result = final_result(NextestRunMode::Test, stats, None, Some(2), false);
assert!(
matches!(
result,
Err(ExpectedError::RerunTestsOutstanding { count: 2 })
),
"all tests passed (rerun with outstanding) should return RerunTestsOutstanding"
);
let mut stats = make_run_stats(5, 5, 3);
stats.failed = 2;
let result = final_result(NextestRunMode::Test, stats, None, None, false);
assert!(
matches!(
result,
Err(ExpectedError::TestRunFailed {
rerun_available: false
})
),
"test failures should return TestRunFailed"
);
let mut stats = make_run_stats(5, 5, 3);
stats.failed = 2;
let result = final_result(NextestRunMode::Test, stats, None, None, true);
assert!(
matches!(
result,
Err(ExpectedError::TestRunFailed {
rerun_available: true
})
),
"test failures with rerun available should return TestRunFailed with rerun_available: true"
);
let mut stats = make_run_stats(5, 5, 3);
stats.failed = 2;
let result = final_result(NextestRunMode::Test, stats, None, Some(10), false);
assert!(
matches!(
result,
Err(ExpectedError::TestRunFailed {
rerun_available: false
})
),
"test failures should take precedence over outstanding tests"
);
}
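
    // Sketch tests for the pure helpers in this module. The expected values
    // follow directly from the helper definitions above; the specific inputs
    // are illustrative.
    #[test]
    fn test_no_run_no_capture_reasons() {
        assert_eq!(
            no_run_no_capture_reasons(true, true),
            Some("--no-run and --no-capture are specified")
        );
        assert_eq!(
            no_run_no_capture_reasons(true, false),
            Some("--no-run is specified")
        );
        assert_eq!(
            no_run_no_capture_reasons(false, true),
            Some("--no-capture is specified")
        );
        assert_eq!(no_run_no_capture_reasons(false, false), None);
    }

    #[test]
    fn test_non_zero_duration() {
        assert_eq!(non_zero_duration("1s"), Ok(Duration::from_secs(1)));
        assert_eq!(non_zero_duration("500ms"), Ok(Duration::from_millis(500)));
        assert!(non_zero_duration("0s").is_err());
        assert!(non_zero_duration("not a duration").is_err());
    }

    #[test]
    fn test_filter_env_vars_for_recording() {
        let vars = vec![
            ("NEXTEST_RETRIES".to_owned(), "2".to_owned()),
            ("CARGO_TERM_COLOR".to_owned(), "always".to_owned()),
            ("CARGO_REGISTRY_TOKEN".to_owned(), "secret".to_owned()),
            ("PATH".to_owned(), "/usr/bin".to_owned()),
        ];
        let filtered = filter_env_vars_for_recording(vars.into_iter());
        assert_eq!(filtered.len(), 2);
        assert_eq!(filtered["NEXTEST_RETRIES"], "2");
        assert_eq!(filtered["CARGO_TERM_COLOR"], "always");
    }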
}