use anyhow::{Context, Result};
use std::path::{Path, PathBuf};
use std::process::Stdio;
use tokio::process::Command;
use crate::types::{BehavioralContract, Criticality};
use perspt_core::plugin::{VerifierProfile, VerifierStage};
/// Aggregated outcome of one verifier run (tests, lint, build, …).
///
/// Counts are parsed from tool output; `output` keeps the raw combined
/// stdout/stderr for diagnostics.
#[derive(Debug, Clone, Default)]
pub struct TestResults {
    /// Number of tests that passed.
    pub passed: usize,
    /// Number of tests that failed (tool "errors" are folded in here too).
    pub failed: usize,
    /// Number of tests skipped/ignored.
    pub skipped: usize,
    /// passed + failed + skipped.
    pub total: usize,
    /// Structured details for each failed test.
    pub failures: Vec<TestFailure>,
    /// Wall-clock duration of the run in milliseconds.
    pub duration_ms: u64,
    /// Raw combined stdout/stderr of the underlying command.
    pub output: String,
    /// False when the stage could not run at all (e.g. no tool available),
    /// as opposed to running and reporting failures.
    pub run_succeeded: bool,
}
impl TestResults {
    /// True when the underlying command ran to completion and no test failed.
    /// A vacuous run (zero tests, clean exit) still counts as passing.
    pub fn all_passed(&self) -> bool {
        self.failed == 0 && self.run_succeeded
    }

    /// Fraction of tests that passed, in `[0.0, 1.0]`.
    /// An empty run is treated as a full pass (rate of 1.0).
    pub fn pass_rate(&self) -> f32 {
        match self.total {
            0 => 1.0,
            n => self.passed as f32 / n as f32,
        }
    }
}
/// One failed test, extracted from tool output.
#[derive(Debug, Clone)]
pub struct TestFailure {
    /// Test name (e.g. the final `::` segment of a pytest node id).
    pub name: String,
    /// Source file of the test, when it could be parsed from the output.
    pub file: Option<String>,
    /// Line number of the failure; never populated by the parsers in this
    /// file, reserved for richer reporters.
    pub line: Option<u32>,
    /// Failure message/reason as reported by the tool.
    pub message: String,
    /// Severity weighting; defaults to High until matched against a
    /// contract's weighted tests.
    pub criticality: Criticality,
}
/// Ensures `results` records a failure after a runner exited non-zero
/// without reporting any failed tests (e.g. a collection or startup error
/// that produced no parseable failure summary).
///
/// Guarantees after the call:
/// - `failed >= 1`, so `TestResults::all_passed` returns `false`;
/// - `total == passed + failed + skipped` (recomputed unconditionally);
/// - at least one `TestFailure` describing the exit code and raw output.
fn force_failure_on_nonzero_exit(
    results: &mut TestResults,
    command_name: &str,
    exit_code: Option<i32>,
    output: &str,
) {
    if results.failed == 0 {
        results.failed = 1;
    }
    // Recompute unconditionally. The previous version only filled in a zero
    // total, which left `total` stale — and `pass_rate()` inflated — when a
    // parsed summary (e.g. "3 passed") had already set `total` before
    // `failed` was bumped above.
    results.total = results.passed + results.failed + results.skipped;
    if results.failures.is_empty() {
        results.failures.push(TestFailure {
            name: command_name.to_string(),
            file: None,
            line: None,
            message: format!(
                "{} exited with code {:?} without a parseable success summary. Output:\n{}",
                command_name, exit_code, output
            ),
            criticality: Criticality::High,
        });
    }
}
/// Test runner for Python projects, driving `uv` (and `pytest` through it)
/// in `working_dir`.
pub struct PythonTestRunner {
    /// Project root where all commands are executed.
    working_dir: PathBuf,
    /// Intended per-run timeout in seconds.
    /// NOTE(review): stored but never applied to any command in this file —
    /// confirm it is enforced elsewhere (or wire it up).
    timeout_secs: u64,
    /// When true, missing environment pieces (pyproject, pytest) are set up
    /// automatically instead of erroring out.
    auto_setup: bool,
}
impl PythonTestRunner {
    /// Creates a runner rooted at `working_dir` with a 300-second timeout
    /// value and auto-setup enabled.
    pub fn new(working_dir: PathBuf) -> Self {
        Self {
            working_dir,
            timeout_secs: 300,
            auto_setup: true,
        }
    }

    /// Builder-style override of the timeout value.
    pub fn with_timeout(mut self, secs: u64) -> Self {
        self.timeout_secs = secs;
        self
    }

    /// Builder-style switch that disables automatic environment setup;
    /// `setup_environment` will then bail instead of running `uv init`.
    pub fn without_auto_setup(mut self) -> Self {
        self.auto_setup = false;
        self
    }

    /// True when `working_dir` contains a `pyproject.toml`.
    pub fn has_pyproject(&self) -> bool {
        self.working_dir.join("pyproject.toml").exists()
    }

    /// Probes whether `pytest` is runnable via `uv run` in this project.
    /// Any spawn error is treated as "not available".
    pub async fn has_pytest(&self) -> bool {
        let result = Command::new("uv")
            .args(["run", "pytest", "--version"])
            .current_dir(&self.working_dir)
            // Don't inherit the caller's VIRTUAL_ENV; let uv pick the
            // project environment.
            .env_remove("VIRTUAL_ENV")
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .status()
            .await;
        result.map(|s| s.success()).unwrap_or(false)
    }

    /// Prepares the Python environment with `uv`.
    ///
    /// Fallback chain: missing `pyproject.toml` → `uv init --lib` (if
    /// auto-setup) → `uv sync --dev` → `uv add --dev pytest` if pytest is
    /// still absent; each step degrades to `install_pytest_directly` on
    /// failure. Errors only when every fallback is exhausted or auto-setup
    /// is disabled and no `pyproject.toml` exists.
    pub async fn setup_environment(&self) -> Result<()> {
        log::info!("Setting up Python environment with uv");
        if !self.has_pyproject() {
            if self.auto_setup {
                log::warn!(
                    "No pyproject.toml found. Project should be initialized via 'uv init' first."
                );
                log::info!("Attempting to run 'uv init --lib' as fallback...");
                let init_output = Command::new("uv")
                    .args(["init", "--lib"])
                    .current_dir(&self.working_dir)
                    .env_remove("VIRTUAL_ENV")
                    .stdout(Stdio::piped())
                    .stderr(Stdio::piped())
                    .output()
                    .await
                    .context("Failed to run uv init")?;
                if !init_output.status.success() {
                    let stderr = String::from_utf8_lossy(&init_output.stderr);
                    log::warn!("uv init failed: {}", stderr);
                    // Can't build a project environment; fall back to a
                    // direct pytest install.
                    return self.install_pytest_directly().await;
                }
            } else {
                anyhow::bail!(
                    "No pyproject.toml found and auto_setup is disabled. Run 'uv init' first."
                );
            }
        }
        let output = Command::new("uv")
            .args(["sync", "--dev"])
            .current_dir(&self.working_dir)
            .env_remove("VIRTUAL_ENV")
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .output()
            .await
            .context("Failed to run uv sync")?;
        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            log::warn!("uv sync failed: {}", stderr);
            return self.install_pytest_directly().await;
        }
        if !self.has_pytest().await {
            log::info!("pytest not available after sync — adding as dev dependency");
            let add_output = Command::new("uv")
                .args(["add", "--dev", "pytest"])
                .current_dir(&self.working_dir)
                .env_remove("VIRTUAL_ENV")
                .stdout(Stdio::piped())
                .stderr(Stdio::piped())
                .output()
                .await;
            match add_output {
                Ok(o) if o.status.success() => {
                    log::info!("Added pytest as dev dependency");
                }
                Ok(o) => {
                    let stderr = String::from_utf8_lossy(&o.stderr);
                    log::warn!("uv add --dev pytest failed: {}", stderr);
                    return self.install_pytest_directly().await;
                }
                Err(e) => {
                    log::warn!("Failed to run uv add --dev pytest: {}", e);
                    return self.install_pytest_directly().await;
                }
            }
        }
        log::info!("Python environment ready");
        Ok(())
    }

    /// Last-resort install of pytest via `uv pip install`, bypassing the
    /// project's dependency metadata. Errors when the install itself fails.
    async fn install_pytest_directly(&self) -> Result<()> {
        log::info!("Installing pytest via uv pip");
        let output = Command::new("uv")
            .args(["pip", "install", "pytest"])
            .current_dir(&self.working_dir)
            .env_remove("VIRTUAL_ENV")
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .output()
            .await
            .context("Failed to install pytest")?;
        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            anyhow::bail!("Failed to install pytest: {}", stderr);
        }
        Ok(())
    }

    /// Runs pytest with `test_args` appended after `-v --tb=short`,
    /// setting up the environment first if pytest is missing.
    ///
    /// Exit code 5 (pytest: no tests collected) is normalized to an empty
    /// vacuous pass; any other non-zero exit is forced to register at least
    /// one failure even when no summary could be parsed.
    pub async fn run_pytest(&self, test_args: &[&str]) -> Result<TestResults> {
        log::info!("Running pytest in {}", self.working_dir.display());
        if !self.has_pytest().await {
            self.setup_environment().await?;
        }
        let mut args = vec!["run", "pytest", "-v", "--tb=short"];
        args.extend(test_args);
        let start = std::time::Instant::now();
        let output = Command::new("uv")
            .args(&args)
            .current_dir(&self.working_dir)
            .env_remove("VIRTUAL_ENV")
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .output()
            .await
            .context("Failed to run pytest")?;
        let duration_ms = start.elapsed().as_millis() as u64;
        let stdout = String::from_utf8_lossy(&output.stdout).to_string();
        let stderr = String::from_utf8_lossy(&output.stderr).to_string();
        // Parse stdout and stderr together: pytest may write the summary to
        // either depending on configuration.
        let combined = format!("{}\n{}", stdout, stderr);
        log::debug!("pytest exit code: {:?}", output.status.code());
        if !stdout.is_empty() {
            log::debug!("pytest stdout:\n{}", stdout);
        }
        let mut results = self.parse_pytest_output(&combined, duration_ms);
        results.run_succeeded = true;
        let exit_code = output.status.code();
        if exit_code == Some(5) {
            log::info!("pytest exit code 5 — no tests collected (vacuous pass)");
            results.passed = 0;
            results.failed = 0;
            results.total = 0;
            results.failures.clear();
        } else if !output.status.success() {
            force_failure_on_nonzero_exit(&mut results, "pytest", exit_code, &combined);
        }
        if results.all_passed() {
            log::info!("✅ Tests passed: {}/{}", results.passed, results.total);
        } else {
            log::info!(
                "❌ Tests failed: {} passed, {} failed",
                results.passed,
                results.failed
            );
        }
        Ok(results)
    }

    /// Runs pytest restricted to the given test files.
    /// Paths that are not valid UTF-8 are silently dropped from the args.
    pub async fn run_test_files(&self, test_files: &[&Path]) -> Result<TestResults> {
        let file_args: Vec<&str> = test_files.iter().filter_map(|p| p.to_str()).collect();
        self.run_pytest(&file_args).await
    }

    /// Parses pytest's textual output into `TestResults`.
    ///
    /// Counts come from summary-style lines ("N passed, M failed ... in Xs");
    /// each number is the token preceding its label. Errors are folded into
    /// `failed`. `FAILED <nodeid> - <msg>` lines become `TestFailure`s.
    /// NOTE(review): if more than one line matches the summary heuristic,
    /// passed/failed/skipped are overwritten but error counts accumulate —
    /// confirm pytest output never contains two such lines.
    fn parse_pytest_output(&self, output: &str, duration_ms: u64) -> TestResults {
        let mut results = TestResults {
            duration_ms,
            output: output.to_string(),
            ..Default::default()
        };
        for line in output.lines() {
            let line = line.trim();
            // Heuristic for the final summary line, e.g.
            // "===== 3 passed, 2 failed in 0.12s =====".
            if (line.contains("passed") || line.contains("failed") || line.contains("error"))
                && (line.contains(" in ") || line.starts_with('='))
            {
                let parts: Vec<&str> = line.split_whitespace().collect();
                for i in 0..parts.len() {
                    if parts[i] == "passed" || parts[i] == "passed," {
                        if i > 0 {
                            if let Ok(n) = parts[i - 1].trim_matches(',').parse::<usize>() {
                                results.passed = n;
                            }
                        }
                    } else if parts[i] == "failed" || parts[i] == "failed," {
                        if i > 0 {
                            if let Ok(n) = parts[i - 1].trim_matches(',').parse::<usize>() {
                                results.failed = n;
                            }
                        }
                    } else if parts[i] == "skipped" || parts[i] == "skipped," {
                        if i > 0 {
                            if let Ok(n) = parts[i - 1].trim_matches(',').parse::<usize>() {
                                results.skipped = n;
                            }
                        }
                    } else if (parts[i] == "error" || parts[i] == "errors") && i > 0 {
                        // Collection/setup errors count as failures.
                        if let Ok(n) = parts[i - 1].trim_matches(',').parse::<usize>() {
                            results.failed += n;
                        }
                    }
                }
            }
            if line.starts_with("FAILED ") {
                let failure = self.parse_failure_line(line);
                results.failures.push(failure);
            }
        }
        results.total = results.passed + results.failed + results.skipped;
        results
    }

    /// Parses a single `FAILED path::Class::test - message` line into a
    /// `TestFailure`; the message is empty when no " - " separator exists,
    /// and `file` is only set when the node id contains `::`.
    fn parse_failure_line(&self, line: &str) -> TestFailure {
        let rest = line.strip_prefix("FAILED ").unwrap_or(line);
        let (test_path, message) = if let Some(idx) = rest.find(" - ") {
            (&rest[..idx], rest[idx + 3..].to_string())
        } else {
            (rest, String::new())
        };
        let parts: Vec<&str> = test_path.split("::").collect();
        let (file, name) = if parts.len() >= 2 {
            (
                Some(parts[0].to_string()),
                // Last segment is the test function name.
                parts.last().unwrap_or(&"").to_string(),
            )
        } else {
            (None, test_path.to_string())
        };
        TestFailure {
            name,
            file,
            line: None,
            message,
            criticality: Criticality::High,
        }
    }

    /// Computes the weighted violation score: for each failure,
    /// `gamma * weight`, where the weight comes from a contract entry whose
    /// name substring-matches the failure (either direction) or defaults to
    /// `Criticality::High.weight()`.
    pub fn calculate_v_log(&self, results: &TestResults, contract: &BehavioralContract) -> f32 {
        let gamma = contract.gamma();
        let mut v_log = 0.0;
        for failure in &results.failures {
            let weight = contract
                .weighted_tests
                .iter()
                .find(|wt| {
                    failure.name.contains(&wt.test_name) || wt.test_name.contains(&failure.name)
                })
                .map(|wt| wt.criticality.weight())
                .unwrap_or(Criticality::High.weight());
            v_log += gamma * weight;
        }
        v_log
    }

    /// Re-tags each failure's criticality from the contract's weighted
    /// tests, using the same substring matching as `calculate_v_log`;
    /// unmatched failures keep their existing criticality.
    pub fn match_weighted_tests(&self, results: &mut TestResults, contract: &BehavioralContract) {
        for failure in &mut results.failures {
            if let Some(wt) = contract.weighted_tests.iter().find(|wt| {
                failure.name.contains(&wt.test_name) || wt.test_name.contains(&failure.name)
            }) {
                failure.criticality = wt.criticality;
            }
        }
    }
}
/// Common interface over language-specific verifier runners.
#[async_trait::async_trait]
pub trait TestRunnerTrait: Send + Sync {
    /// Fast static validation (syntax/type check) of the workspace.
    async fn run_syntax_check(&self) -> Result<TestResults>;
    /// Executes the project's test suite.
    async fn run_tests(&self) -> Result<TestResults>;
    /// Builds/compiles the project, for languages with a build step.
    async fn run_build_check(&self) -> Result<TestResults>;
    /// Lints the project. The default is a synthetic single-pass result so
    /// runners without a lint stage still report success.
    async fn run_lint(&self) -> Result<TestResults> {
        Ok(TestResults {
            passed: 1,
            total: 1,
            run_succeeded: true,
            output: "No lint stage configured".to_string(),
            ..Default::default()
        })
    }
    /// Dispatches `stage` to the corresponding `run_*` method.
    async fn run_stage(&self, stage: VerifierStage) -> Result<TestResults> {
        match stage {
            VerifierStage::SyntaxCheck => self.run_syntax_check().await,
            VerifierStage::Build => self.run_build_check().await,
            VerifierStage::Test => self.run_tests().await,
            VerifierStage::Lint => self.run_lint().await,
        }
    }
    /// Short runner identifier (e.g. "python", "rust").
    fn name(&self) -> &str;
}
#[async_trait::async_trait]
impl TestRunnerTrait for PythonTestRunner {
    /// Type/syntax check via `uvx ty check .`; reported as a single
    /// synthetic test whose pass/fail mirrors the exit status.
    async fn run_syntax_check(&self) -> Result<TestResults> {
        let output = Command::new("uvx")
            .args(["ty", "check", "."])
            .current_dir(&self.working_dir)
            .env_remove("VIRTUAL_ENV")
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .output()
            .await
            .context("Failed to run ty check")?;
        let stdout = String::from_utf8_lossy(&output.stdout).to_string();
        let stderr = String::from_utf8_lossy(&output.stderr).to_string();
        Ok(TestResults {
            passed: if output.status.success() { 1 } else { 0 },
            failed: if output.status.success() { 0 } else { 1 },
            total: 1,
            run_succeeded: true,
            output: format!("{}\n{}", stdout, stderr),
            ..Default::default()
        })
    }
    /// Full pytest run with no extra arguments.
    async fn run_tests(&self) -> Result<TestResults> {
        self.run_pytest(&[]).await
    }
    /// Python has no build step, so this always reports a synthetic pass.
    async fn run_build_check(&self) -> Result<TestResults> {
        Ok(TestResults {
            passed: 1,
            total: 1,
            run_succeeded: true,
            output: "No build step for Python".to_string(),
            ..Default::default()
        })
    }
    /// Lint via `uv run ruff check .`; single synthetic test keyed to the
    /// exit status, like `run_syntax_check`.
    async fn run_lint(&self) -> Result<TestResults> {
        let output = Command::new("uv")
            .args(["run", "ruff", "check", "."])
            .current_dir(&self.working_dir)
            .env_remove("VIRTUAL_ENV")
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .output()
            .await
            .context("Failed to run ruff check")?;
        let stdout = String::from_utf8_lossy(&output.stdout).to_string();
        let stderr = String::from_utf8_lossy(&output.stderr).to_string();
        Ok(TestResults {
            passed: if output.status.success() { 1 } else { 0 },
            failed: if output.status.success() { 0 } else { 1 },
            total: 1,
            run_succeeded: true,
            output: format!("{}\n{}", stdout, stderr),
            ..Default::default()
        })
    }
    fn name(&self) -> &str {
        "python"
    }
}
/// Test runner for Rust projects, driving `cargo` in `working_dir`.
pub struct RustTestRunner {
    /// Project root where all cargo commands are executed.
    working_dir: PathBuf,
}
impl RustTestRunner {
    /// Creates a runner that executes `cargo` commands in `working_dir`.
    pub fn new(working_dir: PathBuf) -> Self {
        Self { working_dir }
    }

    /// Parses `cargo test` output into aggregate `TestResults`.
    ///
    /// `cargo test` prints one `test result:` summary line per test target
    /// (unit tests, each integration-test binary, doc tests), e.g.
    /// `test result: ok. 3 passed; 1 failed; 1 ignored; 0 measured; ...`.
    /// Counts are accumulated across all summary lines so multi-target runs
    /// are reported in full; the previous implementation overwrote the
    /// counts and so kept only the last target's summary. `ignored` maps to
    /// `skipped`. `run_succeeded` is set optimistically here; the caller
    /// corrects it from the process exit status.
    fn parse_cargo_test_output(&self, output: &str) -> TestResults {
        let mut results = TestResults {
            output: output.to_string(),
            run_succeeded: true,
            ..Default::default()
        };
        for line in output.lines() {
            let line = line.trim();
            if !line.starts_with("test result:") {
                continue;
            }
            let parts: Vec<&str> = line.split_whitespace().collect();
            for i in 1..parts.len() {
                // Each count precedes its label: "<n> passed; <m> failed; ...".
                let label = parts[i].trim_end_matches(';');
                if let Ok(n) = parts[i - 1].parse::<usize>() {
                    match label {
                        "passed" => results.passed += n,
                        "failed" => results.failed += n,
                        "ignored" => results.skipped += n,
                        _ => {}
                    }
                }
            }
        }
        results.total = results.passed + results.failed + results.skipped;
        results
    }
}
#[async_trait::async_trait]
impl TestRunnerTrait for RustTestRunner {
    /// `cargo check`; reported as a single synthetic test keyed to the
    /// exit status, with stderr (where cargo writes diagnostics) as output.
    async fn run_syntax_check(&self) -> Result<TestResults> {
        let output = Command::new("cargo")
            .args(["check"])
            .current_dir(&self.working_dir)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .output()
            .await
            .context("Failed to run cargo check")?;
        let stderr = String::from_utf8_lossy(&output.stderr).to_string();
        Ok(TestResults {
            passed: if output.status.success() { 1 } else { 0 },
            failed: if output.status.success() { 0 } else { 1 },
            total: 1,
            run_succeeded: true,
            output: stderr,
            ..Default::default()
        })
    }
    /// `cargo test`, parsed into per-test counts; a non-zero exit with no
    /// parsed failures is forced to register at least one failure.
    async fn run_tests(&self) -> Result<TestResults> {
        let output = Command::new("cargo")
            .args(["test"])
            .current_dir(&self.working_dir)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .output()
            .await
            .context("Failed to run cargo test")?;
        let stdout = String::from_utf8_lossy(&output.stdout).to_string();
        let stderr = String::from_utf8_lossy(&output.stderr).to_string();
        // Summaries land on stdout, compile errors on stderr; parse both.
        let combined = format!("{}\n{}", stdout, stderr);
        let mut results = self.parse_cargo_test_output(&combined);
        results.run_succeeded = true;
        if !output.status.success() {
            force_failure_on_nonzero_exit(
                &mut results,
                "cargo test",
                output.status.code(),
                &combined,
            );
        }
        Ok(results)
    }
    /// `cargo build`; single synthetic test keyed to the exit status.
    async fn run_build_check(&self) -> Result<TestResults> {
        let output = Command::new("cargo")
            .args(["build"])
            .current_dir(&self.working_dir)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .output()
            .await
            .context("Failed to run cargo build")?;
        let stderr = String::from_utf8_lossy(&output.stderr).to_string();
        Ok(TestResults {
            passed: if output.status.success() { 1 } else { 0 },
            failed: if output.status.success() { 0 } else { 1 },
            total: 1,
            run_succeeded: true,
            output: stderr,
            ..Default::default()
        })
    }
    /// `cargo clippy -- -D warnings` (warnings are errors); single
    /// synthetic test keyed to the exit status.
    async fn run_lint(&self) -> Result<TestResults> {
        let output = Command::new("cargo")
            .args(["clippy", "--", "-D", "warnings"])
            .current_dir(&self.working_dir)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .output()
            .await
            .context("Failed to run cargo clippy")?;
        let stderr = String::from_utf8_lossy(&output.stderr).to_string();
        Ok(TestResults {
            passed: if output.status.success() { 1 } else { 0 },
            failed: if output.status.success() { 0 } else { 1 },
            total: 1,
            run_succeeded: true,
            output: stderr,
            ..Default::default()
        })
    }
    fn name(&self) -> &str {
        "rust"
    }
}
/// Generic runner that executes verifier commands declared by a language
/// plugin's `VerifierProfile`, for languages without a built-in runner.
pub struct PluginVerifierRunner {
    /// Workspace root; commands run here and must stay bound to it.
    working_dir: PathBuf,
    /// Declares which stages exist and which commands implement them.
    profile: VerifierProfile,
}
impl PluginVerifierRunner {
    /// Creates a runner bound to `working_dir` that executes the commands
    /// declared in `profile`.
    pub fn new(working_dir: PathBuf, profile: VerifierProfile) -> Self {
        Self {
            working_dir,
            profile,
        }
    }

    /// Sanitizes, validates, and executes one plugin-declared command.
    ///
    /// The command is screened by `perspt_policy::sanitize_command`
    /// (hard rejection plus logged warnings) and
    /// `perspt_policy::validate_workspace_bound` before being spawned in
    /// `working_dir`. Returns a synthetic one-test result whose pass/fail
    /// mirrors the process exit status.
    ///
    /// NOTE(review): the command is tokenized with `split_whitespace`, so
    /// quoted arguments containing spaces are not handled — confirm plugin
    /// commands never need shell-style quoting.
    async fn exec_command(&self, command: &str, stage: VerifierStage) -> Result<TestResults> {
        let sr = perspt_policy::sanitize_command(command)?;
        if sr.rejected {
            anyhow::bail!(
                "{} command rejected by policy: {}",
                stage,
                sr.rejection_reason.unwrap_or_default()
            );
        }
        // Warnings are non-fatal; surface them but keep going.
        for warning in &sr.warnings {
            log::warn!(
                "[{}] policy warning for {} stage: {}",
                self.profile.plugin_name,
                stage,
                warning
            );
        }
        perspt_policy::validate_workspace_bound(command, &self.working_dir)?;
        let parts: Vec<&str> = command.split_whitespace().collect();
        if parts.is_empty() {
            anyhow::bail!("empty command for stage {}", stage);
        }
        let program = parts[0];
        let args = &parts[1..];
        log::info!(
            "[{}] running {} stage: {}",
            self.profile.plugin_name,
            stage,
            command
        );
        let output = Command::new(program)
            .args(args)
            .current_dir(&self.working_dir)
            .env_remove("VIRTUAL_ENV")
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .output()
            .await
            .with_context(|| format!("Failed to run {} for {} stage", command, stage))?;
        let stdout = String::from_utf8_lossy(&output.stdout).to_string();
        let stderr = String::from_utf8_lossy(&output.stderr).to_string();
        Ok(TestResults {
            passed: if output.status.success() { 1 } else { 0 },
            failed: if output.status.success() { 0 } else { 1 },
            total: 1,
            run_succeeded: true,
            output: format!("{}\n{}", stdout, stderr),
            ..Default::default()
        })
    }

    /// Runs `stage` according to the profile:
    /// - stage not declared → synthetic vacuous pass;
    /// - declared but no tool available → degraded skip with zero totals
    ///   and `run_succeeded = false`;
    /// - otherwise the effective command is executed.
    async fn run_profile_stage(&self, stage: VerifierStage) -> Result<TestResults> {
        let cap = match self.profile.get(stage) {
            Some(c) => c,
            None => {
                return Ok(TestResults {
                    passed: 1,
                    total: 1,
                    run_succeeded: true,
                    output: format!(
                        "No {} stage declared for {}",
                        stage, self.profile.plugin_name
                    ),
                    ..Default::default()
                });
            }
        };
        match cap.effective_command() {
            Some(cmd) => self.exec_command(cmd, stage).await,
            None => {
                log::warn!(
                    "[{}] {} stage declared but no tool available (degraded)",
                    self.profile.plugin_name,
                    stage
                );
                Ok(TestResults {
                    passed: 0,
                    failed: 0,
                    total: 0,
                    run_succeeded: false,
                    output: format!(
                        "{} stage skipped: no tool available for {}",
                        stage, self.profile.plugin_name
                    ),
                    ..Default::default()
                })
            }
        }
    }

    /// Runs every stage the profile declares, in fixed order (syntax check,
    /// build, test, lint). Undeclared stages are omitted from the result
    /// entirely rather than reported as vacuous passes.
    pub async fn run_all_stages(&self) -> Vec<(VerifierStage, Result<TestResults>)> {
        let stages = [
            VerifierStage::SyntaxCheck,
            VerifierStage::Build,
            VerifierStage::Test,
            VerifierStage::Lint,
        ];
        let mut results = Vec::new();
        for stage in stages {
            if self.profile.get(stage).is_some() {
                results.push((stage, self.run_profile_stage(stage).await));
            }
        }
        results
    }

    /// The profile this runner executes.
    pub fn profile(&self) -> &VerifierProfile {
        &self.profile
    }
}
// Each trait stage simply delegates to `run_profile_stage`, which handles
// undeclared stages and unavailable tools uniformly.
#[async_trait::async_trait]
impl TestRunnerTrait for PluginVerifierRunner {
    async fn run_syntax_check(&self) -> Result<TestResults> {
        self.run_profile_stage(VerifierStage::SyntaxCheck).await
    }
    async fn run_tests(&self) -> Result<TestResults> {
        self.run_profile_stage(VerifierStage::Test).await
    }
    async fn run_build_check(&self) -> Result<TestResults> {
        self.run_profile_stage(VerifierStage::Build).await
    }
    async fn run_lint(&self) -> Result<TestResults> {
        self.run_profile_stage(VerifierStage::Lint).await
    }
    /// Reports the plugin's own name rather than a fixed language id.
    fn name(&self) -> &str {
        &self.profile.plugin_name
    }
}
/// Selects a built-in runner for `plugin_name`.
///
/// Unknown names fall back to the Python runner; use
/// `test_runner_for_profile` to get plugin-driven verification instead.
pub fn test_runner_for_plugin(plugin_name: &str, working_dir: PathBuf) -> Box<dyn TestRunnerTrait> {
    if plugin_name == "rust" {
        Box::new(RustTestRunner::new(working_dir))
    } else {
        // "python" and every unrecognized plugin share the Python runner.
        Box::new(PythonTestRunner::new(working_dir))
    }
}
/// Selects a runner for a verifier `profile`.
///
/// The built-in Rust and Python runners take precedence for their own
/// plugin names; any other profile is executed generically through
/// `PluginVerifierRunner`.
pub fn test_runner_for_profile(
    profile: VerifierProfile,
    working_dir: PathBuf,
) -> Box<dyn TestRunnerTrait> {
    if profile.plugin_name == "rust" {
        Box::new(RustTestRunner::new(working_dir))
    } else if profile.plugin_name == "python" {
        Box::new(PythonTestRunner::new(working_dir))
    } else {
        Box::new(PluginVerifierRunner::new(working_dir, profile))
    }
}
/// Backward-compatible alias: `TestRunner` predates multi-language support
/// and has always meant the Python/pytest runner.
pub type TestRunner = PythonTestRunner;
#[cfg(test)]
mod tests {
    //! Unit tests for output parsing, scoring, the runner factories, and
    //! the plugin verifier runner (using `echo` as a harmless command).
    use super::*;
    use crate::types::WeightedTest;
    use perspt_core::plugin::{
        LanguagePlugin, LspCapability, LspConfig, VerifierCapability, VerifierProfile,
    };

    /// Summary line counts are extracted and totalled correctly.
    #[test]
    fn test_parse_pytest_summary() {
        let runner = PythonTestRunner::new(PathBuf::from("."));
        let output = "===== 3 passed, 2 failed, 1 skipped in 0.12s =====";
        let results = runner.parse_pytest_output(output, 120);
        assert_eq!(results.passed, 3);
        assert_eq!(results.failed, 2);
        assert_eq!(results.skipped, 1);
        assert_eq!(results.total, 6);
    }

    /// `FAILED <file>::<class>::<test> - <msg>` splits into name/file/message.
    #[test]
    fn test_parse_pytest_failure_line() {
        let runner = PythonTestRunner::new(PathBuf::from("."));
        let line = "FAILED test_calculator.py::TestDivide::test_divide_by_zero - ZeroDivisionError";
        let failure = runner.parse_failure_line(line);
        assert_eq!(failure.name, "test_divide_by_zero");
        assert_eq!(failure.file, Some("test_calculator.py".to_string()));
        assert!(failure.message.contains("ZeroDivisionError"));
    }

    /// A non-zero exit with no parsed failures must still register one.
    #[test]
    fn test_force_failure_on_nonzero_exit_marks_failure() {
        let mut results = TestResults::default();
        force_failure_on_nonzero_exit(&mut results, "pytest", Some(2), "collection error");
        assert_eq!(results.failed, 1);
        assert_eq!(results.total, 1);
        assert_eq!(results.failures.len(), 1);
        assert!(results.failures[0].message.contains("collection error"));
    }

    /// v_log is gamma * weight for a contract-matched Critical failure.
    #[test]
    fn test_calculate_v_log() {
        let runner = PythonTestRunner::new(PathBuf::from("."));
        let results = TestResults {
            failures: vec![TestFailure {
                name: "test_critical_feature".to_string(),
                file: None,
                line: None,
                message: String::new(),
                criticality: Criticality::Critical,
            }],
            ..Default::default()
        };
        let mut contract = BehavioralContract::new();
        contract.weighted_tests = vec![WeightedTest {
            test_name: "test_critical_feature".to_string(),
            criticality: Criticality::Critical,
        }];
        let v_log = runner.calculate_v_log(&results, &contract);
        assert!((v_log - 20.0).abs() < 0.01);
    }

    /// `test result:` summary line is parsed; ignored maps to skipped.
    #[test]
    fn test_parse_cargo_test_output() {
        let runner = RustTestRunner::new(PathBuf::from("."));
        let output = r#"
running 5 tests
test tests::test_add ... ok
test tests::test_sub ... ok
test tests::test_mul ... FAILED
test tests::test_div ... ok
test tests::test_rem ... ignored
test result: ok. 3 passed; 1 failed; 1 ignored; 0 measured; 0 filtered out
"#;
        let results = runner.parse_cargo_test_output(output);
        assert_eq!(results.passed, 3);
        assert_eq!(results.failed, 1);
        assert_eq!(results.skipped, 1);
        assert_eq!(results.total, 5);
    }

    /// Unknown plugin names fall back to the Python runner.
    #[test]
    fn test_runner_for_plugin_factory() {
        let rust_runner = test_runner_for_plugin("rust", PathBuf::from("."));
        assert_eq!(rust_runner.name(), "rust");
        let python_runner = test_runner_for_plugin("python", PathBuf::from("."));
        assert_eq!(python_runner.name(), "python");
        let fallback = test_runner_for_plugin("go", PathBuf::from("."));
        assert_eq!(fallback.name(), "python");
    }

    /// Builds a minimal profile with the given capabilities and a dummy,
    /// unavailable LSP config.
    fn make_test_profile(name: &str, caps: Vec<VerifierCapability>) -> VerifierProfile {
        VerifierProfile {
            plugin_name: name.to_string(),
            capabilities: caps,
            lsp: LspCapability {
                primary: LspConfig {
                    server_binary: "test-ls".to_string(),
                    args: vec![],
                    language_id: name.to_string(),
                },
                primary_available: false,
                fallback: None,
                fallback_available: false,
            },
        }
    }

    /// The runner reports the plugin's name.
    #[test]
    fn test_plugin_verifier_runner_name() {
        let profile = make_test_profile("go", vec![]);
        let runner = PluginVerifierRunner::new(PathBuf::from("."), profile);
        assert_eq!(runner.name(), "go");
    }

    /// An undeclared stage yields a synthetic vacuous pass.
    #[tokio::test]
    async fn test_plugin_verifier_runner_no_stage_declared() {
        let profile = make_test_profile("go", vec![]);
        let runner = PluginVerifierRunner::new(PathBuf::from("."), profile);
        let result = runner.run_syntax_check().await.unwrap();
        assert_eq!(result.passed, 1);
        assert_eq!(result.total, 1);
        assert!(result.output.contains("No syntax_check stage"));
    }

    /// A declared stage with no usable tool degrades: run_succeeded=false.
    #[tokio::test]
    async fn test_plugin_verifier_runner_no_tool_available() {
        let profile = make_test_profile(
            "go",
            vec![VerifierCapability {
                stage: VerifierStage::Build,
                command: Some("go build ./...".to_string()),
                available: false,
                fallback_command: None,
                fallback_available: false,
            }],
        );
        let runner = PluginVerifierRunner::new(PathBuf::from("."), profile);
        let result = runner.run_build_check().await.unwrap();
        assert!(!result.run_succeeded);
        assert!(result.output.contains("no tool available"));
    }

    /// A real (harmless) command is executed and its output captured.
    #[tokio::test]
    async fn test_plugin_verifier_runner_echo_command() {
        let profile = make_test_profile(
            "echo-lang",
            vec![VerifierCapability {
                stage: VerifierStage::SyntaxCheck,
                command: Some("echo syntax-ok".to_string()),
                available: true,
                fallback_command: None,
                fallback_available: false,
            }],
        );
        let runner = PluginVerifierRunner::new(PathBuf::from("."), profile);
        let result = runner.run_syntax_check().await.unwrap();
        assert_eq!(result.passed, 1);
        assert!(result.run_succeeded);
        assert!(result.output.contains("syntax-ok"));
    }

    /// run_all_stages only runs declared stages, in fixed order.
    #[tokio::test]
    async fn test_plugin_verifier_runner_run_all_stages() {
        let profile = make_test_profile(
            "echo-lang",
            vec![
                VerifierCapability {
                    stage: VerifierStage::SyntaxCheck,
                    command: Some("echo check".to_string()),
                    available: true,
                    fallback_command: None,
                    fallback_available: false,
                },
                VerifierCapability {
                    stage: VerifierStage::Lint,
                    command: Some("echo lint".to_string()),
                    available: true,
                    fallback_command: None,
                    fallback_available: false,
                },
            ],
        );
        let runner = PluginVerifierRunner::new(PathBuf::from("."), profile);
        let results = runner.run_all_stages().await;
        assert_eq!(results.len(), 2);
        assert_eq!(results[0].0, VerifierStage::SyntaxCheck);
        assert_eq!(results[1].0, VerifierStage::Lint);
        assert!(results[0].1.is_ok());
        assert!(results[1].1.is_ok());
    }

    /// Known plugin names get built-in runners; others get the plugin runner.
    #[test]
    fn test_runner_for_profile_factory() {
        use perspt_core::plugin::RustPlugin;
        let rust_profile = RustPlugin.verifier_profile();
        let runner = test_runner_for_profile(rust_profile, PathBuf::from("."));
        assert_eq!(runner.name(), "rust");
        let custom = make_test_profile("go", vec![]);
        let runner = test_runner_for_profile(custom, PathBuf::from("."));
        assert_eq!(runner.name(), "go");
    }

    /// A policy-rejected command errors before anything is spawned.
    #[tokio::test]
    async fn test_exec_command_rejects_dangerous_pattern() {
        let profile = make_test_profile(
            "danger",
            vec![VerifierCapability {
                stage: VerifierStage::SyntaxCheck,
                command: Some("rm -rf /".to_string()),
                available: true,
                fallback_command: None,
                fallback_available: false,
            }],
        );
        let runner = PluginVerifierRunner::new(PathBuf::from("/tmp"), profile);
        let result = runner.run_syntax_check().await;
        assert!(result.is_err());
    }

    /// A command reaching outside the workspace is rejected.
    #[tokio::test]
    async fn test_exec_command_rejects_workspace_escape() {
        let profile = make_test_profile(
            "escape",
            vec![VerifierCapability {
                stage: VerifierStage::SyntaxCheck,
                command: Some("cat /etc/passwd".to_string()),
                available: true,
                fallback_command: None,
                fallback_available: false,
            }],
        );
        let runner = PluginVerifierRunner::new(PathBuf::from("/home/user/project"), profile);
        let result = runner.run_syntax_check().await;
        assert!(result.is_err());
    }

    /// effective_command falls back when the primary tool is unavailable.
    #[test]
    fn test_fallback_command_selected_when_primary_unavailable() {
        let cap = VerifierCapability {
            stage: VerifierStage::Test,
            command: Some("uv run pytest".to_string()),
            available: false,
            fallback_command: Some("python -m pytest".to_string()),
            fallback_available: true,
        };
        assert_eq!(cap.effective_command(), Some("python -m pytest"));
    }
}