1use anyhow::{Context, Result};
9use std::path::{Path, PathBuf};
10use std::process::Stdio;
11use tokio::process::Command;
12
13use crate::types::{BehavioralContract, Criticality};
14
/// Aggregated outcome of one pytest invocation.
#[derive(Debug, Clone, Default)]
pub struct TestResults {
    /// Number of tests reported as passed in the pytest summary line.
    pub passed: usize,
    /// Number of tests reported as failed (collection/internal errors are
    /// folded into this count by the parser).
    pub failed: usize,
    /// Number of tests reported as skipped.
    pub skipped: usize,
    /// Sum of passed + failed + skipped, computed after parsing.
    pub total: usize,
    /// One entry per `FAILED ...` line found in the pytest output.
    pub failures: Vec<TestFailure>,
    /// Wall-clock duration of the pytest run, in milliseconds.
    pub duration_ms: u64,
    /// Combined stdout + stderr captured from the pytest process.
    pub output: String,
    /// Whether the test run itself completed (as opposed to the runner or
    /// pytest failing to execute at all).
    pub run_succeeded: bool,
}
35
36impl TestResults {
37 pub fn all_passed(&self) -> bool {
39 self.run_succeeded && self.failed == 0
40 }
41
42 pub fn pass_rate(&self) -> f32 {
44 if self.total == 0 {
45 1.0
46 } else {
47 (self.passed as f32) / (self.total as f32)
48 }
49 }
50}
51
/// A single failing test extracted from a pytest `FAILED ...` line.
#[derive(Debug, Clone)]
pub struct TestFailure {
    /// Test function name (last `::`-separated segment of the test path).
    pub name: String,
    /// Source file of the test, when the path contained a `::` separator.
    pub file: Option<String>,
    /// Line number of the failure; never populated by the current parser.
    pub line: Option<u32>,
    /// Text after the ` - ` separator on the FAILED line (may be empty).
    pub message: String,
    /// Severity of this failure; the parser assigns `High` by default and
    /// `match_weighted_tests` overrides it from the behavioral contract.
    pub criticality: Criticality,
}
66
/// Runs pytest for a Python project through `uv`, parsing the textual
/// output into structured [`TestResults`].
pub struct PythonTestRunner {
    /// Project root in which `uv` commands are executed.
    working_dir: PathBuf,
    /// Maximum time, in seconds, a pytest run is intended to take.
    timeout_secs: u64,
    /// When true, missing environment pieces (pyproject.toml, pytest) are
    /// set up automatically as a fallback.
    auto_setup: bool,
}
82
83impl PythonTestRunner {
84 pub fn new(working_dir: PathBuf) -> Self {
86 Self {
87 working_dir,
88 timeout_secs: 300, auto_setup: true,
90 }
91 }
92
93 pub fn with_timeout(mut self, secs: u64) -> Self {
95 self.timeout_secs = secs;
96 self
97 }
98
99 pub fn without_auto_setup(mut self) -> Self {
101 self.auto_setup = false;
102 self
103 }
104
105 pub fn has_pyproject(&self) -> bool {
107 self.working_dir.join("pyproject.toml").exists()
108 }
109
110 pub async fn has_pytest(&self) -> bool {
112 let result = Command::new("uv")
114 .args(["run", "pytest", "--version"])
115 .current_dir(&self.working_dir)
116 .stdout(Stdio::null())
117 .stderr(Stdio::null())
118 .status()
119 .await;
120
121 result.map(|s| s.success()).unwrap_or(false)
122 }
123
124 pub async fn setup_environment(&self) -> Result<()> {
127 log::info!("Setting up Python environment with uv");
128
129 if !self.has_pyproject() {
131 if self.auto_setup {
132 log::warn!(
133 "No pyproject.toml found. Project should be initialized via 'uv init' first."
134 );
135 log::info!("Attempting to run 'uv init' as fallback...");
136 let init_output = Command::new("uv")
137 .args(["init"])
138 .current_dir(&self.working_dir)
139 .stdout(Stdio::piped())
140 .stderr(Stdio::piped())
141 .output()
142 .await
143 .context("Failed to run uv init")?;
144
145 if !init_output.status.success() {
146 let stderr = String::from_utf8_lossy(&init_output.stderr);
147 log::warn!("uv init failed: {}", stderr);
148 return self.install_pytest_directly().await;
149 }
150 } else {
151 anyhow::bail!(
152 "No pyproject.toml found and auto_setup is disabled. Run 'uv init' first."
153 );
154 }
155 }
156
157 let output = Command::new("uv")
159 .args(["sync", "--dev"])
160 .current_dir(&self.working_dir)
161 .stdout(Stdio::piped())
162 .stderr(Stdio::piped())
163 .output()
164 .await
165 .context("Failed to run uv sync")?;
166
167 if !output.status.success() {
168 let stderr = String::from_utf8_lossy(&output.stderr);
169 log::warn!("uv sync failed: {}", stderr);
170 return self.install_pytest_directly().await;
172 }
173
174 log::info!("Python environment ready");
175 Ok(())
176 }
177
178 async fn install_pytest_directly(&self) -> Result<()> {
180 log::info!("Installing pytest via uv pip");
181
182 let output = Command::new("uv")
183 .args(["pip", "install", "pytest"])
184 .current_dir(&self.working_dir)
185 .stdout(Stdio::piped())
186 .stderr(Stdio::piped())
187 .output()
188 .await
189 .context("Failed to install pytest")?;
190
191 if !output.status.success() {
192 let stderr = String::from_utf8_lossy(&output.stderr);
193 anyhow::bail!("Failed to install pytest: {}", stderr);
194 }
195
196 Ok(())
197 }
198
199 pub async fn run_pytest(&self, test_args: &[&str]) -> Result<TestResults> {
203 log::info!("Running pytest in {}", self.working_dir.display());
204
205 if !self.has_pytest().await {
207 self.setup_environment().await?;
208 }
209
210 let mut args = vec!["run", "pytest", "-v", "--tb=short"];
212 args.extend(test_args);
213
214 let start = std::time::Instant::now();
215
216 let output = Command::new("uv")
217 .args(&args)
218 .current_dir(&self.working_dir)
219 .stdout(Stdio::piped())
220 .stderr(Stdio::piped())
221 .output()
222 .await
223 .context("Failed to run pytest")?;
224
225 let duration_ms = start.elapsed().as_millis() as u64;
226 let stdout = String::from_utf8_lossy(&output.stdout).to_string();
227 let stderr = String::from_utf8_lossy(&output.stderr).to_string();
228 let combined = format!("{}\n{}", stdout, stderr);
229
230 log::debug!("pytest exit code: {:?}", output.status.code());
231 if !stdout.is_empty() {
232 log::debug!("pytest stdout:\n{}", stdout);
233 }
234
235 let mut results = self.parse_pytest_output(&combined, duration_ms);
236 results.run_succeeded = true; if results.all_passed() {
240 log::info!("✅ Tests passed: {}/{}", results.passed, results.total);
241 } else {
242 log::info!(
243 "❌ Tests failed: {} passed, {} failed",
244 results.passed,
245 results.failed
246 );
247 }
248
249 Ok(results)
250 }
251
252 pub async fn run_test_files(&self, test_files: &[&Path]) -> Result<TestResults> {
254 let file_args: Vec<&str> = test_files.iter().filter_map(|p| p.to_str()).collect();
255
256 self.run_pytest(&file_args).await
257 }
258
259 fn parse_pytest_output(&self, output: &str, duration_ms: u64) -> TestResults {
261 let mut results = TestResults {
262 duration_ms,
263 output: output.to_string(),
264 ..Default::default()
265 };
266
267 for line in output.lines() {
269 let line = line.trim();
270
271 if (line.contains("passed") || line.contains("failed") || line.contains("error"))
273 && (line.contains(" in ") || line.starts_with('='))
274 {
275 let parts: Vec<&str> = line.split_whitespace().collect();
276 for i in 0..parts.len() {
277 if parts[i] == "passed" || parts[i] == "passed," {
278 if i > 0 {
279 if let Ok(n) = parts[i - 1].trim_matches(',').parse::<usize>() {
280 results.passed = n;
281 }
282 }
283 } else if parts[i] == "failed" || parts[i] == "failed," {
284 if i > 0 {
285 if let Ok(n) = parts[i - 1].trim_matches(',').parse::<usize>() {
286 results.failed = n;
287 }
288 }
289 } else if parts[i] == "skipped" || parts[i] == "skipped," {
290 if i > 0 {
291 if let Ok(n) = parts[i - 1].trim_matches(',').parse::<usize>() {
292 results.skipped = n;
293 }
294 }
295 } else if (parts[i] == "error" || parts[i] == "errors") && i > 0 {
296 if let Ok(n) = parts[i - 1].trim_matches(',').parse::<usize>() {
297 results.failed += n;
298 }
299 }
300 }
301 }
302
303 if line.starts_with("FAILED ") {
306 let failure = self.parse_failure_line(line);
307 results.failures.push(failure);
308 }
309 }
310
311 results.total = results.passed + results.failed + results.skipped;
312 results
313 }
314
315 fn parse_failure_line(&self, line: &str) -> TestFailure {
317 let rest = line.strip_prefix("FAILED ").unwrap_or(line);
319
320 let (test_path, message) = if let Some(idx) = rest.find(" - ") {
321 (&rest[..idx], rest[idx + 3..].to_string())
322 } else {
323 (rest, String::new())
324 };
325
326 let parts: Vec<&str> = test_path.split("::").collect();
328 let (file, name) = if parts.len() >= 2 {
329 (
330 Some(parts[0].to_string()),
331 parts.last().unwrap_or(&"").to_string(),
332 )
333 } else {
334 (None, test_path.to_string())
335 };
336
337 TestFailure {
338 name,
339 file,
340 line: None,
341 message,
342 criticality: Criticality::High, }
344 }
345
346 pub fn calculate_v_log(&self, results: &TestResults, contract: &BehavioralContract) -> f32 {
349 let gamma = contract.gamma(); let mut v_log = 0.0;
351
352 for failure in &results.failures {
353 let weight = contract
355 .weighted_tests
356 .iter()
357 .find(|wt| {
358 failure.name.contains(&wt.test_name) || wt.test_name.contains(&failure.name)
359 })
360 .map(|wt| wt.criticality.weight())
361 .unwrap_or(Criticality::High.weight()); v_log += gamma * weight;
364 }
365
366 v_log
367 }
368
369 pub fn match_weighted_tests(&self, results: &mut TestResults, contract: &BehavioralContract) {
371 for failure in &mut results.failures {
372 if let Some(wt) = contract.weighted_tests.iter().find(|wt| {
373 failure.name.contains(&wt.test_name) || wt.test_name.contains(&failure.name)
374 }) {
375 failure.criticality = wt.criticality;
376 }
377 }
378 }
379}
380
/// Convenience alias for the default test-runner implementation.
pub type TestRunner = PythonTestRunner;
384
#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::WeightedTest;

    /// Counts from a summary line are extracted and totalled correctly.
    #[test]
    fn test_parse_pytest_summary() {
        let summary = "===== 3 passed, 2 failed, 1 skipped in 0.12s =====";
        let parsed = PythonTestRunner::new(PathBuf::from(".")).parse_pytest_output(summary, 120);

        assert_eq!(parsed.passed, 3);
        assert_eq!(parsed.failed, 2);
        assert_eq!(parsed.skipped, 1);
        assert_eq!(parsed.total, 6);
    }

    /// A `FAILED file::Class::test - msg` line splits into file, name, msg.
    #[test]
    fn test_parse_pytest_failure_line() {
        let raw = "FAILED test_calculator.py::TestDivide::test_divide_by_zero - ZeroDivisionError";
        let parsed = PythonTestRunner::new(PathBuf::from(".")).parse_failure_line(raw);

        assert_eq!(parsed.name, "test_divide_by_zero");
        assert_eq!(parsed.file, Some("test_calculator.py".to_string()));
        assert!(parsed.message.contains("ZeroDivisionError"));
    }

    /// A single critical failure matched by the contract scores 20.0
    /// (gamma times the Critical weight).
    #[test]
    fn test_calculate_v_log() {
        let failing = TestFailure {
            name: "test_critical_feature".to_string(),
            file: None,
            line: None,
            message: String::new(),
            criticality: Criticality::Critical,
        };
        let results = TestResults {
            failures: vec![failing],
            ..Default::default()
        };

        let mut contract = BehavioralContract::new();
        contract.weighted_tests = vec![WeightedTest {
            test_name: "test_critical_feature".to_string(),
            criticality: Criticality::Critical,
        }];

        let v_log = PythonTestRunner::new(PathBuf::from(".")).calculate_v_log(&results, &contract);
        assert!((v_log - 20.0).abs() < 0.01);
    }
}