ferrous_forge/safety/checks/
test.rs1use crate::Result;
4use std::path::Path;
5use std::process::Command;
6use std::time::Instant;
7
8use super::SafetyCheck;
9use crate::safety::{CheckType, report::CheckResult};
10
11pub struct TestCheck;
13
14impl SafetyCheck for TestCheck {
15 async fn run(project_path: &Path) -> Result<CheckResult> {
16 run(project_path).await
17 }
18
19 fn name() -> &'static str {
20 "test"
21 }
22
23 fn description() -> &'static str {
24 "Runs the complete test suite"
25 }
26}
27
28pub async fn run(project_path: &Path) -> Result<CheckResult> {
30 let start = Instant::now();
31 let mut result = CheckResult::new(CheckType::Test);
32
33 let output = Command::new("cargo")
35 .current_dir(project_path)
36 .args(&["test", "--all-targets", "--all-features"])
37 .output()?;
38
39 result.set_duration(start.elapsed());
40
41 if !output.status.success() {
42 handle_test_failures(&mut result, &output);
43 } else {
44 handle_test_success(&mut result, &output);
45 }
46
47 Ok(result)
48}
49
50fn handle_test_failures(result: &mut CheckResult, output: &std::process::Output) {
52 result.add_error("Tests failed");
53 result.add_suggestion("Fix failing tests before proceeding");
54
55 let stdout = String::from_utf8_lossy(&output.stdout);
56 let stderr = String::from_utf8_lossy(&output.stderr);
57
58 let failure_count = parse_test_failures(result, &stdout, &stderr);
59
60 if failure_count >= 5 {
61 result.add_error("... and more test failures (showing first 5)");
62 }
63
64 result.add_suggestion("Run 'cargo test' to see detailed test output");
65 result.add_suggestion("Check test logic and fix failing assertions");
66}
67
68fn parse_test_failures(result: &mut CheckResult, stdout: &str, stderr: &str) -> usize {
70 let mut failure_count = 0;
71 let mut in_failure = false;
72
73 for line in stdout.lines().chain(stderr.lines()) {
74 if line.starts_with("test ") && line.contains("FAILED") && failure_count < 5 {
75 result.add_error(format!("Test failure: {}", line.trim()));
76 failure_count += 1;
77 } else if line.starts_with("---- ") && line.contains("stdout ----") {
78 in_failure = true;
79 } else if in_failure && !line.trim().is_empty() && failure_count <= 5 {
80 result.add_context(format!("Test output: {}", line.trim()));
81 in_failure = false;
82 } else if line.contains("test result:") && line.contains("FAILED") {
83 result.add_error(line.trim().to_string());
84 }
85 }
86
87 failure_count
88}
89
90fn handle_test_success(result: &mut CheckResult, output: &std::process::Output) {
92 let stdout = String::from_utf8_lossy(&output.stdout);
93
94 for line in stdout.lines() {
95 if line.contains("test result: ok.") {
96 result.add_context(format!("Tests: {}", line.trim()));
97 return;
98 }
99 }
100
101 result.add_context("All tests passed");
102}
103
#[cfg(test)]
#[allow(clippy::unwrap_used, clippy::expect_used, clippy::panic)]
mod tests {
    use super::*;
    use tempfile::TempDir;
    use tokio::fs;

    /// End-to-end: scaffold a minimal crate with one passing unit test and
    /// verify the check reports success.
    #[tokio::test]
    async fn test_test_check_on_project_with_tests() {
        let project = TempDir::new().unwrap();

        // Minimal manifest for a library crate.
        let manifest = r#"
[package]
name = "test"
version = "0.1.0"
edition = "2021"
"#;
        fs::write(project.path().join("Cargo.toml"), manifest)
            .await
            .unwrap();

        fs::create_dir_all(project.path().join("src"))
            .await
            .unwrap();

        // Library source containing a single passing test.
        let source = r#"
pub fn add(a: i32, b: i32) -> i32 {
    a + b
}

#[cfg(test)]
#[allow(clippy::unwrap_used, clippy::expect_used, clippy::panic)]
mod tests {
    use super::*;

    #[test]
    fn test_add() {
        assert_eq!(add(2, 2), 4);
    }
}
"#;
        fs::write(project.path().join("src/lib.rs"), source)
            .await
            .unwrap();

        let outcome = run(project.path()).await.unwrap();
        assert!(outcome.passed);
    }

    /// Static metadata exposed through the `SafetyCheck` impl.
    #[test]
    fn test_test_check_struct() {
        assert_eq!(TestCheck::name(), "test");
        assert!(!TestCheck::description().is_empty());
    }
}