// ferrous_forge/safety/checks/test.rs
use crate::Result;
4use std::path::Path;
5use std::process::Command;
6use std::time::Instant;
7
8use super::SafetyCheck;
9use crate::safety::{CheckType, report::CheckResult};
10
/// Safety check that runs the project's full test suite via `cargo test`.
pub struct TestCheck;
13
14impl SafetyCheck for TestCheck {
15 async fn run(project_path: &Path) -> Result<CheckResult> {
16 run(project_path).await
17 }
18
19 fn name() -> &'static str {
20 "test"
21 }
22
23 fn description() -> &'static str {
24 "Runs the complete test suite"
25 }
26}
27
28pub async fn run(project_path: &Path) -> Result<CheckResult> {
34 let start = Instant::now();
35 let mut result = CheckResult::new(CheckType::Test);
36
37 let output = Command::new("cargo")
39 .current_dir(project_path)
40 .args(&["test", "--all-targets", "--all-features"])
41 .output()?;
42
43 result.set_duration(start.elapsed());
44
45 if !output.status.success() {
46 handle_test_failures(&mut result, &output);
47 } else {
48 handle_test_success(&mut result, &output);
49 }
50
51 Ok(result)
52}
53
54fn handle_test_failures(result: &mut CheckResult, output: &std::process::Output) {
56 result.add_error("Tests failed");
57 result.add_suggestion("Fix failing tests before proceeding");
58
59 let stdout = String::from_utf8_lossy(&output.stdout);
60 let stderr = String::from_utf8_lossy(&output.stderr);
61
62 let failure_count = parse_test_failures(result, &stdout, &stderr);
63
64 if failure_count >= 5 {
65 result.add_error("... and more test failures (showing first 5)");
66 }
67
68 result.add_suggestion("Run 'cargo test' to see detailed test output");
69 result.add_suggestion("Check test logic and fix failing assertions");
70}
71
72fn parse_test_failures(result: &mut CheckResult, stdout: &str, stderr: &str) -> usize {
74 let mut failure_count = 0;
75 let mut in_failure = false;
76
77 for line in stdout.lines().chain(stderr.lines()) {
78 if line.starts_with("test ") && line.contains("FAILED") && failure_count < 5 {
79 result.add_error(format!("Test failure: {}", line.trim()));
80 failure_count += 1;
81 } else if line.starts_with("---- ") && line.contains("stdout ----") {
82 in_failure = true;
83 } else if in_failure && !line.trim().is_empty() && failure_count <= 5 {
84 result.add_context(format!("Test output: {}", line.trim()));
85 in_failure = false;
86 } else if line.contains("test result:") && line.contains("FAILED") {
87 result.add_error(line.trim().to_string());
88 }
89 }
90
91 failure_count
92}
93
94fn handle_test_success(result: &mut CheckResult, output: &std::process::Output) {
96 let stdout = String::from_utf8_lossy(&output.stdout);
97
98 for line in stdout.lines() {
99 if line.contains("test result: ok.") {
100 result.add_context(format!("Tests: {}", line.trim()));
101 return;
102 }
103 }
104
105 result.add_context("All tests passed");
106}
107
#[cfg(test)]
#[allow(clippy::unwrap_used, clippy::expect_used, clippy::panic)]
mod tests {
    use super::*;
    use tempfile::TempDir;
    use tokio::fs;

    /// End-to-end check: `run` against a minimal crate containing one
    /// passing unit test should report success.
    ///
    /// NOTE(review): this shells out to a real `cargo test`, so it needs
    /// cargo on PATH and is comparatively slow for a unit test.
    #[tokio::test]
    async fn test_test_check_on_project_with_tests() {
        // Scratch crate lives in a temp dir that is cleaned up on drop.
        let temp_dir = TempDir::new().unwrap();

        let cargo_toml = r#"
[package]
name = "test"
version = "0.1.0"
edition = "2021"
"#;
        fs::write(temp_dir.path().join("Cargo.toml"), cargo_toml)
            .await
            .unwrap();

        fs::create_dir_all(temp_dir.path().join("src"))
            .await
            .unwrap();

        // Library source with a single passing test.
        let lib_rs = r#"
pub fn add(a: i32, b: i32) -> i32 {
    a + b
}

#[cfg(test)]
#[allow(clippy::unwrap_used, clippy::expect_used, clippy::panic)]
mod tests {
    use super::*;

    #[test]
    fn test_add() {
        assert_eq!(add(2, 2), 4);
    }
}
"#;
        fs::write(temp_dir.path().join("src/lib.rs"), lib_rs)
            .await
            .unwrap();

        let result = run(temp_dir.path()).await.unwrap();

        assert!(result.passed);
    }

    /// Sanity checks on the trait metadata exposed by `TestCheck`.
    #[test]
    fn test_test_check_struct() {
        assert_eq!(TestCheck::name(), "test");
        assert!(!TestCheck::description().is_empty());
    }
}