//! Test Smell detector for common test anti-patterns.
//!
//! Detects common test smells that indicate problematic test design:
//! 1. Over-mocked tests (5+ mocks) - tests that mock so much they test nothing
//! 2. Flaky patterns - time.sleep(), datetime.now() without freezing
//! 3. Assert-free tests - tests without assertions (test nothing)
use std::collections::HashSet;
use crate::detectors::base::{Detector, DetectorConfig, DetectorResult};
use crate::graph::GraphClient;
use crate::models::{Finding, Severity};
/// Test smell detector
///
/// Detects:
/// - Over-mocked tests: Functions with 5+ @patch decorators
/// - Flaky patterns: time-sensitive operations without freezing
/// - Assert-free tests: Test functions without any assertions
pub struct TestSmellDetector {
    // Base detector configuration.
    // NOTE(review): `config` is never read in this file — confirm it is
    // consumed via the `Detector` trait or remove it.
    config: DetectorConfig,
    /// Threshold for over-mocking detection (minimum number of mock-related
    /// decorators on a test before it is reported)
    over_mock_threshold: usize,
    /// Minimum lines to report assert-free test
    /// NOTE(review): assert-free detection is not yet implemented (see
    /// `detect`), so this field is currently unused.
    min_lines_for_no_assert: usize,
    /// Maximum findings to report (applied per smell category)
    max_findings: usize,
}
impl TestSmellDetector {
/// Create a new test smell detector
pub fn new() -> Self {
Self {
config: DetectorConfig::default(),
over_mock_threshold: 5,
min_lines_for_no_assert: 5,
max_findings: 100,
}
}
/// Set over-mock threshold
pub fn with_mock_threshold(mut self, threshold: usize) -> Self {
self.over_mock_threshold = threshold;
self
}
/// Known non-deterministic call names paired with remediation advice.
///
/// Each entry is `(callable name, suggested fix)`; matching against a
/// test's called functions happens in `find_flaky_tests`.
fn flaky_calls() -> Vec<(&'static str, &'static str)> {
    // Shared advice strings, hoisted so each appears exactly once.
    const TIME_FIX: &str = "Use pytest-timeout or freezegun for time-dependent tests";
    const FREEZE_FIX: &str = "Use freezegun (@freeze_time) to freeze time in tests";
    const SEED_FIX: &str = "Seed random with a fixed value or mock for determinism";
    vec![
        ("time.sleep", TIME_FIX),
        ("sleep", TIME_FIX),
        ("datetime.now", FREEZE_FIX),
        ("datetime.utcnow", FREEZE_FIX),
        ("date.today", FREEZE_FIX),
        ("time.time", "Use freezegun or mock time.time for deterministic tests"),
        ("random.random", SEED_FIX),
        ("random.randint", SEED_FIX),
        ("random.choice", SEED_FIX),
        ("uuid.uuid4", "Mock uuid4 for deterministic test results"),
    ]
}
/// Find over-mocked tests (tests with many decorators).
///
/// Queries the graph for test-named functions with at least
/// `over_mock_threshold` decorators, then filters client-side to the
/// mock-related ones and reports those still at/above the threshold.
fn find_over_mocked_tests(&self, graph: &GraphClient) -> anyhow::Result<Vec<Finding>> {
    // The size() pre-filter is a cheap server-side upper bound; the
    // precise mock-decorator count is computed below.
    let query = format!(
        r#"
        MATCH (f:Function)
        WHERE (f.name STARTS WITH 'test_' OR f.name STARTS WITH 'test')
          AND f.decorators IS NOT NULL
          AND size(f.decorators) >= {}
        RETURN f.qualifiedName AS func_name,
               f.name AS func_simple_name,
               f.filePath AS file_path,
               f.lineStart AS func_line,
               f.decorators AS decorators,
               size(f.decorators) AS decorator_count
        ORDER BY decorator_count DESC
        LIMIT {}
        "#,
        self.over_mock_threshold, self.max_findings
    );

    let mut findings = Vec::new();
    for row in graph.execute(&query)? {
        let qualified = row.get_string("func_name").unwrap_or_default();
        let simple = row.get_string("func_simple_name").unwrap_or_default();
        let path = row.get_string("file_path").unwrap_or_default();
        let line = row.get_i64("func_line");
        let all_decorators: Vec<String> =
            row.get_string_array("decorators").unwrap_or_default();

        // Keep only decorators that look mock-related.
        let mocks: Vec<&String> = all_decorators
            .iter()
            .filter(|d| Self::is_mock_decorator(d))
            .collect();
        if mocks.len() < self.over_mock_threshold {
            continue;
        }
        if findings.len() >= self.max_findings {
            break;
        }

        findings.push(self.create_over_mocked_finding(&qualified, &simple, &path, line, &mocks));
    }
    Ok(findings)
}
/// Check if a decorator name is mock-related.
///
/// Matches case-insensitively on the substrings "patch" or "mock", which
/// covers `@patch`, `@mock.patch`, `@Mock`, `@MagicMock`, etc.
fn is_mock_decorator(decorator: &str) -> bool {
    // The lowercased "mock" substring check already matches the exact
    // names "Mock" and "MagicMock", so no separate equality arms needed.
    let decorator_lower = decorator.to_lowercase();
    decorator_lower.contains("patch") || decorator_lower.contains("mock")
}
/// Build a Finding for a test with `mock_decorators.len()` mock decorators.
///
/// `func_name` is the qualified name (used for the finding id and affected
/// node), `func_simple_name` the display name, `func_line` the test's
/// starting line if known.
fn create_over_mocked_finding(
    &self,
    func_name: &str,
    func_simple_name: &str,
    file_path: &str,
    func_line: Option<i64>,
    mock_decorators: &[&String],
) -> Finding {
    let mock_count = mock_decorators.len();
    // 8+ mocks is treated as a stronger signal than the base threshold.
    let severity = if mock_count >= 8 {
        Severity::High
    } else {
        Severity::Medium
    };

    // Show at most 7 decorators; summarize the remainder as part of the
    // list itself. (Previously the "... and N more" line was appended to
    // the very end of the description, after the advice paragraph, which
    // made it read as an extra bullet of advice.)
    let mut decorators_display: Vec<String> = mock_decorators
        .iter()
        .take(7)
        .map(|d| format!("- `@{}`", d))
        .collect();
    if mock_count > 7 {
        decorators_display.push(format!("- ... and {} more", mock_count - 7));
    }

    let description = format!(
        "Test `{}` has **{} mock decorators**.\n\n\
         Decorators:\n{}\n\n\
         When a test requires this many mocks, it often indicates:\n\
         - The test is not testing the actual behavior\n\
         - The code under test has too many dependencies\n\
         - The test might pass even when the code is broken",
        func_simple_name,
        mock_count,
        decorators_display.join("\n")
    );

    let suggestion = r#"**Consider these refactoring approaches:**
1. **Split the test**: Break into smaller, focused tests that each mock fewer dependencies
2. **Refactor the code under test**: High mock count often indicates the function does too much. Extract dependencies into separate classes/functions.
3. **Use integration tests**: If testing the integration is important, consider fewer mocks and more real objects.
4. **Use dependency injection**: Make dependencies explicit parameters rather than internal calls."#;

    Finding {
        id: format!("test_over_mocked_{}", func_name),
        detector: "TestSmellDetector".to_string(),
        severity,
        title: format!("Over-mocked test: {} ({} mocks)", func_simple_name, mock_count),
        description,
        affected_nodes: vec![func_name.to_string()],
        affected_files: if file_path.is_empty() {
            vec![]
        } else {
            vec![file_path.to_string()]
        },
        line_start: func_line,
        line_end: None,
        suggested_fix: Some(suggestion.to_string()),
        estimated_effort: Some("Medium (1-2 hours)".to_string()),
        confidence: 0.90,
        tags: vec![
            "test_smell".to_string(),
            "over_mocked".to_string(),
            "test_quality".to_string(),
        ],
        metadata: serde_json::json!({
            "smell_type": "over_mocked",
            "mock_count": mock_count,
            "mock_decorators": mock_decorators,
            "test_name": func_simple_name,
        }),
    }
}
/// Find tests that call flaky functions.
///
/// Fetches, per test-named function, the distinct names of everything it
/// calls, then matches those names against `flaky_calls()` client-side.
fn find_flaky_tests(&self, graph: &GraphClient) -> anyhow::Result<Vec<Finding>> {
    let mut findings = Vec::new();
    // NOTE(review): LIMIT is hard-coded to 200 rather than derived from
    // `max_findings` — confirm this cap is intentional.
    let query = r#"
        MATCH (f:Function)-[:CALLS]->(target)
        WHERE (f.name STARTS WITH 'test_' OR f.name STARTS WITH 'test')
          AND target.name IS NOT NULL
        RETURN f.qualifiedName AS func_name,
               f.name AS func_simple_name,
               f.filePath AS file_path,
               f.lineStart AS func_line,
               collect(DISTINCT target.name) AS called_functions
        LIMIT 200
    "#;
    let results = graph.execute(query)?;
    for row in results {
        let called: Vec<String> = row
            .get_string_array("called_functions")
            .unwrap_or_default();
        // A called name matches either exactly ("sleep") or as a qualified
        // suffix ("time.sleep" matched by "...time.sleep").
        let flaky_found: Vec<(&str, &str)> = Self::flaky_calls()
            .into_iter()
            .filter(|(name, _)| {
                called.iter().any(|c| c == *name || c.ends_with(&format!(".{}", name)))
            })
            .collect();
        if flaky_found.is_empty() {
            continue;
        }
        if findings.len() >= self.max_findings {
            break;
        }
        findings.push(self.create_flaky_finding(&row, &flaky_found));
    }
    Ok(findings)
}
/// Build a Finding for a test that calls flaky functions.
///
/// `flaky_calls` holds `(call name, suggested fix)` pairs detected for this
/// test; the function name/location are re-read from `row`.
fn create_flaky_finding(
    &self,
    row: &crate::graph::QueryRow,
    flaky_calls: &[(&str, &str)],
) -> Finding {
    let func_name = row.get_string("func_name").unwrap_or_default();
    let func_simple_name = row.get_string("func_simple_name").unwrap_or_default();
    let file_path = row.get_string("file_path").unwrap_or_default();
    let func_line = row.get_i64("func_line");

    let calls_display: Vec<String> = flaky_calls
        .iter()
        .map(|(name, _)| format!("- `{}`", name))
        .collect();

    // Deduplicate suggestions while preserving first-seen order so the
    // report text is deterministic. (The previous version iterated a
    // HashSet directly, which yields an arbitrary order per run.)
    let mut seen: HashSet<&str> = HashSet::new();
    let mut unique_suggestions: Vec<&str> = Vec::new();
    for (_, s) in flaky_calls {
        if seen.insert(s) {
            unique_suggestions.push(s);
        }
    }

    let description = format!(
        "Test `{}` uses potentially flaky patterns:\n\n{}\n\n\
         These patterns can cause tests to:\n\
         - Pass on one machine but fail on another\n\
         - Fail intermittently based on timing\n\
         - Produce non-deterministic results",
        func_simple_name,
        calls_display.join("\n")
    );

    let mut suggestion = "**Recommended fixes:**\n\n".to_string();
    for s in &unique_suggestions {
        suggestion.push_str(&format!("- {}\n", s));
    }
    // sleep-based flakiness gets extra, targeted advice.
    let has_sleep = flaky_calls.iter().any(|(n, _)| n.contains("sleep"));
    if has_sleep {
        suggestion.push_str(
            "\n**For time.sleep specifically:**\n\
             - Use `pytest-timeout` for timeouts\n\
             - Use `freezegun` to freeze time\n\
             - Mock the sleep call if testing retry logic",
        );
    }

    // Two or more distinct flaky patterns raise the severity.
    let severity = if flaky_calls.len() >= 2 {
        Severity::Medium
    } else {
        Severity::Low
    };

    Finding {
        id: format!("test_flaky_{}", func_name),
        detector: "TestSmellDetector".to_string(),
        severity,
        title: format!("Flaky test pattern in {}", func_simple_name),
        description,
        affected_nodes: vec![func_name],
        affected_files: if file_path.is_empty() {
            vec![]
        } else {
            vec![file_path]
        },
        line_start: func_line,
        line_end: None,
        suggested_fix: Some(suggestion),
        estimated_effort: Some("Small (30-60 minutes)".to_string()),
        confidence: 0.80,
        tags: vec![
            "test_smell".to_string(),
            "flaky".to_string(),
            "test_quality".to_string(),
            "non_deterministic".to_string(),
        ],
        metadata: serde_json::json!({
            "smell_type": "flaky_pattern",
            "flaky_calls": flaky_calls.iter().map(|(n, _)| n).collect::<Vec<_>>(),
            "test_name": func_simple_name,
        }),
    }
}
}
impl Default for TestSmellDetector {
fn default() -> Self {
Self::new()
}
}
impl Detector for TestSmellDetector {
    fn name(&self) -> &'static str {
        "TestSmellDetector"
    }

    fn description(&self) -> &'static str {
        "Detects test anti-patterns: over-mocking, flaky patterns, assert-free tests"
    }

    /// Run every sub-detector; a failure in one is logged and skipped so a
    /// single failing query never aborts the whole run.
    fn detect(&self, graph: &GraphClient) -> DetectorResult {
        let mut findings = Vec::new();

        match self.find_over_mocked_tests(graph) {
            Ok(batch) => findings.extend(batch),
            Err(e) => tracing::warn!("Failed to find over-mocked tests: {}", e),
        }

        match self.find_flaky_tests(graph) {
            Ok(batch) => findings.extend(batch),
            Err(e) => tracing::warn!("Failed to find flaky tests: {}", e),
        }

        // Assert-free test detection is intentionally absent: it needs
        // AST-level source analysis to spot assertion statements.
        Ok(findings)
    }

    fn is_dependent(&self) -> bool {
        false
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_is_mock_decorator() {
        // Every mock-ish decorator name must match, regardless of case.
        for dec in ["patch", "mock.patch", "Mock", "MagicMock"] {
            assert!(TestSmellDetector::is_mock_decorator(dec), "{dec} should match");
        }
        assert!(!TestSmellDetector::is_mock_decorator("pytest.fixture"));
    }

    #[test]
    fn test_flaky_calls() {
        let names: Vec<&str> = TestSmellDetector::flaky_calls()
            .into_iter()
            .map(|(name, _)| name)
            .collect();
        assert!(names.contains(&"time.sleep"));
        assert!(names.contains(&"datetime.now"));
    }
}