use tempfile::TempDir;
use std::fs;
use tokio::process::Command;
#[cfg_attr(coverage_nightly, coverage(off))]
#[cfg(test)]
mod mcp_integration_tests {
    use super::*;

    /// End-to-end: `pmat context --mode mcp` must write a unified context
    /// report containing every analysis section and both fixture functions.
    #[tokio::test]
    async fn test_mcp_mode_generates_unified_context() {
        let temp_dir = TempDir::new().unwrap();
        // TypeScript fixture with two exported functions the analyzer is
        // expected to detect.
        let ts_content = r#"
function calculateTotal(items: number[]): number {
return items.reduce((sum, item) => sum + item, 0);
}
function processData(data: string): object {
return JSON.parse(data);
}
export { calculateTotal, processData };
"#;
        fs::write(temp_dir.path().join("main.ts"), ts_content).unwrap();
        let output_file = temp_dir.path().join("mcp_context_output.md");
        // NOTE(review): assumes the binary was built to target/debug/pmat
        // relative to the current working directory — confirm in CI.
        let pmat_path = std::env::current_dir().unwrap().join("target/debug/pmat");
        let output = Command::new(&pmat_path)
            .args([
                "context",
                "--mode", "mcp",
                "--project-path", temp_dir.path().to_str().unwrap(),
                "--output", output_file.to_str().unwrap(),
            ])
            .output()
            .await
            .expect("Failed to run pmat in MCP mode");
        assert!(
            output.status.success(),
            "pmat context --mode mcp failed: {}",
            String::from_utf8_lossy(&output.stderr)
        );
        assert!(output_file.exists(), "MCP context output file not created");
        let content = fs::read_to_string(&output_file).unwrap();
        // Every required report section must appear in the MCP output.
        let required_sections = [
            ("## Big-O Complexity Analysis", "Missing Big-O analysis in MCP mode"),
            ("## Entropy Analysis", "Missing Entropy analysis in MCP mode"),
            ("## Provability Analysis", "Missing Provability analysis in MCP mode"),
            ("## Graph Metrics", "Missing Graph metrics in MCP mode"),
            ("## Technical Debt Gradient (TDG)", "Missing TDG analysis in MCP mode"),
            ("## Dead Code Analysis", "Missing Dead Code analysis in MCP mode"),
            ("## Self-Admitted Technical Debt (SATD)", "Missing SATD analysis in MCP mode"),
            ("## Quality Insights", "Missing Quality Insights in MCP mode"),
            ("## Recommendations", "Missing Recommendations in MCP mode"),
        ];
        for (section, message) in required_sections {
            assert!(content.contains(section), "{}", message);
        }
        assert!(content.contains("calculateTotal"),
            "Function name 'calculateTotal' not detected in MCP mode");
        assert!(content.contains("processData"),
            "Function name 'processData' not detected in MCP mode");
    }

    /// The MCP configuration must expose the unified-context tool together
    /// with all of its documented parameters.
    #[tokio::test]
    async fn test_mcp_server_exposes_unified_context_tool() {
        use std::path::Path;
        // The config location depends on the working directory the tests
        // run from; take the first candidate that exists.
        let config_paths = ["mcp.json", "server/mcp.json", "../server/mcp.json"];
        let mcp_config_path = config_paths
            .iter()
            .map(Path::new)
            .find(|p| p.exists())
            .expect("MCP configuration file not found at expected locations");
        let config_content = fs::read_to_string(mcp_config_path).unwrap();
        assert!(config_content.contains("generate_unified_context"),
            "Unified context tool not exposed in MCP configuration");
        assert!(config_content.contains("comprehensive unified context with advanced code analysis"),
            "Unified context tool description not found");
        assert!(config_content.contains("project_path"),
            "project_path parameter missing from MCP config");
        assert!(config_content.contains("output_file"),
            "output_file parameter missing from MCP config");
        assert!(config_content.contains("format"),
            "format parameter missing from MCP config");
        assert!(config_content.contains("skip_expensive_metrics"),
            "skip_expensive_metrics parameter missing from MCP config");
    }

    /// CLI (default) and MCP modes must agree on detected functions, SATD
    /// findings, and the number of report sections.
    #[tokio::test]
    async fn test_mcp_vs_cli_mode_consistency() {
        let temp_dir = TempDir::new().unwrap();
        // JavaScript fixture containing SATD markers (TODO/FIXME) and two
        // functions with recognizable complexity profiles.
        let test_code = r#"
// TODO: Optimize this algorithm
function fibonacciRecursive(n) {
if (n <= 1) return n;
return fibonacciRecursive(n - 1) + fibonacciRecursive(n - 2);
}
// FIXME: This has exponential time complexity
function bubbleSort(arr) {
for (let i = 0; i < arr.length; i++) {
for (let j = 0; j < arr.length - i - 1; j++) {
if (arr[j] > arr[j + 1]) {
[arr[j], arr[j + 1]] = [arr[j + 1], arr[j]];
}
}
}
return arr;
}
"#;
        fs::write(temp_dir.path().join("test.js"), test_code).unwrap();
        let cli_output = temp_dir.path().join("cli_output.md");
        let mcp_output = temp_dir.path().join("mcp_output.md");
        let pmat_path = std::env::current_dir().unwrap().join("target/debug/pmat");
        let cli_result = Command::new(&pmat_path)
            .args([
                "context",
                "--project-path", temp_dir.path().to_str().unwrap(),
                "--output", cli_output.to_str().unwrap(),
            ])
            .output()
            .await
            .expect("Failed to run CLI mode");
        let mcp_result = Command::new(&pmat_path)
            .args([
                "context",
                "--mode", "mcp",
                "--project-path", temp_dir.path().to_str().unwrap(),
                "--output", mcp_output.to_str().unwrap(),
            ])
            .output()
            .await
            .expect("Failed to run MCP mode");
        assert!(cli_result.status.success(), "CLI mode failed");
        assert!(mcp_result.status.success(), "MCP mode failed");
        let cli_content = fs::read_to_string(&cli_output).unwrap();
        let mcp_content = fs::read_to_string(&mcp_output).unwrap();
        assert!(
            cli_content.contains("fibonacciRecursive") && mcp_content.contains("fibonacciRecursive"),
            "Function detection inconsistent between CLI and MCP modes"
        );
        assert!(
            cli_content.contains("bubbleSort") && mcp_content.contains("bubbleSort"),
            "Function detection inconsistent between CLI and MCP modes"
        );
        assert!(cli_content.contains("TODO") || cli_content.contains("Technical Debt"),
            "CLI mode should detect SATD");
        assert!(mcp_content.contains("TODO") || mcp_content.contains("Technical Debt"),
            "MCP mode should detect SATD");
        // Section-count parity is a coarse proxy for structural equality
        // of the two reports.
        let cli_sections = cli_content.matches("##").count();
        let mcp_sections = mcp_content.matches("##").count();
        assert_eq!(cli_sections, mcp_sections,
            "CLI and MCP modes have different number of sections");
    }

    /// Smoke test for the pmat-agent binary over a small polyglot project.
    #[tokio::test]
    async fn test_sub_agent_integration() {
        let temp_dir = TempDir::new().unwrap();
        fs::write(temp_dir.path().join("main.rs"),
            "fn main() { println!(\"Hello, world!\"); }").unwrap();
        fs::write(temp_dir.path().join("utils.js"),
            "function helper() { return 42; }").unwrap();
        fs::write(temp_dir.path().join("data.ts"),
            "interface Data { value: number; }").unwrap();
        let agent_path = std::env::current_dir().unwrap().join("target/debug/pmat-agent");
        let output = Command::new(&agent_path)
            .args(["analyze", temp_dir.path().to_str().unwrap()])
            .output()
            .await
            .expect("Failed to run pmat-agent");
        if !output.status.success() {
            eprintln!("Agent failed: {}", String::from_utf8_lossy(&output.stderr));
        }
        // Deliberately lenient: a non-zero exit is tolerated as long as the
        // agent produced no error output.
        assert!(output.status.success() || output.stderr.is_empty(),
            "Sub-agent integration should work without critical errors");
    }

    /// Generates 50 small modules and verifies MCP-mode analysis finishes
    /// within a 30-second budget and produces a substantive report.
    #[tokio::test]
    async fn test_performance_with_large_project() {
        let temp_dir = TempDir::new().unwrap();
        for i in 0..50 {
            let content = format!(r#"
function process{}(data) {{
// TODO: Optimize this function
let result = [];
for (let j = 0; j < data.length; j++) {{
result.push(data[j] * 2);
}}
return result;
}}
function validate{}(input) {{
return input !== null && input !== undefined;
}}
"#, i, i);
            // `join` accepts `impl AsRef<Path>`, so the String is passed
            // directly instead of through a redundant borrow.
            fs::write(temp_dir.path().join(format!("module_{}.js", i)), content).unwrap();
        }
        let output_file = temp_dir.path().join("large_project_context.md");
        let start = std::time::Instant::now();
        let pmat_path = std::env::current_dir().unwrap().join("target/debug/pmat");
        let result = Command::new(&pmat_path)
            .args([
                "context",
                "--mode", "mcp",
                "--project-path", temp_dir.path().to_str().unwrap(),
                "--output", output_file.to_str().unwrap(),
                "--skip-expensive-metrics",
            ])
            .output()
            .await
            .expect("Failed to run large project analysis");
        let elapsed = start.elapsed();
        assert!(result.status.success(), "Large project analysis failed");
        assert!(elapsed.as_secs() < 30, "Analysis took too long: {} seconds", elapsed.as_secs());
        let content = fs::read_to_string(&output_file).unwrap();
        assert!(content.len() > 1000, "Output too short for large project");
        assert!(content.contains("## Key Components"), "Missing key components section");
        assert!(content.contains("## Quality Insights"), "Missing quality insights");
    }
}
#[cfg_attr(coverage_nightly, coverage(off))]
#[cfg(test)]
mod sub_agent_tdd_tests {
    use super::*;

    /// Runs the agent's unified-context workflow against a Python fixture
    /// containing SATD markers; tolerates a missing agent binary.
    #[tokio::test]
    async fn test_agent_workflow_orchestration() {
        let temp_dir = TempDir::new().unwrap();
        // Python fixture with nested logic plus TODO/HACK/FIXME markers.
        fs::write(temp_dir.path().join("complex.py"), r#"
def complex_function(data):
"""TODO: Refactor this complex logic"""
result = []
for item in data:
if isinstance(item, dict):
for key, value in item.items():
if value > 10:
result.append(key)
return result
class DataProcessor:
def __init__(self):
self.cache = {} # HACK: Global cache
def process(self, items):
# FIXME: Memory leak potential
return [x * 2 for x in items]
"#).unwrap();
        let agent_path = std::env::current_dir().unwrap().join("target/debug/pmat-agent");
        let workflow_result = Command::new(&agent_path)
            .args(["execute", "unified-context", "--project", temp_dir.path().to_str().unwrap()])
            .output()
            .await;
        // The agent binary may not be built in every environment; only
        // assert on output when the command both spawned and succeeded.
        if let Ok(output) = workflow_result {
            if output.status.success() {
                let stdout = String::from_utf8_lossy(&output.stdout);
                assert!(!stdout.is_empty(), "Agent should produce output");
            }
        }
    }

    /// Runs the agent's quality gate against deliberately problematic JS
    /// (deep nesting, dead code); tolerates a missing agent binary.
    #[tokio::test]
    async fn test_quality_gate_integration() {
        let temp_dir = TempDir::new().unwrap();
        fs::write(temp_dir.path().join("problematic.js"), r#"
function badFunction() {
var x = 1;
if (x == 1) {
if (x == 1) {
if (x == 1) {
if (x == 1) {
console.log("deeply nested");
}
}
}
}
}
function unused() {
return "never called";
}
"#).unwrap();
        let agent_path = std::env::current_dir().unwrap().join("target/debug/pmat-agent");
        let quality_result = Command::new(&agent_path)
            .args(["quality-gate", temp_dir.path().to_str().unwrap()])
            .output()
            .await;
        // Spawn failure (missing binary) is tolerated; when the command
        // did run, it must show some evidence of quality analysis.
        if let Ok(output) = quality_result {
            let stdout = String::from_utf8_lossy(&output.stdout);
            let stderr = String::from_utf8_lossy(&output.stderr);
            assert!(
                output.status.success()
                    || stdout.contains("quality")
                    || stderr.contains("complexity"),
                "Quality gate should analyze code"
            );
        }
    }
}
#[cfg_attr(coverage_nightly, coverage(off))]
#[cfg(test)]
mod property_based_mcp_tests {
    use proptest::prelude::*;

    proptest! {
        // Scaffold property: the generated name list is never empty. The
        // 1..50 strategy already guarantees this, so the body is a
        // placeholder until a real MCP invariant is wired in.
        #[test]
        fn prop_mcp_mode_preserves_function_count(function_names in proptest::collection::vec(".*", 1..50)) {
            let name_count = function_names.len();
            prop_assert!(name_count >= 1);
        }

        // Scaffold property: a project name drawn from ".{1,100}" is
        // non-empty by construction; placeholder for a real section check.
        #[test]
        fn prop_mcp_output_has_required_sections(project_name in ".{1,100}") {
            let has_name = !project_name.is_empty();
            prop_assert!(has_name);
        }
    }
}
#[cfg_attr(coverage_nightly, coverage(off))]
#[cfg(test)]
mod extreme_tdd_verification {
    use super::*;

    /// Default-mode context output must contain every required analysis
    /// section, and function detection must work across JS/TS/Python/Rust.
    #[tokio::test]
    async fn test_unified_context_extreme_quality() {
        let required_annotations = [
            "Big-O Complexity Analysis",
            "Entropy Analysis",
            "Provability Analysis",
            "Graph Metrics",
            "Technical Debt Gradient (TDG)",
            "Dead Code Analysis",
            "Self-Admitted Technical Debt (SATD)",
            "Quality Insights",
            "Recommendations",
        ];
        let temp_dir = TempDir::new().unwrap();
        fs::write(temp_dir.path().join("test.js"),
            "function test() { return 42; }").unwrap();
        // Resolve the binary path once; it is reused for every per-language
        // run below instead of being recomputed inside the loop.
        let pmat_path = std::env::current_dir().unwrap().join("target/debug/pmat");
        let output = Command::new(&pmat_path)
            .args(["context", "--project-path", temp_dir.path().to_str().unwrap()])
            .output()
            .await
            .expect("Failed to run pmat context");
        assert!(output.status.success(), "Context generation should succeed");
        let stdout = String::from_utf8_lossy(&output.stdout);
        for annotation in required_annotations {
            assert!(stdout.contains(annotation),
                "Missing required annotation: {}", annotation);
        }
        // One minimal single-function fixture per supported language.
        let language_tests = [
            ("test.js", "function jsTest() { return 1; }"),
            ("test.ts", "function tsTest(): number { return 1; }"),
            ("test.py", "def py_test():\n return 1"),
            ("test.rs", "fn rust_test() -> i32 { 1 }"),
        ];
        for (filename, code) in language_tests {
            let lang_dir = TempDir::new().unwrap();
            fs::write(lang_dir.path().join(filename), code).unwrap();
            let lang_output = Command::new(&pmat_path)
                .args(["context", "--project-path", lang_dir.path().to_str().unwrap()])
                .output()
                .await
                // unwrap_or_else avoids building the panic message on the
                // success path (clippy::expect_fun_call).
                .unwrap_or_else(|e| panic!("Failed to analyze {}: {}", filename, e));
            assert!(lang_output.status.success(),
                "Should analyze {} successfully", filename);
            let content = String::from_utf8_lossy(&lang_output.stdout);
            assert!(content.contains("Functions: ") && !content.contains("Functions: 0"),
                "Should detect functions in {}", filename);
        }
    }

    /// The MCP config must be valid JSON exposing the unified-context tool
    /// with a description and the expected input-schema properties.
    #[test]
    fn test_mcp_configuration_completeness() {
        use std::path::Path;
        // The config location depends on the working directory the tests
        // run from; take the first candidate that exists.
        let config_paths = ["mcp.json", "server/mcp.json", "../server/mcp.json"];
        let config_path = config_paths
            .iter()
            .map(Path::new)
            .find(|p| p.exists())
            .expect("MCP config must exist at one of the expected locations");
        let config = fs::read_to_string(config_path).unwrap();
        let parsed: serde_json::Value = serde_json::from_str(&config)
            .expect("MCP config must be valid JSON");
        assert!(parsed["mcp"]["tools"].is_object(), "Must have tools section");
        assert!(parsed["mcp"]["tools"]["generate_unified_context"].is_object(),
            "Must expose unified context tool");
        let tool = &parsed["mcp"]["tools"]["generate_unified_context"];
        assert!(tool["description"].is_string(), "Tool must have description");
        assert!(tool["inputSchema"]["properties"].is_object(), "Tool must have input schema");
        let props = &tool["inputSchema"]["properties"];
        assert!(props["project_path"].is_object(), "Must support project_path");
        assert!(props["format"].is_object(), "Must support format");
        assert!(props["skip_expensive_metrics"].is_object(), "Must support skip_expensive_metrics");
    }
}