let analyzer = LanguageAnalyzer::new();
let content =
"const result = eval(userInput);\ndocument.write(html);\nelem.innerHTML = data;";
let result = analyzer
.analyze_security(content, Language::JavaScript)
.await;
assert!(result.success);
let count = result.data["issues_count"].as_u64().unwrap();
assert_eq!(count, 3);
}
#[tokio::test]
async fn test_analyze_security_python() {
    let analyzer = LanguageAnalyzer::new();
    // One expected finding per line: exec(, eval(, os.system.
    let content = "exec(user_code)\neval(expression)\nos.system(cmd)";
    let result = analyzer.analyze_security(content, Language::Python).await;
    // Consistency fix: assert success before indexing `data`, matching the
    // sibling security tests, so a failed analysis reports clearly instead
    // of panicking on a missing JSON key.
    assert!(result.success);
    let count = result.data["issues_count"].as_u64().unwrap();
    assert_eq!(count, 3);
}
#[tokio::test]
async fn test_analyze_security_sql() {
    let analyzer = LanguageAnalyzer::new();
    let content = sample_sql_code();
    let result = analyzer.analyze_security(content, Language::SQL).await;
    // Consistency fix: verify the analysis succeeded before reading `data`.
    assert!(result.success);
    let count = result.data["issues_count"].as_u64().unwrap();
    // Lower bound only: the shared SQL fixture presumably contains at least
    // three flagged statements (e.g. DROP/DELETE/UPDATE) — exact count is
    // owned by the fixture, not this test.
    assert!(count >= 3);
}
#[tokio::test]
async fn test_analyze_security_generic_patterns() {
    let analyzer = LanguageAnalyzer::new();
    // Expected count is 4, not 3: line 1 apparently matches both the
    // `password` pattern and the `secret` substring inside 'secret123';
    // lines 2 and 3 match `secret` and `token` once each.
    let content = "const password = 'secret123';\nconst secret = 'abc';\nconst token = 'xyz';";
    let result = analyzer.analyze_security(content, Language::Rust).await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let count = result.data["issues_count"].as_u64().unwrap();
    assert_eq!(count, 4);
}
#[tokio::test]
async fn test_analyze_security_no_issues() {
    let analyzer = LanguageAnalyzer::new();
    // Benign code containing none of the security patterns.
    let content = "fn main() {\n let x = 5;\n}";
    let result = analyzer.analyze_security(content, Language::Rust).await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let count = result.data["issues_count"].as_u64().unwrap();
    assert_eq!(count, 0);
}
#[test]
fn test_get_security_patterns_javascript() {
    // The JavaScript pattern set must include the classic injection sinks.
    let analyzer = LanguageAnalyzer::new();
    let patterns = analyzer.get_security_patterns(Language::JavaScript);
    for expected in ["eval(", "innerHTML", "document.write"] {
        assert!(patterns.contains(&expected), "missing pattern: {}", expected);
    }
}
#[test]
fn test_get_security_patterns_typescript() {
    // TypeScript shares the key JavaScript sinks.
    let analyzer = LanguageAnalyzer::new();
    let patterns = analyzer.get_security_patterns(Language::TypeScript);
    for expected in ["eval(", "innerHTML"] {
        assert!(patterns.contains(&expected), "missing pattern: {}", expected);
    }
}
#[test]
fn test_get_security_patterns_python() {
    // Python's dynamic-execution and shell-out patterns must be present.
    let analyzer = LanguageAnalyzer::new();
    let patterns = analyzer.get_security_patterns(Language::Python);
    for expected in ["exec(", "eval(", "os.system"] {
        assert!(patterns.contains(&expected), "missing pattern: {}", expected);
    }
}
#[test]
fn test_get_security_patterns_sql() {
    // Destructive SQL keywords are treated as security patterns.
    let analyzer = LanguageAnalyzer::new();
    let patterns = analyzer.get_security_patterns(Language::SQL);
    for expected in ["DROP", "DELETE", "UPDATE"] {
        assert!(patterns.contains(&expected), "missing pattern: {}", expected);
    }
}
#[test]
fn test_get_security_patterns_generic() {
    // Languages without a dedicated set fall back to credential patterns.
    let analyzer = LanguageAnalyzer::new();
    let patterns = analyzer.get_security_patterns(Language::Rust);
    for expected in ["password", "secret", "token"] {
        assert!(patterns.contains(&expected), "missing pattern: {}", expected);
    }
}
#[tokio::test]
async fn test_analyze_style() {
    // Style analysis reports line-length statistics. Bug fix: the old
    // fixture's third line claimed to "exceed the 120 character limit and
    // should be flagged" yet was only 118 chars, while the test asserted
    // `long_lines == 0` — the fixture text contradicted the assertions.
    // The line below is honestly described and stays under the threshold.
    let analyzer = LanguageAnalyzer::new();
    let content = "short\na medium length line here\nthis line is deliberately kept below the one hundred twenty character threshold so it is not counted as a long line";
    let result = analyzer.analyze_style(content, Language::Rust).await;
    assert!(result.success);
    let avg_len = result.data["average_line_length"].as_f64().unwrap();
    assert!(avg_len > 0.0);
    // Derive the expected maximum from the fixture instead of a magic
    // number, so editing the fixture cannot silently desynchronize them.
    let expected_max = content.lines().map(str::len).max().unwrap() as u64;
    let max_len = result.data["max_line_length"].as_u64().unwrap();
    assert_eq!(max_len, expected_max);
    let long_lines = result.data["long_lines"].as_u64().unwrap();
    assert_eq!(long_lines, 0);
}
#[tokio::test]
async fn test_analyze_style_empty() {
    // Empty input must not trip a divide-by-zero: average comes back 0.0.
    let analyzer = LanguageAnalyzer::new();
    let result = analyzer.analyze_style("", Language::Rust).await;
    assert!(result.success);
    let average = result.data["average_line_length"].as_f64().unwrap();
    assert_eq!(average, 0.0);
}
#[tokio::test]
async fn test_analyze_style_no_long_lines() {
    let analyzer = LanguageAnalyzer::new();
    // Every line is far below the long-line threshold.
    let content = "short line\nanother short line\nstill short";
    let result = analyzer.analyze_style(content, Language::Rust).await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let long_lines = result.data["long_lines"].as_u64().unwrap();
    assert_eq!(long_lines, 0);
}
#[tokio::test]
async fn test_analyze_documentation_good_ratio() {
    // A well-documented Python sample should clear the 10% ratio floor.
    let analyzer = LanguageAnalyzer::new();
    let result = analyzer
        .analyze_documentation(sample_python_with_docs(), Language::Python)
        .await;
    assert!(result.success);
    let ratio = result.data["documentation_ratio"].as_f64().unwrap();
    assert!(ratio > 0.1);
}
#[tokio::test]
async fn test_analyze_documentation_low_ratio() {
    let analyzer = LanguageAnalyzer::new();
    // No comment lines at all => ratio 0.0 and a "low" assessment.
    let content =
        "fn main() {\n let x = 5;\n let y = 10;\n println!(\"{}\", x + y);\n}";
    let result = analyzer
        .analyze_documentation(content, Language::Rust)
        .await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let ratio = result.data["documentation_ratio"].as_f64().unwrap();
    assert_eq!(ratio, 0.0);
    assert_eq!(result.data["assessment"].as_str().unwrap(), "low");
}
#[tokio::test]
async fn test_analyze_documentation_empty() {
    let analyzer = LanguageAnalyzer::new();
    // Empty input yields a zero ratio rather than NaN or an error.
    let result = analyzer.analyze_documentation("", Language::Rust).await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let ratio = result.data["documentation_ratio"].as_f64().unwrap();
    assert_eq!(ratio, 0.0);
}
#[tokio::test]
async fn test_analyze_documentation_all_comments() {
    let analyzer = LanguageAnalyzer::new();
    // Every line is a `//` comment => ratio 1.0 and a "good" assessment.
    let content = "// comment 1\n// comment 2\n// comment 3";
    let result = analyzer
        .analyze_documentation(content, Language::Rust)
        .await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let ratio = result.data["documentation_ratio"].as_f64().unwrap();
    assert_eq!(ratio, 1.0);
    assert_eq!(result.data["assessment"].as_str().unwrap(), "good");
}
#[tokio::test]
async fn test_analyze_dependencies_rust() {
    // Two `use` lines plus one `extern crate` => three imports counted.
    let analyzer = LanguageAnalyzer::new();
    let source = "use std::io;\nuse std::fs;\nextern crate serde;";
    let result = analyzer.analyze_dependencies(source, Language::Rust).await;
    assert!(result.success);
    assert_eq!(result.data["import_count"].as_u64().unwrap(), 3);
}
#[tokio::test]
async fn test_analyze_dependencies_python() {
    let analyzer = LanguageAnalyzer::new();
    // Two `import` lines plus two `from ... import` lines => four imports.
    let content =
        "import os\nimport sys\nfrom typing import List\nfrom collections import defaultdict";
    let result = analyzer
        .analyze_dependencies(content, Language::Python)
        .await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let count = result.data["import_count"].as_u64().unwrap();
    assert_eq!(count, 4);
}
#[tokio::test]
async fn test_analyze_dependencies_javascript() {
    let analyzer = LanguageAnalyzer::new();
    // NOTE(review): only the line-leading `import` statement is counted; the
    // mid-line `require(` call is not, even though `require(` is in the
    // import pattern list. This pins the current analyzer behavior —
    // confirm whether `require` should also be counted here.
    let content = "import { x } from 'module';\nconst y = require('another');";
    let result = analyzer
        .analyze_dependencies(content, Language::JavaScript)
        .await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let count = result.data["import_count"].as_u64().unwrap();
    assert_eq!(count, 1);
}
#[tokio::test]
async fn test_analyze_dependencies_empty() {
    let analyzer = LanguageAnalyzer::new();
    // Empty input has no imports.
    let result = analyzer.analyze_dependencies("", Language::Rust).await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let count = result.data["import_count"].as_u64().unwrap();
    assert_eq!(count, 0);
}
#[test]
fn test_get_import_patterns_rust() {
    // Rust dependencies arrive via `use` or legacy `extern crate`.
    let analyzer = LanguageAnalyzer::new();
    let patterns = analyzer.get_import_patterns(Language::Rust);
    for expected in ["use ", "extern crate"] {
        assert!(patterns.contains(&expected), "missing pattern: {}", expected);
    }
}
#[test]
fn test_get_import_patterns_python() {
    // Python imports start with `import` or `from`.
    let analyzer = LanguageAnalyzer::new();
    let patterns = analyzer.get_import_patterns(Language::Python);
    for expected in ["import ", "from "] {
        assert!(patterns.contains(&expected), "missing pattern: {}", expected);
    }
}
#[test]
fn test_get_import_patterns_javascript() {
    // JavaScript supports both ES-module `import` and CommonJS `require(`.
    let analyzer = LanguageAnalyzer::new();
    let patterns = analyzer.get_import_patterns(Language::JavaScript);
    for expected in ["import ", "require("] {
        assert!(patterns.contains(&expected), "missing pattern: {}", expected);
    }
}
#[test]
fn test_get_import_patterns_go() {
    // Go has a single `import` keyword.
    let analyzer = LanguageAnalyzer::new();
    let go_patterns = analyzer.get_import_patterns(Language::Go);
    assert!(go_patterns.contains(&"import "));
}
#[test]
fn test_get_import_patterns_generic() {
    // Unknown languages fall back to a generic keyword set.
    let analyzer = LanguageAnalyzer::new();
    let patterns = analyzer.get_import_patterns(Language::Unknown);
    for expected in ["import", "include", "require"] {
        assert!(patterns.contains(&expected), "missing pattern: {}", expected);
    }
}
#[tokio::test]
async fn test_analyze_metrics_rust() {
    // The shared Rust sample has a non-zero line count and at least two
    // function definitions.
    let analyzer = LanguageAnalyzer::new();
    let result = analyzer
        .analyze_metrics(sample_rust_code(), Language::Rust)
        .await;
    assert!(result.success);
    let data = &result.data;
    assert!(data["total_lines"].as_u64().unwrap() > 0);
    assert!(data["estimated_functions"].as_u64().unwrap() >= 2);
}
#[tokio::test]
async fn test_analyze_metrics_python() {
    let analyzer = LanguageAnalyzer::new();
    let content = sample_python_code();
    let result = analyzer.analyze_metrics(content, Language::Python).await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let functions = result.data["estimated_functions"].as_u64().unwrap();
    // The shared Python sample defines at least two functions.
    assert!(functions >= 2);
}
#[tokio::test]
async fn test_analyze_metrics_javascript() {
    let analyzer = LanguageAnalyzer::new();
    let content = sample_javascript_code();
    let result = analyzer
        .analyze_metrics(content, Language::JavaScript)
        .await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let functions = result.data["estimated_functions"].as_u64().unwrap();
    // The shared JavaScript sample defines at least one function.
    assert!(functions >= 1);
}
#[tokio::test]
async fn test_analyze_metrics_java() {
    let analyzer = LanguageAnalyzer::new();
    let content = sample_java_code();
    let result = analyzer.analyze_metrics(content, Language::Java).await;
    // Consistency fix: assert success before indexing `data`.
    assert!(result.success);
    let functions = result.data["estimated_functions"].as_u64().unwrap();
    // The shared Java sample defines at least two methods.
    assert!(functions >= 2);
}
#[tokio::test]
async fn test_analyze_metrics_unknown_language() {
    // With no recognized language there is no function heuristic to apply,
    // so the estimate is zero — but the analysis itself still succeeds.
    let analyzer = LanguageAnalyzer::new();
    let result = analyzer
        .analyze_metrics("some content\nmore content", Language::Unknown)
        .await;
    assert!(result.success);
    assert_eq!(result.data["estimated_functions"].as_u64().unwrap(), 0);
}
#[test]
fn test_create_unsupported_analysis_result() {
    // Complexity analysis on JSON is unsupported: the synthesized result
    // is a failure carrying a "not supported" error message.
    let analyzer = LanguageAnalyzer::new();
    let result =
        analyzer.create_unsupported_analysis_result(AnalysisType::Complexity, Language::JSON);
    assert!(!result.success);
    let message = result
        .error
        .expect("unsupported analysis result must carry an error");
    assert!(message.contains("not supported"));
}
#[tokio::test]
async fn test_perform_analyses_multiple() {
    // Requesting three analyses yields three results, all successful.
    let analyzer = LanguageAnalyzer::new();
    let content = sample_rust_code();
    let requested = [
        AnalysisType::Complexity,
        AnalysisType::Satd,
        AnalysisType::Metrics,
    ];
    let results = analyzer
        .perform_analyses(content, Language::Rust, &requested)
        .await
        .unwrap();
    assert_eq!(results.len(), requested.len());
    for outcome in &results {
        assert!(outcome.success);
    }
}
#[tokio::test]
async fn test_perform_analyses_with_unsupported() {
    // JSON supports Metrics but not Complexity: the unsupported analysis
    // is reported as a per-result failure instead of aborting the run.
    let analyzer = LanguageAnalyzer::new();
    let content = "{}";
    let requested = [AnalysisType::Complexity, AnalysisType::Metrics];
    let results = analyzer
        .perform_analyses(content, Language::JSON, &requested)
        .await
        .unwrap();
    assert_eq!(results.len(), 2);
    assert!(!results[0].success);
    assert!(results[1].success);
}
#[tokio::test]
async fn test_analyze_file_rust() {
    // End-to-end: a temp `.rs` file is detected as Rust and both requested
    // analyses produce a result.
    let analyzer = LanguageAnalyzer::new();
    let file = create_temp_file(sample_rust_code(), ".rs");
    let requested = vec![AnalysisType::Complexity, AnalysisType::Satd];
    let outcome = analyzer.analyze_file(file.path(), requested).await;
    assert!(outcome.is_ok());
    let report = outcome.unwrap();
    assert_eq!(report.language, Language::Rust);
    assert_eq!(report.analysis_results.len(), 2);
    // Generous runtime ceiling — a sanity check, not a benchmark.
    assert!(report.processing_time_ms < 10000);
}
#[tokio::test]
async fn test_analyze_file_python() {
    // A temp `.py` file must be detected as Python.
    let analyzer = LanguageAnalyzer::new();
    let file = create_temp_file(sample_python_code(), ".py");
    let outcome = analyzer
        .analyze_file(file.path(), vec![AnalysisType::Metrics])
        .await;
    assert!(outcome.is_ok());
    let report = outcome.unwrap();
    assert_eq!(report.language, Language::Python);
}
#[tokio::test]
async fn test_analyze_file_nonexistent() {
    // A missing path must surface as an Err, not a panic.
    let analyzer = LanguageAnalyzer::new();
    let missing = Path::new("/nonexistent/file.rs");
    let outcome = analyzer
        .analyze_file(missing, vec![AnalysisType::Complexity])
        .await;
    assert!(outcome.is_err());
}
#[test]
fn test_analysis_options_default() {
    // Defaults: threshold 20, comments included, tests excluded, parallel
    // analysis on, JSON output.
    let opts = AnalysisOptions::default();
    assert!(matches!(opts.output_format, OutputFormat::Json));
    assert_eq!(opts.complexity_threshold, 20);
    assert!(opts.include_comments);
    assert!(opts.parallel_analysis);
    assert!(!opts.include_tests);
}
#[test]
fn test_analysis_options_clone() {
    // Cloning must preserve the option fields.
    let original = AnalysisOptions::default();
    let copy = original.clone();
    assert_eq!(copy.complexity_threshold, original.complexity_threshold);
    assert_eq!(copy.include_comments, original.include_comments);
}
#[test]