#![cfg_attr(coverage_nightly, coverage(off))]
use super::*;
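// Unit tests for the performance monitoring module: baseline establishment,
// optimization strategies, metric collection, serde round-trips, and the
// monitor's internal helper calculations.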
#[tokio::test]
async fn test_establish_baseline() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let baseline = monitor
.establish_baseline("test-baseline".to_string())
.await;
assert!(baseline.is_ok());
let b = baseline.unwrap();
assert_eq!(b.id, "test-baseline");
}
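// Every supported OptimizationStrategy variant should apply without error on a
// freshly constructed monitor.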
#[tokio::test]
async fn test_apply_cache_optimization() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let result = monitor
.apply_optimization(OptimizationStrategy::CacheOptimization)
.await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_apply_parallel_processing() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let result = monitor
.apply_optimization(OptimizationStrategy::ParallelProcessing)
.await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_apply_memory_pooling() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let result = monitor
.apply_optimization(OptimizationStrategy::MemoryPooling)
.await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_apply_incremental_parsing() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let result = monitor
.apply_optimization(OptimizationStrategy::IncrementalParsing)
.await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_apply_io_optimization() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let result = monitor
.apply_optimization(OptimizationStrategy::IoOptimization)
.await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_apply_ast_reuse() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let result = monitor
.apply_optimization(OptimizationStrategy::AstReuse)
.await;
assert!(result.is_ok());
}
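// Data-collection and maintenance entry points should all succeed on a fresh
// monitor and return populated results.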
#[tokio::test]
async fn test_collect_system_info() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let info = monitor.collect_system_info().await;
assert!(info.is_ok());
let sys = info.unwrap();
assert!(!sys.os.is_empty());
}
#[tokio::test]
async fn test_collect_codebase_info() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let info = monitor.collect_codebase_info().await;
assert!(info.is_ok());
let cb = info.unwrap();
assert!(cb.total_loc > 0);
}
#[tokio::test]
async fn test_collect_baseline_measurements() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let measurements = monitor.collect_baseline_measurements().await;
assert!(measurements.is_ok());
let m = measurements.unwrap();
assert!(m.contains_key("analysis_time_ms"));
}
#[tokio::test]
async fn test_collect_metrics() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let result = monitor.collect_metrics().await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_check_regressions() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let result = monitor.check_regressions().await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_auto_optimize() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let result = monitor.auto_optimize().await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_cleanup_old_data() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let result = monitor.cleanup_old_data().await;
assert!(result.is_ok());
}
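// Serde round-trip tests: each report and stats type is serialized to JSON and
// back, and a representative field is checked to survive the trip.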
#[test]
fn test_benchmark_report_serialization() {
let report = BenchmarkReport {
suite_name: "test_suite".to_string(),
executed_at: SystemTime::now(),
results: vec![(
"test".to_string(),
BenchmarkResult {
execution_time: Duration::from_millis(100),
memory_used: 1024,
cpu_time: Duration::from_millis(90),
throughput: 100.0,
success: true,
metrics: HashMap::new(),
},
)],
summary: BenchmarkSummary {
total_benchmarks: 1,
passed_benchmarks: 1,
failed_benchmarks: 0,
avg_execution_time: Duration::from_millis(100),
total_memory_used: 1024,
avg_throughput: 100.0,
},
regressions: vec![],
recommendations: vec!["Optimize caching".to_string()],
};
let json = serde_json::to_string(&report).unwrap();
assert!(json.contains("test_suite"));
let deserialized: BenchmarkReport = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.suite_name, "test_suite");
}
#[test]
fn test_benchmark_result_serialization() {
let mut metrics = HashMap::new();
metrics.insert("custom_metric".to_string(), 42.0);
let result = BenchmarkResult {
execution_time: Duration::from_secs(2),
memory_used: 1024 * 1024,
cpu_time: Duration::from_millis(1800),
throughput: 50.0,
success: true,
metrics,
};
let json = serde_json::to_string(&result).unwrap();
assert!(json.contains("50.0"));
let deserialized: BenchmarkResult = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.throughput, 50.0);
}
#[test]
fn test_expected_performance_serialization() {
let expected = ExpectedPerformance {
max_execution_time: Duration::from_secs(10),
max_memory_bytes: 1024 * 1024 * 100,
min_throughput: 25.0,
regression_threshold: 0.15,
};
let json = serde_json::to_string(&expected).unwrap();
assert!(json.contains("25.0"));
let deserialized: ExpectedPerformance = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.min_throughput, 25.0);
}
#[test]
fn test_performance_point_serialization() {
let mut context = HashMap::new();
context.insert("key".to_string(), "value".to_string());
let point = PerformancePoint {
timestamp: SystemTime::now(),
metric: "latency".to_string(),
value: 150.0,
context,
};
let json = serde_json::to_string(&point).unwrap();
assert!(json.contains("latency"));
let deserialized: PerformancePoint = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.metric, "latency");
}
#[test]
fn test_performance_statistics_serialization() {
let stats = PerformanceStatistics::default();
let json = serde_json::to_string(&stats).unwrap();
assert!(json.contains("analysis"));
let deserialized: PerformanceStatistics = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.analysis.avg_analysis_time_ms, 100.0);
}
#[test]
fn test_analysis_stats_serialization() {
let stats = AnalysisStats {
avg_analysis_time_ms: 75.0,
throughput_fps: 15.0,
cache_hit_ratio: 0.9,
parser_efficiency: 0.95,
};
let json = serde_json::to_string(&stats).unwrap();
let deserialized: AnalysisStats = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.avg_analysis_time_ms, 75.0);
}
#[test]
fn test_memory_stats_serialization() {
let stats = MemoryStats {
peak_memory_mb: 768.0,
avg_memory_mb: 384.0,
growth_rate_mb_per_hour: 8.0,
gc_impact_percent: 3.0,
};
let json = serde_json::to_string(&stats).unwrap();
let deserialized: MemoryStats = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.peak_memory_mb, 768.0);
}
#[test]
fn test_io_stats_serialization() {
let stats = IoStats {
read_throughput_mbps: 150.0,
avg_read_time_ms: 8.0,
io_wait_percent: 4.0,
cache_effectiveness: 0.88,
};
let json = serde_json::to_string(&stats).unwrap();
let deserialized: IoStats = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.read_throughput_mbps, 150.0);
}
#[test]
fn test_system_stats_serialization() {
let stats = SystemStats {
cpu_percent: 45.0,
thread_count: 12,
load_average: 1.8,
network_kbps: 512.0,
};
let json = serde_json::to_string(&stats).unwrap();
let deserialized: SystemStats = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.thread_count, 12);
}
#[test]
fn test_system_info_serialization() {
let info = SystemInfo {
cpu_model: "Intel Xeon".to_string(),
total_memory_mb: 65536,
os: "linux".to_string(),
rust_version: "1.76.0".to_string(),
};
let json = serde_json::to_string(&info).unwrap();
let deserialized: SystemInfo = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.cpu_model, "Intel Xeon");
}
#[test]
fn test_codebase_info_serialization() {
let info = CodebaseInfo {
total_loc: 250000,
file_count: 1500,
avg_complexity: 6.5,
primary_language: "go".to_string(),
};
let json = serde_json::to_string(&info).unwrap();
let deserialized: CodebaseInfo = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.total_loc, 250000);
}
#[test]
fn test_baseline_context_serialization() {
let context = BaselineContext {
system_info: SystemInfo {
cpu_model: "CPU".to_string(),
total_memory_mb: 4096,
os: "macos".to_string(),
rust_version: "1.70.0".to_string(),
},
codebase_info: CodebaseInfo {
total_loc: 10000,
file_count: 100,
avg_complexity: 4.5,
primary_language: "rust".to_string(),
},
config_hash: "hash123".to_string(),
};
let json = serde_json::to_string(&context).unwrap();
let deserialized: BaselineContext = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.config_hash, "hash123");
}
#[test]
fn test_baseline_serialization() {
let mut measurements = HashMap::new();
measurements.insert("metric1".to_string(), 100.0);
let baseline = Baseline {
id: "baseline-test".to_string(),
measurements,
measured_at: SystemTime::now(),
context: BaselineContext {
system_info: SystemInfo {
cpu_model: "CPU".to_string(),
total_memory_mb: 8192,
os: "linux".to_string(),
rust_version: "1.70.0".to_string(),
},
codebase_info: CodebaseInfo {
total_loc: 5000,
file_count: 50,
avg_complexity: 3.5,
primary_language: "python".to_string(),
},
config_hash: "hash".to_string(),
},
};
let json = serde_json::to_string(&baseline).unwrap();
let deserialized: Baseline = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.id, "baseline-test");
}
#[test]
fn test_performance_regression_serialization() {
let regression = PerformanceRegression {
benchmark_name: "bench1".to_string(),
metric_name: "latency_ms".to_string(),
current_value: 200.0,
baseline_value: 100.0,
regression_percent: 100.0,
severity: RegressionSeverity::Critical,
};
let json = serde_json::to_string(&regression).unwrap();
let deserialized: PerformanceRegression = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.regression_percent, 100.0);
}
#[test]
fn test_performance_alert_serialization() {
let alert = PerformanceAlert {
alert_type: AlertType::HighCpuUsage,
message: "CPU usage exceeded threshold".to_string(),
severity: AlertSeverity::Warning,
metric_name: "cpu_percent".to_string(),
current_value: 95.0,
threshold_value: 80.0,
triggered_at: SystemTime::now(),
};
let json = serde_json::to_string(&alert).unwrap();
let deserialized: PerformanceAlert = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.current_value, 95.0);
}
#[test]
fn test_performance_report_serialization() {
let report = PerformanceReport {
generated_at: SystemTime::now(),
current_statistics: PerformanceStatistics::default(),
recent_benchmarks: vec![],
optimization_history: vec![],
recommendations: vec!["Recommend1".to_string()],
alerts: vec![],
};
let json = serde_json::to_string(&report).unwrap();
let deserialized: PerformanceReport = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.recommendations.len(), 1);
}
#[test]
fn test_optimization_config_serialization() {
let config = OptimizationConfig {
auto_optimize: true,
strategies: vec![OptimizationStrategy::CacheOptimization],
min_improvement_percent: 7.5,
experimental: false,
};
let json = serde_json::to_string(&config).unwrap();
let deserialized: OptimizationConfig = serde_json::from_str(&json).unwrap();
assert!(deserialized.auto_optimize);
}
#[test]
fn test_retention_config_serialization() {
let config = RetentionConfig::default();
let json = serde_json::to_string(&config).unwrap();
let deserialized: RetentionConfig = serde_json::from_str(&json).unwrap();
assert!(deserialized.auto_cleanup);
}
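// Direct tests of the monitor's internal helper methods (config hashing,
// recommendation and alert generation, summary statistics).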
#[test]
fn test_calculate_config_hash() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let hash = monitor.calculate_config_hash();
assert!(!hash.is_empty());
}
#[test]
fn test_generate_system_recommendations() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let recommendations = monitor.generate_system_recommendations();
assert!(!recommendations.is_empty());
}
#[test]
fn test_generate_performance_alerts() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let alerts = monitor.generate_performance_alerts();
assert!(alerts.is_empty());
}
#[test]
fn test_calculate_summary_stats() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let results = vec![(
"test1".to_string(),
BenchmarkResult {
execution_time: Duration::from_millis(100),
memory_used: 1024,
cpu_time: Duration::from_millis(90),
throughput: 100.0,
success: true,
metrics: HashMap::new(),
},
)];
let summary = monitor.calculate_summary_stats(&results);
assert_eq!(summary.total_benchmarks, 1);
}
#[test]
fn test_generate_recommendations() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let summary = BenchmarkSummary {
total_benchmarks: 5,
passed_benchmarks: 5,
failed_benchmarks: 0,
avg_execution_time: Duration::from_millis(100),
total_memory_used: 1024,
avg_throughput: 50.0,
};
let recommendations = monitor.generate_recommendations(&summary);
assert!(!recommendations.is_empty());
}
#[test]
fn test_get_recent_benchmark_results() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let results = monitor.get_recent_benchmark_results(5);
assert!(results.is_empty());
}
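// Debug formatting, struct construction, and benchmark lifecycle wiring.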
#[test]
fn test_benchmark_suite_debug() {
let suite = BenchmarkSuite {
name: "debug_suite".to_string(),
benchmarks: vec![],
config: BenchmarkConfig::default(),
};
let debug = format!("{:?}", suite);
assert!(debug.contains("debug_suite"));
}
#[test]
fn test_benchmark_context_debug() {
let context = BenchmarkContext {
test_data: HashMap::new(),
temp_dir: PathBuf::from("/tmp"),
config: HashMap::new(),
};
let debug = format!("{:?}", context);
assert!(debug.contains("BenchmarkContext"));
}
#[test]
fn test_performance_metrics_debug() {
let metrics = PerformanceMetrics::new();
let debug = format!("{:?}", metrics);
assert!(debug.contains("PerformanceMetrics"));
}
#[test]
fn test_benchmark_suite_with_benchmarks() {
fn dummy_benchmark(_ctx: &BenchmarkContext) -> Result<BenchmarkResult> {
Ok(BenchmarkResult {
execution_time: Duration::from_millis(50),
memory_used: 512,
cpu_time: Duration::from_millis(45),
throughput: 150.0,
success: true,
metrics: HashMap::new(),
})
}
let benchmark = Benchmark {
name: "test_benchmark".to_string(),
benchmark_fn: dummy_benchmark,
setup_fn: None,
teardown_fn: None,
expected: ExpectedPerformance {
max_execution_time: Duration::from_secs(1),
max_memory_bytes: 1024 * 1024,
min_throughput: 100.0,
regression_threshold: 0.1,
},
};
let suite = BenchmarkSuite {
name: "full_suite".to_string(),
benchmarks: vec![benchmark],
config: BenchmarkConfig::default(),
};
assert_eq!(suite.benchmarks.len(), 1);
assert_eq!(suite.name, "full_suite");
}
#[test]
fn test_benchmark_with_setup_teardown() {
fn setup() -> Result<BenchmarkContext> {
Ok(BenchmarkContext {
test_data: HashMap::new(),
temp_dir: PathBuf::from("/tmp/bench"),
config: HashMap::new(),
})
}
fn teardown(_ctx: BenchmarkContext) -> Result<()> {
Ok(())
}
fn bench_fn(_ctx: &BenchmarkContext) -> Result<BenchmarkResult> {
Ok(BenchmarkResult {
execution_time: Duration::from_millis(100),
memory_used: 1024,
cpu_time: Duration::from_millis(90),
throughput: 100.0,
success: true,
metrics: HashMap::new(),
})
}
let benchmark = Benchmark {
name: "with_lifecycle".to_string(),
benchmark_fn: bench_fn,
setup_fn: Some(setup),
teardown_fn: Some(teardown),
expected: ExpectedPerformance {
max_execution_time: Duration::from_secs(5),
max_memory_bytes: 1024 * 1024 * 10,
min_throughput: 50.0,
regression_threshold: 0.2,
},
};
assert!(benchmark.setup_fn.is_some());
assert!(benchmark.teardown_fn.is_some());
}
#[test]
fn test_performance_point_with_context() {
let mut context = HashMap::new();
context.insert("env".to_string(), "production".to_string());
context.insert("version".to_string(), "1.0.0".to_string());
let point = PerformancePoint {
timestamp: SystemTime::now(),
metric: "latency_p99".to_string(),
value: 250.5,
context,
};
assert_eq!(point.context.len(), 2);
assert_eq!(point.context.get("env"), Some(&"production".to_string()));
}
#[test]
fn test_performance_regression_all_severities() {
let regressions = vec![
PerformanceRegression {
benchmark_name: "minor".to_string(),
metric_name: "latency".to_string(),
current_value: 105.0,
baseline_value: 100.0,
regression_percent: 5.0,
severity: RegressionSeverity::Minor,
},
PerformanceRegression {
benchmark_name: "moderate".to_string(),
metric_name: "latency".to_string(),
current_value: 120.0,
baseline_value: 100.0,
regression_percent: 20.0,
severity: RegressionSeverity::Moderate,
},
PerformanceRegression {
benchmark_name: "severe".to_string(),
metric_name: "latency".to_string(),
current_value: 140.0,
baseline_value: 100.0,
regression_percent: 40.0,
severity: RegressionSeverity::Severe,
},
PerformanceRegression {
benchmark_name: "critical".to_string(),
metric_name: "latency".to_string(),
current_value: 200.0,
baseline_value: 100.0,
regression_percent: 100.0,
severity: RegressionSeverity::Critical,
},
];
assert_eq!(regressions.len(), 4);
assert_eq!(regressions[0].regression_percent, 5.0);
assert_eq!(regressions[3].regression_percent, 100.0);
}
#[test]
fn test_performance_alert_all_types() {
let alerts: Vec<PerformanceAlert> = vec![
PerformanceAlert {
alert_type: AlertType::HighLatency,
message: "Latency exceeded".to_string(),
severity: AlertSeverity::Warning,
metric_name: "p99_latency".to_string(),
current_value: 500.0,
threshold_value: 200.0,
triggered_at: SystemTime::now(),
},
PerformanceAlert {
alert_type: AlertType::HighMemoryUsage,
message: "Memory high".to_string(),
severity: AlertSeverity::Critical,
metric_name: "heap_mb".to_string(),
current_value: 8000.0,
threshold_value: 4096.0,
triggered_at: SystemTime::now(),
},
PerformanceAlert {
alert_type: AlertType::HighCpuUsage,
message: "CPU high".to_string(),
severity: AlertSeverity::Warning,
metric_name: "cpu_percent".to_string(),
current_value: 95.0,
threshold_value: 80.0,
triggered_at: SystemTime::now(),
},
PerformanceAlert {
alert_type: AlertType::LowThroughput,
message: "Throughput low".to_string(),
severity: AlertSeverity::Info,
metric_name: "ops_per_sec".to_string(),
current_value: 50.0,
threshold_value: 100.0,
triggered_at: SystemTime::now(),
},
PerformanceAlert {
alert_type: AlertType::PerformanceRegression,
message: "Regression detected".to_string(),
severity: AlertSeverity::Critical,
metric_name: "benchmark_time".to_string(),
current_value: 200.0,
threshold_value: 100.0,
triggered_at: SystemTime::now(),
},
];
assert_eq!(alerts.len(), 5);
for alert in &alerts {
assert!(!alert.message.is_empty());
}
}
#[test]
fn test_optimization_config_all_strategies() {
let config = OptimizationConfig {
auto_optimize: true,
strategies: vec![
OptimizationStrategy::CacheOptimization,
OptimizationStrategy::ParallelProcessing,
OptimizationStrategy::MemoryPooling,
OptimizationStrategy::IncrementalParsing,
OptimizationStrategy::IoOptimization,
OptimizationStrategy::AstReuse,
],
min_improvement_percent: 5.0,
experimental: true,
};
assert_eq!(config.strategies.len(), 6);
assert!(config.auto_optimize);
assert!(config.experimental);
}
#[test]
fn test_benchmark_report_full() {
let result = BenchmarkResult {
execution_time: Duration::from_millis(150),
memory_used: 2048,
cpu_time: Duration::from_millis(140),
throughput: 75.0,
success: true,
metrics: {
let mut m = HashMap::new();
m.insert("gc_count".to_string(), 5.0);
m
},
};
let report = BenchmarkReport {
suite_name: "full_report".to_string(),
executed_at: SystemTime::now(),
results: vec![("test1".to_string(), result)],
summary: BenchmarkSummary {
total_benchmarks: 1,
passed_benchmarks: 1,
failed_benchmarks: 0,
avg_execution_time: Duration::from_millis(150),
total_memory_used: 2048,
avg_throughput: 75.0,
},
regressions: vec![],
recommendations: vec![
"Consider caching".to_string(),
"Enable parallel".to_string(),
],
};
assert_eq!(report.suite_name, "full_report");
assert_eq!(report.recommendations.len(), 2);
}
#[test]
fn test_baseline_with_measurements() {
let mut measurements = HashMap::new();
measurements.insert("analysis_time_ms".to_string(), 125.0);
measurements.insert("memory_mb".to_string(), 300.0);
measurements.insert("throughput_fps".to_string(), 45.0);
measurements.insert("cache_hit_ratio".to_string(), 0.85);
let baseline = Baseline {
id: "v2.0.0-baseline".to_string(),
measurements: measurements.clone(),
measured_at: SystemTime::now(),
context: BaselineContext {
system_info: SystemInfo {
cpu_model: "Intel Xeon Gold".to_string(),
total_memory_mb: 131072,
os: "linux".to_string(),
rust_version: "1.78.0".to_string(),
},
codebase_info: CodebaseInfo {
total_loc: 500000,
file_count: 3000,
avg_complexity: 7.5,
primary_language: "rust".to_string(),
},
config_hash: "sha256:abc123".to_string(),
},
};
assert_eq!(baseline.measurements.len(), 4);
assert!(baseline.measurements.get("analysis_time_ms").is_some());
}
#[test]
fn test_performance_monitor_with_benchmarks() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let suite = BenchmarkSuite {
name: "test_suite".to_string(),
benchmarks: vec![],
config: BenchmarkConfig::default(),
};
monitor.benchmarks.insert("test_suite".to_string(), suite);
assert!(monitor.benchmarks.contains_key("test_suite"));
}
#[test]
fn test_calculate_improvement_with_improvement() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let mut baseline = HashMap::new();
baseline.insert("latency".to_string(), 100.0);
baseline.insert("memory".to_string(), 500.0);
let mut optimized = HashMap::new();
optimized.insert("latency".to_string(), 75.0); optimized.insert("memory".to_string(), 400.0);
let improvement = monitor.calculate_improvement(&baseline, &optimized);
assert!(improvement > 20.0);
}
#[test]
fn test_calculate_improvement_with_regression() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let mut baseline = HashMap::new();
baseline.insert("latency".to_string(), 100.0);
let mut optimized = HashMap::new();
optimized.insert("latency".to_string(), 120.0);
let improvement = monitor.calculate_improvement(&baseline, &optimized);
assert!(improvement < 0.0);
}
#[test]
fn test_calculate_metrics_delta_multiple() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let mut baseline = HashMap::new();
baseline.insert("a".to_string(), 100.0);
baseline.insert("b".to_string(), 200.0);
baseline.insert("c".to_string(), 50.0);
let mut optimized = HashMap::new();
optimized.insert("a".to_string(), 80.0);
optimized.insert("b".to_string(), 250.0);
let delta = monitor.calculate_metrics_delta(&baseline, &optimized);
assert_eq!(delta.get("a"), Some(&-20.0));
assert_eq!(delta.get("b"), Some(&50.0));
assert!(delta.get("c").is_none()); }
#[tokio::test]
async fn test_run_single_benchmark() {
fn test_fn(_ctx: &BenchmarkContext) -> Result<BenchmarkResult> {
Ok(BenchmarkResult {
execution_time: Duration::from_millis(50),
memory_used: 1024,
cpu_time: Duration::from_millis(45),
throughput: 200.0,
success: true,
metrics: HashMap::new(),
})
}
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let benchmark = Benchmark {
name: "single_test".to_string(),
benchmark_fn: test_fn,
setup_fn: None,
teardown_fn: None,
expected: ExpectedPerformance {
max_execution_time: Duration::from_secs(1),
max_memory_bytes: 1024 * 1024,
min_throughput: 100.0,
regression_threshold: 0.1,
},
};
let result = monitor.run_single_benchmark(&benchmark).await;
assert!(result.is_ok());
let br = result.unwrap();
assert!(br.success);
}
#[tokio::test]
async fn test_detect_regressions_empty() {
let config = create_test_config();
let monitor = PerformanceMonitor::new(config);
let results = vec![];
let regressions = monitor.detect_regressions(&results).await;
assert!(regressions.is_ok());
assert!(regressions.unwrap().is_empty());
}
#[tokio::test]
async fn test_store_benchmark_results() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let report = BenchmarkReport {
suite_name: "store_test".to_string(),
executed_at: SystemTime::now(),
results: vec![],
summary: BenchmarkSummary {
total_benchmarks: 0,
passed_benchmarks: 0,
failed_benchmarks: 0,
avg_execution_time: Duration::from_millis(0),
total_memory_used: 0,
avg_throughput: 0.0,
},
regressions: vec![],
recommendations: vec![],
};
let result = monitor.store_benchmark_results(&report).await;
assert!(result.is_ok());
}
#[test]
fn test_retention_config_custom() {
let config = RetentionConfig {
detailed_retention: Duration::from_secs(3 * 24 * 60 * 60),
summary_retention: Duration::from_secs(30 * 24 * 60 * 60),
auto_cleanup: false,
};
assert_eq!(config.detailed_retention, Duration::from_secs(259200));
assert!(!config.auto_cleanup);
}
#[test]
fn test_performance_config_custom() {
let config = PerformanceConfig {
continuous_monitoring: true,
benchmark_interval: Duration::from_secs(300),
thresholds: PerformanceThresholds {
max_analysis_time_ms: 10000,
max_memory_mb: 2048,
max_cpu_percent: 90.0,
regression_threshold_percent: 15.0,
},
optimization: OptimizationConfig {
auto_optimize: true,
strategies: vec![OptimizationStrategy::CacheOptimization],
min_improvement_percent: 10.0,
experimental: false,
},
retention: RetentionConfig::default(),
};
assert!(config.continuous_monitoring);
assert_eq!(config.thresholds.max_analysis_time_ms, 10000);
}
#[test]
fn test_benchmark_result_with_custom_metrics() {
let mut metrics = HashMap::new();
metrics.insert("gc_pause_ms".to_string(), 25.0);
metrics.insert("allocations".to_string(), 1000.0);
metrics.insert("peak_rss_mb".to_string(), 512.0);
let result = BenchmarkResult {
execution_time: Duration::from_millis(200),
memory_used: 512 * 1024 * 1024,
cpu_time: Duration::from_millis(180),
throughput: 50.0,
success: true,
metrics,
};
assert_eq!(result.metrics.len(), 3);
assert_eq!(result.metrics.get("gc_pause_ms"), Some(&25.0));
}
#[test]
fn test_active_optimization_all_statuses() {
let statuses = vec![
(OptimizationStatus::Analyzing, "analyzing"),
(OptimizationStatus::Ready, "ready"),
(OptimizationStatus::Implementing, "implementing"),
(OptimizationStatus::Testing, "testing"),
(OptimizationStatus::Applied, "applied"),
(OptimizationStatus::Failed("timeout".to_string()), "failed"),
(
OptimizationStatus::RolledBack("crash".to_string()),
"rollback",
),
];
for (status, _name) in statuses {
let opt = ActiveOptimization {
strategy: OptimizationStrategy::CacheOptimization,
target_metric: "latency".to_string(),
expected_improvement: 20.0,
status,
};
assert_eq!(opt.expected_improvement, 20.0);
}
}
#[test]
fn test_performance_report_with_all_fields() {
let report = PerformanceReport {
generated_at: SystemTime::now(),
current_statistics: PerformanceStatistics::default(),
recent_benchmarks: vec![BenchmarkReport {
suite_name: "suite1".to_string(),
executed_at: SystemTime::now(),
results: vec![],
summary: BenchmarkSummary {
total_benchmarks: 5,
passed_benchmarks: 4,
failed_benchmarks: 1,
avg_execution_time: Duration::from_millis(100),
total_memory_used: 1024 * 1024,
avg_throughput: 100.0,
},
regressions: vec![],
recommendations: vec![],
}],
optimization_history: vec![OptimizationResult {
strategy: OptimizationStrategy::CacheOptimization,
improvement_percent: 15.0,
metrics_changed: HashMap::new(),
applied_at: SystemTime::now(),
success: true,
}],
recommendations: vec!["Enable caching".to_string()],
alerts: vec![PerformanceAlert {
alert_type: AlertType::HighLatency,
message: "Test alert".to_string(),
severity: AlertSeverity::Warning,
metric_name: "latency".to_string(),
current_value: 150.0,
threshold_value: 100.0,
triggered_at: SystemTime::now(),
}],
};
assert_eq!(report.recent_benchmarks.len(), 1);
assert_eq!(report.optimization_history.len(), 1);
assert_eq!(report.alerts.len(), 1);
}
#[test]
fn test_benchmark_clone_comprehensive() {
fn dummy(_ctx: &BenchmarkContext) -> Result<BenchmarkResult> {
Ok(BenchmarkResult {
execution_time: Duration::from_millis(1),
memory_used: 0,
cpu_time: Duration::from_millis(1),
throughput: 0.0,
success: true,
metrics: HashMap::new(),
})
}
let benchmark = Benchmark {
name: "clone_test".to_string(),
benchmark_fn: dummy,
setup_fn: None,
teardown_fn: None,
expected: ExpectedPerformance {
max_execution_time: Duration::from_secs(1),
max_memory_bytes: 1024,
min_throughput: 1.0,
regression_threshold: 0.1,
},
};
let cloned = benchmark.clone();
assert_eq!(cloned.name, benchmark.name);
}
#[tokio::test]
async fn test_run_benchmark_suite_not_found() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let result = monitor.run_benchmark("nonexistent_suite").await;
assert!(result.is_err());
}
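// Sketch of the complementary happy path to test_run_benchmark_suite_not_found,
// assuming a registered suite with no benchmarks still runs and returns Ok.
// That success behavior is an assumption about run_benchmark, not confirmed here.
#[tokio::test]
async fn test_run_benchmark_empty_suite() {
let config = create_test_config();
let mut monitor = PerformanceMonitor::new(config);
let suite = BenchmarkSuite {
name: "empty_suite".to_string(),
benchmarks: vec![],
config: BenchmarkConfig::default(),
};
monitor.benchmarks.insert("empty_suite".to_string(), suite);
let result = monitor.run_benchmark("empty_suite").await;
assert!(result.is_ok());
}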