use anyhow::{anyhow, Result};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;
use tracing::info;
use scirs2_core::metrics::{Counter, Gauge, MetricsRegistry};
use crate::ast::Document;
use crate::distributed_cache::{CacheConfig, GraphQLQueryCache};
use crate::federation::{EnhancedFederationConfig, EnhancedFederationManager};
use crate::hybrid_optimizer::{HybridOptimizerConfig, HybridQueryOptimizer};
use crate::ml_optimizer::{MLOptimizerConfig, MLQueryOptimizer};
use crate::performance::PerformanceTracker;
use crate::quantum_optimizer::{QuantumOptimizerConfig, QuantumQueryOptimizer};
use crate::system_monitor;
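/// Configuration for a benchmark run: load shape, durations, and the
/// optimizer, cache, and federation variants to exercise.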
#[derive(Debug, Clone)]
pub struct BenchmarkConfig {
pub test_duration: Duration,
pub warmup_duration: Duration,
pub concurrent_users: usize,
pub queries_per_user: usize,
pub enable_detailed_metrics: bool,
pub test_scenarios: Vec<TestScenario>,
pub optimization_strategies: Vec<OptimizationStrategy>,
pub cache_configurations: Vec<CacheConfig>,
pub federation_configurations: Vec<EnhancedFederationConfig>,
}
impl Default for BenchmarkConfig {
fn default() -> Self {
Self {
test_duration: Duration::from_secs(60),
warmup_duration: Duration::from_secs(10),
concurrent_users: 10,
queries_per_user: 100,
enable_detailed_metrics: true,
test_scenarios: vec![
TestScenario::SimpleQuery,
TestScenario::ComplexQuery,
TestScenario::DeepNestedQuery,
TestScenario::FederatedQuery,
TestScenario::AggregationQuery,
],
optimization_strategies: vec![
OptimizationStrategy::None,
OptimizationStrategy::ML,
OptimizationStrategy::Quantum,
OptimizationStrategy::Hybrid,
],
cache_configurations: vec![CacheConfig::default()],
federation_configurations: vec![EnhancedFederationConfig::default()],
}
}
}
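/// Query workload shapes exercised by the suite.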
#[derive(Debug, Clone)]
pub enum TestScenario {
SimpleQuery,
ComplexQuery,
DeepNestedQuery,
FederatedQuery,
AggregationQuery,
SubscriptionQuery,
BulkOperations,
StressTest,
}
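/// Optimization paths a benchmark run can route queries through.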
#[derive(Debug, Clone)]
pub enum OptimizationStrategy {
None,
ML,
Quantum,
Hybrid,
Federation,
Cache,
}
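/// Aggregated metrics for one (scenario, strategy) combination.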
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkResult {
pub scenario: String,
pub strategy: String,
pub configuration: String,
pub total_requests: u64,
pub successful_requests: u64,
pub failed_requests: u64,
pub average_response_time: Duration,
pub min_response_time: Duration,
pub max_response_time: Duration,
pub p50_response_time: Duration,
pub p95_response_time: Duration,
pub p99_response_time: Duration,
pub requests_per_second: f64,
pub throughput_mbps: f64,
pub memory_usage_mb: f64,
pub cpu_usage_percent: f64,
pub cache_hit_rate: f64,
pub error_rate: f64,
pub detailed_metrics: Option<DetailedMetrics>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DetailedMetrics {
pub response_times: Vec<Duration>,
pub memory_samples: Vec<f64>,
pub cpu_samples: Vec<f64>,
pub network_bytes_sent: u64,
pub network_bytes_received: u64,
pub database_queries: u64,
pub cache_operations: CacheOperationMetrics,
pub optimization_metrics: OptimizationMetrics,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheOperationMetrics {
pub hits: u64,
pub misses: u64,
pub sets: u64,
pub deletes: u64,
pub invalidations: u64,
pub average_lookup_time: Duration,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationMetrics {
pub optimization_attempts: u64,
pub successful_optimizations: u64,
pub average_optimization_time: Duration,
pub performance_improvement: f64,
pub strategy_selection_accuracy: f64,
}
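/// Full output of a suite run: per-combination results, a cross-strategy
/// summary, and tuning recommendations.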
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkReport {
pub config: BenchmarkConfigSummary,
pub results: Vec<BenchmarkResult>,
pub summary: BenchmarkSummary,
pub recommendations: Vec<PerformanceRecommendation>,
pub generated_at: std::time::SystemTime,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkConfigSummary {
pub test_duration: Duration,
pub concurrent_users: usize,
pub total_queries: usize,
pub scenarios_tested: usize,
pub strategies_tested: usize,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkSummary {
pub best_strategy: String,
pub best_average_response_time: Duration,
pub best_throughput: f64,
pub best_cache_hit_rate: f64,
pub overall_performance_improvement: f64,
pub strategy_rankings: Vec<StrategyRanking>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StrategyRanking {
pub strategy: String,
pub score: f64,
pub response_time_rank: usize,
pub throughput_rank: usize,
pub reliability_rank: usize,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceRecommendation {
pub category: String,
pub recommendation: String,
pub impact: String,
pub priority: String,
}
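/// Orchestrates the full benchmark lifecycle: optimizer setup, warmup,
/// per-scenario/per-strategy runs, report generation, and export.
///
/// A minimal usage sketch (assumes a Tokio runtime; marked `ignore` because
/// the default config drives a 60-second load test):
///
/// ```ignore
/// let mut suite = PerformanceBenchmarkSuite::new(BenchmarkConfig::default());
/// let report = suite.run_benchmarks().await?;
/// suite.export_results(ExportFormat::Json, "benchmark_report.json").await?;
/// ```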
pub struct PerformanceBenchmarkSuite {
config: BenchmarkConfig,
performance_tracker: Arc<PerformanceTracker>,
ml_optimizer: Option<Arc<MLQueryOptimizer>>,
quantum_optimizer: Option<Arc<QuantumQueryOptimizer>>,
hybrid_optimizer: Option<Arc<HybridQueryOptimizer>>,
cache: Option<Arc<GraphQLQueryCache>>,
#[allow(dead_code)]
federation_manager: Option<Arc<EnhancedFederationManager>>,
results: Arc<RwLock<Vec<BenchmarkResult>>>,
metrics_registry: Arc<MetricsRegistry>,
}
impl PerformanceBenchmarkSuite {
pub fn new(config: BenchmarkConfig) -> Self {
let performance_tracker = Arc::new(PerformanceTracker::new());
let metrics_registry = Arc::new(MetricsRegistry::new());
let _ = metrics_registry.register(
"total_requests".to_string(),
Counter::new("Total benchmark requests processed".to_string()),
);
let _ = metrics_registry.register(
"successful_requests".to_string(),
Counter::new("Successful benchmark requests".to_string()),
);
let _ = metrics_registry.register(
"failed_requests".to_string(),
Counter::new("Failed benchmark requests".to_string()),
);
let _ = metrics_registry.register(
"active_benchmarks".to_string(),
Gauge::new("Currently active benchmark runs".to_string()),
);
let _ = metrics_registry.register(
"memory_usage_mb".to_string(),
Gauge::new("Memory usage during benchmarking (MB)".to_string()),
);
let _ = metrics_registry.register(
"cpu_usage_percent".to_string(),
Gauge::new("CPU usage during benchmarking (%)".to_string()),
);
Self {
config,
performance_tracker,
ml_optimizer: None,
quantum_optimizer: None,
hybrid_optimizer: None,
cache: None,
federation_manager: None,
results: Arc::new(RwLock::new(Vec::new())),
metrics_registry,
}
}
pub fn metrics(&self) -> &MetricsRegistry {
&self.metrics_registry
}
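/// Builds the ML, quantum, and hybrid optimizers, plus a cache from the
/// first configured cache variant.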
pub async fn initialize_optimizers(&mut self) -> Result<()> {
info!("Initializing optimization strategies for benchmarking");
let ml_config = MLOptimizerConfig::default();
self.ml_optimizer = Some(Arc::new(MLQueryOptimizer::new(
ml_config,
self.performance_tracker.clone(),
)));
let quantum_config = QuantumOptimizerConfig::default();
self.quantum_optimizer = Some(Arc::new(QuantumQueryOptimizer::new(quantum_config)));
let hybrid_config = HybridOptimizerConfig::default();
self.hybrid_optimizer = Some(Arc::new(HybridQueryOptimizer::new(
hybrid_config,
self.performance_tracker.clone(),
)));
if !self.config.cache_configurations.is_empty() {
let cache_config = self.config.cache_configurations[0].clone();
self.cache = Some(Arc::new(GraphQLQueryCache::new(cache_config).await?));
}
info!("Optimization strategies initialized");
Ok(())
}
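/// Runs the full suite: warmup, then every scenario × strategy pair in
/// sequence, then report generation.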
pub async fn run_benchmarks(&mut self) -> Result<BenchmarkReport> {
info!("Starting comprehensive performance benchmark suite");
self.initialize_optimizers().await?;
let start_time = Instant::now();
info!("Running warmup period");
self.run_warmup().await?;
// Clone the lists once up front instead of re-cloning the strategy list
// on every scenario iteration.
let scenarios = self.config.test_scenarios.clone();
let strategies = self.config.optimization_strategies.clone();
for scenario in &scenarios {
for strategy in &strategies {
let result = self.run_single_benchmark(scenario, strategy).await?;
self.results.write().await.push(result);
}
}
let report = self.generate_report().await?;
let total_time = start_time.elapsed();
info!("Benchmark suite completed in {:?}", total_time);
Ok(report)
}
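/// Primes caches and optimizer state with light, unmeasured traffic before
/// the timed runs begin.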
async fn run_warmup(&self) -> Result<()> {
let warmup_queries = self.generate_test_queries(&TestScenario::SimpleQuery, 50);
let warmup_start = Instant::now();
while warmup_start.elapsed() < self.config.warmup_duration {
for query in &warmup_queries {
let _ = self
.execute_query_with_strategy(query, &OptimizationStrategy::None)
.await;
tokio::time::sleep(Duration::from_millis(10)).await;
}
}
Ok(())
}
async fn run_single_benchmark(
&self,
scenario: &TestScenario,
strategy: &OptimizationStrategy,
) -> Result<BenchmarkResult> {
info!("Running benchmark: {:?} with {:?}", scenario, strategy);
let queries = self.generate_test_queries(scenario, self.config.queries_per_user);
let mut response_times = Vec::new();
let mut successful_requests = 0;
let mut failed_requests = 0;
let test_start = Instant::now();
let mut tasks = Vec::new();
for _user_id in 0..self.config.concurrent_users {
let queries_clone = queries.clone();
let strategy_clone = strategy.clone();
let task = {
let suite = self;
async move {
let mut user_response_times = Vec::new();
let mut user_successful = 0;
let mut user_failed = 0;
for query in queries_clone {
let start = Instant::now();
match suite
.execute_query_with_strategy(&query, &strategy_clone)
.await
{
Ok(_) => {
user_successful += 1;
user_response_times.push(start.elapsed());
}
Err(_) => {
user_failed += 1;
}
}
tokio::time::sleep(Duration::from_millis(10)).await;
}
(user_response_times, user_successful, user_failed)
}
};
tasks.push(task);
}
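// The per-user futures borrow `self`, so they are driven concurrently on
// the current task via `join_all` rather than spawned with `tokio::spawn`
// (which would require `'static` futures).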
let results = futures::future::join_all(tasks).await;
for (user_times, user_successful, user_failed) in results {
response_times.extend(user_times);
successful_requests += user_successful;
failed_requests += user_failed;
}
let total_time = test_start.elapsed();
let result = self
.calculate_benchmark_result(
scenario,
strategy,
&response_times,
successful_requests,
failed_requests,
total_time,
)
.await?;
Ok(result)
}
async fn execute_query_with_strategy(
&self,
query: &Document,
strategy: &OptimizationStrategy,
) -> Result<serde_json::Value> {
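// Execution is simulated: each arm exercises the corresponding optimizer
// path and returns a canned payload, so the benchmark isolates
// optimization overhead from real resolver work.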
match strategy {
OptimizationStrategy::None => {
Ok(serde_json::json!({"data": {"result": "basic"}}))
}
OptimizationStrategy::ML => {
if let Some(ml_optimizer) = &self.ml_optimizer {
let _prediction = ml_optimizer.predict_performance(query).await?;
Ok(serde_json::json!({"data": {"result": "ml_optimized"}}))
} else {
Err(anyhow!("ML optimizer not initialized"))
}
}
OptimizationStrategy::Quantum => {
if let Some(_quantum_optimizer) = &self.quantum_optimizer {
Ok(serde_json::json!({"data": {"result": "quantum_optimized"}}))
} else {
Err(anyhow!("Quantum optimizer not initialized"))
}
}
OptimizationStrategy::Hybrid => {
if let Some(hybrid_optimizer) = &self.hybrid_optimizer {
let _result = hybrid_optimizer.optimize_query(query).await?;
Ok(serde_json::json!({"data": {"result": "hybrid_optimized"}}))
} else {
Err(anyhow!("Hybrid optimizer not initialized"))
}
}
OptimizationStrategy::Federation => {
Ok(serde_json::json!({"data": {"result": "federated"}}))
}
OptimizationStrategy::Cache => {
if let Some(cache) = &self.cache {
let _stats = cache.get_stats().await?;
Ok(serde_json::json!({"data": {"result": "cached"}}))
} else {
Err(anyhow!("Cache not initialized"))
}
}
}
}
fn generate_test_queries(&self, scenario: &TestScenario, count: usize) -> Vec<Document> {
let mut queries = Vec::new();
for i in 0..count {
let query = match scenario {
TestScenario::SimpleQuery => self.create_simple_query(i),
TestScenario::ComplexQuery => self.create_complex_query(i),
TestScenario::DeepNestedQuery => self.create_deep_nested_query(i),
TestScenario::FederatedQuery => self.create_federated_query(i),
TestScenario::AggregationQuery => self.create_aggregation_query(i),
TestScenario::SubscriptionQuery => self.create_subscription_query(i),
TestScenario::BulkOperations => self.create_bulk_operation_query(i),
TestScenario::StressTest => self.create_stress_test_query(i),
};
queries.push(query);
}
queries
}
fn create_simple_query(&self, id: usize) -> Document {
use crate::ast::*;
Document {
definitions: vec![Definition::Operation(OperationDefinition {
operation_type: OperationType::Query,
name: Some(format!("SimpleQuery{id}")),
variable_definitions: vec![],
directives: vec![],
selection_set: SelectionSet {
selections: vec![Selection::Field(Field {
alias: None,
name: "user".to_string(),
arguments: vec![],
directives: vec![],
selection_set: Some(SelectionSet {
selections: vec![
Selection::Field(Field {
alias: None,
name: "id".to_string(),
arguments: vec![],
directives: vec![],
selection_set: None,
}),
Selection::Field(Field {
alias: None,
name: "name".to_string(),
arguments: vec![],
directives: vec![],
selection_set: None,
}),
],
}),
})],
},
})],
}
}
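// The remaining scenario builders are placeholders that delegate to the
// simple query until scenario-specific shapes are implemented.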
fn create_complex_query(&self, id: usize) -> Document {
self.create_simple_query(id)
}
fn create_deep_nested_query(&self, id: usize) -> Document {
self.create_simple_query(id)
}
fn create_federated_query(&self, id: usize) -> Document {
self.create_simple_query(id)
}
fn create_aggregation_query(&self, id: usize) -> Document {
self.create_simple_query(id)
}
fn create_subscription_query(&self, id: usize) -> Document {
self.create_simple_query(id)
}
fn create_bulk_operation_query(&self, id: usize) -> Document {
self.create_simple_query(id)
}
fn create_stress_test_query(&self, id: usize) -> Document {
self.create_simple_query(id)
}
async fn calculate_benchmark_result(
&self,
scenario: &TestScenario,
strategy: &OptimizationStrategy,
response_times: &[Duration],
successful_requests: u64,
failed_requests: u64,
total_time: Duration,
) -> Result<BenchmarkResult> {
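// With no recorded timings there is nothing to aggregate; report zeroed
// latency statistics alongside current system usage.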
if response_times.is_empty() {
let memory_usage_mb = system_monitor::get_current_memory_usage_mb().await;
let cpu_usage_percent = system_monitor::get_current_cpu_usage_percent().await;
return Ok(BenchmarkResult {
scenario: format!("{scenario:?}"),
strategy: format!("{strategy:?}"),
configuration: "default".to_string(),
total_requests: successful_requests + failed_requests,
successful_requests,
failed_requests,
average_response_time: Duration::from_millis(0),
min_response_time: Duration::from_millis(0),
max_response_time: Duration::from_millis(0),
p50_response_time: Duration::from_millis(0),
p95_response_time: Duration::from_millis(0),
p99_response_time: Duration::from_millis(0),
requests_per_second: 0.0,
throughput_mbps: 0.0,
memory_usage_mb,
cpu_usage_percent,
cache_hit_rate: 0.0,
// No timings were recorded, so any attempted requests failed.
error_rate: if failed_requests > 0 { 100.0 } else { 0.0 },
detailed_metrics: None,
});
}
let mut sorted_times = response_times.to_vec();
sorted_times.sort();
let total_requests = successful_requests + failed_requests;
let average_response_time =
response_times.iter().sum::<Duration>() / response_times.len() as u32;
let min_response_time = *sorted_times.first().unwrap_or(&Duration::from_millis(0));
let max_response_time = *sorted_times.last().unwrap_or(&Duration::from_millis(0));
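// Percentile indices use a truncated-rank approximation (floor(len * p));
// the `.get(..)` lookups below guard the boundary cases.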
let p50_index = (sorted_times.len() as f64 * 0.5) as usize;
let p95_index = (sorted_times.len() as f64 * 0.95) as usize;
let p99_index = (sorted_times.len() as f64 * 0.99) as usize;
let p50_response_time = sorted_times
.get(p50_index)
.copied()
.unwrap_or(Duration::from_millis(0));
let p95_response_time = sorted_times
.get(p95_index)
.copied()
.unwrap_or(Duration::from_millis(0));
let p99_response_time = sorted_times
.get(p99_index)
.copied()
.unwrap_or(Duration::from_millis(0));
let requests_per_second = if total_time.as_secs_f64() > 0.0 {
successful_requests as f64 / total_time.as_secs_f64()
} else {
0.0
};
let error_rate = if total_requests > 0 {
(failed_requests as f64 / total_requests as f64) * 100.0
} else {
0.0
};
let cache_hit_rate = if let Some(cache) = &self.cache {
match cache.get_stats().await {
Ok(stats) if (stats.hits + stats.misses) > 0 => {
(stats.hits as f64 / (stats.hits + stats.misses) as f64) * 100.0
}
_ => 0.0,
}
} else {
0.0
};
let memory_usage_mb = system_monitor::get_current_memory_usage_mb().await;
let cpu_usage_percent = system_monitor::get_current_cpu_usage_percent().await;
// Assumed average response payload (~2 KiB) for the throughput estimate;
// replace with measured sizes when available.
let avg_response_size_bytes = 2048.0;
let throughput_mbps = system_monitor::calculate_throughput_mbps(
requests_per_second,
avg_response_size_bytes,
);
Ok(BenchmarkResult {
scenario: format!("{scenario:?}"),
strategy: format!("{strategy:?}"),
configuration: "default".to_string(),
total_requests,
successful_requests,
failed_requests,
average_response_time,
min_response_time,
max_response_time,
p50_response_time,
p95_response_time,
p99_response_time,
requests_per_second,
throughput_mbps,
memory_usage_mb,
cpu_usage_percent,
cache_hit_rate,
error_rate,
detailed_metrics: None,
})
}
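/// Aggregates accumulated results into a report with cross-strategy
/// rankings and recommendations.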
async fn generate_report(&self) -> Result<BenchmarkReport> {
let results = self.results.read().await.clone();
let best_result = results
.iter()
.min_by(|a, b| a.average_response_time.cmp(&b.average_response_time))
.cloned();
let best_strategy = best_result
.as_ref()
.map(|r| r.strategy.clone())
.unwrap_or_else(|| "None".to_string());
// "Best" throughput and cache hit rate are taken across all results rather
// than from the lowest-latency run, so each summary metric is a true best.
let best_throughput = results
.iter()
.map(|r| r.requests_per_second)
.fold(0.0, f64::max);
let best_cache_hit_rate = results
.iter()
.map(|r| r.cache_hit_rate)
.fold(0.0, f64::max);
let mut strategy_rankings = Vec::new();
let strategies: std::collections::HashSet<String> =
results.iter().map(|r| r.strategy.clone()).collect();
for strategy in strategies {
let strategy_results: Vec<_> =
results.iter().filter(|r| r.strategy == strategy).collect();
if !strategy_results.is_empty() {
let avg_response_time = strategy_results
.iter()
.map(|r| r.average_response_time.as_millis() as f64)
.sum::<f64>()
/ strategy_results.len() as f64;
let avg_throughput = strategy_results
.iter()
.map(|r| r.requests_per_second)
.sum::<f64>()
/ strategy_results.len() as f64;
let avg_error_rate = strategy_results.iter().map(|r| r.error_rate).sum::<f64>()
/ strategy_results.len() as f64;
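// Composite lower-is-better score: average latency in ms, plus a
// throughput penalty relative to a 100-rps reference, plus the error-rate
// percentage. Crude, but monotonic in each dimension.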
let score = avg_response_time + (100.0 - avg_throughput) + avg_error_rate;
strategy_rankings.push(StrategyRanking {
strategy: strategy.clone(),
score,
// Per-dimension ranks are not yet computed; only the composite score is.
response_time_rank: 0,
throughput_rank: 0,
reliability_rank: 0,
});
}
}
strategy_rankings.sort_by(|a, b| {
a.score
.partial_cmp(&b.score)
.unwrap_or(std::cmp::Ordering::Equal)
});
let recommendations = self.generate_recommendations(&results).await;
Ok(BenchmarkReport {
config: BenchmarkConfigSummary {
test_duration: self.config.test_duration,
concurrent_users: self.config.concurrent_users,
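// Queries issued per (scenario, strategy) run: users × queries per user.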
total_queries: self.config.queries_per_user * self.config.concurrent_users,
scenarios_tested: self.config.test_scenarios.len(),
strategies_tested: self.config.optimization_strategies.len(),
},
results,
summary: BenchmarkSummary {
best_strategy: best_strategy.clone(),
best_average_response_time: best_result
.as_ref()
.map(|r| r.average_response_time)
.unwrap_or(Duration::from_millis(0)),
best_throughput,
best_cache_hit_rate,
// Improvement relative to a baseline run is not yet computed.
overall_performance_improvement: 0.0,
strategy_rankings,
},
recommendations,
generated_at: std::time::SystemTime::now(),
})
}
async fn generate_recommendations(
&self,
results: &[BenchmarkResult],
) -> Vec<PerformanceRecommendation> {
let mut recommendations = Vec::new();
// Guard against an empty result set, which would otherwise make the
// averages below NaN.
if results.is_empty() {
return recommendations;
}
let avg_error_rate =
results.iter().map(|r| r.error_rate).sum::<f64>() / results.len() as f64;
if avg_error_rate > 5.0 {
recommendations.push(PerformanceRecommendation {
category: "Reliability".to_string(),
recommendation: "High error rate detected. Consider implementing better error handling and retry mechanisms.".to_string(),
impact: "High".to_string(),
priority: "Critical".to_string(),
});
}
let avg_response_time = results
.iter()
.map(|r| r.average_response_time.as_millis() as f64)
.sum::<f64>()
/ results.len() as f64;
if avg_response_time > 1000.0 {
recommendations.push(PerformanceRecommendation {
category: "Performance".to_string(),
recommendation:
"Response times are high. Consider implementing hybrid optimization strategies."
.to_string(),
impact: "Medium".to_string(),
priority: "High".to_string(),
});
}
let cache_results: Vec<_> = results.iter().filter(|r| r.cache_hit_rate > 0.0).collect();
if !cache_results.is_empty() {
let avg_cache_hit_rate = cache_results.iter().map(|r| r.cache_hit_rate).sum::<f64>()
/ cache_results.len() as f64;
if avg_cache_hit_rate < 50.0 {
recommendations.push(PerformanceRecommendation {
category: "Caching".to_string(),
recommendation: "Low cache hit rate. Consider optimizing cache configuration and TTL settings.".to_string(),
impact: "Medium".to_string(),
priority: "Medium".to_string(),
});
}
}
recommendations
}
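/// Regenerates the report and writes it to `path` in the requested format.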
pub async fn export_results(&self, format: ExportFormat, path: &str) -> Result<()> {
let report = self.generate_report().await?;
match format {
ExportFormat::Json => {
let json = serde_json::to_string_pretty(&report)?;
tokio::fs::write(path, json).await?;
}
ExportFormat::Csv => {
let csv = self.results_to_csv(&report.results)?;
tokio::fs::write(path, csv).await?;
}
ExportFormat::Html => {
let html = self.results_to_html(&report)?;
tokio::fs::write(path, html).await?;
}
}
info!("Benchmark results exported to: {}", path);
Ok(())
}
fn results_to_csv(&self, results: &[BenchmarkResult]) -> Result<String> {
let mut csv = String::new();
csv.push_str("Scenario,Strategy,Configuration,TotalRequests,SuccessfulRequests,FailedRequests,AvgResponseTime,MinResponseTime,MaxResponseTime,RequestsPerSecond,ErrorRate,CacheHitRate\n");
for result in results {
csv.push_str(&format!(
"{},{},{},{},{},{},{},{},{},{:.2},{:.2},{:.2}\n",
result.scenario,
result.strategy,
result.configuration,
result.total_requests,
result.successful_requests,
result.failed_requests,
result.average_response_time.as_millis(),
result.min_response_time.as_millis(),
result.max_response_time.as_millis(),
result.requests_per_second,
result.error_rate,
result.cache_hit_rate,
));
}
Ok(csv)
}
fn results_to_html(&self, report: &BenchmarkReport) -> Result<String> {
let mut html = String::new();
html.push_str("<!DOCTYPE html><html><head><title>Benchmark Report</title></head><body>");
html.push_str("<h1>Performance Benchmark Report</h1>");
html.push_str(&format!("<p>Generated at: {:?}</p>", report.generated_at));
html.push_str(&format!(
"<p>Best Strategy: {}</p>",
report.summary.best_strategy
));
html.push_str("<table border='1'><tr><th>Scenario</th><th>Strategy</th><th>Avg Response Time</th><th>Requests/sec</th><th>Error Rate</th></tr>");
for result in &report.results {
html.push_str(&format!(
"<tr><td>{}</td><td>{}</td><td>{}ms</td><td>{:.2}</td><td>{:.2}%</td></tr>",
result.scenario,
result.strategy,
result.average_response_time.as_millis(),
result.requests_per_second,
result.error_rate,
));
}
html.push_str("</table></body></html>");
Ok(html)
}
}
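/// Supported report export formats.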
#[derive(Debug, Clone)]
pub enum ExportFormat {
Json,
Csv,
Html,
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_benchmark_suite_creation() {
let config = BenchmarkConfig::default();
let mut suite = PerformanceBenchmarkSuite::new(config);
assert!(suite.initialize_optimizers().await.is_ok());
}
#[tokio::test]
async fn test_query_generation() {
let config = BenchmarkConfig::default();
let suite = PerformanceBenchmarkSuite::new(config);
let queries = suite.generate_test_queries(&TestScenario::SimpleQuery, 5);
assert_eq!(queries.len(), 5);
}
#[test]
fn test_csv_export() {
let config = BenchmarkConfig::default();
let suite = PerformanceBenchmarkSuite::new(config);
let results = vec![BenchmarkResult {
scenario: "SimpleQuery".to_string(),
strategy: "ML".to_string(),
configuration: "default".to_string(),
total_requests: 100,
successful_requests: 95,
failed_requests: 5,
average_response_time: Duration::from_millis(150),
min_response_time: Duration::from_millis(50),
max_response_time: Duration::from_millis(300),
p50_response_time: Duration::from_millis(140),
p95_response_time: Duration::from_millis(250),
p99_response_time: Duration::from_millis(290),
requests_per_second: 50.0,
throughput_mbps: 1.5,
memory_usage_mb: 128.0,
cpu_usage_percent: 25.0,
cache_hit_rate: 75.0,
error_rate: 5.0,
detailed_metrics: None,
}];
let csv = suite.results_to_csv(&results).expect("should succeed");
assert!(csv.contains("SimpleQuery"));
assert!(csv.contains("ML"));
assert!(csv.contains("95"));
}
}