use crate::yahoo_error::YahooError;
use std::collections::{HashMap, BTreeMap, VecDeque};
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime};
use tokio::sync::RwLock;
use serde::{Deserialize, Serialize};
/// Tunable settings driving the [`PerformanceOptimizer`] monitoring loop.
#[derive(Debug, Clone)]
pub struct PerformanceConfig {
/// When true, each monitoring cycle also regenerates recommendations and
/// checks alert thresholds.
pub auto_optimization: bool,
/// Tick period of the background monitoring task.
pub monitoring_interval: Duration,
/// Intended spacing between benchmark runs.
/// NOTE(review): not read by any code visible in this file — confirm usage.
pub benchmark_interval: Duration,
/// Snapshots older than this are evicted from the historical data queue.
pub retention_period: Duration,
/// Limits whose violation raises a [`PerformanceAlert`].
pub alert_thresholds: AlertThresholds,
/// Desired performance targets.
pub optimization_targets: OptimizationTargets,
}
/// Limits that, when exceeded, cause the monitor to raise alerts.
#[derive(Debug, Clone)]
pub struct AlertThresholds {
/// Mean response time above this raises a Warning alert.
pub max_response_time: Duration,
/// Error rate (fraction, 0.0–1.0) above this raises a Critical alert.
pub max_error_rate: f64,
/// Minimum acceptable requests per second.
/// NOTE(review): not checked by the visible alert logic — confirm.
pub min_throughput: f64,
/// Maximum acceptable memory usage in bytes.
/// NOTE(review): not checked by the visible alert logic — confirm.
pub max_memory_usage: u64,
/// Relative degradation (fraction) considered significant.
/// NOTE(review): not read by any code visible in this file — confirm.
pub degradation_threshold: f64,
}
/// Goal values the optimizer aims for.
/// NOTE(review): these targets are not read by any code visible in this
/// file — presumably consumed elsewhere; confirm.
#[derive(Debug, Clone)]
pub struct OptimizationTargets {
/// Desired mean response time.
pub target_response_time: Duration,
/// Desired requests per second.
pub target_throughput: f64,
/// Desired error rate (fraction, 0.0–1.0).
pub target_error_rate: f64,
/// Desired memory efficiency (unit appears to be bytes — confirm).
pub target_memory_efficiency: u64,
}
/// A point-in-time measurement of all tracked performance dimensions.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSnapshot {
/// When this snapshot was taken.
pub timestamp: SystemTime,
/// Latency distribution statistics.
pub response_time_stats: ResponseTimeStats,
/// Request-rate statistics.
pub throughput_stats: ThroughputStats,
/// Error-rate statistics and distribution.
pub error_stats: ErrorStats,
/// Memory usage statistics.
pub memory_stats: MemoryStats,
/// Network transfer and connection statistics.
pub network_stats: NetworkStats,
/// Cache hit/miss statistics.
pub cache_stats: CachePerformanceStats,
}
/// Summary statistics of the response-time distribution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResponseTimeStats {
/// Arithmetic mean latency.
pub mean: Duration,
/// Median (50th percentile) latency.
pub median: Duration,
/// 95th percentile latency.
pub p95: Duration,
/// 99th percentile latency.
pub p99: Duration,
/// Slowest observed latency.
pub max: Duration,
/// Fastest observed latency.
pub min: Duration,
/// Standard deviation of latency.
pub std_dev: Duration,
}
/// Request-throughput statistics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ThroughputStats {
/// Instantaneous requests per second.
pub current_rps: f64,
/// Average requests per second over the observation window.
pub avg_rps: f64,
/// Highest observed requests per second.
pub peak_rps: f64,
/// Total requests served.
pub total_requests: u64,
/// Number of requests currently in flight.
pub concurrent_requests: usize,
}
/// Error-rate statistics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrorStats {
/// Instantaneous error rate (fraction, 0.0–1.0).
pub current_error_rate: f64,
/// Average error rate over the observation window.
pub avg_error_rate: f64,
/// Total errors observed.
pub total_errors: u64,
/// Error counts keyed by error category name.
pub error_distribution: HashMap<String, u64>,
}
/// Memory usage statistics (values appear to be bytes — confirm).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryStats {
/// Current memory usage.
pub current_usage: u64,
/// Highest observed memory usage.
pub peak_usage: u64,
/// Average memory usage.
pub avg_usage: u64,
/// Memory efficiency metric (semantics not defined here — confirm).
pub efficiency: u64,
}
/// Network transfer and connection statistics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NetworkStats {
/// Total bytes sent.
pub bytes_sent: u64,
/// Total bytes received.
pub bytes_received: u64,
/// Connection-pool utilization (fraction, 0.0–1.0).
pub pool_utilization: f64,
/// Time spent resolving DNS.
pub dns_resolution_time: Duration,
/// Time spent establishing connections.
pub connection_time: Duration,
}
/// Cache effectiveness statistics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CachePerformanceStats {
/// Fraction of lookups served from cache.
pub hit_rate: f64,
/// Fraction of lookups that missed the cache.
pub miss_rate: f64,
/// Average time per cache lookup.
pub avg_lookup_time: Duration,
/// Overall cache efficiency (semantics not defined here — confirm).
pub efficiency_ratio: f64,
}
/// Outcome of a single benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkResult {
/// Identifier of the benchmark scenario.
pub test_name: String,
/// When the benchmark completed.
pub timestamp: SystemTime,
/// Wall-clock duration of the run.
pub duration: Duration,
/// Performance snapshot collected for the run.
pub measurements: PerformanceSnapshot,
/// Configuration the benchmark was executed with.
pub test_config: BenchmarkTestConfig,
/// Composite score in [0.0, 1.0] (see `calculate_benchmark_score`).
pub performance_score: f64,
}
/// Parameters for one benchmark scenario.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkTestConfig {
/// Number of concurrent workers.
pub concurrency: usize,
/// Total requests to issue.
pub total_requests: usize,
/// Hard time limit for the run.
pub duration_limit: Duration,
/// Request kinds exercised (e.g. "quote", "chart", "search").
pub request_types: Vec<String>,
/// Payload size per request (unit appears to be bytes — confirm).
pub data_size: usize,
}
/// A suggested tuning action produced by the monitoring loop.
#[derive(Debug, Clone)]
pub struct OptimizationRecommendation {
/// Category of the recommendation.
pub recommendation_type: RecommendationType,
/// Urgency rank; generated values here are 3–4 (higher appears to mean
/// more urgent — confirm the intended scale).
pub priority: u8,
/// Human-readable description of the observed problem.
pub description: String,
/// Human-readable suggested remediation.
pub suggested_action: String,
/// Expected relative improvement (fraction, e.g. 0.30 = 30%).
pub expected_improvement: f64,
/// Implementation complexity rank (scale not defined here — confirm).
pub complexity: u8,
}
/// Categories of optimization recommendations.
#[derive(Debug, Clone, PartialEq)]
pub enum RecommendationType {
/// Improve cache hit ratio or cache storage.
CacheOptimization,
/// Adjust connection pool sizing or reuse.
ConnectionPoolTuning,
/// Adjust request rate limits.
RateLimitAdjustment,
/// Reduce or restructure memory usage.
MemoryOptimization,
/// Improve network usage (DNS, connections, transfer).
NetworkOptimization,
/// General configuration changes.
ConfigurationTuning,
}
/// An alert raised when a metric crosses a configured threshold.
#[derive(Debug, Clone)]
pub struct PerformanceAlert {
/// Severity of the alert.
pub level: AlertLevel,
/// Human-readable alert message.
pub message: String,
/// When the alert was raised.
pub timestamp: SystemTime,
/// Name of the offending metric (e.g. "response_time", "error_rate").
pub metric: String,
/// Observed value of the metric.
pub current_value: f64,
/// Threshold that was exceeded.
pub threshold_value: f64,
}
/// Alert severity, in increasing order.
#[derive(Debug, Clone, PartialEq)]
pub enum AlertLevel {
/// Informational only.
Info,
/// Degradation worth attention.
Warning,
/// Severe problem requiring action.
Critical,
}
/// Central performance monitor: collects snapshots on a timer, keeps bounded
/// history, runs benchmarks, and produces recommendations/alerts/reports.
/// All shared state is behind `Arc<RwLock<_>>` so the spawned monitoring
/// task and callers can access it concurrently.
pub struct PerformanceOptimizer {
// Static configuration captured at construction.
config: PerformanceConfig,
// Most recent snapshot (replaced each monitoring cycle).
current_metrics: Arc<RwLock<PerformanceSnapshot>>,
// Chronological snapshot history, pruned by `retention_period`.
historical_data: Arc<RwLock<VecDeque<PerformanceSnapshot>>>,
// Benchmark results, capped at the most recent 100.
benchmark_history: Arc<RwLock<Vec<BenchmarkResult>>>,
// Latest recommendations (fully replaced each cycle).
recommendations: Arc<RwLock<Vec<OptimizationRecommendation>>>,
// Accumulated alerts, capped at the most recent 50.
alerts: Arc<RwLock<Vec<PerformanceAlert>>>,
// Counters describing the monitoring loop itself.
monitoring_stats: Arc<RwLock<MonitoringStats>>,
}
/// Counters describing the monitoring loop's own activity.
#[derive(Debug, Clone)]
struct MonitoringStats {
/// Number of completed monitoring cycles.
pub total_cycles: u64,
/// Optimizations applied so far.
/// NOTE(review): never incremented in the visible code — confirm.
pub optimizations_applied: u64,
/// Alerts raised so far.
pub alerts_generated: u64,
/// Accumulated monitor uptime.
/// NOTE(review): never updated in the visible code — confirm.
pub uptime: Duration,
/// Timestamp of the last applied optimization, if any.
pub last_optimization: Option<SystemTime>,
}
/// Detects performance regressions against stored baselines.
/// NOTE(review): no methods or construction sites are visible in this file —
/// presumably implemented elsewhere; confirm this type is not dead code.
pub struct RegressionDetector {
// Baselines keyed by identifier, ordered for deterministic iteration.
baselines: BTreeMap<String, PerformanceBaseline>,
// Relative-change limits that constitute a regression.
detection_thresholds: RegressionThresholds,
}
/// A stored reference measurement used for regression comparison.
#[derive(Debug, Clone)]
struct PerformanceBaseline {
/// Baseline identifier.
pub id: String,
/// When the baseline was recorded.
pub timestamp: SystemTime,
/// The reference snapshot.
pub measurements: PerformanceSnapshot,
/// Statistical confidence interval for the baseline (fraction — confirm).
pub confidence_interval: f64,
}
/// Relative-change limits beyond which a metric counts as regressed.
/// (Values are presumably fractions, e.g. 0.1 = 10% — confirm.)
#[derive(Debug, Clone)]
struct RegressionThresholds {
/// Allowed relative increase in response time.
pub response_time_threshold: f64,
/// Allowed relative decrease in throughput.
pub throughput_threshold: f64,
/// Allowed relative increase in error rate.
pub error_rate_threshold: f64,
/// Allowed relative increase in memory usage.
pub memory_threshold: f64,
}
impl Default for PerformanceConfig {
fn default() -> Self {
Self {
auto_optimization: true,
monitoring_interval: Duration::from_secs(30),
benchmark_interval: Duration::from_secs(300), retention_period: Duration::from_secs(86400 * 7), alert_thresholds: AlertThresholds::default(),
optimization_targets: OptimizationTargets::default(),
}
}
}
impl Default for AlertThresholds {
fn default() -> Self {
Self {
max_response_time: Duration::from_secs(5),
max_error_rate: 0.05, min_throughput: 1.0, max_memory_usage: 100 * 1024 * 1024, degradation_threshold: 0.20, }
}
}
impl Default for OptimizationTargets {
fn default() -> Self {
Self {
target_response_time: Duration::from_secs(1),
target_throughput: 50.0, target_error_rate: 0.01, target_memory_efficiency: 1024 * 1024, }
}
}
impl PerformanceOptimizer {
    /// Creates an optimizer with the given configuration, a placeholder
    /// current snapshot, and empty history/recommendation/alert stores.
    pub fn new(config: PerformanceConfig) -> Self {
        Self {
            config,
            current_metrics: Arc::new(RwLock::new(PerformanceSnapshot::default())),
            historical_data: Arc::new(RwLock::new(VecDeque::new())),
            benchmark_history: Arc::new(RwLock::new(Vec::new())),
            recommendations: Arc::new(RwLock::new(Vec::new())),
            alerts: Arc::new(RwLock::new(Vec::new())),
            monitoring_stats: Arc::new(RwLock::new(MonitoringStats::default())),
        }
    }

    /// Spawns a detached background task that, every `monitoring_interval`:
    /// collects a snapshot, publishes it as the current metrics, appends it to
    /// the history (pruning entries older than `retention_period`), and — when
    /// `auto_optimization` is enabled — refreshes recommendations and checks
    /// alert thresholds.
    ///
    /// The task runs until the runtime shuts down.
    /// NOTE(review): consider returning the `JoinHandle` so callers can stop it.
    pub async fn start_monitoring(&self) -> Result<(), YahooError> {
        let config = self.config.clone();
        let current_metrics = Arc::clone(&self.current_metrics);
        let historical_data = Arc::clone(&self.historical_data);
        let recommendations = Arc::clone(&self.recommendations);
        let alerts = Arc::clone(&self.alerts);
        let monitoring_stats = Arc::clone(&self.monitoring_stats);
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(config.monitoring_interval);
            loop {
                interval.tick().await;
                // Best effort: a failed collection skips the whole cycle.
                let snapshot = match Self::collect_performance_snapshot().await {
                    Ok(s) => s,
                    Err(_) => continue,
                };
                {
                    let mut metrics = current_metrics.write().await;
                    *metrics = snapshot.clone();
                }
                {
                    let mut history = historical_data.write().await;
                    history.push_back(snapshot.clone());
                    // `checked_sub` instead of `-`: `SystemTime - Duration`
                    // panics if the result is unrepresentable; if the window
                    // cannot be computed, skip pruning this cycle.
                    if let Some(cutoff) = SystemTime::now().checked_sub(config.retention_period) {
                        while let Some(front) = history.front() {
                            if front.timestamp < cutoff {
                                history.pop_front();
                            } else {
                                break;
                            }
                        }
                    }
                }
                let mut alerts_raised = 0usize;
                if config.auto_optimization {
                    Self::generate_recommendations(&snapshot, &recommendations).await;
                    alerts_raised =
                        Self::check_alert_thresholds(&snapshot, &config.alert_thresholds, &alerts)
                            .await;
                }
                {
                    let mut stats = monitoring_stats.write().await;
                    stats.total_cycles += 1;
                    // Fix: `alerts_generated` was previously never updated.
                    stats.alerts_generated += alerts_raised as u64;
                }
            }
        });
        Ok(())
    }

    /// Runs a fixed ladder of benchmark scenarios with increasing concurrency
    /// and request mix, appends the results to the benchmark history (capped
    /// at the most recent 100 entries), and returns them.
    pub async fn run_benchmarks(&self) -> Result<Vec<BenchmarkResult>, YahooError> {
        let mut results = Vec::new();
        let test_configs = vec![
            BenchmarkTestConfig {
                concurrency: 1,
                total_requests: 100,
                duration_limit: Duration::from_secs(60),
                request_types: vec!["quote".to_string()],
                data_size: 1024,
            },
            BenchmarkTestConfig {
                concurrency: 5,
                total_requests: 500,
                duration_limit: Duration::from_secs(120),
                request_types: vec!["quote".to_string(), "chart".to_string()],
                data_size: 1024,
            },
            BenchmarkTestConfig {
                concurrency: 10,
                total_requests: 1000,
                duration_limit: Duration::from_secs(180),
                request_types: vec!["quote".to_string(), "chart".to_string(), "search".to_string()],
                data_size: 1024,
            },
        ];
        for (i, test_config) in test_configs.iter().enumerate() {
            let result = self.run_single_benchmark(
                &format!("benchmark_test_{}", i + 1),
                test_config.clone()
            ).await?;
            results.push(result);
        }
        {
            let mut history = self.benchmark_history.write().await;
            history.extend(results.clone());
            // Keep only the most recent 100 results.
            if history.len() > 100 {
                let len = history.len();
                history.drain(0..len - 100);
            }
        }
        Ok(results)
    }

    /// Returns a clone of the most recently collected snapshot.
    pub async fn get_current_metrics(&self) -> PerformanceSnapshot {
        self.current_metrics.read().await.clone()
    }

    /// Returns historical snapshots in chronological order (oldest first).
    /// With `Some(n)`, only the most recent `n` snapshots are returned.
    ///
    /// Fix: `Some(n)` previously returned newest-first while `None` returned
    /// oldest-first; both arms are now chronological.
    pub async fn get_historical_data(&self, limit: Option<usize>) -> Vec<PerformanceSnapshot> {
        let history = self.historical_data.read().await;
        match limit {
            Some(n) => {
                // Skip all but the last `n` entries, preserving order.
                let skip = history.len().saturating_sub(n);
                history.iter().skip(skip).cloned().collect()
            }
            None => history.iter().cloned().collect(),
        }
    }

    /// Returns the recommendations produced by the last monitoring cycle.
    pub async fn get_recommendations(&self) -> Vec<OptimizationRecommendation> {
        self.recommendations.read().await.clone()
    }

    /// Returns accumulated alerts, optionally filtered by severity level.
    pub async fn get_alerts(&self, level: Option<AlertLevel>) -> Vec<PerformanceAlert> {
        let alerts = self.alerts.read().await;
        match level {
            Some(target_level) => alerts.iter()
                .filter(|alert| alert.level == target_level)
                .cloned()
                .collect(),
            None => alerts.clone(),
        }
    }

    /// Builds a report combining the current snapshot, a summary of up to the
    /// last 100 historical snapshots, current recommendations, all alerts,
    /// and an overall score in [0.0, 1.0].
    pub async fn generate_performance_report(&self) -> Result<PerformanceReport, YahooError> {
        let current = self.get_current_metrics().await;
        let historical = self.get_historical_data(Some(100)).await;
        let recommendations = self.get_recommendations().await;
        let alerts = self.get_alerts(None).await;
        Ok(PerformanceReport {
            timestamp: SystemTime::now(),
            current_metrics: current,
            historical_summary: Self::summarize_historical_data(&historical),
            recommendations,
            alerts,
            overall_score: Self::calculate_performance_score(&historical),
        })
    }

    /// Collects a performance snapshot.
    /// NOTE(review): placeholder — currently returns `Default` values rather
    /// than real measurements.
    async fn collect_performance_snapshot() -> Result<PerformanceSnapshot, YahooError> {
        Ok(PerformanceSnapshot::default())
    }

    /// Derives recommendations from a snapshot and REPLACES the shared
    /// recommendation list with them (previous entries are discarded).
    async fn generate_recommendations(
        snapshot: &PerformanceSnapshot,
        recommendations: &Arc<RwLock<Vec<OptimizationRecommendation>>>
    ) {
        let mut new_recommendations = Vec::new();
        // Mean latency above 2 s → suggest cache work.
        if snapshot.response_time_stats.mean > Duration::from_secs(2) {
            new_recommendations.push(OptimizationRecommendation {
                recommendation_type: RecommendationType::CacheOptimization,
                priority: 4,
                description: "High response times detected".to_string(),
                suggested_action: "Increase cache hit ratio or optimize cache storage".to_string(),
                expected_improvement: 0.30,
                complexity: 3,
            });
        }
        // Throughput below 10 rps → suggest connection-pool tuning.
        if snapshot.throughput_stats.current_rps < 10.0 {
            new_recommendations.push(OptimizationRecommendation {
                recommendation_type: RecommendationType::ConnectionPoolTuning,
                priority: 3,
                description: "Low throughput detected".to_string(),
                suggested_action: "Increase connection pool size or optimize connection reuse".to_string(),
                expected_improvement: 0.25,
                complexity: 2,
            });
        }
        let mut recs = recommendations.write().await;
        recs.clear();
        recs.extend(new_recommendations);
    }

    /// Appends threshold-violation alerts to the shared alert list, keeping
    /// only the most recent 50 entries. Returns the number of alerts raised
    /// this call (used to maintain `MonitoringStats::alerts_generated`).
    async fn check_alert_thresholds(
        snapshot: &PerformanceSnapshot,
        thresholds: &AlertThresholds,
        alerts: &Arc<RwLock<Vec<PerformanceAlert>>>
    ) -> usize {
        let mut new_alerts = Vec::new();
        if snapshot.response_time_stats.mean > thresholds.max_response_time {
            new_alerts.push(PerformanceAlert {
                level: AlertLevel::Warning,
                message: "Response time exceeded threshold".to_string(),
                timestamp: SystemTime::now(),
                metric: "response_time".to_string(),
                current_value: snapshot.response_time_stats.mean.as_secs_f64(),
                threshold_value: thresholds.max_response_time.as_secs_f64(),
            });
        }
        if snapshot.error_stats.current_error_rate > thresholds.max_error_rate {
            new_alerts.push(PerformanceAlert {
                level: AlertLevel::Critical,
                message: "Error rate exceeded threshold".to_string(),
                timestamp: SystemTime::now(),
                metric: "error_rate".to_string(),
                current_value: snapshot.error_stats.current_error_rate,
                threshold_value: thresholds.max_error_rate,
            });
        }
        let raised = new_alerts.len();
        let mut alert_list = alerts.write().await;
        alert_list.extend(new_alerts);
        if alert_list.len() > 50 {
            let len = alert_list.len();
            alert_list.drain(0..len - 50);
        }
        raised
    }

    /// Executes one benchmark scenario and scores it.
    /// NOTE(review): the workload is currently simulated by a 100 ms sleep;
    /// the snapshot comes from the placeholder collector.
    async fn run_single_benchmark(
        &self,
        test_name: &str,
        test_config: BenchmarkTestConfig
    ) -> Result<BenchmarkResult, YahooError> {
        let start_time = Instant::now();
        tokio::time::sleep(Duration::from_millis(100)).await;
        let duration = start_time.elapsed();
        let measurements = Self::collect_performance_snapshot().await?;
        let performance_score = Self::calculate_benchmark_score(&measurements, &test_config);
        Ok(BenchmarkResult {
            test_name: test_name.to_string(),
            timestamp: SystemTime::now(),
            duration,
            measurements,
            test_config,
            performance_score,
        })
    }

    /// Scores a benchmark in [0.0, 1.0]: each of latency (< 1 s), throughput
    /// (> 10 rps), and error rate (< 1%) contributes 1.0 if met else 0.5,
    /// averaged.
    fn calculate_benchmark_score(
        measurements: &PerformanceSnapshot,
        _test_config: &BenchmarkTestConfig
    ) -> f64 {
        let response_time_score = if measurements.response_time_stats.mean.as_secs_f64() < 1.0 { 1.0 } else { 0.5 };
        let throughput_score = if measurements.throughput_stats.current_rps > 10.0 { 1.0 } else { 0.5 };
        let error_score = if measurements.error_stats.current_error_rate < 0.01 { 1.0 } else { 0.5 };
        (response_time_score + throughput_score + error_score) / 3.0
    }

    /// Averages mean latency, current rps, and current error rate across the
    /// given snapshots; returns `Default` values when the slice is empty.
    fn summarize_historical_data(historical: &[PerformanceSnapshot]) -> HistoricalSummary {
        if historical.is_empty() {
            return HistoricalSummary::default();
        }
        let avg_response_time = historical.iter()
            .map(|s| s.response_time_stats.mean.as_secs_f64())
            .sum::<f64>() / historical.len() as f64;
        let avg_throughput = historical.iter()
            .map(|s| s.throughput_stats.current_rps)
            .sum::<f64>() / historical.len() as f64;
        let avg_error_rate = historical.iter()
            .map(|s| s.error_stats.current_error_rate)
            .sum::<f64>() / historical.len() as f64;
        HistoricalSummary {
            avg_response_time: Duration::from_secs_f64(avg_response_time),
            avg_throughput,
            avg_error_rate,
            data_points: historical.len(),
        }
    }

    /// Overall score in [0.0, 1.0] using the same three-way average as
    /// `calculate_benchmark_score`, applied to the historical averages.
    /// Returns 0.0 when there is no history.
    fn calculate_performance_score(historical: &[PerformanceSnapshot]) -> f64 {
        if historical.is_empty() {
            return 0.0;
        }
        let summary = Self::summarize_historical_data(historical);
        let response_score = if summary.avg_response_time.as_secs_f64() < 1.0 { 1.0 } else { 0.5 };
        let throughput_score = if summary.avg_throughput > 10.0 { 1.0 } else { 0.5 };
        let error_score = if summary.avg_error_rate < 0.01 { 1.0 } else { 0.5 };
        (response_score + throughput_score + error_score) / 3.0
    }
}
/// Aggregate report produced by `PerformanceOptimizer::generate_performance_report`.
#[derive(Debug, Clone)]
pub struct PerformanceReport {
/// When the report was generated.
pub timestamp: SystemTime,
/// The latest snapshot at report time.
pub current_metrics: PerformanceSnapshot,
/// Averages over the recent snapshot history.
pub historical_summary: HistoricalSummary,
/// Current optimization recommendations.
pub recommendations: Vec<OptimizationRecommendation>,
/// Accumulated alerts (all severity levels).
pub alerts: Vec<PerformanceAlert>,
/// Composite score in [0.0, 1.0]; 0.0 when there is no history.
pub overall_score: f64,
}
/// Averages computed over a set of historical snapshots.
#[derive(Debug, Clone)]
pub struct HistoricalSummary {
/// Mean of the snapshots' mean response times.
pub avg_response_time: Duration,
/// Mean of the snapshots' current rps values.
pub avg_throughput: f64,
/// Mean of the snapshots' current error rates.
pub avg_error_rate: f64,
/// Number of snapshots summarized.
pub data_points: usize,
}
impl Default for PerformanceSnapshot {
fn default() -> Self {
Self {
timestamp: SystemTime::now(),
response_time_stats: ResponseTimeStats::default(),
throughput_stats: ThroughputStats::default(),
error_stats: ErrorStats::default(),
memory_stats: MemoryStats::default(),
network_stats: NetworkStats::default(),
cache_stats: CachePerformanceStats::default(),
}
}
}
impl Default for ResponseTimeStats {
fn default() -> Self {
Self {
mean: Duration::from_millis(500),
median: Duration::from_millis(400),
p95: Duration::from_millis(1000),
p99: Duration::from_millis(1500),
max: Duration::from_millis(2000),
min: Duration::from_millis(100),
std_dev: Duration::from_millis(200),
}
}
}
impl Default for ThroughputStats {
fn default() -> Self {
Self {
current_rps: 15.0,
avg_rps: 12.0,
peak_rps: 25.0,
total_requests: 1000,
concurrent_requests: 3,
}
}
}
impl Default for ErrorStats {
fn default() -> Self {
Self {
current_error_rate: 0.02,
avg_error_rate: 0.015,
total_errors: 20,
error_distribution: HashMap::new(),
}
}
}
impl Default for MemoryStats {
fn default() -> Self {
Self {
current_usage: 50 * 1024 * 1024, peak_usage: 75 * 1024 * 1024, avg_usage: 45 * 1024 * 1024, efficiency: 1024 * 1024, }
}
}
impl Default for NetworkStats {
fn default() -> Self {
Self {
bytes_sent: 1024 * 1024, bytes_received: 5 * 1024 * 1024, pool_utilization: 0.7, dns_resolution_time: Duration::from_millis(50),
connection_time: Duration::from_millis(100),
}
}
}
impl Default for CachePerformanceStats {
fn default() -> Self {
Self {
hit_rate: 0.85, miss_rate: 0.15, avg_lookup_time: Duration::from_millis(5),
efficiency_ratio: 0.9, }
}
}
impl Default for MonitoringStats {
fn default() -> Self {
Self {
total_cycles: 0,
optimizations_applied: 0,
alerts_generated: 0,
uptime: Duration::from_secs(0),
last_optimization: None,
}
}
}
impl Default for HistoricalSummary {
fn default() -> Self {
Self {
avg_response_time: Duration::from_millis(500),
avg_throughput: 10.0,
avg_error_rate: 0.02,
data_points: 0,
}
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A new optimizer exposes non-zero default latency metrics.
    #[tokio::test]
    async fn test_performance_optimizer_creation() {
        let optimizer = PerformanceOptimizer::new(PerformanceConfig::default());
        let snapshot = optimizer.get_current_metrics().await;
        assert!(snapshot.response_time_stats.mean > Duration::from_secs(0));
    }

    /// Benchmarks run to completion and score within [0, 1].
    #[tokio::test]
    async fn test_benchmark_execution() {
        let optimizer = PerformanceOptimizer::new(PerformanceConfig::default());
        let results = optimizer.run_benchmarks().await.unwrap();
        assert!(!results.is_empty());
        let first_score = results[0].performance_score;
        assert!((0.0..=1.0).contains(&first_score));
    }

    /// A report can be produced immediately and its score is within [0, 1].
    #[tokio::test]
    async fn test_performance_report_generation() {
        let optimizer = PerformanceOptimizer::new(PerformanceConfig::default());
        let report = optimizer.generate_performance_report().await.unwrap();
        assert!((0.0..=1.0).contains(&report.overall_score));
    }

    /// Before any monitoring cycle runs, no recommendations exist.
    #[tokio::test]
    async fn test_recommendations_generation() {
        let optimizer = PerformanceOptimizer::new(PerformanceConfig::default());
        assert_eq!(optimizer.get_recommendations().await.len(), 0);
    }

    /// Default alert thresholds match their documented values.
    #[tokio::test]
    async fn test_alert_thresholds() {
        let defaults = AlertThresholds::default();
        assert_eq!(defaults.max_response_time, Duration::from_secs(5));
        assert_eq!(defaults.max_error_rate, 0.05);
    }
}