use super::{MetricValue, MetricsCollector};
use std::collections::HashMap;
use std::time::Duration;
/// Scores the memory tracker's own performance from collected metrics and
/// compares benchmark runs against stored per-operation baselines.
pub struct PerformanceAnalyzer {
    /// Baseline benchmarks keyed by operation name (populated via `set_baseline`).
    baselines: HashMap<String, Benchmark>,
    /// Limits applied when scoring subsystems and generating recommendations.
    thresholds: AnalysisThresholds,
}
/// A measured performance snapshot for one named operation; serves both as a
/// stored baseline and as the "current" side of `compare_to_baseline`.
#[derive(Debug, Clone)]
pub struct Benchmark {
    /// Name of the operation this benchmark measured.
    pub operation: String,
    /// Mean duration of a single operation.
    pub avg_duration: Duration,
    /// Extra memory consumed, in bytes (compared as a ratio between runs).
    pub memory_overhead: usize,
    /// Operations per unit of time — presumably per second; TODO confirm units.
    pub throughput: f64,
    /// Accuracy figure compared by difference between runs — presumably a
    /// fraction in [0, 1]; verify against producers.
    pub accuracy: f64,
    /// Number of samples behind the figures above (informational — not used
    /// in comparisons).
    pub sample_size: usize,
}
/// Limits separating "acceptable" from "penalized" behavior when scoring.
/// Defaults are provided by the `Default` impl at the bottom of this file.
#[derive(Debug, Clone)]
pub struct AnalysisThresholds {
    /// Maximum acceptable tracking-overhead fraction (default 0.05).
    /// NOTE(review): not consulted by any scoring code visible in this file.
    pub max_tracking_overhead: f64,
    /// Allocation tracking latency above this is penalized (default 50 µs).
    pub max_allocation_latency: Duration,
    /// Symbol resolution time above this is penalized (default 5 ms).
    pub max_symbol_resolution_time: Duration,
    /// Tracking completeness below this is penalized proportionally
    /// (default 0.95).
    pub min_tracking_completeness: f64,
    /// Analysis memory above this is penalized (default 512 — compared
    /// against a value labeled in MB; see `score_memory_efficiency`).
    pub max_analysis_memory: usize,
}
/// Full output of `PerformanceAnalyzer::analyze_performance`: one summary per
/// subsystem, a weighted overall score, and human-readable recommendations.
#[derive(Debug, Clone)]
pub struct PerformanceReport {
    /// Weighted combination of the subsystem scores, clamped to [0, 1].
    pub efficiency_score: f64,
    /// Allocation-tracking subsystem summary.
    pub tracking_performance: TrackingPerformance,
    /// Symbol-resolution subsystem summary.
    pub symbol_performance: SymbolPerformance,
    /// Pointer-analysis subsystem summary.
    pub pointer_performance: PointerPerformance,
    /// Overall memory-usage summary.
    pub memory_efficiency: MemoryEfficiency,
    /// Plain-English tuning suggestions derived from threshold violations.
    pub recommendations: Vec<String>,
}
/// Allocation-tracking metrics; each field defaults to zero when the
/// corresponding metric is missing from the collector.
#[derive(Debug, Clone, Default)]
pub struct TrackingPerformance {
    /// Average time spent tracking one allocation ("allocation_tracking_time").
    pub avg_allocation_time: Duration,
    /// Fraction of allocations successfully tracked ("tracking_completeness").
    pub completeness: f64,
    /// Memory overhead of tracking, in bytes ("tracking_memory_overhead").
    pub overhead_bytes: usize,
    /// Allocations processed per second ("allocations_per_second").
    pub throughput: f64,
}
/// Symbol-resolution metrics; each field defaults to zero when the
/// corresponding metric is missing from the collector.
#[derive(Debug, Clone, Default)]
pub struct SymbolPerformance {
    /// Average time to resolve one symbol ("symbol_resolution_time").
    pub avg_resolution_time: Duration,
    /// Cache hit fraction ("symbol_cache_hit_ratio"); multiplies directly
    /// into the symbol score.
    pub cache_hit_ratio: f64,
    /// Symbols resolved per second ("symbols_resolved_per_second").
    pub resolution_rate: f64,
    /// Symbol cache footprint in bytes ("symbol_cache_memory").
    pub cache_memory_usage: usize,
}
/// Pointer-analysis metrics; each field defaults to zero when the
/// corresponding metric is missing from the collector.
#[derive(Debug, Clone, Default)]
pub struct PointerPerformance {
    /// Average pointer-analysis duration ("pointer_analysis_time").
    pub analysis_time: Duration,
    /// Leak-detection accuracy ("leak_detection_accuracy"); multiplies
    /// directly into the pointer score.
    pub leak_detection_accuracy: f64,
    /// Pointers analyzed per second ("pointers_analyzed_per_second").
    pub analysis_rate: f64,
}
/// Memory-usage metrics for the analysis itself; each field defaults to zero
/// when the corresponding metric is missing from the collector.
#[derive(Debug, Clone, Default)]
pub struct MemoryEfficiency {
    /// Total memory used by analysis, in MB ("total_analysis_memory").
    pub total_memory_mb: f64,
    /// Memory cost per tracked allocation ("memory_per_tracked_allocation").
    pub memory_per_allocation: f64,
    /// Memory growth rate ("memory_growth_rate") — units unclear from here;
    /// values above 10.0 are penalized.
    pub growth_rate: f64,
    /// Fragmentation ratio ("memory_fragmentation"); above 0.3 is penalized.
    pub fragmentation_ratio: f64,
}
impl PerformanceAnalyzer {
    /// Creates an analyzer with no stored baselines and default thresholds.
    pub fn new() -> Self {
        Self {
            baselines: HashMap::new(),
            thresholds: AnalysisThresholds::default(),
        }
    }

    /// Creates an analyzer that scores against caller-supplied `thresholds`.
    pub fn with_thresholds(thresholds: AnalysisThresholds) -> Self {
        Self {
            baselines: HashMap::new(),
            thresholds,
        }
    }

    /// Builds a complete report from the collector's current metrics:
    /// per-subsystem summaries, a weighted overall efficiency score, and
    /// plain-English tuning recommendations.
    pub fn analyze_performance(&self, collector: &MetricsCollector) -> PerformanceReport {
        let tracking_performance = self.analyze_tracking_performance(collector);
        let symbol_performance = self.analyze_symbol_performance(collector);
        let pointer_performance = self.analyze_pointer_performance(collector);
        let memory_efficiency = self.analyze_memory_efficiency(collector);

        let efficiency_score = self.calculate_efficiency_score(
            &tracking_performance,
            &symbol_performance,
            &pointer_performance,
            &memory_efficiency,
        );
        let recommendations = self.generate_recommendations(
            &tracking_performance,
            &symbol_performance,
            &pointer_performance,
            &memory_efficiency,
        );

        PerformanceReport {
            efficiency_score,
            tracking_performance,
            symbol_performance,
            pointer_performance,
            memory_efficiency,
            recommendations,
        }
    }

    /// Stores (or replaces) the baseline benchmark for `operation`.
    pub fn set_baseline(&mut self, operation: &str, benchmark: Benchmark) {
        self.baselines.insert(operation.to_string(), benchmark);
    }

    /// Compares `current` against the stored baseline for `operation`.
    ///
    /// Returns `None` when no baseline was registered. Ratios are
    /// current / baseline, so values above 1.0 mean "more than baseline".
    /// NOTE: a zero-valued baseline field yields `inf`/`NaN` ratios (f64
    /// division never panics); register meaningful baselines.
    pub fn compare_to_baseline(
        &self,
        operation: &str,
        current: &Benchmark,
    ) -> Option<PerformanceComparison> {
        self.baselines
            .get(operation)
            .map(|baseline| PerformanceComparison {
                operation: operation.to_string(),
                baseline: baseline.clone(),
                current: current.clone(),
                duration_ratio: current.avg_duration.as_nanos() as f64
                    / baseline.avg_duration.as_nanos() as f64,
                memory_ratio: current.memory_overhead as f64 / baseline.memory_overhead as f64,
                throughput_ratio: current.throughput / baseline.throughput,
                accuracy_diff: current.accuracy - baseline.accuracy,
            })
    }

    /// Reads a timer metric's average duration; `Duration::ZERO` when the
    /// metric is absent or not a timer.
    fn timer_average(collector: &MetricsCollector, name: &str) -> Duration {
        collector
            .get_metric(name)
            .and_then(|m| match &m.value {
                MetricValue::Timer(timer) => Some(timer.average_duration()),
                _ => None,
            })
            .unwrap_or(Duration::ZERO)
    }

    /// Reads a gauge metric's value; 0.0 when absent or not a gauge.
    fn gauge_value(collector: &MetricsCollector, name: &str) -> f64 {
        collector
            .get_metric(name)
            .and_then(|m| match &m.value {
                MetricValue::Gauge(value) => Some(*value),
                _ => None,
            })
            .unwrap_or(0.0)
    }

    /// Reads a rate metric's current rate; 0.0 when absent or not a rate.
    fn rate_value(collector: &MetricsCollector, name: &str) -> f64 {
        collector
            .get_metric(name)
            .and_then(|m| match &m.value {
                MetricValue::Rate(rate) => Some(rate.current_rate),
                _ => None,
            })
            .unwrap_or(0.0)
    }

    /// Summarizes allocation-tracking metrics; missing metrics read as zero.
    fn analyze_tracking_performance(&self, collector: &MetricsCollector) -> TrackingPerformance {
        TrackingPerformance {
            avg_allocation_time: Self::timer_average(collector, "allocation_tracking_time"),
            completeness: Self::gauge_value(collector, "tracking_completeness"),
            // Gauge is stored as f64; truncating cast mirrors how the metric
            // is produced (byte counts).
            overhead_bytes: Self::gauge_value(collector, "tracking_memory_overhead") as usize,
            throughput: Self::rate_value(collector, "allocations_per_second"),
        }
    }

    /// Summarizes symbol-resolution metrics; missing metrics read as zero.
    fn analyze_symbol_performance(&self, collector: &MetricsCollector) -> SymbolPerformance {
        SymbolPerformance {
            avg_resolution_time: Self::timer_average(collector, "symbol_resolution_time"),
            cache_hit_ratio: Self::gauge_value(collector, "symbol_cache_hit_ratio"),
            resolution_rate: Self::rate_value(collector, "symbols_resolved_per_second"),
            cache_memory_usage: Self::gauge_value(collector, "symbol_cache_memory") as usize,
        }
    }

    /// Summarizes pointer-analysis metrics; missing metrics read as zero.
    fn analyze_pointer_performance(&self, collector: &MetricsCollector) -> PointerPerformance {
        PointerPerformance {
            analysis_time: Self::timer_average(collector, "pointer_analysis_time"),
            leak_detection_accuracy: Self::gauge_value(collector, "leak_detection_accuracy"),
            analysis_rate: Self::rate_value(collector, "pointers_analyzed_per_second"),
        }
    }

    /// Summarizes memory-usage metrics; missing metrics read as zero.
    fn analyze_memory_efficiency(&self, collector: &MetricsCollector) -> MemoryEfficiency {
        MemoryEfficiency {
            total_memory_mb: Self::gauge_value(collector, "total_analysis_memory"),
            memory_per_allocation: Self::gauge_value(collector, "memory_per_tracked_allocation"),
            growth_rate: Self::gauge_value(collector, "memory_growth_rate"),
            fragmentation_ratio: Self::gauge_value(collector, "memory_fragmentation"),
        }
    }

    /// Combines the subsystem scores into one figure. Weights: tracking 40%,
    /// symbols 25%, pointers 20%, memory 15%.
    fn calculate_efficiency_score(
        &self,
        tracking: &TrackingPerformance,
        symbol: &SymbolPerformance,
        pointer: &PointerPerformance,
        memory: &MemoryEfficiency,
    ) -> f64 {
        let tracking_score = self.score_tracking_performance(tracking);
        let symbol_score = self.score_symbol_performance(symbol);
        let pointer_score = self.score_pointer_performance(pointer);
        let memory_score = self.score_memory_efficiency(memory);
        tracking_score * 0.4 + symbol_score * 0.25 + pointer_score * 0.2 + memory_score * 0.15
    }

    /// Scores tracking in [0, 1]: penalize high latency (×0.7) and incomplete
    /// tracking (proportionally); reward throughput above 10k/s (×1.1, then
    /// clamped back to 1.0).
    fn score_tracking_performance(&self, tracking: &TrackingPerformance) -> f64 {
        let mut score = 1.0;
        if tracking.avg_allocation_time > self.thresholds.max_allocation_latency {
            score *= 0.7;
        }
        if tracking.completeness < self.thresholds.min_tracking_completeness {
            score *= tracking.completeness / self.thresholds.min_tracking_completeness;
        }
        if tracking.throughput > 10000.0 {
            score *= 1.1;
        }
        score.clamp(0.0, 1.0)
    }

    /// Scores symbol resolution in [0, 1]: penalize slow resolution (×0.8)
    /// and a cache larger than 100 MiB (×0.9); scale by cache hit ratio.
    fn score_symbol_performance(&self, symbol: &SymbolPerformance) -> f64 {
        let mut score = 1.0;
        if symbol.avg_resolution_time > self.thresholds.max_symbol_resolution_time {
            score *= 0.8;
        }
        score *= symbol.cache_hit_ratio;
        if symbol.cache_memory_usage > 100 * 1024 * 1024 {
            score *= 0.9;
        }
        score.clamp(0.0, 1.0)
    }

    /// Scores pointer analysis in [0, 1]: scale by leak-detection accuracy
    /// and penalize analysis slower than 100 ms (×0.8).
    // FIX: parameter was named `_pointer` (signaling "unused") yet used
    // throughout the body — clippy's `used_underscore_binding`.
    fn score_pointer_performance(&self, pointer: &PointerPerformance) -> f64 {
        let mut score: f64 = 1.0;
        score *= pointer.leak_detection_accuracy;
        if pointer.analysis_time > Duration::from_millis(100) {
            score *= 0.8;
        }
        score.clamp(0.0, 1.0)
    }

    /// Scores memory efficiency in [0, 1]: penalize exceeding the memory
    /// budget (×0.7), fragmentation above 0.3 (×0.8), growth above 10 (×0.9).
    fn score_memory_efficiency(&self, memory: &MemoryEfficiency) -> f64 {
        let mut score: f64 = 1.0;
        if memory.total_memory_mb > self.thresholds.max_analysis_memory as f64 {
            score *= 0.7;
        }
        if memory.fragmentation_ratio > 0.3 {
            score *= 0.8;
        }
        if memory.growth_rate > 10.0 {
            score *= 0.9;
        }
        score.clamp(0.0, 1.0)
    }

    /// Emits one suggestion per exceeded limit. Uses hard-coded cut-offs
    /// (not `self.thresholds`); pointer metrics are currently unused here.
    fn generate_recommendations(
        &self,
        tracking: &TrackingPerformance,
        symbol: &SymbolPerformance,
        _pointer: &PointerPerformance,
        memory: &MemoryEfficiency,
    ) -> Vec<String> {
        let mut recommendations = Vec::new();
        if tracking.completeness < 0.95 {
            recommendations
                .push("Improve tracking completeness by reducing lock contention".to_string());
        }
        if tracking.avg_allocation_time > Duration::from_micros(100) {
            recommendations.push("Optimize allocation tracking path for lower latency".to_string());
        }
        if symbol.cache_hit_ratio < 0.8 {
            recommendations.push("Increase symbol cache size to improve hit ratio".to_string());
        }
        if symbol.avg_resolution_time > Duration::from_millis(10) {
            recommendations.push("Consider preloading frequently used symbols".to_string());
        }
        if memory.total_memory_mb > 512.0 {
            recommendations
                .push("Consider reducing memory usage or implementing memory limits".to_string());
        }
        if memory.fragmentation_ratio > 0.2 {
            recommendations.push("Implement memory compaction to reduce fragmentation".to_string());
        }
        recommendations
    }
}
/// Result of `PerformanceAnalyzer::compare_to_baseline`: both benchmarks plus
/// derived ratios/differences. Ratios are current / baseline, so > 1.0 means
/// the current run used more (time, memory) or produced more (throughput).
#[derive(Debug, Clone)]
pub struct PerformanceComparison {
    /// Operation name the comparison was made for.
    pub operation: String,
    /// The stored baseline benchmark.
    pub baseline: Benchmark,
    /// The benchmark being compared against the baseline.
    pub current: Benchmark,
    /// current.avg_duration / baseline.avg_duration (nanosecond precision).
    pub duration_ratio: f64,
    /// current.memory_overhead / baseline.memory_overhead.
    pub memory_ratio: f64,
    /// current.throughput / baseline.throughput.
    pub throughput_ratio: f64,
    /// current.accuracy - baseline.accuracy (positive = improvement).
    pub accuracy_diff: f64,
}
impl Default for AnalysisThresholds {
fn default() -> Self {
Self {
max_tracking_overhead: 0.05, max_allocation_latency: Duration::from_micros(50),
max_symbol_resolution_time: Duration::from_millis(5),
min_tracking_completeness: 0.95,
max_analysis_memory: 512, }
}
}
impl Default for PerformanceAnalyzer {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Both constructors produce the expected initial state.
    #[test]
    fn test_performance_analyzer_creation() {
        let analyzer = PerformanceAnalyzer::new();
        assert!(analyzer.baselines.is_empty());

        let custom_thresholds = AnalysisThresholds {
            max_tracking_overhead: 0.1,
            ..Default::default()
        };
        let analyzer = PerformanceAnalyzer::with_thresholds(custom_thresholds);
        assert_eq!(analyzer.thresholds.max_tracking_overhead, 0.1);
    }

    /// A slower, heavier, less-productive but more-accurate run compares
    /// against its baseline with the expected ratio directions.
    #[test]
    fn test_benchmark_comparison() {
        let mut analyzer = PerformanceAnalyzer::new();
        let baseline = Benchmark {
            operation: "allocation_tracking".to_string(),
            avg_duration: Duration::from_micros(100),
            memory_overhead: 1024,
            throughput: 1000.0,
            accuracy: 0.95,
            sample_size: 10000,
        };
        analyzer.set_baseline("allocation_tracking", baseline.clone());

        let current = Benchmark {
            operation: "allocation_tracking".to_string(),
            avg_duration: Duration::from_micros(120),
            memory_overhead: 1200,
            throughput: 900.0,
            accuracy: 0.97,
            sample_size: 10000,
        };
        // FIX: this call was corrupted by HTML-entity mangling — `&current`
        // had become `&curren;` + "t" and rendered as a stray character,
        // which does not compile.
        let comparison = analyzer.compare_to_baseline("allocation_tracking", &current);
        assert!(comparison.is_some());

        let comparison = comparison.expect("Comparison should exist");
        assert!(comparison.duration_ratio > 1.0); // 120 µs vs 100 µs
        assert!(comparison.memory_ratio > 1.0); // 1200 B vs 1024 B
        assert!(comparison.throughput_ratio < 1.0); // 900/s vs 1000/s
        assert!(comparison.accuracy_diff > 0.0); // 0.97 vs 0.95
    }

    /// Tracking score rewards fast, complete tracking and penalizes slow,
    /// incomplete tracking.
    #[test]
    fn test_efficiency_scoring() {
        let analyzer = PerformanceAnalyzer::new();

        let good_tracking = TrackingPerformance {
            avg_allocation_time: Duration::from_micros(10),
            completeness: 0.98,
            overhead_bytes: 1024,
            throughput: 50000.0,
        };
        let score = analyzer.score_tracking_performance(&good_tracking);
        assert!(score > 0.9);

        let bad_tracking = TrackingPerformance {
            avg_allocation_time: Duration::from_millis(1),
            completeness: 0.8,
            overhead_bytes: 10240,
            throughput: 100.0,
        };
        let score = analyzer.score_tracking_performance(&bad_tracking);
        assert!(score < 0.7);
    }
}