use super::{MetricValue, MetricsCollector};
use std::collections::HashMap;
use std::time::Duration;
/// Analyzes metrics from a [`MetricsCollector`], scoring tracking, symbol,
/// pointer, and memory performance against configurable thresholds and
/// optional per-operation baselines.
pub struct PerformanceAnalyzer {
    /// Recorded baseline benchmarks, keyed by operation name.
    baselines: HashMap<String, Benchmark>,
    /// Limits used when scoring performance sub-reports.
    thresholds: AnalysisThresholds,
}
/// A recorded performance measurement for one named operation, used as a
/// baseline for later comparisons via `compare_to_baseline`.
#[derive(Debug, Clone)]
pub struct Benchmark {
    /// Name of the measured operation.
    pub operation: String,
    /// Mean duration of a single operation.
    pub avg_duration: Duration,
    /// Extra memory attributed to the operation, in bytes.
    pub memory_overhead: usize,
    /// Operations completed per second.
    pub throughput: f64,
    /// Accuracy expressed as a fraction in [0, 1] (e.g. 0.95).
    pub accuracy: f64,
    /// Number of samples the measurement was averaged over.
    pub sample_size: usize,
}
/// Limits that the analyzer scores measured performance against.
#[derive(Debug, Clone)]
pub struct AnalysisThresholds {
    /// Maximum acceptable tracking overhead as a fraction (e.g. 0.05 = 5%).
    pub max_tracking_overhead: f64,
    /// Maximum acceptable average allocation-tracking latency.
    pub max_allocation_latency: Duration,
    /// Maximum acceptable average symbol-resolution time.
    pub max_symbol_resolution_time: Duration,
    /// Minimum acceptable tracking completeness as a fraction in [0, 1].
    pub min_tracking_completeness: f64,
    /// Maximum acceptable analysis memory; compared against
    /// `MemoryEfficiency::total_memory_mb`, so presumably megabytes — TODO confirm.
    pub max_analysis_memory: usize,
}
/// Full result of `PerformanceAnalyzer::analyze_performance`: an overall
/// score, four per-area sub-reports, and human-readable recommendations.
#[derive(Debug, Clone)]
pub struct PerformanceReport {
    /// Weighted overall score, clamped to [0, 1].
    pub efficiency_score: f64,
    /// Allocation-tracking metrics.
    pub tracking_performance: TrackingPerformance,
    /// Symbol-resolution metrics.
    pub symbol_performance: SymbolPerformance,
    /// Pointer-analysis metrics.
    pub pointer_performance: PointerPerformance,
    /// Memory-usage metrics.
    pub memory_efficiency: MemoryEfficiency,
    /// Suggested improvements derived from the sub-reports.
    pub recommendations: Vec<String>,
}
/// Allocation-tracking performance extracted from the collector.
#[derive(Debug, Clone, Default)]
pub struct TrackingPerformance {
    /// Average time spent tracking one allocation.
    pub avg_allocation_time: Duration,
    /// Fraction of allocations successfully tracked, in [0, 1].
    pub completeness: f64,
    /// Tracking memory overhead in bytes.
    pub overhead_bytes: usize,
    /// Allocations tracked per second.
    pub throughput: f64,
}
/// Symbol-resolution performance extracted from the collector.
#[derive(Debug, Clone, Default)]
pub struct SymbolPerformance {
    /// Average time to resolve one symbol.
    pub avg_resolution_time: Duration,
    /// Symbol-cache hit ratio, in [0, 1].
    pub cache_hit_ratio: f64,
    /// Symbols resolved per second.
    pub resolution_rate: f64,
    /// Memory used by the symbol cache, in bytes.
    pub cache_memory_usage: usize,
}
/// Pointer-analysis performance extracted from the collector.
#[derive(Debug, Clone, Default)]
pub struct PointerPerformance {
    /// Average time spent per pointer analysis.
    pub analysis_time: Duration,
    /// Leak-detection accuracy, in [0, 1].
    pub leak_detection_accuracy: f64,
    /// Pointers analyzed per second.
    pub analysis_rate: f64,
}
/// Memory-usage characteristics of the analysis itself.
#[derive(Debug, Clone, Default)]
pub struct MemoryEfficiency {
    /// Total memory consumed by the analysis, in megabytes.
    pub total_memory_mb: f64,
    /// Memory cost per tracked allocation; units not shown here — presumably
    /// bytes, TODO confirm against the collector.
    pub memory_per_allocation: f64,
    /// Memory growth rate; compared against the constant 10.0 when scoring.
    pub growth_rate: f64,
    /// Fragmentation ratio, in [0, 1].
    pub fragmentation_ratio: f64,
}
impl PerformanceAnalyzer {
    /// Creates an analyzer with default thresholds and no recorded baselines.
    pub fn new() -> Self {
        Self {
            baselines: HashMap::new(),
            thresholds: AnalysisThresholds::default(),
        }
    }

    /// Creates an analyzer that scores against caller-supplied thresholds.
    pub fn with_thresholds(thresholds: AnalysisThresholds) -> Self {
        Self {
            baselines: HashMap::new(),
            thresholds,
        }
    }

    /// Builds a full performance report from the collector's current metrics.
    ///
    /// Missing or mistyped metrics default to zero, so an empty collector
    /// yields a report with zeroed sub-sections rather than an error.
    pub fn analyze_performance(&self, collector: &MetricsCollector) -> PerformanceReport {
        let tracking_perf = self.analyze_tracking_performance(collector);
        let symbol_perf = self.analyze_symbol_performance(collector);
        let pointer_perf = self.analyze_pointer_performance(collector);
        let memory_eff = self.analyze_memory_efficiency(collector);
        let efficiency_score = self.calculate_efficiency_score(
            &tracking_perf,
            &symbol_perf,
            &pointer_perf,
            &memory_eff,
        );
        let recommendations =
            self.generate_recommendations(&tracking_perf, &symbol_perf, &pointer_perf, &memory_eff);
        PerformanceReport {
            efficiency_score,
            tracking_performance: tracking_perf,
            symbol_performance: symbol_perf,
            pointer_performance: pointer_perf,
            memory_efficiency: memory_eff,
            recommendations,
        }
    }

    /// Records (or replaces) the baseline benchmark for `operation`.
    pub fn set_baseline(&mut self, operation: &str, benchmark: Benchmark) {
        self.baselines.insert(operation.to_string(), benchmark);
    }

    /// Compares `current` against the stored baseline for `operation`.
    ///
    /// Returns `None` when no baseline was recorded for that operation.
    /// Ratios are current/baseline, so values > 1.0 mean "more than baseline";
    /// `accuracy_diff` is current minus baseline.
    pub fn compare_to_baseline(
        &self,
        operation: &str,
        current: &Benchmark,
    ) -> Option<PerformanceComparison> {
        self.baselines
            .get(operation)
            .map(|baseline| PerformanceComparison {
                operation: operation.to_string(),
                baseline: baseline.clone(),
                current: current.clone(),
                duration_ratio: current.avg_duration.as_nanos() as f64
                    / baseline.avg_duration.as_nanos() as f64,
                memory_ratio: current.memory_overhead as f64 / baseline.memory_overhead as f64,
                throughput_ratio: current.throughput / baseline.throughput,
                accuracy_diff: current.accuracy - baseline.accuracy,
            })
    }

    /// Reads a `Gauge` metric as `f64`; 0.0 when absent or of another kind.
    fn gauge_value(collector: &MetricsCollector, name: &str) -> f64 {
        collector
            .get_metric(name)
            .and_then(|m| match &m.value {
                MetricValue::Gauge(value) => Some(*value),
                _ => None,
            })
            .unwrap_or(0.0)
    }

    /// Reads a `Timer` metric's average duration; zero when absent or of
    /// another kind.
    fn timer_average(collector: &MetricsCollector, name: &str) -> Duration {
        collector
            .get_metric(name)
            .and_then(|m| match &m.value {
                MetricValue::Timer(timer) => Some(timer.average_duration()),
                _ => None,
            })
            .unwrap_or(Duration::ZERO)
    }

    /// Reads a `Rate` metric's current rate; 0.0 when absent or of another kind.
    fn rate_value(collector: &MetricsCollector, name: &str) -> f64 {
        collector
            .get_metric(name)
            .and_then(|m| match &m.value {
                MetricValue::Rate(rate) => Some(rate.current_rate),
                _ => None,
            })
            .unwrap_or(0.0)
    }

    /// Gathers allocation-tracking metrics into a `TrackingPerformance`.
    fn analyze_tracking_performance(&self, collector: &MetricsCollector) -> TrackingPerformance {
        TrackingPerformance {
            avg_allocation_time: Self::timer_average(collector, "allocation_tracking_time"),
            completeness: Self::gauge_value(collector, "tracking_completeness"),
            overhead_bytes: Self::gauge_value(collector, "tracking_memory_overhead") as usize,
            throughput: Self::rate_value(collector, "allocations_per_second"),
        }
    }

    /// Gathers symbol-resolution metrics into a `SymbolPerformance`.
    fn analyze_symbol_performance(&self, collector: &MetricsCollector) -> SymbolPerformance {
        SymbolPerformance {
            avg_resolution_time: Self::timer_average(collector, "symbol_resolution_time"),
            cache_hit_ratio: Self::gauge_value(collector, "symbol_cache_hit_ratio"),
            resolution_rate: Self::rate_value(collector, "symbols_resolved_per_second"),
            cache_memory_usage: Self::gauge_value(collector, "symbol_cache_memory") as usize,
        }
    }

    /// Gathers pointer-analysis metrics into a `PointerPerformance`.
    fn analyze_pointer_performance(&self, collector: &MetricsCollector) -> PointerPerformance {
        PointerPerformance {
            analysis_time: Self::timer_average(collector, "pointer_analysis_time"),
            leak_detection_accuracy: Self::gauge_value(collector, "leak_detection_accuracy"),
            analysis_rate: Self::rate_value(collector, "pointers_analyzed_per_second"),
        }
    }

    /// Gathers memory-usage metrics into a `MemoryEfficiency`.
    fn analyze_memory_efficiency(&self, collector: &MetricsCollector) -> MemoryEfficiency {
        MemoryEfficiency {
            total_memory_mb: Self::gauge_value(collector, "total_analysis_memory"),
            memory_per_allocation: Self::gauge_value(collector, "memory_per_tracked_allocation"),
            growth_rate: Self::gauge_value(collector, "memory_growth_rate"),
            fragmentation_ratio: Self::gauge_value(collector, "memory_fragmentation"),
        }
    }

    /// Weighted blend of the four sub-scores: tracking 40%, symbols 25%,
    /// pointers 20%, memory 15%.
    fn calculate_efficiency_score(
        &self,
        tracking: &TrackingPerformance,
        symbol: &SymbolPerformance,
        pointer: &PointerPerformance,
        memory: &MemoryEfficiency,
    ) -> f64 {
        let tracking_score = self.score_tracking_performance(tracking);
        let symbol_score = self.score_symbol_performance(symbol);
        let pointer_score = self.score_pointer_performance(pointer);
        let memory_score = self.score_memory_efficiency(memory);
        tracking_score * 0.4 + symbol_score * 0.25 + pointer_score * 0.2 + memory_score * 0.15
    }

    /// Scores tracking: penalizes latency over threshold and incomplete
    /// tracking; rewards high throughput. Result clamped to [0, 1].
    fn score_tracking_performance(&self, tracking: &TrackingPerformance) -> f64 {
        let mut score = 1.0;
        if tracking.avg_allocation_time > self.thresholds.max_allocation_latency {
            score *= 0.7;
        }
        if tracking.completeness < self.thresholds.min_tracking_completeness {
            // Scale down proportionally to how far completeness falls short.
            score *= tracking.completeness / self.thresholds.min_tracking_completeness;
        }
        if tracking.throughput > 10000.0 {
            // Bonus for very high throughput; the clamp caps it at 1.0.
            score *= 1.1;
        }
        score.clamp(0.0, 1.0)
    }

    /// Scores symbol resolution: scales by cache hit ratio and penalizes slow
    /// resolution and caches over 100 MiB. Result clamped to [0, 1].
    fn score_symbol_performance(&self, symbol: &SymbolPerformance) -> f64 {
        let mut score = 1.0;
        if symbol.avg_resolution_time > self.thresholds.max_symbol_resolution_time {
            score *= 0.8;
        }
        score *= symbol.cache_hit_ratio;
        if symbol.cache_memory_usage > 100 * 1024 * 1024 {
            score *= 0.9;
        }
        score.clamp(0.0, 1.0)
    }

    /// Scores pointer analysis: scales by leak-detection accuracy and
    /// penalizes analysis slower than 100 ms. Result clamped to [0, 1].
    fn score_pointer_performance(&self, pointer: &PointerPerformance) -> f64 {
        let mut score: f64 = 1.0;
        score *= pointer.leak_detection_accuracy;
        if pointer.analysis_time > Duration::from_millis(100) {
            score *= 0.8;
        }
        score.clamp(0.0, 1.0)
    }

    /// Scores memory usage: penalizes exceeding the memory budget, high
    /// fragmentation (> 0.3), and fast growth (> 10.0). Clamped to [0, 1].
    fn score_memory_efficiency(&self, memory: &MemoryEfficiency) -> f64 {
        let mut score: f64 = 1.0;
        if memory.total_memory_mb > self.thresholds.max_analysis_memory as f64 {
            score *= 0.7;
        }
        if memory.fragmentation_ratio > 0.3 {
            score *= 0.8;
        }
        if memory.growth_rate > 10.0 {
            score *= 0.9;
        }
        score.clamp(0.0, 1.0)
    }

    /// Produces human-readable improvement suggestions for metrics that miss
    /// fixed targets. Pointer metrics currently trigger no recommendations.
    fn generate_recommendations(
        &self,
        tracking: &TrackingPerformance,
        symbol: &SymbolPerformance,
        _pointer: &PointerPerformance,
        memory: &MemoryEfficiency,
    ) -> Vec<String> {
        let mut recommendations = Vec::new();
        if tracking.completeness < 0.95 {
            recommendations
                .push("Improve tracking completeness by reducing lock contention".to_string());
        }
        if tracking.avg_allocation_time > Duration::from_micros(100) {
            recommendations.push("Optimize allocation tracking path for lower latency".to_string());
        }
        if symbol.cache_hit_ratio < 0.8 {
            recommendations.push("Increase symbol cache size to improve hit ratio".to_string());
        }
        if symbol.avg_resolution_time > Duration::from_millis(10) {
            recommendations.push("Consider preloading frequently used symbols".to_string());
        }
        if memory.total_memory_mb > 512.0 {
            recommendations
                .push("Consider reducing memory usage or implementing memory limits".to_string());
        }
        if memory.fragmentation_ratio > 0.2 {
            recommendations.push("Implement memory compaction to reduce fragmentation".to_string());
        }
        recommendations
    }
}
/// Result of comparing a current benchmark against a stored baseline.
/// Ratios are current/baseline; `accuracy_diff` is current minus baseline.
#[derive(Debug, Clone)]
pub struct PerformanceComparison {
    /// Operation name the comparison was made for.
    pub operation: String,
    /// The stored baseline measurement.
    pub baseline: Benchmark,
    /// The measurement being compared.
    pub current: Benchmark,
    /// current.avg_duration / baseline.avg_duration (in nanoseconds).
    pub duration_ratio: f64,
    /// current.memory_overhead / baseline.memory_overhead.
    pub memory_ratio: f64,
    /// current.throughput / baseline.throughput.
    pub throughput_ratio: f64,
    /// current.accuracy - baseline.accuracy.
    pub accuracy_diff: f64,
}
impl Default for AnalysisThresholds {
fn default() -> Self {
Self {
max_tracking_overhead: 0.05, max_allocation_latency: Duration::from_micros(50),
max_symbol_resolution_time: Duration::from_millis(5),
min_tracking_completeness: 0.95,
max_analysis_memory: 512, }
}
}
impl Default for PerformanceAnalyzer {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    // NOTE: the original file contained `¤t` (mojibake for `&current`,
    // via the HTML entity `&curren;`) in three tests, which broke compilation;
    // those call sites are restored to `&current` below.
    use super::*;

    #[test]
    fn test_performance_analyzer_creation() {
        let analyzer = PerformanceAnalyzer::new();
        assert!(
            analyzer.baselines.is_empty(),
            "New analyzer should have empty baselines"
        );
        assert_eq!(
            analyzer.thresholds.max_tracking_overhead, 0.05,
            "Default max tracking overhead should be 0.05"
        );
    }

    #[test]
    fn test_performance_analyzer_custom_thresholds() {
        let custom_thresholds = AnalysisThresholds {
            max_tracking_overhead: 0.1,
            max_allocation_latency: Duration::from_micros(100),
            max_symbol_resolution_time: Duration::from_millis(10),
            min_tracking_completeness: 0.9,
            max_analysis_memory: 1024,
        };
        let analyzer = PerformanceAnalyzer::with_thresholds(custom_thresholds);
        assert_eq!(
            analyzer.thresholds.max_tracking_overhead, 0.1,
            "Custom max tracking overhead should be 0.1"
        );
        assert_eq!(
            analyzer.thresholds.max_analysis_memory, 1024,
            "Custom max analysis memory should be 1024"
        );
    }

    #[test]
    fn test_performance_analyzer_default() {
        let analyzer = PerformanceAnalyzer::default();
        assert!(
            analyzer.baselines.is_empty(),
            "Default analyzer should have empty baselines"
        );
    }

    #[test]
    fn test_analysis_thresholds_default() {
        let thresholds = AnalysisThresholds::default();
        assert_eq!(
            thresholds.max_tracking_overhead, 0.05,
            "Default max tracking overhead should be 5%"
        );
        assert_eq!(
            thresholds.max_allocation_latency,
            Duration::from_micros(50),
            "Default max allocation latency should be 50us"
        );
        assert_eq!(
            thresholds.min_tracking_completeness, 0.95,
            "Default min tracking completeness should be 95%"
        );
    }

    #[test]
    fn test_benchmark_comparison() {
        let mut analyzer = PerformanceAnalyzer::new();
        let baseline = Benchmark {
            operation: "allocation_tracking".to_string(),
            avg_duration: Duration::from_micros(100),
            memory_overhead: 1024,
            throughput: 1000.0,
            accuracy: 0.95,
            sample_size: 10000,
        };
        analyzer.set_baseline("allocation_tracking", baseline.clone());
        let current = Benchmark {
            operation: "allocation_tracking".to_string(),
            avg_duration: Duration::from_micros(120),
            memory_overhead: 1200,
            throughput: 900.0,
            accuracy: 0.97,
            sample_size: 10000,
        };
        // Fixed mojibake: was `¤t`.
        let comparison = analyzer.compare_to_baseline("allocation_tracking", &current);
        assert!(
            comparison.is_some(),
            "Comparison should exist for known operation"
        );
        let comparison = comparison.expect("Comparison should exist");
        assert!(
            comparison.duration_ratio > 1.0,
            "Duration ratio should be > 1.0 for slower current"
        );
        assert!(
            comparison.memory_ratio > 1.0,
            "Memory ratio should be > 1.0 for higher memory"
        );
        assert!(
            comparison.throughput_ratio < 1.0,
            "Throughput ratio should be < 1.0 for lower throughput"
        );
        assert!(
            comparison.accuracy_diff > 0.0,
            "Accuracy diff should be > 0.0 for better accuracy"
        );
    }

    #[test]
    fn test_benchmark_comparison_unknown_operation() {
        let analyzer = PerformanceAnalyzer::new();
        let current = Benchmark {
            operation: "unknown".to_string(),
            avg_duration: Duration::from_micros(100),
            memory_overhead: 1024,
            throughput: 1000.0,
            accuracy: 0.95,
            sample_size: 10000,
        };
        // Fixed mojibake: was `¤t`.
        let comparison = analyzer.compare_to_baseline("unknown", &current);
        assert!(
            comparison.is_none(),
            "Comparison should be None for unknown operation"
        );
    }

    #[test]
    fn test_efficiency_scoring_good() {
        let analyzer = PerformanceAnalyzer::new();
        let good_tracking = TrackingPerformance {
            avg_allocation_time: Duration::from_micros(10),
            completeness: 0.98,
            overhead_bytes: 1024,
            throughput: 50000.0,
        };
        let score = analyzer.score_tracking_performance(&good_tracking);
        assert!(
            score > 0.9,
            "Good tracking performance should score > 0.9, got {}",
            score
        );
    }

    #[test]
    fn test_efficiency_scoring_bad() {
        let analyzer = PerformanceAnalyzer::new();
        let bad_tracking = TrackingPerformance {
            avg_allocation_time: Duration::from_millis(1),
            completeness: 0.8,
            overhead_bytes: 10240,
            throughput: 100.0,
        };
        let score = analyzer.score_tracking_performance(&bad_tracking);
        assert!(
            score < 0.7,
            "Bad tracking performance should score < 0.7, got {}",
            score
        );
    }

    #[test]
    fn test_symbol_performance_scoring() {
        let analyzer = PerformanceAnalyzer::new();
        let good_symbol = SymbolPerformance {
            avg_resolution_time: Duration::from_micros(100),
            cache_hit_ratio: 0.95,
            resolution_rate: 10000.0,
            cache_memory_usage: 50 * 1024 * 1024,
        };
        let score = analyzer.score_symbol_performance(&good_symbol);
        assert!(
            score > 0.8,
            "Good symbol performance should score > 0.8, got {}",
            score
        );
        let bad_symbol = SymbolPerformance {
            avg_resolution_time: Duration::from_millis(20),
            cache_hit_ratio: 0.5,
            resolution_rate: 100.0,
            cache_memory_usage: 200 * 1024 * 1024,
        };
        let score = analyzer.score_symbol_performance(&bad_symbol);
        assert!(
            score < 0.6,
            "Bad symbol performance should score < 0.6, got {}",
            score
        );
    }

    #[test]
    fn test_pointer_performance_scoring() {
        let analyzer = PerformanceAnalyzer::new();
        let good_pointer = PointerPerformance {
            analysis_time: Duration::from_millis(10),
            leak_detection_accuracy: 0.98,
            analysis_rate: 5000.0,
        };
        let score = analyzer.score_pointer_performance(&good_pointer);
        assert!(
            score > 0.9,
            "Good pointer performance should score > 0.9, got {}",
            score
        );
        let bad_pointer = PointerPerformance {
            analysis_time: Duration::from_millis(200),
            leak_detection_accuracy: 0.7,
            analysis_rate: 100.0,
        };
        let score = analyzer.score_pointer_performance(&bad_pointer);
        assert!(
            score < 0.7,
            "Bad pointer performance should score < 0.7, got {}",
            score
        );
    }

    #[test]
    fn test_memory_efficiency_scoring() {
        let analyzer = PerformanceAnalyzer::new();
        let good_memory = MemoryEfficiency {
            total_memory_mb: 100.0,
            memory_per_allocation: 50.0,
            growth_rate: 5.0,
            fragmentation_ratio: 0.1,
        };
        let score = analyzer.score_memory_efficiency(&good_memory);
        assert!(
            score > 0.9,
            "Good memory efficiency should score > 0.9, got {}",
            score
        );
        let bad_memory = MemoryEfficiency {
            total_memory_mb: 1000.0,
            memory_per_allocation: 500.0,
            growth_rate: 50.0,
            fragmentation_ratio: 0.5,
        };
        let score = analyzer.score_memory_efficiency(&bad_memory);
        assert!(
            score < 0.7,
            "Bad memory efficiency should score < 0.7, got {}",
            score
        );
    }

    #[test]
    fn test_analyze_performance_empty_collector() {
        let analyzer = PerformanceAnalyzer::new();
        let collector = MetricsCollector::new();
        let report = analyzer.analyze_performance(&collector);
        assert!(
            report.efficiency_score >= 0.0 && report.efficiency_score <= 1.0,
            "Efficiency score should be between 0 and 1"
        );
        assert_eq!(
            report.tracking_performance.avg_allocation_time,
            Duration::from_nanos(0),
            "Empty collector should have zero allocation time"
        );
        assert_eq!(
            report.symbol_performance.cache_hit_ratio, 0.0,
            "Empty collector should have zero cache hit ratio"
        );
    }

    #[test]
    fn test_generate_recommendations() {
        let analyzer = PerformanceAnalyzer::new();
        let tracking = TrackingPerformance {
            avg_allocation_time: Duration::from_micros(200),
            completeness: 0.9,
            overhead_bytes: 1024,
            throughput: 5000.0,
        };
        let symbol = SymbolPerformance {
            avg_resolution_time: Duration::from_millis(20),
            cache_hit_ratio: 0.7,
            resolution_rate: 100.0,
            cache_memory_usage: 50 * 1024 * 1024,
        };
        let pointer = PointerPerformance {
            analysis_time: Duration::from_millis(50),
            leak_detection_accuracy: 0.95,
            analysis_rate: 1000.0,
        };
        let memory = MemoryEfficiency {
            total_memory_mb: 600.0,
            memory_per_allocation: 100.0,
            growth_rate: 15.0,
            fragmentation_ratio: 0.3,
        };
        let recommendations =
            analyzer.generate_recommendations(&tracking, &symbol, &pointer, &memory);
        assert!(
            recommendations
                .iter()
                .any(|r| r.contains("tracking completeness")),
            "Should recommend improving tracking completeness"
        );
        assert!(
            recommendations
                .iter()
                .any(|r| r.contains("allocation tracking")),
            "Should recommend optimizing allocation tracking"
        );
        assert!(
            recommendations
                .iter()
                .any(|r| r.contains("cache") || r.contains("symbol")),
            "Should recommend improving cache"
        );
        assert!(
            recommendations.iter().any(|r| r.contains("memory usage")),
            "Should recommend reducing memory usage"
        );
    }

    #[test]
    fn test_calculate_efficiency_score() {
        let analyzer = PerformanceAnalyzer::new();
        let tracking = TrackingPerformance {
            avg_allocation_time: Duration::from_micros(10),
            completeness: 1.0,
            overhead_bytes: 1024,
            throughput: 20000.0,
        };
        let symbol = SymbolPerformance {
            avg_resolution_time: Duration::from_micros(100),
            cache_hit_ratio: 1.0,
            resolution_rate: 10000.0,
            cache_memory_usage: 50 * 1024 * 1024,
        };
        let pointer = PointerPerformance {
            analysis_time: Duration::from_millis(10),
            leak_detection_accuracy: 1.0,
            analysis_rate: 5000.0,
        };
        let memory = MemoryEfficiency {
            total_memory_mb: 100.0,
            memory_per_allocation: 50.0,
            growth_rate: 5.0,
            fragmentation_ratio: 0.1,
        };
        let score = analyzer.calculate_efficiency_score(&tracking, &symbol, &pointer, &memory);
        assert!(
            score > 0.9,
            "All good performance should result in high score, got {}",
            score
        );
    }

    #[test]
    fn test_performance_report_structure() {
        let analyzer = PerformanceAnalyzer::new();
        let collector = MetricsCollector::new();
        let report = analyzer.analyze_performance(&collector);
        assert!(
            !report.recommendations.is_empty() || report.efficiency_score >= 0.0,
            "Report should have recommendations or valid score"
        );
    }

    #[test]
    fn test_benchmark_clone() {
        let original = Benchmark {
            operation: "test".to_string(),
            avg_duration: Duration::from_micros(100),
            memory_overhead: 1024,
            throughput: 1000.0,
            accuracy: 0.95,
            sample_size: 10000,
        };
        let cloned = original.clone();
        assert_eq!(
            original.operation, cloned.operation,
            "Operation should match"
        );
        assert_eq!(
            original.avg_duration, cloned.avg_duration,
            "Duration should match"
        );
        assert_eq!(
            original.throughput, cloned.throughput,
            "Throughput should match"
        );
    }

    #[test]
    fn test_performance_comparison_structure() {
        let mut analyzer = PerformanceAnalyzer::new();
        let baseline = Benchmark {
            operation: "test".to_string(),
            avg_duration: Duration::from_micros(100),
            memory_overhead: 1000,
            throughput: 1000.0,
            accuracy: 0.9,
            sample_size: 100,
        };
        analyzer.set_baseline("test", baseline);
        let current = Benchmark {
            operation: "test".to_string(),
            avg_duration: Duration::from_micros(200),
            memory_overhead: 2000,
            throughput: 500.0,
            accuracy: 0.95,
            sample_size: 100,
        };
        // Fixed mojibake: was `¤t`.
        let comparison = analyzer.compare_to_baseline("test", &current).unwrap();
        assert_eq!(comparison.operation, "test", "Operation name should match");
        assert_eq!(
            comparison.duration_ratio, 2.0,
            "Duration ratio should be 2.0"
        );
        assert_eq!(comparison.memory_ratio, 2.0, "Memory ratio should be 2.0");
        assert_eq!(
            comparison.throughput_ratio, 0.5,
            "Throughput ratio should be 0.5"
        );
        assert!(
            (comparison.accuracy_diff - 0.05).abs() < 0.001,
            "Accuracy diff should be approximately 0.05"
        );
    }

    #[test]
    fn test_tracking_performance_default() {
        let perf = TrackingPerformance::default();
        assert_eq!(
            perf.avg_allocation_time,
            Duration::from_nanos(0),
            "Default allocation time should be zero"
        );
        assert_eq!(
            perf.completeness, 0.0,
            "Default completeness should be zero"
        );
        assert_eq!(perf.overhead_bytes, 0, "Default overhead should be zero");
        assert_eq!(perf.throughput, 0.0, "Default throughput should be zero");
    }

    #[test]
    fn test_symbol_performance_default() {
        let perf = SymbolPerformance::default();
        assert_eq!(
            perf.avg_resolution_time,
            Duration::from_nanos(0),
            "Default resolution time should be zero"
        );
        assert_eq!(
            perf.cache_hit_ratio, 0.0,
            "Default cache hit ratio should be zero"
        );
    }

    #[test]
    fn test_pointer_performance_default() {
        let perf = PointerPerformance::default();
        assert_eq!(
            perf.analysis_time,
            Duration::from_nanos(0),
            "Default analysis time should be zero"
        );
        assert_eq!(
            perf.leak_detection_accuracy, 0.0,
            "Default leak detection accuracy should be zero"
        );
    }

    #[test]
    fn test_memory_efficiency_default() {
        let eff = MemoryEfficiency::default();
        assert_eq!(
            eff.total_memory_mb, 0.0,
            "Default total memory should be zero"
        );
        assert_eq!(
            eff.memory_per_allocation, 0.0,
            "Default memory per allocation should be zero"
        );
    }

    #[test]
    fn test_score_clamping() {
        let analyzer = PerformanceAnalyzer::new();
        let extreme_tracking = TrackingPerformance {
            avg_allocation_time: Duration::from_secs(1),
            completeness: 0.0,
            overhead_bytes: 0,
            throughput: 0.0,
        };
        let score = analyzer.score_tracking_performance(&extreme_tracking);
        assert!(
            (0.0..=1.0).contains(&score),
            "Score should be clamped to [0, 1], got {}",
            score
        );
    }
}