//! memscope_rs/analysis/metrics/analyzer.rs
//!
//! Performance analyzer for memory-analysis metrics.
1use super::{MetricValue, MetricsCollector};
2use std::collections::HashMap;
3use std::time::Duration;
4
5/// Memory analysis performance analyzer
6/// Focused on offline memory profiling and analysis efficiency
/// Memory analysis performance analyzer.
///
/// Focused on offline memory profiling and analysis efficiency: scores the
/// profiler's own overhead (tracking, symbol resolution, smart-pointer
/// analysis, memory footprint) and compares runs against stored baselines.
pub struct PerformanceAnalyzer {
    /// Baseline benchmarks keyed by operation name; consulted by
    /// `compare_to_baseline` and populated via `set_baseline`.
    baselines: HashMap<String, Benchmark>,
    /// Acceptable-performance limits used by the `score_*` methods.
    thresholds: AnalysisThresholds,
}
13
14/// Performance benchmark for memory analysis operations
/// Performance benchmark for a single memory-analysis operation.
#[derive(Debug, Clone)]
pub struct Benchmark {
    /// Operation name (e.g., "allocation_tracking", "symbol_resolution")
    pub operation: String,
    /// Average execution time per operation
    pub avg_duration: Duration,
    /// Memory overhead in bytes
    pub memory_overhead: usize,
    /// Throughput (operations per second)
    pub throughput: f64,
    /// Accuracy as a fraction (0.0 to 1.0)
    pub accuracy: f64,
    /// Number of samples this benchmark was measured over
    pub sample_size: usize,
}
30
31/// Performance thresholds for memory analysis
/// Performance thresholds for memory analysis.
///
/// Defaults (see the `Default` impl) target a low-overhead profiler.
#[derive(Debug, Clone)]
pub struct AnalysisThresholds {
    /// Max acceptable tracking overhead, as a fraction of app memory
    pub max_tracking_overhead: f64,
    /// Max allocation tracking latency (default is in the tens of microseconds)
    pub max_allocation_latency: Duration,
    /// Max symbol resolution time per frame (default is a few milliseconds)
    pub max_symbol_resolution_time: Duration,
    /// Min acceptable tracking completeness (0.0 to 1.0)
    pub min_tracking_completeness: f64,
    /// Max memory usage for analysis tools, in MB
    pub max_analysis_memory: usize,
}
45
46/// Comprehensive performance report for memory analysis
/// Comprehensive performance report for memory analysis.
///
/// Produced by `PerformanceAnalyzer::analyze_performance`.
#[derive(Debug, Clone)]
pub struct PerformanceReport {
    /// Overall analysis efficiency score (0.0 to 1.0), a weighted
    /// average of the per-component scores
    pub efficiency_score: f64,
    /// Memory tracking performance
    pub tracking_performance: TrackingPerformance,
    /// Symbol resolution performance
    pub symbol_performance: SymbolPerformance,
    /// Smart pointer analysis performance
    pub pointer_performance: PointerPerformance,
    /// Memory usage efficiency of the analysis tooling itself
    pub memory_efficiency: MemoryEfficiency,
    /// Human-readable recommendations for improvement
    pub recommendations: Vec<String>,
}
62
63/// Memory tracking performance metrics
/// Memory tracking performance metrics.
///
/// All fields default to zero when the corresponding metric is absent
/// from the collector.
#[derive(Debug, Clone, Default)]
pub struct TrackingPerformance {
    /// Average allocation tracking time
    pub avg_allocation_time: Duration,
    /// Tracking completeness as a fraction (0.0 to 1.0)
    pub completeness: f64,
    /// Memory overhead of tracking, in bytes
    pub overhead_bytes: usize,
    /// Allocations tracked per second
    pub throughput: f64,
}
75
76/// Symbol resolution performance metrics
/// Symbol resolution performance metrics.
#[derive(Debug, Clone, Default)]
pub struct SymbolPerformance {
    /// Average symbol resolution time
    pub avg_resolution_time: Duration,
    /// Cache hit ratio (0.0 to 1.0); multiplied directly into the
    /// symbol score, so a cold cache scores 0
    pub cache_hit_ratio: f64,
    /// Symbols resolved per second
    pub resolution_rate: f64,
    /// Memory used by the symbol cache, in bytes
    pub cache_memory_usage: usize,
}
88
89/// Smart pointer analysis performance
/// Smart pointer analysis performance.
#[derive(Debug, Clone, Default)]
pub struct PointerPerformance {
    /// Time to analyze pointer patterns
    pub analysis_time: Duration,
    /// Leak detection accuracy (0.0 to 1.0); multiplied directly into
    /// the pointer score
    pub leak_detection_accuracy: f64,
    /// Pointers analyzed per second
    pub analysis_rate: f64,
}
99
100/// Memory usage efficiency of analysis tools
/// Memory usage efficiency of the analysis tools themselves.
#[derive(Debug, Clone, Default)]
pub struct MemoryEfficiency {
    /// Total memory used by analysis tools, in MB
    pub total_memory_mb: f64,
    /// Memory usage per tracked allocation, in bytes
    pub memory_per_allocation: f64,
    /// Memory growth rate, in MB per hour
    pub growth_rate: f64,
    /// Memory fragmentation level (0.0 to 1.0)
    pub fragmentation_ratio: f64,
}
112
113impl PerformanceAnalyzer {
114    /// Create analyzer with default thresholds
115    pub fn new() -> Self {
116        Self {
117            baselines: HashMap::new(),
118            thresholds: AnalysisThresholds::default(),
119        }
120    }
121
122    /// Create analyzer with custom thresholds
123    pub fn with_thresholds(thresholds: AnalysisThresholds) -> Self {
124        Self {
125            baselines: HashMap::new(),
126            thresholds,
127        }
128    }
129
130    /// Analyze current performance metrics
131    pub fn analyze_performance(&self, collector: &MetricsCollector) -> PerformanceReport {
132        let tracking_perf = self.analyze_tracking_performance(collector);
133        let symbol_perf = self.analyze_symbol_performance(collector);
134        let pointer_perf = self.analyze_pointer_performance(collector);
135        let memory_eff = self.analyze_memory_efficiency(collector);
136
137        let efficiency_score = self.calculate_efficiency_score(
138            &tracking_perf,
139            &symbol_perf,
140            &pointer_perf,
141            &memory_eff,
142        );
143
144        let recommendations =
145            self.generate_recommendations(&tracking_perf, &symbol_perf, &pointer_perf, &memory_eff);
146
147        PerformanceReport {
148            efficiency_score,
149            tracking_performance: tracking_perf,
150            symbol_performance: symbol_perf,
151            pointer_performance: pointer_perf,
152            memory_efficiency: memory_eff,
153            recommendations,
154        }
155    }
156
157    /// Set baseline benchmark for operation
158    pub fn set_baseline(&mut self, operation: &str, benchmark: Benchmark) {
159        self.baselines.insert(operation.to_string(), benchmark);
160    }
161
162    /// Compare current performance against baseline
163    pub fn compare_to_baseline(
164        &self,
165        operation: &str,
166        current: &Benchmark,
167    ) -> Option<PerformanceComparison> {
168        self.baselines
169            .get(operation)
170            .map(|baseline| PerformanceComparison {
171                operation: operation.to_string(),
172                baseline: baseline.clone(),
173                current: current.clone(),
174                duration_ratio: current.avg_duration.as_nanos() as f64
175                    / baseline.avg_duration.as_nanos() as f64,
176                memory_ratio: current.memory_overhead as f64 / baseline.memory_overhead as f64,
177                throughput_ratio: current.throughput / baseline.throughput,
178                accuracy_diff: current.accuracy - baseline.accuracy,
179            })
180    }
181
182    fn analyze_tracking_performance(&self, collector: &MetricsCollector) -> TrackingPerformance {
183        let avg_allocation_time = collector
184            .get_metric("allocation_tracking_time")
185            .and_then(|m| match &m.value {
186                MetricValue::Timer(timer) => Some(timer.average_duration()),
187                _ => None,
188            })
189            .unwrap_or(Duration::from_nanos(0));
190
191        let completeness = collector
192            .get_metric("tracking_completeness")
193            .and_then(|m| match &m.value {
194                MetricValue::Gauge(value) => Some(*value),
195                _ => None,
196            })
197            .unwrap_or(0.0);
198
199        let overhead_bytes = collector
200            .get_metric("tracking_memory_overhead")
201            .and_then(|m| match &m.value {
202                MetricValue::Gauge(value) => Some(*value as usize),
203                _ => None,
204            })
205            .unwrap_or(0);
206
207        let throughput = collector
208            .get_metric("allocations_per_second")
209            .and_then(|m| match &m.value {
210                MetricValue::Rate(rate) => Some(rate.current_rate),
211                _ => None,
212            })
213            .unwrap_or(0.0);
214
215        TrackingPerformance {
216            avg_allocation_time,
217            completeness,
218            overhead_bytes,
219            throughput,
220        }
221    }
222
223    fn analyze_symbol_performance(&self, collector: &MetricsCollector) -> SymbolPerformance {
224        let avg_resolution_time = collector
225            .get_metric("symbol_resolution_time")
226            .and_then(|m| match &m.value {
227                MetricValue::Timer(timer) => Some(timer.average_duration()),
228                _ => None,
229            })
230            .unwrap_or(Duration::from_nanos(0));
231
232        let cache_hit_ratio = collector
233            .get_metric("symbol_cache_hit_ratio")
234            .and_then(|m| match &m.value {
235                MetricValue::Gauge(value) => Some(*value),
236                _ => None,
237            })
238            .unwrap_or(0.0);
239
240        let resolution_rate = collector
241            .get_metric("symbols_resolved_per_second")
242            .and_then(|m| match &m.value {
243                MetricValue::Rate(rate) => Some(rate.current_rate),
244                _ => None,
245            })
246            .unwrap_or(0.0);
247
248        let cache_memory_usage = collector
249            .get_metric("symbol_cache_memory")
250            .and_then(|m| match &m.value {
251                MetricValue::Gauge(value) => Some(*value as usize),
252                _ => None,
253            })
254            .unwrap_or(0);
255
256        SymbolPerformance {
257            avg_resolution_time,
258            cache_hit_ratio,
259            resolution_rate,
260            cache_memory_usage,
261        }
262    }
263
264    fn analyze_pointer_performance(&self, collector: &MetricsCollector) -> PointerPerformance {
265        let analysis_time = collector
266            .get_metric("pointer_analysis_time")
267            .and_then(|m| match &m.value {
268                MetricValue::Timer(timer) => Some(timer.average_duration()),
269                _ => None,
270            })
271            .unwrap_or(Duration::from_nanos(0));
272
273        let leak_detection_accuracy = collector
274            .get_metric("leak_detection_accuracy")
275            .and_then(|m| match &m.value {
276                MetricValue::Gauge(value) => Some(*value),
277                _ => None,
278            })
279            .unwrap_or(0.0);
280
281        let analysis_rate = collector
282            .get_metric("pointers_analyzed_per_second")
283            .and_then(|m| match &m.value {
284                MetricValue::Rate(rate) => Some(rate.current_rate),
285                _ => None,
286            })
287            .unwrap_or(0.0);
288
289        PointerPerformance {
290            analysis_time,
291            leak_detection_accuracy,
292            analysis_rate,
293        }
294    }
295
296    fn analyze_memory_efficiency(&self, collector: &MetricsCollector) -> MemoryEfficiency {
297        let total_memory_mb = collector
298            .get_metric("total_analysis_memory")
299            .and_then(|m| match &m.value {
300                MetricValue::Gauge(value) => Some(*value),
301                _ => None,
302            })
303            .unwrap_or(0.0);
304
305        let memory_per_allocation = collector
306            .get_metric("memory_per_tracked_allocation")
307            .and_then(|m| match &m.value {
308                MetricValue::Gauge(value) => Some(*value),
309                _ => None,
310            })
311            .unwrap_or(0.0);
312
313        let growth_rate = collector
314            .get_metric("memory_growth_rate")
315            .and_then(|m| match &m.value {
316                MetricValue::Gauge(value) => Some(*value),
317                _ => None,
318            })
319            .unwrap_or(0.0);
320
321        let fragmentation_ratio = collector
322            .get_metric("memory_fragmentation")
323            .and_then(|m| match &m.value {
324                MetricValue::Gauge(value) => Some(*value),
325                _ => None,
326            })
327            .unwrap_or(0.0);
328
329        MemoryEfficiency {
330            total_memory_mb,
331            memory_per_allocation,
332            growth_rate,
333            fragmentation_ratio,
334        }
335    }
336
337    fn calculate_efficiency_score(
338        &self,
339        tracking: &TrackingPerformance,
340        symbol: &SymbolPerformance,
341        pointer: &PointerPerformance,
342        memory: &MemoryEfficiency,
343    ) -> f64 {
344        let tracking_score = self.score_tracking_performance(tracking);
345        let symbol_score = self.score_symbol_performance(symbol);
346        let pointer_score = self.score_pointer_performance(pointer);
347        let memory_score = self.score_memory_efficiency(memory);
348
349        // Weighted average (tracking is most important for memory analysis)
350        tracking_score * 0.4 + symbol_score * 0.25 + pointer_score * 0.2 + memory_score * 0.15
351    }
352
353    fn score_tracking_performance(&self, tracking: &TrackingPerformance) -> f64 {
354        let mut score = 1.0;
355
356        // Penalize high latency
357        if tracking.avg_allocation_time > self.thresholds.max_allocation_latency {
358            score *= 0.7;
359        }
360
361        // Penalize low completeness
362        if tracking.completeness < self.thresholds.min_tracking_completeness {
363            score *= tracking.completeness / self.thresholds.min_tracking_completeness;
364        }
365
366        // Reward high throughput
367        if tracking.throughput > 10000.0 {
368            score *= 1.1;
369        }
370
371        score.clamp(0.0, 1.0)
372    }
373
374    fn score_symbol_performance(&self, symbol: &SymbolPerformance) -> f64 {
375        let mut score = 1.0;
376
377        // Penalize slow symbol resolution
378        if symbol.avg_resolution_time > self.thresholds.max_symbol_resolution_time {
379            score *= 0.8;
380        }
381
382        // Reward high cache hit ratio
383        score *= symbol.cache_hit_ratio;
384
385        // Penalize excessive cache memory usage
386        if symbol.cache_memory_usage > 100 * 1024 * 1024 {
387            // 100MB
388            score *= 0.9;
389        }
390
391        score.clamp(0.0, 1.0)
392    }
393
394    fn score_pointer_performance(&self, _pointer: &PointerPerformance) -> f64 {
395        let mut score: f64 = 1.0;
396
397        // Reward high leak detection accuracy
398        score *= _pointer.leak_detection_accuracy;
399
400        // Penalize slow analysis
401        if _pointer.analysis_time > Duration::from_millis(100) {
402            score *= 0.8;
403        }
404
405        score.clamp(0.0, 1.0)
406    }
407
408    fn score_memory_efficiency(&self, memory: &MemoryEfficiency) -> f64 {
409        let mut score: f64 = 1.0;
410
411        // Penalize excessive memory usage
412        if memory.total_memory_mb > self.thresholds.max_analysis_memory as f64 {
413            score *= 0.7;
414        }
415
416        // Penalize high fragmentation
417        if memory.fragmentation_ratio > 0.3 {
418            score *= 0.8;
419        }
420
421        // Penalize rapid growth
422        if memory.growth_rate > 10.0 {
423            // 10MB/hour
424            score *= 0.9;
425        }
426
427        score.clamp(0.0, 1.0)
428    }
429
430    fn generate_recommendations(
431        &self,
432        tracking: &TrackingPerformance,
433        symbol: &SymbolPerformance,
434        _pointer: &PointerPerformance,
435        memory: &MemoryEfficiency,
436    ) -> Vec<String> {
437        let mut recommendations = Vec::new();
438
439        // Tracking recommendations
440        if tracking.completeness < 0.95 {
441            recommendations
442                .push("Improve tracking completeness by reducing lock contention".to_string());
443        }
444        if tracking.avg_allocation_time > Duration::from_micros(100) {
445            recommendations.push("Optimize allocation tracking path for lower latency".to_string());
446        }
447
448        // Symbol recommendations
449        if symbol.cache_hit_ratio < 0.8 {
450            recommendations.push("Increase symbol cache size to improve hit ratio".to_string());
451        }
452        if symbol.avg_resolution_time > Duration::from_millis(10) {
453            recommendations.push("Consider preloading frequently used symbols".to_string());
454        }
455
456        // Memory recommendations
457        if memory.total_memory_mb > 512.0 {
458            recommendations
459                .push("Consider reducing memory usage or implementing memory limits".to_string());
460        }
461        if memory.fragmentation_ratio > 0.2 {
462            recommendations.push("Implement memory compaction to reduce fragmentation".to_string());
463        }
464
465        recommendations
466    }
467}
468
469/// Performance comparison between baseline and current
/// Performance comparison between a stored baseline and a current run.
///
/// Produced by `PerformanceAnalyzer::compare_to_baseline`. Ratios are
/// computed as current / baseline, so values > 1.0 mean "more than baseline".
#[derive(Debug, Clone)]
pub struct PerformanceComparison {
    /// Operation being compared
    pub operation: String,
    /// Baseline benchmark
    pub baseline: Benchmark,
    /// Current benchmark
    pub current: Benchmark,
    /// Duration ratio (current/baseline); > 1.0 means slower than baseline
    pub duration_ratio: f64,
    /// Memory ratio (current/baseline); > 1.0 means more memory
    pub memory_ratio: f64,
    /// Throughput ratio (current/baseline); < 1.0 means lower throughput
    pub throughput_ratio: f64,
    /// Accuracy difference (current - baseline); > 0.0 means more accurate
    pub accuracy_diff: f64,
}
487
488impl Default for AnalysisThresholds {
489    fn default() -> Self {
490        Self {
491            max_tracking_overhead: 0.05, // 5% of app memory
492            max_allocation_latency: Duration::from_micros(50),
493            max_symbol_resolution_time: Duration::from_millis(5),
494            min_tracking_completeness: 0.95,
495            max_analysis_memory: 512, // 512MB
496        }
497    }
498}
499
500impl Default for PerformanceAnalyzer {
501    fn default() -> Self {
502        Self::new()
503    }
504}
505
506#[cfg(test)]
507mod tests {
508    use super::*;
509
510    /// Objective: Verify PerformanceAnalyzer creation with default thresholds
511    /// Invariants: New analyzer should have empty baselines and default thresholds
512    #[test]
513    fn test_performance_analyzer_creation() {
514        let analyzer = PerformanceAnalyzer::new();
515        assert!(
516            analyzer.baselines.is_empty(),
517            "New analyzer should have empty baselines"
518        );
519        assert_eq!(
520            analyzer.thresholds.max_tracking_overhead, 0.05,
521            "Default max tracking overhead should be 0.05"
522        );
523    }
524
525    /// Objective: Verify PerformanceAnalyzer with custom thresholds
526    /// Invariants: Custom thresholds should be applied correctly
527    #[test]
528    fn test_performance_analyzer_custom_thresholds() {
529        let custom_thresholds = AnalysisThresholds {
530            max_tracking_overhead: 0.1,
531            max_allocation_latency: Duration::from_micros(100),
532            max_symbol_resolution_time: Duration::from_millis(10),
533            min_tracking_completeness: 0.9,
534            max_analysis_memory: 1024,
535        };
536        let analyzer = PerformanceAnalyzer::with_thresholds(custom_thresholds);
537        assert_eq!(
538            analyzer.thresholds.max_tracking_overhead, 0.1,
539            "Custom max tracking overhead should be 0.1"
540        );
541        assert_eq!(
542            analyzer.thresholds.max_analysis_memory, 1024,
543            "Custom max analysis memory should be 1024"
544        );
545    }
546
547    /// Objective: Verify Default trait for PerformanceAnalyzer
548    /// Invariants: Default should create same as new()
549    #[test]
550    fn test_performance_analyzer_default() {
551        let analyzer = PerformanceAnalyzer::default();
552        assert!(
553            analyzer.baselines.is_empty(),
554            "Default analyzer should have empty baselines"
555        );
556    }
557
558    /// Objective: Verify Default trait for AnalysisThresholds
559    /// Invariants: Default thresholds should have sensible values
560    #[test]
561    fn test_analysis_thresholds_default() {
562        let thresholds = AnalysisThresholds::default();
563        assert_eq!(
564            thresholds.max_tracking_overhead, 0.05,
565            "Default max tracking overhead should be 5%"
566        );
567        assert_eq!(
568            thresholds.max_allocation_latency,
569            Duration::from_micros(50),
570            "Default max allocation latency should be 50us"
571        );
572        assert_eq!(
573            thresholds.min_tracking_completeness, 0.95,
574            "Default min tracking completeness should be 95%"
575        );
576    }
577
578    /// Objective: Verify set_baseline and compare_to_baseline
579    /// Invariants: Baseline should be stored and compared correctly
580    #[test]
581    fn test_benchmark_comparison() {
582        let mut analyzer = PerformanceAnalyzer::new();
583
584        let baseline = Benchmark {
585            operation: "allocation_tracking".to_string(),
586            avg_duration: Duration::from_micros(100),
587            memory_overhead: 1024,
588            throughput: 1000.0,
589            accuracy: 0.95,
590            sample_size: 10000,
591        };
592
593        analyzer.set_baseline("allocation_tracking", baseline.clone());
594
595        let current = Benchmark {
596            operation: "allocation_tracking".to_string(),
597            avg_duration: Duration::from_micros(120),
598            memory_overhead: 1200,
599            throughput: 900.0,
600            accuracy: 0.97,
601            sample_size: 10000,
602        };
603
604        let comparison = analyzer.compare_to_baseline("allocation_tracking", &current);
605        assert!(
606            comparison.is_some(),
607            "Comparison should exist for known operation"
608        );
609
610        let comparison = comparison.expect("Comparison should exist");
611        assert!(
612            comparison.duration_ratio > 1.0,
613            "Duration ratio should be > 1.0 for slower current"
614        );
615        assert!(
616            comparison.memory_ratio > 1.0,
617            "Memory ratio should be > 1.0 for higher memory"
618        );
619        assert!(
620            comparison.throughput_ratio < 1.0,
621            "Throughput ratio should be < 1.0 for lower throughput"
622        );
623        assert!(
624            comparison.accuracy_diff > 0.0,
625            "Accuracy diff should be > 0.0 for better accuracy"
626        );
627    }
628
629    /// Objective: Verify compare_to_baseline returns None for unknown operation
630    /// Invariants: Should return None when baseline doesn't exist
631    #[test]
632    fn test_benchmark_comparison_unknown_operation() {
633        let analyzer = PerformanceAnalyzer::new();
634
635        let current = Benchmark {
636            operation: "unknown".to_string(),
637            avg_duration: Duration::from_micros(100),
638            memory_overhead: 1024,
639            throughput: 1000.0,
640            accuracy: 0.95,
641            sample_size: 10000,
642        };
643
644        let comparison = analyzer.compare_to_baseline("unknown", &current);
645        assert!(
646            comparison.is_none(),
647            "Comparison should be None for unknown operation"
648        );
649    }
650
651    /// Objective: Verify efficiency scoring for good performance
652    /// Invariants: Good performance should score high
653    #[test]
654    fn test_efficiency_scoring_good() {
655        let analyzer = PerformanceAnalyzer::new();
656
657        let good_tracking = TrackingPerformance {
658            avg_allocation_time: Duration::from_micros(10),
659            completeness: 0.98,
660            overhead_bytes: 1024,
661            throughput: 50000.0,
662        };
663
664        let score = analyzer.score_tracking_performance(&good_tracking);
665        assert!(
666            score > 0.9,
667            "Good tracking performance should score > 0.9, got {}",
668            score
669        );
670    }
671
672    /// Objective: Verify efficiency scoring for bad performance
673    /// Invariants: Bad performance should score low
674    #[test]
675    fn test_efficiency_scoring_bad() {
676        let analyzer = PerformanceAnalyzer::new();
677
678        let bad_tracking = TrackingPerformance {
679            avg_allocation_time: Duration::from_millis(1),
680            completeness: 0.8,
681            overhead_bytes: 10240,
682            throughput: 100.0,
683        };
684
685        let score = analyzer.score_tracking_performance(&bad_tracking);
686        assert!(
687            score < 0.7,
688            "Bad tracking performance should score < 0.7, got {}",
689            score
690        );
691    }
692
693    /// Objective: Verify symbol performance scoring
694    /// Invariants: High cache hit ratio should improve score
695    #[test]
696    fn test_symbol_performance_scoring() {
697        let analyzer = PerformanceAnalyzer::new();
698
699        let good_symbol = SymbolPerformance {
700            avg_resolution_time: Duration::from_micros(100),
701            cache_hit_ratio: 0.95,
702            resolution_rate: 10000.0,
703            cache_memory_usage: 50 * 1024 * 1024,
704        };
705
706        let score = analyzer.score_symbol_performance(&good_symbol);
707        assert!(
708            score > 0.8,
709            "Good symbol performance should score > 0.8, got {}",
710            score
711        );
712
713        let bad_symbol = SymbolPerformance {
714            avg_resolution_time: Duration::from_millis(20),
715            cache_hit_ratio: 0.5,
716            resolution_rate: 100.0,
717            cache_memory_usage: 200 * 1024 * 1024,
718        };
719
720        let score = analyzer.score_symbol_performance(&bad_symbol);
721        assert!(
722            score < 0.6,
723            "Bad symbol performance should score < 0.6, got {}",
724            score
725        );
726    }
727
728    /// Objective: Verify pointer performance scoring
729    /// Invariants: High leak detection accuracy should improve score
730    #[test]
731    fn test_pointer_performance_scoring() {
732        let analyzer = PerformanceAnalyzer::new();
733
734        let good_pointer = PointerPerformance {
735            analysis_time: Duration::from_millis(10),
736            leak_detection_accuracy: 0.98,
737            analysis_rate: 5000.0,
738        };
739
740        let score = analyzer.score_pointer_performance(&good_pointer);
741        assert!(
742            score > 0.9,
743            "Good pointer performance should score > 0.9, got {}",
744            score
745        );
746
747        let bad_pointer = PointerPerformance {
748            analysis_time: Duration::from_millis(200),
749            leak_detection_accuracy: 0.7,
750            analysis_rate: 100.0,
751        };
752
753        let score = analyzer.score_pointer_performance(&bad_pointer);
754        assert!(
755            score < 0.7,
756            "Bad pointer performance should score < 0.7, got {}",
757            score
758        );
759    }
760
761    /// Objective: Verify memory efficiency scoring
762    /// Invariants: Low memory usage should improve score
763    #[test]
764    fn test_memory_efficiency_scoring() {
765        let analyzer = PerformanceAnalyzer::new();
766
767        let good_memory = MemoryEfficiency {
768            total_memory_mb: 100.0,
769            memory_per_allocation: 50.0,
770            growth_rate: 5.0,
771            fragmentation_ratio: 0.1,
772        };
773
774        let score = analyzer.score_memory_efficiency(&good_memory);
775        assert!(
776            score > 0.9,
777            "Good memory efficiency should score > 0.9, got {}",
778            score
779        );
780
781        let bad_memory = MemoryEfficiency {
782            total_memory_mb: 1000.0,
783            memory_per_allocation: 500.0,
784            growth_rate: 50.0,
785            fragmentation_ratio: 0.5,
786        };
787
788        let score = analyzer.score_memory_efficiency(&bad_memory);
789        assert!(
790            score < 0.7,
791            "Bad memory efficiency should score < 0.7, got {}",
792            score
793        );
794    }
795
796    /// Objective: Verify analyze_performance with empty collector
797    /// Invariants: Should return valid report with default values
798    #[test]
799    fn test_analyze_performance_empty_collector() {
800        let analyzer = PerformanceAnalyzer::new();
801        let collector = MetricsCollector::new();
802
803        let report = analyzer.analyze_performance(&collector);
804
805        assert!(
806            report.efficiency_score >= 0.0 && report.efficiency_score <= 1.0,
807            "Efficiency score should be between 0 and 1"
808        );
809        assert_eq!(
810            report.tracking_performance.avg_allocation_time,
811            Duration::from_nanos(0),
812            "Empty collector should have zero allocation time"
813        );
814        assert_eq!(
815            report.symbol_performance.cache_hit_ratio, 0.0,
816            "Empty collector should have zero cache hit ratio"
817        );
818    }
819
820    /// Objective: Verify generate_recommendations for various conditions
821    /// Invariants: Should generate appropriate recommendations
822    #[test]
823    fn test_generate_recommendations() {
824        let analyzer = PerformanceAnalyzer::new();
825
826        let tracking = TrackingPerformance {
827            avg_allocation_time: Duration::from_micros(200),
828            completeness: 0.9,
829            overhead_bytes: 1024,
830            throughput: 5000.0,
831        };
832
833        let symbol = SymbolPerformance {
834            avg_resolution_time: Duration::from_millis(20),
835            cache_hit_ratio: 0.7,
836            resolution_rate: 100.0,
837            cache_memory_usage: 50 * 1024 * 1024,
838        };
839
840        let pointer = PointerPerformance {
841            analysis_time: Duration::from_millis(50),
842            leak_detection_accuracy: 0.95,
843            analysis_rate: 1000.0,
844        };
845
846        let memory = MemoryEfficiency {
847            total_memory_mb: 600.0,
848            memory_per_allocation: 100.0,
849            growth_rate: 15.0,
850            fragmentation_ratio: 0.3,
851        };
852
853        let recommendations =
854            analyzer.generate_recommendations(&tracking, &symbol, &pointer, &memory);
855
856        assert!(
857            recommendations
858                .iter()
859                .any(|r| r.contains("tracking completeness")),
860            "Should recommend improving tracking completeness"
861        );
862        assert!(
863            recommendations
864                .iter()
865                .any(|r| r.contains("allocation tracking")),
866            "Should recommend optimizing allocation tracking"
867        );
868        assert!(
869            recommendations
870                .iter()
871                .any(|r| r.contains("cache") || r.contains("symbol")),
872            "Should recommend improving cache"
873        );
874        assert!(
875            recommendations.iter().any(|r| r.contains("memory usage")),
876            "Should recommend reducing memory usage"
877        );
878    }
879
880    /// Objective: Verify calculate_efficiency_score weighted average
881    /// Invariants: Score should be weighted average of component scores
882    #[test]
883    fn test_calculate_efficiency_score() {
884        let analyzer = PerformanceAnalyzer::new();
885
886        let tracking = TrackingPerformance {
887            avg_allocation_time: Duration::from_micros(10),
888            completeness: 1.0,
889            overhead_bytes: 1024,
890            throughput: 20000.0,
891        };
892
893        let symbol = SymbolPerformance {
894            avg_resolution_time: Duration::from_micros(100),
895            cache_hit_ratio: 1.0,
896            resolution_rate: 10000.0,
897            cache_memory_usage: 50 * 1024 * 1024,
898        };
899
900        let pointer = PointerPerformance {
901            analysis_time: Duration::from_millis(10),
902            leak_detection_accuracy: 1.0,
903            analysis_rate: 5000.0,
904        };
905
906        let memory = MemoryEfficiency {
907            total_memory_mb: 100.0,
908            memory_per_allocation: 50.0,
909            growth_rate: 5.0,
910            fragmentation_ratio: 0.1,
911        };
912
913        let score = analyzer.calculate_efficiency_score(&tracking, &symbol, &pointer, &memory);
914
915        assert!(
916            score > 0.9,
917            "All good performance should result in high score, got {}",
918            score
919        );
920    }
921
922    /// Objective: Verify PerformanceReport structure
923    /// Invariants: All fields should be populated
924    #[test]
925    fn test_performance_report_structure() {
926        let analyzer = PerformanceAnalyzer::new();
927        let collector = MetricsCollector::new();
928
929        let report = analyzer.analyze_performance(&collector);
930
931        assert!(
932            !report.recommendations.is_empty() || report.efficiency_score >= 0.0,
933            "Report should have recommendations or valid score"
934        );
935    }
936
937    /// Objective: Verify Benchmark clone functionality
938    /// Invariants: Cloned benchmark should have same values
939    #[test]
940    fn test_benchmark_clone() {
941        let original = Benchmark {
942            operation: "test".to_string(),
943            avg_duration: Duration::from_micros(100),
944            memory_overhead: 1024,
945            throughput: 1000.0,
946            accuracy: 0.95,
947            sample_size: 10000,
948        };
949
950        let cloned = original.clone();
951
952        assert_eq!(
953            original.operation, cloned.operation,
954            "Operation should match"
955        );
956        assert_eq!(
957            original.avg_duration, cloned.avg_duration,
958            "Duration should match"
959        );
960        assert_eq!(
961            original.throughput, cloned.throughput,
962            "Throughput should match"
963        );
964    }
965
966    /// Objective: Verify PerformanceComparison structure
967    /// Invariants: All fields should be accessible
968    #[test]
969    fn test_performance_comparison_structure() {
970        let mut analyzer = PerformanceAnalyzer::new();
971
972        let baseline = Benchmark {
973            operation: "test".to_string(),
974            avg_duration: Duration::from_micros(100),
975            memory_overhead: 1000,
976            throughput: 1000.0,
977            accuracy: 0.9,
978            sample_size: 100,
979        };
980
981        analyzer.set_baseline("test", baseline);
982
983        let current = Benchmark {
984            operation: "test".to_string(),
985            avg_duration: Duration::from_micros(200),
986            memory_overhead: 2000,
987            throughput: 500.0,
988            accuracy: 0.95,
989            sample_size: 100,
990        };
991
992        let comparison = analyzer.compare_to_baseline("test", &current).unwrap();
993
994        assert_eq!(comparison.operation, "test", "Operation name should match");
995        assert_eq!(
996            comparison.duration_ratio, 2.0,
997            "Duration ratio should be 2.0"
998        );
999        assert_eq!(comparison.memory_ratio, 2.0, "Memory ratio should be 2.0");
1000        assert_eq!(
1001            comparison.throughput_ratio, 0.5,
1002            "Throughput ratio should be 0.5"
1003        );
1004        assert!(
1005            (comparison.accuracy_diff - 0.05).abs() < 0.001,
1006            "Accuracy diff should be approximately 0.05"
1007        );
1008    }
1009
1010    /// Objective: Verify TrackingPerformance default
1011    /// Invariants: Default should have zero values
1012    #[test]
1013    fn test_tracking_performance_default() {
1014        let perf = TrackingPerformance::default();
1015
1016        assert_eq!(
1017            perf.avg_allocation_time,
1018            Duration::from_nanos(0),
1019            "Default allocation time should be zero"
1020        );
1021        assert_eq!(
1022            perf.completeness, 0.0,
1023            "Default completeness should be zero"
1024        );
1025        assert_eq!(perf.overhead_bytes, 0, "Default overhead should be zero");
1026        assert_eq!(perf.throughput, 0.0, "Default throughput should be zero");
1027    }
1028
1029    /// Objective: Verify SymbolPerformance default
1030    /// Invariants: Default should have zero values
1031    #[test]
1032    fn test_symbol_performance_default() {
1033        let perf = SymbolPerformance::default();
1034
1035        assert_eq!(
1036            perf.avg_resolution_time,
1037            Duration::from_nanos(0),
1038            "Default resolution time should be zero"
1039        );
1040        assert_eq!(
1041            perf.cache_hit_ratio, 0.0,
1042            "Default cache hit ratio should be zero"
1043        );
1044    }
1045
1046    /// Objective: Verify PointerPerformance default
1047    /// Invariants: Default should have zero values
1048    #[test]
1049    fn test_pointer_performance_default() {
1050        let perf = PointerPerformance::default();
1051
1052        assert_eq!(
1053            perf.analysis_time,
1054            Duration::from_nanos(0),
1055            "Default analysis time should be zero"
1056        );
1057        assert_eq!(
1058            perf.leak_detection_accuracy, 0.0,
1059            "Default leak detection accuracy should be zero"
1060        );
1061    }
1062
1063    /// Objective: Verify MemoryEfficiency default
1064    /// Invariants: Default should have zero values
1065    #[test]
1066    fn test_memory_efficiency_default() {
1067        let eff = MemoryEfficiency::default();
1068
1069        assert_eq!(
1070            eff.total_memory_mb, 0.0,
1071            "Default total memory should be zero"
1072        );
1073        assert_eq!(
1074            eff.memory_per_allocation, 0.0,
1075            "Default memory per allocation should be zero"
1076        );
1077    }
1078
1079    /// Objective: Verify score clamping to [0.0, 1.0]
1080    /// Invariants: Scores should never exceed bounds
1081    #[test]
1082    fn test_score_clamping() {
1083        let analyzer = PerformanceAnalyzer::new();
1084
1085        let extreme_tracking = TrackingPerformance {
1086            avg_allocation_time: Duration::from_secs(1),
1087            completeness: 0.0,
1088            overhead_bytes: 0,
1089            throughput: 0.0,
1090        };
1091
1092        let score = analyzer.score_tracking_performance(&extreme_tracking);
1093        assert!(
1094            (0.0..=1.0).contains(&score),
1095            "Score should be clamped to [0, 1], got {}",
1096            score
1097        );
1098    }
1099}