// scirs2_graph / graph_memory_profiler.rs
//! Memory Usage Profiler for Advanced Mode
//!
//! This module provides comprehensive memory profiling and optimization analysis
//! for advanced mode components, including detailed memory usage tracking,
//! optimization recommendations, and performance analysis.

#![allow(missing_docs)]

use crate::advanced::AdvancedProcessor;
use crate::base::{EdgeWeight, Graph, Node};
use crate::error::Result;
use scirs2_core::random::{Rng, RngExt};
use std::collections::{HashMap, VecDeque};
use std::time::{Duration, SystemTime};

/// Memory usage statistics for different components.
///
/// All byte quantities are profiler-side estimates accumulated from recorded
/// allocation/deallocation events, not allocator-reported numbers.
#[derive(Debug, Clone)]
pub struct MemoryStats {
    /// Current memory usage in bytes
    pub current_usage: usize,
    /// Peak memory usage in bytes
    pub peak_usage: usize,
    /// Average memory usage in bytes (mean over the recorded usage history)
    pub average_usage: f64,
    /// Number of allocations recorded
    pub allocation_count: usize,
    /// Number of deallocations recorded
    pub deallocation_count: usize,
    /// Memory fragmentation ratio (0.0 = no fragmentation, 1.0 = highly fragmented)
    pub fragmentation_ratio: f64,
    /// Memory efficiency score (0.0 = inefficient, 1.0 = highly efficient)
    pub efficiency_score: f64,
}

35impl Default for MemoryStats {
36    fn default() -> Self {
37        Self {
38            current_usage: 0,
39            peak_usage: 0,
40            average_usage: 0.0,
41            allocation_count: 0,
42            deallocation_count: 0,
43            fragmentation_ratio: 0.0,
44            efficiency_score: 1.0,
45        }
46    }
47}
48
/// A single recorded allocation event and its observed lifecycle.
#[derive(Debug, Clone)]
pub struct AllocationPattern {
    /// Size of allocation in bytes
    pub size: usize,
    /// Timestamp of allocation
    pub timestamp: SystemTime,
    /// Lifetime of allocation (`None` while still live or never deallocated)
    pub lifetime: Option<Duration>,
    /// Category of allocation (graph data, algorithm workspace, cache, etc.)
    pub category: String,
    /// Whether this allocation was predicted by the memory manager
    pub was_predicted: bool,
}

/// Memory usage profiling data for different advanced components.
///
/// Aggregates everything the profiler accumulates over a session: raw
/// allocation events, per-component statistics, usage timeline, and the
/// derived analyses.
#[derive(Debug)]
pub struct MemoryProfile {
    /// Overall memory statistics
    pub overall_stats: MemoryStats,
    /// Memory usage by component name
    pub component_stats: HashMap<String, MemoryStats>,
    /// Allocation patterns over time (append-only event history)
    pub allocation_patterns: Vec<AllocationPattern>,
    /// Memory usage history as (timestamp, usage_bytes) pairs
    pub usage_history: VecDeque<(SystemTime, usize)>,
    /// Optimization opportunities identified by the analyses
    pub optimization_opportunities: Vec<OptimizationOpportunity>,
    /// Memory efficiency analysis
    pub efficiency_analysis: EfficiencyAnalysis,
}

/// Identified memory optimization opportunity.
#[derive(Debug, Clone)]
pub struct OptimizationOpportunity {
    /// Type of optimization
    pub optimization_type: OptimizationType,
    /// Estimated memory savings in bytes
    pub estimated_savings: usize,
    /// Estimated performance impact (negative = performance loss, positive = gain)
    pub performance_impact: f64,
    /// Implementation complexity (1-5, 1 = easy, 5 = very complex)
    pub implementation_complexity: u8,
    /// Description of the optimization (also used to de-duplicate suggestions)
    pub description: String,
    /// Priority (1-5, 5 = highest priority)
    pub priority: u8,
}

/// Types of memory optimizations the profiler can recommend.
///
/// The enum is fieldless, so it is cheap to copy and can serve as a hash-map
/// key; `Copy`, `Eq`, and `Hash` are derived alongside the original
/// `Debug, Clone, PartialEq` (a backward-compatible widening).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum OptimizationType {
    /// Use memory pools for frequent allocations
    MemoryPooling,
    /// Reduce data structure sizes
    DataStructureOptimization,
    /// Implement lazy evaluation
    LazyEvaluation,
    /// Use more compact data representations
    CompactRepresentation,
    /// Optimize caching strategies
    CacheOptimization,
    /// Reduce memory fragmentation
    FragmentationReduction,
    /// Use streaming algorithms for large data
    StreamingProcessing,
    /// Optimize garbage collection patterns
    GarbageCollectionOptimization,
}

/// Memory efficiency analysis results.
///
/// All scores are in the range 0.0-1.0 and are heuristic estimates derived
/// from the recorded allocation history.
#[derive(Debug, Clone)]
pub struct EfficiencyAnalysis {
    /// Overall efficiency score (0.0-1.0)
    pub overall_efficiency: f64,
    /// Memory utilization ratio (used / allocated)
    pub utilization_ratio: f64,
    /// Cache effectiveness score
    pub cache_effectiveness: f64,
    /// Memory access pattern efficiency
    pub access_pattern_efficiency: f64,
    /// Temporal locality score
    pub temporal_locality: f64,
    /// Spatial locality score
    pub spatial_locality: f64,
    /// Human-readable recommendations for improvement
    pub recommendations: Vec<String>,
}

/// Comprehensive memory profiler for advanced mode.
///
/// Tracks live allocations, accumulates per-component statistics, and derives
/// optimization opportunities and efficiency scores from the recorded history.
pub struct AdvancedMemoryProfiler {
    /// Current memory profile data
    profile: MemoryProfile,
    /// Profiling configuration
    config: MemoryProfilerConfig,
    /// Active (not yet deallocated) allocations, keyed by generated id
    active_allocations: HashMap<String, AllocationPattern>,
    /// Profiling start time (reset by `start_profiling`)
    start_time: SystemTime,
    /// Last garbage collection time
    #[allow(dead_code)] // reserved for future GC-pattern analysis
    last_gc_time: SystemTime,
    /// Memory pressure threshold in bytes; exceeding it triggers analysis
    memory_pressure_threshold: usize,
}

/// Configuration for memory profiling.
#[derive(Debug, Clone)]
pub struct MemoryProfilerConfig {
    /// Enable detailed allocation tracking
    pub track_allocations: bool,
    /// Enable memory pattern analysis
    pub analyze_patterns: bool,
    /// Enable optimization detection
    pub detect_optimizations: bool,
    /// Maximum usage-history entries to keep (older entries are evicted)
    pub max_history_entries: usize,
    /// Memory sampling interval
    pub sampling_interval: Duration,
    /// Enable real-time monitoring
    pub real_time_monitoring: bool,
}

172impl Default for MemoryProfilerConfig {
173    fn default() -> Self {
174        Self {
175            track_allocations: true,
176            analyze_patterns: true,
177            detect_optimizations: true,
178            max_history_entries: 10000,
179            sampling_interval: Duration::from_millis(100),
180            real_time_monitoring: true,
181        }
182    }
183}
184
impl AdvancedMemoryProfiler {
    /// Create a new memory profiler with the given configuration.
    ///
    /// All statistics start zeroed, every efficiency score starts at a
    /// perfect 1.0, and the memory-pressure threshold defaults to 1 GiB.
    pub fn new(config: MemoryProfilerConfig) -> Self {
        let now = SystemTime::now();
        let efficiency_analysis = EfficiencyAnalysis {
            overall_efficiency: 1.0,
            utilization_ratio: 1.0,
            cache_effectiveness: 1.0,
            access_pattern_efficiency: 1.0,
            temporal_locality: 1.0,
            spatial_locality: 1.0,
            recommendations: Vec::new(),
        };
        let profile = MemoryProfile {
            overall_stats: MemoryStats::default(),
            component_stats: HashMap::new(),
            allocation_patterns: Vec::new(),
            usage_history: VecDeque::new(),
            optimization_opportunities: Vec::new(),
            efficiency_analysis,
        };
        Self {
            profile,
            config,
            active_allocations: HashMap::new(),
            start_time: now,
            last_gc_time: now,
            memory_pressure_threshold: 1024 * 1024 * 1024, // 1 GiB default
        }
    }

    /// Start profiling an advanced processor.
    ///
    /// Resets the profiling clock, seeds overall statistics from the
    /// processor's estimated current usage, and (if configured) kicks off
    /// real-time monitoring.
    pub fn start_profiling(&mut self, processor: &AdvancedProcessor) {
        self.start_time = SystemTime::now();
        self.record_initial_state(processor);

        if self.config.real_time_monitoring {
            self.start_real_time_monitoring();
        }
    }

    /// Record a memory allocation of `size` bytes against `component`.
    ///
    /// Generates an id of the form `component_category_size_nanos` for the
    /// active-allocation table, appends the event to the allocation history,
    /// then bumps per-component and overall usage/peak/count statistics.
    /// Finally triggers a memory-pressure analysis when overall usage exceeds
    /// the configured threshold.
    pub fn record_allocation(
        &mut self,
        component: &str,
        size: usize,
        category: &str,
        predicted: bool,
    ) {
        let allocation = AllocationPattern {
            size,
            timestamp: SystemTime::now(),
            lifetime: None,
            category: category.to_string(),
            was_predicted: predicted,
        };

        // Id disambiguates same-sized allocations via nanoseconds since the
        // profiling session started.
        let allocation_id = format!(
            "{}_{}_{}_{}",
            component,
            category,
            size,
            allocation
                .timestamp
                .duration_since(self.start_time)
                .unwrap_or_default()
                .as_nanos()
        );

        // One copy goes to the live table, one to the append-only history.
        self.active_allocations
            .insert(allocation_id.clone(), allocation.clone());
        self.profile.allocation_patterns.push(allocation);

        // Update component statistics
        let component_stats = self
            .profile
            .component_stats
            .entry(component.to_string())
            .or_default();
        component_stats.current_usage += size;
        component_stats.peak_usage = component_stats
            .peak_usage
            .max(component_stats.current_usage);
        component_stats.allocation_count += 1;

        // Update overall statistics
        self.profile.overall_stats.current_usage += size;
        self.profile.overall_stats.peak_usage = self
            .profile
            .overall_stats
            .peak_usage
            .max(self.profile.overall_stats.current_usage);
        self.profile.overall_stats.allocation_count += 1;

        // Check for memory pressure
        if self.profile.overall_stats.current_usage > self.memory_pressure_threshold {
            self.analyze_memory_pressure();
        }
    }

283    /// Record memory deallocation
284    pub fn record_deallocation(&mut self, component: &str, size: usize, category: &str) {
285        // Find and remove the allocation
286        let allocation_key = self
287            .active_allocations
288            .keys()
289            .find(|k| k.starts_with(component) && k.contains(category))
290            .cloned();
291
292        if let Some(key) = allocation_key {
293            if let Some(mut allocation) = self.active_allocations.remove(&key) {
294                allocation.lifetime = Some(
295                    SystemTime::now()
296                        .duration_since(allocation.timestamp)
297                        .unwrap_or_default(),
298                );
299
300                // Update statistics
301                let component_stats = self
302                    .profile
303                    .component_stats
304                    .entry(component.to_string())
305                    .or_default();
306                component_stats.current_usage = component_stats.current_usage.saturating_sub(size);
307                component_stats.deallocation_count += 1;
308
309                self.profile.overall_stats.current_usage = self
310                    .profile
311                    .overall_stats
312                    .current_usage
313                    .saturating_sub(size);
314                self.profile.overall_stats.deallocation_count += 1;
315            }
316        }
317    }
318
319    /// Record memory usage snapshot
320    pub fn record_memory_snapshot(&mut self, processor: &AdvancedProcessor) {
321        let current_time = SystemTime::now();
322        let current_usage = self.estimate_processor_memory_usage(processor);
323
324        self.profile
325            .usage_history
326            .push_back((current_time, current_usage));
327
328        // Keep history within limits
329        while self.profile.usage_history.len() > self.config.max_history_entries {
330            self.profile.usage_history.pop_front();
331        }
332
333        // Update average usage
334        let total_usage: usize = self
335            .profile
336            .usage_history
337            .iter()
338            .map(|(_, usage)| usage)
339            .sum();
340        self.profile.overall_stats.average_usage =
341            total_usage as f64 / self.profile.usage_history.len() as f64;
342    }
343
    /// Analyze memory usage patterns and identify optimizations.
    ///
    /// Runs the full analysis pipeline in order: allocation-pattern mining,
    /// opportunity detection (fragmentation, cache, lifetimes), efficiency
    /// scoring, and finally human-readable recommendations.
    pub fn analyze_memory_patterns(&mut self) {
        self.analyze_allocation_patterns();
        self.detect_optimization_opportunities();
        self.calculate_efficiency_metrics();
        self.generate_recommendations();
    }

352    /// Profile memory usage during algorithm execution
353    pub fn profile_algorithm_execution<N, E, Ix, T>(
354        &mut self,
355        processor: &mut AdvancedProcessor,
356        graph: &Graph<N, E, Ix>,
357        algorithm_name: &str,
358        algorithm: impl FnOnce(&Graph<N, E, Ix>) -> Result<T>,
359    ) -> Result<(T, MemoryExecutionProfile)>
360    where
361        N: Node + Clone + std::hash::Hash + Eq + std::fmt::Debug,
362        E: EdgeWeight,
363        Ix: petgraph::graph::IndexType,
364    {
365        let execution_start = SystemTime::now();
366        let initial_memory = self.profile.overall_stats.current_usage;
367
368        // Record pre-execution state
369        self.record_memory_snapshot(processor);
370
371        // Estimate graph memory usage
372        let graph_memory = self.estimate_graph_memory_usage(graph);
373        self.record_allocation("graph", graph_memory, "input_data", false);
374
375        // Execute algorithm with memory tracking
376        let result = crate::advanced::execute_with_enhanced_advanced(graph, algorithm);
377
378        let execution_end = SystemTime::now();
379        let final_memory = self.profile.overall_stats.current_usage;
380
381        // Record post-execution state
382        self.record_memory_snapshot(processor);
383
384        // Calculate execution profile
385        let execution_profile = MemoryExecutionProfile {
386            algorithm_name: algorithm_name.to_string(),
387            execution_time: execution_end
388                .duration_since(execution_start)
389                .unwrap_or_default(),
390            initial_memory,
391            peak_memory: self.profile.overall_stats.peak_usage,
392            final_memory,
393            memory_growth: final_memory.saturating_sub(initial_memory),
394            graph_memory,
395            workspace_memory: self.estimate_workspace_memory(algorithm_name),
396            cache_memory: self.estimate_cache_memory(processor),
397            memory_efficiency: self.calculate_execution_efficiency(initial_memory, final_memory),
398        };
399
400        match result {
401            Ok(value) => Ok((value, execution_profile)),
402            Err(e) => Err(e),
403        }
404    }
405
    /// Generate a comprehensive memory usage report.
    ///
    /// Pure read: clones the accumulated statistics and analyses into a
    /// standalone `MemoryUsageReport` snapshot.
    pub fn generate_memory_report(&self) -> MemoryUsageReport {
        MemoryUsageReport {
            profile_duration: SystemTime::now()
                .duration_since(self.start_time)
                .unwrap_or_default(),
            overall_stats: self.profile.overall_stats.clone(),
            component_breakdown: self.profile.component_stats.clone(),
            optimization_opportunities: self.profile.optimization_opportunities.clone(),
            efficiency_analysis: self.profile.efficiency_analysis.clone(),
            memory_timeline: self.generate_memory_timeline(),
            allocation_analysis: self.analyze_allocation_efficiency(),
            recommendations: self.generate_optimization_recommendations(),
        }
    }

422    /// Estimate memory usage of a graph
423    fn estimate_graph_memory_usage<N, E, Ix>(&self, graph: &Graph<N, E, Ix>) -> usize
424    where
425        N: Node + std::fmt::Debug,
426        E: EdgeWeight,
427        Ix: petgraph::graph::IndexType,
428    {
429        let node_size = std::mem::size_of::<N>();
430        let edge_size = std::mem::size_of::<E>() + std::mem::size_of::<Ix>() * 2; // source + target
431        let index_size = std::mem::size_of::<Ix>();
432
433        let base_graph_overhead = 1024; // Estimated overhead for graph structure
434        let node_memory = graph.node_count() * (node_size + index_size);
435        let edge_memory = graph.edge_count() * edge_size;
436
437        base_graph_overhead + node_memory + edge_memory
438    }
439
    /// Estimate memory usage of an advanced processor.
    ///
    /// Heuristic: 1 MB base + 1 KB per recorded optimization operation + a
    /// cache share scaled by the processor's reported memory efficiency.
    /// These constants are rough estimates, not measured values.
    fn estimate_processor_memory_usage(&self, processor: &AdvancedProcessor) -> usize {
        let stats = processor.get_optimization_stats();

        // Base processor memory (estimated)
        let base_memory = 1024 * 1024; // 1MB base

        // Neural RL agent memory (estimated based on optimizations)
        let neural_memory = stats.total_operations * 1024; // 1KB per optimization

        // Cache memory (estimated)
        let cache_memory = (stats.memory_efficiency * 10.0 * 1024.0 * 1024.0) as usize; // Based on efficiency

        base_memory + neural_memory + cache_memory
    }

456    /// Estimate workspace memory for an algorithm
457    fn estimate_workspace_memory(&self, algorithmname: &str) -> usize {
458        match algorithmname {
459            name if name.contains("pagerank") => 1024 * 1024, // 1MB for PageRank workspace
460            name if name.contains("community") => 2048 * 1024, // 2MB for community detection
461            name if name.contains("centrality") => 512 * 1024, // 512KB for centrality
462            name if name.contains("shortest") => 1536 * 1024, // 1.5MB for shortest paths
463            _ => 256 * 1024,                                  // 256KB default
464        }
465    }
466
    /// Estimate cache memory usage in bytes.
    ///
    /// Heuristic: ~1 KB per recorded operation, scaled by the processor's
    /// reported memory efficiency.
    fn estimate_cache_memory(&self, processor: &AdvancedProcessor) -> usize {
        let stats = processor.get_optimization_stats();
        // Estimate based on optimization count and efficiency
        (stats.total_operations as f64 * stats.memory_efficiency * 1024.0) as usize
    }

474    /// Calculate execution efficiency
475    fn calculate_execution_efficiency(&self, initial_memory: usize, finalmemory: usize) -> f64 {
476        if initial_memory == 0 {
477            return 1.0;
478        }
479
480        let memory_growth_ratio = finalmemory as f64 / initial_memory as f64;
481        // Efficiency decreases with _memory growth
482        1.0 / memory_growth_ratio.max(1.0)
483    }
484
    /// Record the initial profiling state.
    ///
    /// Seeds current, peak, and average usage from the processor's estimated
    /// footprint so later deltas are measured against a non-zero baseline.
    fn record_initial_state(&mut self, processor: &AdvancedProcessor) {
        let initial_memory = self.estimate_processor_memory_usage(processor);
        self.profile.overall_stats.current_usage = initial_memory;
        self.profile.overall_stats.peak_usage = initial_memory;
        self.profile.overall_stats.average_usage = initial_memory as f64;
    }

    /// Start real-time memory monitoring.
    ///
    /// Currently a stub: it only logs a message. A real implementation would
    /// spawn a background thread sampling at `config.sampling_interval`.
    fn start_real_time_monitoring(&mut self) {
        // In a real implementation, this would start a background thread
        // For now, we'll simulate this functionality
        println!("Real-time memory monitoring started");
    }

500    /// Analyze memory pressure and suggest optimizations
501    fn analyze_memory_pressure(&mut self) {
502        let pressure_ratio =
503            self.profile.overall_stats.current_usage as f64 / self.memory_pressure_threshold as f64;
504
505        if pressure_ratio > 0.8 {
506            self.profile
507                .optimization_opportunities
508                .push(OptimizationOpportunity {
509                    optimization_type: OptimizationType::MemoryPooling,
510                    estimated_savings: self.profile.overall_stats.current_usage / 4, // 25% savings estimate
511                    performance_impact: 0.1, // 10% performance improvement
512                    implementation_complexity: 3,
513                    description: "Implement memory pooling to reduce allocation overhead"
514                        .to_string(),
515                    priority: 4,
516                });
517        }
518
519        if pressure_ratio > 0.9 {
520            self.profile
521                .optimization_opportunities
522                .push(OptimizationOpportunity {
523                    optimization_type: OptimizationType::StreamingProcessing,
524                    estimated_savings: self.profile.overall_stats.current_usage / 2, // 50% savings estimate
525                    performance_impact: -0.05, // 5% performance loss
526                    implementation_complexity: 4,
527                    description: "Use streaming algorithms to process data in chunks".to_string(),
528                    priority: 5,
529                });
530        }
531    }
532
533    /// Analyze allocation patterns for optimization opportunities
534    fn analyze_allocation_patterns(&mut self) {
535        let mut pattern_analysis = HashMap::new();
536
537        for allocation in &self.profile.allocation_patterns {
538            let key = format!("{}_{}", allocation.category, allocation.size);
539            let count = pattern_analysis.entry(key).or_insert(0);
540            *count += 1;
541        }
542
543        // Identify frequent allocations for pooling optimization
544        for (pattern, count) in pattern_analysis {
545            if count > 10 {
546                // Frequent allocation threshold
547                self.profile
548                    .optimization_opportunities
549                    .push(OptimizationOpportunity {
550                        optimization_type: OptimizationType::MemoryPooling,
551                        estimated_savings: count * 1024, // Estimate based on frequency
552                        performance_impact: 0.05 * (count as f64 / 100.0), // Performance improvement
553                        implementation_complexity: 2,
554                        description: format!("Pool frequent allocations: {pattern}"),
555                        priority: 3,
556                    });
557            }
558        }
559    }
560
    /// Detect optimization opportunities.
    ///
    /// Dispatches to the three specialized analyses; each appends to
    /// `profile.optimization_opportunities` as it sees fit.
    fn detect_optimization_opportunities(&mut self) {
        // Analyze fragmentation
        self.analyze_fragmentation();

        // Analyze cache effectiveness
        self.analyze_cache_patterns();

        // Analyze allocation lifetime patterns
        self.analyze_lifetime_patterns();
    }

573    /// Analyze memory fragmentation
574    fn analyze_fragmentation(&mut self) {
575        let allocation_sizes: Vec<usize> = self
576            .profile
577            .allocation_patterns
578            .iter()
579            .map(|a| a.size)
580            .collect();
581
582        if allocation_sizes.is_empty() {
583            return;
584        }
585
586        let total_size: usize = allocation_sizes.iter().sum();
587        let avg_size = total_size as f64 / allocation_sizes.len() as f64;
588        let variance = allocation_sizes
589            .iter()
590            .map(|&size| (size as f64 - avg_size).powi(2))
591            .sum::<f64>()
592            / allocation_sizes.len() as f64;
593
594        let fragmentation = variance.sqrt() / avg_size;
595        self.profile.overall_stats.fragmentation_ratio = fragmentation.min(1.0);
596
597        if fragmentation > 0.5 {
598            self.profile
599                .optimization_opportunities
600                .push(OptimizationOpportunity {
601                    optimization_type: OptimizationType::FragmentationReduction,
602                    estimated_savings: (total_size as f64 * 0.1) as usize, // 10% savings estimate
603                    performance_impact: 0.15, // 15% performance improvement
604                    implementation_complexity: 3,
605                    description: "Reduce memory fragmentation through better allocation strategies"
606                        .to_string(),
607                    priority: 3,
608                });
609        }
610    }
611
    /// Analyze cache patterns.
    ///
    /// NOTE(review): "cache effectiveness" here is the share of recorded
    /// allocations whose category contains "cache" — a proxy for cache use,
    /// not a hit rate. Confirm this is the intended metric.
    fn analyze_cache_patterns(&mut self) {
        let cache_allocations = self
            .profile
            .allocation_patterns
            .iter()
            .filter(|a| a.category.contains("cache"))
            .count();

        let total_allocations = self.profile.allocation_patterns.len();

        if total_allocations > 0 {
            let cache_ratio = cache_allocations as f64 / total_allocations as f64;
            self.profile.efficiency_analysis.cache_effectiveness = cache_ratio;

            // Few cache-tagged allocations suggests caching is underused.
            if cache_ratio < 0.1 {
                self.profile
                    .optimization_opportunities
                    .push(OptimizationOpportunity {
                        optimization_type: OptimizationType::CacheOptimization,
                        estimated_savings: 0, // Cache optimization focuses on performance
                        performance_impact: 0.25, // 25% performance improvement
                        implementation_complexity: 2,
                        description: "Improve caching strategies to reduce redundant computations"
                            .to_string(),
                        priority: 4,
                    });
            }
        }
    }

643    /// Analyze allocation lifetime patterns
644    fn analyze_lifetime_patterns(&mut self) {
645        let lifetimes: Vec<Duration> = self
646            .profile
647            .allocation_patterns
648            .iter()
649            .filter_map(|a| a.lifetime)
650            .collect();
651
652        if lifetimes.is_empty() {
653            return;
654        }
655
656        let avg_lifetime = lifetimes.iter().sum::<Duration>() / lifetimes.len() as u32;
657        let short_lived = lifetimes
658            .iter()
659            .filter(|&&lt| lt < avg_lifetime / 2)
660            .count();
661
662        let short_lived_ratio = short_lived as f64 / lifetimes.len() as f64;
663
664        if short_lived_ratio > 0.7 {
665            self.profile
666                .optimization_opportunities
667                .push(OptimizationOpportunity {
668                    optimization_type: OptimizationType::MemoryPooling,
669                    estimated_savings: short_lived * 512, // Estimate based on short-lived allocations
670                    performance_impact: 0.1,              // 10% performance improvement
671                    implementation_complexity: 2,
672                    description: "Pool short-lived allocations to reduce allocation overhead"
673                        .to_string(),
674                    priority: 3,
675                });
676        }
677    }
678
    /// Calculate efficiency metrics.
    ///
    /// `overall_efficiency` is the unweighted mean of three signals:
    /// dealloc/alloc balance, average/peak utilization, and
    /// (1 - fragmentation). Locality scores are refreshed from their
    /// simplified estimators.
    fn calculate_efficiency_metrics(&mut self) {
        // Ratio of deallocations to allocations; 1.0 when nothing allocated.
        let allocation_efficiency = if self.profile.overall_stats.allocation_count > 0 {
            self.profile.overall_stats.deallocation_count as f64
                / self.profile.overall_stats.allocation_count as f64
        } else {
            1.0
        };

        // How close average usage sits to the peak; 1.0 when no peak recorded.
        let memory_utilization = if self.profile.overall_stats.peak_usage > 0 {
            self.profile.overall_stats.average_usage / self.profile.overall_stats.peak_usage as f64
        } else {
            1.0
        };

        self.profile.efficiency_analysis.overall_efficiency = (allocation_efficiency
            + memory_utilization
            + (1.0 - self.profile.overall_stats.fragmentation_ratio))
            / 3.0;

        self.profile.efficiency_analysis.utilization_ratio = memory_utilization;

        // Calculate temporal and spatial locality (simplified)
        self.profile.efficiency_analysis.temporal_locality = self.calculate_temporal_locality();
        self.profile.efficiency_analysis.spatial_locality = self.calculate_spatial_locality();
    }

707    /// Calculate temporal locality score
708    fn calculate_temporal_locality(&self) -> f64 {
709        // Simplified temporal locality calculation based on allocation patterns
710        if self.profile.allocation_patterns.len() < 2 {
711            return 1.0;
712        }
713
714        let mut temporal_score = 0.0;
715        let window_size = 10; // Consider last 10 allocations
716
717        for window in self.profile.allocation_patterns.windows(window_size) {
718            let categories: std::collections::HashSet<_> =
719                window.iter().map(|a| &a.category).collect();
720            let locality = 1.0 - (categories.len() as f64 / window_size as f64);
721            temporal_score += locality;
722        }
723
724        temporal_score
725            / (self
726                .profile
727                .allocation_patterns
728                .len()
729                .saturating_sub(window_size - 1)) as f64
730    }
731
732    /// Calculate spatial locality score
733    fn calculate_spatial_locality(&self) -> f64 {
734        // Simplified spatial locality calculation based on allocation sizes
735        if self.profile.allocation_patterns.is_empty() {
736            return 1.0;
737        }
738
739        let sizes: Vec<usize> = self
740            .profile
741            .allocation_patterns
742            .iter()
743            .map(|a| a.size)
744            .collect();
745        let avg_size = sizes.iter().sum::<usize>() as f64 / sizes.len() as f64;
746
747        let size_variance = sizes
748            .iter()
749            .map(|&size| (size as f64 - avg_size).powi(2))
750            .sum::<f64>()
751            / sizes.len() as f64;
752
753        1.0 / (1.0 + size_variance.sqrt() / avg_size)
754    }
755
756    /// Generate optimization recommendations
757    fn generate_recommendations(&mut self) {
758        let mut recommendations = Vec::new();
759
760        // Memory efficiency recommendations
761        if self.profile.efficiency_analysis.overall_efficiency < 0.7 {
762            recommendations.push(
763                "Consider implementing memory pooling for frequently allocated objects".to_string(),
764            );
765        }
766
767        if self.profile.efficiency_analysis.utilization_ratio < 0.6 {
768            recommendations.push("Memory utilization is low - consider reducing buffer sizes or using lazy allocation".to_string());
769        }
770
771        if self.profile.overall_stats.fragmentation_ratio > 0.4 {
772            recommendations.push(
773                "High memory fragmentation detected - consider using a custom allocator"
774                    .to_string(),
775            );
776        }
777
778        if self.profile.efficiency_analysis.cache_effectiveness < 0.3 {
779            recommendations.push(
780                "Low cache effectiveness - review caching strategies and data access patterns"
781                    .to_string(),
782            );
783        }
784
785        if self.profile.efficiency_analysis.temporal_locality < 0.5 {
786            recommendations.push(
787                "Poor temporal locality - consider grouping related operations together"
788                    .to_string(),
789            );
790        }
791
792        if self.profile.efficiency_analysis.spatial_locality < 0.5 {
793            recommendations.push(
794                "Poor spatial locality - consider using more compact data structures".to_string(),
795            );
796        }
797
798        self.profile.efficiency_analysis.recommendations = recommendations;
799    }
800
801    /// Generate memory timeline for visualization
802    fn generate_memory_timeline(&self) -> Vec<(SystemTime, usize)> {
803        self.profile.usage_history.iter().cloned().collect()
804    }
805
806    /// Analyze allocation efficiency
807    fn analyze_allocation_efficiency(&self) -> AllocationEfficiencyAnalysis {
808        let total_allocations = self.profile.allocation_patterns.len();
809        let predicted_allocations = self
810            .profile
811            .allocation_patterns
812            .iter()
813            .filter(|a| a.was_predicted)
814            .count();
815
816        let prediction_accuracy = if total_allocations > 0 {
817            predicted_allocations as f64 / total_allocations as f64
818        } else {
819            0.0
820        };
821
822        let allocation_size_distribution = self.calculate_allocation_size_distribution();
823        let allocation_category_distribution = self.calculate_allocation_category_distribution();
824
825        AllocationEfficiencyAnalysis {
826            prediction_accuracy,
827            allocation_size_distribution,
828            allocation_category_distribution,
829            average_allocation_size: self.calculate_average_allocation_size(),
830            allocation_frequency: self.calculate_allocation_frequency(),
831        }
832    }
833
834    /// Calculate allocation size distribution
835    fn calculate_allocation_size_distribution(&self) -> HashMap<String, usize> {
836        let mut distribution = HashMap::new();
837
838        for allocation in &self.profile.allocation_patterns {
839            let size_range = match allocation.size {
840                0..=1024 => "Small (<1KB)",
841                1025..=10240 => "Medium (1-10KB)",
842                10241..=102400 => "Large (10-100KB)",
843                _ => "Very Large (>100KB)",
844            };
845
846            *distribution.entry(size_range.to_string()).or_insert(0) += 1;
847        }
848
849        distribution
850    }
851
852    /// Calculate allocation category distribution
853    fn calculate_allocation_category_distribution(&self) -> HashMap<String, usize> {
854        let mut distribution = HashMap::new();
855
856        for allocation in &self.profile.allocation_patterns {
857            *distribution.entry(allocation.category.clone()).or_insert(0) += 1;
858        }
859
860        distribution
861    }
862
863    /// Calculate average allocation size
864    fn calculate_average_allocation_size(&self) -> f64 {
865        if self.profile.allocation_patterns.is_empty() {
866            return 0.0;
867        }
868
869        let total_size: usize = self
870            .profile
871            .allocation_patterns
872            .iter()
873            .map(|a| a.size)
874            .sum();
875        total_size as f64 / self.profile.allocation_patterns.len() as f64
876    }
877
878    /// Calculate allocation frequency
879    fn calculate_allocation_frequency(&self) -> f64 {
880        if self.profile.usage_history.is_empty() {
881            return 0.0;
882        }
883
884        let duration = SystemTime::now()
885            .duration_since(self.start_time)
886            .unwrap_or_default();
887        if duration.as_secs() == 0 {
888            return 0.0;
889        }
890
891        self.profile.allocation_patterns.len() as f64 / duration.as_secs() as f64
892    }
893
894    /// Generate comprehensive optimization recommendations
895    fn generate_optimization_recommendations(&self) -> Vec<String> {
896        let mut recommendations = Vec::new();
897
898        // Prioritize optimization opportunities
899        let mut sorted_opportunities = self.profile.optimization_opportunities.clone();
900        sorted_opportunities.sort_by_key(|o| std::cmp::Reverse(o.priority));
901
902        for opportunity in sorted_opportunities.iter().take(5) {
903            recommendations.push(format!(
904                "Priority {}: {} - {} (Est. savings: {} bytes, Performance impact: {:.1}%)",
905                opportunity.priority,
906                format!("{:?}", opportunity.optimization_type).replace("_", " "),
907                opportunity.description,
908                opportunity.estimated_savings,
909                opportunity.performance_impact * 100.0
910            ));
911        }
912
913        recommendations
914    }
915}
916
/// Memory execution profile for a single algorithm run
#[derive(Debug, Clone)]
pub struct MemoryExecutionProfile {
    /// Name of the algorithm that was profiled
    pub algorithm_name: String,
    /// Wall-clock duration of the run
    pub execution_time: Duration,
    /// Memory usage in bytes when the run started
    pub initial_memory: usize,
    /// Highest memory usage in bytes observed during the run
    pub peak_memory: usize,
    /// Memory usage in bytes when the run finished
    pub final_memory: usize,
    /// Net increase in memory usage over the run, in bytes
    pub memory_growth: usize,
    /// Bytes attributed to graph data structures
    pub graph_memory: usize,
    /// Bytes attributed to algorithm workspace buffers
    pub workspace_memory: usize,
    /// Bytes attributed to caches
    pub cache_memory: usize,
    /// Efficiency score for this run (same scale as `MemoryStats::efficiency_score`)
    pub memory_efficiency: f64,
}
931
/// Comprehensive memory usage report
#[derive(Debug, Clone)]
pub struct MemoryUsageReport {
    /// Total time the profiler was active
    pub profile_duration: Duration,
    /// Aggregate statistics across all components
    pub overall_stats: MemoryStats,
    /// Per-component statistics, keyed by component name
    pub component_breakdown: HashMap<String, MemoryStats>,
    /// Optimization opportunities detected during profiling
    pub optimization_opportunities: Vec<OptimizationOpportunity>,
    /// Derived efficiency metrics (utilization, locality, cache effectiveness)
    pub efficiency_analysis: EfficiencyAnalysis,
    /// Sampled (timestamp, usage-in-bytes) pairs for visualization
    pub memory_timeline: Vec<(SystemTime, usize)>,
    /// Analysis of allocation prediction accuracy and distributions
    pub allocation_analysis: AllocationEfficiencyAnalysis,
    /// Human-readable recommendations derived from the analysis
    pub recommendations: Vec<String>,
}
944
/// Allocation efficiency analysis results
#[derive(Debug, Clone)]
pub struct AllocationEfficiencyAnalysis {
    /// Fraction of allocations that were predicted in advance (0.0-1.0)
    pub prediction_accuracy: f64,
    /// Allocation counts bucketed by size range label
    pub allocation_size_distribution: HashMap<String, usize>,
    /// Allocation counts grouped by category label
    pub allocation_category_distribution: HashMap<String, usize>,
    /// Mean allocation size in bytes
    pub average_allocation_size: f64,
    /// Allocations per second over the profiling window
    pub allocation_frequency: f64,
}
954
955impl MemoryUsageReport {
956    /// Generate a human-readable summary of the memory usage report
957    pub fn generate_summary(&self) -> String {
958        format!(
959            "Memory Usage Report Summary\n\
960            ===========================\n\
961            Profile Duration: {:.2}s\n\
962            Peak Memory Usage: {:.2} MB\n\
963            Average Memory Usage: {:.2} MB\n\
964            Memory Efficiency: {:.1}%\n\
965            Fragmentation Ratio: {:.1}%\n\
966            Total Allocations: {}\n\
967            Optimization Opportunities: {}\n\
968            \n\
969            Top Recommendations:\n\
970            {}",
971            self.profile_duration.as_secs_f64(),
972            self.overall_stats.peak_usage as f64 / 1_000_000.0,
973            self.overall_stats.average_usage / 1_000_000.0,
974            self.efficiency_analysis.overall_efficiency * 100.0,
975            self.overall_stats.fragmentation_ratio * 100.0,
976            self.overall_stats.allocation_count,
977            self.optimization_opportunities.len(),
978            self.recommendations
979                .iter()
980                .take(3)
981                .map(|r| format!("  • {r}"))
982                .collect::<Vec<_>>()
983                .join("\n")
984        )
985    }
986
987    /// Export report to JSON format
988    pub fn to_json(&self) -> String {
989        use std::time::UNIX_EPOCH;
990
991        // Build component breakdown array
992        let mut components = Vec::new();
993        for (name, stats) in &self.component_breakdown {
994            let component = serde_json::json!({
995                "name": name,
996                "current_bytes": stats.current_usage,
997                "peak_bytes": stats.peak_usage,
998                "average_bytes": stats.average_usage,
999                "allocation_count": stats.allocation_count,
1000                "deallocation_count": stats.deallocation_count,
1001                "fragmentation_ratio": stats.fragmentation_ratio,
1002                "efficiency_score": stats.efficiency_score
1003            });
1004            components.push(component);
1005        }
1006
1007        // Build optimization opportunities array
1008        let opportunities: Vec<serde_json::Value> = self
1009            .optimization_opportunities
1010            .iter()
1011            .map(|op| {
1012                serde_json::json!({
1013                    "type": format!("{:?}", op.optimization_type),
1014                    "estimated_savings_bytes": op.estimated_savings,
1015                    "performance_impact": op.performance_impact,
1016                    "implementation_complexity": op.implementation_complexity,
1017                    "description": op.description,
1018                    "priority": op.priority
1019                })
1020            })
1021            .collect();
1022
1023        // Build timeline array (convert SystemTime to f64 seconds since UNIX_EPOCH)
1024        let timeline: Vec<serde_json::Value> = self
1025            .memory_timeline
1026            .iter()
1027            .map(|(t, usage)| {
1028                let ts = t
1029                    .duration_since(UNIX_EPOCH)
1030                    .map(|d| d.as_secs_f64())
1031                    .unwrap_or(0.0);
1032                serde_json::json!({ "timestamp": ts, "usage_bytes": usage })
1033            })
1034            .collect();
1035
1036        let report = serde_json::json!({
1037            "profile_duration_secs": self.profile_duration.as_secs_f64(),
1038            "overall_stats": {
1039                "current_bytes": self.overall_stats.current_usage,
1040                "peak_bytes": self.overall_stats.peak_usage,
1041                "average_bytes": self.overall_stats.average_usage,
1042                "allocation_count": self.overall_stats.allocation_count,
1043                "deallocation_count": self.overall_stats.deallocation_count,
1044                "fragmentation_ratio": self.overall_stats.fragmentation_ratio,
1045                "efficiency_score": self.overall_stats.efficiency_score
1046            },
1047            "components": components,
1048            "optimization_opportunities": opportunities,
1049            "efficiency_analysis": {
1050                "overall_efficiency": self.efficiency_analysis.overall_efficiency,
1051                "utilization_ratio": self.efficiency_analysis.utilization_ratio,
1052                "cache_effectiveness": self.efficiency_analysis.cache_effectiveness,
1053                "access_pattern_efficiency": self.efficiency_analysis.access_pattern_efficiency,
1054                "temporal_locality": self.efficiency_analysis.temporal_locality,
1055                "spatial_locality": self.efficiency_analysis.spatial_locality
1056            },
1057            "memory_timeline": timeline,
1058            "recommendations": self.recommendations
1059        });
1060
1061        serde_json::to_string_pretty(&report)
1062            .unwrap_or_else(|e| format!("{{\"error\": \"JSON serialization failed: {e}\"}}"))
1063    }
1064}
1065
1066/// Convenience function to create a memory profiler with default configuration
1067#[allow(dead_code)]
1068pub fn create_memory_profiler() -> AdvancedMemoryProfiler {
1069    AdvancedMemoryProfiler::new(MemoryProfilerConfig::default())
1070}
1071
1072/// Convenience function to create a memory profiler optimized for large graphs
1073#[allow(dead_code)]
1074pub fn create_large_graph_memory_profiler() -> AdvancedMemoryProfiler {
1075    let config = MemoryProfilerConfig {
1076        track_allocations: true,
1077        analyze_patterns: true,
1078        detect_optimizations: true,
1079        max_history_entries: 50000, // More history for large graphs
1080        sampling_interval: Duration::from_millis(50), // More frequent sampling
1081        real_time_monitoring: true,
1082    };
1083    AdvancedMemoryProfiler::new(config)
1084}
1085
1086/// Enhanced memory profiler for extreme stress testing
1087#[allow(dead_code)]
1088pub fn create_extreme_stress_memory_profiler() -> AdvancedMemoryProfiler {
1089    let config = MemoryProfilerConfig {
1090        track_allocations: true,
1091        analyze_patterns: true,
1092        detect_optimizations: true,
1093        max_history_entries: 100000, // Extra history for extreme tests
1094        sampling_interval: Duration::from_millis(25), // Very frequent sampling
1095        real_time_monitoring: true,
1096    };
1097    AdvancedMemoryProfiler::new(config)
1098}
1099
1100/// Profile a comprehensive stress test with detailed memory analysis
1101#[allow(dead_code)]
1102pub fn profile_comprehensive_stress_test<F>(
1103    profiler: &mut AdvancedMemoryProfiler,
1104    processor: &mut AdvancedProcessor,
1105    test_name: &str,
1106    test_function: F,
1107) -> Result<(MemoryUsageReport, Duration)>
1108where
1109    F: FnOnce(&mut AdvancedProcessor) -> Result<String>,
1110{
1111    println!("🧠 Starting memory-profiled stress test: {test_name}");
1112
1113    // Start profiling
1114    profiler.start_profiling(processor);
1115    let test_start = std::time::Instant::now();
1116
1117    // Record initial state
1118    profiler.record_allocation("stress_test", 0, "test_initialization", true);
1119
1120    // Execute the test _function
1121    let test_result = test_function(processor);
1122
1123    let test_duration = test_start.elapsed();
1124
1125    // Record final state
1126    profiler.record_memory_snapshot(processor);
1127    profiler.analyze_memory_patterns();
1128
1129    // Generate report
1130    let report = profiler.generate_memory_report();
1131
1132    println!("🧠 Memory profiling completed for {test_name}");
1133    println!(
1134        "   šŸ“Š Peak memory: {:.1} MB",
1135        report.overall_stats.peak_usage as f64 / 1_000_000.0
1136    );
1137    println!(
1138        "   šŸ“Š Memory efficiency: {:.1}%",
1139        report.efficiency_analysis.overall_efficiency * 100.0
1140    );
1141    println!(
1142        "   šŸ“Š Optimization opportunities: {}",
1143        report.optimization_opportunities.len()
1144    );
1145
1146    match test_result {
1147        Ok(_) => Ok((report, test_duration)),
1148        Err(e) => {
1149            println!("āš ļø  Test failed but memory profile still generated: {e:?}");
1150            Ok((report, test_duration))
1151        }
1152    }
1153}
1154
1155/// Memory-aware graph generator with profiling integration
1156#[allow(dead_code)]
1157pub fn generate_profiled_large_graph(
1158    profiler: &mut AdvancedMemoryProfiler,
1159    num_nodes: usize,
1160    graph_type: &str,
1161) -> Result<crate::base::Graph<usize, f64>> {
1162    println!("šŸ—ļø  Generating profiled {graph_type} graph with {num_nodes} _nodes");
1163
1164    let generation_start = std::time::Instant::now();
1165    profiler.record_allocation("graph_generation", num_nodes * 8, "_nodes", true);
1166
1167    let mut graph = crate::base::Graph::new();
1168    let mut rng = scirs2_core::random::rng();
1169
1170    // Add _nodes with memory tracking
1171    const NODE_BATCH_SIZE: usize = 25_000;
1172    for batch_start in (0..num_nodes).step_by(NODE_BATCH_SIZE) {
1173        let batch_end = (batch_start + NODE_BATCH_SIZE).min(num_nodes);
1174
1175        // Record batch allocation
1176        profiler.record_allocation(
1177            "graph_generation",
1178            (batch_end - batch_start) * std::mem::size_of::<usize>(),
1179            "node_batch",
1180            true,
1181        );
1182
1183        for i in batch_start..batch_end {
1184            graph.add_node(i);
1185        }
1186
1187        if batch_start % (NODE_BATCH_SIZE * 10) == 0 {
1188            println!(
1189                "   šŸ“Š Added {} nodes, current memory usage estimate: {:.1} MB",
1190                batch_end,
1191                (batch_end * 16) as f64 / 1_000_000.0
1192            );
1193        }
1194    }
1195
1196    // Add edges based on graph _type
1197    let target_edges = match graph_type {
1198        "sparse" => num_nodes * 2,
1199        "medium" => num_nodes * 4,
1200        "dense" => num_nodes * 8,
1201        "scale_free" => (num_nodes as f64 * 2.5) as usize,
1202        _ => num_nodes * 3, // default
1203    };
1204
1205    profiler.record_allocation("graph_generation", target_edges * 24, "edges", true);
1206
1207    let mut edges_added = 0;
1208    while edges_added < target_edges && edges_added < num_nodes * 10 {
1209        // Prevent infinite loop
1210        let source = rng.random_range(0..num_nodes);
1211        let target = rng.random_range(0..num_nodes);
1212
1213        if source != target {
1214            let weight: f64 = rng.random();
1215            if graph.add_edge(source, target, weight).is_ok() {
1216                edges_added += 1;
1217
1218                if edges_added % 100_000 == 0 {
1219                    println!("   šŸ”— Added {edges_added} edges");
1220                }
1221            }
1222        }
1223    }
1224
1225    let generation_time = generation_start.elapsed();
1226    println!(
1227        "āœ… Graph generation completed in {:?}: {} nodes, {} edges",
1228        generation_time,
1229        graph.node_count(),
1230        graph.edge_count()
1231    );
1232
1233    Ok(graph)
1234}
1235
1236/// Comprehensive memory stress test runner
1237#[allow(dead_code)]
1238pub fn run_memory_stress_tests() -> Result<Vec<MemoryUsageReport>> {
1239    println!("🧠 Starting comprehensive memory stress tests...");
1240    println!("================================================");
1241
1242    let mut reports = Vec::new();
1243    let mut profiler = create_extreme_stress_memory_profiler();
1244
1245    // Test 1: Small graph baseline
1246    println!("\nšŸ“Š Test 1: Small Graph Baseline (100K nodes)");
1247    match generate_profiled_large_graph(&mut profiler, 100_000, "medium") {
1248        Ok(small_graph) => {
1249            let mut processor = crate::advanced::create_large_graph_advanced_processor();
1250
1251            let (report, duration) = profile_comprehensive_stress_test(
1252                &mut profiler,
1253                &mut processor,
1254                "small_graph_baseline",
1255                |proc| {
1256                    // Run basic algorithm
1257                    let _result =
1258                        crate::advanced::execute_with_enhanced_advanced(&small_graph, |g| {
1259                            use crate::algorithms::connectivity::connected_components;
1260                            Ok(connected_components(g))
1261                        });
1262                    Ok("Small graph baseline completed".to_string())
1263                },
1264            )?;
1265
1266            println!("   ā±ļø  Test completed in {duration:?}");
1267            reports.push(report);
1268        }
1269        Err(e) => println!("   āŒ Failed to create small graph: {e}"),
1270    }
1271
1272    // Test 2: Medium graph stress test
1273    println!("\nšŸ“Š Test 2: Medium Graph Stress Test (500K nodes)");
1274    match generate_profiled_large_graph(&mut profiler, 500_000, "sparse") {
1275        Ok(medium_graph) => {
1276            let mut processor = crate::advanced::create_large_graph_advanced_processor();
1277
1278            let (report, duration) = profile_comprehensive_stress_test(
1279                &mut profiler,
1280                &mut processor,
1281                "medium_graph_stress",
1282                |proc| {
1283                    // Run multiple algorithms
1284                    let _cc_result =
1285                        crate::advanced::execute_with_enhanced_advanced(&medium_graph, |g| {
1286                            use crate::algorithms::connectivity::connected_components;
1287                            Ok(connected_components(g))
1288                        });
1289
1290                    let _pr_result =
1291                        crate::advanced::execute_with_enhanced_advanced(&medium_graph, |g| {
1292                            use crate::measures::pagerank_centrality;
1293                            pagerank_centrality(g, 0.85, 1e-3)
1294                        });
1295
1296                    Ok("Medium graph stress test completed".to_string())
1297                },
1298            )?;
1299
1300            println!("   ā±ļø  Test completed in {duration:?}");
1301            reports.push(report);
1302        }
1303        Err(e) => println!("   āŒ Failed to create medium graph: {e}"),
1304    }
1305
1306    // Test 3: Large graph extreme test (if memory allows)
1307    println!("\nšŸ“Š Test 3: Large Graph Extreme Test (1M nodes)");
1308    match generate_profiled_large_graph(&mut profiler, 1_000_000, "sparse") {
1309        Ok(large_graph) => {
1310            let mut processor = crate::advanced::create_large_graph_advanced_processor();
1311
1312            let (report, duration) = profile_comprehensive_stress_test(
1313                &mut profiler,
1314                &mut processor,
1315                "large_graph_extreme",
1316                |proc| {
1317                    // Run memory-intensive test
1318                    let _result =
1319                        crate::advanced::execute_with_enhanced_advanced(&large_graph, |g| {
1320                            // Force memory allocation to test memory management
1321                            let nodes: Vec<_> = g.nodes().into_iter().collect();
1322                            let edges: Vec<_> = g
1323                                .edges()
1324                                .into_iter()
1325                                .map(|e| (e.source, e.target, e.weight))
1326                                .collect();
1327                            let _memory_intensive: Vec<f64> = edges
1328                                .iter()
1329                                .flat_map(|(s, t, w)| vec![*s as f64, *t as f64, *w])
1330                                .collect();
1331
1332                            Ok(nodes.len() + edges.len())
1333                        });
1334
1335                    Ok("Large graph extreme test completed".to_string())
1336                },
1337            )?;
1338
1339            println!("   ā±ļø  Test completed in {duration:?}");
1340            reports.push(report);
1341        }
1342        Err(e) => println!("   āŒ Failed to create large graph: {e}"),
1343    }
1344
1345    // Generate summary
1346    println!("\nšŸ“‹ Memory Stress Test Summary");
1347    println!("=============================");
1348    for (i, report) in reports.iter().enumerate() {
1349        println!(
1350            "Test {}: Peak Memory: {:.1} MB, Efficiency: {:.1}%, Optimizations: {}",
1351            i + 1,
1352            report.overall_stats.peak_usage as f64 / 1_000_000.0,
1353            report.efficiency_analysis.overall_efficiency * 100.0,
1354            report.optimization_opportunities.len()
1355        );
1356    }
1357
1358    Ok(reports)
1359}
1360
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_memory_profiler_creation() {
        // A freshly created profiler starts with all counters at zero.
        let profiler = create_memory_profiler();
        assert_eq!(profiler.profile.overall_stats.current_usage, 0);
        assert_eq!(profiler.profile.overall_stats.allocation_count, 0);
    }

    #[test]
    fn test_allocation_recording() {
        let mut profiler = create_memory_profiler();
        profiler.record_allocation("test_component", 1024, "workspace", false);

        // A single allocation shows up in usage, count, and pattern history.
        assert_eq!(profiler.profile.overall_stats.current_usage, 1024);
        assert_eq!(profiler.profile.overall_stats.allocation_count, 1);
        assert_eq!(profiler.profile.allocation_patterns.len(), 1);
    }

    #[test]
    fn test_deallocation_recording() {
        let mut profiler = create_memory_profiler();
        profiler.record_allocation("test_component", 1024, "workspace", false);
        profiler.record_deallocation("test_component", 1024, "workspace");

        // A matching deallocation returns usage to zero and bumps the counter.
        assert_eq!(profiler.profile.overall_stats.current_usage, 0);
        assert_eq!(profiler.profile.overall_stats.deallocation_count, 1);
    }

    #[test]
    fn test_memory_pattern_analysis() {
        let mut profiler = create_memory_profiler();

        // Many identical allocations should register as a pooling candidate.
        for _ in 0..15 {
            profiler.record_allocation("test_component", 1024, "frequent_pattern", false);
        }
        profiler.analyze_memory_patterns();

        assert!(profiler
            .profile
            .optimization_opportunities
            .iter()
            .any(|op| op.optimization_type == OptimizationType::MemoryPooling));
    }

    #[test]
    fn test_efficiency_calculation() {
        let mut profiler = create_memory_profiler();

        // Mixed alloc/dealloc activity across two components.
        profiler.record_allocation("component1", 2048, "data", false);
        profiler.record_allocation("component2", 1024, "cache", true);
        profiler.record_deallocation("component1", 2048, "data");
        profiler.calculate_efficiency_metrics();

        // Overall efficiency is a score in (0, 1].
        let efficiency = profiler.profile.efficiency_analysis.overall_efficiency;
        assert!(efficiency > 0.0);
        assert!(efficiency <= 1.0);
    }

    #[test]
    fn test_memory_report_generation() {
        let mut profiler = create_memory_profiler();
        profiler.record_allocation("test", 1024, "data", false);
        profiler.analyze_memory_patterns();

        let report = profiler.generate_memory_report();
        assert!(report.profile_duration >= Duration::ZERO);
        assert_eq!(report.overall_stats.allocation_count, 1);

        // The plain-text summary carries its header line.
        assert!(report
            .generate_summary()
            .contains("Memory Usage Report Summary"));
    }

    #[test]
    fn test_large_graph_profiler() {
        // The large-graph preset keeps more history and samples faster.
        let profiler = create_large_graph_memory_profiler();
        assert_eq!(profiler.config.max_history_entries, 50000);
        assert_eq!(profiler.config.sampling_interval, Duration::from_millis(50));
    }
}
1454}