scirs2_graph/memory/
mod.rs

1//! Memory profiling and optimization utilities for graph data structures
2//!
3//! This module provides tools to analyze and optimize memory usage in graph operations.
4
5#![allow(missing_docs)]
6
7pub mod compact;
8
9pub use compact::{BitPackedGraph, CSRGraph, CompressedAdjacencyList, HybridGraph, MemmapGraph};
10
11// Re-export from compact module only
12
13use crate::{DiGraph, Graph};
14use std::collections::HashMap;
15use std::mem;
16use std::sync::{Arc, Mutex};
17use std::thread;
18use std::time::{Duration, Instant};
19use sysinfo::System;
20
/// Memory usage statistics for a graph
///
/// All values are estimates derived from `mem::size_of` and element counts;
/// produced by [`MemoryProfiler::profile_graph`] and
/// [`MemoryProfiler::profile_digraph`].
#[derive(Debug, Clone)]
pub struct MemoryStats {
    /// Total memory used by the graph structure (bytes)
    pub total_bytes: usize,
    /// Memory used by node storage (bytes)
    pub node_bytes: usize,
    /// Memory used by edge storage (bytes)
    pub edge_bytes: usize,
    /// Memory used by adjacency lists (bytes)
    pub adjacency_bytes: usize,
    /// Overhead from allocator metadata (bytes)
    pub overhead_bytes: usize,
    /// Memory efficiency (useful data / total memory), in the range [0, 1]
    pub efficiency: f64,
}
37
/// Memory profiler for graph structures
///
/// Stateless namespace type; all profiling entry points are associated functions.
pub struct MemoryProfiler;
40
41impl MemoryProfiler {
42    /// Calculate memory statistics for an undirected graph
43    pub fn profile_graph<N, E, Ix>(graph: &Graph<N, E, Ix>) -> MemoryStats
44    where
45        N: crate::base::Node + std::fmt::Debug,
46        E: crate::base::EdgeWeight,
47        Ix: petgraph::graph::IndexType,
48    {
49        let node_count = graph.node_count();
50        let edge_count = graph.edge_count();
51
52        // Calculate node storage - nodes are stored with their data
53        let node_bytes = node_count
54            * (mem::size_of::<N>() + mem::size_of::<petgraph::graph::NodeIndex<Ix>>())
55            + mem::size_of::<std::collections::HashMap<N, petgraph::graph::NodeIndex<Ix>>>();
56
57        // Calculate adjacency list storage based on actual _graph structure
58        let mut adjacency_bytes = 0;
59        for node in graph.nodes() {
60            if let Ok(neighbors) = graph.neighbors(node) {
61                let neighbor_count = neighbors.len();
62                adjacency_bytes += neighbor_count * mem::size_of::<E>() // edge weights
63                    + mem::size_of::<Vec<E>>(); // Vec overhead per adjacency list
64            }
65        }
66
67        // Calculate edge storage
68        let edge_bytes = edge_count * (mem::size_of::<N>() * 2 + mem::size_of::<E>());
69
70        // Estimate allocator overhead (typically 8-16 bytes per allocation)
71        let allocation_count = node_count + 1; // nodes + main structure
72        let overhead_bytes = allocation_count * 16;
73
74        let total_bytes = node_bytes + adjacency_bytes + edge_bytes + overhead_bytes;
75        let useful_bytes = node_bytes + adjacency_bytes;
76        let efficiency = if total_bytes > 0 {
77            useful_bytes as f64 / total_bytes as f64
78        } else {
79            1.0
80        };
81
82        MemoryStats {
83            total_bytes,
84            node_bytes,
85            edge_bytes,
86            adjacency_bytes,
87            overhead_bytes,
88            efficiency,
89        }
90    }
91
92    /// Calculate memory statistics for a directed graph
93    pub fn profile_digraph<N, E, Ix>(graph: &DiGraph<N, E, Ix>) -> MemoryStats
94    where
95        N: crate::base::Node + std::fmt::Debug,
96        E: crate::base::EdgeWeight,
97        Ix: petgraph::graph::IndexType,
98    {
99        let node_count = graph.node_count();
100        let edge_count = graph.edge_count();
101
102        // Similar to undirected but with separate in/out adjacency lists
103        let node_bytes = node_count
104            * (mem::size_of::<N>() + mem::size_of::<petgraph::graph::NodeIndex<Ix>>())
105            + mem::size_of::<std::collections::HashMap<N, petgraph::graph::NodeIndex<Ix>>>();
106
107        // Both in-edges and out-edges storage - directed graphs have separate lists
108        let mut adjacency_bytes = 0;
109        for node in graph.nodes() {
110            // Count successors (outgoing edges)
111            if let Ok(successors) = graph.successors(node) {
112                adjacency_bytes +=
113                    successors.len() * mem::size_of::<E>() + mem::size_of::<Vec<E>>();
114            }
115            // Count predecessors (incoming edges)
116            if let Ok(predecessors) = graph.predecessors(node) {
117                adjacency_bytes +=
118                    predecessors.len() * mem::size_of::<E>() + mem::size_of::<Vec<E>>();
119            }
120        }
121
122        let edge_bytes = edge_count * (mem::size_of::<N>() * 2 + mem::size_of::<E>());
123
124        let allocation_count = node_count * 2 + 1; // in/out vecs + main structure
125        let overhead_bytes = allocation_count * 16;
126
127        let total_bytes = node_bytes + adjacency_bytes + edge_bytes + overhead_bytes;
128        let useful_bytes = node_bytes + adjacency_bytes;
129        let efficiency = if total_bytes > 0 {
130            useful_bytes as f64 / total_bytes as f64
131        } else {
132            1.0
133        };
134
135        MemoryStats {
136            total_bytes,
137            node_bytes,
138            edge_bytes,
139            adjacency_bytes,
140            overhead_bytes,
141            efficiency,
142        }
143    }
144
145    /// Estimate memory usage for a graph of given size
146    pub fn estimate_memory(nodes: usize, edges: usize, directed: bool) -> usize {
147        let _avg_degree = if nodes > 0 {
148            edges as f64 / nodes as f64
149        } else {
150            0.0
151        };
152
153        // Base node storage
154        let node_bytes = nodes * mem::size_of::<usize>();
155
156        // Adjacency list storage
157        let edge_entry_size = mem::size_of::<(usize, f64)>();
158        let adjacency_multiplier = if directed { 2.0 } else { 1.0 };
159        let adjacency_bytes =
160            (edges as f64 * adjacency_multiplier * edge_entry_size as f64) as usize;
161
162        // Vec overhead (capacity often > size)
163        let vec_overhead =
164            nodes * mem::size_of::<Vec<(usize, f64)>>() * if directed { 2 } else { 1 };
165
166        // Allocator overhead
167        let overhead = (nodes + edges / 100) * 16;
168
169        node_bytes + adjacency_bytes + vec_overhead + overhead
170    }
171
172    /// Analyze memory fragmentation in the graph
173    pub fn analyze_fragmentation<N, E, Ix>(graph: &Graph<N, E, Ix>) -> FragmentationReport
174    where
175        N: crate::base::Node + std::fmt::Debug,
176        E: crate::base::EdgeWeight,
177        Ix: petgraph::graph::IndexType,
178    {
179        let mut degree_distribution = HashMap::new();
180        let mut total_capacity = 0;
181        let mut total_used = 0;
182
183        for node in graph.nodes() {
184            let degree = graph.degree(node);
185            *degree_distribution.entry(degree).or_insert(0) += 1;
186
187            // Estimate Vec capacity vs actual usage
188            // Vecs typically grow by 2x when resizing
189            let capacity = degree.next_power_of_two().max(4);
190            total_capacity += capacity;
191            total_used += degree;
192        }
193
194        let fragmentation = if total_capacity > 0 {
195            1.0 - (total_used as f64 / total_capacity as f64)
196        } else {
197            0.0
198        };
199
200        FragmentationReport {
201            degree_distribution,
202            total_capacity,
203            total_used,
204            fragmentation_ratio: fragmentation,
205            wasted_bytes: (total_capacity - total_used) * mem::size_of::<(N, E)>(),
206        }
207    }
208}
209
/// Report on memory fragmentation in graph structures
///
/// Produced by [`MemoryProfiler::analyze_fragmentation`]. Capacities are
/// measured in adjacency-list entries; `wasted_bytes` is in bytes.
#[derive(Debug)]
pub struct FragmentationReport {
    /// Distribution of node degrees (degree -> number of nodes with that degree)
    pub degree_distribution: HashMap<usize, usize>,
    /// Total capacity estimated to be allocated for adjacency lists (entries)
    pub total_capacity: usize,
    /// Total capacity actually used (entries; equals the sum of all degrees)
    pub total_used: usize,
    /// Fragmentation ratio (0.0 = no fragmentation, 1.0 = all wasted)
    pub fragmentation_ratio: f64,
    /// Estimated wasted bytes due to over-allocation
    pub wasted_bytes: usize,
}
224
/// Memory-optimized graph builder
///
/// Collects nodes and weighted edges up front so the final graph can be
/// constructed in one pass; see [`OptimizedGraphBuilder::build`].
pub struct OptimizedGraphBuilder {
    /// Nodes to insert, in insertion order
    nodes: Vec<usize>,
    /// Edges as (from, to, weight) triples
    edges: Vec<(usize, usize, f64)>,
    /// Optional hint for the expected average degree
    // NOTE(review): `build` does not currently act on this hint — confirm
    // whether the Graph API has grown a reservation method before relying on it.
    estimated_edges_per_node: Option<usize>,
}
231
impl Default for OptimizedGraphBuilder {
    /// Equivalent to [`OptimizedGraphBuilder::new`]
    fn default() -> Self {
        Self::new()
    }
}
237
238impl OptimizedGraphBuilder {
239    /// Create a new optimized graph builder
240    pub fn new() -> Self {
241        Self {
242            nodes: Vec::new(),
243            edges: Vec::new(),
244            estimated_edges_per_node: None,
245        }
246    }
247
248    /// Set expected number of edges per node for better memory allocation
249    pub fn with_estimated_edges_per_node(mut self, edges_pernode: usize) -> Self {
250        self.estimated_edges_per_node = Some(edges_pernode);
251        self
252    }
253
254    /// Reserve capacity for nodes
255    pub fn reserve_nodes(mut self, capacity: usize) -> Self {
256        self.nodes.reserve(capacity);
257        self
258    }
259
260    /// Reserve capacity for edges
261    pub fn reserve_edges(mut self, capacity: usize) -> Self {
262        self.edges.reserve(capacity);
263        self
264    }
265
266    /// Add a node
267    pub fn add_node(&mut self, node: usize) {
268        self.nodes.push(node);
269    }
270
271    /// Add an edge
272    pub fn add_edge(&mut self, from: usize, to: usize, weight: f64) {
273        self.edges.push((from, to, weight));
274    }
275
276    /// Build the optimized graph
277    pub fn build(self) -> Result<Graph<usize, f64>, String> {
278        let mut graph = Graph::new();
279
280        // Pre-allocate with estimated sizes
281        if let Some(_epn) = self.estimated_edges_per_node {
282            // Reserve capacity in adjacency lists
283            for &node in &self.nodes {
284                let _ = graph.add_node(node);
285                // Internal method to reserve adjacency list capacity
286                // This would need to be added to Graph API
287            }
288        } else {
289            for &node in &self.nodes {
290                let _ = graph.add_node(node);
291            }
292        }
293
294        // Add edges
295        for (from, to, weight) in self.edges {
296            graph
297                .add_edge(from, to, weight)
298                .map_err(|e| format!("Failed to add edge: {e:?}"))?;
299        }
300
301        Ok(graph)
302    }
303}
304
/// Memory optimization suggestions
///
/// Returned by [`suggest_optimizations`].
#[derive(Debug)]
pub struct OptimizationSuggestions {
    /// Human-readable suggestions, one entry per detected issue
    pub suggestions: Vec<String>,
    /// Estimated bytes that could be reclaimed by applying the suggestions
    pub potential_savings: usize,
}
311
312/// Analyze a graph and provide memory optimization suggestions
313#[allow(dead_code)]
314pub fn suggest_optimizations(
315    stats: &MemoryStats,
316    fragmentation: &FragmentationReport,
317) -> OptimizationSuggestions {
318    let mut suggestions = Vec::new();
319    let mut potential_savings = 0;
320
321    // Check efficiency
322    if stats.efficiency < 0.7 {
323        suggestions.push(format!(
324            "Low memory efficiency ({:.1}%). Consider using a more compact representation.",
325            stats.efficiency * 100.0
326        ));
327    }
328
329    // Check fragmentation
330    if fragmentation.fragmentation_ratio > 0.3 {
331        suggestions.push(format!(
332            "High fragmentation ({:.1}%). Pre-allocate adjacency lists based on expected degree.",
333            fragmentation.fragmentation_ratio * 100.0
334        ));
335        potential_savings += fragmentation.wasted_bytes;
336    }
337
338    // Check degree distribution
339    let max_degree = fragmentation
340        .degree_distribution
341        .keys()
342        .max()
343        .copied()
344        .unwrap_or(0);
345    let avg_degree =
346        if fragmentation.total_used > 0 && !fragmentation.degree_distribution.is_empty() {
347            fragmentation.total_used as f64 / fragmentation.degree_distribution.len() as f64
348        } else {
349            0.0
350        };
351
352    if max_degree > avg_degree as usize * 10 {
353        suggestions.push(
354            "Highly skewed degree distribution. Consider using a hybrid representation \
355             with different storage for high-degree nodes."
356                .to_string(),
357        );
358    }
359
360    // Check for sparse graphs
361    if avg_degree < 5.0 {
362        suggestions.push(
363            "Very sparse graph. Consider using a sparse matrix representation \
364             or compressed adjacency lists."
365                .to_string(),
366        );
367    }
368
369    OptimizationSuggestions {
370        suggestions,
371        potential_savings,
372    }
373}
374
/// Real-time memory profiler for monitoring memory usage during algorithm execution
///
/// Samples process-level memory (via `sysinfo`) on a background thread
/// between `start_monitoring` and `stop_monitoring`.
///
/// NOTE(review): no `Drop` impl is visible for this type — dropping the
/// profiler without calling `stop_monitoring` appears to leave the sampling
/// thread running forever (the shared `is_monitoring` flag stays `true`).
/// Confirm, and always stop monitoring explicitly.
#[derive(Debug)]
pub struct RealTimeMemoryProfiler {
    /// Process ID of the current process being monitored
    pid: u32,
    /// System information handle (shared with the sampling thread)
    system: Arc<Mutex<System>>,
    /// Flag the sampling thread polls to know when to exit
    is_monitoring: Arc<Mutex<bool>>,
    /// Memory usage samples collected by the sampling thread
    samples: Arc<Mutex<Vec<MemorySample>>>,
    /// Handle of the sampling thread, if monitoring is active
    monitor_thread: Option<thread::JoinHandle<()>>,
}
389
/// Memory usage sample at a specific time
///
/// One entry in the time series recorded by the sampling thread.
#[derive(Debug, Clone)]
pub struct MemorySample {
    /// Timestamp when the sample was taken
    pub timestamp: Instant,
    /// Physical memory used (RSS) in bytes
    pub physical_memory: u64,
    /// Virtual memory used in bytes
    pub virtual_memory: u64,
    /// Memory growth since the previous sample in bytes (negative = shrank)
    pub growth_rate: i64,
}
402
/// Memory monitoring metrics collected over time
///
/// Aggregated from the [`MemorySample`] series collected by
/// [`RealTimeMemoryProfiler`].
#[derive(Debug, Clone)]
pub struct MemoryMetrics {
    /// Peak physical memory usage in bytes
    pub peak_memory: u64,
    /// Average physical memory usage in bytes (integer mean over samples)
    pub average_memory: u64,
    /// Memory growth rate in bytes per second (first sample to last)
    pub growth_rate: f64,
    /// Total monitoring duration covered by the samples
    pub duration: Duration,
    /// Number of samples collected
    pub sample_count: usize,
    /// Memory variance in bytes² (indicates stability)
    pub memory_variance: f64,
}
419
impl Default for RealTimeMemoryProfiler {
    /// Equivalent to [`RealTimeMemoryProfiler::new`]
    fn default() -> Self {
        Self::new()
    }
}
425
426impl RealTimeMemoryProfiler {
427    /// Create a new real-time memory profiler
428    pub fn new() -> Self {
429        let mut system = System::new_all();
430        system.refresh_all();
431
432        let pid = std::process::id();
433
434        Self {
435            pid,
436            system: Arc::new(Mutex::new(system)),
437            is_monitoring: Arc::new(Mutex::new(false)),
438            samples: Arc::new(Mutex::new(Vec::new())),
439            monitor_thread: None,
440        }
441    }
442
443    /// Start monitoring memory usage
444    pub fn start_monitoring(&mut self, sampleinterval: Duration) {
445        let mut is_monitoring = self.is_monitoring.lock().unwrap();
446        if *is_monitoring {
447            return; // Already monitoring
448        }
449        *is_monitoring = true;
450        drop(is_monitoring);
451
452        // Clear previous samples
453        self.samples.lock().unwrap().clear();
454
455        let pid = self.pid;
456        let system = Arc::clone(&self.system);
457        let is_monitoring = Arc::clone(&self.is_monitoring);
458        let samples = Arc::clone(&self.samples);
459
460        let handle = thread::spawn(move || {
461            let mut last_memory = 0u64;
462            let _start_time = Instant::now();
463
464            while *is_monitoring.lock().unwrap() {
465                {
466                    let mut sys = system.lock().unwrap();
467                    sys.refresh_processes(
468                        sysinfo::ProcessesToUpdate::Some(&[(pid as usize).into()]),
469                        false,
470                    );
471
472                    if let Some(process) = sys.process((pid as usize).into()) {
473                        let physical_memory = process.memory() * 1024; // Convert KB to bytes
474                        let virtual_memory = process.virtual_memory() * 1024;
475                        let growth_rate = physical_memory as i64 - last_memory as i64;
476
477                        let sample = MemorySample {
478                            timestamp: Instant::now(),
479                            physical_memory,
480                            virtual_memory,
481                            growth_rate,
482                        };
483
484                        samples.lock().unwrap().push(sample);
485                        last_memory = physical_memory;
486                    }
487                }
488
489                thread::sleep(sampleinterval);
490            }
491        });
492
493        self.monitor_thread = Some(handle);
494    }
495
496    /// Stop monitoring and return collected metrics
497    pub fn stop_monitoring(&mut self) -> MemoryMetrics {
498        {
499            let mut is_monitoring = self.is_monitoring.lock().unwrap();
500            *is_monitoring = false;
501        }
502
503        if let Some(handle) = self.monitor_thread.take() {
504            let _ = handle.join();
505        }
506
507        let samples = self.samples.lock().unwrap();
508        self.calculate_metrics(&samples)
509    }
510
511    /// Get current memory metrics without stopping monitoring
512    pub fn get_current_metrics(&self) -> MemoryMetrics {
513        let samples = self.samples.lock().unwrap();
514        self.calculate_metrics(&samples)
515    }
516
517    /// Calculate metrics from collected samples
518    fn calculate_metrics(&self, samples: &[MemorySample]) -> MemoryMetrics {
519        if samples.is_empty() {
520            return MemoryMetrics {
521                peak_memory: 0,
522                average_memory: 0,
523                growth_rate: 0.0,
524                duration: Duration::new(0, 0),
525                sample_count: 0,
526                memory_variance: 0.0,
527            };
528        }
529
530        let peak_memory = samples.iter().map(|s| s.physical_memory).max().unwrap_or(0);
531        let total_memory: u64 = samples.iter().map(|s| s.physical_memory).sum();
532        let average_memory = total_memory / samples.len() as u64;
533
534        let duration = if samples.len() > 1 {
535            samples
536                .last()
537                .unwrap()
538                .timestamp
539                .duration_since(samples[0].timestamp)
540        } else {
541            Duration::new(0, 0)
542        };
543
544        let growth_rate = if duration.as_secs_f64() > 0.0 && samples.len() > 1 {
545            let total_growth =
546                samples.last().unwrap().physical_memory as i64 - samples[0].physical_memory as i64;
547            total_growth as f64 / duration.as_secs_f64()
548        } else {
549            0.0
550        };
551
552        // Calculate memory variance
553        let variance = if samples.len() > 1 {
554            let mean = average_memory as f64;
555            let sum_sq_diff: f64 = samples
556                .iter()
557                .map(|s| {
558                    let diff = s.physical_memory as f64 - mean;
559                    diff * diff
560                })
561                .sum();
562            sum_sq_diff / samples.len() as f64
563        } else {
564            0.0
565        };
566
567        MemoryMetrics {
568            peak_memory,
569            average_memory,
570            growth_rate,
571            duration,
572            sample_count: samples.len(),
573            memory_variance: variance,
574        }
575    }
576
577    /// Check for memory leaks based on growth rate
578    pub fn detect_memory_leaks(&self, threshold_bytes_persec: f64) -> bool {
579        let metrics = self.get_current_metrics();
580        metrics.growth_rate > threshold_bytes_persec && metrics.sample_count > 10
581    }
582
583    /// Generate memory usage report
584    pub fn generate_report(&self) -> String {
585        let metrics = self.get_current_metrics();
586        let _samples = self.samples.lock().unwrap();
587
588        let mut report = String::new();
589        report.push_str("=== Memory Usage Report ===\n");
590        report.push_str(&format!(
591            "Peak Memory: {:.2} MB\n",
592            metrics.peak_memory as f64 / 1_048_576.0
593        ));
594        report.push_str(&format!(
595            "Average Memory: {:.2} MB\n",
596            metrics.average_memory as f64 / 1_048_576.0
597        ));
598        report.push_str(&format!(
599            "Growth Rate: {:.2} KB/s\n",
600            metrics.growth_rate / 1024.0
601        ));
602        report.push_str(&format!(
603            "Duration: {:.2} seconds\n",
604            metrics.duration.as_secs_f64()
605        ));
606        report.push_str(&format!("Samples: {}\n", metrics.sample_count));
607        report.push_str(&format!(
608            "Memory Variance: {:.2} MB²\n",
609            metrics.memory_variance / 1_048_576.0_f64.powi(2)
610        ));
611
612        if metrics.growth_rate > 1024.0 * 1024.0 {
613            // > 1 MB/s
614            report.push_str("\n⚠️  WARNING: High memory growth rate detected!\n");
615        }
616
617        if metrics.memory_variance > (100.0 * 1_048_576.0_f64).powi(2) {
618            // > 100 MB variance
619            report.push_str("⚠️  WARNING: High memory usage variance!\n");
620        }
621
622        report
623    }
624}
625
/// Advanced memory analysis for graph algorithms
///
/// Stateless namespace type; all analyses are associated functions built on
/// top of [`RealTimeMemoryProfiler`].
pub struct AdvancedMemoryAnalyzer;
628
629impl AdvancedMemoryAnalyzer {
630    /// Analyze memory allocation patterns for different graph operations
631    pub fn analyze_operation_memory<F, R>(
632        operation_name: &str,
633        operation: F,
634        sample_interval: Duration,
635    ) -> (R, MemoryMetrics)
636    where
637        F: FnOnce() -> R,
638    {
639        let mut profiler = RealTimeMemoryProfiler::new();
640        profiler.start_monitoring(sample_interval);
641
642        let result = operation();
643
644        let metrics = profiler.stop_monitoring();
645
646        println!(
647            "Memory analysis for '{}': Peak={:.2}MB, Growth={:.2}KB/s",
648            operation_name,
649            metrics.peak_memory as f64 / 1_048_576.0,
650            metrics.growth_rate / 1024.0
651        );
652
653        (result, metrics)
654    }
655
656    /// Compare memory usage between different algorithm implementations
657    pub fn compare_implementations<F1, F2, R>(
658        impl1: F1,
659        impl2: F2,
660        impl1_name: &str,
661        impl2_name: &str,
662    ) -> (MemoryMetrics, MemoryMetrics)
663    where
664        F1: FnOnce() -> R,
665        F2: FnOnce() -> R,
666    {
667        let sample_interval = Duration::from_millis(10);
668
669        let (_, metrics1) = Self::analyze_operation_memory(impl1_name, impl1, sample_interval);
670        let (_, metrics2) = Self::analyze_operation_memory(impl2_name, impl2, sample_interval);
671
672        println!("\n=== Implementation Comparison ===");
673        println!(
674            "{} - Peak: {:.2}MB, Growth: {:.2}KB/s",
675            impl1_name,
676            metrics1.peak_memory as f64 / 1_048_576.0,
677            metrics1.growth_rate / 1024.0
678        );
679        println!(
680            "{} - Peak: {:.2}MB, Growth: {:.2}KB/s",
681            impl2_name,
682            metrics2.peak_memory as f64 / 1_048_576.0,
683            metrics2.growth_rate / 1024.0
684        );
685
686        let memory_improvement = if metrics2.peak_memory > 0 {
687            ((metrics1.peak_memory as f64 - metrics2.peak_memory as f64)
688                / metrics2.peak_memory as f64)
689                * 100.0
690        } else {
691            0.0
692        };
693
694        println!("Memory improvement: {memory_improvement:.1}%");
695
696        (metrics1, metrics2)
697    }
698
699    /// Benchmark memory usage for graph algorithms at different scales
700    pub fn scale_analysis<F>(
701        algorithm_factory: F,
702        scales: Vec<usize>,
703        algorithm_name: &str,
704    ) -> Vec<(usize, MemoryMetrics)>
705    where
706        F: Fn(usize) -> Box<dyn FnOnce()>,
707    {
708        let mut results = Vec::new();
709
710        println!("\n=== Scaling Analysis for {algorithm_name} ===");
711        for scale in scales {
712            let operation = algorithm_factory(scale);
713            let (_, metrics) = Self::analyze_operation_memory(
714                &format!("{algorithm_name} (n={scale})"),
715                operation,
716                Duration::from_millis(5),
717            );
718            results.push((scale, metrics));
719        }
720
721        // Print scaling summary
722        println!("\nScaling Summary:");
723        for (scale, metrics) in &results {
724            println!(
725                "  n={}: {:.2}MB peak",
726                scale,
727                metrics.peak_memory as f64 / 1_048_576.0
728            );
729        }
730
731        results
732    }
733}
734
#[cfg(test)]
mod tests {
    use super::*;
    use crate::generators;

    #[test]
    fn test_memory_profiling() {
        // Profile a K_100 and sanity-check the reported statistics.
        let complete = generators::complete_graph(100).unwrap();
        let profile = MemoryProfiler::profile_graph(&complete);

        assert!(profile.total_bytes > 0);
        assert!(profile.efficiency > 0.0 && profile.efficiency <= 1.0);
        assert_eq!(complete.node_count(), 100);
        assert_eq!(complete.edge_count(), 100 * 99 / 2); // Complete graph
    }

    #[test]
    fn test_memory_estimation() {
        // A directed graph of the same size must be estimated as larger,
        // since each edge appears in both an in-list and an out-list.
        let undirected = MemoryProfiler::estimate_memory(1000, 5000, false);
        let directed = MemoryProfiler::estimate_memory(1000, 5000, true);

        assert!(undirected > 0);
        assert!(directed > undirected);
    }

    #[test]
    fn test_fragmentation_analysis() {
        let mut graph: Graph<i32, f64> = Graph::new();

        // 100 nodes, most of which stay isolated.
        (0..100).for_each(|id| {
            graph.add_node(id);
        });

        // Wire the first ten nodes to the next ten to vary the degrees.
        for source in 0..10 {
            for target in 10..20 {
                if source != target {
                    graph.add_edge(source, target, 1.0).unwrap();
                }
            }
        }

        let report = MemoryProfiler::analyze_fragmentation(&graph);
        assert!(report.fragmentation_ratio >= 0.0 && report.fragmentation_ratio <= 1.0);
    }

    #[test]
    fn test_optimized_builder() {
        let mut builder = OptimizedGraphBuilder::new()
            .reserve_nodes(100)
            .reserve_edges(200)
            .with_estimated_edges_per_node(4);

        // Build a 100-node path graph: 0-1-2-...-99.
        (0..100).for_each(|node| builder.add_node(node));
        (0..99).for_each(|node| builder.add_edge(node, node + 1, 1.0));

        let graph = builder.build().unwrap();
        assert_eq!(graph.node_count(), 100);
        assert_eq!(graph.edge_count(), 99);
    }

    #[test]
    fn test_real_time_memory_profiler() {
        use std::time::Duration;

        let mut profiler = RealTimeMemoryProfiler::new();
        profiler.start_monitoring(Duration::from_millis(10));

        // Give the sampler time to collect a few data points.
        std::thread::sleep(Duration::from_millis(50));

        let collected = profiler.stop_monitoring();
        assert!(collected.sample_count > 0);
        assert!(collected.peak_memory > 0);
    }

    #[test]
    fn test_advanced_memory_analyzer() {
        let (result, metrics) = AdvancedMemoryAnalyzer::analyze_operation_memory(
            "test_operation",
            || {
                // Allocate enough that the sampler has something to observe.
                let values: Vec<usize> = (0..100_000).collect();
                std::thread::sleep(Duration::from_millis(10)); // Allow time for monitoring
                values.len()
            },
            Duration::from_millis(5),
        );

        assert_eq!(result, 100_000);
        // System-level monitoring may not notice small allocations; we only
        // require that the analyzer ran and sampled at all.
        assert!(metrics.sample_count > 0);
    }

    #[test]
    fn test_memory_leak_detection() {
        // A freshly created profiler has no samples, so no leak can be
        // reported regardless of the 1 MB/s threshold used here.
        let profiler = RealTimeMemoryProfiler::new();
        assert!(!profiler.detect_memory_leaks(1024.0 * 1024.0));
    }

    #[test]
    fn test_optimization_suggestions() {
        // Hand-built report with 50% fragmentation...
        let fragmentation = FragmentationReport {
            degree_distribution: std::collections::HashMap::new(),
            total_capacity: 1000,
            total_used: 500,
            fragmentation_ratio: 0.5, // High fragmentation
            wasted_bytes: 500 * mem::size_of::<(usize, f64)>(),
        };

        // ...and stats with 50% efficiency: both should trigger suggestions.
        let stats = MemoryStats {
            total_bytes: 10000,
            node_bytes: 2000,
            edge_bytes: 3000,
            adjacency_bytes: 4000,
            overhead_bytes: 1000,
            efficiency: 0.5, // Low efficiency
        };

        let advice = suggest_optimizations(&stats, &fragmentation);
        assert!(!advice.suggestions.is_empty());
        assert!(advice.potential_savings > 0);
    }
}