#![allow(missing_docs)]

pub mod compact;

pub use compact::{BitPackedGraph, CSRGraph, CompressedAdjacencyList, HybridGraph, MemmapGraph};

use crate::{DiGraph, Graph};
use std::collections::HashMap;
use std::mem;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Duration, Instant};
use sysinfo::System;

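/// Approximate memory-usage breakdown for a graph, in bytes.
///
/// All figures are size-of based estimates rather than allocator-reported
/// numbers; `efficiency` is the fraction of `total_bytes` spent on node and
/// adjacency data rather than bookkeeping overhead.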
#[derive(Debug, Clone)]
pub struct MemoryStats {
    pub total_bytes: usize,
    pub node_bytes: usize,
    pub edge_bytes: usize,
    pub adjacency_bytes: usize,
    pub overhead_bytes: usize,
    pub efficiency: f64,
}

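/// Size-of based memory profiling for `Graph` and `DiGraph` instances.
///
/// A minimal usage sketch (marked `ignore` because it assumes a graph built
/// with the same generator the tests at the bottom of this file use):
///
/// ```ignore
/// let graph = generators::complete_graph(100).unwrap();
/// let stats = MemoryProfiler::profile_graph(&graph);
/// println!(
///     "~{} bytes total, {:.1}% spent on node/adjacency data",
///     stats.total_bytes,
///     stats.efficiency * 100.0
/// );
/// ```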
pub struct MemoryProfiler;

impl MemoryProfiler {
    pub fn profile_graph<N, E, Ix>(graph: &Graph<N, E, Ix>) -> MemoryStats
    where
        N: crate::base::Node + std::fmt::Debug,
        E: crate::base::EdgeWeight,
        Ix: petgraph::graph::IndexType,
    {
        let node_count = graph.node_count();
        let edge_count = graph.edge_count();

        let node_bytes = node_count
            * (mem::size_of::<N>() + mem::size_of::<petgraph::graph::NodeIndex<Ix>>())
            + mem::size_of::<std::collections::HashMap<N, petgraph::graph::NodeIndex<Ix>>>();

        let mut adjacency_bytes = 0;
        for node in graph.nodes() {
            if let Ok(neighbors) = graph.neighbors(node) {
                let neighbor_count = neighbors.len();
                adjacency_bytes +=
                    neighbor_count * mem::size_of::<E>() + mem::size_of::<Vec<E>>();
            }
        }

        let edge_bytes = edge_count * (mem::size_of::<N>() * 2 + mem::size_of::<E>());

        let allocation_count = node_count + 1;
        let overhead_bytes = allocation_count * 16;

        let total_bytes = node_bytes + adjacency_bytes + edge_bytes + overhead_bytes;
        let useful_bytes = node_bytes + adjacency_bytes;
        let efficiency = if total_bytes > 0 {
            useful_bytes as f64 / total_bytes as f64
        } else {
            1.0
        };

        MemoryStats {
            total_bytes,
            node_bytes,
            edge_bytes,
            adjacency_bytes,
            overhead_bytes,
            efficiency,
        }
    }

    pub fn profile_digraph<N, E, Ix>(graph: &DiGraph<N, E, Ix>) -> MemoryStats
    where
        N: crate::base::Node + std::fmt::Debug,
        E: crate::base::EdgeWeight,
        Ix: petgraph::graph::IndexType,
    {
        let node_count = graph.node_count();
        let edge_count = graph.edge_count();

        let node_bytes = node_count
            * (mem::size_of::<N>() + mem::size_of::<petgraph::graph::NodeIndex<Ix>>())
            + mem::size_of::<std::collections::HashMap<N, petgraph::graph::NodeIndex<Ix>>>();

        let mut adjacency_bytes = 0;
        for node in graph.nodes() {
            if let Ok(successors) = graph.successors(node) {
                adjacency_bytes +=
                    successors.len() * mem::size_of::<E>() + mem::size_of::<Vec<E>>();
            }
            if let Ok(predecessors) = graph.predecessors(node) {
                adjacency_bytes +=
                    predecessors.len() * mem::size_of::<E>() + mem::size_of::<Vec<E>>();
            }
        }

        let edge_bytes = edge_count * (mem::size_of::<N>() * 2 + mem::size_of::<E>());

        let allocation_count = node_count * 2 + 1;
        let overhead_bytes = allocation_count * 16;

        let total_bytes = node_bytes + adjacency_bytes + edge_bytes + overhead_bytes;
        let useful_bytes = node_bytes + adjacency_bytes;
        let efficiency = if total_bytes > 0 {
            useful_bytes as f64 / total_bytes as f64
        } else {
            1.0
        };

        MemoryStats {
            total_bytes,
            node_bytes,
            edge_bytes,
            adjacency_bytes,
            overhead_bytes,
            efficiency,
        }
    }

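    /// Rough a-priori estimate of the memory an adjacency-list graph of the
    /// given size would need, assuming `usize` node ids and `f64` weights.
    ///
    /// A hedged example (ignored doctest; exact byte counts depend on the
    /// platform's pointer width):
    ///
    /// ```ignore
    /// let undirected = MemoryProfiler::estimate_memory(1_000, 5_000, false);
    /// let directed = MemoryProfiler::estimate_memory(1_000, 5_000, true);
    /// assert!(directed > undirected); // directed storage keeps both directions
    /// ```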
    pub fn estimate_memory(nodes: usize, edges: usize, directed: bool) -> usize {
        let _avg_degree = if nodes > 0 {
            edges as f64 / nodes as f64
        } else {
            0.0
        };

        let node_bytes = nodes * mem::size_of::<usize>();

        let edge_entry_size = mem::size_of::<(usize, f64)>();
        let adjacency_multiplier = if directed { 2.0 } else { 1.0 };
        let adjacency_bytes =
            (edges as f64 * adjacency_multiplier * edge_entry_size as f64) as usize;

        let vec_overhead =
            nodes * mem::size_of::<Vec<(usize, f64)>>() * if directed { 2 } else { 1 };

        let overhead = (nodes + edges / 100) * 16;

        node_bytes + adjacency_bytes + vec_overhead + overhead
    }

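    /// Estimates how much adjacency-list capacity is wasted, assuming each
    /// list grows to the next power of two (minimum 4), as modelled below.
    ///
    /// Illustrative sketch (ignored doctest; `graph` is assumed to be any
    /// `Graph` built elsewhere in this crate):
    ///
    /// ```ignore
    /// let report = MemoryProfiler::analyze_fragmentation(&graph);
    /// if report.fragmentation_ratio > 0.3 {
    ///     println!("~{} bytes lost to over-allocation", report.wasted_bytes);
    /// }
    /// ```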
    pub fn analyze_fragmentation<N, E, Ix>(graph: &Graph<N, E, Ix>) -> FragmentationReport
    where
        N: crate::base::Node + std::fmt::Debug,
        E: crate::base::EdgeWeight,
        Ix: petgraph::graph::IndexType,
    {
        let mut degree_distribution = HashMap::new();
        let mut total_capacity = 0;
        let mut total_used = 0;

        for node in graph.nodes() {
            let degree = graph.degree(node);
            *degree_distribution.entry(degree).or_insert(0) += 1;

            let capacity = degree.next_power_of_two().max(4);
            total_capacity += capacity;
            total_used += degree;
        }

        let fragmentation = if total_capacity > 0 {
            1.0 - (total_used as f64 / total_capacity as f64)
        } else {
            0.0
        };

        FragmentationReport {
            degree_distribution,
            total_capacity,
            total_used,
            fragmentation_ratio: fragmentation,
            wasted_bytes: (total_capacity - total_used) * mem::size_of::<(N, E)>(),
        }
    }
}

#[derive(Debug)]
pub struct FragmentationReport {
    pub degree_distribution: HashMap<usize, usize>,
    pub total_capacity: usize,
    pub total_used: usize,
    pub fragmentation_ratio: f64,
    pub wasted_bytes: usize,
}

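/// Builder that collects nodes and edges up front so a `Graph<usize, f64>` can
/// be assembled in one pass.
///
/// Sketch of the intended call pattern (ignored doctest; mirrors
/// `test_optimized_builder` below):
///
/// ```ignore
/// let mut builder = OptimizedGraphBuilder::new()
///     .reserve_nodes(1_000)
///     .reserve_edges(2_000)
///     .with_estimated_edges_per_node(2);
/// for i in 0..1_000 {
///     builder.add_node(i);
/// }
/// for i in 0..999 {
///     builder.add_edge(i, i + 1, 1.0);
/// }
/// let graph = builder.build().expect("all endpoints were added as nodes");
/// ```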
pub struct OptimizedGraphBuilder {
    nodes: Vec<usize>,
    edges: Vec<(usize, usize, f64)>,
    estimated_edges_per_node: Option<usize>,
}

impl Default for OptimizedGraphBuilder {
    fn default() -> Self {
        Self::new()
    }
}

impl OptimizedGraphBuilder {
    pub fn new() -> Self {
        Self {
            nodes: Vec::new(),
            edges: Vec::new(),
            estimated_edges_per_node: None,
        }
    }

    pub fn with_estimated_edges_per_node(mut self, edges_per_node: usize) -> Self {
        self.estimated_edges_per_node = Some(edges_per_node);
        self
    }

    pub fn reserve_nodes(mut self, capacity: usize) -> Self {
        self.nodes.reserve(capacity);
        self
    }

    pub fn reserve_edges(mut self, capacity: usize) -> Self {
        self.edges.reserve(capacity);
        self
    }

    pub fn add_node(&mut self, node: usize) {
        self.nodes.push(node);
    }

    pub fn add_edge(&mut self, from: usize, to: usize, weight: f64) {
        self.edges.push((from, to, weight));
    }

    pub fn build(self) -> Result<Graph<usize, f64>, String> {
        let mut graph = Graph::new();

        // The estimated_edges_per_node hint is currently informational only;
        // nodes are inserted the same way whether or not it was provided.
        let _estimated_edges_per_node = self.estimated_edges_per_node;
        for &node in &self.nodes {
            let _ = graph.add_node(node);
        }

        for (from, to, weight) in self.edges {
            graph
                .add_edge(from, to, weight)
                .map_err(|e| format!("Failed to add edge: {e:?}"))?;
        }

        Ok(graph)
    }
}

#[derive(Debug)]
pub struct OptimizationSuggestions {
    pub suggestions: Vec<String>,
    pub potential_savings: usize,
}

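/// Turns a [`MemoryStats`] and [`FragmentationReport`] pair into human-readable
/// tuning hints.
///
/// Hedged example combining the profiling helpers above (ignored doctest):
///
/// ```ignore
/// let stats = MemoryProfiler::profile_graph(&graph);
/// let fragmentation = MemoryProfiler::analyze_fragmentation(&graph);
/// let advice = suggest_optimizations(&stats, &fragmentation);
/// for hint in &advice.suggestions {
///     println!("hint: {hint}");
/// }
/// println!("could reclaim ~{} bytes", advice.potential_savings);
/// ```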
#[allow(dead_code)]
pub fn suggest_optimizations(
    stats: &MemoryStats,
    fragmentation: &FragmentationReport,
) -> OptimizationSuggestions {
    let mut suggestions = Vec::new();
    let mut potential_savings = 0;

    if stats.efficiency < 0.7 {
        suggestions.push(format!(
            "Low memory efficiency ({:.1}%). Consider using a more compact representation.",
            stats.efficiency * 100.0
        ));
    }

    if fragmentation.fragmentation_ratio > 0.3 {
        suggestions.push(format!(
            "High fragmentation ({:.1}%). Pre-allocate adjacency lists based on expected degree.",
            fragmentation.fragmentation_ratio * 100.0
        ));
        potential_savings += fragmentation.wasted_bytes;
    }

    let max_degree = fragmentation
        .degree_distribution
        .keys()
        .max()
        .copied()
        .unwrap_or(0);
    // The degree distribution maps degree -> node count, so the node count is
    // the sum of its values; dividing by the number of distinct degrees would
    // skew the average. Average degree = total adjacency entries / node count.
    let node_count: usize = fragmentation.degree_distribution.values().sum();
    let avg_degree = if node_count > 0 {
        fragmentation.total_used as f64 / node_count as f64
    } else {
        0.0
    };

    if max_degree > avg_degree as usize * 10 {
        suggestions.push(
            "Highly skewed degree distribution. Consider using a hybrid representation \
             with different storage for high-degree nodes."
                .to_string(),
        );
    }

    if avg_degree < 5.0 {
        suggestions.push(
            "Very sparse graph. Consider using a sparse matrix representation \
             or compressed adjacency lists."
                .to_string(),
        );
    }

    OptimizationSuggestions {
        suggestions,
        potential_savings,
    }
}

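/// Samples this process's memory usage on a background thread via `sysinfo`.
///
/// Typical start/stop cycle (ignored doctest; `expensive_workload` is a
/// placeholder for whatever is being measured):
///
/// ```ignore
/// let mut profiler = RealTimeMemoryProfiler::new();
/// profiler.start_monitoring(Duration::from_millis(10));
/// expensive_workload();
/// let metrics = profiler.stop_monitoring();
/// println!("{}", profiler.generate_report());
/// assert!(metrics.sample_count > 0);
/// ```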
#[derive(Debug)]
pub struct RealTimeMemoryProfiler {
    pid: u32,
    system: Arc<Mutex<System>>,
    is_monitoring: Arc<Mutex<bool>>,
    samples: Arc<Mutex<Vec<MemorySample>>>,
    monitor_thread: Option<thread::JoinHandle<()>>,
}

#[derive(Debug, Clone)]
pub struct MemorySample {
    pub timestamp: Instant,
    pub physical_memory: u64,
    pub virtual_memory: u64,
    pub growth_rate: i64,
}

#[derive(Debug, Clone)]
pub struct MemoryMetrics {
    pub peak_memory: u64,
    pub average_memory: u64,
    pub growth_rate: f64,
    pub duration: Duration,
    pub sample_count: usize,
    pub memory_variance: f64,
}

impl Default for RealTimeMemoryProfiler {
    fn default() -> Self {
        Self::new()
    }
}

impl RealTimeMemoryProfiler {
    pub fn new() -> Self {
        let mut system = System::new_all();
        system.refresh_all();

        let pid = std::process::id();

        Self {
            pid,
            system: Arc::new(Mutex::new(system)),
            is_monitoring: Arc::new(Mutex::new(false)),
            samples: Arc::new(Mutex::new(Vec::new())),
            monitor_thread: None,
        }
    }

    pub fn start_monitoring(&mut self, sample_interval: Duration) {
        let mut is_monitoring = self.is_monitoring.lock().unwrap();
        if *is_monitoring {
            return;
        }
        *is_monitoring = true;
        drop(is_monitoring);

        self.samples.lock().unwrap().clear();

        let pid = self.pid;
        let system = Arc::clone(&self.system);
        let is_monitoring = Arc::clone(&self.is_monitoring);
        let samples = Arc::clone(&self.samples);

        let handle = thread::spawn(move || {
            let mut last_memory = 0u64;
            let _start_time = Instant::now();

            while *is_monitoring.lock().unwrap() {
                {
                    let mut sys = system.lock().unwrap();
                    sys.refresh_processes(
                        sysinfo::ProcessesToUpdate::Some(&[(pid as usize).into()]),
                        false,
                    );

                    if let Some(process) = sys.process((pid as usize).into()) {
                        // sysinfo >= 0.30 reports process memory in bytes, so no
                        // KiB-to-bytes conversion is needed here.
                        let physical_memory = process.memory();
                        let virtual_memory = process.virtual_memory();
                        let growth_rate = physical_memory as i64 - last_memory as i64;

                        let sample = MemorySample {
                            timestamp: Instant::now(),
                            physical_memory,
                            virtual_memory,
                            growth_rate,
                        };

                        samples.lock().unwrap().push(sample);
                        last_memory = physical_memory;
                    }
                }

                thread::sleep(sample_interval);
            }
        });

        self.monitor_thread = Some(handle);
    }

    pub fn stop_monitoring(&mut self) -> MemoryMetrics {
        {
            let mut is_monitoring = self.is_monitoring.lock().unwrap();
            *is_monitoring = false;
        }

        if let Some(handle) = self.monitor_thread.take() {
            let _ = handle.join();
        }

        let samples = self.samples.lock().unwrap();
        self.calculate_metrics(&samples)
    }

    pub fn get_current_metrics(&self) -> MemoryMetrics {
        let samples = self.samples.lock().unwrap();
        self.calculate_metrics(&samples)
    }

    fn calculate_metrics(&self, samples: &[MemorySample]) -> MemoryMetrics {
        if samples.is_empty() {
            return MemoryMetrics {
                peak_memory: 0,
                average_memory: 0,
                growth_rate: 0.0,
                duration: Duration::new(0, 0),
                sample_count: 0,
                memory_variance: 0.0,
            };
        }

        let peak_memory = samples.iter().map(|s| s.physical_memory).max().unwrap_or(0);
        let total_memory: u64 = samples.iter().map(|s| s.physical_memory).sum();
        let average_memory = total_memory / samples.len() as u64;

        let duration = if samples.len() > 1 {
            samples
                .last()
                .unwrap()
                .timestamp
                .duration_since(samples[0].timestamp)
        } else {
            Duration::new(0, 0)
        };

        let growth_rate = if duration.as_secs_f64() > 0.0 && samples.len() > 1 {
            let total_growth =
                samples.last().unwrap().physical_memory as i64 - samples[0].physical_memory as i64;
            total_growth as f64 / duration.as_secs_f64()
        } else {
            0.0
        };

        let variance = if samples.len() > 1 {
            let mean = average_memory as f64;
            let sum_sq_diff: f64 = samples
                .iter()
                .map(|s| {
                    let diff = s.physical_memory as f64 - mean;
                    diff * diff
                })
                .sum();
            sum_sq_diff / samples.len() as f64
        } else {
            0.0
        };

        MemoryMetrics {
            peak_memory,
            average_memory,
            growth_rate,
            duration,
            sample_count: samples.len(),
            memory_variance: variance,
        }
    }

    pub fn detect_memory_leaks(&self, threshold_bytes_per_sec: f64) -> bool {
        let metrics = self.get_current_metrics();
        metrics.growth_rate > threshold_bytes_per_sec && metrics.sample_count > 10
    }

    pub fn generate_report(&self) -> String {
        let metrics = self.get_current_metrics();
        let _samples = self.samples.lock().unwrap();

        let mut report = String::new();
        report.push_str("=== Memory Usage Report ===\n");
        report.push_str(&format!(
            "Peak Memory: {:.2} MB\n",
            metrics.peak_memory as f64 / 1_048_576.0
        ));
        report.push_str(&format!(
            "Average Memory: {:.2} MB\n",
            metrics.average_memory as f64 / 1_048_576.0
        ));
        report.push_str(&format!(
            "Growth Rate: {:.2} KB/s\n",
            metrics.growth_rate / 1024.0
        ));
        report.push_str(&format!(
            "Duration: {:.2} seconds\n",
            metrics.duration.as_secs_f64()
        ));
        report.push_str(&format!("Samples: {}\n", metrics.sample_count));
        report.push_str(&format!(
            "Memory Variance: {:.2} MB²\n",
            metrics.memory_variance / 1_048_576.0_f64.powi(2)
        ));

        if metrics.growth_rate > 1024.0 * 1024.0 {
            report.push_str("\n⚠️ WARNING: High memory growth rate detected!\n");
        }

        if metrics.memory_variance > (100.0 * 1_048_576.0_f64).powi(2) {
            report.push_str("⚠️ WARNING: High memory usage variance!\n");
        }

        report
    }
}

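/// Convenience wrappers that run a closure while a [`RealTimeMemoryProfiler`]
/// samples memory in the background.
///
/// Minimal sketch (ignored doctest; the closure stands in for any operation):
///
/// ```ignore
/// let (len, metrics) = AdvancedMemoryAnalyzer::analyze_operation_memory(
///     "build vector",
///     || (0..100_000).collect::<Vec<u64>>().len(),
///     Duration::from_millis(5),
/// );
/// assert_eq!(len, 100_000);
/// println!("peak: {:.2} MB", metrics.peak_memory as f64 / 1_048_576.0);
/// ```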
pub struct AdvancedMemoryAnalyzer;

impl AdvancedMemoryAnalyzer {
    pub fn analyze_operation_memory<F, R>(
        operation_name: &str,
        operation: F,
        sample_interval: Duration,
    ) -> (R, MemoryMetrics)
    where
        F: FnOnce() -> R,
    {
        let mut profiler = RealTimeMemoryProfiler::new();
        profiler.start_monitoring(sample_interval);

        let result = operation();

        let metrics = profiler.stop_monitoring();

        println!(
            "Memory analysis for '{}': Peak={:.2}MB, Growth={:.2}KB/s",
            operation_name,
            metrics.peak_memory as f64 / 1_048_576.0,
            metrics.growth_rate / 1024.0
        );

        (result, metrics)
    }

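    /// Runs two closures back to back and prints a peak-memory comparison.
    /// Both closures must return the same type.
    ///
    /// Hedged sketch (ignored doctest; `build_with_vec` / `build_with_csr` are
    /// hypothetical constructors for the two variants being compared):
    ///
    /// ```ignore
    /// let (baseline, compact) = AdvancedMemoryAnalyzer::compare_implementations(
    ///     || build_with_vec(),
    ///     || build_with_csr(),
    ///     "Vec adjacency",
    ///     "CSR adjacency",
    /// );
    /// println!("baseline peak: {} bytes", baseline.peak_memory);
    /// println!("compact peak:  {} bytes", compact.peak_memory);
    /// ```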
    pub fn compare_implementations<F1, F2, R>(
        impl1: F1,
        impl2: F2,
        impl1_name: &str,
        impl2_name: &str,
    ) -> (MemoryMetrics, MemoryMetrics)
    where
        F1: FnOnce() -> R,
        F2: FnOnce() -> R,
    {
        let sample_interval = Duration::from_millis(10);

        let (_, metrics1) = Self::analyze_operation_memory(impl1_name, impl1, sample_interval);
        let (_, metrics2) = Self::analyze_operation_memory(impl2_name, impl2, sample_interval);

        println!("\n=== Implementation Comparison ===");
        println!(
            "{} - Peak: {:.2}MB, Growth: {:.2}KB/s",
            impl1_name,
            metrics1.peak_memory as f64 / 1_048_576.0,
            metrics1.growth_rate / 1024.0
        );
        println!(
            "{} - Peak: {:.2}MB, Growth: {:.2}KB/s",
            impl2_name,
            metrics2.peak_memory as f64 / 1_048_576.0,
            metrics2.growth_rate / 1024.0
        );

        let memory_improvement = if metrics2.peak_memory > 0 {
            ((metrics1.peak_memory as f64 - metrics2.peak_memory as f64)
                / metrics2.peak_memory as f64)
                * 100.0
        } else {
            0.0
        };

        println!("Memory improvement: {memory_improvement:.1}%");

        (metrics1, metrics2)
    }

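    /// Runs the same algorithm at several input sizes and records the memory
    /// metrics for each run.
    ///
    /// Sketch (ignored doctest; uses the crate's `generators` module the same
    /// way the tests below do):
    ///
    /// ```ignore
    /// let results = AdvancedMemoryAnalyzer::scale_analysis(
    ///     |n| Box::new(move || { let _ = generators::complete_graph(n); }) as Box<dyn FnOnce()>,
    ///     vec![100, 500, 1_000],
    ///     "complete_graph",
    /// );
    /// for (n, metrics) in &results {
    ///     println!("n={n}: peak {} bytes", metrics.peak_memory);
    /// }
    /// ```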
    pub fn scale_analysis<F>(
        algorithm_factory: F,
        scales: Vec<usize>,
        algorithm_name: &str,
    ) -> Vec<(usize, MemoryMetrics)>
    where
        F: Fn(usize) -> Box<dyn FnOnce()>,
    {
        let mut results = Vec::new();

        println!("\n=== Scaling Analysis for {algorithm_name} ===");
        for scale in scales {
            let operation = algorithm_factory(scale);
            let (_, metrics) = Self::analyze_operation_memory(
                &format!("{algorithm_name} (n={scale})"),
                operation,
                Duration::from_millis(5),
            );
            results.push((scale, metrics));
        }

        println!("\nScaling Summary:");
        for (scale, metrics) in &results {
            println!(
                "  n={}: {:.2}MB peak",
                scale,
                metrics.peak_memory as f64 / 1_048_576.0
            );
        }

        results
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::generators;

    #[test]
    fn test_memory_profiling() {
        let graph = generators::complete_graph(100).unwrap();
        let stats = MemoryProfiler::profile_graph(&graph);

        assert!(stats.total_bytes > 0);
        assert!(stats.efficiency > 0.0 && stats.efficiency <= 1.0);
        assert_eq!(graph.node_count(), 100);
        assert_eq!(graph.edge_count(), 100 * 99 / 2);
    }

    #[test]
    fn test_memory_estimation() {
        let estimated = MemoryProfiler::estimate_memory(1000, 5000, false);
        assert!(estimated > 0);

        let estimated_directed = MemoryProfiler::estimate_memory(1000, 5000, true);
        assert!(estimated_directed > estimated);
    }

    #[test]
    fn test_fragmentation_analysis() {
        let mut graph: Graph<i32, f64> = Graph::new();

        for i in 0..100 {
            graph.add_node(i);
        }

        for i in 0..10 {
            for j in 10..20 {
                if i != j {
                    graph.add_edge(i, j, 1.0).unwrap();
                }
            }
        }

        let report = MemoryProfiler::analyze_fragmentation(&graph);
        assert!(report.fragmentation_ratio >= 0.0 && report.fragmentation_ratio <= 1.0);
    }

    #[test]
    fn test_optimized_builder() {
        let mut builder = OptimizedGraphBuilder::new()
            .reserve_nodes(100)
            .reserve_edges(200)
            .with_estimated_edges_per_node(4);

        for i in 0..100 {
            builder.add_node(i);
        }

        for i in 0..99 {
            builder.add_edge(i, i + 1, 1.0);
        }

        let graph = builder.build().unwrap();
        assert_eq!(graph.node_count(), 100);
        assert_eq!(graph.edge_count(), 99);
    }

    #[test]
    fn test_real_time_memory_profiler() {
        use std::time::Duration;

        let mut profiler = RealTimeMemoryProfiler::new();
        profiler.start_monitoring(Duration::from_millis(10));

        std::thread::sleep(Duration::from_millis(50));

        let metrics = profiler.stop_monitoring();
        assert!(metrics.sample_count > 0);
        assert!(metrics.peak_memory > 0);
    }

    #[test]
    fn test_advanced_memory_analyzer() {
        let (result, metrics) = AdvancedMemoryAnalyzer::analyze_operation_memory(
            "test_operation",
            || {
                let mut v = Vec::new();
                for i in 0..100_000 {
                    v.push(i);
                }
                std::thread::sleep(Duration::from_millis(10));
                v.len()
            },
            Duration::from_millis(5),
        );

        assert_eq!(result, 100_000);
        assert!(metrics.sample_count > 0);
    }

    #[test]
    fn test_memory_leak_detection() {
        let profiler = RealTimeMemoryProfiler::new();

        let has_leak = profiler.detect_memory_leaks(1024.0 * 1024.0);
        assert!(!has_leak);
    }

    #[test]
    fn test_optimization_suggestions() {
        let fragmentation = FragmentationReport {
            degree_distribution: std::collections::HashMap::new(),
            total_capacity: 1000,
            total_used: 500,
            fragmentation_ratio: 0.5,
            wasted_bytes: 500 * mem::size_of::<(usize, f64)>(),
        };

        let stats = MemoryStats {
            total_bytes: 10000,
            node_bytes: 2000,
            edge_bytes: 3000,
            adjacency_bytes: 4000,
            overhead_bytes: 1000,
            efficiency: 0.5,
        };

        let suggestions = suggest_optimizations(&stats, &fragmentation);
        assert!(!suggestions.suggestions.is_empty());
        assert!(suggestions.potential_savings > 0);
    }
}