1use crate::{Edge, FxGraph, Node, TorshResult};
10use serde::{Deserialize, Serialize};
12use std::collections::HashMap;
13use std::fs::{File, OpenOptions};
14use std::io::{Read, Seek, SeekFrom, Write};
15use std::path::{Path, PathBuf};
16use std::sync::{Arc, Mutex, RwLock};
17
/// Graph storage that keeps small graphs cached in memory and streams large
/// ones to/from a backing file, based on a configurable size threshold.
pub struct MemoryMappedGraph {
    // Path of the backing file used for persistence.
    file_path: PathBuf,
    // Decoded file header (counts and payload byte offsets).
    header: GraphHeader,
    // Cached serialized node payload (populated only below the threshold).
    node_data: Option<Arc<RwLock<Vec<u8>>>>,
    // Cached serialized edge payload (populated only below the threshold).
    edge_data: Option<Arc<RwLock<Vec<u8>>>>,
    // Payload size in bytes above which data is streamed from disk
    // instead of being cached in memory.
    memory_threshold: usize,
}
26
/// On-disk metadata describing a serialized graph.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GraphHeader {
    /// Format version (currently 1).
    pub version: u32,
    /// Number of nodes in the stored graph.
    pub node_count: u32,
    /// Number of edges in the stored graph.
    pub edge_count: u32,
    /// Byte offset of the serialized node payload within the file.
    pub node_data_offset: u64,
    /// Byte offset of the serialized edge payload within the file.
    pub edge_data_offset: u64,
    /// Free-form key/value metadata.
    pub metadata: HashMap<String, String>,
}
37
/// Summary of a graph's estimated memory consumption.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryUsageReport {
    /// Total estimated size (nodes + edges + metadata), in bytes.
    pub total_size_bytes: usize,
    /// Estimated bytes consumed by node payloads.
    pub node_data_size: usize,
    /// Estimated bytes consumed by edge payloads.
    pub edge_data_size: usize,
    /// Estimated bytes for input/output bookkeeping and fixed overhead.
    pub metadata_size: usize,
    /// Ratio of an idealized footprint to the actual estimate, capped at 1.0.
    pub memory_efficiency: f64,
    /// Human-readable tuning suggestions.
    pub recommendations: Vec<String>,
    /// Largest memory consumers, sorted descending by size.
    pub hotspots: Vec<MemoryHotspot>,
}
49
/// A single outsized memory consumer detected in a graph.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryHotspot {
    /// Human-readable description of where the memory is consumed.
    pub location: String,
    /// Estimated size of this hotspot, in bytes.
    pub size_bytes: usize,
    /// Share of the total graph size, as a percentage.
    pub percentage: f64,
    /// Suggested ways to shrink this hotspot.
    pub optimization_suggestions: Vec<String>,
}
58
59impl MemoryMappedGraph {
60 pub fn new<P: AsRef<Path>>(file_path: P, memory_threshold: usize) -> TorshResult<Self> {
62 let file_path = file_path.as_ref().to_path_buf();
63
64 let header = GraphHeader {
66 version: 1,
67 node_count: 0,
68 edge_count: 0,
69 node_data_offset: std::mem::size_of::<GraphHeader>() as u64,
70 edge_data_offset: 0, metadata: HashMap::new(),
72 };
73
74 Ok(Self {
75 file_path,
76 header,
77 node_data: None,
78 edge_data: None,
79 memory_threshold,
80 })
81 }
82
83 pub fn save_graph(&mut self, graph: &FxGraph) -> TorshResult<()> {
85 let node_data = self.serialize_nodes(graph)?;
87 let edge_data = self.serialize_edges(graph)?;
88
89 self.header.node_count = graph.node_count() as u32;
91 self.header.edge_count = graph.edge_count() as u32;
92 self.header.edge_data_offset = self.header.node_data_offset + node_data.len() as u64;
93
94 let total_size = node_data.len() + edge_data.len();
96
97 if total_size > self.memory_threshold {
98 self.save_memory_mapped(&node_data, &edge_data)?;
99 } else {
100 self.save_in_memory(node_data, edge_data);
101 }
102
103 Ok(())
104 }
105
106 pub fn load_graph(&mut self) -> TorshResult<FxGraph> {
108 if !self.file_path.exists() {
109 return Err(torsh_core::error::TorshError::IoError(
110 "Memory-mapped file does not exist".to_string(),
111 ));
112 }
113
114 self.load_header()?;
116
117 let file_size = std::fs::metadata(&self.file_path)
119 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?
120 .len() as usize;
121
122 if file_size > self.memory_threshold {
123 self.load_memory_mapped()
124 } else {
125 self.load_from_file()
126 }
127 }
128
129 fn save_memory_mapped(&mut self, node_data: &[u8], edge_data: &[u8]) -> TorshResult<()> {
131 let mut file = OpenOptions::new()
132 .create(true)
133 .write(true)
134 .truncate(true)
135 .open(&self.file_path)
136 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
137
138 let header_data = oxicode::serde::encode_to_vec(&self.header, oxicode::config::standard())
140 .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
141 file.write_all(&header_data)
142 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
143
144 file.write_all(node_data)
146 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
147
148 file.write_all(edge_data)
150 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
151
152 file.sync_all()
153 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
154
155 Ok(())
156 }
157
158 fn save_in_memory(&mut self, node_data: Vec<u8>, edge_data: Vec<u8>) {
160 self.node_data = Some(Arc::new(RwLock::new(node_data.clone())));
162 self.edge_data = Some(Arc::new(RwLock::new(edge_data.clone())));
163
164 if let Err(_) = self.write_to_file(&node_data, &edge_data) {
166 }
168 }
169
170 fn write_to_file(&mut self, node_data: &[u8], edge_data: &[u8]) -> TorshResult<()> {
172 use std::io::Write;
173
174 let mut file = File::create(&self.file_path)
175 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
176
177 let header_data = oxicode::serde::encode_to_vec(&self.header, oxicode::config::standard())
179 .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
180
181 let header_size_bytes = 4u32; let header_size = header_data.len() as u64;
183
184 self.header.node_data_offset = header_size_bytes as u64 + header_size;
186 self.header.edge_data_offset = self.header.node_data_offset + node_data.len() as u64;
187
188 let updated_header_data =
190 oxicode::serde::encode_to_vec(&self.header, oxicode::config::standard())
191 .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
192
193 let header_size = updated_header_data.len() as u32;
195 file.write_all(&header_size.to_le_bytes())
196 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
197
198 file.write_all(&updated_header_data)
200 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
201
202 file.write_all(node_data)
204 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
205
206 file.write_all(edge_data)
208 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
209
210 file.flush()
211 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
212
213 Ok(())
214 }
215
216 fn load_header(&mut self) -> TorshResult<()> {
218 use std::io::Read;
219
220 let mut file = File::open(&self.file_path)
221 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
222
223 let mut size_bytes = [0u8; 4];
225 file.read_exact(&mut size_bytes)
226 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
227
228 let header_size = u32::from_le_bytes(size_bytes) as usize;
229
230 let mut header_data = vec![0u8; header_size];
232 file.read_exact(&mut header_data)
233 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
234
235 let (header, _): (GraphHeader, usize) =
236 oxicode::serde::decode_from_slice(&header_data, oxicode::config::standard())
237 .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
238 self.header = header;
239
240 Ok(())
241 }
242
243 fn load_memory_mapped(&self) -> TorshResult<FxGraph> {
245 let mut file = File::open(&self.file_path)
246 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
247
248 file.seek(SeekFrom::Start(self.header.node_data_offset))
250 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
251
252 let node_data = self.read_chunked_data(&mut file, self.header.node_count as usize)?;
254
255 file.seek(SeekFrom::Start(self.header.edge_data_offset))
257 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
258
259 let edge_data = self.read_chunked_data(&mut file, self.header.edge_count as usize)?;
260
261 self.deserialize_graph(&node_data, &edge_data)
262 }
263
264 fn load_from_file(&self) -> TorshResult<FxGraph> {
266 let mut file = File::open(&self.file_path)
267 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
268
269 file.seek(SeekFrom::Start(self.header.node_data_offset))
271 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
272
273 let mut node_data = Vec::new();
274 let mut edge_data = Vec::new();
275
276 let node_end = self.header.edge_data_offset;
278 let node_size = (node_end - self.header.node_data_offset) as usize;
279 node_data.resize(node_size, 0);
280 file.read_exact(&mut node_data)
281 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
282
283 file.read_to_end(&mut edge_data)
285 .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
286
287 self.deserialize_graph(&node_data, &edge_data)
288 }
289
290 fn read_chunked_data(&self, file: &mut File, _item_count: usize) -> TorshResult<Vec<u8>> {
292 let mut data = Vec::new();
293 let mut buffer = [0u8; 4096]; loop {
296 match file.read(&mut buffer) {
297 Ok(0) => break, Ok(n) => data.extend_from_slice(&buffer[..n]),
299 Err(e) => return Err(torsh_core::error::TorshError::IoError(e.to_string())),
300 }
301 }
302
303 Ok(data)
304 }
305
306 fn serialize_nodes(&self, graph: &FxGraph) -> TorshResult<Vec<u8>> {
308 let nodes: Vec<(usize, Node)> = graph
309 .nodes()
310 .map(|(idx, node)| (idx.index(), node.clone()))
311 .collect();
312
313 oxicode::serde::encode_to_vec(&nodes, oxicode::config::standard())
314 .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))
315 }
316
317 fn serialize_edges(&self, graph: &FxGraph) -> TorshResult<Vec<u8>> {
319 let edges: Vec<(usize, usize, Edge)> = graph
320 .graph
321 .edge_references()
322 .map(|edge_ref| {
323 use petgraph::visit::EdgeRef;
324 (
325 edge_ref.source().index(),
326 edge_ref.target().index(),
327 edge_ref.weight().clone(),
328 )
329 })
330 .collect();
331
332 oxicode::serde::encode_to_vec(&edges, oxicode::config::standard())
333 .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))
334 }
335
336 fn deserialize_graph(&self, node_data: &[u8], edge_data: &[u8]) -> TorshResult<FxGraph> {
338 let (nodes, _): (Vec<(usize, Node)>, usize) =
339 oxicode::serde::decode_from_slice(node_data, oxicode::config::standard())
340 .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
341
342 let (edges, _): (Vec<(usize, usize, Edge)>, usize) =
343 oxicode::serde::decode_from_slice(edge_data, oxicode::config::standard())
344 .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
345
346 let mut graph = petgraph::Graph::new();
348 let mut node_mapping = HashMap::new();
349
350 for (original_idx, node) in nodes {
352 let new_idx = graph.add_node(node);
353 node_mapping.insert(original_idx, new_idx);
354 }
355
356 for (src_idx, target_idx, edge) in edges {
358 if let (Some(&src), Some(&target)) =
359 (node_mapping.get(&src_idx), node_mapping.get(&target_idx))
360 {
361 graph.add_edge(src, target, edge);
362 }
363 }
364
365 Ok(FxGraph {
367 graph,
368 inputs: Vec::new(), outputs: Vec::new(), })
371 }
372}
373
/// Stateless analyzer that estimates graph memory usage and flags hotspots.
pub struct MemoryAnalyzer;
376
377impl MemoryAnalyzer {
378 pub fn analyze_memory_usage(graph: &FxGraph) -> MemoryUsageReport {
380 let node_data_size = Self::calculate_node_data_size(graph);
381 let edge_data_size = Self::calculate_edge_data_size(graph);
382 let metadata_size = Self::calculate_metadata_size(graph);
383 let total_size_bytes = node_data_size + edge_data_size + metadata_size;
384
385 let ideal_size = graph.node_count() * 32 + graph.edge_count() * 16; let memory_efficiency = if total_size_bytes > 0 {
388 (ideal_size as f64 / total_size_bytes as f64).min(1.0)
389 } else {
390 1.0
391 };
392
393 let hotspots = Self::identify_memory_hotspots(graph, total_size_bytes);
394 let recommendations = Self::generate_memory_recommendations(graph, memory_efficiency);
395
396 MemoryUsageReport {
397 total_size_bytes,
398 node_data_size,
399 edge_data_size,
400 metadata_size,
401 memory_efficiency,
402 recommendations,
403 hotspots,
404 }
405 }
406
407 fn calculate_node_data_size(graph: &FxGraph) -> usize {
409 let mut total_size = 0;
410
411 for (_, node) in graph.nodes() {
412 total_size += match node {
413 Node::Input(name) => 16 + name.len(), Node::Call(op, args) => {
415 32 + op.len() + args.iter().map(|arg| arg.len()).sum::<usize>()
416 }
417 Node::Output => 8,
418 Node::Conditional {
419 condition,
420 then_branch,
421 else_branch,
422 } => {
423 64 + condition.len()
424 + then_branch.iter().map(|s| s.len()).sum::<usize>()
425 + else_branch.iter().map(|s| s.len()).sum::<usize>()
426 }
427 Node::Loop {
428 condition,
429 body,
430 loop_vars,
431 } => {
432 64 + condition.len()
433 + body.iter().map(|s| s.len()).sum::<usize>()
434 + loop_vars.iter().map(|s| s.len()).sum::<usize>()
435 }
436 Node::Merge { inputs } => 32 + inputs.iter().map(|s| s.len()).sum::<usize>(),
437 Node::GetAttr { target, attr } => 24 + target.len() + attr.len(),
438 };
439 }
440
441 total_size
442 }
443
444 fn calculate_edge_data_size(graph: &FxGraph) -> usize {
446 let mut total_size = 0;
447
448 for edge_ref in graph.graph.edge_references() {
449 total_size += 16 + edge_ref.weight().name.len(); }
452
453 total_size
454 }
455
456 fn calculate_metadata_size(graph: &FxGraph) -> usize {
458 graph.inputs().len() * 8 + graph.outputs().len() * 8 + 64
460 }
461
462 fn identify_memory_hotspots(graph: &FxGraph, total_size: usize) -> Vec<MemoryHotspot> {
464 let mut hotspots = Vec::new();
465
466 for (idx, node) in graph.nodes() {
468 let node_size = match node {
469 Node::Call(op, args)
470 if op.len() > 100 || args.iter().any(|arg| arg.len() > 100) =>
471 {
472 op.len() + args.iter().map(|arg| arg.len()).sum::<usize>()
473 }
474 Node::Conditional {
475 condition,
476 then_branch,
477 else_branch,
478 } if condition.len() > 50 || then_branch.len() > 20 || else_branch.len() > 20 => {
479 condition.len()
480 + then_branch.iter().map(|s| s.len()).sum::<usize>()
481 + else_branch.iter().map(|s| s.len()).sum::<usize>()
482 }
483 _ => 0,
484 };
485
486 if node_size > 1000 {
487 let percentage = (node_size as f64 / total_size as f64) * 100.0;
489 hotspots.push(MemoryHotspot {
490 location: format!("Node {idx:?}"),
491 size_bytes: node_size,
492 percentage,
493 optimization_suggestions: vec![
494 "Consider using references instead of owned strings".to_string(),
495 "Use string interning for repeated values".to_string(),
496 ],
497 });
498 }
499 }
500
501 for (idx, _) in graph.nodes() {
503 let edge_count = graph.graph.edges(idx).count();
504 if edge_count > 50 {
505 let edge_size = edge_count * 24; let percentage = (edge_size as f64 / total_size as f64) * 100.0;
507 hotspots.push(MemoryHotspot {
508 location: format!("Node {idx:?} edges"),
509 size_bytes: edge_size,
510 percentage,
511 optimization_suggestions: vec![
512 "Consider reducing fan-out through intermediate nodes".to_string(),
513 "Use broadcast operations instead of multiple edges".to_string(),
514 ],
515 });
516 }
517 }
518
519 hotspots.sort_by(|a, b| b.size_bytes.cmp(&a.size_bytes));
520 hotspots
521 }
522
523 fn generate_memory_recommendations(graph: &FxGraph, efficiency: f64) -> Vec<String> {
525 let mut recommendations = Vec::new();
526
527 if efficiency < 0.5 {
528 recommendations.push("Consider using more compact node representations".to_string());
529 }
530
531 if graph.node_count() > 10000 {
532 recommendations.push("Use memory-mapped storage for large graphs".to_string());
533 }
534
535 if graph.edge_count() > graph.node_count() * 3 {
536 recommendations
537 .push("High edge density detected - consider graph simplification".to_string());
538 }
539
540 recommendations.push("Enable compression for graph serialization".to_string());
541 recommendations.push("Use lazy loading for large subgraphs".to_string());
542 recommendations.push("Consider graph partitioning for distributed processing".to_string());
543
544 recommendations
545 }
546}
547
/// Tracks graph memory reservations and picks allocation strategies,
/// optionally enforcing a hard memory limit.
pub struct AdaptiveMemoryManager {
    // Configured strategy; `Adaptive` is resolved per allocation.
    allocation_strategy: AllocationStrategy,
    // Usage/limit ratio above which adaptive allocation turns conservative.
    memory_pressure_threshold: f64,
    // Total bytes currently reserved across allocations.
    current_memory_usage: Arc<Mutex<usize>>,
    // Optional hard cap (bytes) on reserved memory.
    max_memory_limit: Option<usize>,
}
555
/// How aggressively graph memory is allocated and cached.
#[derive(Debug, Clone)]
pub enum AllocationStrategy {
    /// Minimize resident memory (mapping, compression, lazy loading).
    Conservative,
    /// Middle ground between footprint and speed.
    Balanced,
    /// Keep everything hot in memory for speed.
    Aggressive,
    /// Resolve to one of the above from memory pressure and request size.
    Adaptive,
}
564impl AdaptiveMemoryManager {
565 pub fn new(strategy: AllocationStrategy) -> Self {
567 Self {
568 allocation_strategy: strategy,
569 memory_pressure_threshold: 0.8, current_memory_usage: Arc::new(Mutex::new(0)),
571 max_memory_limit: None,
572 }
573 }
574
575 pub fn with_memory_limit(mut self, limit: usize) -> Self {
577 self.max_memory_limit = Some(limit);
578 self
579 }
580
581 pub fn allocate_graph_memory(&self, graph: &FxGraph) -> TorshResult<GraphMemoryLayout> {
583 let memory_report = MemoryAnalyzer::analyze_memory_usage(graph);
584 let required_memory = memory_report.total_size_bytes;
585
586 if let Some(limit) = self.max_memory_limit {
588 let current_usage = *self
589 .current_memory_usage
590 .lock()
591 .expect("lock should not be poisoned");
592 if current_usage + required_memory > limit {
593 return Err(torsh_core::error::TorshError::InvalidArgument(
594 "Memory limit exceeded".to_string(),
595 ));
596 }
597 }
598
599 let strategy = self.determine_strategy(required_memory);
601 let layout = self.create_memory_layout(graph, strategy)?;
602
603 *self
605 .current_memory_usage
606 .lock()
607 .expect("lock should not be poisoned") += required_memory;
608
609 Ok(layout)
610 }
611
612 pub fn deallocate_graph_memory(&self, layout: &GraphMemoryLayout) {
614 let mut current_usage = self
615 .current_memory_usage
616 .lock()
617 .expect("lock should not be poisoned");
618 *current_usage = current_usage.saturating_sub(layout.total_size);
619 }
620
621 fn determine_strategy(&self, required_memory: usize) -> AllocationStrategy {
623 match &self.allocation_strategy {
624 AllocationStrategy::Adaptive => {
625 let current_usage = *self
626 .current_memory_usage
627 .lock()
628 .expect("lock should not be poisoned");
629 let memory_pressure = if let Some(limit) = self.max_memory_limit {
630 current_usage as f64 / limit as f64
631 } else {
632 0.0 };
634
635 if memory_pressure > self.memory_pressure_threshold {
636 AllocationStrategy::Conservative
637 } else if required_memory > 1_000_000 {
638 AllocationStrategy::Balanced
640 } else {
641 AllocationStrategy::Aggressive
642 }
643 }
644 strategy => strategy.clone(),
645 }
646 }
647
648 fn create_memory_layout(
650 &self,
651 graph: &FxGraph,
652 strategy: AllocationStrategy,
653 ) -> TorshResult<GraphMemoryLayout> {
654 let memory_report = MemoryAnalyzer::analyze_memory_usage(graph);
655
656 let layout = match strategy {
657 AllocationStrategy::Conservative => GraphMemoryLayout {
658 total_size: memory_report.total_size_bytes,
659 use_memory_mapping: memory_report.total_size_bytes > 100_000, compression_enabled: true,
661 lazy_loading: true,
662 chunk_size: 4096, prefetch_enabled: false,
664 },
665 AllocationStrategy::Balanced => GraphMemoryLayout {
666 total_size: memory_report.total_size_bytes,
667 use_memory_mapping: memory_report.total_size_bytes > 1_000_000, compression_enabled: memory_report.total_size_bytes > 500_000, lazy_loading: false,
670 chunk_size: 8192, prefetch_enabled: true,
672 },
673 AllocationStrategy::Aggressive => GraphMemoryLayout {
674 total_size: memory_report.total_size_bytes,
675 use_memory_mapping: false, compression_enabled: false,
677 lazy_loading: false,
678 chunk_size: 16384, prefetch_enabled: true,
680 },
681 AllocationStrategy::Adaptive => {
682 self.create_memory_layout(graph, AllocationStrategy::Balanced)?
684 }
685 };
686
687 Ok(layout)
688 }
689}
690
/// Concrete layout parameters chosen for a graph allocation.
#[derive(Debug, Clone)]
pub struct GraphMemoryLayout {
    /// Estimated total size of the graph data, in bytes.
    pub total_size: usize,
    /// Whether the data should be memory-mapped rather than heap-resident.
    pub use_memory_mapping: bool,
    /// Whether serialized data should be compressed.
    pub compression_enabled: bool,
    /// Whether subgraphs should be loaded on demand.
    pub lazy_loading: bool,
    /// I/O chunk size in bytes.
    pub chunk_size: usize,
    /// Whether sequential chunks should be prefetched.
    pub prefetch_enabled: bool,
}
701
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{Edge, FxGraph, Node};
    use tempfile::NamedTempFile;

    // Round-trips a tiny input -> relu -> output graph through the
    // MemoryMappedGraph save/load path and checks the node count survives.
    #[test]
    fn test_memory_mapped_graph() {
        let temp_file = NamedTempFile::new().unwrap();
        let temp_path = temp_file.path().to_path_buf();

        // Ensure the backing file exists before the handle is created.
        std::fs::write(&temp_path, b"").unwrap();

        let mut mmap_graph = MemoryMappedGraph::new(&temp_path, 1000).unwrap();

        // Build the three-node graph: Input("x") -> Call("relu") -> Output.
        let mut graph = FxGraph::new();
        let input = graph.graph.add_node(Node::Input("x".to_string()));
        let relu = graph
            .graph
            .add_node(Node::Call("relu".to_string(), vec!["x".to_string()]));
        let output = graph.graph.add_node(Node::Output);

        graph.graph.add_edge(
            input,
            relu,
            Edge {
                name: "x".to_string(),
            },
        );
        graph.graph.add_edge(
            relu,
            output,
            Edge {
                name: "relu_out".to_string(),
            },
        );
        graph.inputs.push(input);
        graph.outputs.push(output);

        mmap_graph.save_graph(&graph).expect("Failed to save graph");
        let loaded_graph = mmap_graph.load_graph().expect("Failed to load graph");

        // Indices are remapped on load, so only the count is compared.
        assert_eq!(loaded_graph.node_count(), graph.node_count());

        // Best-effort cleanup (NamedTempFile also removes it on drop).
        let _ = std::fs::remove_file(&temp_path);
    }

    // Runs the analyzer on a small graph and sanity-checks the report
    // fields are populated.
    #[test]
    fn test_memory_analyzer() {
        let mut graph = FxGraph::new();
        let input = graph.graph.add_node(Node::Input("x".to_string()));
        let relu = graph
            .graph
            .add_node(Node::Call("relu".to_string(), vec!["x".to_string()]));
        let output = graph.graph.add_node(Node::Output);

        graph.graph.add_edge(
            input,
            relu,
            Edge {
                name: "x".to_string(),
            },
        );
        graph.graph.add_edge(
            relu,
            output,
            Edge {
                name: "relu_out".to_string(),
            },
        );

        let report = MemoryAnalyzer::analyze_memory_usage(&graph);

        assert!(report.total_size_bytes > 0);
        assert!(report.memory_efficiency > 0.0);
        assert!(!report.recommendations.is_empty());
    }

    // Allocates and frees layout memory through an adaptive manager with a
    // 1 MB limit; the tiny graph should always fit.
    #[test]
    fn test_adaptive_memory_manager() {
        let manager =
            AdaptiveMemoryManager::new(AllocationStrategy::Adaptive).with_memory_limit(1_000_000);

        let mut graph = FxGraph::new();
        let _input = graph.graph.add_node(Node::Input("x".to_string()));
        let _output = graph.graph.add_node(Node::Output);

        let layout = manager.allocate_graph_memory(&graph).unwrap();
        assert!(layout.total_size > 0);

        manager.deallocate_graph_memory(&layout);
    }

    // A node with a very large operation name (~1.2 KB, above the
    // 1000-byte cutoff) should be reported as a memory hotspot.
    #[test]
    fn test_memory_hotspot_detection() {
        let mut graph = FxGraph::new();

        let large_op = "very_long_operation_name_that_should_be_detected_as_hotspot".repeat(20);
        let _large_node = graph
            .graph
            .add_node(Node::Call(large_op, vec!["arg".to_string()]));

        let report = MemoryAnalyzer::analyze_memory_usage(&graph);

        assert!(!report.hotspots.is_empty());
    }
}