// torsh_fx/memory_optimization.rs
//! Memory optimization utilities for FX graphs
//!
//! This module provides advanced memory management capabilities including:
//! - Memory-mapped file support for large graph serialization
//! - Graph memory usage analysis and optimization
//! - Adaptive memory allocation strategies
//! - Memory-efficient graph representations

9use crate::{Edge, FxGraph, Node, TorshResult};
10// use petgraph::graph::NodeIndex; // Unused
11use serde::{Deserialize, Serialize};
12use std::collections::HashMap;
13use std::fs::{File, OpenOptions};
14use std::io::{Read, Seek, SeekFrom, Write};
15use std::path::{Path, PathBuf};
16use std::sync::{Arc, Mutex, RwLock};
17
/// Memory-mapped graph storage for large graphs
///
/// NOTE(review): no OS-level mmap is used anywhere in this file; "memory
/// mapping" here means the graph is streamed to/from the backing file in
/// chunks. Graphs whose serialized size is at or below `memory_threshold`
/// are additionally cached in memory.
pub struct MemoryMappedGraph {
    /// Backing file holding the serialized header, node, and edge sections.
    file_path: PathBuf,
    /// Header (counts, section offsets, metadata) for the backing file.
    header: GraphHeader,
    /// In-memory cache of serialized node data; populated only for small graphs.
    node_data: Option<Arc<RwLock<Vec<u8>>>>,
    /// In-memory cache of serialized edge data; populated only for small graphs.
    edge_data: Option<Arc<RwLock<Vec<u8>>>>,
    memory_threshold: usize, // Size threshold for memory mapping
}
26
/// Header information for memory-mapped graphs
///
/// Serialized at the start of the backing file; the offsets locate the
/// node and edge sections within that file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GraphHeader {
    /// Format version of the stored file.
    pub version: u32,
    /// Number of nodes in the stored graph.
    pub node_count: u32,
    /// Number of edges in the stored graph.
    pub edge_count: u32,
    /// Byte offset of the node section within the file.
    pub node_data_offset: u64,
    /// Byte offset of the edge section within the file.
    pub edge_data_offset: u64,
    /// Free-form key/value metadata.
    pub metadata: HashMap<String, String>,
}
37
/// Memory usage analysis for graphs
///
/// All sizes are heuristic estimates computed by `MemoryAnalyzer`, not
/// measured allocations.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryUsageReport {
    /// Estimated total footprint: node + edge + metadata bytes.
    pub total_size_bytes: usize,
    /// Estimated bytes consumed by node payloads.
    pub node_data_size: usize,
    /// Estimated bytes consumed by edge payloads.
    pub edge_data_size: usize,
    /// Estimated bytes consumed by graph metadata (input/output bookkeeping).
    pub metadata_size: usize,
    /// Heuristic ratio of an "ideal" compact size to the actual size.
    pub memory_efficiency: f64, // 0.0 to 1.0
    /// Human-readable optimization suggestions.
    pub recommendations: Vec<String>,
    /// Largest memory consumers detected in the graph.
    pub hotspots: Vec<MemoryHotspot>,
}
49
/// Memory hotspot identification
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryHotspot {
    /// Human-readable description of where the hotspot is (e.g. a node index).
    pub location: String,
    /// Estimated size of the hotspot in bytes.
    pub size_bytes: usize,
    /// Hotspot size as a percentage of the graph's total estimated size.
    pub percentage: f64,
    /// Suggested ways to shrink this hotspot.
    pub optimization_suggestions: Vec<String>,
}
58
59impl MemoryMappedGraph {
60    /// Create a new memory-mapped graph
61    pub fn new<P: AsRef<Path>>(file_path: P, memory_threshold: usize) -> TorshResult<Self> {
62        let file_path = file_path.as_ref().to_path_buf();
63
64        // Create header with default values
65        let header = GraphHeader {
66            version: 1,
67            node_count: 0,
68            edge_count: 0,
69            node_data_offset: std::mem::size_of::<GraphHeader>() as u64,
70            edge_data_offset: 0, // Will be calculated
71            metadata: HashMap::new(),
72        };
73
74        Ok(Self {
75            file_path,
76            header,
77            node_data: None,
78            edge_data: None,
79            memory_threshold,
80        })
81    }
82
83    /// Save a graph using memory-mapped storage
84    pub fn save_graph(&mut self, graph: &FxGraph) -> TorshResult<()> {
85        // Serialize graph data
86        let node_data = self.serialize_nodes(graph)?;
87        let edge_data = self.serialize_edges(graph)?;
88
89        // Update header
90        self.header.node_count = graph.node_count() as u32;
91        self.header.edge_count = graph.edge_count() as u32;
92        self.header.edge_data_offset = self.header.node_data_offset + node_data.len() as u64;
93
94        // Determine if we should use memory mapping
95        let total_size = node_data.len() + edge_data.len();
96
97        if total_size > self.memory_threshold {
98            self.save_memory_mapped(&node_data, &edge_data)?;
99        } else {
100            self.save_in_memory(node_data, edge_data);
101        }
102
103        Ok(())
104    }
105
106    /// Load a graph from memory-mapped storage
107    pub fn load_graph(&mut self) -> TorshResult<FxGraph> {
108        if !self.file_path.exists() {
109            return Err(torsh_core::error::TorshError::IoError(
110                "Memory-mapped file does not exist".to_string(),
111            ));
112        }
113
114        // Load header
115        self.load_header()?;
116
117        // Load data based on size
118        let file_size = std::fs::metadata(&self.file_path)
119            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?
120            .len() as usize;
121
122        if file_size > self.memory_threshold {
123            self.load_memory_mapped()
124        } else {
125            self.load_from_file()
126        }
127    }
128
129    /// Save using memory-mapped files
130    fn save_memory_mapped(&mut self, node_data: &[u8], edge_data: &[u8]) -> TorshResult<()> {
131        let mut file = OpenOptions::new()
132            .create(true)
133            .write(true)
134            .truncate(true)
135            .open(&self.file_path)
136            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
137
138        // Write header
139        let header_data = oxicode::serde::encode_to_vec(&self.header, oxicode::config::standard())
140            .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
141        file.write_all(&header_data)
142            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
143
144        // Write node data
145        file.write_all(node_data)
146            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
147
148        // Write edge data
149        file.write_all(edge_data)
150            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
151
152        file.sync_all()
153            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
154
155        Ok(())
156    }
157
158    /// Save in memory for small graphs
159    fn save_in_memory(&mut self, node_data: Vec<u8>, edge_data: Vec<u8>) {
160        // Store in memory
161        self.node_data = Some(Arc::new(RwLock::new(node_data.clone())));
162        self.edge_data = Some(Arc::new(RwLock::new(edge_data.clone())));
163
164        // Also write to file for persistence
165        if let Err(_) = self.write_to_file(&node_data, &edge_data) {
166            // If file write fails, just continue with in-memory storage
167        }
168    }
169
170    /// Write header and data to file
171    fn write_to_file(&mut self, node_data: &[u8], edge_data: &[u8]) -> TorshResult<()> {
172        use std::io::Write;
173
174        let mut file = File::create(&self.file_path)
175            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
176
177        // Calculate correct offsets accounting for the header size prefix
178        let header_data = oxicode::serde::encode_to_vec(&self.header, oxicode::config::standard())
179            .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
180
181        let header_size_bytes = 4u32; // u32 for header size
182        let header_size = header_data.len() as u64;
183
184        // Update header with correct offsets
185        self.header.node_data_offset = header_size_bytes as u64 + header_size;
186        self.header.edge_data_offset = self.header.node_data_offset + node_data.len() as u64;
187
188        // Re-serialize header with correct offsets
189        let updated_header_data =
190            oxicode::serde::encode_to_vec(&self.header, oxicode::config::standard())
191                .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
192
193        // Write header size first (as u32)
194        let header_size = updated_header_data.len() as u32;
195        file.write_all(&header_size.to_le_bytes())
196            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
197
198        // Write header data
199        file.write_all(&updated_header_data)
200            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
201
202        // Write node data
203        file.write_all(node_data)
204            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
205
206        // Write edge data
207        file.write_all(edge_data)
208            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
209
210        file.flush()
211            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
212
213        Ok(())
214    }
215
216    /// Load header from file
217    fn load_header(&mut self) -> TorshResult<()> {
218        use std::io::Read;
219
220        let mut file = File::open(&self.file_path)
221            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
222
223        // Read header size first (4 bytes for u32)
224        let mut size_bytes = [0u8; 4];
225        file.read_exact(&mut size_bytes)
226            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
227
228        let header_size = u32::from_le_bytes(size_bytes) as usize;
229
230        // Read header data of exact size
231        let mut header_data = vec![0u8; header_size];
232        file.read_exact(&mut header_data)
233            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
234
235        let (header, _): (GraphHeader, usize) =
236            oxicode::serde::decode_from_slice(&header_data, oxicode::config::standard())
237                .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
238        self.header = header;
239
240        Ok(())
241    }
242
243    /// Load graph using memory mapping
244    fn load_memory_mapped(&self) -> TorshResult<FxGraph> {
245        let mut file = File::open(&self.file_path)
246            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
247
248        // Seek to node data
249        file.seek(SeekFrom::Start(self.header.node_data_offset))
250            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
251
252        // Read node data in chunks to save memory
253        let node_data = self.read_chunked_data(&mut file, self.header.node_count as usize)?;
254
255        // Seek to edge data
256        file.seek(SeekFrom::Start(self.header.edge_data_offset))
257            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
258
259        let edge_data = self.read_chunked_data(&mut file, self.header.edge_count as usize)?;
260
261        self.deserialize_graph(&node_data, &edge_data)
262    }
263
264    /// Load graph from file into memory
265    fn load_from_file(&self) -> TorshResult<FxGraph> {
266        let mut file = File::open(&self.file_path)
267            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
268
269        // Skip header
270        file.seek(SeekFrom::Start(self.header.node_data_offset))
271            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
272
273        let mut node_data = Vec::new();
274        let mut edge_data = Vec::new();
275
276        // Read all node data
277        let node_end = self.header.edge_data_offset;
278        let node_size = (node_end - self.header.node_data_offset) as usize;
279        node_data.resize(node_size, 0);
280        file.read_exact(&mut node_data)
281            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
282
283        // Read all edge data
284        file.read_to_end(&mut edge_data)
285            .map_err(|e| torsh_core::error::TorshError::IoError(e.to_string()))?;
286
287        self.deserialize_graph(&node_data, &edge_data)
288    }
289
290    /// Read data in chunks to save memory
291    fn read_chunked_data(&self, file: &mut File, _item_count: usize) -> TorshResult<Vec<u8>> {
292        let mut data = Vec::new();
293        let mut buffer = [0u8; 4096]; // 4KB chunks
294
295        loop {
296            match file.read(&mut buffer) {
297                Ok(0) => break, // EOF
298                Ok(n) => data.extend_from_slice(&buffer[..n]),
299                Err(e) => return Err(torsh_core::error::TorshError::IoError(e.to_string())),
300            }
301        }
302
303        Ok(data)
304    }
305
306    /// Serialize nodes to binary format
307    fn serialize_nodes(&self, graph: &FxGraph) -> TorshResult<Vec<u8>> {
308        let nodes: Vec<(usize, Node)> = graph
309            .nodes()
310            .map(|(idx, node)| (idx.index(), node.clone()))
311            .collect();
312
313        oxicode::serde::encode_to_vec(&nodes, oxicode::config::standard())
314            .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))
315    }
316
317    /// Serialize edges to binary format
318    fn serialize_edges(&self, graph: &FxGraph) -> TorshResult<Vec<u8>> {
319        let edges: Vec<(usize, usize, Edge)> = graph
320            .graph
321            .edge_references()
322            .map(|edge_ref| {
323                use petgraph::visit::EdgeRef;
324                (
325                    edge_ref.source().index(),
326                    edge_ref.target().index(),
327                    edge_ref.weight().clone(),
328                )
329            })
330            .collect();
331
332        oxicode::serde::encode_to_vec(&edges, oxicode::config::standard())
333            .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))
334    }
335
336    /// Deserialize graph from binary data
337    fn deserialize_graph(&self, node_data: &[u8], edge_data: &[u8]) -> TorshResult<FxGraph> {
338        let (nodes, _): (Vec<(usize, Node)>, usize) =
339            oxicode::serde::decode_from_slice(node_data, oxicode::config::standard())
340                .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
341
342        let (edges, _): (Vec<(usize, usize, Edge)>, usize) =
343            oxicode::serde::decode_from_slice(edge_data, oxicode::config::standard())
344                .map_err(|e| torsh_core::error::TorshError::SerializationError(e.to_string()))?;
345
346        // Reconstruct graph
347        let mut graph = petgraph::Graph::new();
348        let mut node_mapping = HashMap::new();
349
350        // Add nodes
351        for (original_idx, node) in nodes {
352            let new_idx = graph.add_node(node);
353            node_mapping.insert(original_idx, new_idx);
354        }
355
356        // Add edges
357        for (src_idx, target_idx, edge) in edges {
358            if let (Some(&src), Some(&target)) =
359                (node_mapping.get(&src_idx), node_mapping.get(&target_idx))
360            {
361                graph.add_edge(src, target, edge);
362            }
363        }
364
365        // Create FxGraph (inputs and outputs would need to be stored separately)
366        Ok(FxGraph {
367            graph,
368            inputs: Vec::new(),  // Would need to be restored from metadata
369            outputs: Vec::new(), // Would need to be restored from metadata
370        })
371    }
372}
373
/// Memory usage analyzer for graphs
///
/// Stateless: all functionality is exposed as associated functions.
pub struct MemoryAnalyzer;
376
377impl MemoryAnalyzer {
378    /// Analyze memory usage of a graph
379    pub fn analyze_memory_usage(graph: &FxGraph) -> MemoryUsageReport {
380        let node_data_size = Self::calculate_node_data_size(graph);
381        let edge_data_size = Self::calculate_edge_data_size(graph);
382        let metadata_size = Self::calculate_metadata_size(graph);
383        let total_size_bytes = node_data_size + edge_data_size + metadata_size;
384
385        // Calculate memory efficiency (heuristic)
386        let ideal_size = graph.node_count() * 32 + graph.edge_count() * 16; // Rough estimate
387        let memory_efficiency = if total_size_bytes > 0 {
388            (ideal_size as f64 / total_size_bytes as f64).min(1.0)
389        } else {
390            1.0
391        };
392
393        let hotspots = Self::identify_memory_hotspots(graph, total_size_bytes);
394        let recommendations = Self::generate_memory_recommendations(graph, memory_efficiency);
395
396        MemoryUsageReport {
397            total_size_bytes,
398            node_data_size,
399            edge_data_size,
400            metadata_size,
401            memory_efficiency,
402            recommendations,
403            hotspots,
404        }
405    }
406
407    /// Calculate size of node data
408    fn calculate_node_data_size(graph: &FxGraph) -> usize {
409        let mut total_size = 0;
410
411        for (_, node) in graph.nodes() {
412            total_size += match node {
413                Node::Input(name) => 16 + name.len(), // Base size + string length
414                Node::Call(op, args) => {
415                    32 + op.len() + args.iter().map(|arg| arg.len()).sum::<usize>()
416                }
417                Node::Output => 8,
418                Node::Conditional {
419                    condition,
420                    then_branch,
421                    else_branch,
422                } => {
423                    64 + condition.len()
424                        + then_branch.iter().map(|s| s.len()).sum::<usize>()
425                        + else_branch.iter().map(|s| s.len()).sum::<usize>()
426                }
427                Node::Loop {
428                    condition,
429                    body,
430                    loop_vars,
431                } => {
432                    64 + condition.len()
433                        + body.iter().map(|s| s.len()).sum::<usize>()
434                        + loop_vars.iter().map(|s| s.len()).sum::<usize>()
435                }
436                Node::Merge { inputs } => 32 + inputs.iter().map(|s| s.len()).sum::<usize>(),
437                Node::GetAttr { target, attr } => 24 + target.len() + attr.len(),
438            };
439        }
440
441        total_size
442    }
443
444    /// Calculate size of edge data
445    fn calculate_edge_data_size(graph: &FxGraph) -> usize {
446        let mut total_size = 0;
447
448        for edge_ref in graph.graph.edge_references() {
449            // use petgraph::visit::EdgeRef; // Unused import
450            total_size += 16 + edge_ref.weight().name.len(); // Base edge size + name
451        }
452
453        total_size
454    }
455
456    /// Calculate size of metadata
457    fn calculate_metadata_size(graph: &FxGraph) -> usize {
458        // Rough estimate for graph metadata
459        graph.inputs().len() * 8 + graph.outputs().len() * 8 + 64
460    }
461
462    /// Identify memory hotspots in the graph
463    fn identify_memory_hotspots(graph: &FxGraph, total_size: usize) -> Vec<MemoryHotspot> {
464        let mut hotspots = Vec::new();
465
466        // Check for nodes with large string data
467        for (idx, node) in graph.nodes() {
468            let node_size = match node {
469                Node::Call(op, args)
470                    if op.len() > 100 || args.iter().any(|arg| arg.len() > 100) =>
471                {
472                    op.len() + args.iter().map(|arg| arg.len()).sum::<usize>()
473                }
474                Node::Conditional {
475                    condition,
476                    then_branch,
477                    else_branch,
478                } if condition.len() > 50 || then_branch.len() > 20 || else_branch.len() > 20 => {
479                    condition.len()
480                        + then_branch.iter().map(|s| s.len()).sum::<usize>()
481                        + else_branch.iter().map(|s| s.len()).sum::<usize>()
482                }
483                _ => 0,
484            };
485
486            if node_size > 1000 {
487                // Threshold for considering as hotspot
488                let percentage = (node_size as f64 / total_size as f64) * 100.0;
489                hotspots.push(MemoryHotspot {
490                    location: format!("Node {idx:?}"),
491                    size_bytes: node_size,
492                    percentage,
493                    optimization_suggestions: vec![
494                        "Consider using references instead of owned strings".to_string(),
495                        "Use string interning for repeated values".to_string(),
496                    ],
497                });
498            }
499        }
500
501        // Check for high fan-out nodes (many edges)
502        for (idx, _) in graph.nodes() {
503            let edge_count = graph.graph.edges(idx).count();
504            if edge_count > 50 {
505                let edge_size = edge_count * 24; // Approximate edge size
506                let percentage = (edge_size as f64 / total_size as f64) * 100.0;
507                hotspots.push(MemoryHotspot {
508                    location: format!("Node {idx:?} edges"),
509                    size_bytes: edge_size,
510                    percentage,
511                    optimization_suggestions: vec![
512                        "Consider reducing fan-out through intermediate nodes".to_string(),
513                        "Use broadcast operations instead of multiple edges".to_string(),
514                    ],
515                });
516            }
517        }
518
519        hotspots.sort_by(|a, b| b.size_bytes.cmp(&a.size_bytes));
520        hotspots
521    }
522
523    /// Generate memory optimization recommendations
524    fn generate_memory_recommendations(graph: &FxGraph, efficiency: f64) -> Vec<String> {
525        let mut recommendations = Vec::new();
526
527        if efficiency < 0.5 {
528            recommendations.push("Consider using more compact node representations".to_string());
529        }
530
531        if graph.node_count() > 10000 {
532            recommendations.push("Use memory-mapped storage for large graphs".to_string());
533        }
534
535        if graph.edge_count() > graph.node_count() * 3 {
536            recommendations
537                .push("High edge density detected - consider graph simplification".to_string());
538        }
539
540        recommendations.push("Enable compression for graph serialization".to_string());
541        recommendations.push("Use lazy loading for large subgraphs".to_string());
542        recommendations.push("Consider graph partitioning for distributed processing".to_string());
543
544        recommendations
545    }
546}
547
/// Adaptive memory allocation strategies
///
/// Tracks an approximate running total of reserved graph memory and, for the
/// `Adaptive` strategy, resolves it to a concrete strategy per allocation.
pub struct AdaptiveMemoryManager {
    /// Strategy requested at construction; `Adaptive` is resolved per request.
    allocation_strategy: AllocationStrategy,
    /// Fraction of `max_memory_limit` (0.0-1.0) above which allocation
    /// becomes conservative.
    memory_pressure_threshold: f64,
    /// Approximate bytes currently reserved via `allocate_graph_memory`.
    current_memory_usage: Arc<Mutex<usize>>,
    /// Optional hard cap in bytes; `None` means unlimited.
    max_memory_limit: Option<usize>,
}
555
/// Memory allocation strategies; see
/// `AdaptiveMemoryManager::create_memory_layout` for the concrete layout
/// each variant produces.
#[derive(Debug, Clone)]
pub enum AllocationStrategy {
    Conservative, // Minimize memory usage
    Balanced,     // Balance memory and performance
    Aggressive,   // Optimize for performance
    Adaptive,     // Change strategy based on conditions
}
563
564impl AdaptiveMemoryManager {
565    /// Create a new adaptive memory manager
566    pub fn new(strategy: AllocationStrategy) -> Self {
567        Self {
568            allocation_strategy: strategy,
569            memory_pressure_threshold: 0.8, // 80% memory usage
570            current_memory_usage: Arc::new(Mutex::new(0)),
571            max_memory_limit: None,
572        }
573    }
574
575    /// Set maximum memory limit
576    pub fn with_memory_limit(mut self, limit: usize) -> Self {
577        self.max_memory_limit = Some(limit);
578        self
579    }
580
581    /// Allocate memory for graph operations
582    pub fn allocate_graph_memory(&self, graph: &FxGraph) -> TorshResult<GraphMemoryLayout> {
583        let memory_report = MemoryAnalyzer::analyze_memory_usage(graph);
584        let required_memory = memory_report.total_size_bytes;
585
586        // Check memory limits
587        if let Some(limit) = self.max_memory_limit {
588            let current_usage = *self
589                .current_memory_usage
590                .lock()
591                .expect("lock should not be poisoned");
592            if current_usage + required_memory > limit {
593                return Err(torsh_core::error::TorshError::InvalidArgument(
594                    "Memory limit exceeded".to_string(),
595                ));
596            }
597        }
598
599        // Determine allocation strategy
600        let strategy = self.determine_strategy(required_memory);
601        let layout = self.create_memory_layout(graph, strategy)?;
602
603        // Update memory usage
604        *self
605            .current_memory_usage
606            .lock()
607            .expect("lock should not be poisoned") += required_memory;
608
609        Ok(layout)
610    }
611
612    /// Deallocate memory for graph operations
613    pub fn deallocate_graph_memory(&self, layout: &GraphMemoryLayout) {
614        let mut current_usage = self
615            .current_memory_usage
616            .lock()
617            .expect("lock should not be poisoned");
618        *current_usage = current_usage.saturating_sub(layout.total_size);
619    }
620
621    /// Determine the best allocation strategy based on current conditions
622    fn determine_strategy(&self, required_memory: usize) -> AllocationStrategy {
623        match &self.allocation_strategy {
624            AllocationStrategy::Adaptive => {
625                let current_usage = *self
626                    .current_memory_usage
627                    .lock()
628                    .expect("lock should not be poisoned");
629                let memory_pressure = if let Some(limit) = self.max_memory_limit {
630                    current_usage as f64 / limit as f64
631                } else {
632                    0.0 // No limit, assume low pressure
633                };
634
635                if memory_pressure > self.memory_pressure_threshold {
636                    AllocationStrategy::Conservative
637                } else if required_memory > 1_000_000 {
638                    // 1MB threshold
639                    AllocationStrategy::Balanced
640                } else {
641                    AllocationStrategy::Aggressive
642                }
643            }
644            strategy => strategy.clone(),
645        }
646    }
647
648    /// Create memory layout based on strategy
649    fn create_memory_layout(
650        &self,
651        graph: &FxGraph,
652        strategy: AllocationStrategy,
653    ) -> TorshResult<GraphMemoryLayout> {
654        let memory_report = MemoryAnalyzer::analyze_memory_usage(graph);
655
656        let layout = match strategy {
657            AllocationStrategy::Conservative => GraphMemoryLayout {
658                total_size: memory_report.total_size_bytes,
659                use_memory_mapping: memory_report.total_size_bytes > 100_000, // 100KB threshold
660                compression_enabled: true,
661                lazy_loading: true,
662                chunk_size: 4096, // 4KB chunks
663                prefetch_enabled: false,
664            },
665            AllocationStrategy::Balanced => GraphMemoryLayout {
666                total_size: memory_report.total_size_bytes,
667                use_memory_mapping: memory_report.total_size_bytes > 1_000_000, // 1MB threshold
668                compression_enabled: memory_report.total_size_bytes > 500_000,  // 500KB threshold
669                lazy_loading: false,
670                chunk_size: 8192, // 8KB chunks
671                prefetch_enabled: true,
672            },
673            AllocationStrategy::Aggressive => GraphMemoryLayout {
674                total_size: memory_report.total_size_bytes,
675                use_memory_mapping: false, // Keep in memory
676                compression_enabled: false,
677                lazy_loading: false,
678                chunk_size: 16384, // 16KB chunks
679                prefetch_enabled: true,
680            },
681            AllocationStrategy::Adaptive => {
682                // Should not reach here as adaptive is resolved above
683                self.create_memory_layout(graph, AllocationStrategy::Balanced)?
684            }
685        };
686
687        Ok(layout)
688    }
689}
690
/// Memory layout configuration for graphs
///
/// Produced by `AdaptiveMemoryManager::allocate_graph_memory`; callers use
/// it to decide how graph data should be stored and accessed.
#[derive(Debug, Clone)]
pub struct GraphMemoryLayout {
    /// Estimated total size of the graph in bytes.
    pub total_size: usize,
    /// Whether graph data should be file-backed rather than kept in RAM.
    pub use_memory_mapping: bool,
    /// Whether serialized data should be compressed.
    pub compression_enabled: bool,
    /// Whether subgraphs should be loaded on demand.
    pub lazy_loading: bool,
    /// I/O chunk size in bytes.
    pub chunk_size: usize,
    /// Whether data should be prefetched ahead of use.
    pub prefetch_enabled: bool,
}
701
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{Edge, FxGraph, Node};
    use tempfile::NamedTempFile;

    /// Round-trips a small input -> relu -> output graph through
    /// `MemoryMappedGraph` and checks the node count survives.
    #[test]
    fn test_memory_mapped_graph() {
        let temp_file = NamedTempFile::new().unwrap();
        let temp_path = temp_file.path().to_path_buf();

        // Ensure the temporary file exists and is writable
        std::fs::write(&temp_path, b"").unwrap();

        // Threshold of 1000 bytes: this tiny graph takes the in-memory path.
        let mut mmap_graph = MemoryMappedGraph::new(&temp_path, 1000).unwrap();

        // Create test graph
        let mut graph = FxGraph::new();
        let input = graph.graph.add_node(Node::Input("x".to_string()));
        let relu = graph
            .graph
            .add_node(Node::Call("relu".to_string(), vec!["x".to_string()]));
        let output = graph.graph.add_node(Node::Output);

        graph.graph.add_edge(
            input,
            relu,
            Edge {
                name: "x".to_string(),
            },
        );
        graph.graph.add_edge(
            relu,
            output,
            Edge {
                name: "relu_out".to_string(),
            },
        );
        graph.inputs.push(input);
        graph.outputs.push(output);

        // Save and load - use expect to get better error messages
        mmap_graph.save_graph(&graph).expect("Failed to save graph");
        let loaded_graph = mmap_graph.load_graph().expect("Failed to load graph");

        assert_eq!(loaded_graph.node_count(), graph.node_count());

        // Clean up
        let _ = std::fs::remove_file(&temp_path);
    }

    /// A non-empty graph should yield a non-trivial report with at least
    /// the always-on recommendations.
    #[test]
    fn test_memory_analyzer() {
        let mut graph = FxGraph::new();
        let input = graph.graph.add_node(Node::Input("x".to_string()));
        let relu = graph
            .graph
            .add_node(Node::Call("relu".to_string(), vec!["x".to_string()]));
        let output = graph.graph.add_node(Node::Output);

        graph.graph.add_edge(
            input,
            relu,
            Edge {
                name: "x".to_string(),
            },
        );
        graph.graph.add_edge(
            relu,
            output,
            Edge {
                name: "relu_out".to_string(),
            },
        );

        let report = MemoryAnalyzer::analyze_memory_usage(&graph);

        assert!(report.total_size_bytes > 0);
        assert!(report.memory_efficiency > 0.0);
        assert!(!report.recommendations.is_empty());
    }

    /// Allocation for a tiny graph should fit under a 1 MB limit and
    /// deallocate cleanly.
    #[test]
    fn test_adaptive_memory_manager() {
        let manager =
            AdaptiveMemoryManager::new(AllocationStrategy::Adaptive).with_memory_limit(1_000_000); // 1MB limit

        let mut graph = FxGraph::new();
        let _input = graph.graph.add_node(Node::Input("x".to_string()));
        let _output = graph.graph.add_node(Node::Output);

        let layout = manager.allocate_graph_memory(&graph).unwrap();
        assert!(layout.total_size > 0);

        manager.deallocate_graph_memory(&layout);
    }

    /// A node with >1000 bytes of string payload must show up as a hotspot.
    #[test]
    fn test_memory_hotspot_detection() {
        let mut graph = FxGraph::new();

        // Create a node with large string data (>1000 bytes to trigger hotspot detection)
        let large_op = "very_long_operation_name_that_should_be_detected_as_hotspot".repeat(20);
        let _large_node = graph
            .graph
            .add_node(Node::Call(large_op, vec!["arg".to_string()]));

        let report = MemoryAnalyzer::analyze_memory_usage(&graph);

        // Should detect the large node as a hotspot
        assert!(!report.hotspots.is_empty());
    }
}