sqlitegraph 2.0.7

Embedded graph database with full ACID transactions, HNSW vector search, dual backend support, and a comprehensive graph algorithms library.
Documentation
//! Memory optimization strategies and access pattern management
//!
//! This module provides buffer optimization and access pattern analysis
//! for optimal memory performance.

use super::manager::MemoryResourceManager;
use super::types::AccessPatternHint;
use crate::backend::native::graph_file::buffers::ReadBuffer;

impl<'a> MemoryResourceManager<'a> {
    /// Replace the read buffer with one of `target_capacity`, but only when
    /// the target differs from the current capacity (avoids a pointless
    /// reallocation when the buffer is already the right size).
    fn apply_read_buffer_capacity(&mut self, target_capacity: usize) {
        if target_capacity != self.read_buffer.capacity {
            *self.read_buffer = ReadBuffer::with_capacity(target_capacity);
        }
    }

    /// Optimize buffer configurations for current workload
    ///
    /// Selects a base read-buffer size from the access-pattern hint, lets
    /// `ReadBuffer::adaptive_capacity` adjust it, and reallocates the read
    /// buffer only if the resulting capacity changed.
    ///
    /// # Arguments
    /// * `pattern_hint` - Hint about the access pattern for optimization
    pub fn optimize_buffers(&mut self, pattern_hint: AccessPatternHint) {
        let base = match pattern_hint {
            // Sequential access benefits from a larger read-ahead buffer.
            AccessPatternHint::Sequential => 8192,
            // Random access favors a small buffer for better cache locality.
            AccessPatternHint::Random => 256,
            // Mixed workloads use the default adaptive sizing.
            AccessPatternHint::Mixed => 512,
        };
        self.apply_read_buffer_capacity(ReadBuffer::adaptive_capacity(base));
    }

    /// Analyze access patterns from recent operations
    ///
    /// Simple heuristic on the number of buffered write operations: many
    /// pending operations suggest scattered (random) access, a few suggest
    /// a mixed workload, and near-zero suggests sequential access.
    ///
    /// This method can be extended to automatically detect access patterns
    /// based on the history of read/write operations.
    pub fn analyze_access_patterns(&self) -> AccessPatternHint {
        match self.write_buffer.operations.len() {
            n if n > 10 => AccessPatternHint::Random, // Many small scattered operations
            n if n > 2 => AccessPatternHint::Mixed,
            _ => AccessPatternHint::Sequential,
        }
    }

    /// Auto-optimize based on detected patterns
    ///
    /// Convenience wrapper: detect the current pattern, then retune buffers.
    pub fn auto_optimize(&mut self) {
        let detected_pattern = self.analyze_access_patterns();
        self.optimize_buffers(detected_pattern);
    }

    /// Get optimal buffer size for specific workload type
    ///
    /// # Arguments
    /// * `workload_type` - Workload label; unknown labels fall back to the
    ///   conservative default of 512.
    pub fn optimal_buffer_size_for_workload(&self, workload_type: &str) -> usize {
        match workload_type {
            "bulk_import" | "sequential_scan" => 8192,
            "random_access" | "oltp" => 256,
            // "mixed_workload", "default", and any unrecognized label all
            // receive the conservative middle ground.
            _ => 512,
        }
    }

    /// Estimate memory efficiency score
    ///
    /// Averages the read-buffer fill ratio (size/capacity) with the
    /// write-buffer fill ratio (pending operations/capacity). Either ratio
    /// is treated as 0.0 when the corresponding capacity is zero.
    ///
    /// Returns a score in the 0.0..=1.0 range (assuming neither buffer is
    /// over-filled beyond its capacity).
    pub fn memory_efficiency_score(&self) -> f32 {
        let read_utilization = if self.read_buffer.capacity > 0 {
            self.read_buffer.size as f32 / self.read_buffer.capacity as f32
        } else {
            0.0
        };

        let write_utilization = if self.write_buffer.capacity > 0 {
            self.write_buffer.operations.len() as f32 / self.write_buffer.capacity as f32
        } else {
            0.0
        };

        // Combine metrics into a score (0.0 to 1.0)
        (read_utilization + write_utilization) / 2.0
    }

    /// Adaptive capacity management based on performance
    ///
    /// Shrinks the read buffer when efficiency is low (halve, floored at
    /// the random-access size) and grows it when efficiency is high
    /// (double, capped at the sequential-scan size). Efficiency in the
    /// 0.3..=0.9 band leaves the capacity unchanged.
    pub fn adaptive_capacity_management(&mut self) {
        let efficiency = self.memory_efficiency_score();

        if efficiency < 0.3 {
            // Low efficiency - reduce buffer size, but not below the floor.
            let target = (self.read_buffer.capacity / 2)
                .max(ReadBuffer::adaptive_capacity(256));
            self.apply_read_buffer_capacity(target);
        } else if efficiency > 0.9 {
            // High efficiency - can afford a larger buffer, up to the cap.
            let target = (self.read_buffer.capacity * 2)
                .min(ReadBuffer::adaptive_capacity(8192));
            self.apply_read_buffer_capacity(target);
        }
    }
}