quantrs2_anneal/scientific_performance_optimization/memory.rs

//! Memory management types for scientific performance optimization.
//!
//! This module contains hierarchical memory management, memory pools,
//! cache hierarchies, and memory statistics.

use std::collections::{HashMap, VecDeque};
use std::time::Duration;

use super::config::MemoryOptimizationConfig;

11/// Hierarchical memory manager
12pub struct HierarchicalMemoryManager {
13    /// Configuration
14    pub config: MemoryOptimizationConfig,
15    /// Memory pools
16    pub memory_pools: HashMap<usize, MemoryPool>,
17    /// Cache hierarchy
18    pub cache_hierarchy: CacheHierarchy,
19    /// Memory statistics
20    pub memory_stats: MemoryStatistics,
21}
22
23impl HierarchicalMemoryManager {
24    /// Create a new hierarchical memory manager
25    #[must_use]
26    pub fn new(config: MemoryOptimizationConfig) -> Self {
27        Self {
28            config,
29            memory_pools: HashMap::new(),
30            cache_hierarchy: CacheHierarchy::new(),
31            memory_stats: MemoryStatistics::default(),
32        }
33    }
34}
35
36/// Memory pool implementation
37#[derive(Debug)]
38pub struct MemoryPool {
39    /// Pool identifier
40    pub id: String,
41    /// Block size
42    pub block_size: usize,
43    /// Total capacity
44    pub total_capacity: usize,
45    /// Used capacity
46    pub used_capacity: usize,
47    /// Free blocks
48    pub free_blocks: VecDeque<*mut u8>,
49    /// Allocation statistics
50    pub allocation_stats: AllocationStatistics,
51}
52
53/// Cache hierarchy for multi-level caching
54#[derive(Debug)]
55pub struct CacheHierarchy {
56    /// L1 cache (fastest, smallest)
57    pub l1_cache: LRUCache<String, Vec<u8>>,
58    /// L2 cache (medium speed/size)
59    pub l2_cache: LRUCache<String, Vec<u8>>,
60    /// L3 cache (slowest, largest)
61    pub l3_cache: LRUCache<String, Vec<u8>>,
62    /// Cache statistics
63    pub cache_stats: CacheStatistics,
64}
65
66impl CacheHierarchy {
67    /// Create a new cache hierarchy
68    #[must_use]
69    pub fn new() -> Self {
70        Self {
71            l1_cache: LRUCache::new(1024),             // 1KB L1
72            l2_cache: LRUCache::new(1024 * 1024),      // 1MB L2
73            l3_cache: LRUCache::new(10 * 1024 * 1024), // 10MB L3
74            cache_stats: CacheStatistics::default(),
75        }
76    }
77}
78
79impl Default for CacheHierarchy {
80    fn default() -> Self {
81        Self::new()
82    }
83}
84
85/// LRU Cache implementation
86#[derive(Debug)]
87pub struct LRUCache<K, V> {
88    /// Cache capacity
89    pub capacity: usize,
90    /// Current size
91    pub current_size: usize,
92    /// Cache data
93    pub data: HashMap<K, V>,
94    /// Access order
95    pub access_order: VecDeque<K>,
96}
97
98impl<K: Clone + std::hash::Hash + Eq, V> LRUCache<K, V> {
99    /// Create a new LRU cache with given capacity
100    #[must_use]
101    pub fn new(capacity: usize) -> Self {
102        Self {
103            capacity,
104            current_size: 0,
105            data: HashMap::new(),
106            access_order: VecDeque::new(),
107        }
108    }
109
110    /// Get value from cache
111    pub fn get(&mut self, key: &K) -> Option<&V> {
112        if self.data.contains_key(key) {
113            // Move to front of access order
114            self.access_order.retain(|k| k != key);
115            self.access_order.push_front(key.clone());
116            self.data.get(key)
117        } else {
118            None
119        }
120    }
121
122    /// Insert value into cache
123    pub fn insert(&mut self, key: K, value: V) {
124        // Remove old entry if exists
125        if self.data.contains_key(&key) {
126            self.access_order.retain(|k| k != &key);
127        } else if self.current_size >= self.capacity {
128            // Evict least recently used
129            if let Some(lru_key) = self.access_order.pop_back() {
130                self.data.remove(&lru_key);
131                self.current_size = self.current_size.saturating_sub(1);
132            }
133        }
134
135        self.data.insert(key.clone(), value);
136        self.access_order.push_front(key);
137        self.current_size += 1;
138    }
139
140    /// Check if key exists in cache
141    #[must_use]
142    pub fn contains(&self, key: &K) -> bool {
143        self.data.contains_key(key)
144    }
145
146    /// Get current size of cache
147    #[must_use]
148    pub fn len(&self) -> usize {
149        self.current_size
150    }
151
152    /// Check if cache is empty
153    #[must_use]
154    pub fn is_empty(&self) -> bool {
155        self.current_size == 0
156    }
157}
158
159/// Memory usage statistics
160#[derive(Debug, Clone)]
161pub struct MemoryStatistics {
162    /// Total allocated memory
163    pub total_allocated: usize,
164    /// Peak memory usage
165    pub peak_usage: usize,
166    /// Current usage
167    pub current_usage: usize,
168    /// Allocation count
169    pub allocation_count: u64,
170    /// Deallocation count
171    pub deallocation_count: u64,
172    /// Memory efficiency
173    pub memory_efficiency: f64,
174}
175
176impl Default for MemoryStatistics {
177    fn default() -> Self {
178        Self {
179            total_allocated: 0,
180            peak_usage: 0,
181            current_usage: 0,
182            allocation_count: 0,
183            deallocation_count: 0,
184            memory_efficiency: 1.0,
185        }
186    }
187}
188
189impl MemoryStatistics {
190    /// Record an allocation
191    pub fn record_allocation(&mut self, size: usize) {
192        self.total_allocated += size;
193        self.current_usage += size;
194        self.allocation_count += 1;
195
196        if self.current_usage > self.peak_usage {
197            self.peak_usage = self.current_usage;
198        }
199
200        self.update_efficiency();
201    }
202
203    /// Record a deallocation
204    pub fn record_deallocation(&mut self, size: usize) {
205        self.current_usage = self.current_usage.saturating_sub(size);
206        self.deallocation_count += 1;
207        self.update_efficiency();
208    }
209
210    /// Update memory efficiency
211    fn update_efficiency(&mut self) {
212        if self.peak_usage > 0 {
213            self.memory_efficiency = self.current_usage as f64 / self.peak_usage as f64;
214        }
215    }
216}
217
218/// Allocation statistics for memory pools
219#[derive(Debug, Clone)]
220pub struct AllocationStatistics {
221    /// Total allocations
222    pub total_allocations: u64,
223    /// Failed allocations
224    pub failed_allocations: u64,
225    /// Average allocation size
226    pub avg_allocation_size: f64,
227    /// Pool utilization
228    pub utilization: f64,
229}
230
231impl Default for AllocationStatistics {
232    fn default() -> Self {
233        Self {
234            total_allocations: 0,
235            failed_allocations: 0,
236            avg_allocation_size: 0.0,
237            utilization: 0.0,
238        }
239    }
240}
241
242/// Cache performance statistics
243#[derive(Debug, Clone)]
244pub struct CacheStatistics {
245    /// Cache hits
246    pub hits: u64,
247    /// Cache misses
248    pub misses: u64,
249    /// Hit rate
250    pub hit_rate: f64,
251    /// Average access time
252    pub avg_access_time: Duration,
253}
254
255impl Default for CacheStatistics {
256    fn default() -> Self {
257        Self {
258            hits: 0,
259            misses: 0,
260            hit_rate: 0.0,
261            avg_access_time: Duration::from_nanos(0),
262        }
263    }
264}
265
266impl CacheStatistics {
267    /// Record a cache hit
268    pub fn record_hit(&mut self) {
269        self.hits += 1;
270        self.update_hit_rate();
271    }
272
273    /// Record a cache miss
274    pub fn record_miss(&mut self) {
275        self.misses += 1;
276        self.update_hit_rate();
277    }
278
279    /// Update hit rate
280    fn update_hit_rate(&mut self) {
281        let total = self.hits + self.misses;
282        if total > 0 {
283            self.hit_rate = self.hits as f64 / total as f64;
284        }
285    }
286}