quantrs2_circuit/
buffer_manager.rs

1//! Centralized Memory Buffer Management for Large Quantum Circuits
2//!
3//! This module provides optimized memory management to prevent fragmentation
4//! in large quantum circuit processing by centralizing buffer pools and
5//! implementing intelligent allocation strategies.
6
7use quantrs2_core::buffer_pool::BufferPool;
8use scirs2_core::Complex64;
9use std::collections::HashMap;
10use std::sync::{Arc, Mutex, OnceLock};
11
/// Process-wide buffer manager, lazily initialized on first access via
/// `BufferManager::instance`.
static GLOBAL_BUFFER_MANAGER: OnceLock<Arc<Mutex<GlobalBufferManager>>> = OnceLock::new();
14
/// Centralized buffer pool manager for preventing memory fragmentation.
///
/// Not constructed directly by callers; access goes through the lazily
/// initialized global held in `GLOBAL_BUFFER_MANAGER`.
pub struct GlobalBufferManager {
    /// Pool for f64 numerical computations
    f64_pool: BufferPool<f64>,

    /// Pool for complex number operations
    complex_pool: BufferPool<Complex64>,

    /// Pools of reusable `Vec<f64>`, keyed by the length they were
    /// returned with (vectors are cleared before being pooled)
    vector_pools: HashMap<usize, Vec<Vec<f64>>>,

    /// Pool for gate parameter storage
    parameter_pool: BufferPool<f64>,

    /// Memory usage statistics
    stats: MemoryStats,
}
32
/// Memory usage statistics for monitoring
#[derive(Debug, Default, Clone)]
pub struct MemoryStats {
    /// Cumulative bytes requested through the tracked buffer getters
    pub total_allocated: usize,
    /// Highest value `total_allocated` has reached
    pub peak_usage: usize,
    /// Requests served through a pool (see the getter methods for exact
    /// counting semantics — BufferPool-backed getters count every request)
    pub pool_hits: usize,
    /// Sized-vector requests that could not be served from a pool
    pub pool_misses: usize,
    /// Ratio recomputed by `collect_garbage` from allocated vs. peak bytes
    pub fragmentation_ratio: f64,
}
42
43impl GlobalBufferManager {
44    /// Create a new buffer manager with optimized pool sizes
45    fn new() -> Self {
46        Self {
47            f64_pool: BufferPool::new(), // BufferPool manages capacity internally
48            complex_pool: BufferPool::new(),
49            vector_pools: HashMap::with_capacity(16),
50            parameter_pool: BufferPool::new(),
51            stats: MemoryStats::default(),
52        }
53    }
54
55    /// Get a reusable f64 buffer
56    pub fn get_f64_buffer(&mut self, size: usize) -> Vec<f64> {
57        self.stats.total_allocated += size * std::mem::size_of::<f64>();
58        self.update_peak_usage();
59        self.stats.pool_hits += 1;
60
61        // Use the correct BufferPool API
62        let mut buffer = self.f64_pool.get(size);
63        buffer.resize(size, 0.0);
64        buffer
65    }
66
67    /// Return a buffer to the pool for reuse
68    pub fn return_f64_buffer(&mut self, buffer: Vec<f64>) {
69        // Only pool buffers of reasonable size to prevent memory bloat
70        if buffer.len() <= 10000 && buffer.capacity() <= 20000 {
71            self.f64_pool.put(buffer);
72        }
73    }
74
75    /// Get a reusable complex buffer
76    pub fn get_complex_buffer(&mut self, size: usize) -> Vec<Complex64> {
77        self.stats.total_allocated += size * std::mem::size_of::<Complex64>();
78        self.update_peak_usage();
79        self.stats.pool_hits += 1;
80
81        // Use the correct BufferPool API
82        let mut buffer = self.complex_pool.get(size);
83        buffer.resize(size, Complex64::new(0.0, 0.0));
84        buffer
85    }
86
87    /// Return a complex buffer to the pool
88    pub fn return_complex_buffer(&mut self, buffer: Vec<Complex64>) {
89        if buffer.len() <= 10000 && buffer.capacity() <= 20000 {
90            self.complex_pool.put(buffer);
91        }
92    }
93
94    /// Get a vector for specific size with pooling
95    pub fn get_sized_vector(&mut self, size: usize) -> Vec<f64> {
96        if let Some(pool) = self.vector_pools.get_mut(&size) {
97            if let Some(vec) = pool.pop() {
98                self.stats.pool_hits += 1;
99                return vec;
100            }
101        }
102
103        self.stats.pool_misses += 1;
104        vec![0.0; size]
105    }
106
107    /// Return a sized vector to the appropriate pool
108    pub fn return_sized_vector(&mut self, mut vector: Vec<f64>) {
109        let size = vector.len();
110        vector.clear();
111
112        // Only pool common sizes to prevent excessive memory usage
113        if size <= 1024 {
114            let pool = self.vector_pools.entry(size).or_default();
115            if pool.len() < 10 {
116                // Limit pool size
117                pool.push(vector);
118            }
119        }
120    }
121
122    /// Get buffer for gate parameters
123    pub fn get_parameter_buffer(&mut self, size: usize) -> Vec<f64> {
124        self.stats.pool_hits += 1;
125        let mut buffer = self.parameter_pool.get(size);
126        buffer.resize(size, 0.0);
127        buffer
128    }
129
130    /// Return parameter buffer
131    pub fn return_parameter_buffer(&mut self, buffer: Vec<f64>) {
132        if buffer.len() <= 100 {
133            // Gate parameters are typically small
134            self.parameter_pool.put(buffer);
135        }
136    }
137
138    /// Force garbage collection of unused buffers
139    pub fn collect_garbage(&mut self) {
140        // Clear oversized vector pools
141        self.vector_pools.retain(|&size, pool| {
142            pool.retain(|v| v.capacity() < size * 2);
143            size <= 1024 && !pool.is_empty()
144        });
145
146        // Update fragmentation ratio
147        let allocated = self.stats.total_allocated;
148        let peak = self.stats.peak_usage;
149        self.stats.fragmentation_ratio = if peak > 0 {
150            1.0 - (allocated as f64 / peak as f64)
151        } else {
152            0.0
153        };
154    }
155
156    /// Get current memory statistics
157    pub const fn get_stats(&self) -> &MemoryStats {
158        &self.stats
159    }
160
161    /// Reset statistics
162    pub fn reset_stats(&mut self) {
163        self.stats = MemoryStats::default();
164    }
165
166    const fn update_peak_usage(&mut self) {
167        if self.stats.total_allocated > self.stats.peak_usage {
168            self.stats.peak_usage = self.stats.total_allocated;
169        }
170    }
171}
172
/// Public interface for accessing the global buffer manager.
///
/// A zero-sized namespace type: all functionality is exposed through
/// associated functions that lock the process-wide instance.
pub struct BufferManager;
175
176impl BufferManager {
177    /// Get the global buffer manager instance
178    pub fn instance() -> Arc<Mutex<GlobalBufferManager>> {
179        GLOBAL_BUFFER_MANAGER
180            .get_or_init(|| Arc::new(Mutex::new(GlobalBufferManager::new())))
181            .clone()
182    }
183
184    /// Allocate an f64 buffer through the global pool
185    #[must_use]
186    pub fn alloc_f64_buffer(size: usize) -> Vec<f64> {
187        Self::instance()
188            .lock()
189            .unwrap_or_else(|poisoned| poisoned.into_inner())
190            .get_f64_buffer(size)
191    }
192
193    /// Return an f64 buffer to the global pool
194    pub fn free_f64_buffer(buffer: Vec<f64>) {
195        Self::instance()
196            .lock()
197            .unwrap_or_else(|poisoned| poisoned.into_inner())
198            .return_f64_buffer(buffer);
199    }
200
201    /// Allocate a complex buffer through the global pool
202    #[must_use]
203    pub fn alloc_complex_buffer(size: usize) -> Vec<Complex64> {
204        Self::instance()
205            .lock()
206            .unwrap_or_else(|poisoned| poisoned.into_inner())
207            .get_complex_buffer(size)
208    }
209
210    /// Return a complex buffer to the global pool
211    pub fn free_complex_buffer(buffer: Vec<Complex64>) {
212        Self::instance()
213            .lock()
214            .unwrap_or_else(|poisoned| poisoned.into_inner())
215            .return_complex_buffer(buffer);
216    }
217
218    /// Allocate a parameter buffer for gate operations
219    #[must_use]
220    pub fn alloc_parameter_buffer(size: usize) -> Vec<f64> {
221        Self::instance()
222            .lock()
223            .unwrap_or_else(|poisoned| poisoned.into_inner())
224            .get_parameter_buffer(size)
225    }
226
227    /// Return a parameter buffer to the pool
228    pub fn free_parameter_buffer(buffer: Vec<f64>) {
229        Self::instance()
230            .lock()
231            .unwrap_or_else(|poisoned| poisoned.into_inner())
232            .return_parameter_buffer(buffer);
233    }
234
235    /// Get memory usage statistics
236    #[must_use]
237    pub fn get_memory_stats() -> MemoryStats {
238        Self::instance()
239            .lock()
240            .unwrap_or_else(|poisoned| poisoned.into_inner())
241            .get_stats()
242            .clone()
243    }
244
245    /// Trigger garbage collection to reduce fragmentation
246    pub fn collect_garbage() {
247        Self::instance()
248            .lock()
249            .unwrap_or_else(|poisoned| poisoned.into_inner())
250            .collect_garbage();
251    }
252
253    /// Reset memory usage statistics
254    pub fn reset_stats() {
255        Self::instance()
256            .lock()
257            .unwrap_or_else(|poisoned| poisoned.into_inner())
258            .reset_stats();
259    }
260}
261
/// RAII wrapper for automatic buffer return.
///
/// Holds `Some(buffer)` until the value is either moved out via `take`
/// or handed back to the global pool when the wrapper is dropped.
pub struct ManagedF64Buffer {
    // `None` only after `take()` has consumed the wrapper or mid-drop.
    buffer: Option<Vec<f64>>,
}
266
267impl ManagedF64Buffer {
268    /// Create a managed buffer that will be automatically returned to pool
269    #[must_use]
270    pub fn new(size: usize) -> Self {
271        Self {
272            buffer: Some(BufferManager::alloc_f64_buffer(size)),
273        }
274    }
275
276    /// Get mutable access to the buffer
277    pub const fn as_mut(&mut self) -> &mut Vec<f64> {
278        self.buffer
279            .as_mut()
280            .expect("buffer was already taken or not initialized")
281    }
282
283    /// Get immutable access to the buffer
284    #[must_use]
285    pub const fn as_ref(&self) -> &Vec<f64> {
286        self.buffer
287            .as_ref()
288            .expect("buffer was already taken or not initialized")
289    }
290
291    /// Take ownership of the buffer (preventing automatic return)
292    #[must_use]
293    pub fn take(mut self) -> Vec<f64> {
294        self.buffer
295            .take()
296            .expect("buffer was already taken or not initialized")
297    }
298}
299
impl Drop for ManagedF64Buffer {
    fn drop(&mut self) {
        // Return the buffer to the global pool unless `take()` already
        // moved it out (in which case `buffer` is `None`).
        if let Some(buffer) = self.buffer.take() {
            BufferManager::free_f64_buffer(buffer);
        }
    }
}
307
/// RAII wrapper for complex buffers; mirrors `ManagedF64Buffer` but wraps
/// a `Vec<Complex64>` from the complex pool.
pub struct ManagedComplexBuffer {
    // `None` only after `take()` has consumed the wrapper or mid-drop.
    buffer: Option<Vec<Complex64>>,
}
312
313impl ManagedComplexBuffer {
314    #[must_use]
315    pub fn new(size: usize) -> Self {
316        Self {
317            buffer: Some(BufferManager::alloc_complex_buffer(size)),
318        }
319    }
320
321    pub const fn as_mut(&mut self) -> &mut Vec<Complex64> {
322        self.buffer
323            .as_mut()
324            .expect("buffer was already taken or not initialized")
325    }
326
327    #[must_use]
328    pub const fn as_ref(&self) -> &Vec<Complex64> {
329        self.buffer
330            .as_ref()
331            .expect("buffer was already taken or not initialized")
332    }
333
334    #[must_use]
335    pub fn take(mut self) -> Vec<Complex64> {
336        self.buffer
337            .take()
338            .expect("buffer was already taken or not initialized")
339    }
340}
341
impl Drop for ManagedComplexBuffer {
    fn drop(&mut self) {
        // Return the buffer to the global pool unless `take()` already
        // moved it out (in which case `buffer` is `None`).
        if let Some(buffer) = self.buffer.take() {
            BufferManager::free_complex_buffer(buffer);
        }
    }
}
349
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_buffer_pooling() {
        // Allocate, release, and re-allocate to exercise a pool round-trip.
        let first = BufferManager::alloc_f64_buffer(100);
        assert_eq!(first.len(), 100);
        BufferManager::free_f64_buffer(first);

        let second = BufferManager::alloc_f64_buffer(100);
        assert_eq!(second.len(), 100);
        BufferManager::free_f64_buffer(second);

        // At least one request must have been recorded either way.
        let stats = BufferManager::get_memory_stats();
        assert!(stats.pool_hits > 0 || stats.pool_misses > 0);
    }

    #[test]
    fn test_managed_buffer() {
        {
            let mut managed = ManagedF64Buffer::new(50);
            managed.as_mut()[0] = 42.0;
            assert_eq!(managed.as_ref()[0], 42.0);
        } // Buffer automatically returned to the pool here.

        // The allocation above must be visible in the global statistics.
        let stats = BufferManager::get_memory_stats();
        assert!(stats.total_allocated > 0);
    }

    #[test]
    fn test_complex_buffer_pooling() {
        // Two allocate/free cycles: the second may reuse the first's buffer.
        for _ in 0..2 {
            let buffer = BufferManager::alloc_complex_buffer(50);
            assert_eq!(buffer.len(), 50);
            BufferManager::free_complex_buffer(buffer);
        }
    }

    #[test]
    fn test_garbage_collection() {
        // Cycle several buffers through the pool, then compact.
        for _ in 0..10 {
            let buffer = BufferManager::alloc_f64_buffer(1000);
            BufferManager::free_f64_buffer(buffer);
        }

        BufferManager::collect_garbage();
        let stats = BufferManager::get_memory_stats();

        // Fragmentation ratio must be populated and non-negative.
        assert!(stats.fragmentation_ratio >= 0.0);
    }
}