// quantrs2_circuit/buffer_manager.rs

//! Centralized Memory Buffer Management for Large Quantum Circuits
//!
//! This module provides optimized memory management to prevent fragmentation
//! in large quantum circuit processing by centralizing buffer pools and
//! implementing intelligent allocation strategies.

use scirs2_core::Complex64;
use quantrs2_core::buffer_pool::BufferPool;
use std::collections::HashMap;
use std::sync::{Arc, Mutex, OnceLock};

/// Global buffer manager for optimized memory allocation
static GLOBAL_BUFFER_MANAGER: OnceLock<Arc<Mutex<GlobalBufferManager>>> = OnceLock::new();
15/// Centralized buffer pool manager for preventing memory fragmentation
16pub struct GlobalBufferManager {
17    /// Pool for f64 numerical computations
18    f64_pool: BufferPool<f64>,
19
20    /// Pool for complex number operations
21    complex_pool: BufferPool<Complex64>,
22
23    /// Pool for intermediate vector allocations
24    vector_pools: HashMap<usize, Vec<Vec<f64>>>,
25
26    /// Pool for gate parameter storage
27    parameter_pool: BufferPool<f64>,
28
29    /// Memory usage statistics
30    stats: MemoryStats,
31}
32
33/// Memory usage statistics for monitoring
34#[derive(Debug, Default, Clone)]
35pub struct MemoryStats {
36    pub total_allocated: usize,
37    pub peak_usage: usize,
38    pub pool_hits: usize,
39    pub pool_misses: usize,
40    pub fragmentation_ratio: f64,
41}
42
43impl GlobalBufferManager {
44    /// Create a new buffer manager with optimized pool sizes
45    fn new() -> Self {
46        Self {
47            f64_pool: BufferPool::new(), // BufferPool manages capacity internally
48            complex_pool: BufferPool::new(),
49            vector_pools: HashMap::with_capacity(16),
50            parameter_pool: BufferPool::new(),
51            stats: MemoryStats::default(),
52        }
53    }
54
55    /// Get a reusable f64 buffer
56    pub fn get_f64_buffer(&mut self, size: usize) -> Vec<f64> {
57        self.stats.total_allocated += size * std::mem::size_of::<f64>();
58        self.update_peak_usage();
59        self.stats.pool_hits += 1;
60
61        // Use the correct BufferPool API
62        let mut buffer = self.f64_pool.get(size);
63        buffer.resize(size, 0.0);
64        buffer
65    }
66
67    /// Return a buffer to the pool for reuse
68    pub fn return_f64_buffer(&mut self, buffer: Vec<f64>) {
69        // Only pool buffers of reasonable size to prevent memory bloat
70        if buffer.len() <= 10000 && buffer.capacity() <= 20000 {
71            self.f64_pool.put(buffer);
72        }
73    }
74
75    /// Get a reusable complex buffer
76    pub fn get_complex_buffer(&mut self, size: usize) -> Vec<Complex64> {
77        self.stats.total_allocated += size * std::mem::size_of::<Complex64>();
78        self.update_peak_usage();
79        self.stats.pool_hits += 1;
80
81        // Use the correct BufferPool API
82        let mut buffer = self.complex_pool.get(size);
83        buffer.resize(size, Complex64::new(0.0, 0.0));
84        buffer
85    }
86
87    /// Return a complex buffer to the pool
88    pub fn return_complex_buffer(&mut self, buffer: Vec<Complex64>) {
89        if buffer.len() <= 10000 && buffer.capacity() <= 20000 {
90            self.complex_pool.put(buffer);
91        }
92    }
93
94    /// Get a vector for specific size with pooling
95    pub fn get_sized_vector(&mut self, size: usize) -> Vec<f64> {
96        if let Some(pool) = self.vector_pools.get_mut(&size) {
97            if let Some(vec) = pool.pop() {
98                self.stats.pool_hits += 1;
99                return vec;
100            }
101        }
102
103        self.stats.pool_misses += 1;
104        vec![0.0; size]
105    }
106
107    /// Return a sized vector to the appropriate pool
108    pub fn return_sized_vector(&mut self, mut vector: Vec<f64>) {
109        let size = vector.len();
110        vector.clear();
111
112        // Only pool common sizes to prevent excessive memory usage
113        if size <= 1024 {
114            let pool = self.vector_pools.entry(size).or_insert_with(Vec::new);
115            if pool.len() < 10 {
116                // Limit pool size
117                pool.push(vector);
118            }
119        }
120    }
121
122    /// Get buffer for gate parameters
123    pub fn get_parameter_buffer(&mut self, size: usize) -> Vec<f64> {
124        self.stats.pool_hits += 1;
125        let mut buffer = self.parameter_pool.get(size);
126        buffer.resize(size, 0.0);
127        buffer
128    }
129
130    /// Return parameter buffer
131    pub fn return_parameter_buffer(&mut self, buffer: Vec<f64>) {
132        if buffer.len() <= 100 {
133            // Gate parameters are typically small
134            self.parameter_pool.put(buffer);
135        }
136    }
137
138    /// Force garbage collection of unused buffers
139    pub fn collect_garbage(&mut self) {
140        // Clear oversized vector pools
141        self.vector_pools.retain(|&size, pool| {
142            pool.retain(|v| v.capacity() < size * 2);
143            size <= 1024 && !pool.is_empty()
144        });
145
146        // Update fragmentation ratio
147        let allocated = self.stats.total_allocated;
148        let peak = self.stats.peak_usage;
149        self.stats.fragmentation_ratio = if peak > 0 {
150            1.0 - (allocated as f64 / peak as f64)
151        } else {
152            0.0
153        };
154    }
155
156    /// Get current memory statistics
157    pub fn get_stats(&self) -> &MemoryStats {
158        &self.stats
159    }
160
161    /// Reset statistics
162    pub fn reset_stats(&mut self) {
163        self.stats = MemoryStats::default();
164    }
165
166    fn update_peak_usage(&mut self) {
167        if self.stats.total_allocated > self.stats.peak_usage {
168            self.stats.peak_usage = self.stats.total_allocated;
169        }
170    }
171}
172
173/// Public interface for accessing the global buffer manager
174pub struct BufferManager;
175
176impl BufferManager {
177    /// Get the global buffer manager instance
178    pub fn instance() -> Arc<Mutex<GlobalBufferManager>> {
179        GLOBAL_BUFFER_MANAGER
180            .get_or_init(|| Arc::new(Mutex::new(GlobalBufferManager::new())))
181            .clone()
182    }
183
184    /// Allocate an f64 buffer through the global pool
185    pub fn alloc_f64_buffer(size: usize) -> Vec<f64> {
186        Self::instance().lock().unwrap().get_f64_buffer(size)
187    }
188
189    /// Return an f64 buffer to the global pool
190    pub fn free_f64_buffer(buffer: Vec<f64>) {
191        Self::instance().lock().unwrap().return_f64_buffer(buffer);
192    }
193
194    /// Allocate a complex buffer through the global pool
195    pub fn alloc_complex_buffer(size: usize) -> Vec<Complex64> {
196        Self::instance().lock().unwrap().get_complex_buffer(size)
197    }
198
199    /// Return a complex buffer to the global pool
200    pub fn free_complex_buffer(buffer: Vec<Complex64>) {
201        Self::instance()
202            .lock()
203            .unwrap()
204            .return_complex_buffer(buffer);
205    }
206
207    /// Allocate a parameter buffer for gate operations
208    pub fn alloc_parameter_buffer(size: usize) -> Vec<f64> {
209        Self::instance().lock().unwrap().get_parameter_buffer(size)
210    }
211
212    /// Return a parameter buffer to the pool
213    pub fn free_parameter_buffer(buffer: Vec<f64>) {
214        Self::instance()
215            .lock()
216            .unwrap()
217            .return_parameter_buffer(buffer);
218    }
219
220    /// Get memory usage statistics
221    pub fn get_memory_stats() -> MemoryStats {
222        Self::instance().lock().unwrap().get_stats().clone()
223    }
224
225    /// Trigger garbage collection to reduce fragmentation
226    pub fn collect_garbage() {
227        Self::instance().lock().unwrap().collect_garbage();
228    }
229
230    /// Reset memory usage statistics
231    pub fn reset_stats() {
232        Self::instance().lock().unwrap().reset_stats();
233    }
234}
235
236/// RAII wrapper for automatic buffer return
237pub struct ManagedF64Buffer {
238    buffer: Option<Vec<f64>>,
239}
240
241impl ManagedF64Buffer {
242    /// Create a managed buffer that will be automatically returned to pool
243    pub fn new(size: usize) -> Self {
244        Self {
245            buffer: Some(BufferManager::alloc_f64_buffer(size)),
246        }
247    }
248
249    /// Get mutable access to the buffer
250    pub fn as_mut(&mut self) -> &mut Vec<f64> {
251        self.buffer.as_mut().unwrap()
252    }
253
254    /// Get immutable access to the buffer
255    pub fn as_ref(&self) -> &Vec<f64> {
256        self.buffer.as_ref().unwrap()
257    }
258
259    /// Take ownership of the buffer (preventing automatic return)
260    pub fn take(mut self) -> Vec<f64> {
261        self.buffer.take().unwrap()
262    }
263}
264
265impl Drop for ManagedF64Buffer {
266    fn drop(&mut self) {
267        if let Some(buffer) = self.buffer.take() {
268            BufferManager::free_f64_buffer(buffer);
269        }
270    }
271}
272
273/// RAII wrapper for complex buffers
274pub struct ManagedComplexBuffer {
275    buffer: Option<Vec<Complex64>>,
276}
277
278impl ManagedComplexBuffer {
279    pub fn new(size: usize) -> Self {
280        Self {
281            buffer: Some(BufferManager::alloc_complex_buffer(size)),
282        }
283    }
284
285    pub fn as_mut(&mut self) -> &mut Vec<Complex64> {
286        self.buffer.as_mut().unwrap()
287    }
288
289    pub fn as_ref(&self) -> &Vec<Complex64> {
290        self.buffer.as_ref().unwrap()
291    }
292
293    pub fn take(mut self) -> Vec<Complex64> {
294        self.buffer.take().unwrap()
295    }
296}
297
298impl Drop for ManagedComplexBuffer {
299    fn drop(&mut self) {
300        if let Some(buffer) = self.buffer.take() {
301            BufferManager::free_complex_buffer(buffer);
302        }
303    }
304}
305
306#[cfg(test)]
307mod tests {
308    use super::*;
309
310    #[test]
311    fn test_buffer_pooling() {
312        let buffer1 = BufferManager::alloc_f64_buffer(100);
313        assert_eq!(buffer1.len(), 100);
314
315        BufferManager::free_f64_buffer(buffer1);
316
317        let buffer2 = BufferManager::alloc_f64_buffer(100);
318        assert_eq!(buffer2.len(), 100);
319
320        BufferManager::free_f64_buffer(buffer2);
321
322        let stats = BufferManager::get_memory_stats();
323        assert!(stats.pool_hits > 0 || stats.pool_misses > 0);
324    }
325
326    #[test]
327    fn test_managed_buffer() {
328        {
329            let mut managed = ManagedF64Buffer::new(50);
330            managed.as_mut()[0] = 42.0;
331            assert_eq!(managed.as_ref()[0], 42.0);
332        } // Buffer automatically returned here
333
334        let stats = BufferManager::get_memory_stats();
335        // Stats should show buffer was used
336        assert!(stats.total_allocated > 0);
337    }
338
339    #[test]
340    fn test_complex_buffer_pooling() {
341        let buffer1 = BufferManager::alloc_complex_buffer(50);
342        assert_eq!(buffer1.len(), 50);
343
344        BufferManager::free_complex_buffer(buffer1);
345
346        let buffer2 = BufferManager::alloc_complex_buffer(50);
347        assert_eq!(buffer2.len(), 50);
348
349        BufferManager::free_complex_buffer(buffer2);
350    }
351
352    #[test]
353    fn test_garbage_collection() {
354        // Allocate and free several buffers
355        for _ in 0..10 {
356            let buffer = BufferManager::alloc_f64_buffer(1000);
357            BufferManager::free_f64_buffer(buffer);
358        }
359
360        BufferManager::collect_garbage();
361        let stats = BufferManager::get_memory_stats();
362
363        // Should have some fragmentation data
364        assert!(stats.fragmentation_ratio >= 0.0);
365    }
366}