// quantrs2_circuit/buffer_manager.rs
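//! Pooled buffer management for circuit simulation.
//!
//! A process-wide [`GlobalBufferManager`] recycles `f64` and `Complex64`
//! vectors so hot paths avoid repeated heap allocation. [`BufferManager`]
//! is a static facade over the shared instance, and the `Managed*Buffer`
//! wrappers return their storage to the pool automatically on drop.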
use quantrs2_core::buffer_pool::BufferPool;
use scirs2_core::Complex64;
use std::collections::HashMap;
use std::sync::{Arc, Mutex, OnceLock};

/// Lazily initialized, process-wide buffer manager instance.
static GLOBAL_BUFFER_MANAGER: OnceLock<Arc<Mutex<GlobalBufferManager>>> = OnceLock::new();

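/// Process-wide manager that owns the buffer pools and allocation
/// statistics. Access it through [`BufferManager::instance`], which guards
/// it with a mutex.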
pub struct GlobalBufferManager {
    /// Pool of reusable `f64` buffers.
    f64_pool: BufferPool<f64>,

    /// Pool of reusable `Complex64` buffers.
    complex_pool: BufferPool<Complex64>,

    /// Size-keyed pools of pre-allocated `f64` vectors.
    vector_pools: HashMap<usize, Vec<Vec<f64>>>,

    /// Pool of small `f64` buffers for gate parameters.
    parameter_pool: BufferPool<f64>,

    /// Running allocation statistics.
    stats: MemoryStats,
}

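/// Aggregate allocation statistics for the global manager.
///
/// `total_allocated` is cumulative over the process lifetime (it is never
/// decremented when buffers are returned), so `peak_usage` tracks its
/// high-water mark rather than live usage.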
#[derive(Debug, Default, Clone)]
pub struct MemoryStats {
    pub total_allocated: usize,
    pub peak_usage: usize,
    pub pool_hits: usize,
    pub pool_misses: usize,
    pub fragmentation_ratio: f64,
}

impl GlobalBufferManager {
    fn new() -> Self {
        Self {
            f64_pool: BufferPool::new(),
            complex_pool: BufferPool::new(),
            vector_pools: HashMap::with_capacity(16),
            parameter_pool: BufferPool::new(),
            stats: MemoryStats::default(),
        }
    }

    /// Returns a zero-filled `f64` buffer of `size`, reusing pooled storage
    /// when available.
    pub fn get_f64_buffer(&mut self, size: usize) -> Vec<f64> {
        self.stats.total_allocated += size * std::mem::size_of::<f64>();
        self.update_peak_usage();
        // `BufferPool::get` does not report whether it reused storage, so
        // every request through this path is counted as a hit.
        self.stats.pool_hits += 1;

        let mut buffer = self.f64_pool.get(size);
        buffer.resize(size, 0.0);
        buffer
    }

    /// Returns a buffer to the pool. Oversized buffers are dropped instead
    /// of pooled, bounding the pool's memory footprint.
    pub fn return_f64_buffer(&mut self, buffer: Vec<f64>) {
        if buffer.len() <= 10000 && buffer.capacity() <= 20000 {
            self.f64_pool.put(buffer);
        }
    }

    /// Returns a zero-filled `Complex64` buffer of `size`, reusing pooled
    /// storage when available.
    pub fn get_complex_buffer(&mut self, size: usize) -> Vec<Complex64> {
        self.stats.total_allocated += size * std::mem::size_of::<Complex64>();
        self.update_peak_usage();
        self.stats.pool_hits += 1;

        let mut buffer = self.complex_pool.get(size);
        buffer.resize(size, Complex64::new(0.0, 0.0));
        buffer
    }

    /// Returns a complex buffer to the pool, dropping oversized buffers.
    pub fn return_complex_buffer(&mut self, buffer: Vec<Complex64>) {
        if buffer.len() <= 10000 && buffer.capacity() <= 20000 {
            self.complex_pool.put(buffer);
        }
    }

    /// Returns an `f64` vector of exactly `size` elements from the
    /// size-keyed pool, or freshly allocates one on a miss.
    pub fn get_sized_vector(&mut self, size: usize) -> Vec<f64> {
        if let Some(pool) = self.vector_pools.get_mut(&size) {
            if let Some(mut vec) = pool.pop() {
                self.stats.pool_hits += 1;
                // Pooled vectors are cleared on return, so restore the
                // requested length before handing them out.
                vec.resize(size, 0.0);
                return vec;
            }
        }

        self.stats.pool_misses += 1;
        vec![0.0; size]
    }

    /// Clears a vector and returns it to the pool bucket for its original
    /// size. Only small vectors are retained, and each bucket is capped.
    pub fn return_sized_vector(&mut self, mut vector: Vec<f64>) {
        let size = vector.len();
        vector.clear();

        if size <= 1024 {
            let pool = self.vector_pools.entry(size).or_default();
            if pool.len() < 10 {
                pool.push(vector);
            }
        }
    }

    /// Returns a zero-filled buffer for gate parameters.
    pub fn get_parameter_buffer(&mut self, size: usize) -> Vec<f64> {
        self.stats.pool_hits += 1;
        let mut buffer = self.parameter_pool.get(size);
        buffer.resize(size, 0.0);
        buffer
    }

    /// Returns a parameter buffer to the pool; parameter buffers are
    /// expected to be small, so larger ones are simply dropped.
    pub fn return_parameter_buffer(&mut self, buffer: Vec<f64>) {
        if buffer.len() <= 100 {
            self.parameter_pool.put(buffer);
        }
    }

    /// Shrinks the size-keyed pools and refreshes the fragmentation estimate.
    pub fn collect_garbage(&mut self) {
        // Drop vectors whose capacity has grown well beyond their bucket
        // size, then drop empty or oversized buckets entirely.
        self.vector_pools.retain(|&size, pool| {
            pool.retain(|v| v.capacity() < size * 2);
            size <= 1024 && !pool.is_empty()
        });

        // Note: `total_allocated` is cumulative and never decremented, so
        // `peak_usage` matches it exactly and this ratio currently
        // evaluates to zero; live-usage accounting would make it meaningful.
        let allocated = self.stats.total_allocated;
        let peak = self.stats.peak_usage;
        self.stats.fragmentation_ratio = if peak > 0 {
            1.0 - (allocated as f64 / peak as f64)
        } else {
            0.0
        };
    }

    /// Returns a reference to the current statistics.
    pub fn get_stats(&self) -> &MemoryStats {
        &self.stats
    }

    /// Resets all statistics counters to zero.
    pub fn reset_stats(&mut self) {
        self.stats = MemoryStats::default();
    }

    fn update_peak_usage(&mut self) {
        if self.stats.total_allocated > self.stats.peak_usage {
            self.stats.peak_usage = self.stats.total_allocated;
        }
    }
}

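/// Stateless facade providing static access to the global buffer manager.
/// Every method locks the shared instance and recovers from a poisoned
/// mutex by taking the inner value.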
pub struct BufferManager;

impl BufferManager {
    /// Returns the lazily initialized global manager instance.
    pub fn instance() -> Arc<Mutex<GlobalBufferManager>> {
        GLOBAL_BUFFER_MANAGER
            .get_or_init(|| Arc::new(Mutex::new(GlobalBufferManager::new())))
            .clone()
    }

    #[must_use]
    pub fn alloc_f64_buffer(size: usize) -> Vec<f64> {
        Self::instance()
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .get_f64_buffer(size)
    }

    pub fn free_f64_buffer(buffer: Vec<f64>) {
        Self::instance()
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .return_f64_buffer(buffer);
    }

    #[must_use]
    pub fn alloc_complex_buffer(size: usize) -> Vec<Complex64> {
        Self::instance()
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .get_complex_buffer(size)
    }

    pub fn free_complex_buffer(buffer: Vec<Complex64>) {
        Self::instance()
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .return_complex_buffer(buffer);
    }

    #[must_use]
    pub fn alloc_parameter_buffer(size: usize) -> Vec<f64> {
        Self::instance()
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .get_parameter_buffer(size)
    }

    pub fn free_parameter_buffer(buffer: Vec<f64>) {
        Self::instance()
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .return_parameter_buffer(buffer);
    }

    #[must_use]
    pub fn get_memory_stats() -> MemoryStats {
        Self::instance()
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .get_stats()
            .clone()
    }

    pub fn collect_garbage() {
        Self::instance()
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .collect_garbage();
    }

    pub fn reset_stats() {
        Self::instance()
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .reset_stats();
    }
}

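/// RAII wrapper around a pooled `f64` buffer; the storage is returned to
/// the global pool when the wrapper is dropped.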
pub struct ManagedF64Buffer {
    buffer: Option<Vec<f64>>,
}

impl ManagedF64Buffer {
    #[must_use]
    pub fn new(size: usize) -> Self {
        Self {
            buffer: Some(BufferManager::alloc_f64_buffer(size)),
        }
    }

    pub fn as_mut(&mut self) -> &mut Vec<f64> {
        self.buffer
            .as_mut()
            .expect("buffer was already taken or not initialized")
    }

    #[must_use]
    pub fn as_ref(&self) -> &Vec<f64> {
        self.buffer
            .as_ref()
            .expect("buffer was already taken or not initialized")
    }

    /// Consumes the wrapper and returns the underlying buffer without
    /// returning it to the pool.
    #[must_use]
    pub fn take(mut self) -> Vec<f64> {
        self.buffer
            .take()
            .expect("buffer was already taken or not initialized")
    }
}

impl Drop for ManagedF64Buffer {
    fn drop(&mut self) {
        if let Some(buffer) = self.buffer.take() {
            BufferManager::free_f64_buffer(buffer);
        }
    }
}

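/// RAII wrapper around a pooled `Complex64` buffer; the storage is returned
/// to the global pool when the wrapper is dropped.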
pub struct ManagedComplexBuffer {
    buffer: Option<Vec<Complex64>>,
}

impl ManagedComplexBuffer {
    #[must_use]
    pub fn new(size: usize) -> Self {
        Self {
            buffer: Some(BufferManager::alloc_complex_buffer(size)),
        }
    }

    pub fn as_mut(&mut self) -> &mut Vec<Complex64> {
        self.buffer
            .as_mut()
            .expect("buffer was already taken or not initialized")
    }

    #[must_use]
    pub fn as_ref(&self) -> &Vec<Complex64> {
        self.buffer
            .as_ref()
            .expect("buffer was already taken or not initialized")
    }

    /// Consumes the wrapper and returns the underlying buffer without
    /// returning it to the pool.
    #[must_use]
    pub fn take(mut self) -> Vec<Complex64> {
        self.buffer
            .take()
            .expect("buffer was already taken or not initialized")
    }
}

impl Drop for ManagedComplexBuffer {
    fn drop(&mut self) {
        if let Some(buffer) = self.buffer.take() {
            BufferManager::free_complex_buffer(buffer);
        }
    }
}

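// Example usage (an illustrative sketch using only this module's API): the
// RAII wrappers return their storage to the pool automatically on drop,
// while the manual API needs an explicit `free_*` call.
//
//     let mut amplitudes = ManagedComplexBuffer::new(1 << 10);
//     amplitudes.as_mut()[0] = Complex64::new(1.0, 0.0);
//     // `amplitudes` is returned to the pool when it goes out of scope.
//
//     let scratch = BufferManager::alloc_f64_buffer(256);
//     // ... use `scratch` ...
//     BufferManager::free_f64_buffer(scratch);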
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_buffer_pooling() {
        let buffer1 = BufferManager::alloc_f64_buffer(100);
        assert_eq!(buffer1.len(), 100);

        BufferManager::free_f64_buffer(buffer1);

        let buffer2 = BufferManager::alloc_f64_buffer(100);
        assert_eq!(buffer2.len(), 100);

        BufferManager::free_f64_buffer(buffer2);

        let stats = BufferManager::get_memory_stats();
        assert!(stats.pool_hits > 0 || stats.pool_misses > 0);
    }

    #[test]
    fn test_managed_buffer() {
        {
            let mut managed = ManagedF64Buffer::new(50);
            managed.as_mut()[0] = 42.0;
            assert_eq!(managed.as_ref()[0], 42.0);
        }

        // The buffer has been returned to the pool by now; the cumulative
        // allocation counter still reflects the allocation.
        let stats = BufferManager::get_memory_stats();
        assert!(stats.total_allocated > 0);
    }

    #[test]
    fn test_complex_buffer_pooling() {
        let buffer1 = BufferManager::alloc_complex_buffer(50);
        assert_eq!(buffer1.len(), 50);

        BufferManager::free_complex_buffer(buffer1);

        let buffer2 = BufferManager::alloc_complex_buffer(50);
        assert_eq!(buffer2.len(), 50);

        BufferManager::free_complex_buffer(buffer2);
    }

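    // Exercises the size-keyed vector pool through the global instance;
    // 64 is an arbitrary bucket size chosen for the test.
    #[test]
    fn test_sized_vector_reuse() {
        let instance = BufferManager::instance();
        let mut manager = instance
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner());

        let vec = manager.get_sized_vector(64);
        assert_eq!(vec.len(), 64);
        manager.return_sized_vector(vec);

        // A second request for the same size must come back at full length
        // even though pooled vectors are cleared on return.
        let reused = manager.get_sized_vector(64);
        assert_eq!(reused.len(), 64);
    }
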
    #[test]
    fn test_garbage_collection() {
        // Churn the pool so `collect_garbage` has something to inspect.
        for _ in 0..10 {
            let buffer = BufferManager::alloc_f64_buffer(1000);
            BufferManager::free_f64_buffer(buffer);
        }

        BufferManager::collect_garbage();
        let stats = BufferManager::get_memory_stats();

        assert!(stats.fragmentation_ratio >= 0.0);
    }
}