//! Global buffer management for `quantrs2_circuit` (buffer_manager.rs).

use quantrs2_core::buffer_pool::BufferPool;
use scirs2_core::Complex64;
use std::collections::HashMap;
use std::sync::{Arc, Mutex, OnceLock};
12static GLOBAL_BUFFER_MANAGER: OnceLock<Arc<Mutex<GlobalBufferManager>>> = OnceLock::new();
14
15pub struct GlobalBufferManager {
17 f64_pool: BufferPool<f64>,
19
20 complex_pool: BufferPool<Complex64>,
22
23 vector_pools: HashMap<usize, Vec<Vec<f64>>>,
25
26 parameter_pool: BufferPool<f64>,
28
29 stats: MemoryStats,
31}
32
/// Aggregate memory-usage statistics collected by the buffer manager.
#[derive(Debug, Default, Clone)]
pub struct MemoryStats {
    /// Total bytes ever requested through the manager. Cumulative: it is
    /// never decremented when buffers are returned.
    pub total_allocated: usize,
    /// Highest value `total_allocated` has reached.
    pub peak_usage: usize,
    /// Number of buffer requests recorded as pool hits.
    pub pool_hits: usize,
    /// Number of buffer requests that fell back to a fresh allocation.
    pub pool_misses: usize,
    /// Fragmentation estimate in `[0, 1]`, recomputed by `collect_garbage`.
    pub fragmentation_ratio: f64,
}

43impl GlobalBufferManager {
44 fn new() -> Self {
46 Self {
47 f64_pool: BufferPool::new(), complex_pool: BufferPool::new(),
49 vector_pools: HashMap::with_capacity(16),
50 parameter_pool: BufferPool::new(),
51 stats: MemoryStats::default(),
52 }
53 }
54
55 pub fn get_f64_buffer(&mut self, size: usize) -> Vec<f64> {
57 self.stats.total_allocated += size * std::mem::size_of::<f64>();
58 self.update_peak_usage();
59 self.stats.pool_hits += 1;
60
61 let mut buffer = self.f64_pool.get(size);
63 buffer.resize(size, 0.0);
64 buffer
65 }
66
67 pub fn return_f64_buffer(&mut self, buffer: Vec<f64>) {
69 if buffer.len() <= 10000 && buffer.capacity() <= 20000 {
71 self.f64_pool.put(buffer);
72 }
73 }
74
75 pub fn get_complex_buffer(&mut self, size: usize) -> Vec<Complex64> {
77 self.stats.total_allocated += size * std::mem::size_of::<Complex64>();
78 self.update_peak_usage();
79 self.stats.pool_hits += 1;
80
81 let mut buffer = self.complex_pool.get(size);
83 buffer.resize(size, Complex64::new(0.0, 0.0));
84 buffer
85 }
86
87 pub fn return_complex_buffer(&mut self, buffer: Vec<Complex64>) {
89 if buffer.len() <= 10000 && buffer.capacity() <= 20000 {
90 self.complex_pool.put(buffer);
91 }
92 }
93
94 pub fn get_sized_vector(&mut self, size: usize) -> Vec<f64> {
96 if let Some(pool) = self.vector_pools.get_mut(&size) {
97 if let Some(vec) = pool.pop() {
98 self.stats.pool_hits += 1;
99 return vec;
100 }
101 }
102
103 self.stats.pool_misses += 1;
104 vec![0.0; size]
105 }
106
107 pub fn return_sized_vector(&mut self, mut vector: Vec<f64>) {
109 let size = vector.len();
110 vector.clear();
111
112 if size <= 1024 {
114 let pool = self.vector_pools.entry(size).or_insert_with(Vec::new);
115 if pool.len() < 10 {
116 pool.push(vector);
118 }
119 }
120 }
121
122 pub fn get_parameter_buffer(&mut self, size: usize) -> Vec<f64> {
124 self.stats.pool_hits += 1;
125 let mut buffer = self.parameter_pool.get(size);
126 buffer.resize(size, 0.0);
127 buffer
128 }
129
130 pub fn return_parameter_buffer(&mut self, buffer: Vec<f64>) {
132 if buffer.len() <= 100 {
133 self.parameter_pool.put(buffer);
135 }
136 }
137
138 pub fn collect_garbage(&mut self) {
140 self.vector_pools.retain(|&size, pool| {
142 pool.retain(|v| v.capacity() < size * 2);
143 size <= 1024 && !pool.is_empty()
144 });
145
146 let allocated = self.stats.total_allocated;
148 let peak = self.stats.peak_usage;
149 self.stats.fragmentation_ratio = if peak > 0 {
150 1.0 - (allocated as f64 / peak as f64)
151 } else {
152 0.0
153 };
154 }
155
156 pub fn get_stats(&self) -> &MemoryStats {
158 &self.stats
159 }
160
161 pub fn reset_stats(&mut self) {
163 self.stats = MemoryStats::default();
164 }
165
166 fn update_peak_usage(&mut self) {
167 if self.stats.total_allocated > self.stats.peak_usage {
168 self.stats.peak_usage = self.stats.total_allocated;
169 }
170 }
171}
172
/// Zero-sized facade exposing the global buffer manager through static
/// convenience methods (alloc/free wrappers, stats, GC).
pub struct BufferManager;

176impl BufferManager {
177 pub fn instance() -> Arc<Mutex<GlobalBufferManager>> {
179 GLOBAL_BUFFER_MANAGER
180 .get_or_init(|| Arc::new(Mutex::new(GlobalBufferManager::new())))
181 .clone()
182 }
183
184 pub fn alloc_f64_buffer(size: usize) -> Vec<f64> {
186 Self::instance().lock().unwrap().get_f64_buffer(size)
187 }
188
189 pub fn free_f64_buffer(buffer: Vec<f64>) {
191 Self::instance().lock().unwrap().return_f64_buffer(buffer);
192 }
193
194 pub fn alloc_complex_buffer(size: usize) -> Vec<Complex64> {
196 Self::instance().lock().unwrap().get_complex_buffer(size)
197 }
198
199 pub fn free_complex_buffer(buffer: Vec<Complex64>) {
201 Self::instance()
202 .lock()
203 .unwrap()
204 .return_complex_buffer(buffer);
205 }
206
207 pub fn alloc_parameter_buffer(size: usize) -> Vec<f64> {
209 Self::instance().lock().unwrap().get_parameter_buffer(size)
210 }
211
212 pub fn free_parameter_buffer(buffer: Vec<f64>) {
214 Self::instance()
215 .lock()
216 .unwrap()
217 .return_parameter_buffer(buffer);
218 }
219
220 pub fn get_memory_stats() -> MemoryStats {
222 Self::instance().lock().unwrap().get_stats().clone()
223 }
224
225 pub fn collect_garbage() {
227 Self::instance().lock().unwrap().collect_garbage();
228 }
229
230 pub fn reset_stats() {
232 Self::instance().lock().unwrap().reset_stats();
233 }
234}
235
/// RAII wrapper around a pooled `Vec<f64>`: the buffer is automatically
/// returned to the global pool when this value is dropped.
pub struct ManagedF64Buffer {
    // Invariant: `Some` for the wrapper's entire lifetime; only `take()` and
    // `Drop` move the vector out.
    buffer: Option<Vec<f64>>,
}

241impl ManagedF64Buffer {
242 pub fn new(size: usize) -> Self {
244 Self {
245 buffer: Some(BufferManager::alloc_f64_buffer(size)),
246 }
247 }
248
249 pub fn as_mut(&mut self) -> &mut Vec<f64> {
251 self.buffer.as_mut().unwrap()
252 }
253
254 pub fn as_ref(&self) -> &Vec<f64> {
256 self.buffer.as_ref().unwrap()
257 }
258
259 pub fn take(mut self) -> Vec<f64> {
261 self.buffer.take().unwrap()
262 }
263}
264
265impl Drop for ManagedF64Buffer {
266 fn drop(&mut self) {
267 if let Some(buffer) = self.buffer.take() {
268 BufferManager::free_f64_buffer(buffer);
269 }
270 }
271}
272
273pub struct ManagedComplexBuffer {
275 buffer: Option<Vec<Complex64>>,
276}
277
278impl ManagedComplexBuffer {
279 pub fn new(size: usize) -> Self {
280 Self {
281 buffer: Some(BufferManager::alloc_complex_buffer(size)),
282 }
283 }
284
285 pub fn as_mut(&mut self) -> &mut Vec<Complex64> {
286 self.buffer.as_mut().unwrap()
287 }
288
289 pub fn as_ref(&self) -> &Vec<Complex64> {
290 self.buffer.as_ref().unwrap()
291 }
292
293 pub fn take(mut self) -> Vec<Complex64> {
294 self.buffer.take().unwrap()
295 }
296}
297
298impl Drop for ManagedComplexBuffer {
299 fn drop(&mut self) {
300 if let Some(buffer) = self.buffer.take() {
301 BufferManager::free_complex_buffer(buffer);
302 }
303 }
304}
305
#[cfg(test)]
mod tests {
    use super::*;

    // All tests share the process-wide singleton, so assertions on statistics
    // are deliberately loose (counters accumulate across tests in any order).

    #[test]
    fn test_buffer_pooling() {
        let buffer1 = BufferManager::alloc_f64_buffer(100);
        assert_eq!(buffer1.len(), 100);

        BufferManager::free_f64_buffer(buffer1);

        let buffer2 = BufferManager::alloc_f64_buffer(100);
        assert_eq!(buffer2.len(), 100);

        BufferManager::free_f64_buffer(buffer2);

        let stats = BufferManager::get_memory_stats();
        assert!(stats.pool_hits > 0 || stats.pool_misses > 0);
    }

    #[test]
    fn test_managed_buffer() {
        {
            let mut managed = ManagedF64Buffer::new(50);
            managed.as_mut()[0] = 42.0;
            assert_eq!(managed.as_ref()[0], 42.0);
        } // Dropping `managed` returns its buffer to the pool.

        let stats = BufferManager::get_memory_stats();
        assert!(stats.total_allocated > 0);
    }

    #[test]
    fn test_complex_buffer_pooling() {
        let buffer1 = BufferManager::alloc_complex_buffer(50);
        assert_eq!(buffer1.len(), 50);

        BufferManager::free_complex_buffer(buffer1);

        let buffer2 = BufferManager::alloc_complex_buffer(50);
        assert_eq!(buffer2.len(), 50);

        BufferManager::free_complex_buffer(buffer2);
    }

    #[test]
    fn test_garbage_collection() {
        for _ in 0..10 {
            let buffer = BufferManager::alloc_f64_buffer(1000);
            BufferManager::free_f64_buffer(buffer);
        }

        BufferManager::collect_garbage();
        let stats = BufferManager::get_memory_stats();

        assert!(stats.fragmentation_ratio >= 0.0);
    }
}