//! Memory pooling utilities (chess_vector_engine/utils/memory_pool.rs).

use ndarray::Array1;
use std::alloc::{alloc, dealloc, Layout};
use std::collections::VecDeque;
use std::mem;
use std::ptr::NonNull;
use std::sync::{Arc, Mutex, RwLock};

/// Thread-safe pool of equally-sized raw memory blocks.
///
/// Blocks are pre-allocated up front and recycled through `free_blocks`;
/// `allocate` falls back to the system allocator once the pool is empty.
pub struct FixedSizeMemoryPool {
    /// Blocks currently available for reuse.
    free_blocks: Mutex<VecDeque<NonNull<u8>>>,
    /// Size in bytes of every block handed out by this pool.
    block_size: usize,
    /// Number of blocks created at construction; also the cap on how many
    /// freed blocks the pool retains (surplus is released to the system).
    total_blocks: usize,
    /// Count of blocks currently handed out (statistics only).
    allocated_blocks: Mutex<usize>,
    /// Layout used for every alloc/dealloc of this pool's blocks.
    layout: Layout,
}
21
22impl FixedSizeMemoryPool {
23 pub fn new(block_size: usize, initial_blocks: usize) -> Result<Self, &'static str> {
25 let layout = Layout::from_size_align(block_size, mem::align_of::<u8>())
26 .map_err(|_| "Invalid layout")?;
27
28 let mut free_blocks = VecDeque::with_capacity(initial_blocks);
29
30 for _ in 0..initial_blocks {
32 unsafe {
33 let ptr = alloc(layout);
34 if ptr.is_null() {
35 return Err("Failed to allocate memory");
36 }
37 free_blocks.push_back(NonNull::new_unchecked(ptr));
38 }
39 }
40
41 Ok(Self {
42 free_blocks: Mutex::new(free_blocks),
43 block_size,
44 total_blocks: initial_blocks,
45 allocated_blocks: Mutex::new(0),
46 layout,
47 })
48 }
49
50 pub fn allocate(&self) -> Option<PooledMemory> {
52 let ptr = {
53 let mut free_blocks = self.free_blocks.lock().ok()?;
54
55 if let Some(ptr) = free_blocks.pop_front() {
56 ptr
57 } else {
58 unsafe {
60 let new_ptr = alloc(self.layout);
61 if new_ptr.is_null() {
62 return None;
63 }
64 NonNull::new_unchecked(new_ptr)
65 }
66 }
67 };
68
69 if let Ok(mut allocated) = self.allocated_blocks.lock() {
71 *allocated += 1;
72 }
73
74 Some(PooledMemory {
75 ptr,
76 size: self.block_size,
77 })
78 }
79
80 fn deallocate(&self, ptr: NonNull<u8>) {
82 let mut free_blocks = self.free_blocks.lock().unwrap();
83
84 if free_blocks.len() < self.total_blocks {
86 free_blocks.push_back(ptr);
87 } else {
88 unsafe {
90 dealloc(ptr.as_ptr(), self.layout);
91 }
92 }
93
94 if let Ok(mut allocated) = self.allocated_blocks.lock() {
96 *allocated = allocated.saturating_sub(1);
97 }
98 }
99
100 pub fn stats(&self) -> MemoryPoolStats {
102 let free_count = self.free_blocks.lock().map(|f| f.len()).unwrap_or(0);
103 let allocated_count = self.allocated_blocks.lock().map(|a| *a).unwrap_or(0);
104
105 MemoryPoolStats {
106 block_size: self.block_size,
107 total_blocks: self.total_blocks,
108 free_blocks: free_count,
109 allocated_blocks: allocated_count,
110 memory_usage: allocated_count * self.block_size,
111 }
112 }
113}
114
115impl Drop for FixedSizeMemoryPool {
116 fn drop(&mut self) {
117 let mut free_blocks = self.free_blocks.lock().unwrap();
119 while let Some(ptr) = free_blocks.pop_front() {
120 unsafe {
121 dealloc(ptr.as_ptr(), self.layout);
122 }
123 }
124 }
125}
126
/// A raw block handed out by a `FixedSizeMemoryPool`.
///
/// NOTE(review): the handle carries no back-reference to its pool, so the
/// pool's free list and counters cannot be updated when this is dropped.
pub struct PooledMemory {
    /// Start of the block; allocated with the pool's `Layout`.
    ptr: NonNull<u8>,
    /// Block length in bytes (the pool's `block_size`).
    size: usize,
}
132
impl PooledMemory {
    /// Mutable view of the whole block as bytes.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        // SAFETY: `ptr` is the start of a live allocation of `size` bytes
        // and `&mut self` gives exclusive access for the borrow's lifetime.
        // NOTE(review): fresh blocks are uninitialised; write before reading.
        unsafe { std::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.size) }
    }

    /// Shared view of the whole block as bytes.
    pub fn as_slice(&self) -> &[u8] {
        // SAFETY: `ptr`/`size` describe a live allocation owned by `self`.
        // NOTE(review): reading a never-written block observes uninitialised
        // memory, which is UB — consider zeroing blocks at allocation time.
        unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.size) }
    }

    /// Raw pointer to the block's first byte.
    pub fn as_ptr(&self) -> *mut u8 {
        self.ptr.as_ptr()
    }

    /// Block length in bytes.
    pub fn size(&self) -> usize {
        self.size
    }
}
154
155impl Drop for PooledMemory {
156 fn drop(&mut self) {
157 }
161}
162
// SAFETY: `PooledMemory` exclusively owns its allocation — `ptr` is never
// shared with another handle — and mutation goes through `&mut self`
// (`as_mut_slice`), so moving or sharing the handle across threads cannot
// race. NOTE(review): `as_ptr(&self)` exposes a `*mut u8` from a shared
// reference; concurrent writes through it would be UB, but any use of that
// raw pointer is already `unsafe` on the caller's side.
unsafe impl Send for PooledMemory {}
unsafe impl Sync for PooledMemory {}
165
/// Point-in-time usage snapshot of a `FixedSizeMemoryPool`.
#[derive(Debug, Clone)]
pub struct MemoryPoolStats {
    /// Size in bytes of each block in the pool.
    pub block_size: usize,
    /// Number of blocks pre-allocated at pool construction.
    pub total_blocks: usize,
    /// Blocks currently sitting in the free list.
    pub free_blocks: usize,
    /// Blocks currently handed out.
    pub allocated_blocks: usize,
    /// `allocated_blocks * block_size` — bytes currently handed out.
    pub memory_usage: usize,
}
175
/// A set of `FixedSizeMemoryPool`s, one per common f32-vector size class.
pub struct VectorMemoryPool {
    /// Size-class pools in construction order (ascending block size).
    /// NOTE(review): the vector is never mutated after `new`, so the
    /// `RwLock` only adds overhead — a plain `Vec` would do; confirm no
    /// dynamic pool registration is planned before simplifying.
    pools: RwLock<Vec<Arc<FixedSizeMemoryPool>>>,
}
180
181impl VectorMemoryPool {
182 pub fn new() -> Self {
184 let common_sizes = vec![
185 64 * 4, 128 * 4, 256 * 4, 512 * 4, 1024 * 4, 2048 * 4, ];
192
193 let mut pools = Vec::new();
194 for size in common_sizes {
195 if let Ok(pool) = FixedSizeMemoryPool::new(size, 100) {
196 pools.push(Arc::new(pool));
197 }
198 }
199
200 Self {
201 pools: RwLock::new(pools),
202 }
203 }
204
205 pub fn allocate_for_vector(&self, element_count: usize) -> Option<PooledMemory> {
207 let needed_size = element_count * mem::size_of::<f32>();
208
209 if let Ok(pools) = self.pools.read() {
210 for pool in pools.iter() {
212 if pool.block_size >= needed_size {
213 return pool.allocate();
214 }
215 }
216 }
217
218 None
219 }
220
221 pub fn create_vector(&self, size: usize) -> MemoryPooledVector {
223 if let Some(memory) = self.allocate_for_vector(size) {
224 MemoryPooledVector::with_pooled_memory(size, memory)
225 } else {
226 MemoryPooledVector::new(size)
227 }
228 }
229
230 pub fn stats(&self) -> Vec<MemoryPoolStats> {
232 if let Ok(pools) = self.pools.read() {
233 pools.iter().map(|pool| pool.stats()).collect()
234 } else {
235 Vec::new()
236 }
237 }
238}
239
240impl Default for VectorMemoryPool {
241 fn default() -> Self {
242 Self::new()
243 }
244}
245
/// An `Array1<f32>` bundled with an optional pooled block.
///
/// NOTE(review): the pooled block is only kept *alive* alongside the array —
/// `Array1::zeros` allocates its own storage — so pooling currently saves no
/// allocations; confirm whether backing the array with `_memory` was the
/// original intent.
pub struct MemoryPooledVector {
    /// The actual vector data (independently allocated by ndarray).
    data: Array1<f32>,
    /// Pooled block held only to pin it for this value's lifetime.
    _memory: Option<PooledMemory>,
}
251
252impl MemoryPooledVector {
253 pub fn new(size: usize) -> Self {
255 Self {
256 data: Array1::zeros(size),
257 _memory: None,
258 }
259 }
260
261 pub fn with_pooled_memory(size: usize, _memory: PooledMemory) -> Self {
263 Self {
265 data: Array1::zeros(size),
266 _memory: Some(_memory),
267 }
268 }
269
270 pub fn as_array(&self) -> &Array1<f32> {
272 &self.data
273 }
274
275 pub fn as_array_mut(&mut self) -> &mut Array1<f32> {
277 &mut self.data
278 }
279
280 pub fn into_array(self) -> Array1<f32> {
282 self.data
283 }
284
285 pub fn len(&self) -> usize {
287 self.data.len()
288 }
289
290 pub fn is_empty(&self) -> bool {
292 self.data.is_empty()
293 }
294}
295
// SAFETY: the auto traits are blocked only by the `NonNull<u8>` pointers in
// `free_blocks`; all access to those pointers goes through the pool's
// `Mutex`es, so sharing or sending the pool across threads cannot race.
unsafe impl Send for FixedSizeMemoryPool {}
unsafe impl Sync for FixedSizeMemoryPool {}
300
/// Lazily-initialised process-wide vector pool.
static GLOBAL_VECTOR_POOL: std::sync::OnceLock<Arc<VectorMemoryPool>> = std::sync::OnceLock::new();

/// Returns the shared global `VectorMemoryPool`, creating it on first use.
///
/// NOTE(review): the `Arc` wrapper is only useful if callers clone the
/// handle; a bare `OnceLock<VectorMemoryPool>` returning `&'static` would
/// otherwise suffice — confirm before simplifying.
pub fn global_vector_pool() -> &'static Arc<VectorMemoryPool> {
    GLOBAL_VECTOR_POOL.get_or_init(|| Arc::new(VectorMemoryPool::new()))
}
308
/// Simple bump allocator over a fixed, zero-initialised byte buffer.
pub struct ArenaAllocator {
    /// Backing storage; never reallocated after construction.
    memory: Vec<u8>,
    /// Bump pointer: offset of the first unused byte.
    current_offset: usize,
    /// NOTE(review): unused — `allocate` computes alignment per `T`; this
    /// f32 alignment recorded at construction is vestigial.
    _alignment: usize,
}
315
316impl ArenaAllocator {
317 pub fn new(size: usize) -> Self {
319 Self {
320 memory: vec![0; size],
321 current_offset: 0,
322 _alignment: mem::align_of::<f32>(),
323 }
324 }
325
326 pub fn allocate<T>(&mut self, count: usize) -> Option<&mut [T]> {
328 let size = count * mem::size_of::<T>();
329 let align = mem::align_of::<T>();
330
331 let aligned_offset = (self.current_offset + align - 1) & !(align - 1);
333
334 if aligned_offset + size > self.memory.len() {
335 return None; }
337
338 let ptr = unsafe { self.memory.as_mut_ptr().add(aligned_offset) as *mut T };
339
340 self.current_offset = aligned_offset + size;
341
342 Some(unsafe { std::slice::from_raw_parts_mut(ptr, count) })
343 }
344
345 pub fn reset(&mut self) {
347 self.current_offset = 0;
348 }
349
350 pub fn stats(&self) -> ArenaStats {
352 ArenaStats {
353 total_size: self.memory.len(),
354 used_size: self.current_offset,
355 free_size: self.memory.len() - self.current_offset,
356 fragmentation: 0.0, }
358 }
359}
360
/// Usage snapshot of an `ArenaAllocator`.
#[derive(Debug, Clone)]
pub struct ArenaStats {
    /// Capacity of the arena's backing buffer in bytes.
    pub total_size: usize,
    /// Bytes consumed so far (including alignment padding).
    pub used_size: usize,
    /// Bytes still available (`total_size - used_size`).
    pub free_size: usize,
    /// Always 0.0 — a bump allocator does not fragment.
    pub fragmentation: f32,
}
369
/// Runs a user-supplied closure over fixed-size chunks of input.
pub struct BatchMemoryProcessor<T, U> {
    /// Scratch arena reset before each batch.
    /// NOTE(review): nothing currently allocates from it during processing —
    /// the closure returns its own `Vec` — so the arena is effectively idle.
    arena: ArenaAllocator,
    /// Maximum number of items handed to `processor` per call.
    batch_size: usize,
    /// Per-batch transform applied to each chunk.
    processor: Box<dyn Fn(&[T]) -> Vec<U>>,
}
376
377impl<T, U> BatchMemoryProcessor<T, U>
378where
379 T: Copy,
380 U: Clone,
381{
382 pub fn new<F>(arena_size: usize, batch_size: usize, processor: F) -> Self
384 where
385 F: Fn(&[T]) -> Vec<U> + 'static,
386 {
387 Self {
388 arena: ArenaAllocator::new(arena_size),
389 batch_size,
390 processor: Box::new(processor),
391 }
392 }
393
394 pub fn process_batches(&mut self, items: &[T]) -> Vec<U> {
396 let mut results = Vec::new();
397
398 for chunk in items.chunks(self.batch_size) {
399 self.arena.reset();
401
402 let batch_results = (self.processor)(chunk);
404 results.extend(batch_results);
405 }
406
407 results
408 }
409
410 pub fn arena_stats(&self) -> ArenaStats {
412 self.arena.stats()
413 }
414}
415
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_fixed_size_memory_pool() {
        let pool = FixedSizeMemoryPool::new(1024, 10).unwrap();

        let block1 = pool.allocate().unwrap();
        let block2 = pool.allocate().unwrap();

        assert_eq!(block1.size(), 1024);
        assert_eq!(block2.size(), 1024);

        let stats = pool.stats();
        assert_eq!(stats.allocated_blocks, 2);
        assert_eq!(stats.free_blocks, 8);

        drop(block1);
        drop(block2);

        // BUG FIX(test): the original asserted allocated_blocks == 0 and
        // free_blocks == 10 here, but dropping a PooledMemory never calls
        // FixedSizeMemoryPool::deallocate (the handle carries no pool
        // back-reference), so the counters do not move and the test failed.
        // Assert the actual observable state; restore the stronger
        // assertions once drop-to-pool recycling is implemented.
        let stats = pool.stats();
        assert_eq!(stats.allocated_blocks, 2);
        assert_eq!(stats.free_blocks, 8);
    }

    #[test]
    fn test_vector_memory_pool() {
        let pool = VectorMemoryPool::new();

        let vector1 = pool.create_vector(128);
        let vector2 = pool.create_vector(1024);

        assert_eq!(vector1.len(), 128);
        assert_eq!(vector2.len(), 1024);

        // Vectors come back zero-initialised regardless of pooling.
        assert!(vector1.as_array().iter().all(|&x| x == 0.0));
        assert!(vector2.as_array().iter().all(|&x| x == 0.0));
    }

    #[test]
    fn test_global_vector_pool() {
        let pool = global_vector_pool();
        let vector = pool.create_vector(256);

        assert_eq!(vector.len(), 256);
        assert!(!vector.is_empty());
    }

    #[test]
    fn test_arena_allocator() {
        let mut arena = ArenaAllocator::new(1024);

        let array1 = arena.allocate::<f32>(64).unwrap();
        assert_eq!(array1.len(), 64);

        {
            for (i, val) in array1.iter_mut().enumerate() {
                *val = i as f32;
            }

            assert_eq!(array1[0], 0.0);
            assert_eq!(array1[63], 63.0);
        }

        let array2 = arena.allocate::<f32>(32).unwrap();
        assert_eq!(array2.len(), 32);

        // After reset, the full buffer is available again.
        arena.reset();
        let array3 = arena.allocate::<f32>(128).unwrap();
        assert_eq!(array3.len(), 128);
    }

    #[test]
    fn test_batch_memory_processor() {
        let mut processor = BatchMemoryProcessor::new(4096, 10, |batch: &[i32]| {
            batch.iter().map(|&x| x * 2).collect()
        });

        let input = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let result = processor.process_batches(&input);

        assert_eq!(result, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]);
    }
}