// optirs_gpu/memory/allocation/slab_allocator.rs

// Slab allocator for GPU memory management
//
// This module implements a slab allocator optimized for fixed-size allocations.
// Slab allocation is highly efficient for objects of the same size and provides
// excellent cache locality and minimal fragmentation.

#[allow(dead_code)]
use std::collections::{HashMap, VecDeque};
use std::ptr::NonNull;
use std::sync::{Arc, Mutex};
use std::time::Instant;
12
/// Slab allocator for fixed-size objects.
///
/// Owns one [`SlabCache`] per aligned object size plus the backing
/// [`MemoryPool`] the caches carve their slabs from.
pub struct SlabAllocator {
    /// Cache configurations indexed by object size
    caches: HashMap<usize, SlabCache>,
    /// Statistics for the entire allocator
    // NOTE(review): never written by the visible code; public stats come from
    // `get_stats()`, which recomputes them on demand — confirm before removal.
    stats: SlabStats,
    /// Configuration
    config: SlabConfig,
    /// Memory pool for backing slabs
    memory_pool: MemoryPool,
}
24
/// Slab cache for objects of a specific size.
///
/// Slabs live in `slabs` and are referenced by index from the three state
/// lists (`partial_slabs`, `full_slabs`, `empty_slabs`); entries are never
/// removed from `slabs`, so the indices stay stable.
pub struct SlabCache {
    /// Object size for this cache
    object_size: usize,
    /// List of slabs (pages)
    slabs: Vec<Slab>,
    /// Partially filled slabs (indices into `slabs`)
    partial_slabs: VecDeque<usize>,
    /// Full slabs (indices into `slabs`)
    full_slabs: Vec<usize>,
    /// Empty slabs (indices into `slabs`)
    empty_slabs: VecDeque<usize>,
    /// Cache statistics
    stats: CacheStats,
    /// Cache configuration
    config: CacheConfig,
}
42
/// Individual slab (page) containing multiple fixed-size objects.
pub struct Slab {
    /// Base address of the slab
    base_ptr: NonNull<u8>,
    /// Size of the slab in bytes
    slab_size: usize,
    /// Object size in bytes
    object_size: usize,
    /// Number of objects in this slab (slab_size / object_size)
    object_count: usize,
    /// Free object list (indices into the slab's object grid)
    free_objects: VecDeque<usize>,
    /// Allocated object count
    allocated_count: usize,
    /// Allocation bitmap (one bit per object), used for double-free detection
    allocation_bitmap: Vec<u64>,
    /// Slab creation time
    created_at: Instant,
    /// Last allocation time
    last_alloc: Option<Instant>,
    /// Last deallocation time
    last_dealloc: Option<Instant>,
    /// Access frequency counter (bumped on allocate only)
    access_count: u64,
}
68
impl Slab {
    /// Construct a slab over `slab_size` bytes starting at `base_ptr`,
    /// divided into `object_size`-byte objects.
    ///
    /// NOTE(review): divides by `object_size` — callers must guarantee it is
    /// non-zero. Trailing bytes (`slab_size % object_size`) are unused.
    pub fn new(base_ptr: NonNull<u8>, slab_size: usize, object_size: usize) -> Self {
        let object_count = slab_size / object_size;
        let bitmap_size = object_count.div_ceil(64); // Round up to nearest 64-bit word

        // Seed the free list with every object index, in ascending order.
        let mut free_objects = VecDeque::with_capacity(object_count);
        for i in 0..object_count {
            free_objects.push_back(i);
        }

        Self {
            base_ptr,
            slab_size,
            object_size,
            object_count,
            free_objects,
            allocated_count: 0,
            allocation_bitmap: vec![0; bitmap_size],
            created_at: Instant::now(),
            last_alloc: None,
            last_dealloc: None,
            access_count: 0,
        }
    }

    /// Allocate an object from this slab.
    ///
    /// Pops the next free index, flags it in the bitmap, and returns the
    /// object's address; `None` when the slab is full.
    pub fn allocate(&mut self) -> Option<NonNull<u8>> {
        if let Some(object_index) = self.free_objects.pop_front() {
            // Mark object as allocated in bitmap (one bit per object).
            let word_index = object_index / 64;
            let bit_index = object_index % 64;
            self.allocation_bitmap[word_index] |= 1u64 << bit_index;

            self.allocated_count += 1;
            self.last_alloc = Some(Instant::now());
            self.access_count += 1;

            // Calculate object address from its index.
            let object_offset = object_index * self.object_size;
            // SAFETY: object_index < object_count, so the offset stays within
            // [base_ptr, base_ptr + slab_size); the result is derived from a
            // NonNull base and is therefore non-null.
            let object_ptr =
                unsafe { NonNull::new_unchecked(self.base_ptr.as_ptr().add(object_offset)) };

            Some(object_ptr)
        } else {
            None
        }
    }

    /// Deallocate an object in this slab.
    ///
    /// # Errors
    /// * [`SlabError::InvalidPointer`] — pointer outside the slab, misaligned
    ///   to the object grid, or index out of range.
    /// * [`SlabError::DoubleFree`] — the bitmap shows the object already free.
    pub fn deallocate(&mut self, ptr: NonNull<u8>) -> Result<(), SlabError> {
        // Calculate object index from pointer.
        let ptr_addr = ptr.as_ptr() as usize;
        let base_addr = self.base_ptr.as_ptr() as usize;

        if ptr_addr < base_addr || ptr_addr >= base_addr + self.slab_size {
            return Err(SlabError::InvalidPointer(
                "Pointer not in this slab".to_string(),
            ));
        }

        let offset = ptr_addr - base_addr;
        if !offset.is_multiple_of(self.object_size) {
            return Err(SlabError::InvalidPointer(
                "Pointer not aligned to object boundary".to_string(),
            ));
        }

        let object_index = offset / self.object_size;
        if object_index >= self.object_count {
            return Err(SlabError::InvalidPointer(
                "Object index out of bounds".to_string(),
            ));
        }

        // Check if object is actually allocated (double-free guard).
        let word_index = object_index / 64;
        let bit_index = object_index % 64;
        if (self.allocation_bitmap[word_index] & (1u64 << bit_index)) == 0 {
            return Err(SlabError::DoubleFree("Object already free".to_string()));
        }

        // Mark as free and recycle the index.
        self.allocation_bitmap[word_index] &= !(1u64 << bit_index);
        self.free_objects.push_back(object_index);
        self.allocated_count -= 1;
        self.last_dealloc = Some(Instant::now());

        Ok(())
    }

    /// Check if slab is full (no free objects left).
    pub fn is_full(&self) -> bool {
        self.allocated_count == self.object_count
    }

    /// Check if slab is empty (no live objects).
    pub fn is_empty(&self) -> bool {
        self.allocated_count == 0
    }

    /// Check if slab is partially filled.
    pub fn is_partial(&self) -> bool {
        self.allocated_count > 0 && self.allocated_count < self.object_count
    }

    /// Get utilization ratio (0.0 to 1.0).
    ///
    /// NOTE(review): divides by `object_count`; yields NaN for a zero-object
    /// slab (possible when `slab_size < object_size`).
    pub fn get_utilization(&self) -> f64 {
        self.allocated_count as f64 / self.object_count as f64
    }

    /// Get a point-in-time statistics snapshot for this slab.
    pub fn get_stats(&self) -> SlabStats {
        SlabStats {
            total_objects: self.object_count,
            allocated_objects: self.allocated_count,
            free_objects: self.object_count - self.allocated_count,
            utilization: self.get_utilization(),
            access_count: self.access_count,
            age: self.created_at.elapsed(),
        }
    }
}
191
/// Memory pool for backing slab storage.
///
/// A bump allocator with a recycling free list: slabs are carved from a
/// contiguous region, and freed slabs are tracked in `free_regions`, kept
/// sorted by offset so adjacent regions can coalesce.
pub struct MemoryPool {
    /// Base address of the memory pool
    base_ptr: NonNull<u8>,
    /// Total size of the memory pool in bytes
    total_size: usize,
    /// Bump-allocation high-water mark
    current_offset: usize,
    /// Free regions available for reuse, sorted by offset
    free_regions: VecDeque<FreeRegion>,
    /// Allocation alignment (expected to be a power of two)
    alignment: usize,
}
205
/// A contiguous span of recycled pool memory, identified by its pool offset.
#[derive(Debug, Clone)]
pub struct FreeRegion {
    /// Byte offset from the pool base
    pub offset: usize,
    /// Region length in bytes
    pub size: usize,
    /// When the region was returned to the pool
    pub freed_at: Instant,
}
213
214impl MemoryPool {
215    pub fn new(base_ptr: NonNull<u8>, total_size: usize, alignment: usize) -> Self {
216        Self {
217            base_ptr,
218            total_size,
219            current_offset: 0,
220            free_regions: VecDeque::new(),
221            alignment,
222        }
223    }
224
225    /// Allocate a slab from the memory pool
226    pub fn allocate_slab(&mut self, size: usize) -> Option<NonNull<u8>> {
227        let aligned_size = (size + self.alignment - 1) & !(self.alignment - 1);
228
229        // Try to reuse a free region first
230        if let Some(region_index) = self.find_suitable_free_region(aligned_size) {
231            let region = self.free_regions.remove(region_index).unwrap();
232            let ptr = unsafe { NonNull::new_unchecked(self.base_ptr.as_ptr().add(region.offset)) };
233
234            // If region is larger than needed, split it
235            if region.size > aligned_size {
236                let remaining_region = FreeRegion {
237                    offset: region.offset + aligned_size,
238                    size: region.size - aligned_size,
239                    freed_at: region.freed_at,
240                };
241                self.free_regions.push_back(remaining_region);
242            }
243
244            return Some(ptr);
245        }
246
247        // Allocate from the end of the pool
248        if self.current_offset + aligned_size <= self.total_size {
249            let ptr =
250                unsafe { NonNull::new_unchecked(self.base_ptr.as_ptr().add(self.current_offset)) };
251            self.current_offset += aligned_size;
252            Some(ptr)
253        } else {
254            None
255        }
256    }
257
258    /// Free a slab back to the memory pool
259    pub fn free_slab(&mut self, ptr: NonNull<u8>, size: usize) {
260        let base_addr = self.base_ptr.as_ptr() as usize;
261        let ptr_addr = ptr.as_ptr() as usize;
262
263        if ptr_addr >= base_addr && ptr_addr < base_addr + self.total_size {
264            let offset = ptr_addr - base_addr;
265            let region = FreeRegion {
266                offset,
267                size,
268                freed_at: Instant::now(),
269            };
270
271            // Insert in sorted order to facilitate coalescing
272            let insert_pos = self
273                .free_regions
274                .binary_search_by_key(&offset, |r| r.offset)
275                .unwrap_or_else(|pos| pos);
276
277            self.free_regions.insert(insert_pos, region);
278
279            // Try to coalesce adjacent regions
280            self.coalesce_free_regions();
281        }
282    }
283
284    fn find_suitable_free_region(&self, size: usize) -> Option<usize> {
285        self.free_regions
286            .iter()
287            .position(|region| region.size >= size)
288    }
289
290    fn coalesce_free_regions(&mut self) {
291        let mut i = 0;
292        while i < self.free_regions.len().saturating_sub(1) {
293            let current_end = self.free_regions[i].offset + self.free_regions[i].size;
294            if current_end == self.free_regions[i + 1].offset {
295                // Coalesce regions
296                let next_region = self.free_regions.remove(i + 1).unwrap();
297                self.free_regions[i].size += next_region.size;
298            } else {
299                i += 1;
300            }
301        }
302    }
303
304    pub fn get_usage(&self) -> MemoryPoolUsage {
305        let free_size = self.free_regions.iter().map(|r| r.size).sum::<usize>();
306        let allocated_size = self.current_offset - free_size;
307
308        MemoryPoolUsage {
309            total_size: self.total_size,
310            allocated_size,
311            free_size,
312            current_offset: self.current_offset,
313            free_regions: self.free_regions.len(),
314        }
315    }
316}
317
/// Per-cache tuning knobs: slab geometry, empty-slab retention, and optional
/// object lifecycle hooks.
#[derive(Debug, Clone)]
pub struct CacheConfig {
    /// Objects per slab
    pub objects_per_slab: usize,
    /// Maximum number of empty slabs to keep before reclamation
    pub max_empty_slabs: usize,
    /// Enable slab coloring for cache performance
    // NOTE(review): the visible code only adds `color_offset` bytes to the
    // slab size; no per-slab color rotation is implemented — confirm intent.
    pub enable_coloring: bool,
    /// Color offset for cache line alignment
    pub color_offset: usize,
    /// Enable object construction/destruction hooks
    pub enable_ctor_dtor: bool,
    /// Object constructor function (run on each successful allocation)
    pub constructor: Option<fn(*mut u8)>,
    /// Object destructor function (run before each deallocation)
    pub destructor: Option<fn(*mut u8)>,
}
336
337impl Default for CacheConfig {
338    fn default() -> Self {
339        Self {
340            objects_per_slab: 64,
341            max_empty_slabs: 3,
342            enable_coloring: true,
343            color_offset: 0,
344            enable_ctor_dtor: false,
345            constructor: None,
346            destructor: None,
347        }
348    }
349}
350
/// Slab allocator configuration.
#[derive(Debug, Clone)]
pub struct SlabConfig {
    /// Default slab size
    // NOTE(review): not referenced by the visible code — slab sizes come from
    // `CacheConfig::objects_per_slab * object_size`; confirm before removing.
    pub default_slab_size: usize,
    /// Memory alignment requirement (expected to be a power of two)
    pub alignment: usize,
    /// Enable statistics collection
    pub enable_stats: bool,
    /// Enable debugging features
    pub enable_debug: bool,
    /// Memory reclamation threshold
    pub reclaim_threshold: f64,
    /// Enable automatic reclamation
    pub auto_reclaim: bool,
}
367
368impl Default for SlabConfig {
369    fn default() -> Self {
370        Self {
371            default_slab_size: 4096, // 4KB page size
372            alignment: 256,
373            enable_stats: true,
374            enable_debug: false,
375            reclaim_threshold: 0.8,
376            auto_reclaim: true,
377        }
378    }
379}
380
/// Cumulative per-cache counters (monotonic, except `objects_allocated`,
/// which tracks currently live objects).
#[derive(Debug, Clone, Default)]
pub struct CacheStats {
    /// Allocation requests received by the cache
    pub total_allocations: u64,
    /// Deallocation requests completed
    pub total_deallocations: u64,
    /// Allocations served from an existing (partial or empty) slab
    pub cache_hits: u64,
    /// Allocations that required carving a new slab from the pool
    pub cache_misses: u64,
    /// Slabs created from the pool
    pub slab_allocations: u64,
    /// Slabs returned to the pool
    pub slab_deallocations: u64,
    /// Currently live objects
    pub objects_allocated: u64,
    /// Cumulative count of freed objects
    pub objects_free: u64,
    /// Average slab utilization
    // NOTE(review): never updated by the visible code.
    pub average_utilization: f64,
}
394
/// Point-in-time snapshot of a single slab (produced by [`Slab::get_stats`]).
#[derive(Debug, Clone, Default)]
pub struct SlabStats {
    /// Total object slots in the slab
    pub total_objects: usize,
    /// Slots currently allocated
    pub allocated_objects: usize,
    /// Slots currently free
    pub free_objects: usize,
    /// allocated / total, in the range 0.0–1.0
    pub utilization: f64,
    /// Number of allocations the slab has served
    pub access_count: u64,
    /// Time elapsed since the slab was created
    pub age: std::time::Duration,
}
405
/// Memory pool usage statistics (snapshot from [`MemoryPool::get_usage`]).
#[derive(Debug, Clone)]
pub struct MemoryPoolUsage {
    /// Pool capacity in bytes
    pub total_size: usize,
    /// Bytes currently handed out (bump watermark minus recycled regions)
    pub allocated_size: usize,
    /// Bytes sitting in recycled free regions
    pub free_size: usize,
    /// Bump-allocation high-water mark
    pub current_offset: usize,
    /// Number of distinct free regions
    pub free_regions: usize,
}
415
416impl SlabCache {
417    pub fn new(object_size: usize, config: CacheConfig) -> Self {
418        Self {
419            object_size,
420            slabs: Vec::new(),
421            partial_slabs: VecDeque::new(),
422            full_slabs: Vec::new(),
423            empty_slabs: VecDeque::new(),
424            stats: CacheStats::default(),
425            config,
426        }
427    }
428
429    /// Allocate an object from this cache
430    pub fn allocate(&mut self, memory_pool: &mut MemoryPool) -> Result<NonNull<u8>, SlabError> {
431        self.stats.total_allocations += 1;
432
433        // Try partial slabs first
434        if let Some(&slab_index) = self.partial_slabs.front() {
435            if let Some(ptr) = self.slabs[slab_index].allocate() {
436                self.stats.cache_hits += 1;
437                self.stats.objects_allocated += 1;
438
439                // Move to full slabs if now full
440                if self.slabs[slab_index].is_full() {
441                    self.partial_slabs.pop_front();
442                    self.full_slabs.push(slab_index);
443                }
444
445                // Apply constructor if enabled
446                if self.config.enable_ctor_dtor {
447                    if let Some(ctor) = self.config.constructor {
448                        ctor(ptr.as_ptr());
449                    }
450                }
451
452                return Ok(ptr);
453            }
454        }
455
456        // Try empty slabs
457        if let Some(slab_index) = self.empty_slabs.pop_front() {
458            if let Some(ptr) = self.slabs[slab_index].allocate() {
459                self.stats.cache_hits += 1;
460                self.stats.objects_allocated += 1;
461                self.partial_slabs.push_back(slab_index);
462
463                if self.config.enable_ctor_dtor {
464                    if let Some(ctor) = self.config.constructor {
465                        ctor(ptr.as_ptr());
466                    }
467                }
468
469                return Ok(ptr);
470            }
471        }
472
473        // Need to allocate a new slab
474        self.stats.cache_misses += 1;
475        self.allocate_new_slab(memory_pool)?;
476
477        // Try allocation again with new slab
478        if let Some(&slab_index) = self.partial_slabs.back() {
479            if let Some(ptr) = self.slabs[slab_index].allocate() {
480                self.stats.objects_allocated += 1;
481
482                if self.config.enable_ctor_dtor {
483                    if let Some(ctor) = self.config.constructor {
484                        ctor(ptr.as_ptr());
485                    }
486                }
487
488                return Ok(ptr);
489            }
490        }
491
492        Err(SlabError::OutOfMemory(
493            "Failed to allocate after creating new slab".to_string(),
494        ))
495    }
496
497    /// Deallocate an object back to this cache
498    pub fn deallocate(&mut self, ptr: NonNull<u8>) -> Result<(), SlabError> {
499        // Apply destructor if enabled
500        if self.config.enable_ctor_dtor {
501            if let Some(dtor) = self.config.destructor {
502                dtor(ptr.as_ptr());
503            }
504        }
505
506        // Find which slab contains this pointer
507        let mut slab_index = None;
508        for (i, slab) in self.slabs.iter().enumerate() {
509            let base_addr = slab.base_ptr.as_ptr() as usize;
510            let ptr_addr = ptr.as_ptr() as usize;
511
512            if ptr_addr >= base_addr && ptr_addr < base_addr + slab.slab_size {
513                slab_index = Some(i);
514                break;
515            }
516        }
517
518        let slab_index = slab_index.ok_or_else(|| {
519            SlabError::InvalidPointer("Pointer not found in any slab".to_string())
520        })?;
521
522        let was_full = self.slabs[slab_index].is_full();
523        self.slabs[slab_index].deallocate(ptr)?;
524
525        self.stats.total_deallocations += 1;
526        self.stats.objects_allocated -= 1;
527        self.stats.objects_free += 1;
528
529        // Update slab lists based on new state
530        if was_full {
531            // Remove from full slabs, add to partial
532            if let Some(pos) = self.full_slabs.iter().position(|&i| i == slab_index) {
533                self.full_slabs.remove(pos);
534                self.partial_slabs.push_back(slab_index);
535            }
536        } else if self.slabs[slab_index].is_empty() {
537            // Remove from partial, add to empty
538            if let Some(pos) = self.partial_slabs.iter().position(|&i| i == slab_index) {
539                self.partial_slabs.remove(pos);
540                self.empty_slabs.push_back(slab_index);
541            }
542        }
543
544        Ok(())
545    }
546
547    fn allocate_new_slab(&mut self, memory_pool: &mut MemoryPool) -> Result<(), SlabError> {
548        let slab_size = self.calculate_slab_size();
549
550        let slab_ptr = memory_pool.allocate_slab(slab_size).ok_or_else(|| {
551            SlabError::OutOfMemory("Cannot allocate slab from memory pool".to_string())
552        })?;
553
554        let slab = Slab::new(slab_ptr, slab_size, self.object_size);
555        let slab_index = self.slabs.len();
556
557        self.slabs.push(slab);
558        self.partial_slabs.push_back(slab_index);
559        self.stats.slab_allocations += 1;
560
561        Ok(())
562    }
563
564    fn calculate_slab_size(&self) -> usize {
565        // Calculate optimal slab size based on object size and configuration
566        let objects_per_slab = self.config.objects_per_slab;
567        let base_size = objects_per_slab * self.object_size;
568
569        // Add coloring offset if enabled
570        if self.config.enable_coloring {
571            base_size + self.config.color_offset
572        } else {
573            base_size
574        }
575    }
576
577    /// Get cache statistics
578    pub fn get_stats(&self) -> &CacheStats {
579        &self.stats
580    }
581
582    /// Get detailed cache information
583    pub fn get_cache_info(&self) -> CacheInfo {
584        let total_objects = self.slabs.iter().map(|s| s.object_count).sum();
585        let allocated_objects = self.slabs.iter().map(|s| s.allocated_count).sum();
586        let average_utilization = if total_objects > 0 {
587            allocated_objects as f64 / total_objects as f64
588        } else {
589            0.0
590        };
591
592        CacheInfo {
593            object_size: self.object_size,
594            total_slabs: self.slabs.len(),
595            partial_slabs: self.partial_slabs.len(),
596            full_slabs: self.full_slabs.len(),
597            empty_slabs: self.empty_slabs.len(),
598            total_objects,
599            allocated_objects,
600            free_objects: total_objects - allocated_objects,
601            average_utilization,
602            memory_overhead: self.calculate_memory_overhead(),
603        }
604    }
605
606    fn calculate_memory_overhead(&self) -> f64 {
607        let useful_memory: usize = self
608            .slabs
609            .iter()
610            .map(|s| s.allocated_count * s.object_size)
611            .sum();
612
613        let total_memory: usize = self.slabs.iter().map(|s| s.slab_size).sum();
614
615        if total_memory > 0 {
616            1.0 - (useful_memory as f64 / total_memory as f64)
617        } else {
618            0.0
619        }
620    }
621
622    /// Reclaim empty slabs
623    pub fn reclaim_empty_slabs(&mut self, memory_pool: &mut MemoryPool) -> usize {
624        let mut reclaimed = 0;
625        let keep_count = self.config.max_empty_slabs;
626
627        while self.empty_slabs.len() > keep_count {
628            if let Some(slab_index) = self.empty_slabs.pop_front() {
629                let slab = &self.slabs[slab_index];
630                memory_pool.free_slab(slab.base_ptr, slab.slab_size);
631                reclaimed += 1;
632                self.stats.slab_deallocations += 1;
633            }
634        }
635
636        reclaimed
637    }
638}
639
/// Detailed cache snapshot (produced by [`SlabCache::get_cache_info`]).
#[derive(Debug, Clone)]
pub struct CacheInfo {
    /// Object size served by the cache
    pub object_size: usize,
    /// Total slab slots (the slab vector length)
    pub total_slabs: usize,
    /// Partially filled slabs
    pub partial_slabs: usize,
    /// Completely full slabs
    pub full_slabs: usize,
    /// Completely empty slabs
    pub empty_slabs: usize,
    /// Object capacity across all slabs
    pub total_objects: usize,
    /// Currently live objects
    pub allocated_objects: usize,
    /// Free object slots
    pub free_objects: usize,
    /// allocated / total (0.0 when the cache holds no objects)
    pub average_utilization: f64,
    /// Fraction of backing memory not holding live objects
    pub memory_overhead: f64,
}
654
655impl SlabAllocator {
656    pub fn new(base_ptr: NonNull<u8>, total_size: usize, config: SlabConfig) -> Self {
657        let memory_pool = MemoryPool::new(base_ptr, total_size, config.alignment);
658
659        Self {
660            caches: HashMap::new(),
661            stats: SlabStats::default(),
662            memory_pool,
663            config,
664        }
665    }
666
667    /// Allocate object of specified size
668    pub fn allocate(&mut self, size: usize) -> Result<NonNull<u8>, SlabError> {
669        if size == 0 {
670            return Err(SlabError::InvalidSize(
671                "Cannot allocate zero bytes".to_string(),
672            ));
673        }
674
675        // Round up size to alignment boundary
676        let aligned_size = (size + self.config.alignment - 1) & !(self.config.alignment - 1);
677
678        // Get or create cache for this size
679        self.caches.entry(aligned_size).or_insert_with(|| {
680            let cache_config = CacheConfig::default();
681
682            SlabCache::new(aligned_size, cache_config)
683        });
684
685        let cache = self.caches.get_mut(&aligned_size).unwrap();
686        cache.allocate(&mut self.memory_pool)
687    }
688
689    /// Deallocate object
690    pub fn deallocate(&mut self, ptr: NonNull<u8>, size: usize) -> Result<(), SlabError> {
691        let aligned_size = (size + self.config.alignment - 1) & !(self.config.alignment - 1);
692
693        let cache = self
694            .caches
695            .get_mut(&aligned_size)
696            .ok_or_else(|| SlabError::InvalidPointer("No cache found for this size".to_string()))?;
697
698        cache.deallocate(ptr)
699    }
700
701    /// Get allocator statistics
702    pub fn get_stats(&self) -> SlabAllocatorStats {
703        let mut total_caches = 0;
704        let mut total_slabs = 0;
705        let mut total_objects = 0;
706        let mut allocated_objects = 0;
707        let mut total_allocations = 0;
708        let mut total_deallocations = 0;
709
710        for cache in self.caches.values() {
711            total_caches += 1;
712            let info = cache.get_cache_info();
713            total_slabs += info.total_slabs;
714            total_objects += info.total_objects;
715            allocated_objects += info.allocated_objects;
716
717            let stats = cache.get_stats();
718            total_allocations += stats.total_allocations;
719            total_deallocations += stats.total_deallocations;
720        }
721
722        let memory_usage = self.memory_pool.get_usage();
723
724        SlabAllocatorStats {
725            total_caches,
726            total_slabs,
727            total_objects,
728            allocated_objects,
729            free_objects: total_objects - allocated_objects,
730            total_allocations,
731            total_deallocations,
732            memory_usage,
733            cache_efficiency: if total_allocations > 0 {
734                allocated_objects as f64 / total_allocations as f64
735            } else {
736                0.0
737            },
738        }
739    }
740
741    /// Get information about all caches
742    pub fn get_all_cache_info(&self) -> Vec<(usize, CacheInfo)> {
743        self.caches
744            .iter()
745            .map(|(&size, cache)| (size, cache.get_cache_info()))
746            .collect()
747    }
748
749    /// Reclaim memory from empty slabs
750    pub fn reclaim_memory(&mut self) -> usize {
751        let mut total_reclaimed = 0;
752
753        for cache in self.caches.values_mut() {
754            total_reclaimed += cache.reclaim_empty_slabs(&mut self.memory_pool);
755        }
756
757        total_reclaimed
758    }
759
760    /// Destroy cache for specific size
761    pub fn destroy_cache(&mut self, size: usize) -> Result<(), SlabError> {
762        let aligned_size = (size + self.config.alignment - 1) & !(self.config.alignment - 1);
763
764        if let Some(mut cache) = self.caches.remove(&aligned_size) {
765            // Reclaim all slabs from this cache
766            cache.reclaim_empty_slabs(&mut self.memory_pool);
767            Ok(())
768        } else {
769            Err(SlabError::InvalidSize("Cache not found".to_string()))
770        }
771    }
772
773    /// Get memory pool usage
774    pub fn get_memory_usage(&self) -> MemoryPoolUsage {
775        self.memory_pool.get_usage()
776    }
777}
778
// Safety: SlabAllocator manages GPU memory pointers via NonNull<u8>. While NonNull is not Send/Sync by default,
// it's safe to share SlabAllocator across threads when protected by Arc<Mutex<>> because:
// 1. The pointers point to GPU memory managed by the GPU driver
// 2. The Mutex provides exclusive access for all mutable operations
// 3. No thread-local state is maintained
// NOTE(review): `Sync` also permits concurrent lock-free `&self` calls such
// as `get_stats()`; that is sound only while the type has no interior
// mutability — re-verify if Cell/RefCell/atomic fields are ever added.
unsafe impl Send for SlabAllocator {}
unsafe impl Sync for SlabAllocator {}
786
/// Allocator-wide statistics (produced by [`SlabAllocator::get_stats`]).
#[derive(Debug, Clone)]
pub struct SlabAllocatorStats {
    /// Number of size-class caches
    pub total_caches: usize,
    /// Slab slots across all caches
    pub total_slabs: usize,
    /// Object capacity across all caches
    pub total_objects: usize,
    /// Currently live objects
    pub allocated_objects: usize,
    /// Free object slots
    pub free_objects: usize,
    /// Cumulative allocation requests
    pub total_allocations: u64,
    /// Cumulative deallocations
    pub total_deallocations: u64,
    /// Backing pool usage snapshot
    pub memory_usage: MemoryPoolUsage,
    /// Live objects per allocation request (0.0 before any requests)
    pub cache_efficiency: f64,
}
800
/// Slab allocator errors.
#[derive(Debug, Clone)]
pub enum SlabError {
    /// Zero/overflowing size, or no cache exists for the size
    InvalidSize(String),
    /// The backing pool cannot satisfy the request
    OutOfMemory(String),
    /// Pointer not owned by any slab, or misaligned to the object grid
    InvalidPointer(String),
    /// Object was already free (caught by the allocation bitmap)
    DoubleFree(String),
    /// Slab bookkeeping is inconsistent
    // NOTE(review): never constructed by the visible code.
    CorruptedSlab(String),
}
810
811impl std::fmt::Display for SlabError {
812    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
813        match self {
814            SlabError::InvalidSize(msg) => write!(f, "Invalid size: {}", msg),
815            SlabError::OutOfMemory(msg) => write!(f, "Out of memory: {}", msg),
816            SlabError::InvalidPointer(msg) => write!(f, "Invalid pointer: {}", msg),
817            SlabError::DoubleFree(msg) => write!(f, "Double free: {}", msg),
818            SlabError::CorruptedSlab(msg) => write!(f, "Corrupted slab: {}", msg),
819        }
820    }
821}
822
823impl std::error::Error for SlabError {}
824
/// Thread-safe slab allocator wrapper.
///
/// Every operation serializes through the internal mutex.
pub struct ThreadSafeSlabAllocator {
    /// The wrapped allocator; each method holds the lock for its full duration
    allocator: Arc<Mutex<SlabAllocator>>,
}
829
830impl ThreadSafeSlabAllocator {
831    pub fn new(base_ptr: NonNull<u8>, total_size: usize, config: SlabConfig) -> Self {
832        let allocator = SlabAllocator::new(base_ptr, total_size, config);
833        Self {
834            allocator: Arc::new(Mutex::new(allocator)),
835        }
836    }
837
838    pub fn allocate(&self, size: usize) -> Result<NonNull<u8>, SlabError> {
839        let mut allocator = self.allocator.lock().unwrap();
840        allocator.allocate(size)
841    }
842
843    pub fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<(), SlabError> {
844        let mut allocator = self.allocator.lock().unwrap();
845        allocator.deallocate(ptr, size)
846    }
847
848    pub fn get_stats(&self) -> SlabAllocatorStats {
849        let allocator = self.allocator.lock().unwrap();
850        allocator.get_stats()
851    }
852
853    pub fn reclaim_memory(&self) -> usize {
854        let mut allocator = self.allocator.lock().unwrap();
855        allocator.reclaim_memory()
856    }
857}
858
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_slab_creation() {
        let size = 4096;
        // FIX: bind the buffer mutably and use `as_mut_ptr()` instead of
        // casting `as_ptr()` to `*mut u8` — never cast away constness.
        // The allocator only performs address arithmetic on this memory.
        let mut memory = vec![0u8; size];
        let ptr = NonNull::new(memory.as_mut_ptr()).unwrap();

        let slab = Slab::new(ptr, size, 64);
        assert_eq!(slab.object_count, size / 64);
        assert!(slab.is_empty());
        assert!(!slab.is_full());
    }

    #[test]
    fn test_slab_allocation() {
        let size = 4096;
        let mut memory = vec![0u8; size];
        let ptr = NonNull::new(memory.as_mut_ptr()).unwrap();

        let mut slab = Slab::new(ptr, size, 64);

        let alloc1 = slab.allocate();
        assert!(alloc1.is_some());
        assert!(slab.is_partial());

        let alloc2 = slab.allocate();
        assert!(alloc2.is_some());
        // Distinct objects must live at distinct addresses.
        assert_ne!(alloc1.unwrap(), alloc2.unwrap());
    }

    #[test]
    fn test_slab_deallocation() {
        let size = 4096;
        let mut memory = vec![0u8; size];
        let ptr = NonNull::new(memory.as_mut_ptr()).unwrap();

        let mut slab = Slab::new(ptr, size, 64);

        let alloc_ptr = slab.allocate().unwrap();
        let result = slab.deallocate(alloc_ptr);
        assert!(result.is_ok());
        // The slab should be fully free again after the round trip.
        assert!(slab.is_empty());
    }

    #[test]
    fn test_memory_pool() {
        let size = 1024 * 1024;
        let mut memory = vec![0u8; size];
        let ptr = NonNull::new(memory.as_mut_ptr()).unwrap();

        let mut pool = MemoryPool::new(ptr, size, 256);

        let slab1 = pool.allocate_slab(4096);
        assert!(slab1.is_some());

        let slab2 = pool.allocate_slab(4096);
        assert!(slab2.is_some());

        assert_ne!(slab1.unwrap(), slab2.unwrap());
    }

    #[test]
    fn test_slab_cache() {
        let size = 1024 * 1024;
        let mut memory = vec![0u8; size];
        let ptr = NonNull::new(memory.as_mut_ptr()).unwrap();

        let mut pool = MemoryPool::new(ptr, size, 256);
        let config = CacheConfig::default();
        let mut cache = SlabCache::new(64, config);

        let alloc1 = cache.allocate(&mut pool);
        assert!(alloc1.is_ok());

        let alloc2 = cache.allocate(&mut pool);
        assert!(alloc2.is_ok());
    }

    #[test]
    fn test_slab_allocator() {
        let size = 1024 * 1024;
        let mut memory = vec![0u8; size];
        let ptr = NonNull::new(memory.as_mut_ptr()).unwrap();

        let config = SlabConfig::default();
        let mut allocator = SlabAllocator::new(ptr, size, config);

        let alloc1 = allocator.allocate(64);
        assert!(alloc1.is_ok());

        let alloc2 = allocator.allocate(128);
        assert!(alloc2.is_ok());

        let stats = allocator.get_stats();
        // Note: depending on the alignment, sizes 64 and 128 may round up to
        // the same size class, so only a lower bound can be asserted.
        assert!(stats.total_caches >= 1);
    }

    #[test]
    fn test_thread_safe_allocator() {
        let size = 1024 * 1024;
        let mut memory = vec![0u8; size];
        let ptr = NonNull::new(memory.as_mut_ptr()).unwrap();

        let config = SlabConfig::default();
        let allocator = ThreadSafeSlabAllocator::new(ptr, size, config);

        let alloc_result = allocator.allocate(64);
        assert!(alloc_result.is_ok());

        let stats = allocator.get_stats();
        assert!(stats.allocated_objects > 0);
    }
}