// optirs_gpu/memory/allocation/mod.rs

1// GPU memory allocation strategies and algorithms
2//
3// This module provides various memory allocation strategies optimized for
4// different GPU workload patterns and memory usage scenarios.
5
6#[allow(dead_code)]
7pub mod arena_allocator;
8pub mod buddy_allocator;
9pub mod slab_allocator;
10pub mod strategies;
11
12// Re-export main types for convenience
13pub use strategies::{
14    AdaptiveConfig, AllocationEvent, AllocationPattern, AllocationStats, AllocationStrategy,
15    AllocationStrategyManager, HybridConfig, MLConfig, MLFeatures, MLPrediction, MemoryBlock,
16};
17
18pub use buddy_allocator::{
19    AllocationInfo, BuddyAllocator, BuddyBlock, BuddyConfig, BuddyError, BuddyStats,
20    FreeBlockStats, MemoryUsage, ThreadSafeBuddyAllocator,
21};
22
23pub use slab_allocator::{
24    CacheConfig, CacheInfo, MemoryPool, MemoryPoolUsage, Slab, SlabAllocator, SlabAllocatorStats,
25    SlabCache, SlabConfig, SlabError, ThreadSafeSlabAllocator,
26};
27
28pub use arena_allocator::{
29    ArenaAllocator, ArenaConfig, ArenaError, ArenaStats, ArenaUsage, CheckpointHandle,
30    ExternalAllocator, GrowingArena, MemoryLayout, MemoryRegion, RingArena, RingConfig, RingUsage,
31    ThreadSafeArena,
32};
33
34use std::collections::HashMap;
35use std::ptr::NonNull;
36use std::sync::{Arc, Mutex};
37use std::time::Instant;
38
/// Unified allocator interface that can use different allocation strategies
///
/// Owns up to four sub-allocators, each managing one quarter of the memory
/// region handed to [`UnifiedAllocator::new`] (see that constructor for the
/// exact partitioning). Disabled sub-allocators are represented as `None`.
pub struct UnifiedAllocator {
    /// Strategy manager for general allocations (always present)
    strategy_manager: AllocationStrategyManager,
    /// Buddy allocator for power-of-2 allocations
    buddy_allocator: Option<BuddyAllocator>,
    /// Slab allocator for fixed-size objects
    slab_allocator: Option<SlabAllocator>,
    /// Arena allocator for temporary allocations
    arena_allocator: Option<ArenaAllocator>,
    /// Configuration captured at construction time
    config: UnifiedConfig,
    /// Aggregated statistics across all sub-allocators
    stats: UnifiedStats,
    /// Allocation routing table (size rules + performance history)
    routing_table: AllocationRouter,
}
56
/// Configuration for unified allocator
///
/// NOTE(review): the `*_threshold` fields are not consulted by
/// `UnifiedAllocator::allocate` (which takes an explicit allocator type),
/// and size-based routing currently lives in `AllocationRouter`'s own
/// route table — confirm whether these thresholds should feed that table.
#[derive(Debug, Clone)]
pub struct UnifiedConfig {
    /// Default allocation strategy
    pub default_strategy: AllocationStrategy,
    /// Enable buddy allocator
    pub enable_buddy: bool,
    /// Enable slab allocator
    pub enable_slab: bool,
    /// Enable arena allocator
    pub enable_arena: bool,
    /// Size threshold for buddy allocator (bytes)
    pub buddy_threshold: usize,
    /// Size threshold for slab allocator (bytes)
    pub slab_threshold: usize,
    /// Size threshold for arena allocator (bytes)
    pub arena_threshold: usize,
    /// Enable automatic routing optimization
    pub enable_auto_routing: bool,
    /// Statistics collection interval
    pub stats_interval: std::time::Duration,
}
79
80impl Default for UnifiedConfig {
81    fn default() -> Self {
82        Self {
83            default_strategy: AllocationStrategy::Adaptive,
84            enable_buddy: true,
85            enable_slab: true,
86            enable_arena: true,
87            buddy_threshold: 1024,
88            slab_threshold: 4096,
89            arena_threshold: 64 * 1024,
90            enable_auto_routing: true,
91            stats_interval: std::time::Duration::from_secs(1),
92        }
93    }
94}
95
/// Unified allocator statistics
///
/// Aggregated counters across all sub-allocators; maintained by
/// `UnifiedAllocator::allocate` / `deallocate` / `optimize_strategies`.
#[derive(Debug, Clone, Default)]
pub struct UnifiedStats {
    /// Total allocation attempts (incremented even when allocation fails).
    pub total_allocations: u64,
    /// Total deallocation calls.
    pub total_deallocations: u64,
    /// Cumulative bytes successfully allocated.
    pub bytes_allocated: u64,
    /// Cumulative bytes passed to `deallocate`.
    pub bytes_deallocated: u64,
    /// Successful allocations per strategy variant.
    pub strategy_allocations: HashMap<AllocationStrategy, u64>,
    /// Successful allocations served by the buddy allocator.
    pub buddy_allocations: u64,
    /// Successful allocations served by the slab allocator.
    pub slab_allocations: u64,
    /// Routing decisions made.
    /// NOTE(review): never written in this module — confirm intended use.
    pub routing_decisions: u64,
    /// Successful allocations served by the arena allocator.
    pub arena_allocations: u64,
    /// Routing pattern-cache hits.
    /// NOTE(review): never written in this module — confirm intended use.
    pub routing_cache_hits: u64,
    /// Running mean allocation latency in nanoseconds (includes failures).
    pub average_allocation_time_ns: f64,
    /// High-water mark of `current_memory_usage`, in bytes.
    pub peak_memory_usage: usize,
    /// Bytes currently allocated (best-effort, tracked by requested size).
    pub current_memory_usage: usize,
}
113
/// Allocation routing logic
///
/// Decides which backing allocator should serve a request, consulting (in
/// order) the pattern cache, the size-class rules, and recorded latency.
pub struct AllocationRouter {
    /// Size-based routing rules, checked in order; first match wins
    size_routes: Vec<SizeRoute>,
    /// Pattern-based routing cache (bounded by `RouterConfig::cache_size`)
    pattern_cache: HashMap<AllocationPattern, AllocatorType>,
    /// Performance history for routing decisions, keyed per allocator
    performance_history: HashMap<AllocatorType, PerformanceMetrics>,
    /// Configuration
    config: RouterConfig,
}
125
/// Size-based routing rule
///
/// Matches requests with `min_size <= size` and, when `max_size` is
/// `Some(max)`, `size <= max`; an open-ended route uses `max_size: None`.
#[derive(Debug, Clone)]
pub struct SizeRoute {
    /// Inclusive lower bound of the size class (bytes).
    pub min_size: usize,
    /// Inclusive upper bound (bytes); `None` means unbounded.
    pub max_size: Option<usize>,
    /// Allocator chosen when this rule matches.
    pub preferred_allocator: AllocatorType,
    /// Alternative used when performance history favors it.
    pub fallback_allocator: Option<AllocatorType>,
}
134
/// Allocator type identification
///
/// Used both as the result of a routing decision and as the key for
/// per-allocator performance history.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum AllocatorType {
    /// General allocator driven by the given placement strategy.
    Strategy(AllocationStrategy),
    /// Buddy allocator (power-of-2 allocations).
    Buddy,
    /// Slab allocator (fixed-size objects).
    Slab,
    /// Arena allocator (temporary allocations).
    Arena,
}
143
/// Performance metrics for routing decisions
///
/// Recorded per allocator by `UnifiedAllocator::allocate`; all-zero
/// (`Default`) means "no data yet" and disables latency comparison.
#[derive(Debug, Clone, Default)]
pub struct PerformanceMetrics {
    /// Observed allocation latency in nanoseconds (0.0 = no data).
    pub average_latency_ns: f64,
    /// 1.0 on the last success, 0.0 on the last failure (as recorded
    /// by `UnifiedAllocator::allocate`).
    pub success_rate: f64,
    /// Fragmentation ratio (currently always written as 0.0 — see allocate).
    pub fragmentation_ratio: f64,
    /// Cache hit rate (currently always written as 0.0 — see allocate).
    pub cache_hit_rate: f64,
    /// Memory efficiency (currently 1.0 on success — see allocate).
    pub memory_efficiency: f64,
}
153
/// Router configuration
#[derive(Debug, Clone)]
pub struct RouterConfig {
    /// When true, latency history may override a route's preferred allocator.
    pub enable_performance_tracking: bool,
    /// Maximum number of entries kept in the pattern cache.
    pub cache_size: usize,
    /// Relative latency improvement required before switching to a fallback.
    pub adaptation_threshold: f64,
    /// Sliding window length for performance samples.
    /// NOTE(review): not read anywhere in this module — confirm intended use.
    pub performance_window: usize,
}
162
163impl Default for RouterConfig {
164    fn default() -> Self {
165        Self {
166            enable_performance_tracking: true,
167            cache_size: 1000,
168            adaptation_threshold: 0.1,
169            performance_window: 100,
170        }
171    }
172}
173
174impl AllocationRouter {
175    pub fn new(config: RouterConfig) -> Self {
176        let size_routes = vec![
177            SizeRoute {
178                min_size: 0,
179                max_size: Some(256),
180                preferred_allocator: AllocatorType::Slab,
181                fallback_allocator: Some(AllocatorType::Strategy(AllocationStrategy::FirstFit)),
182            },
183            SizeRoute {
184                min_size: 257,
185                max_size: Some(4096),
186                preferred_allocator: AllocatorType::Strategy(AllocationStrategy::BestFit),
187                fallback_allocator: Some(AllocatorType::Buddy),
188            },
189            SizeRoute {
190                min_size: 4097,
191                max_size: Some(64 * 1024),
192                preferred_allocator: AllocatorType::Buddy,
193                fallback_allocator: Some(AllocatorType::Strategy(AllocationStrategy::BestFit)),
194            },
195            SizeRoute {
196                min_size: 64 * 1024 + 1,
197                max_size: None,
198                preferred_allocator: AllocatorType::Arena,
199                fallback_allocator: Some(AllocatorType::Strategy(AllocationStrategy::WorstFit)),
200            },
201        ];
202
203        Self {
204            size_routes,
205            pattern_cache: HashMap::new(),
206            performance_history: HashMap::new(),
207            config,
208        }
209    }
210
211    /// Route allocation request to appropriate allocator
212    pub fn route_allocation(
213        &mut self,
214        size: usize,
215        pattern: Option<AllocationPattern>,
216    ) -> AllocatorType {
217        // Check pattern cache first
218        if let Some(pattern) = pattern {
219            if let Some(allocator_type) = self.pattern_cache.get(&pattern) {
220                return allocator_type.clone();
221            }
222        }
223
224        // Use size-based routing
225        for route in &self.size_routes {
226            if size >= route.min_size && route.max_size.is_none_or(|max| size <= max) {
227                // Check performance if tracking is enabled
228                if self.config.enable_performance_tracking {
229                    let preferred_perf = self
230                        .performance_history
231                        .get(&route.preferred_allocator)
232                        .cloned()
233                        .unwrap_or_default();
234
235                    if let Some(fallback) = &route.fallback_allocator {
236                        let fallback_perf = self
237                            .performance_history
238                            .get(fallback)
239                            .cloned()
240                            .unwrap_or_default();
241
242                        // Choose based on performance
243                        if fallback_perf.average_latency_ns > 0.0
244                            && preferred_perf.average_latency_ns > 0.0
245                        {
246                            let perf_ratio = fallback_perf.average_latency_ns
247                                / preferred_perf.average_latency_ns;
248                            if perf_ratio < 1.0 - self.config.adaptation_threshold {
249                                return fallback.clone();
250                            }
251                        }
252                    }
253                }
254
255                return route.preferred_allocator.clone();
256            }
257        }
258
259        // Default fallback
260        AllocatorType::Strategy(AllocationStrategy::BestFit)
261    }
262
263    /// Update performance metrics for an allocator
264    pub fn update_performance(
265        &mut self,
266        allocator_type: AllocatorType,
267        metrics: PerformanceMetrics,
268    ) {
269        self.performance_history.insert(allocator_type, metrics);
270    }
271
272    /// Cache pattern-based routing decision
273    pub fn cache_pattern_route(
274        &mut self,
275        pattern: AllocationPattern,
276        allocator_type: AllocatorType,
277    ) {
278        if self.pattern_cache.len() >= self.config.cache_size {
279            // Remove oldest entry (simplified - could use LRU)
280            if let Some(key) = self.pattern_cache.keys().next().cloned() {
281                self.pattern_cache.remove(&key);
282            }
283        }
284        self.pattern_cache.insert(pattern, allocator_type);
285    }
286}
287
impl UnifiedAllocator {
    /// Create a new unified allocator over `[base_ptr, base_ptr + total_size)`.
    ///
    /// The region is partitioned into four equal quarters:
    /// - buddy allocator:   bytes `[0, total_size/4)`
    /// - slab allocator:    bytes `[total_size/4, total_size/2)`
    /// - arena allocator:   bytes `[total_size/2, 3*total_size/4)`
    /// - strategy manager:  bytes `[3*total_size/4, total_size)`
    ///
    /// Quarters belonging to disabled sub-allocators are left unused (they
    /// are NOT handed to the strategy manager).
    ///
    /// # Errors
    /// Propagates construction errors from the buddy and arena allocators.
    ///
    /// NOTE(review): the unsafe pointer offsets below assume `base_ptr` is
    /// valid for `total_size` bytes; this cannot be checked here — confirm
    /// at call sites.
    pub fn new(
        base_ptr: NonNull<u8>,
        total_size: usize,
        config: UnifiedConfig,
    ) -> Result<Self, AllocationError> {
        let mut strategy_manager = AllocationStrategyManager::new(config.default_strategy.clone());

        let buddy_allocator = if config.enable_buddy {
            let buddy_config = BuddyConfig::default();
            let buddy_size = total_size / 4; // Allocate 1/4 of memory to buddy allocator
            let buddy_ptr = base_ptr;
            Some(BuddyAllocator::new(
                buddy_ptr.as_ptr(),
                buddy_size,
                buddy_config,
            )?)
        } else {
            None
        };

        let slab_allocator = if config.enable_slab {
            let slab_config = SlabConfig::default();
            let slab_size = total_size / 4; // Allocate 1/4 of memory to slab allocator
            // SAFETY: offsetting the non-null base pointer within the
            // caller-provided region cannot produce null.
            let slab_ptr = unsafe { NonNull::new_unchecked(base_ptr.as_ptr().add(total_size / 4)) };
            Some(SlabAllocator::new(slab_ptr, slab_size, slab_config))
        } else {
            None
        };

        let arena_allocator = if config.enable_arena {
            let arena_config = ArenaConfig::default();
            let arena_size = total_size / 4; // Allocate 1/4 of memory to arena allocator
            // SAFETY: same reasoning as the slab pointer above.
            let arena_ptr =
                unsafe { NonNull::new_unchecked(base_ptr.as_ptr().add(total_size / 2)) };
            Some(ArenaAllocator::new(arena_ptr, arena_size, arena_config)?)
        } else {
            None
        };

        let routing_table = AllocationRouter::new(RouterConfig::default());

        // Initialize strategy manager with remaining memory (1/4 of total)
        let strategy_size = total_size / 4;
        let strategy_ptr = unsafe { base_ptr.as_ptr().add(3 * total_size / 4) };
        strategy_manager.add_free_block(MemoryBlock {
            ptr: strategy_ptr,
            size: strategy_size,
            is_free: true,
            allocated_at: None,
            last_accessed: None,
            access_count: 0,
            fragmentation_score: 0.0,
        });

        Ok(Self {
            strategy_manager,
            buddy_allocator,
            slab_allocator,
            arena_allocator,
            config,
            stats: UnifiedStats::default(),
            routing_table,
        })
    }

    /// Allocate memory using the unified interface
    ///
    /// Dispatches to the sub-allocator named by `requested_allocator_type`
    /// and records latency/success metrics in the routing table.
    ///
    /// # Errors
    /// `AllocatorNotAvailable` when the requested sub-allocator is disabled,
    /// `OutOfMemory` for strategy-manager failures, or the wrapped
    /// sub-allocator error otherwise.
    ///
    /// NOTE(review): `total_allocations` and the running latency average
    /// include failed attempts; `_alignment` is currently ignored.
    pub fn allocate(
        &mut self,
        size: usize,
        requested_allocator_type: AllocatorType,
        _alignment: Option<usize>,
    ) -> Result<NonNull<u8>, AllocationError> {
        let start_time = Instant::now();
        self.stats.total_allocations += 1;

        // Use requested allocator type if provided, otherwise route automatically
        let allocator_type = requested_allocator_type;

        let result = match &allocator_type {
            AllocatorType::Strategy(strategy) => {
                self.strategy_manager.set_strategy(strategy.clone());
                self.strategy_manager.find_free_block(size).ok_or_else(|| {
                    AllocationError::OutOfMemory("Strategy allocator failed".to_string())
                })
            }
            AllocatorType::Buddy => {
                if let Some(ref mut buddy) = self.buddy_allocator {
                    buddy.allocate(size).map_err(AllocationError::BuddyError)
                } else {
                    Err(AllocationError::AllocatorNotAvailable(
                        "Buddy allocator not enabled".to_string(),
                    ))
                }
            }
            AllocatorType::Slab => {
                if let Some(ref mut slab) = self.slab_allocator {
                    slab.allocate(size)
                        .map(|ptr| ptr.as_ptr())
                        .map_err(AllocationError::SlabError)
                } else {
                    Err(AllocationError::AllocatorNotAvailable(
                        "Slab allocator not enabled".to_string(),
                    ))
                }
            }
            AllocatorType::Arena => {
                if let Some(ref mut arena) = self.arena_allocator {
                    arena
                        .allocate(size)
                        .map(|ptr| ptr.as_ptr())
                        .map_err(AllocationError::ArenaError)
                } else {
                    Err(AllocationError::AllocatorNotAvailable(
                        "Arena allocator not enabled".to_string(),
                    ))
                }
            }
        };

        let allocation_time = start_time.elapsed().as_nanos() as f64;

        match &result {
            Ok(_) => {
                self.stats.bytes_allocated += size as u64;
                self.stats.current_memory_usage += size;
                if self.stats.current_memory_usage > self.stats.peak_memory_usage {
                    self.stats.peak_memory_usage = self.stats.current_memory_usage;
                }

                // Update strategy-specific stats
                match &allocator_type {
                    AllocatorType::Strategy(strategy) => {
                        *self
                            .stats
                            .strategy_allocations
                            .entry(strategy.clone())
                            .or_insert(0) += 1;
                    }
                    AllocatorType::Buddy => self.stats.buddy_allocations += 1,
                    AllocatorType::Slab => self.stats.slab_allocations += 1,
                    AllocatorType::Arena => self.stats.arena_allocations += 1,
                }

                // Update performance metrics
                let metrics = PerformanceMetrics {
                    average_latency_ns: allocation_time,
                    success_rate: 1.0,
                    fragmentation_ratio: 0.0, // Would need to calculate from allocator
                    cache_hit_rate: 0.0,      // Would need to get from allocator
                    memory_efficiency: 1.0,   // Would need to calculate
                };
                self.routing_table
                    .update_performance(allocator_type, metrics);
            }
            Err(_) => {
                // Update failure metrics
                let metrics = PerformanceMetrics {
                    average_latency_ns: allocation_time,
                    success_rate: 0.0,
                    ..Default::default()
                };
                self.routing_table
                    .update_performance(allocator_type, metrics);
            }
        }

        // Update average allocation time (incremental running mean; the
        // divisor is never zero because total_allocations was bumped above)
        let total_time = self.stats.average_allocation_time_ns
            * (self.stats.total_allocations - 1) as f64
            + allocation_time;
        self.stats.average_allocation_time_ns = total_time / self.stats.total_allocations as f64;

        // NOTE(review): assumes sub-allocators never return a null pointer;
        // `new_unchecked` would be UB otherwise — confirm their contracts.
        result.map(|ptr| unsafe { NonNull::new_unchecked(ptr) })
    }

    /// Deallocate memory
    ///
    /// Ownership of `ptr` is discovered by probing each enabled
    /// sub-allocator in turn (buddy, slab, arena). Arena pointers are
    /// accepted but not individually freed (arenas release in bulk on reset).
    ///
    /// # Errors
    /// `InvalidPointer` when no enabled allocator recognizes `ptr`.
    ///
    /// NOTE(review): stats are updated before ownership is verified, so a
    /// bad pointer still mutates the counters.
    pub fn deallocate(&mut self, ptr: NonNull<u8>, size: usize) -> Result<(), AllocationError> {
        self.stats.total_deallocations += 1;
        self.stats.bytes_deallocated += size as u64;
        self.stats.current_memory_usage = self.stats.current_memory_usage.saturating_sub(size);

        // Try each allocator to find which one owns this pointer
        if let Some(ref mut buddy) = self.buddy_allocator {
            if let Ok(()) = buddy.deallocate(ptr.as_ptr()) {
                return Ok(());
            }
        }

        if let Some(ref mut slab) = self.slab_allocator {
            if let Ok(()) = slab.deallocate(ptr, size) {
                return Ok(());
            }
        }

        if let Some(ref mut arena) = self.arena_allocator {
            if arena.contains_pointer(ptr) {
                // Arena allocator typically doesn't support individual deallocation
                return Ok(());
            }
        }

        Err(AllocationError::InvalidPointer(
            "Pointer not found in any allocator".to_string(),
        ))
    }

    /// Free memory (alias for deallocate)
    ///
    /// NOTE(review): no size is available in this signature, so
    /// `deallocate` is called with size 0 — the byte counters will
    /// under-report on this path. Consider tracking sizes per pointer.
    pub fn free(
        &mut self,
        ptr: *mut std::ffi::c_void,
        _allocator_type: AllocatorType,
    ) -> Result<(), AllocationError> {
        // Convert to NonNull<u8> and call deallocate
        let ptr_u8 = NonNull::new(ptr as *mut u8)
            .ok_or_else(|| AllocationError::InvalidPointer("Null pointer".to_string()))?;
        // We don't have the size here, so we'll just try to deallocate with a dummy size
        // This is not ideal, but matches the interface expected by the caller
        self.deallocate(ptr_u8, 0)
    }

    /// Reallocate memory with new size
    ///
    /// NOTE(review): the old block is neither copied from nor freed (its
    /// size is unknown here), so this leaks the previous allocation and
    /// loses its contents — callers must not rely on data surviving a
    /// reallocation until this is implemented properly.
    pub fn reallocate(
        &mut self,
        ptr: *mut std::ffi::c_void,
        new_size: usize,
        _allocator_type: AllocatorType,
    ) -> Result<*mut std::ffi::c_void, AllocationError> {
        // For simplicity, implement as free + allocate
        // In a production system, this should be optimized for in-place reallocation
        let new_ptr = self.allocate(new_size, _allocator_type, None)?;
        // Note: We should copy the data here, but we don't have the old size
        // This is a simplified implementation
        Ok(new_ptr.as_ptr() as *mut std::ffi::c_void)
    }

    /// Get unified statistics
    pub fn get_stats(&self) -> &UnifiedStats {
        &self.stats
    }

    /// Get detailed allocator information
    ///
    /// Returns a snapshot of every enabled sub-allocator's statistics plus
    /// the aggregated unified counters. Disabled allocators yield `None`.
    pub fn get_detailed_info(&self) -> DetailedAllocatorInfo {
        let mut info = DetailedAllocatorInfo {
            strategy_info: Some(self.strategy_manager.get_stats().clone()),
            buddy_info: None,
            slab_info: None,
            arena_info: None,
            unified_stats: self.stats.clone(),
        };

        if let Some(ref buddy) = self.buddy_allocator {
            info.buddy_info = Some(buddy.get_stats().clone());
        }

        if let Some(ref slab) = self.slab_allocator {
            info.slab_info = Some(slab.get_stats());
        }

        if let Some(ref arena) = self.arena_allocator {
            info.arena_info = Some(arena.get_stats().clone());
        }

        info
    }

    /// Reset specific allocator
    ///
    /// # Errors
    /// `AllocatorNotAvailable` for a disabled buddy/arena allocator;
    /// `UnsupportedOperation` for the slab allocator (no reset support).
    pub fn reset_allocator(
        &mut self,
        allocator_type: AllocatorType,
    ) -> Result<(), AllocationError> {
        match allocator_type {
            AllocatorType::Strategy(_) => {
                // Only clears history; free-block state is left untouched.
                self.strategy_manager.clear_history();
            }
            AllocatorType::Buddy => {
                if let Some(ref mut buddy) = self.buddy_allocator {
                    buddy.reset();
                } else {
                    return Err(AllocationError::AllocatorNotAvailable(
                        "Buddy allocator not enabled".to_string(),
                    ));
                }
            }
            AllocatorType::Slab => {
                return Err(AllocationError::UnsupportedOperation(
                    "Slab allocator reset not supported".to_string(),
                ));
            }
            AllocatorType::Arena => {
                if let Some(ref mut arena) = self.arena_allocator {
                    arena.reset();
                } else {
                    return Err(AllocationError::AllocatorNotAvailable(
                        "Arena allocator not enabled".to_string(),
                    ));
                }
            }
        }
        Ok(())
    }

    /// Force garbage collection on applicable allocators
    ///
    /// Reclaims slab memory and defragments the buddy allocator.
    /// NOTE(review): `arena_reset` and `total_bytes_freed` in the result
    /// are never populated here — confirm whether that is intentional.
    pub fn garbage_collect(&mut self) -> GarbageCollectionResult {
        let mut result = GarbageCollectionResult::default();

        if let Some(ref mut slab) = self.slab_allocator {
            result.slab_reclaimed = slab.reclaim_memory();
        }

        if let Some(ref mut buddy) = self.buddy_allocator {
            result.buddy_defragmented = buddy.defragment();
        }

        result
    }

    /// Optimize allocation strategies based on performance data
    ///
    /// Currently a placeholder: the buddy-vs-slab comparison has empty
    /// branches, so the only observable effect is resetting the per-cycle
    /// allocation counters.
    pub fn optimize_strategies(&mut self) -> Result<(), AllocationError> {
        // Update internal routing table based on performance metrics
        let current_stats = self.get_stats();

        // Analyze allocation patterns and update routing decisions
        // Note: Routing optimization would be implemented here based on performance history
        if current_stats.buddy_allocations > current_stats.slab_allocations {
            // Prefer buddy allocator for current workload - implementation would update routing config
        } else {
            // Prefer slab allocator for current workload - implementation would update routing config
        }

        // Reset counters for next optimization cycle
        self.stats.buddy_allocations = 0;
        self.stats.slab_allocations = 0;
        self.stats.arena_allocations = 0;
        self.stats.strategy_allocations.clear();

        Ok(())
    }
}
628
// Safety: UnifiedAllocator contains multiple allocators managing GPU memory pointers.
// While the contained allocators use NonNull/raw pointers that aren't Send/Sync by default,
// it's safe to share UnifiedAllocator across threads when protected by Arc<Mutex<>> because:
// 1. All pointers point to GPU memory managed by the GPU driver
// 2. The Mutex provides exclusive access for all mutable operations
// 3. No thread-local state is maintained
//
// NOTE(review): the `Sync` claim relies on callers always wrapping the
// allocator in a lock (as `ThreadSafeUnifiedAllocator` does); confirm no
// code path shares `&UnifiedAllocator` across threads without one.
unsafe impl Send for UnifiedAllocator {}
unsafe impl Sync for UnifiedAllocator {}
637
/// Detailed information about all allocators
///
/// Snapshot assembled by `UnifiedAllocator::get_detailed_info`; a `None`
/// entry means the corresponding sub-allocator is disabled.
#[derive(Debug, Clone)]
pub struct DetailedAllocatorInfo {
    /// Strategy-manager statistics (always populated).
    pub strategy_info: Option<AllocationStats>,
    /// Buddy allocator statistics, if enabled.
    pub buddy_info: Option<BuddyStats>,
    /// Slab allocator statistics, if enabled.
    pub slab_info: Option<SlabAllocatorStats>,
    /// Arena allocator statistics, if enabled.
    pub arena_info: Option<ArenaStats>,
    /// Aggregated unified-allocator counters.
    pub unified_stats: UnifiedStats,
}
647
/// Result of garbage collection operations
#[derive(Debug, Clone, Default)]
pub struct GarbageCollectionResult {
    /// Amount reclaimed by the slab allocator.
    pub slab_reclaimed: usize,
    /// Result of buddy-allocator defragmentation.
    pub buddy_defragmented: usize,
    /// Whether an arena reset occurred.
    /// NOTE(review): never set by `garbage_collect` — confirm.
    pub arena_reset: bool,
    /// Total bytes freed across allocators.
    /// NOTE(review): never set by `garbage_collect` — confirm.
    pub total_bytes_freed: usize,
}
656
/// Unified allocation errors
///
/// Wraps sub-allocator errors alongside unified-level failures; see the
/// `Display` impl for the user-facing messages.
#[derive(Debug, Clone)]
pub enum AllocationError {
    /// The selected allocator could not satisfy the request.
    OutOfMemory(String),
    /// A pointer was not recognized by any enabled allocator.
    InvalidPointer(String),
    /// The requested sub-allocator is disabled in the configuration.
    AllocatorNotAvailable(String),
    /// The operation is not supported by the targeted allocator.
    UnsupportedOperation(String),
    /// Error propagated from the buddy allocator.
    BuddyError(BuddyError),
    /// Error propagated from the slab allocator.
    SlabError(SlabError),
    /// Error propagated from the arena allocator.
    ArenaError(ArenaError),
}
668
669impl std::fmt::Display for AllocationError {
670    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
671        match self {
672            AllocationError::OutOfMemory(msg) => write!(f, "Out of memory: {}", msg),
673            AllocationError::InvalidPointer(msg) => write!(f, "Invalid pointer: {}", msg),
674            AllocationError::AllocatorNotAvailable(msg) => {
675                write!(f, "Allocator not available: {}", msg)
676            }
677            AllocationError::UnsupportedOperation(msg) => {
678                write!(f, "Unsupported operation: {}", msg)
679            }
680            AllocationError::BuddyError(e) => write!(f, "Buddy allocator error: {}", e),
681            AllocationError::SlabError(e) => write!(f, "Slab allocator error: {}", e),
682            AllocationError::ArenaError(e) => write!(f, "Arena allocator error: {}", e),
683        }
684    }
685}
686
// Marker impl: `Display` + `Debug` above satisfy `std::error::Error`'s defaults.
impl std::error::Error for AllocationError {}
688
689impl From<BuddyError> for AllocationError {
690    fn from(error: BuddyError) -> Self {
691        AllocationError::BuddyError(error)
692    }
693}
694
695impl From<SlabError> for AllocationError {
696    fn from(error: SlabError) -> Self {
697        AllocationError::SlabError(error)
698    }
699}
700
701impl From<ArenaError> for AllocationError {
702    fn from(error: ArenaError) -> Self {
703        AllocationError::ArenaError(error)
704    }
705}
706
/// Thread-safe unified allocator wrapper
///
/// Wraps a [`UnifiedAllocator`] in `Arc<Mutex<_>>` so it can be shared
/// across threads; every operation holds the lock for its full duration.
pub struct ThreadSafeUnifiedAllocator {
    /// The shared, mutex-protected inner allocator.
    allocator: Arc<Mutex<UnifiedAllocator>>,
}
711
712impl ThreadSafeUnifiedAllocator {
713    pub fn new(
714        base_ptr: NonNull<u8>,
715        total_size: usize,
716        config: UnifiedConfig,
717    ) -> Result<Self, AllocationError> {
718        let allocator = UnifiedAllocator::new(base_ptr, total_size, config)?;
719        Ok(Self {
720            allocator: Arc::new(Mutex::new(allocator)),
721        })
722    }
723
724    pub fn allocate(&self, size: usize) -> Result<NonNull<u8>, AllocationError> {
725        let mut allocator = self.allocator.lock().expect("lock poisoned");
726        allocator.allocate(
727            size,
728            AllocatorType::Strategy(strategies::AllocationStrategy::FirstFit),
729            None,
730        )
731    }
732
733    pub fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<(), AllocationError> {
734        let mut allocator = self.allocator.lock().expect("lock poisoned");
735        allocator.deallocate(ptr, size)
736    }
737
738    pub fn get_stats(&self) -> UnifiedStats {
739        let allocator = self.allocator.lock().expect("lock poisoned");
740        allocator.get_stats().clone()
741    }
742
743    pub fn garbage_collect(&self) -> GarbageCollectionResult {
744        let mut allocator = self.allocator.lock().expect("lock poisoned");
745        allocator.garbage_collect()
746    }
747}
748
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a zeroed backing buffer plus a valid mutable pointer into it.
    ///
    /// The `Vec` is returned alongside the pointer so callers keep the
    /// storage alive. Fix: the previous tests cast `memory.as_ptr()` (a
    /// pointer derived from a shared borrow of an immutable `Vec`) to
    /// `*mut u8`; writing through such a pointer is undefined behavior.
    /// Using a `mut` buffer and `as_mut_ptr()` yields a legitimately
    /// writable pointer.
    fn backing(size: usize) -> (Vec<u8>, NonNull<u8>) {
        let mut memory = vec![0u8; size];
        let ptr = NonNull::new(memory.as_mut_ptr()).expect("unwrap failed");
        (memory, ptr)
    }

    #[test]
    fn test_unified_allocator_creation() {
        let size = 1024 * 1024; // 1MB
        let (_memory, ptr) = backing(size);

        let config = UnifiedConfig::default();
        let allocator = UnifiedAllocator::new(ptr, size, config);
        assert!(allocator.is_ok());
    }

    #[test]
    fn test_unified_allocation() {
        let size = 1024 * 1024;
        let (_memory, ptr) = backing(size);

        let config = UnifiedConfig::default();
        let mut allocator = UnifiedAllocator::new(ptr, size, config).expect("unwrap failed");

        // Test different sizes to trigger different allocators
        let small_alloc = allocator.allocate(100, AllocatorType::Slab, None); // Should use slab
        assert!(small_alloc.is_ok());

        let medium_alloc = allocator.allocate(2048, AllocatorType::Buddy, None); // Should use buddy
        assert!(
            medium_alloc.is_ok(),
            "Medium allocation failed: {:?}",
            medium_alloc.err()
        );

        let large_alloc = allocator.allocate(128 * 1024, AllocatorType::Arena, None); // Should use arena
        assert!(large_alloc.is_ok());

        let stats = allocator.get_stats();
        assert_eq!(stats.total_allocations, 3);
    }

    #[test]
    fn test_allocation_routing() {
        let config = RouterConfig::default();
        let mut router = AllocationRouter::new(config);

        let small_route = router.route_allocation(100, None);
        assert_eq!(small_route, AllocatorType::Slab);

        let medium_route = router.route_allocation(2048, None);
        assert_eq!(
            medium_route,
            AllocatorType::Strategy(AllocationStrategy::BestFit)
        );

        let large_route = router.route_allocation(128 * 1024, None);
        assert_eq!(large_route, AllocatorType::Arena);
    }

    #[test]
    fn test_thread_safe_unified_allocator() {
        let size = 1024 * 1024;
        let (_memory, ptr) = backing(size);

        let config = UnifiedConfig::default();
        let allocator = ThreadSafeUnifiedAllocator::new(ptr, size, config).expect("unwrap failed");

        let alloc_result = allocator.allocate(1024);
        assert!(
            alloc_result.is_ok(),
            "Allocation failed: {:?}",
            alloc_result.err()
        );

        let stats = allocator.get_stats();
        assert!(stats.total_allocations > 0);
    }
}