zipora 3.1.4

A high-performance Rust implementation of advanced data structures and compression algorithms with memory-safety guarantees. Features include an LRU page cache with a sophisticated caching layer, fiber-based concurrency, real-time compression, secure memory pools, SIMD optimizations, and a complete C FFI for migrating from C++.
//! Tiered memory allocator for optimal performance across all allocation sizes
//!
//! This module provides a sophisticated tiered allocation strategy that routes
//! allocations to the most appropriate allocator based on size and usage patterns.
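//!
//! # Example
//!
//! A minimal usage sketch of this module's global convenience functions
//! (the `zipora::memory` import path is an assumption; the crate may
//! re-export these items elsewhere):
//!
//! ```ignore
//! use zipora::memory::{tiered_allocate, tiered_deallocate};
//!
//! // 512 bytes routes to the small-object pool.
//! let mut alloc = tiered_allocate(512)?;
//! alloc.as_mut_slice()[0] = 42;
//! tiered_deallocate(alloc)?;
//! ```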

use crate::error::{Result, ZiporaError};
use crate::memory::{
    mmap::{MemoryMappedAllocator, MmapAllocation},
    pool::{MemoryPool, PoolConfig, PoolStats},
};

#[cfg(target_os = "linux")]
use crate::memory::hugepage::{HUGEPAGE_SIZE_2MB, HugePage, HugePageAllocator};

use std::collections::VecDeque;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::thread_local;

// Size thresholds for routing allocations to the different strategies.

/// Maximum size for small object allocations (1KB)
pub const SMALL_THRESHOLD: usize = 1024; // 1KB
/// Maximum size for medium object allocations (16KB)
pub const MEDIUM_THRESHOLD: usize = 16 * 1024; // 16KB
/// Minimum size for huge page allocations (2MB)
pub const LARGE_THRESHOLD: usize = 2 * 1024 * 1024; // 2MB
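
// Routing examples for the thresholds above (see
// `TieredMemoryAllocator::allocate`):
//   512 B  -> small object pool
//   4 KiB  -> thread-local medium size-class pool
//   64 KiB -> memory-mapped (mmap) allocation
//   4 MiB  -> hugepages on Linux, with an mmap fallback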

/// A memory allocation that can come from different allocators
#[derive(Debug)]
pub enum TieredAllocation {
    /// Small allocation from memory pool (pointer, size)
    Small(NonNull<u8>, usize),
    /// Medium allocation from size-classed pools (pointer, size)
    Medium(NonNull<u8>, usize),
    /// Large allocation using memory mapping
    Large(MmapAllocation),
    /// Huge allocation using Linux hugepages
    #[cfg(target_os = "linux")]
    Huge(HugePage),
}

/// Configuration for the tiered memory allocator
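///
/// # Example
///
/// A sketch of a custom configuration (field values are illustrative):
///
/// ```ignore
/// let config = TieredConfig {
///     enable_hugepages: false,
///     ..TieredConfig::default()
/// };
/// let allocator = TieredMemoryAllocator::new(config)?;
/// ```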
#[derive(Debug, Clone)]
pub struct TieredConfig {
    /// Enable small object pools
    pub enable_small_pools: bool,
    /// Enable medium object pools with size classes
    pub enable_medium_pools: bool,
    /// Enable memory-mapped large allocations
    pub enable_mmap_large: bool,
    /// Enable hugepage allocations
    pub enable_hugepages: bool,
    /// Minimum size for memory-mapped allocations
    pub mmap_threshold: usize,
    /// Minimum size for hugepage allocations
    pub hugepage_threshold: usize,
}

impl Default for TieredConfig {
    fn default() -> Self {
        Self {
            enable_small_pools: true,
            enable_medium_pools: true,
            enable_mmap_large: true,
            enable_hugepages: cfg!(target_os = "linux"),
            mmap_threshold: MEDIUM_THRESHOLD,
            hugepage_threshold: LARGE_THRESHOLD,
        }
    }
}

/// Comprehensive statistics for the tiered allocator
#[derive(Debug, Clone)]
pub struct TieredStats {
    /// Number of small allocations made
    pub small_allocations: u64,
    /// Number of medium allocations made
    pub medium_allocations: u64,
    /// Number of large allocations made
    pub large_allocations: u64,
    /// Number of huge allocations made
    pub huge_allocations: u64,
    /// Total bytes currently allocated
    pub total_allocated_bytes: u64,
    /// Statistics for the small object pool
    pub small_pool_stats: PoolStats,
    /// Statistics for each medium object pool (by size class)
    pub medium_pool_stats: Vec<PoolStats>,
    /// Statistics for memory-mapped allocations
    pub mmap_stats: crate::memory::mmap::MmapStats,
}

// Thread-local storage for medium-sized pools to reduce contention
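// Note: because these pools are thread-local, a medium chunk is returned to
// the current thread's pool on deallocation; callers should free medium
// allocations on the thread that made them.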
thread_local! {
    static MEDIUM_POOLS: Vec<Arc<MemoryPool>> = {
        // Size classes: 1KB, 2KB, 4KB, 8KB, 16KB
        let size_classes = vec![1024, 2048, 4096, 8192, 16384];

        size_classes.into_iter().map(|size| {
            let config = PoolConfig::new(size, 32, 16); // 32 chunks per pool, 16-byte aligned
            Arc::new(MemoryPool::new(config).expect("memory pool creation"))
        }).collect()
    };
}

/// High-performance tiered memory allocator
pub struct TieredMemoryAllocator {
    config: TieredConfig,

    // Small object pool (< 1KB)
    small_pool: Arc<MemoryPool>,

    // Memory-mapped allocator for large objects
    mmap_allocator: Arc<MemoryMappedAllocator>,

    // Hugepage allocator for very large objects
    #[cfg(target_os = "linux")]
    hugepage_allocator: Arc<HugePageAllocator>,

    // Statistics
    small_allocs: AtomicU64,
    medium_allocs: AtomicU64,
    large_allocs: AtomicU64,
    huge_allocs: AtomicU64,
    total_bytes: AtomicU64,

    // Adaptive allocation tracking
    allocation_history: Arc<Mutex<AllocationHistory>>,
}

/// Tracks allocation patterns for adaptive optimization
struct AllocationHistory {
    size_histogram: [u64; 32], // Histogram of allocation sizes (log2 buckets)
    recent_sizes: VecDeque<usize>, // Recent allocation sizes for pattern detection
    max_recent: usize,
}

impl AllocationHistory {
    fn new() -> Self {
        Self {
            size_histogram: [0; 32],
            recent_sizes: VecDeque::with_capacity(1000),
            max_recent: 1000,
        }
    }

    fn record_allocation(&mut self, size: usize) {
        // Update histogram
        let bucket = if size == 0 {
            0
        } else {
            // floor(log2(size)); usize::BITS keeps this correct on both
            // 32-bit and 64-bit targets
            (usize::BITS - 1 - size.leading_zeros()) as usize
        };
        if bucket < 32 {
            self.size_histogram[bucket] += 1;
        }

        // Track recent allocations, evicting the oldest entry once full
        if self.recent_sizes.len() >= self.max_recent {
            self.recent_sizes.pop_front(); // O(1) with VecDeque
        }
        self.recent_sizes.push_back(size);
    }

    fn get_allocation_pattern(&self) -> AllocationPattern {
        let total: u64 = self.size_histogram.iter().sum();
        if total == 0 {
            return AllocationPattern::Mixed;
        }

        // Analyze dominant allocation sizes
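        // Bucket b covers sizes in [2^b, 2^(b+1)): buckets 0-9 cover < 1 KiB,
        // 10-15 cover 1 KiB to < 64 KiB, and 16+ cover >= 64 KiB. These are
        // approximations of the tier thresholds, not exact matches.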
        let small_ratio = self.size_histogram[0..10].iter().sum::<u64>() as f64 / total as f64;
        let medium_ratio = self.size_histogram[10..16].iter().sum::<u64>() as f64 / total as f64;
        let large_ratio = self.size_histogram[16..].iter().sum::<u64>() as f64 / total as f64;

        if small_ratio > 0.7 {
            AllocationPattern::SmallDominated
        } else if medium_ratio > 0.7 {
            AllocationPattern::MediumDominated
        } else if large_ratio > 0.7 {
            AllocationPattern::LargeDominated
        } else {
            AllocationPattern::Mixed
        }
    }
}

/// Detected allocation pattern for adaptive optimization
#[derive(Debug, Clone, Copy)]
pub enum AllocationPattern {
    /// More than 70% of allocations are small (< 1KB)
    SmallDominated,
    /// More than 70% of allocations are medium (1KB-16KB)
    MediumDominated,
    /// More than 70% of allocations are large (> 16KB)
    LargeDominated,
    /// No clear dominant allocation size pattern
    Mixed,
}

impl TieredMemoryAllocator {
    /// Create a new tiered memory allocator
    pub fn new(config: TieredConfig) -> Result<Self> {
        let small_pool = if config.enable_small_pools {
            Arc::new(MemoryPool::new(PoolConfig::new(SMALL_THRESHOLD, 100, 8))?)
        } else {
            Arc::new(MemoryPool::new(PoolConfig::new(64, 1, 8))?) // Minimal pool
        };

        let mmap_allocator = if config.enable_mmap_large {
            Arc::new(MemoryMappedAllocator::new(config.mmap_threshold))
        } else {
            Arc::new(MemoryMappedAllocator::new(usize::MAX)) // Effectively disabled
        };

        #[cfg(target_os = "linux")]
        let hugepage_allocator = if config.enable_hugepages {
            Arc::new(HugePageAllocator::with_config(
                config.hugepage_threshold,
                HUGEPAGE_SIZE_2MB,
            )?)
        } else {
            Arc::new(HugePageAllocator::with_config(
                usize::MAX,
                HUGEPAGE_SIZE_2MB,
            )?) // Disabled
        };

        Ok(Self {
            config,
            small_pool,
            mmap_allocator,
            #[cfg(target_os = "linux")]
            hugepage_allocator,
            small_allocs: AtomicU64::new(0),
            medium_allocs: AtomicU64::new(0),
            large_allocs: AtomicU64::new(0),
            huge_allocs: AtomicU64::new(0),
            total_bytes: AtomicU64::new(0),
            allocation_history: Arc::new(Mutex::new(AllocationHistory::new())),
        })
    }

    /// Create allocator with default configuration
    pub fn default() -> Result<Self> {
        Self::new(TieredConfig::default())
    }

    /// Allocate memory using the optimal strategy for the given size
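    ///
    /// When the corresponding tiers are enabled, sizes up to `SMALL_THRESHOLD`
    /// go to the small pool, sizes up to `MEDIUM_THRESHOLD` to a thread-local
    /// size-class pool, sizes below `LARGE_THRESHOLD` to mmap, and anything
    /// larger to hugepages (falling back to mmap when hugepages are
    /// unavailable).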
    pub fn allocate(&self, size: usize) -> Result<TieredAllocation> {
        if size == 0 {
            return Err(ZiporaError::invalid_data("allocation size cannot be zero"));
        }

        // Record allocation for adaptive optimization
        if let Ok(mut history) = self.allocation_history.try_lock() {
            history.record_allocation(size);
        }

        self.total_bytes.fetch_add(size as u64, Ordering::Relaxed);

        // Route to appropriate allocator based on size
        if size <= SMALL_THRESHOLD && self.config.enable_small_pools {
            self.allocate_small(size)
        } else if size <= MEDIUM_THRESHOLD && self.config.enable_medium_pools {
            self.allocate_medium(size)
        } else if size < LARGE_THRESHOLD && self.config.enable_mmap_large {
            self.allocate_large(size)
        } else {
            self.allocate_huge(size)
        }
    }

    /// Deallocate memory
    pub fn deallocate(&self, allocation: TieredAllocation) -> Result<()> {
        match allocation {
            TieredAllocation::Small(ptr, size) => {
                self.small_pool.deallocate(ptr)?;
                self.total_bytes.fetch_sub(size as u64, Ordering::Relaxed);
            }
            TieredAllocation::Medium(ptr, size) => {
                self.deallocate_medium(ptr, size)?;
                self.total_bytes.fetch_sub(size as u64, Ordering::Relaxed);
            }
            TieredAllocation::Large(allocation) => {
                let size = allocation.size();
                self.mmap_allocator.deallocate(allocation)?;
                self.total_bytes.fetch_sub(size as u64, Ordering::Relaxed);
            }
            #[cfg(target_os = "linux")]
            TieredAllocation::Huge(hugepage) => {
                let size = hugepage.size();
                drop(hugepage); // HugePage handles its own deallocation
                self.total_bytes.fetch_sub(size as u64, Ordering::Relaxed);
            }
        }
        Ok(())
    }

    /// Get comprehensive statistics
    pub fn stats(&self) -> TieredStats {
        let medium_pool_stats =
            MEDIUM_POOLS.with(|pools| pools.iter().map(|pool| pool.stats()).collect());

        TieredStats {
            small_allocations: self.small_allocs.load(Ordering::Relaxed),
            medium_allocations: self.medium_allocs.load(Ordering::Relaxed),
            large_allocations: self.large_allocs.load(Ordering::Relaxed),
            huge_allocations: self.huge_allocs.load(Ordering::Relaxed),
            total_allocated_bytes: self.total_bytes.load(Ordering::Relaxed),
            small_pool_stats: self.small_pool.stats(),
            medium_pool_stats,
            mmap_stats: self.mmap_allocator.stats(),
        }
    }

    /// Get allocation pattern analysis
    pub fn get_allocation_pattern(&self) -> Result<AllocationPattern> {
        if let Ok(history) = self.allocation_history.lock() {
            Ok(history.get_allocation_pattern())
        } else {
            Ok(AllocationPattern::Mixed)
        }
    }

    /// Optimize allocator based on observed allocation patterns
    pub fn optimize_for_pattern(&self) -> Result<()> {
        let pattern = self.get_allocation_pattern()?;

        log::debug!("Optimizing tiered allocator for pattern: {:?}", pattern);

        // Pattern-specific optimizations could be implemented here
        // For example:
        // - Pre-warm pools for dominant allocation sizes
        // - Adjust cache sizes based on usage patterns
        // - Tune memory mapping thresholds
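        //
        // A sketch of what such tuning might look like (`prewarm` is a
        // hypothetical MemoryPool method, not part of the current API):
        //
        // match pattern {
        //     AllocationPattern::SmallDominated => self.small_pool.prewarm(64)?,
        //     AllocationPattern::MediumDominated => { /* grow medium size classes */ }
        //     _ => {}
        // }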

        Ok(())
    }

    fn allocate_small(&self, size: usize) -> Result<TieredAllocation> {
        self.small_allocs.fetch_add(1, Ordering::Relaxed);
        let chunk = self.small_pool.allocate()?;
        Ok(TieredAllocation::Small(chunk, size))
    }

    fn allocate_medium(&self, size: usize) -> Result<TieredAllocation> {
        // Use thread-local medium pools for better performance
        MEDIUM_POOLS.with(|pools| {
            // Find the smallest pool that can accommodate the allocation
            for pool in pools.iter() {
                if pool.config().chunk_size >= size {
                    let chunk = pool.allocate()?;
                    self.medium_allocs.fetch_add(1, Ordering::Relaxed);
                    return Ok(TieredAllocation::Medium(chunk, size));
                }
            }

            // No suitable pool found; fall back to mmap. The fallback is
            // counted as a large allocation, not double-counted as medium.
            self.allocate_large(size)
        })
    }

    fn deallocate_medium(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
        MEDIUM_POOLS.with(|pools| {
            // Find the appropriate pool based on size
            for pool in pools.iter() {
                if pool.config().chunk_size >= size {
                    return pool.deallocate(ptr);
                }
            }

            Err(ZiporaError::invalid_data(
                "no suitable pool for deallocation",
            ))
        })
    }

    fn allocate_large(&self, size: usize) -> Result<TieredAllocation> {
        self.large_allocs.fetch_add(1, Ordering::Relaxed);
        let allocation = self.mmap_allocator.allocate(size)?;
        Ok(TieredAllocation::Large(allocation))
    }

    fn allocate_huge(&self, size: usize) -> Result<TieredAllocation> {
        #[cfg(target_os = "linux")]
        {
            if self.config.enable_hugepages && self.hugepage_allocator.should_use_hugepages(size) {
                self.huge_allocs.fetch_add(1, Ordering::Relaxed);
                let hugepage = self.hugepage_allocator.allocate(size)?;
                return Ok(TieredAllocation::Huge(hugepage));
            }
        }

        // Fall back to memory mapping for very large allocations
        self.allocate_large(size)
    }
}

// SAFETY: TieredMemoryAllocator is Send because:
// 1. `config: TieredConfig` - plain configuration data with no raw pointers.
// 2. The statistics counters (`small_allocs`, `medium_allocs`, etc.) are
//    AtomicU64, which is Send.
// 3. `small_pool: Arc<MemoryPool>` - MemoryPool is internally synchronized.
// 4. `mmap_allocator: Arc<MemoryMappedAllocator>` - Send (uses Mutex).
// 5. `hugepage_allocator: Arc<HugePageAllocator>` - Send (Linux only).
// 6. `allocation_history: Arc<Mutex<AllocationHistory>>` - protected by Mutex.
unsafe impl Send for TieredMemoryAllocator {}

// SAFETY: TieredMemoryAllocator is Sync because:
// 1. The atomic counters (small_allocs, medium_allocs, etc.) are inherently Sync.
// 2. `small_pool: Arc<MemoryPool>` - internally synchronized.
// 3. `mmap_allocator: Arc<MemoryMappedAllocator>` - Sync (uses Mutex).
// 4. `hugepage_allocator: Arc<HugePageAllocator>` - Sync (uses Mutex).
// 5. `allocation_history` is protected by a Mutex.
// 6. The thread-local MEDIUM_POOLS are per-thread and never shared.
// All shared state is protected by atomics or a Mutex for thread safety.
unsafe impl Sync for TieredMemoryAllocator {}

impl TieredAllocation {
    /// Get the allocated memory as a slice
    pub fn as_slice(&self) -> &[u8] {
        match self {
            // SAFETY: ptr is NonNull from valid pool allocation, size matches allocated chunk
            TieredAllocation::Small(ptr, size) => unsafe {
                std::slice::from_raw_parts(ptr.as_ptr(), *size)
            },
            // SAFETY: ptr is NonNull from valid pool allocation, size matches allocated chunk
            TieredAllocation::Medium(ptr, size) => unsafe {
                std::slice::from_raw_parts(ptr.as_ptr(), *size)
            },
            TieredAllocation::Large(allocation) => allocation.as_slice(),
            #[cfg(target_os = "linux")]
            TieredAllocation::Huge(hugepage) => hugepage.as_slice(),
        }
    }

    /// Get the allocated memory as a mutable slice
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        match self {
            // SAFETY: ptr is NonNull from valid pool allocation, size matches allocated chunk, &mut guarantees exclusive access
            TieredAllocation::Small(ptr, size) => unsafe {
                std::slice::from_raw_parts_mut(ptr.as_ptr(), *size)
            },
            // SAFETY: ptr is NonNull from valid pool allocation, size matches allocated chunk, &mut guarantees exclusive access
            TieredAllocation::Medium(ptr, size) => unsafe {
                std::slice::from_raw_parts_mut(ptr.as_ptr(), *size)
            },
            TieredAllocation::Large(allocation) => allocation.as_mut_slice(),
            #[cfg(target_os = "linux")]
            TieredAllocation::Huge(hugepage) => hugepage.as_mut_slice(),
        }
    }

    /// Get the size of the allocation
    #[inline]
    pub fn size(&self) -> usize {
        match self {
            TieredAllocation::Small(_, size) => *size,
            TieredAllocation::Medium(_, size) => *size,
            TieredAllocation::Large(allocation) => allocation.size(),
            #[cfg(target_os = "linux")]
            TieredAllocation::Huge(hugepage) => hugepage.size(),
        }
    }

    /// Get the memory as a typed pointer
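    ///
    /// Note: this cast performs no checks; the caller must ensure `T`'s
    /// size and alignment are compatible with the underlying allocation.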
    pub fn as_ptr<T>(&self) -> *mut T {
        match self {
            TieredAllocation::Small(ptr, _) => ptr.as_ptr() as *mut T,
            TieredAllocation::Medium(ptr, _) => ptr.as_ptr() as *mut T,
            TieredAllocation::Large(allocation) => allocation.as_ptr(),
            #[cfg(target_os = "linux")]
            TieredAllocation::Huge(hugepage) => hugepage.as_slice().as_ptr() as *mut T,
        }
    }
}

/// Global tiered allocator instance
static GLOBAL_TIERED_ALLOCATOR: once_cell::sync::Lazy<TieredMemoryAllocator> =
    once_cell::sync::Lazy::new(|| TieredMemoryAllocator::default().expect("default allocator creation"));

/// Allocate memory using the global tiered allocator
pub fn tiered_allocate(size: usize) -> Result<TieredAllocation> {
    GLOBAL_TIERED_ALLOCATOR.allocate(size)
}

/// Deallocate memory using the global tiered allocator
pub fn tiered_deallocate(allocation: TieredAllocation) -> Result<()> {
    GLOBAL_TIERED_ALLOCATOR.deallocate(allocation)
}

/// Get statistics from the global tiered allocator
pub fn get_tiered_stats() -> TieredStats {
    GLOBAL_TIERED_ALLOCATOR.stats()
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Mutex;

    // Global mutex to serialize tests that use global allocator state
    // This prevents race conditions that cause segfaults in release mode
    static GLOBAL_ALLOCATOR_TEST_MUTEX: Mutex<()> = Mutex::new(());

    #[test]
    fn test_tiered_allocator_creation() {
        let allocator = TieredMemoryAllocator::default().unwrap();
        let stats = allocator.stats();

        assert_eq!(stats.small_allocations, 0);
        assert_eq!(stats.medium_allocations, 0);
        assert_eq!(stats.large_allocations, 0);
        assert_eq!(stats.huge_allocations, 0);
    }

    #[test]
    fn test_small_allocation() {
        let allocator = TieredMemoryAllocator::default().unwrap();
        let size = 512; // Small allocation

        let mut allocation = allocator.allocate(size).unwrap();
        assert_eq!(allocation.size(), size);

        // Test that we can write to the memory
        let slice = allocation.as_mut_slice();
        slice[0] = 42;
        slice[size - 1] = 84;

        let slice = allocation.as_slice();
        assert_eq!(slice[0], 42);
        assert_eq!(slice[size - 1], 84);

        allocator.deallocate(allocation).unwrap();

        let stats = allocator.stats();
        assert_eq!(stats.small_allocations, 1);
        assert_eq!(stats.total_allocated_bytes, 0); // Deallocated
    }

    #[test]
    fn test_medium_allocation() {
        let allocator = TieredMemoryAllocator::default().unwrap();
        let size = 4 * 1024; // 4KB - medium allocation

        let mut allocation = allocator.allocate(size).unwrap();
        assert_eq!(allocation.size(), size);

        // Test memory access
        let slice = allocation.as_mut_slice();
        slice[0] = 42;
        slice[size - 1] = 84;

        allocator.deallocate(allocation).unwrap();

        let stats = allocator.stats();
        assert_eq!(stats.medium_allocations, 1);
    }

    #[test]
    fn test_large_allocation() {
        let allocator = TieredMemoryAllocator::default().unwrap();
        let size = 64 * 1024; // 64KB - large allocation

        let mut allocation = allocator.allocate(size).unwrap();
        assert_eq!(allocation.size(), size);

        // Test memory access
        let slice = allocation.as_mut_slice();
        slice[0] = 42;
        slice[size - 1] = 84;

        allocator.deallocate(allocation).unwrap();

        let stats = allocator.stats();
        assert_eq!(stats.large_allocations, 1);
    }

    #[test]
    fn test_huge_allocation() {
        let allocator = TieredMemoryAllocator::default().unwrap();
        let size = 4 * 1024 * 1024; // 4MB - huge allocation

        // Try allocation, but it might fail on systems without hugepage support
        match allocator.allocate(size) {
            Ok(mut allocation) => {
                assert_eq!(allocation.size(), size);

                // Test memory access
                let slice = allocation.as_mut_slice();
                slice[0] = 42;
                slice[size - 1] = 84;

                allocator.deallocate(allocation).unwrap();

                let stats = allocator.stats();
                // Might be huge or large depending on hugepage availability
                assert!(stats.huge_allocations > 0 || stats.large_allocations > 0);
            }
            Err(_) => {
                // The allocation may fail on systems with limited memory or
                // no hugepage support; this is acceptable in test environments.
                println!("Huge allocation failed - acceptable in this test environment");
            }
        }
    }

    #[test]
    fn test_mixed_allocation_pattern() {
        let allocator = TieredMemoryAllocator::default().unwrap();

        let sizes = vec![128, 2048, 32768, 1048576]; // Mix of small, medium, large
        let mut allocations = Vec::new();

        // Allocate all sizes
        for size in &sizes {
            let allocation = allocator.allocate(*size).unwrap();
            allocations.push(allocation);
        }

        // Deallocate all
        for allocation in allocations {
            allocator.deallocate(allocation).unwrap();
        }

        let stats = allocator.stats();
        assert!(stats.small_allocations > 0);
        assert!(stats.medium_allocations > 0);
        assert!(stats.large_allocations > 0);
    }

    #[test]
    fn test_allocation_pattern_detection() {
        let allocator = TieredMemoryAllocator::default().unwrap();

        // Allocate mostly small objects
        for _ in 0..100 {
            let allocation = allocator.allocate(256).unwrap();
            allocator.deallocate(allocation).unwrap();
        }

        let pattern = allocator.get_allocation_pattern().unwrap();
        // Should detect a small-dominated (or at least mixed) pattern
        assert!(matches!(
            pattern,
            AllocationPattern::SmallDominated | AllocationPattern::Mixed
        ));
    }

    #[test]
    fn test_global_tiered_allocator() {
        // Serialize access to global allocator to prevent race conditions
        let _guard = GLOBAL_ALLOCATOR_TEST_MUTEX.lock().unwrap();

        let size = 1024;

        let allocation = tiered_allocate(size).unwrap();
        assert_eq!(allocation.size(), size);

        tiered_deallocate(allocation).unwrap();

        let stats = get_tiered_stats();
        assert!(stats.small_allocations > 0 || stats.medium_allocations > 0);
    }

    #[test]
    fn test_zero_size_allocation() {
        let allocator = TieredMemoryAllocator::default().unwrap();
        let result = allocator.allocate(0);
        assert!(result.is_err());
    }

    #[test]
    fn test_allocator_configuration() {
        let config = TieredConfig {
            enable_small_pools: false,
            enable_medium_pools: false,
            enable_mmap_large: true,
            enable_hugepages: false,
            mmap_threshold: 512, // Lower threshold to allow small allocations
            hugepage_threshold: usize::MAX,
        };

        let allocator = TieredMemoryAllocator::new(config).unwrap();

        // Small allocation should fall back to mmap due to disabled pools
        let allocation = allocator.allocate(1024).unwrap(); // Use size above threshold
        allocator.deallocate(allocation).unwrap();

        let stats = allocator.stats();
        // Should have used large allocation (mmap) instead of small pool
        assert_eq!(stats.small_allocations, 0);
        assert!(stats.large_allocations > 0);
    }
}