use crate::error::{Result, ZiporaError};
#[cfg(target_os = "linux")]
use crate::memory::hugepage::{HUGEPAGE_SIZE_2MB, HugePage, HugePageAllocator};
use crate::memory::{
mmap::{MemoryMappedAllocator, MmapAllocation},
pool::{MemoryPool, PoolConfig, PoolStats},
};
use std::collections::VecDeque;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::thread_local;
pub const SMALL_THRESHOLD: usize = 1024; pub const MEDIUM_THRESHOLD: usize = 16 * 1024; pub const LARGE_THRESHOLD: usize = 2 * 1024 * 1024;
/// An allocation handle tagged with the tier that produced it.
///
/// The variant tells `TieredMemoryAllocator::deallocate` how to release the
/// memory, so a handle must be returned to the allocator it came from.
#[derive(Debug)]
pub enum TieredAllocation {
/// Pointer plus requested size, backed by the shared small pool.
Small(NonNull<u8>, usize),
/// Pointer plus requested size, backed by a thread-local medium pool.
Medium(NonNull<u8>, usize),
/// Memory-mapped region for large allocations.
Large(MmapAllocation),
/// Huge-page backed allocation (Linux only); released on drop.
#[cfg(target_os = "linux")]
Huge(HugePage),
}
/// Feature switches and thresholds controlling which allocation tiers are used.
#[derive(Debug, Clone)]
pub struct TieredConfig {
/// Route allocations up to `SMALL_THRESHOLD` through the shared small pool.
pub enable_small_pools: bool,
/// Route allocations up to `MEDIUM_THRESHOLD` through thread-local pools.
pub enable_medium_pools: bool,
/// Use memory mapping for large allocations.
pub enable_mmap_large: bool,
/// Use huge pages for very large allocations (only effective on Linux).
pub enable_hugepages: bool,
/// Threshold (bytes) handed to the mmap allocator.
pub mmap_threshold: usize,
/// Threshold (bytes) handed to the hugepage allocator.
pub hugepage_threshold: usize,
}
impl Default for TieredConfig {
/// All tiers enabled; huge pages only on platforms that support them.
fn default() -> Self {
Self {
enable_small_pools: true,
enable_medium_pools: true,
enable_mmap_large: true,
// `cfg!` resolves at compile time for the build target.
enable_hugepages: cfg!(target_os = "linux"),
mmap_threshold: MEDIUM_THRESHOLD,
hugepage_threshold: LARGE_THRESHOLD,
}
}
}
/// Point-in-time snapshot of allocator counters and per-tier pool statistics.
#[derive(Debug, Clone)]
pub struct TieredStats {
/// Number of allocations served by the small pool.
pub small_allocations: u64,
/// Number of allocations served by the medium pools.
pub medium_allocations: u64,
/// Number of allocations served via mmap.
pub large_allocations: u64,
/// Number of allocations served via huge pages.
pub huge_allocations: u64,
/// Running total of currently outstanding allocated bytes.
pub total_allocated_bytes: u64,
/// Stats of the shared small pool.
pub small_pool_stats: PoolStats,
/// Stats of the medium pools — only those of the calling thread,
/// because medium pools are thread-local.
pub medium_pool_stats: Vec<PoolStats>,
/// Stats of the mmap allocator.
pub mmap_stats: crate::memory::mmap::MmapStats,
}
thread_local! {
/// Per-thread pools for medium allocations, one per size class.
/// Thread-local to avoid cross-thread contention on the pool locks.
static MEDIUM_POOLS: Vec<Arc<MemoryPool>> = {
const SIZE_CLASSES: [usize; 5] = [1024, 2048, 4096, 8192, 16384];
let mut pools = Vec::with_capacity(SIZE_CLASSES.len());
for &chunk_size in &SIZE_CLASSES {
let config = PoolConfig::new(chunk_size, 32, 16);
let pool = MemoryPool::new(config).expect("memory pool creation");
pools.push(Arc::new(pool));
}
pools
};
}
/// Size-tiered memory allocator that routes each request to the cheapest
/// suitable backend: a shared small pool, thread-local medium pools, an
/// mmap allocator, or (on Linux) huge pages.
pub struct TieredMemoryAllocator {
// Tier enable flags and thresholds fixed at construction time.
config: TieredConfig,
// Shared pool for allocations up to SMALL_THRESHOLD.
small_pool: Arc<MemoryPool>,
// Backend for large allocations.
mmap_allocator: Arc<MemoryMappedAllocator>,
// Backend for huge allocations (Linux only).
#[cfg(target_os = "linux")]
hugepage_allocator: Arc<HugePageAllocator>,
// Per-tier allocation counters (relaxed; statistics only).
small_allocs: AtomicU64,
medium_allocs: AtomicU64,
large_allocs: AtomicU64,
huge_allocs: AtomicU64,
// Running total of outstanding allocated bytes.
total_bytes: AtomicU64,
// Recent-size histogram used for workload pattern detection.
allocation_history: Arc<Mutex<AllocationHistory>>,
}
/// Rolling record of allocation sizes used to detect workload patterns.
struct AllocationHistory {
// Counts per power-of-two bucket: bucket i covers [2^i, 2^(i+1)).
// Sizes >= 2^31 are clamped into the last bucket.
size_histogram: [u64; 32],
// Most recent allocation sizes, bounded by `max_recent`.
recent_sizes: VecDeque<usize>,
// Capacity bound for `recent_sizes`.
max_recent: usize,
}
impl AllocationHistory {
/// Creates an empty history with room for 1000 recent sizes.
fn new() -> Self {
Self {
size_histogram: [0; 32],
recent_sizes: VecDeque::with_capacity(1000),
max_recent: 1000,
}
}
/// Records a single allocation of `size` bytes.
fn record_allocation(&mut self, size: usize) {
// floor(log2(size)); `usize::BITS` keeps this correct on 32-bit
// targets (the previous `63 - leading_zeros` assumed a 64-bit usize).
let bucket = if size == 0 {
0
} else {
(usize::BITS - 1 - size.leading_zeros()) as usize
};
// Clamp oversized buckets into the last slot instead of dropping the
// sample, so very large allocations still count as "large" below.
self.size_histogram[bucket.min(31)] += 1;
if self.recent_sizes.len() >= self.max_recent {
// O(1) with VecDeque; the previous Vec::remove(0) was O(n) on
// every allocation once the buffer filled up.
self.recent_sizes.pop_front();
}
self.recent_sizes.push_back(size);
}
/// Classifies the recorded workload by which size band dominates (> 70%).
fn get_allocation_pattern(&self) -> AllocationPattern {
let total: u64 = self.size_histogram.iter().sum();
if total == 0 {
return AllocationPattern::Mixed;
}
// Bands: < 1 KiB (buckets 0..10), 1-64 KiB (10..16), >= 64 KiB (16..).
let small_ratio = self.size_histogram[0..10].iter().sum::<u64>() as f64 / total as f64;
let medium_ratio = self.size_histogram[10..16].iter().sum::<u64>() as f64 / total as f64;
let large_ratio = self.size_histogram[16..].iter().sum::<u64>() as f64 / total as f64;
if small_ratio > 0.7 {
AllocationPattern::SmallDominated
} else if medium_ratio > 0.7 {
AllocationPattern::MediumDominated
} else if large_ratio > 0.7 {
AllocationPattern::LargeDominated
} else {
AllocationPattern::Mixed
}
}
}
/// Dominant size band detected from the allocation history.
#[derive(Debug, Clone, Copy)]
pub enum AllocationPattern {
SmallDominated,
MediumDominated,
LargeDominated,
Mixed,
}
impl TieredMemoryAllocator {
/// Creates a tiered allocator from `config`.
///
/// Disabled tiers still receive placeholder backends (a minimal pool, or
/// allocators configured with a `usize::MAX` threshold) so the routing
/// logic can hold every backend unconditionally.
///
/// # Errors
/// Returns an error if any backend pool or allocator fails to initialize.
pub fn new(config: TieredConfig) -> Result<Self> {
let small_pool = if config.enable_small_pools {
Arc::new(MemoryPool::new(PoolConfig::new(SMALL_THRESHOLD, 100, 8))?)
} else {
// Placeholder only; allocate() skips this tier when disabled.
Arc::new(MemoryPool::new(PoolConfig::new(64, 1, 8))?)
};
let mmap_allocator = if config.enable_mmap_large {
Arc::new(MemoryMappedAllocator::new(config.mmap_threshold))
} else {
Arc::new(MemoryMappedAllocator::new(usize::MAX))
};
#[cfg(target_os = "linux")]
let hugepage_allocator = if config.enable_hugepages {
Arc::new(HugePageAllocator::with_config(
config.hugepage_threshold,
HUGEPAGE_SIZE_2MB,
)?)
} else {
Arc::new(HugePageAllocator::with_config(
usize::MAX,
HUGEPAGE_SIZE_2MB,
)?)
};
Ok(Self {
config,
small_pool,
mmap_allocator,
#[cfg(target_os = "linux")]
hugepage_allocator,
small_allocs: AtomicU64::new(0),
medium_allocs: AtomicU64::new(0),
large_allocs: AtomicU64::new(0),
huge_allocs: AtomicU64::new(0),
total_bytes: AtomicU64::new(0),
allocation_history: Arc::new(Mutex::new(AllocationHistory::new())),
})
}
/// Creates an allocator with `TieredConfig::default()`.
///
/// Kept as an inherent fallible `default` (returns `Result`), so it
/// cannot implement the `Default` trait.
pub fn default() -> Result<Self> {
Self::new(TieredConfig::default())
}
/// Allocates `size` bytes, routing to the smallest suitable enabled tier.
///
/// # Errors
/// Returns an error for `size == 0` or when the chosen backend fails.
pub fn allocate(&self, size: usize) -> Result<TieredAllocation> {
if size == 0 {
return Err(ZiporaError::invalid_data("allocation size cannot be zero"));
}
// Best effort: never block the allocation path on the history lock.
if let Ok(mut history) = self.allocation_history.try_lock() {
history.record_allocation(size);
}
let result = if size <= SMALL_THRESHOLD && self.config.enable_small_pools {
self.allocate_small(size)
} else if size <= MEDIUM_THRESHOLD && self.config.enable_medium_pools {
self.allocate_medium(size)
} else if size < LARGE_THRESHOLD && self.config.enable_mmap_large {
self.allocate_large(size)
} else {
self.allocate_huge(size)
};
// Count bytes only on success, and use the handle's own size so the
// total stays exactly balanced with deallocate() (which subtracts the
// handle size — backends may round the requested size up). Previously
// the requested size was added up-front, so failed allocations
// permanently inflated the counter.
if let Ok(allocation) = &result {
self.total_bytes
.fetch_add(allocation.size() as u64, Ordering::Relaxed);
}
result
}
/// Returns `allocation` to the tier that produced it and updates the
/// outstanding-bytes total.
///
/// # Errors
/// Propagates backend deallocation failures.
pub fn deallocate(&self, allocation: TieredAllocation) -> Result<()> {
match allocation {
TieredAllocation::Small(ptr, size) => {
self.small_pool.deallocate(ptr)?;
self.total_bytes.fetch_sub(size as u64, Ordering::Relaxed);
}
TieredAllocation::Medium(ptr, size) => {
self.deallocate_medium(ptr, size)?;
self.total_bytes.fetch_sub(size as u64, Ordering::Relaxed);
}
TieredAllocation::Large(allocation) => {
let size = allocation.size();
self.mmap_allocator.deallocate(allocation)?;
self.total_bytes.fetch_sub(size as u64, Ordering::Relaxed);
}
#[cfg(target_os = "linux")]
TieredAllocation::Huge(hugepage) => {
let size = hugepage.size();
// Huge pages are released by their Drop impl; no explicit call.
drop(hugepage);
self.total_bytes.fetch_sub(size as u64, Ordering::Relaxed);
}
}
Ok(())
}
/// Point-in-time snapshot of allocator statistics.
///
/// NOTE: `medium_pool_stats` covers only the calling thread's pools,
/// because medium pools are thread-local.
pub fn stats(&self) -> TieredStats {
let medium_pool_stats =
MEDIUM_POOLS.with(|pools| pools.iter().map(|pool| pool.stats()).collect());
TieredStats {
small_allocations: self.small_allocs.load(Ordering::Relaxed),
medium_allocations: self.medium_allocs.load(Ordering::Relaxed),
large_allocations: self.large_allocs.load(Ordering::Relaxed),
huge_allocations: self.huge_allocs.load(Ordering::Relaxed),
total_allocated_bytes: self.total_bytes.load(Ordering::Relaxed),
small_pool_stats: self.small_pool.stats(),
medium_pool_stats,
mmap_stats: self.mmap_allocator.stats(),
}
}
/// Classifies the recorded workload; falls back to `Mixed` when the
/// history lock is poisoned.
pub fn get_allocation_pattern(&self) -> Result<AllocationPattern> {
if let Ok(history) = self.allocation_history.lock() {
Ok(history.get_allocation_pattern())
} else {
Ok(AllocationPattern::Mixed)
}
}
/// Hook for pattern-driven tuning; currently only logs the detected
/// pattern.
pub fn optimize_for_pattern(&self) -> Result<()> {
let pattern = self.get_allocation_pattern()?;
log::debug!("Optimizing tiered allocator for pattern: {:?}", pattern);
Ok(())
}
/// Serves a request from the shared small pool.
fn allocate_small(&self, size: usize) -> Result<TieredAllocation> {
let chunk = self.small_pool.allocate()?;
// Count successes only, so the counter matches handed-out allocations.
self.small_allocs.fetch_add(1, Ordering::Relaxed);
Ok(TieredAllocation::Small(chunk, size))
}
/// Serves a request from the smallest thread-local pool whose chunk size
/// fits, falling back to the large tier if no size class matches.
fn allocate_medium(&self, size: usize) -> Result<TieredAllocation> {
MEDIUM_POOLS.with(|pools| {
for pool in pools.iter() {
if pool.config().chunk_size >= size {
let chunk = pool.allocate()?;
// Increment only when a medium pool actually served the
// request; the old code counted the attempt up-front and
// so double-counted fall-through allocations as both
// medium and large.
self.medium_allocs.fetch_add(1, Ordering::Relaxed);
return Ok(TieredAllocation::Medium(chunk, size));
}
}
// No suitable size class: serve (and count) as a large allocation.
self.allocate_large(size)
})
}
/// Returns a medium chunk to the matching thread-local pool.
///
/// NOTE(review): because medium pools are thread-local, a `Medium`
/// handle deallocated on a different thread than it was allocated on is
/// handed to a *different* pool instance — confirm the pool tolerates
/// foreign pointers, or restrict medium handles to one thread.
fn deallocate_medium(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
MEDIUM_POOLS.with(|pools| {
for pool in pools.iter() {
if pool.config().chunk_size >= size {
return pool.deallocate(ptr);
}
}
Err(ZiporaError::invalid_data(
"no suitable pool for deallocation",
))
})
}
/// Serves a request via the mmap allocator.
fn allocate_large(&self, size: usize) -> Result<TieredAllocation> {
let allocation = self.mmap_allocator.allocate(size)?;
self.large_allocs.fetch_add(1, Ordering::Relaxed);
Ok(TieredAllocation::Large(allocation))
}
/// Serves a request via huge pages when available and advisable,
/// otherwise falls back to the large tier.
fn allocate_huge(&self, size: usize) -> Result<TieredAllocation> {
#[cfg(target_os = "linux")]
{
if self.config.enable_hugepages && self.hugepage_allocator.should_use_hugepages(size) {
let hugepage = self.hugepage_allocator.allocate(size)?;
self.huge_allocs.fetch_add(1, Ordering::Relaxed);
return Ok(TieredAllocation::Huge(hugepage));
}
}
self.allocate_large(size)
}
}
// SAFETY: every field is an atomic or is shared behind `Arc`/`Mutex`. This
// relies on `MemoryPool`, `MemoryMappedAllocator` and `HugePageAllocator`
// being safe to use from multiple threads — TODO(review): confirm; the
// presence of these manual impls suggests the compiler could not derive
// Send/Sync for at least one field type.
unsafe impl Send for TieredMemoryAllocator {}
unsafe impl Sync for TieredMemoryAllocator {}
impl TieredAllocation {
/// Returns the allocation's memory as a read-only byte slice of the
/// requested length.
pub fn as_slice(&self) -> &[u8] {
match self {
// SAFETY: `ptr` was returned by a pool whose chunk size is at
// least `size`, and stays valid until the handle is deallocated.
// NOTE(review): assumes pool memory is initialized — confirm,
// otherwise reading here is undefined behavior.
TieredAllocation::Small(ptr, size) => unsafe {
std::slice::from_raw_parts(ptr.as_ptr(), *size)
},
// SAFETY: same invariant as `Small`, backed by a medium pool chunk.
TieredAllocation::Medium(ptr, size) => unsafe {
std::slice::from_raw_parts(ptr.as_ptr(), *size)
},
TieredAllocation::Large(allocation) => allocation.as_slice(),
#[cfg(target_os = "linux")]
TieredAllocation::Huge(hugepage) => hugepage.as_slice(),
}
}
/// Returns the allocation's memory as a mutable byte slice of the
/// requested length.
pub fn as_mut_slice(&mut self) -> &mut [u8] {
match self {
// SAFETY: exclusive access is guaranteed by `&mut self`; pointer
// validity as in `as_slice`.
TieredAllocation::Small(ptr, size) => unsafe {
std::slice::from_raw_parts_mut(ptr.as_ptr(), *size)
},
// SAFETY: same invariant as `Small`, backed by a medium pool chunk.
TieredAllocation::Medium(ptr, size) => unsafe {
std::slice::from_raw_parts_mut(ptr.as_ptr(), *size)
},
TieredAllocation::Large(allocation) => allocation.as_mut_slice(),
#[cfg(target_os = "linux")]
TieredAllocation::Huge(hugepage) => hugepage.as_mut_slice(),
}
}
/// Size of the allocation in bytes: the requested size for pool-backed
/// variants, the backend-reported size for `Large`/`Huge`.
#[inline]
pub fn size(&self) -> usize {
match self {
TieredAllocation::Small(_, size) => *size,
TieredAllocation::Medium(_, size) => *size,
TieredAllocation::Large(allocation) => allocation.size(),
#[cfg(target_os = "linux")]
TieredAllocation::Huge(hugepage) => hugepage.size(),
}
}
/// Raw pointer to the allocation, cast to `*mut T`.
///
/// The caller is responsible for alignment and aliasing when using the
/// returned pointer.
pub fn as_ptr<T>(&self) -> *mut T {
match self {
TieredAllocation::Small(ptr, _) => ptr.as_ptr() as *mut T,
TieredAllocation::Medium(ptr, _) => ptr.as_ptr() as *mut T,
TieredAllocation::Large(allocation) => allocation.as_ptr(),
#[cfg(target_os = "linux")]
TieredAllocation::Huge(hugepage) => hugepage.as_slice().as_ptr() as *mut T,
}
}
}
/// Process-wide tiered allocator backing the free functions below.
/// Initialized lazily on first use; panics if default construction fails.
static GLOBAL_TIERED_ALLOCATOR: once_cell::sync::Lazy<TieredMemoryAllocator> =
once_cell::sync::Lazy::new(|| TieredMemoryAllocator::default().expect("default allocator creation"));
/// Allocates `size` bytes from the process-global tiered allocator.
pub fn tiered_allocate(size: usize) -> Result<TieredAllocation> {
GLOBAL_TIERED_ALLOCATOR.allocate(size)
}
/// Returns an allocation obtained from [`tiered_allocate`] to the global
/// allocator.
pub fn tiered_deallocate(allocation: TieredAllocation) -> Result<()> {
GLOBAL_TIERED_ALLOCATOR.deallocate(allocation)
}
/// Snapshot of the global tiered allocator's statistics.
pub fn get_tiered_stats() -> TieredStats {
GLOBAL_TIERED_ALLOCATOR.stats()
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::Mutex;
// Serializes tests that touch the process-global allocator so their
// stats assertions don't race with each other.
static GLOBAL_ALLOCATOR_TEST_MUTEX: Mutex<()> = Mutex::new(());
#[test]
fn test_tiered_allocator_creation() {
let allocator = TieredMemoryAllocator::default().unwrap();
let stats = allocator.stats();
assert_eq!(stats.small_allocations, 0);
assert_eq!(stats.medium_allocations, 0);
assert_eq!(stats.large_allocations, 0);
assert_eq!(stats.huge_allocations, 0);
}
#[test]
fn test_small_allocation() {
let allocator = TieredMemoryAllocator::default().unwrap();
let size = 512;
let mut allocation = allocator.allocate(size).unwrap();
assert_eq!(allocation.size(), size);
// Write to both ends to verify the slice covers the whole request.
let slice = allocation.as_mut_slice();
slice[0] = 42;
slice[size - 1] = 84;
let slice = allocation.as_slice();
assert_eq!(slice[0], 42);
assert_eq!(slice[size - 1], 84);
allocator.deallocate(allocation).unwrap();
let stats = allocator.stats();
assert_eq!(stats.small_allocations, 1);
// Alloc/dealloc must balance the byte counter exactly.
assert_eq!(stats.total_allocated_bytes, 0);
}
#[test]
fn test_medium_allocation() {
let allocator = TieredMemoryAllocator::default().unwrap();
let size = 4 * 1024;
let mut allocation = allocator.allocate(size).unwrap();
assert_eq!(allocation.size(), size);
let slice = allocation.as_mut_slice();
slice[0] = 42;
slice[size - 1] = 84;
allocator.deallocate(allocation).unwrap();
let stats = allocator.stats();
assert_eq!(stats.medium_allocations, 1);
}
#[test]
fn test_large_allocation() {
let allocator = TieredMemoryAllocator::default().unwrap();
let size = 64 * 1024;
let mut allocation = allocator.allocate(size).unwrap();
assert_eq!(allocation.size(), size);
let slice = allocation.as_mut_slice();
slice[0] = 42;
slice[size - 1] = 84;
allocator.deallocate(allocation).unwrap();
let stats = allocator.stats();
assert_eq!(stats.large_allocations, 1);
}
#[test]
fn test_huge_allocation() {
let allocator = TieredMemoryAllocator::default().unwrap();
let size = 4 * 1024 * 1024;
// Huge pages may be unavailable (non-Linux, or no hugepage reservation),
// so a failed allocation is tolerated here.
match allocator.allocate(size) {
Ok(mut allocation) => {
assert_eq!(allocation.size(), size);
let slice = allocation.as_mut_slice();
slice[0] = 42;
slice[size - 1] = 84;
allocator.deallocate(allocation).unwrap();
let stats = allocator.stats();
assert!(stats.huge_allocations > 0 || stats.large_allocations > 0);
}
Err(_) => {
println!("Huge allocation failed - this is acceptable in test environments");
}
}
}
#[test]
fn test_mixed_allocation_pattern() {
let allocator = TieredMemoryAllocator::default().unwrap();
// One size per tier boundary: small, medium, large, large/huge.
let sizes = vec![128, 2048, 32768, 1048576];
let mut allocations = Vec::new();
for size in &sizes {
let allocation = allocator.allocate(*size).unwrap();
allocations.push(allocation);
}
for allocation in allocations {
allocator.deallocate(allocation).unwrap();
}
let stats = allocator.stats();
assert!(stats.small_allocations > 0);
assert!(stats.medium_allocations > 0);
assert!(stats.large_allocations > 0);
}
#[test]
fn test_allocation_pattern_detection() {
let allocator = TieredMemoryAllocator::default().unwrap();
for _ in 0..100 {
let allocation = allocator.allocate(256).unwrap();
allocator.deallocate(allocation).unwrap();
}
let pattern = allocator.get_allocation_pattern().unwrap();
// BUG FIX: the `matches!` result was previously discarded, so this
// test asserted nothing.
assert!(matches!(
pattern,
AllocationPattern::SmallDominated | AllocationPattern::Mixed
));
}
#[test]
fn test_global_tiered_allocator() {
let _guard = GLOBAL_ALLOCATOR_TEST_MUTEX.lock().unwrap();
let size = 1024;
let allocation = tiered_allocate(size).unwrap();
assert_eq!(allocation.size(), size);
tiered_deallocate(allocation).unwrap();
let stats = get_tiered_stats();
assert!(stats.small_allocations > 0 || stats.medium_allocations > 0);
}
#[test]
fn test_zero_size_allocation() {
let allocator = TieredMemoryAllocator::default().unwrap();
let result = allocator.allocate(0);
assert!(result.is_err());
}
#[test]
fn test_allocator_configuration() {
// Small and medium tiers disabled: a 1 KiB request must be served
// by the mmap (large) tier.
let config = TieredConfig {
enable_small_pools: false,
enable_medium_pools: false,
enable_mmap_large: true,
enable_hugepages: false,
mmap_threshold: 512,
hugepage_threshold: usize::MAX,
};
let allocator = TieredMemoryAllocator::new(config).unwrap();
let allocation = allocator.allocate(1024).unwrap();
allocator.deallocate(allocation).unwrap();
let stats = allocator.stats();
assert_eq!(stats.small_allocations, 0);
assert!(stats.large_allocations > 0);
}
}