use crate::error::{Result, ZiporaError};
use std::alloc::{Layout, alloc, dealloc};
use std::cell::UnsafeCell;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicU32, AtomicU64, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
/// Minimum alignment (bytes) used as the default and as the step for
/// size-class generation.
const ALIGN_SIZE: usize = 8;
/// Sentinel offset marking the end of an intrusive free list.
const LIST_TAIL: u32 = u32::MAX;
// NOTE(review): appears unused in this file — size classes are generated
// dynamically by `generate_size_classes`; confirm before removing.
const DEFAULT_SIZE_CLASSES: usize = 32;
/// Configuration for a [`FixedCapacityMemoryPool`].
#[derive(Debug, Clone)]
pub struct FixedCapacityPoolConfig {
    /// Largest allocation (bytes) the pool will serve; also the physical
    /// size of every backing block.
    pub max_block_size: usize,
    /// Fixed number of backing blocks; the pool never grows past this.
    pub total_blocks: usize,
    /// Alignment of the backing region and size-class rounding step.
    /// Presumably must be a non-zero power of two (required by `Layout`) —
    /// TODO confirm callers validate this.
    pub alignment: usize,
    /// When true, allocation/utilization statistics are tracked atomically.
    pub enable_stats: bool,
    /// When true, backing memory is reserved in `new`; otherwise on first use.
    pub eager_allocation: bool,
    /// When true, block contents are zeroed on deallocation.
    pub secure_clear: bool,
}
impl Default for FixedCapacityPoolConfig {
fn default() -> Self {
Self {
max_block_size: 4096,
total_blocks: 1000,
alignment: ALIGN_SIZE,
enable_stats: true,
eager_allocation: true,
secure_clear: false,
}
}
}
impl FixedCapacityPoolConfig {
    /// Preset for many small objects: 10k blocks of up to 1 KiB.
    pub fn small_objects() -> Self {
        Self {
            max_block_size: 1024,
            total_blocks: 10000,
            ..Self::default()
        }
    }

    /// Preset for medium objects: 1k blocks of up to 64 KiB, 16-byte aligned.
    pub fn medium_objects() -> Self {
        Self {
            max_block_size: 64 * 1024,
            total_blocks: 1000,
            alignment: 16,
            ..Self::default()
        }
    }

    /// Preset for latency-sensitive use: cache-line alignment and no
    /// statistics overhead on the allocation path.
    pub fn realtime() -> Self {
        Self {
            max_block_size: 8192,
            total_blocks: 5000,
            alignment: 64,
            enable_stats: false,
            ..Self::default()
        }
    }

    /// Preset for sensitive data: blocks are zeroed on deallocation.
    pub fn secure() -> Self {
        Self {
            max_block_size: 4096,
            total_blocks: 2000,
            secure_clear: true,
            ..Self::default()
        }
    }
}
/// Runtime counters for a `FixedCapacityMemoryPool`.
///
/// All fields are atomics so concurrent allocations can update them without
/// locking. `utilization` is stored in basis points (1/100 of a percent).
#[derive(Debug, Default)]
pub struct FixedCapacityPoolStats {
    pub allocations: AtomicU64,
    pub deallocations: AtomicU64,
    pub active_blocks: AtomicUsize,
    pub peak_blocks: AtomicUsize,
    pub allocation_failures: AtomicU64,
    pub utilization: AtomicU32,
}

impl FixedCapacityPoolStats {
    /// Current utilization as a percentage (stored value is basis points).
    pub fn utilization_percent(&self) -> f64 {
        let basis_points = self.utilization.load(Ordering::Relaxed);
        f64::from(basis_points) / 100.0
    }

    /// Fraction of allocation attempts that succeeded; 1.0 when no attempt
    /// has been made yet.
    pub fn success_rate(&self) -> f64 {
        let ok = self.allocations.load(Ordering::Relaxed);
        let failed = self.allocation_failures.load(Ordering::Relaxed);
        match ok + failed {
            0 => 1.0,
            attempts => ok as f64 / attempts as f64,
        }
    }

    /// True when every block in a pool of `total_blocks` is allocated.
    pub fn is_at_capacity(&self, total_blocks: usize) -> bool {
        let active = self.active_blocks.load(Ordering::Relaxed);
        active >= total_blocks
    }
}
/// Head of one lock-free intrusive free list.
///
/// `head` holds the byte offset (within the pool's backing region) of the
/// first free block, or `LIST_TAIL` when the list is empty; `count` is an
/// advisory length used for statistics.
#[derive(Debug)]
struct FreeListHead {
    head: AtomicU32,
    count: AtomicU32,
}

impl FreeListHead {
    /// Creates an empty list (`head == LIST_TAIL`, zero count).
    fn new() -> Self {
        FreeListHead {
            head: AtomicU32::new(LIST_TAIL),
            count: AtomicU32::new(0),
        }
    }
}
/// Header written at the start of every *free* block, linking it into a
/// size-class free list. While a block is allocated the caller's data
/// overwrites this header; it is rewritten on deallocation.
#[repr(C)]
#[derive(Debug)]
struct BlockHeader {
    // Index into the pool's `size_classes` table this block belongs to.
    size_class: u32,
    // Set to BLOCK_HEADER_MAGIC; checked on allocation to detect corruption.
    magic: u32,
    // Byte offset of the next free block, or LIST_TAIL at the end of the list.
    next: u32,
    // Pads the header to 16 bytes.
    _padding: u32,
}
/// Magic value stored in `BlockHeader::magic` to detect corrupted headers.
const BLOCK_HEADER_MAGIC: u32 = 0xDEADBEEF;
/// Bounded memory pool: a single backing region carved into a fixed number
/// of equally sized blocks, served through per-size-class lock-free free
/// lists.
pub struct FixedCapacityMemoryPool {
    config: FixedCapacityPoolConfig,
    // Base pointer of the backing region; None until (lazily) allocated.
    memory: UnsafeCell<Option<NonNull<u8>>>,
    // Layout used for the backing allocation; needed again in Drop.
    memory_layout: UnsafeCell<Option<Layout>>,
    // One free-list head per entry in `size_classes`.
    free_lists: UnsafeCell<Vec<FreeListHead>>,
    // Ascending block sizes; the last entry equals `config.max_block_size`.
    size_classes: Vec<usize>,
    stats: Option<Arc<FixedCapacityPoolStats>>,
    // Serializes lazy backing-memory initialization; the bool records
    // whether the lazy path already ran.
    init_mutex: Mutex<bool>,
}
// SAFETY claim: the free lists are updated only through atomics and lazy
// initialization is guarded by `init_mutex`.
// NOTE(review): the unlocked UnsafeCell read in `ensure_memory_allocated`
// is not synchronized with the mutex-guarded write — confirm the intended
// concurrency model before relying on `Sync`.
unsafe impl Send for FixedCapacityMemoryPool {}
unsafe impl Sync for FixedCapacityMemoryPool {}
impl FixedCapacityMemoryPool {
pub fn new(config: FixedCapacityPoolConfig) -> Result<Self> {
let size_classes = Self::generate_size_classes(config.max_block_size, config.alignment);
let num_classes = size_classes.len();
let mut free_lists = Vec::with_capacity(num_classes);
for _ in 0..num_classes {
free_lists.push(FreeListHead::new());
}
let stats = if config.enable_stats {
Some(Arc::new(FixedCapacityPoolStats::default()))
} else {
None
};
let mut pool = Self {
config,
memory: UnsafeCell::new(None),
memory_layout: UnsafeCell::new(None),
free_lists: UnsafeCell::new(free_lists),
size_classes,
stats,
init_mutex: Mutex::new(false),
};
if pool.config.eager_allocation {
pool.allocate_backing_memory()?;
}
Ok(pool)
}
/// Allocates a block large enough for `size` bytes.
///
/// Returns an RAII handle that gives the block back to the pool on drop.
/// Fails for zero-sized requests, requests above `max_block_size`, or
/// when the pool is exhausted.
pub fn allocate(&self, size: usize) -> Result<FixedCapacityAllocation> {
    if size == 0 {
        return Err(ZiporaError::invalid_data("Cannot allocate zero bytes"));
    }
    if size > self.config.max_block_size {
        return Err(ZiporaError::invalid_data(&format!(
            "Allocation size {} exceeds maximum {}", size, self.config.max_block_size
        )));
    }
    // Lazily reserve the backing memory on first use.
    self.ensure_memory_allocated()?;
    let size_class_index = self.find_size_class(size)?;
    // The caller may use the whole rounded-up class size, not just `size`.
    let actual_size = self.size_classes[size_class_index];
    let ptr = self.allocate_from_free_list(size_class_index)?;
    if let Some(stats) = &self.stats {
        stats.allocations.fetch_add(1, Ordering::Relaxed);
        let active = stats.active_blocks.fetch_add(1, Ordering::Relaxed) + 1;
        // Atomic max replaces the previous hand-rolled CAS retry loop.
        stats.peak_blocks.fetch_max(active, Ordering::Relaxed);
        // Stored in basis points (1/100 %) of total block capacity.
        let utilization = (active * 10000 / self.config.total_blocks) as u32;
        stats.utilization.store(utilization, Ordering::Relaxed);
    }
    Ok(FixedCapacityAllocation::new(ptr, actual_size, size_class_index, self))
}
/// Returns a block to its size-class free list; invoked from
/// `FixedCapacityAllocation::drop`.
fn deallocate(&self, ptr: NonNull<u8>, size_class_index: usize) -> Result<()> {
    // Reject pointers that do not belong to this pool's backing region.
    self.verify_pointer(ptr)?;
    if self.config.secure_clear {
        let size = self.size_classes[size_class_index];
        // Zero the block before it becomes reusable. The free-list header
        // is rewritten over its first bytes below, so only the bytes past
        // the header stay zero.
        unsafe {
            std::ptr::write_bytes(ptr.as_ptr(), 0, size);
        }
    }
    self.deallocate_to_free_list(ptr, size_class_index)?;
    if let Some(stats) = &self.stats {
        stats.deallocations.fetch_add(1, Ordering::Relaxed);
        // NOTE(review): underflows (panics in debug builds) if deallocate
        // ever runs more times than allocate — relies on RAII discipline.
        let active = stats.active_blocks.fetch_sub(1, Ordering::Relaxed) - 1;
        let utilization = (active * 10000 / self.config.total_blocks) as u32;
        stats.utilization.store(utilization, Ordering::Relaxed);
    }
    Ok(())
}
/// Returns a handle to the pool's statistics, if statistics are enabled.
pub fn stats(&self) -> Option<Arc<FixedCapacityPoolStats>> {
    self.stats.clone()
}
/// Total backing capacity in bytes (blocks × max block size).
pub fn total_capacity(&self) -> usize {
    self.config.total_blocks * self.config.max_block_size
}
/// Bytes still available, estimated from the active-block counter.
///
/// NOTE(review): when statistics are disabled this returns 0 even though
/// the pool may be completely empty, which is inconsistent with
/// `has_capacity` returning true in the same situation — confirm the
/// intended semantics.
pub fn available_capacity(&self) -> usize {
    if let Some(stats) = &self.stats {
        let used_blocks = stats.active_blocks.load(Ordering::Relaxed);
        let available_blocks = self.config.total_blocks.saturating_sub(used_blocks);
        available_blocks * self.config.max_block_size
    } else {
        0
    }
}
/// Best-effort check that an allocation of `size` could currently succeed.
/// Optimistically true when statistics (and thus usage tracking) are off.
pub fn has_capacity(&self, size: usize) -> bool {
    if size > self.config.max_block_size {
        return false;
    }
    if let Some(stats) = &self.stats {
        !stats.is_at_capacity(self.config.total_blocks)
    } else {
        true
    }
}
fn allocate_backing_memory(&mut self) -> Result<()> {
let total_size = self.config.total_blocks * self.config.max_block_size;
let layout = Layout::from_size_align(total_size, self.config.alignment)
.map_err(|e| ZiporaError::invalid_data(&format!("Invalid layout: {}", e)))?;
let memory = NonNull::new(unsafe { alloc(layout) })
.ok_or_else(|| ZiporaError::out_of_memory(total_size))?;
unsafe {
*self.memory.get() = Some(memory);
*self.memory_layout.get() = Some(layout);
}
self.initialize_free_lists()?;
Ok(())
}
/// Allocates the single backing region and seeds the free lists.
///
/// Must not run twice (the previous region would leak); callers serialize
/// it via `init_mutex` (lazy path) or `&mut self` construction (eager path).
fn allocate_backing_memory_internal(&self) -> Result<()> {
    // Guard against usize overflow for absurd configurations.
    let total_size = self
        .config
        .total_blocks
        .checked_mul(self.config.max_block_size)
        .ok_or_else(|| ZiporaError::invalid_data("Pool capacity overflows usize"))?;
    if total_size == 0 {
        // `alloc` with a zero-sized layout is undefined behavior.
        return Err(ZiporaError::invalid_data("Pool capacity must be non-zero"));
    }
    // Free-list links are u32 byte offsets (with LIST_TAIL reserved as a
    // sentinel), so the whole region must be addressable within u32;
    // otherwise the `as u32` casts in initialization would silently truncate.
    if total_size > u32::MAX as usize {
        return Err(ZiporaError::invalid_data(
            "Pool capacity exceeds u32 offset range",
        ));
    }
    let layout = Layout::from_size_align(total_size, self.config.alignment)
        .map_err(|e| ZiporaError::invalid_data(&format!("Invalid layout: {}", e)))?;
    let memory = NonNull::new(unsafe { alloc(layout) })
        .ok_or_else(|| ZiporaError::out_of_memory(total_size))?;
    // SAFETY: initialization is serialized by the callers (see above), so
    // no concurrent access to these UnsafeCells is in flight here.
    unsafe {
        *self.memory.get() = Some(memory);
        *self.memory_layout.get() = Some(layout);
    }
    self.initialize_free_lists_internal()?;
    Ok(())
}
/// Ensures the backing memory exists (lazy-allocation path).
fn ensure_memory_allocated(&self) -> Result<()> {
    // Fast path: memory already present.
    // NOTE(review): this unlocked UnsafeCell read is not synchronized with
    // the mutex-guarded write below — confirm this double-checked pattern
    // is acceptable for the intended concurrency model.
    unsafe {
        if (*self.memory.get()).is_some() {
            return Ok(());
        }
    }
    let mut initialized = self.init_mutex.lock()
        .map_err(|e| ZiporaError::resource_busy(format!("Init mutex poisoned: {}", e)))?;
    // Re-check under the lock via the flag so only one caller allocates.
    if !*initialized {
        self.allocate_backing_memory_internal()?;
        *initialized = true;
    }
    Ok(())
}
fn initialize_free_lists(&mut self) -> Result<()> {
let memory = unsafe { (*self.memory.get()).ok_or_else(||
ZiporaError::invalid_data("Memory not allocated"))? };
let block_size = self.config.max_block_size;
let largest_class = self.size_classes.len() - 1;
let free_lists = unsafe { &mut *self.free_lists.get() };
let free_list = &free_lists[largest_class];
for i in 0..self.config.total_blocks {
let offset = i * block_size;
let block_ptr = unsafe { memory.as_ptr().add(offset) };
let header = unsafe { &mut *(block_ptr as *mut BlockHeader) };
header.size_class = largest_class as u32;
header.magic = BLOCK_HEADER_MAGIC;
if i < self.config.total_blocks - 1 {
header.next = ((i + 1) * block_size) as u32;
} else {
header.next = LIST_TAIL;
}
}
free_list.head.store(0, Ordering::Relaxed);
free_list.count.store(self.config.total_blocks as u32, Ordering::Relaxed);
Ok(())
}
/// Writes a `BlockHeader` into every block and threads all blocks onto the
/// free list of the *largest* size class; smaller classes are served from
/// it on demand via `allocate_by_splitting`.
fn initialize_free_lists_internal(&self) -> Result<()> {
    let memory = unsafe { (*self.memory.get()).ok_or_else(||
        ZiporaError::invalid_data("Memory not allocated"))? };
    // Blocks are laid out back to back, each `max_block_size` bytes.
    let block_size = self.config.max_block_size;
    let largest_class = self.size_classes.len() - 1;
    let free_lists = unsafe { &mut *self.free_lists.get() };
    let free_list = &free_lists[largest_class];
    for i in 0..self.config.total_blocks {
        let offset = i * block_size;
        let block_ptr = unsafe { memory.as_ptr().add(offset) };
        let header = unsafe { &mut *(block_ptr as *mut BlockHeader) };
        header.size_class = largest_class as u32;
        header.magic = BLOCK_HEADER_MAGIC;
        // Link each block to the next by byte offset; the last block
        // terminates the list with the sentinel.
        if i < self.config.total_blocks - 1 {
            header.next = ((i + 1) * block_size) as u32;
        } else {
            header.next = LIST_TAIL;
        }
    }
    // Publish the fully linked list: head at offset 0, every block free.
    free_list.head.store(0, Ordering::Relaxed);
    free_list.count.store(self.config.total_blocks as u32, Ordering::Relaxed);
    Ok(())
}
/// Pops a block from the free list of `size_class_index`, falling back to
/// larger classes when that list is empty.
fn allocate_from_free_list(&self, size_class_index: usize) -> Result<NonNull<u8>> {
    let free_lists = unsafe { &*self.free_lists.get() };
    let free_list = &free_lists[size_class_index];
    // Lock-free pop with CAS retry (Treiber-stack style).
    // NOTE(review): a classic ABA hazard exists here — between loading
    // `head` and the CAS, another thread may pop this block, pop or push
    // others, then push this block back, leaving `next_offset` stale.
    // Confirm concurrent use is safe or add a version tag to `head`.
    loop {
        let current_head = free_list.head.load(Ordering::Acquire);
        if current_head == LIST_TAIL {
            // This class is empty; take a block from a larger class.
            return self.allocate_by_splitting(size_class_index);
        }
        let memory = unsafe { (*self.memory.get()).ok_or_else(||
            ZiporaError::invalid_data("Memory not allocated"))? };
        let block_ptr = unsafe { memory.as_ptr().add(current_head as usize) };
        let header = unsafe { &*(block_ptr as *const BlockHeader) };
        if header.magic != BLOCK_HEADER_MAGIC {
            return Err(ZiporaError::invalid_data("Block header corrupted"));
        }
        let next_offset = header.next;
        if free_list.head.compare_exchange_weak(
            current_head,
            next_offset,
            Ordering::Release,
            Ordering::Relaxed,
        ).is_ok() {
            // `count` is advisory and may transiently disagree with `head`.
            free_list.count.fetch_sub(1, Ordering::Relaxed);
            return NonNull::new(block_ptr)
                .ok_or_else(|| ZiporaError::invalid_data("Null block pointer"));
        }
    }
}
/// Fallback when `size_class_index`'s list is empty: serve the request
/// from the nearest non-empty larger class.
///
/// Despite the name, blocks are never physically split — every block is
/// `max_block_size` bytes; only the size-class tag changes when the block
/// is later freed back to the smaller class.
fn allocate_by_splitting(&self, size_class_index: usize) -> Result<NonNull<u8>> {
    for larger_class in (size_class_index + 1)..self.size_classes.len() {
        let free_lists = unsafe { &*self.free_lists.get() };
        let free_list = &free_lists[larger_class];
        // Cheap peek to skip empty classes; allocate_from_free_list
        // re-checks under CAS, so a stale read here is harmless.
        let head = free_list.head.load(Ordering::Acquire);
        if head != LIST_TAIL {
            if let Ok(ptr) = self.allocate_from_free_list(larger_class) {
                return Ok(ptr);
            }
        }
    }
    if let Some(stats) = &self.stats {
        stats.allocation_failures.fetch_add(1, Ordering::Relaxed);
    }
    // Report the size we failed to provide instead of a meaningless 0.
    Err(ZiporaError::out_of_memory(self.size_classes[size_class_index]))
}
/// Pushes a block onto the free list of `size_class_index` (lock-free).
fn deallocate_to_free_list(&self, ptr: NonNull<u8>, size_class_index: usize) -> Result<()> {
    let free_lists = unsafe { &*self.free_lists.get() };
    let free_list = &free_lists[size_class_index];
    let offset = self.ptr_to_offset(ptr)?;
    // Rebuild the header: the caller's data overwrote it while allocated.
    let header = unsafe { &mut *(ptr.as_ptr() as *mut BlockHeader) };
    header.size_class = size_class_index as u32;
    header.magic = BLOCK_HEADER_MAGIC;
    // Standard Treiber push: link to the current head, then CAS it in.
    loop {
        let current_head = free_list.head.load(Ordering::Acquire);
        header.next = current_head;
        if free_list.head.compare_exchange_weak(
            current_head,
            offset,
            Ordering::Release,
            Ordering::Relaxed,
        ).is_ok() {
            free_list.count.fetch_add(1, Ordering::Relaxed);
            return Ok(());
        }
    }
}
/// Returns the index of the smallest size class that can hold `size`.
fn find_size_class(&self, size: usize) -> Result<usize> {
    // `size_classes` is ascending, so the first fit is the tightest fit.
    self.size_classes
        .iter()
        .position(|&class_size| size <= class_size)
        .ok_or_else(|| ZiporaError::invalid_data("Size too large"))
}
/// Generates an ascending table of block size classes up to `max_size`.
///
/// Growth policy: linear steps of `alignment` below 128 bytes, 1.5× growth
/// up to 1 KiB, then doubling; every intermediate class is rounded up to a
/// multiple of `alignment`. The final class always equals `max_size`.
///
/// `alignment` must be a non-zero power of two: the rounding mask below
/// requires it, and an alignment of 0 previously made this loop forever.
fn generate_size_classes(max_size: usize, alignment: usize) -> Vec<usize> {
    assert!(
        alignment > 0 && alignment.is_power_of_two(),
        "alignment must be a non-zero power of two"
    );
    let mut classes = Vec::new();
    let mut current_size = alignment;
    while current_size <= max_size {
        classes.push(current_size);
        if current_size < 128 {
            current_size += alignment;
        } else if current_size < 1024 {
            current_size = (current_size * 3) / 2;
        } else {
            current_size *= 2;
        }
        // Round up to the next multiple of `alignment` (power-of-two mask).
        current_size = (current_size + alignment - 1) & !(alignment - 1);
    }
    // Guarantee the table is non-empty and tops out at exactly `max_size`.
    if classes.last() != Some(&max_size) {
        classes.push(max_size);
    }
    classes
}
/// Converts a pointer into this pool's backing region to its byte offset.
///
/// Rejects pointers outside the region, and also rejects pointers that are
/// not at a block boundary so a corrupted or interior pointer cannot be
/// spliced into a free list (all valid blocks start at multiples of
/// `max_block_size` from the base — see free-list initialization).
fn ptr_to_offset(&self, ptr: NonNull<u8>) -> Result<u32> {
    let memory = unsafe { (*self.memory.get()).ok_or_else(||
        ZiporaError::invalid_data("Memory not allocated"))? };
    let base = memory.as_ptr() as usize;
    let addr = ptr.as_ptr() as usize;
    if addr < base || addr >= base + self.total_capacity() {
        return Err(ZiporaError::invalid_data("Pointer outside pool"));
    }
    let offset = addr - base;
    if offset % self.config.max_block_size != 0 {
        return Err(ZiporaError::invalid_data("Pointer not at block boundary"));
    }
    Ok(offset as u32)
}
/// Validates that `ptr` belongs to this pool, discarding the offset.
fn verify_pointer(&self, ptr: NonNull<u8>) -> Result<()> {
    self.ptr_to_offset(ptr).map(|_| ())
}
}
impl Drop for FixedCapacityMemoryPool {
    /// Releases the backing region (if it was ever allocated) with the
    /// exact layout used to allocate it.
    fn drop(&mut self) {
        // SAFETY: `memory` was obtained from `alloc` with `layout`.
        // NOTE(review): outstanding allocations hold a raw pointer back to
        // this pool, so the pool must outlive them — not enforced by types.
        unsafe {
            if let (Some(memory), Some(layout)) = (*self.memory.get(), *self.memory_layout.get()) {
                dealloc(memory.as_ptr(), layout);
            }
        }
    }
}
/// RAII handle to one pool block; returns the block to the pool on drop.
///
/// NOTE(review): holds a raw `*const` back-pointer to the pool, so the
/// pool must outlive every allocation — this is not enforced by lifetimes;
/// verify that callers uphold it.
pub struct FixedCapacityAllocation {
    ptr: NonNull<u8>,
    // Usable size: the rounded-up size-class size, not the requested size.
    size: usize,
    // Index of the size class the block is returned to on drop.
    size_class_index: usize,
    pool: *const FixedCapacityMemoryPool,
}
impl FixedCapacityAllocation {
    /// Wraps a freshly popped block; called only by
    /// `FixedCapacityMemoryPool::allocate`.
    fn new(
        ptr: NonNull<u8>,
        size: usize,
        size_class_index: usize,
        pool: &FixedCapacityMemoryPool
    ) -> Self {
        Self {
            ptr,
            size,
            size_class_index,
            pool: pool as *const _
        }
    }
    /// Raw pointer to the start of the block.
    #[inline]
    pub fn as_ptr(&self) -> *mut u8 {
        self.ptr.as_ptr()
    }
    /// Usable size in bytes (size-class size, >= the requested size).
    #[inline]
    pub fn size(&self) -> usize {
        self.size
    }
    /// Mutable byte view of the whole block.
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.size) }
    }
    /// Shared byte view of the whole block.
    ///
    /// NOTE(review): the backing memory comes from `alloc` and is not
    /// initialized unless `secure_clear` zeroed it on a previous free, so
    /// reading a fresh block through this slice may observe uninitialized
    /// bytes — confirm whether callers are required to write first.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.size) }
    }
}
impl Drop for FixedCapacityAllocation {
    /// Returns the block to the pool; a failed deallocation is logged
    /// rather than panicking (Drop must not unwind).
    fn drop(&mut self) {
        // SAFETY: relies on the pool outliving this allocation — the raw
        // pool pointer is not lifetime-checked by the compiler.
        unsafe {
            if let Err(e) = (*self.pool).deallocate(self.ptr, self.size_class_index) {
                log::error!("Failed to deallocate fixed capacity memory: {}", e);
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pool_creation() {
        let config = FixedCapacityPoolConfig::default();
        let pool = FixedCapacityMemoryPool::new(config).unwrap();
        assert!(pool.stats.is_some());
        assert_eq!(pool.total_capacity(), 4096 * 1000);
    }

    #[test]
    fn test_basic_allocation() {
        let config = FixedCapacityPoolConfig::small_objects();
        let pool = FixedCapacityMemoryPool::new(config).unwrap();
        let alloc = pool.allocate(64).unwrap();
        // 64 is itself a size class, so no rounding is expected.
        assert_eq!(alloc.size(), 64);
        assert!(!alloc.as_ptr().is_null());
    }

    #[test]
    fn test_capacity_limits() {
        let config = FixedCapacityPoolConfig {
            max_block_size: 128,
            total_blocks: 10,
            ..FixedCapacityPoolConfig::default()
        };
        let pool = FixedCapacityMemoryPool::new(config).unwrap();
        let mut allocations = Vec::new();
        for i in 0..15 {
            match pool.allocate(64) {
                Ok(alloc) => allocations.push(alloc),
                Err(_) => {
                    assert!(i >= 10, "Should reach capacity around block 10");
                    break;
                }
            }
        }
        assert!(allocations.len() <= 10);
        if let Some(stats) = pool.stats() {
            assert!(stats.allocation_failures.load(Ordering::Relaxed) > 0);
        }
    }

    #[test]
    fn test_size_classes() {
        let classes = FixedCapacityMemoryPool::generate_size_classes(1024, 8);
        assert!(classes.len() > 1);
        // Classes must be strictly ascending and end exactly at max_size.
        for i in 1..classes.len() {
            assert!(classes[i] > classes[i - 1]);
        }
        assert_eq!(classes[classes.len() - 1], 1024);
    }

    #[test]
    fn test_different_configurations() {
        let rt_config = FixedCapacityPoolConfig::realtime();
        let rt_pool = FixedCapacityMemoryPool::new(rt_config).unwrap();
        // Realtime preset disables statistics entirely.
        assert!(rt_pool.stats.is_none());
        let secure_config = FixedCapacityPoolConfig::secure();
        let secure_pool = FixedCapacityMemoryPool::new(secure_config).unwrap();
        assert!(secure_pool.config.secure_clear);
    }

    #[test]
    fn test_lazy_allocation() {
        let config = FixedCapacityPoolConfig {
            eager_allocation: false,
            ..FixedCapacityPoolConfig::default()
        };
        let pool = FixedCapacityMemoryPool::new(config).unwrap();
        // No backing memory until the first allocation.
        unsafe { assert!((*pool.memory.get()).is_none()); }
        let _alloc = pool.allocate(64).unwrap();
        unsafe { assert!((*pool.memory.get()).is_some()); }
    }

    #[test]
    fn test_allocation_statistics() {
        let config = FixedCapacityPoolConfig::small_objects();
        let pool = FixedCapacityMemoryPool::new(config).unwrap();
        let mut allocations = Vec::new();
        for _ in 0..5 {
            allocations.push(pool.allocate(32).unwrap());
        }
        if let Some(stats) = pool.stats() {
            assert_eq!(stats.allocations.load(Ordering::Relaxed), 5);
            assert_eq!(stats.active_blocks.load(Ordering::Relaxed), 5);
            assert!(stats.utilization_percent() > 0.0);
        }
        // Dropping the handles must return every block and update counters.
        allocations.clear();
        if let Some(stats) = pool.stats() {
            assert_eq!(stats.deallocations.load(Ordering::Relaxed), 5);
            assert_eq!(stats.active_blocks.load(Ordering::Relaxed), 0);
        }
    }

    #[test]
    fn test_secure_clearing() {
        let config = FixedCapacityPoolConfig::secure();
        let pool = FixedCapacityMemoryPool::new(config).unwrap();
        {
            let mut alloc = pool.allocate(64).unwrap();
            alloc.as_mut_slice().fill(0xAA);
            // Dropped here: secure_clear zeroes the block, then the
            // free-list header is rewritten over its first bytes.
        }
        // The free list is LIFO, so this returns the block just freed.
        let alloc = pool.allocate(64).unwrap();
        let slice = alloc.as_slice();
        // Everything past the rewritten BlockHeader must have been zeroed.
        // (The original version of this test looped without asserting.)
        let header_len = std::mem::size_of::<BlockHeader>();
        assert!(
            slice[header_len..].iter().all(|&b| b == 0),
            "freed block was not securely cleared"
        );
    }
}