#![allow(unsafe_code)]
#![allow(clippy::expect_used)]
#![allow(clippy::arc_with_non_send_sync)]
use crate::error::{OxiGdalError, Result};
use parking_lot::{Mutex, RwLock};
use std::alloc::{Layout, alloc, dealloc};
use std::collections::{BTreeMap, HashMap};
use std::ptr::NonNull;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
/// Minimum alignment (bytes) guaranteed for every slab allocation.
pub const MIN_ALIGNMENT: usize = 16;
/// Largest block the default buddy allocator will hand out (16 MiB).
pub const MAX_BLOCK_SIZE: usize = 16 * 1024 * 1024;
/// Ascending size classes used by the slab allocator (256 B .. 4 MiB).
/// Order matters: lookups take the first class `>=` the requested size.
pub const SLAB_SIZES: &[usize] = &[
    256, 1024, 4096, 16384, 65536, 262_144, 1_048_576, 4_194_304,
];
/// Lock-free counters describing allocator activity.
///
/// All operations use `Ordering::Relaxed`: the counters are observability
/// data only and impose no ordering on the allocations they describe.
#[derive(Debug, Default)]
pub struct AllocatorStats {
    /// Count of successful allocations.
    pub total_allocations: AtomicU64,
    /// Count of deallocations.
    pub total_deallocations: AtomicU64,
    /// Bytes currently allocated (in rounded size-class units, not the
    /// caller-requested byte counts).
    pub bytes_allocated: AtomicUsize,
    /// High-water mark of `bytes_allocated`.
    pub peak_bytes_allocated: AtomicUsize,
    /// Count of failed allocation attempts.
    pub allocation_failures: AtomicU64,
    /// Allocations served from a slab free list.
    pub slab_hits: AtomicU64,
    /// Allocations that could not be served from a slab free list.
    pub slab_misses: AtomicU64,
}
impl AllocatorStats {
    /// Creates a zeroed statistics block.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }
    /// Records a successful allocation of `size` bytes and raises the peak
    /// watermark if needed (CAS loop: several threads may race to raise it).
    pub fn record_allocation(&self, size: usize) {
        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        let new_allocated = self.bytes_allocated.fetch_add(size, Ordering::Relaxed) + size;
        let mut peak = self.peak_bytes_allocated.load(Ordering::Relaxed);
        while new_allocated > peak {
            match self.peak_bytes_allocated.compare_exchange_weak(
                peak,
                new_allocated,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                // Another thread moved the peak; retry against its value.
                Err(x) => peak = x,
            }
        }
    }
    /// Records a deallocation of `size` bytes.
    pub fn record_deallocation(&self, size: usize) {
        self.total_deallocations.fetch_add(1, Ordering::Relaxed);
        // BUG FIX: a plain `fetch_sub` silently wraps to a huge value when
        // accounting is mismatched (more bytes freed than allocated).
        // Saturate at zero instead so the gauge stays meaningful.
        let _ = self
            .bytes_allocated
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |cur| {
                Some(cur.saturating_sub(size))
            });
    }
    /// Records a failed allocation attempt.
    pub fn record_failure(&self) {
        self.allocation_failures.fetch_add(1, Ordering::Relaxed);
    }
    /// Records an allocation served from a slab free list.
    pub fn record_slab_hit(&self) {
        self.slab_hits.fetch_add(1, Ordering::Relaxed);
    }
    /// Records an allocation that bypassed the slab free lists.
    pub fn record_slab_miss(&self) {
        self.slab_misses.fetch_add(1, Ordering::Relaxed);
    }
    /// Number of allocations not yet deallocated.
    #[must_use]
    pub fn current_allocations(&self) -> u64 {
        // BUG FIX: saturate instead of underflowing (debug panic / release
        // wraparound) when deallocations momentarily outnumber allocations.
        self.total_allocations
            .load(Ordering::Relaxed)
            .saturating_sub(self.total_deallocations.load(Ordering::Relaxed))
    }
    /// Fraction of allocations served from a slab free list, in `[0.0, 1.0]`;
    /// `0.0` when nothing has been recorded yet.
    #[must_use]
    // f64 has 52 mantissa bits; event counts that large are unrealistic.
    #[allow(clippy::cast_precision_loss)]
    pub fn slab_hit_rate(&self) -> f64 {
        let hits = self.slab_hits.load(Ordering::Relaxed);
        let misses = self.slab_misses.load(Ordering::Relaxed);
        let total = hits + misses;
        if total == 0 {
            0.0
        } else {
            hits as f64 / total as f64
        }
    }
}
/// RAII owner of one raw slab block: deallocates on drop.
///
/// NOTE(review): currently unused (`dead_code`) — the slab free lists store
/// bare `NonNull` pointers instead of this wrapper, so pooled memory is not
/// automatically returned to the system. Presumably intended for that role;
/// confirm before removing.
#[allow(dead_code)]
struct SlabBlock {
    // Raw block start, assumed to come from `alloc` with the layout below.
    ptr: NonNull<u8>,
    // Allocation size in bytes (must match the size used at `alloc` time).
    size: usize,
}
impl Drop for SlabBlock {
    fn drop(&mut self) {
        unsafe {
            // SAFETY: relies on the constructor invariant that `ptr` was
            // produced by `alloc` with exactly (`size`, MIN_ALIGNMENT) and
            // that `size` is a valid layout size — `from_size_align_unchecked`
            // performs no validation, so a violated invariant here is UB.
            let layout = Layout::from_size_align_unchecked(self.size, MIN_ALIGNMENT);
            dealloc(self.ptr.as_ptr(), layout);
        }
    }
}
pub struct SlabAllocator {
free_lists: Arc<RwLock<HashMap<usize, Vec<NonNull<u8>>>>>,
stats: Arc<AllocatorStats>,
#[cfg(debug_assertions)]
allocated_blocks: Arc<Mutex<HashMap<usize, Vec<NonNull<u8>>>>>,
}
unsafe impl Send for SlabAllocator {}
unsafe impl Sync for SlabAllocator {}
impl SlabAllocator {
    /// Creates an allocator with one (initially empty) free list per size
    /// class in `SLAB_SIZES`.
    #[must_use]
    pub fn new() -> Self {
        let mut free_lists = HashMap::new();
        for &size in SLAB_SIZES {
            free_lists.insert(size, Vec::new());
        }
        Self {
            free_lists: Arc::new(RwLock::new(free_lists)),
            stats: Arc::new(AllocatorStats::new()),
            #[cfg(debug_assertions)]
            allocated_blocks: Arc::new(Mutex::new(HashMap::new())),
        }
    }
    /// Allocates a block for `size` bytes, rounded up to the smallest size
    /// class in `SLAB_SIZES` that fits. The returned block is
    /// `MIN_ALIGNMENT`-aligned and must be released via `deallocate` with the
    /// same `size` (or any size mapping to the same class).
    ///
    /// # Errors
    /// Fails when `size` exceeds the largest size class, or when the system
    /// allocator returns null.
    pub fn allocate(&self, size: usize) -> Result<NonNull<u8>> {
        // SLAB_SIZES is ascending, so `find` yields the smallest class >= size.
        let slab_size = SLAB_SIZES
            .iter()
            .find(|&&s| s >= size)
            .copied()
            .ok_or_else(|| {
                // Oversize requests count as misses before erroring out.
                self.stats.record_slab_miss();
                OxiGdalError::invalid_parameter(
                    "parameter",
                    format!(
                        "Size {} exceeds maximum slab size {}",
                        size,
                        SLAB_SIZES.last().copied().unwrap_or(0)
                    ),
                )
            })?;
        {
            // Fast path: reuse a pooled block of this class if one exists.
            // Scoped so the write lock is dropped before the slow path.
            let mut free_lists = self.free_lists.write();
            if let Some(blocks) = free_lists.get_mut(&slab_size) {
                if let Some(ptr) = blocks.pop() {
                    self.stats.record_slab_hit();
                    // Stats track the class size, not the requested size.
                    self.stats.record_allocation(slab_size);
                    #[cfg(debug_assertions)]
                    {
                        // Record the block as outstanding for leak /
                        // double-free detection (debug builds only).
                        let mut allocated = self.allocated_blocks.lock();
                        allocated.entry(slab_size).or_default().push(ptr);
                    }
                    return Ok(ptr);
                }
            }
        }
        // Slow path: pool empty — fall through to the system allocator.
        self.stats.record_slab_miss();
        let layout = Layout::from_size_align(slab_size, MIN_ALIGNMENT)
            .map_err(|e| OxiGdalError::allocation_error(e.to_string()))?;
        let ptr = unsafe {
            // SAFETY: `layout` was validated by `from_size_align` above, and
            // the null result is checked before the pointer is wrapped.
            let raw_ptr = alloc(layout);
            if raw_ptr.is_null() {
                self.stats.record_failure();
                return Err(OxiGdalError::allocation_error(
                    "Failed to allocate memory".to_string(),
                ));
            }
            NonNull::new_unchecked(raw_ptr)
        };
        self.stats.record_allocation(slab_size);
        #[cfg(debug_assertions)]
        {
            let mut allocated = self.allocated_blocks.lock();
            allocated.entry(slab_size).or_default().push(ptr);
        }
        Ok(ptr)
    }
    /// Returns a block to its size-class pool for reuse. Memory is pooled,
    /// not handed back to the system here.
    ///
    /// # Errors
    /// Fails when `size` exceeds the largest size class; in debug builds it
    /// also fails when the block is not recorded as outstanding (double free
    /// or foreign pointer). Release builds perform NO pointer validation.
    pub fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
        // Must recompute the same size class the allocation used.
        let slab_size = SLAB_SIZES
            .iter()
            .find(|&&s| s >= size)
            .copied()
            .ok_or_else(|| {
                OxiGdalError::invalid_parameter(
                    "parameter",
                    format!("Size {size} exceeds maximum slab size"),
                )
            })?;
        #[cfg(debug_assertions)]
        {
            // Debug-only double-free / foreign-pointer detection: the block
            // must currently be in the outstanding ledger.
            let mut allocated = self.allocated_blocks.lock();
            if let Some(blocks) = allocated.get_mut(&slab_size) {
                if let Some(pos) = blocks.iter().position(|&p| p == ptr) {
                    blocks.swap_remove(pos);
                } else {
                    return Err(OxiGdalError::invalid_parameter(
                        "parameter",
                        "Block not found in allocated list".to_string(),
                    ));
                }
            } else {
                return Err(OxiGdalError::invalid_parameter(
                    "parameter",
                    "Invalid slab size for deallocation".to_string(),
                ));
            }
        }
        let mut free_lists = self.free_lists.write();
        free_lists.entry(slab_size).or_default().push(ptr);
        self.stats.record_deallocation(slab_size);
        Ok(())
    }
    /// Shared handle to this allocator's statistics counters.
    #[must_use]
    pub fn stats(&self) -> Arc<AllocatorStats> {
        Arc::clone(&self.stats)
    }
    /// Debug-only: reports blocks that were allocated but never deallocated.
    ///
    /// # Errors
    /// Returns an error (after printing per-class details to stderr) when any
    /// outstanding block remains.
    #[cfg(debug_assertions)]
    pub fn check_leaks(&self) -> Result<()> {
        let allocated = self.allocated_blocks.lock();
        let mut total_leaks = 0;
        for (size, blocks) in allocated.iter() {
            if !blocks.is_empty() {
                eprintln!(
                    "Memory leak detected: {} blocks of size {} bytes",
                    blocks.len(),
                    size
                );
                total_leaks += blocks.len();
            }
        }
        if total_leaks > 0 {
            Err(OxiGdalError::invalid_state(format!(
                "Detected {total_leaks} memory leaks"
            )))
        } else {
            Ok(())
        }
    }
}
impl Default for SlabAllocator {
fn default() -> Self {
Self::new()
}
}
/// Buddy-style allocator: power-of-two block sizes between `min_block_size`
/// and `max_block_size`, with free blocks kept in a size-keyed `BTreeMap` so
/// the smallest sufficient block can be found (and split) on demand.
///
/// NOTE(review): freed blocks are pooled forever — free lists may contain
/// interior pointers produced by splitting, so individual entries can never
/// be handed back to the system allocator, and no `Drop` impl attempts to.
/// Buddies are also never coalesced on free.
pub struct BuddyAllocator {
    /// block size -> free blocks of exactly that (power-of-two) size.
    free_lists: Arc<Mutex<BTreeMap<usize, Vec<NonNull<u8>>>>>,
    /// Smallest block ever handed out (power of two).
    min_block_size: usize,
    /// Largest block ever handed out (power of two).
    max_block_size: usize,
    stats: Arc<AllocatorStats>,
}
// SAFETY: the raw pointers are owned by the allocator and only touched while
// holding the `Mutex`, so cross-thread transfer/sharing is sound.
unsafe impl Send for BuddyAllocator {}
unsafe impl Sync for BuddyAllocator {}
impl BuddyAllocator {
    /// Creates a buddy allocator managing blocks between `min_block_size`
    /// and `max_block_size` (both inclusive).
    ///
    /// # Errors
    /// Fails if either bound is not a power of two, or if
    /// `min_block_size > max_block_size`.
    pub fn new(min_block_size: usize, max_block_size: usize) -> Result<Self> {
        if !min_block_size.is_power_of_two() || !max_block_size.is_power_of_two() {
            return Err(OxiGdalError::invalid_parameter(
                "parameter",
                "Block sizes must be powers of 2".to_string(),
            ));
        }
        if min_block_size > max_block_size {
            return Err(OxiGdalError::invalid_parameter(
                "parameter",
                "Min block size must be <= max block size".to_string(),
            ));
        }
        Ok(Self {
            free_lists: Arc::new(Mutex::new(BTreeMap::new())),
            min_block_size,
            max_block_size,
            stats: Arc::new(AllocatorStats::new()),
        })
    }
    /// Creates a buddy allocator with the module defaults
    /// (`MIN_ALIGNMENT` ..= `MAX_BLOCK_SIZE`).
    ///
    /// # Errors
    /// Propagates [`Self::new`] errors (cannot occur with the defaults).
    pub fn with_defaults() -> Result<Self> {
        Self::new(MIN_ALIGNMENT, MAX_BLOCK_SIZE)
    }
    /// Rounds `size` up to the block class that would serve it: at least
    /// `min_block_size`, next power of two otherwise, clamped to
    /// `max_block_size`. Callers must check `size <= max_block_size` FIRST —
    /// the clamp alone would silently under-size oversized requests.
    fn next_power_of_two(&self, size: usize) -> usize {
        if size <= self.min_block_size {
            self.min_block_size
        } else {
            size.next_power_of_two().min(self.max_block_size)
        }
    }
    /// Allocates a block of at least `size` bytes, reusing and splitting a
    /// pooled block when possible, otherwise falling back to the system
    /// allocator. Blocks of size `B` are `B`-aligned.
    ///
    /// # Errors
    /// Fails when `size` exceeds `max_block_size` or the system allocator
    /// returns null.
    pub fn allocate(&self, size: usize) -> Result<NonNull<u8>> {
        // BUG FIX: the rounded block size was previously clamped to
        // `max_block_size` BEFORE the limit check, so requests larger than
        // `max_block_size` passed the (never-true) `block_size >
        // max_block_size` test and received a block SMALLER than requested.
        // Check the raw request size instead.
        if size > self.max_block_size {
            self.stats.record_failure();
            return Err(OxiGdalError::allocation_error(format!(
                "Requested size {} exceeds maximum block size {}",
                size, self.max_block_size
            )));
        }
        let block_size = self.next_power_of_two(size);
        let mut free_lists = self.free_lists.lock();
        // Find the smallest free block that can serve this request.
        let mut found_size = None;
        for (&list_size, blocks) in free_lists.range(block_size..) {
            if !blocks.is_empty() {
                found_size = Some(list_size);
                break;
            }
        }
        if let Some(found_size) = found_size {
            let blocks = free_lists
                .get_mut(&found_size)
                .ok_or_else(|| OxiGdalError::invalid_state("Free list disappeared".to_string()))?;
            let ptr = blocks
                .pop()
                .ok_or_else(|| OxiGdalError::invalid_state("Block disappeared".to_string()))?;
            // Split the block down to the target size, returning each upper
            // half ("buddy") to its free list. The kept lower half stays at
            // `ptr`; each buddy of size S sits at an S-aligned offset.
            let mut current_size = found_size;
            while current_size > block_size {
                current_size /= 2;
                if current_size >= block_size {
                    let buddy_ptr =
                        // SAFETY: `ptr` points to a block of at least
                        // `found_size` bytes, and `current_size < found_size`,
                        // so the offset stays within the allocation.
                        unsafe { NonNull::new_unchecked(ptr.as_ptr().add(current_size)) };
                    free_lists.entry(current_size).or_default().push(buddy_ptr);
                }
            }
            self.stats.record_allocation(block_size);
            Ok(ptr)
        } else {
            // Nothing pooled is big enough: get a fresh block. Release the
            // lock first — the system call does not need it.
            drop(free_lists);
            let layout = Layout::from_size_align(block_size, block_size)
                .map_err(|e| OxiGdalError::allocation_error(e.to_string()))?;
            let ptr = unsafe {
                // SAFETY: `layout` was validated above; the null result is
                // checked before the pointer is wrapped.
                let raw_ptr = alloc(layout);
                if raw_ptr.is_null() {
                    self.stats.record_failure();
                    return Err(OxiGdalError::allocation_error(
                        "Failed to allocate memory".to_string(),
                    ));
                }
                NonNull::new_unchecked(raw_ptr)
            };
            self.stats.record_allocation(block_size);
            Ok(ptr)
        }
    }
    /// Returns a block previously obtained from [`Self::allocate`] with the
    /// same `size` to its free list for reuse.
    ///
    /// Blocks are pooled, never returned to the system, and buddies are not
    /// coalesced. The pointer is NOT validated; passing a foreign pointer or
    /// a wrong `size` corrupts the pool.
    ///
    /// # Errors
    /// Fails when `size` exceeds `max_block_size` (no such block can have
    /// been allocated).
    pub fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
        // Mirror the allocate-side limit check so an oversized `size` cannot
        // file the block under a smaller (clamped) size class.
        if size > self.max_block_size {
            return Err(OxiGdalError::invalid_parameter(
                "parameter",
                format!("Size {size} exceeds maximum block size"),
            ));
        }
        let block_size = self.next_power_of_two(size);
        let mut free_lists = self.free_lists.lock();
        free_lists.entry(block_size).or_default().push(ptr);
        self.stats.record_deallocation(block_size);
        Ok(())
    }
    /// Shared handle to this allocator's statistics counters.
    #[must_use]
    pub fn stats(&self) -> Arc<AllocatorStats> {
        Arc::clone(&self.stats)
    }
}
/// Composite allocator that routes small requests (up to the largest slab
/// size class) to a [`SlabAllocator`] and larger ones to a
/// [`BuddyAllocator`].
pub struct ThreadLocalAllocator {
    slab: SlabAllocator,
    buddy: BuddyAllocator,
    /// Requests of at most this many bytes go to the slab allocator
    /// (initialized to the largest entry of `SLAB_SIZES`).
    slab_threshold: usize,
}
// NOTE: the previous `unsafe impl Send/Sync for ThreadLocalAllocator` were
// redundant and have been removed — `SlabAllocator` and `BuddyAllocator`
// already carry (unsafe) Send + Sync impls and `usize` is both, so the auto
// traits apply without any new unsafe code.
impl ThreadLocalAllocator {
    /// Creates the composite allocator with default backends.
    ///
    /// # Errors
    /// Fails if the buddy allocator cannot be constructed or `SLAB_SIZES`
    /// is empty.
    pub fn new() -> Result<Self> {
        Ok(Self {
            slab: SlabAllocator::new(),
            buddy: BuddyAllocator::with_defaults()?,
            slab_threshold: *SLAB_SIZES.last().ok_or_else(|| OxiGdalError::Internal {
                message: "SLAB_SIZES is empty".to_string(),
            })?,
        })
    }
    /// Allocates `size` bytes with at least `alignment`-byte alignment,
    /// routed by size: slab for `size <= slab_threshold`, buddy otherwise.
    ///
    /// # Errors
    /// Fails when the requested alignment cannot be guaranteed by the chosen
    /// backend, or when the backend allocation itself fails.
    pub fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
        // BUG FIX: the old guard (`alignment > MIN_ALIGNMENT && alignment >
        // size`) let e.g. (alignment = 64, size = 256) fall through to the
        // slab allocator, which only guarantees MIN_ALIGNMENT-byte
        // alignment. Compute what the size-routed backend can actually
        // guarantee and reject anything stronger.
        if alignment > MIN_ALIGNMENT {
            // Slab blocks are MIN_ALIGNMENT-aligned; buddy blocks of size B
            // are B-aligned, where B = `size` rounded up to a power of two.
            let guaranteed = if size <= self.slab_threshold {
                MIN_ALIGNMENT
            } else {
                size.next_power_of_two()
            };
            if alignment > guaranteed {
                return Err(OxiGdalError::invalid_parameter(
                    "parameter",
                    "Alignment requirements not supported".to_string(),
                ));
            }
        }
        if size <= self.slab_threshold {
            self.slab.allocate(size)
        } else {
            self.buddy.allocate(size)
        }
    }
    /// Releases a block using the same size-based routing as `allocate`, so
    /// `size` must match the value passed at allocation time.
    ///
    /// # Errors
    /// Propagates the backend's deallocation error.
    pub fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
        if size <= self.slab_threshold {
            self.slab.deallocate(ptr, size)
        } else {
            self.buddy.deallocate(ptr, size)
        }
    }
    /// Statistics handles for both backends, `(slab, buddy)`.
    #[must_use]
    pub fn stats(&self) -> (Arc<AllocatorStats>, Arc<AllocatorStats>) {
        (self.slab.stats(), self.buddy.stats())
    }
}
/// Common interface over the allocators in this module.
pub trait Allocator: Send + Sync {
    /// Allocates at least `size` bytes with at least `alignment`-byte
    /// alignment; implementations may reject alignments they cannot honor.
    fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>>;
    /// Releases a block previously returned by `allocate` with the same
    /// `size`.
    fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()>;
    /// Shared handle to the implementation's statistics counters.
    fn stats(&self) -> Arc<AllocatorStats>;
}
impl Allocator for SlabAllocator {
fn allocate(&self, size: usize, _alignment: usize) -> Result<NonNull<u8>> {
self.allocate(size)
}
fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
self.deallocate(ptr, size)
}
fn stats(&self) -> Arc<AllocatorStats> {
self.stats()
}
}
impl Allocator for BuddyAllocator {
fn allocate(&self, size: usize, _alignment: usize) -> Result<NonNull<u8>> {
self.allocate(size)
}
fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
self.deallocate(ptr, size)
}
fn stats(&self) -> Arc<AllocatorStats> {
self.stats()
}
}
impl Allocator for ThreadLocalAllocator {
    fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
        // Delegates to the inherent method (size-based routing plus the
        // alignment check); resolves to the inherent impl, not recursion.
        self.allocate(size, alignment)
    }
    fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
        self.deallocate(ptr, size)
    }
    fn stats(&self) -> Arc<AllocatorStats> {
        // NOTE(review): the trait exposes a single stats handle, so this
        // returns only the slab side — buddy statistics are dropped here.
        // Callers wanting both should use the inherent `stats()`.
        self.slab.stats()
    }
}
#[cfg(test)]
// The tests assert `!ptr.as_ptr().is_null()` on `NonNull` pointers, which
// clippy knows is always true — silenced rather than removed to keep the
// sanity checks explicit.
#[allow(useless_ptr_null_checks)]
mod tests {
    use super::*;
    /// Allocate/free two small slab blocks and verify the exact counters.
    #[test]
    fn test_slab_allocator() {
        let allocator = SlabAllocator::new();
        let ptr1 = allocator
            .allocate(100)
            .expect("Slab allocator should allocate 100 bytes");
        assert!(!ptr1.as_ptr().is_null());
        let ptr2 = allocator
            .allocate(200)
            .expect("Slab allocator should allocate 200 bytes");
        assert!(!ptr2.as_ptr().is_null());
        // Distinct live allocations must never alias.
        assert_ne!(ptr1, ptr2);
        allocator
            .deallocate(ptr1, 100)
            .expect("Deallocation should succeed");
        allocator
            .deallocate(ptr2, 200)
            .expect("Deallocation should succeed");
        let stats = allocator.stats();
        assert_eq!(stats.total_allocations.load(Ordering::Relaxed), 2);
        assert_eq!(stats.total_deallocations.load(Ordering::Relaxed), 2);
    }
    /// Round-trips two buddy allocations (sizes round up to powers of two).
    #[test]
    fn test_buddy_allocator() {
        let allocator =
            BuddyAllocator::with_defaults().expect("Default buddy allocator should be created");
        let ptr1 = allocator
            .allocate(1000)
            .expect("Buddy allocator should allocate 1000 bytes");
        let ptr2 = allocator
            .allocate(2000)
            .expect("Buddy allocator should allocate 2000 bytes");
        assert!(!ptr1.as_ptr().is_null());
        assert!(!ptr2.as_ptr().is_null());
        allocator
            .deallocate(ptr1, 1000)
            .expect("Buddy deallocation should succeed");
        allocator
            .deallocate(ptr2, 2000)
            .expect("Buddy deallocation should succeed");
    }
    /// Exercises both routing paths of the composite allocator: 256 B goes
    /// to the slab, 1 MiB... is still within the slab threshold as well —
    /// both requests use MIN_ALIGNMENT so routing is purely by size.
    #[test]
    fn test_thread_local_allocator() {
        let allocator =
            ThreadLocalAllocator::new().expect("Thread-local allocator should be created");
        let ptr1 = allocator
            .allocate(256, MIN_ALIGNMENT)
            .expect("Thread-local allocator should allocate small block");
        assert!(!ptr1.as_ptr().is_null());
        let ptr2 = allocator
            .allocate(1024 * 1024, MIN_ALIGNMENT)
            .expect("Thread-local allocator should allocate large block");
        assert!(!ptr2.as_ptr().is_null());
        allocator
            .deallocate(ptr1, 256)
            .expect("Thread-local deallocation should succeed");
        allocator
            .deallocate(ptr2, 1024 * 1024)
            .expect("Thread-local deallocation should succeed");
    }
    /// Verifies the raw counter arithmetic without any real allocation.
    #[test]
    fn test_allocator_stats() {
        let stats = AllocatorStats::new();
        stats.record_allocation(1024);
        stats.record_allocation(2048);
        assert_eq!(stats.total_allocations.load(Ordering::Relaxed), 2);
        assert_eq!(stats.bytes_allocated.load(Ordering::Relaxed), 3072);
        // Peak tracks the high-water mark, which both allocations reached.
        assert_eq!(stats.peak_bytes_allocated.load(Ordering::Relaxed), 3072);
        stats.record_deallocation(1024);
        assert_eq!(stats.bytes_allocated.load(Ordering::Relaxed), 2048);
        assert_eq!(stats.current_allocations(), 1);
    }
}