use crate::{PAGE_SIZE, grow_memory};
// SAFETY(review): `Sync` is claimed even though all allocator state lives in
// unsynchronized `static mut`s below. This is only sound on single-threaded
// targets (e.g. wasm32 without threads) — confirm the crate never enables
// threading before relying on this.
unsafe impl Sync for SegregatedBumpAllocator {}
use core::{
alloc::{GlobalAlloc, Layout},
ptr::null_mut,
};
/// Zero-sized global allocator handle. Small requests (≤ 128 bytes with
/// alignment ≤ 16) are served from power-of-two size-class free lists;
/// everything else comes from a grow-only bump region.
pub struct SegregatedBumpAllocator;
impl SegregatedBumpAllocator {
pub const fn new() -> Self {
SegregatedBumpAllocator
}
pub unsafe fn reset() {
unsafe {
BINS = [null_mut(); 4];
HEAP_TOP = 0;
HEAP_END = 0;
}
}
}
/// Intrusive free-list link stored inside each freed block's own storage.
/// Every binned block is at least 16 bytes, so the pointer always fits.
struct Node {
    next: *mut Node,
}
// Free-list heads for the four size classes: 16, 32, 64 and 128 bytes.
static mut BINS: [*mut Node; 4] = [null_mut(); 4];
// Next free byte of the bump region; 0 until the first allocation.
static mut HEAP_TOP: usize = 0;
// One-past-the-end of the grown heap region; 0 until the first allocation.
static mut HEAP_END: usize = 0;
unsafe impl GlobalAlloc for SegregatedBumpAllocator {
    /// Serves small requests (≤ 128 B, align ≤ 16) from the size-class bins,
    /// carving a fresh full-class block from the bump region on a miss.
    /// Over-aligned or oversized requests go straight to the bump region.
    /// Returns null only when growing memory fails.
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        if layout.align() > 16 {
            // Over-aligned: bump exactly layout.size() at the requested
            // alignment; these blocks are never binned or reclaimed.
            return unsafe { self.bump_alloc(layout.size(), layout.align()) };
        }
        let size = layout.size().max(16);
        if let Some(index) = get_index(size) {
            // Fast path: pop a recycled block of the right class.
            unsafe {
                let head = BINS[index];
                if !head.is_null() {
                    let next = (*head).next;
                    BINS[index] = next;
                    return head as *mut u8;
                }
            }
            // Bin empty: allocate the full class size so the block can be
            // recycled through the bin later.
            let block_size = 16 << index;
            return unsafe { self.bump_alloc(block_size, 16) };
        }
        // Larger than the biggest class: plain bump allocation, never reused.
        unsafe { self.bump_alloc(size, 16) }
    }

    /// Pushes binned blocks back onto their free list. Over-aligned and
    /// oversized blocks are leaked by design (the bump region never shrinks).
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        if layout.align() > 16 {
            return;
        }
        let size = layout.size().max(16);
        if let Some(index) = get_index(size) {
            // Reuse the freed block's own storage as the free-list link.
            let node = ptr as *mut Node;
            unsafe {
                (*node).next = BINS[index];
                BINS[index] = node;
            }
        }
    }

    /// Grows in place when `ptr` is the most recent bump allocation,
    /// otherwise falls back to allocate-copy-free.
    #[cfg(feature = "realloc")]
    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        // Capacity `alloc` actually handed out for `layout`. Over-aligned
        // blocks were bumped at exactly layout.size() — applying the 16-byte
        // minimum here (as the previous code did) claimed bytes the block
        // does not own and let tiny over-aligned blocks "grow" in place over
        // the next allocation.
        let old_capacity = if layout.align() > 16 {
            layout.size()
        } else if let Some(index) = get_index(layout.size().max(16)) {
            16 << index
        } else {
            layout.size()
        };
        if new_size <= old_capacity {
            return ptr;
        }
        // Capacity a fresh allocation of `new_size` would receive. Growing in
        // place to this full class size (not just to `new_size`) keeps binned
        // blocks class-sized, so a later dealloc/alloc round-trip through the
        // bins never hands out an undersized block.
        let new_capacity = if layout.align() > 16 {
            new_size
        } else if let Some(index) = get_index(new_size.max(16)) {
            16 << index
        } else {
            new_size
        };
        let heap_top = unsafe { HEAP_TOP };
        // In-place growth is possible only for the top-most allocation.
        if ptr as usize + old_capacity == heap_top {
            let diff = new_capacity - old_capacity;
            let heap_end = unsafe { HEAP_END };
            unsafe {
                if HEAP_TOP + diff <= heap_end {
                    HEAP_TOP += diff;
                    return ptr;
                }
                // Not enough room: grow by whole pages, then extend.
                let pages_needed =
                    ((HEAP_TOP + diff - heap_end + PAGE_SIZE - 1) / PAGE_SIZE).max(1);
                if grow_memory(pages_needed) != usize::MAX {
                    HEAP_END += pages_needed * PAGE_SIZE;
                    HEAP_TOP += diff;
                    return ptr;
                }
            }
        }
        // Fallback: allocate fresh, copy the old contents (layout.size() is
        // <= old_capacity < new_size), release the old block.
        unsafe {
            let new_ptr = self.alloc(Layout::from_size_align_unchecked(new_size, layout.align()));
            if !new_ptr.is_null() {
                core::ptr::copy_nonoverlapping(ptr, new_ptr, layout.size());
                self.dealloc(ptr, layout);
            }
            new_ptr
        }
    }
}
impl SegregatedBumpAllocator {
    /// Bump-allocates `size` bytes aligned to `align` (must be a power of
    /// two), growing linear memory in whole pages when the current region is
    /// exhausted. Returns null only when `grow_memory` fails.
    unsafe fn bump_alloc(&self, size: usize, align: usize) -> *mut u8 {
        unsafe {
            let mut ptr = HEAP_TOP;
            // Round the bump pointer up to the requested alignment.
            ptr = (ptr + align - 1) & !(align - 1);
            // `ptr < HEAP_TOP` catches wrap-around from the alignment round-up.
            if ptr + size > HEAP_END || ptr < HEAP_TOP {
                let bytes_needed = (ptr + size).saturating_sub(HEAP_END);
                // Ceiling division to whole pages; grow at least one page.
                let pages_needed = ((bytes_needed + PAGE_SIZE - 1) / PAGE_SIZE).max(1);
                let prev_page = grow_memory(pages_needed);
                // usize::MAX is grow_memory's failure sentinel.
                if prev_page == usize::MAX {
                    return null_mut();
                }
                if HEAP_END == 0 {
                    // First growth: the bump region starts at the previous
                    // memory size (in pages) reported by grow_memory;
                    // re-align the pointer for this request.
                    let memory_start = prev_page * PAGE_SIZE;
                    ptr = memory_start;
                    ptr = (ptr + align - 1) & !(align - 1);
                    HEAP_END = memory_start + pages_needed * PAGE_SIZE;
                } else {
                    // NOTE(review): assumes the newly grown pages are
                    // contiguous with the current region (true for wasm
                    // memory.grow) — confirm for other targets.
                    HEAP_END += pages_needed * PAGE_SIZE;
                }
            }
            HEAP_TOP = ptr + size;
            ptr as *mut u8
        }
    }
}
/// Maps a request size to its size-class index: 0 → 16 B, 1 → 32 B,
/// 2 → 64 B, 3 → 128 B. Sizes above 128 bytes have no bin (`None`);
/// sizes below 16 clamp into class 0, matching the 16-byte minimum block.
#[inline(always)]
fn get_index(size: usize) -> Option<usize> {
    if size > 128 {
        return None;
    }
    // Round up to a power of two and count doublings above 16 = 2^4.
    // `saturating_sub` lands sizes < 16 in class 0, where the previous
    // `BASE - leading_zeros()` form underflowed (debug-mode panic), and it
    // removes the per-pointer-width BASE constants entirely.
    let class = size.next_power_of_two().trailing_zeros().saturating_sub(4);
    Some(class as usize)
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Mutex;

    // Serializes tests: the allocator state is global (`static mut`s).
    static TEST_LOCK: Mutex<()> = Mutex::new(());

    // Runs `f` with the allocator (and the test heap) reset to a clean state.
    fn with_clean_allocator(f: impl FnOnce()) {
        let _guard = TEST_LOCK.lock().unwrap();
        unsafe {
            SegregatedBumpAllocator::reset();
            crate::reset_heap();
        }
        f();
    }

    // A freed small block must be handed back on the next same-class request.
    #[test]
    fn test_small_alloc_reuse() {
        with_clean_allocator(|| {
            let allocator = SegregatedBumpAllocator::new();
            let layout = Layout::from_size_align(16, 8).unwrap();
            unsafe {
                let ptr1 = allocator.alloc(layout);
                assert!(!ptr1.is_null());
                *ptr1 = 0xAA;
                allocator.dealloc(ptr1, layout);
                let ptr2 = allocator.alloc(layout);
                assert_eq!(ptr1, ptr2, "Should reuse the same pointer from bin");
            }
        });
    }

    // A block freed into one size class must not satisfy a request for a
    // different class, but must still be reused for its own class.
    #[test]
    fn test_cross_bin_isolation() {
        with_clean_allocator(|| {
            let allocator = SegregatedBumpAllocator::new();
            unsafe {
                // size 10 -> 16-byte class; size 20 -> 32-byte class.
                let l0 = Layout::from_size_align(10, 1).unwrap();
                let p0 = allocator.alloc(l0);
                let l1 = Layout::from_size_align(20, 1).unwrap();
                let p1 = allocator.alloc(l1);
                assert_ne!(p0, p1);
                allocator.dealloc(p0, l0);
                let p2 = allocator.alloc(l1);
                assert_ne!(p0, p2);
                let p3 = allocator.alloc(l0);
                assert_eq!(p0, p3);
            }
        });
    }

    // Blocks above the largest class (128 B) bypass the bins entirely and
    // are leaked on dealloc, so a second allocation gets fresh memory.
    #[test]
    fn test_large_alloc_bypass() {
        with_clean_allocator(|| {
            let allocator = SegregatedBumpAllocator::new();
            let layout = Layout::from_size_align(200, 16).unwrap();
            unsafe {
                let p1 = allocator.alloc(layout);
                assert!(!p1.is_null());
                allocator.dealloc(p1, layout);
                let p2 = allocator.alloc(layout);
                assert_ne!(
                    p1, p2,
                    "Large objects should not be implicitly reused in this implementation"
                );
            }
        });
    }

    // Allocations larger than the remaining region trigger page growth and
    // stay non-overlapping.
    #[test]
    fn test_memory_growth() {
        with_clean_allocator(|| {
            let allocator = SegregatedBumpAllocator::new();
            let layout = Layout::from_size_align(40 * 1024, 16).unwrap();
            unsafe {
                let p1 = allocator.alloc(layout);
                assert!(!p1.is_null());
                let p2 = allocator.alloc(layout);
                assert!(!p2.is_null());
                assert_ne!(p1, p2);
                let dist = (p2 as usize).wrapping_sub(p1 as usize);
                assert!(dist >= 40 * 1024);
            }
        });
    }

    // Alignments above 16 take the bump path and must be honored exactly.
    #[test]
    fn test_high_alignment() {
        with_clean_allocator(|| {
            let allocator = SegregatedBumpAllocator::new();
            let layout = Layout::from_size_align(32, 128).unwrap();
            unsafe {
                let p1 = allocator.alloc(layout);
                assert!(!p1.is_null());
                assert_eq!(p1 as usize % 128, 0);
                allocator.dealloc(p1, layout);
            }
        });
    }

    // Shrinking stays in place with the custom realloc; the default
    // GlobalAlloc realloc (feature off) always moves. Data is preserved
    // either way.
    #[test]
    fn test_realloc_shrink() {
        with_clean_allocator(|| {
            let allocator = SegregatedBumpAllocator::new();
            let layout = Layout::from_size_align(100, 16).unwrap();
            unsafe {
                let ptr = allocator.alloc(layout);
                ptr.write_bytes(0xCC, 100);
                let new_size = 80;
                let new_ptr = allocator.realloc(ptr, layout, new_size);
                #[cfg(feature = "realloc")]
                assert_eq!(ptr, new_ptr);
                #[cfg(not(feature = "realloc"))]
                assert_ne!(ptr, new_ptr);
                assert_eq!(*new_ptr, 0xCC);
            }
        });
    }

    // Growing the top-most (large, non-binned) allocation extends in place
    // with the custom realloc; the default realloc moves it.
    #[test]
    fn test_realloc_grow_large_at_top() {
        with_clean_allocator(|| {
            let allocator = SegregatedBumpAllocator::new();
            let layout = Layout::from_size_align(256, 16).unwrap();
            unsafe {
                let ptr = allocator.alloc(layout);
                ptr.write_bytes(0xAA, 256);
                let new_size = 512;
                let new_ptr = allocator.realloc(ptr, layout, new_size);
                #[cfg(feature = "realloc")]
                assert_eq!(ptr, new_ptr);
                #[cfg(not(feature = "realloc"))]
                assert_ne!(ptr, new_ptr);
                assert_eq!(*new_ptr, 0xAA);
                assert_eq!(*new_ptr.add(255), 0xAA);
            }
        });
    }

    // A grown block that is NOT at the top of the bump region (another
    // allocation follows it) must move, preserving its contents.
    #[test]
    fn test_realloc_move() {
        with_clean_allocator(|| {
            let allocator = SegregatedBumpAllocator::new();
            let l1 = Layout::from_size_align(64, 16).unwrap();
            let _ = unsafe { allocator.alloc(l1) };
            let layout = Layout::from_size_align(256, 16).unwrap();
            let ptr = unsafe { allocator.alloc(layout) };
            // Allocate past `ptr` so it can no longer grow in place.
            let l3 = Layout::from_size_align(64, 16).unwrap();
            let _p3 = unsafe { allocator.alloc(l3) };
            unsafe {
                ptr.write_bytes(0xBB, 256);
                let new_size = 512;
                let new_ptr = allocator.realloc(ptr, layout, new_size);
                assert_ne!(ptr, new_ptr);
                assert_eq!(*new_ptr, 0xBB);
            }
        });
    }
}