use lite_alloc::reset_heap;
use lite_alloc::single_threaded::FreeListAllocator;
use std::alloc::{GlobalAlloc, Layout};
use std::sync::{Mutex, MutexGuard};
// Serializes all allocator tests: they share one global heap, so only one
// may run at a time. Each SafeAllocator holds this lock for its lifetime.
static TEST_MUTEX: Mutex<()> = Mutex::new(());
// Test-harness wrapper around FreeListAllocator. Construction acquires the
// global test lock and resets shared heap state; Drop resets it again so the
// next test starts from a clean slate.
struct SafeAllocator {
// The allocator under test.
inner: FreeListAllocator,
// Never read — held only so the test lock is released when this drops.
_guard: MutexGuard<'static, ()>,
}
impl SafeAllocator {
    /// Acquires the global test lock, resets the shared allocator/heap
    /// state, and returns a fresh allocator for one test to use.
    fn new() -> Self {
        // Recover from a poisoned mutex instead of unwrapping: a test that
        // panicked while holding the lock must not cascade failures into
        // every subsequent test. This is safe because the guarded state is
        // fully reset below before any use.
        let guard = TEST_MUTEX
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner());
        // SAFETY: the lock acquired above gives us exclusive access to the
        // shared heap, so resetting the global state cannot race.
        unsafe {
            FreeListAllocator::reset();
            reset_heap();
        }
        Self {
            inner: FreeListAllocator::new(),
            _guard: guard,
        }
    }
    /// Allocates per `layout`; returns null on failure (GlobalAlloc contract).
    fn alloc(&self, layout: Layout) -> *mut u8 {
        // SAFETY: caller supplies a valid Layout; the allocator reports
        // failure via a null return rather than UB.
        unsafe { self.inner.alloc(layout) }
    }
    /// Frees `ptr`, which must have come from `alloc` with the same `layout`.
    fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // SAFETY: tests only pass pointers previously returned by
        // `self.alloc(layout)`.
        unsafe { self.inner.dealloc(ptr, layout) }
    }
    /// Resizes an allocation; only compiled when the crate's `realloc`
    /// feature is enabled.
    #[cfg(feature = "realloc")]
    #[allow(dead_code)]
    fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        // SAFETY: same provenance requirements as `dealloc`.
        unsafe { self.inner.realloc(ptr, layout, new_size) }
    }
}
impl Drop for SafeAllocator {
// Resets the shared allocator and heap so the next test starts clean; the
// test lock in `_guard` is released after this body runs.
fn drop(&mut self) {
// NOTE(review): resets in the same order as `new()` (allocator, then heap);
// assumed the order matters — confirm against lite_alloc internals.
unsafe {
FreeListAllocator::reset();
reset_heap();
}
}
}
#[test]
fn test_small_allocations_min_size() {
    // Even a 1-byte, 1-aligned request must produce a usable pointer that
    // is rounded up to the allocator's 16-byte minimum alignment.
    let allocator = SafeAllocator::new();
    let one_byte = Layout::from_size_align(1, 1).unwrap();
    let p = allocator.alloc(one_byte);
    assert!(!p.is_null());
    assert_eq!(p as usize % 16, 0);
    // Write through the pointer to prove the byte is actually usable.
    unsafe { p.write(0xFF) };
    allocator.dealloc(p, one_byte);
}
#[test]
fn test_alignment_upgrade_force_16() {
    // A request with 8-byte alignment is silently upgraded: the returned
    // pointer must still satisfy the allocator's 16-byte minimum.
    let allocator = SafeAllocator::new();
    let eight_aligned = Layout::from_size_align(8, 8).unwrap();
    let p = allocator.alloc(eight_aligned);
    assert!(!p.is_null());
    assert_eq!(p as usize & 0xF, 0);
    allocator.dealloc(p, eight_aligned);
}
#[test]
fn test_unsupported_alignment_large() {
    // Alignments above 16 are unsupported: the allocator must signal
    // failure by returning null rather than a misaligned pointer.
    let allocator = SafeAllocator::new();
    let over_aligned = Layout::from_size_align(32, 32).unwrap();
    assert!(allocator.alloc(over_aligned).is_null());
}
#[test]
fn test_split_block_behavior() {
    let allocator = SafeAllocator::new();
    // Allocate and free one 128-byte block so the free list holds a single
    // known region starting at `ptr1`.
    let layout_large = Layout::from_size_align(128, 16).unwrap();
    let ptr1 = allocator.alloc(layout_large);
    assert!(!ptr1.is_null());
    allocator.dealloc(ptr1, layout_large);
    // Carve most of it back out — the allocator should split the free block
    // rather than request new memory.
    let layout_small = Layout::from_size_align(112, 16).unwrap();
    let ptr2 = allocator.alloc(layout_small);
    assert!(!ptr2.is_null());
    // The layout-specific assertions below only hold when a block header
    // (two usizes) fits in 16 bytes; skip them entirely otherwise.
    // (The original also ran `offset_from` unconditionally and carried a
    // dead empty `else` branch — both removed.)
    if std::mem::size_of::<usize>() * 2 <= 16 {
        // NOTE(review): `ptr1` was freed above, so `offset_from` on it is
        // formally dubious; the test relies on the allocator reusing the
        // same contiguous region. Leaked ptr2/ptr3 are reclaimed by the
        // heap reset in SafeAllocator's Drop.
        let diff = unsafe { ptr2.offset_from(ptr1) };
        assert_eq!(diff, 16);
        // The 16-byte remainder at the original start must satisfy a tiny
        // allocation exactly.
        let layout_tiny = Layout::from_size_align(16, 16).unwrap();
        let ptr3 = allocator.alloc(layout_tiny);
        assert_eq!(ptr3, ptr1);
    }
}
#[test]
fn test_coalescing_fragmentation_random_free() {
    let allocator = SafeAllocator::new();
    // Allocate `count` equal blocks back to back.
    let count = 10;
    let layout = Layout::from_size_align(64, 16).unwrap();
    let mut ptrs = vec![std::ptr::null_mut(); count];
    for i in 0..count {
        ptrs[i] = allocator.alloc(layout);
        assert!(!ptrs[i].is_null());
    }
    // Free even-indexed blocks first to fragment the free list, then the
    // odd-indexed ones so each free should coalesce with both neighbors.
    // (Dead `if i > 0 {}` and unused min/max pointer bindings removed.)
    for i in (0..count).step_by(2) {
        allocator.dealloc(ptrs[i], layout);
    }
    for i in (1..count).step_by(2) {
        allocator.dealloc(ptrs[i], layout);
    }
    // If coalescing worked, a single allocation spanning all the freed
    // space must now succeed.
    let total_size = count * 64;
    let layout_total = Layout::from_size_align(total_size, 16).unwrap();
    let ptr_all = allocator.alloc(layout_total);
    assert!(!ptr_all.is_null());
    allocator.dealloc(ptr_all, layout_total);
}
#[test]
fn test_alloc_large_grows_memory() {
    // A 100 KiB request should force the allocator to grow its backing heap.
    let allocator = SafeAllocator::new();
    const SIZE: usize = 100 * 1024;
    let big = Layout::from_size_align(SIZE, 16).unwrap();
    let p = allocator.alloc(big);
    assert!(!p.is_null());
    // Touch the final byte to prove the entire range is really backed.
    unsafe { p.add(SIZE - 1).write(0xAA) };
    allocator.dealloc(p, big);
}
#[test]
fn test_double_free_corruption_check() {
    // Fills a block, frees it, reallocates, and fills again — checking that
    // reuse after free does not corrupt allocator bookkeeping.
    let allocator = SafeAllocator::new();
    let layout = Layout::from_size_align(64, 16).unwrap();
    let ptr = allocator.alloc(layout);
    // Fix: assert non-null before writing — the original wrote through a
    // potentially null pointer, turning an allocation failure into UB
    // instead of a clean test failure.
    assert!(!ptr.is_null());
    unsafe {
        ptr.write_bytes(0xCC, 64);
    }
    allocator.dealloc(ptr, layout);
    // Reallocate (likely reusing the same block) and overwrite it fully;
    // any header corruption from the first cycle would surface here.
    let ptr2 = allocator.alloc(layout);
    assert!(!ptr2.is_null());
    unsafe {
        ptr2.write_bytes(0xDD, 64);
    }
    allocator.dealloc(ptr2, layout);
}