use rand::{prelude::SliceRandom, Rng};
mod alloc_tests;
mod dealloc_tests;
use core::alloc::Layout;
use std::vec::Vec;
use super::*;
/// Number of smallbins used by the allocator instances in these tests
/// (mirrors the allocator's default).
const TEST_SMALLBINS_AMOUNT: usize = DEFAULT_SMALLBINS_AMOUNT;
/// Number of alignment sub-bins per smallbin used by the allocator instances
/// in these tests (mirrors the allocator's default).
const TEST_ALIGNMENT_SUB_BINS_AMOUNT: usize = DEFAULT_ALIGNMENT_SUB_BINS_AMOUNT;
#[test]
fn random_alloc_dealloc_realloc() {
    /// Total heap size handed to the allocator under test.
    const MEM_SIZE: usize = USIZE_SIZE * 113;
    /// Number of random alloc/dealloc/realloc actions per trial.
    const RANDOM_ACTIONS_AMOUNT: usize = 10000;
    /// Number of independent trials, each with a fresh heap.
    const TRIALS_AMOUNT: usize = 100;
    for _ in 0..TRIALS_AMOUNT {
        let mut guard = AllocatorInitGuard::empty();
        guard.init(MEM_SIZE);
        // Currently-live allocations as `(ptr, content_size, alignment)`.
        let mut allocations = Vec::new();
        // Bookkeeping of how many heap bytes are still available; each live
        // allocation consumes its content size plus one chunk header.
        let mut size_left = MEM_SIZE;
        let mut rng = rand::thread_rng();
        for _ in 0..RANDOM_ACTIONS_AMOUNT {
            /// Picks a random content size, aligned to `MIN_ALIGNMENT`, that
            /// fits within `size_left` bytes once the chunk header is
            /// accounted for.
            fn random_size(rng: &mut impl Rng, size_left: usize) -> usize {
                let max_chunk_size = size_left - HEADER_SIZE;
                let min_chunk_size = MIN_FREE_CHUNK_SIZE_INCLUDING_HEADER - HEADER_SIZE;
                let unaligned_size = rng.gen_range(min_chunk_size..=max_chunk_size);
                let aligned_size = unsafe { align_up(unaligned_size, MIN_ALIGNMENT) };
                aligned_size
            }
            // Bias towards allocating (75%) so the heap actually fills up.
            let rng_chose_allocation_over_deallocation: bool = rng.gen_bool(0.75);
            let mut allocation_worked = false;
            if allocations.is_empty()
                || (rng_chose_allocation_over_deallocation
                    && size_left >= MIN_FREE_CHUNK_SIZE_INCLUDING_HEADER)
            {
                let size = random_size(&mut rng, size_left);
                let alignment = 1 << rng.gen_range(0..=14);
                let ptr = unsafe {
                    guard
                        .allocator
                        .alloc(Layout::from_size_align(size, alignment).unwrap())
                };
                // `alloc` may legitimately fail (e.g. no aligned chunk fits);
                // in that case fall through to the dealloc/realloc branch.
                if !ptr.is_null() {
                    let addr = ptr as usize;
                    assert!(unsafe { is_aligned(addr, alignment) });
                    allocations.push((ptr, size, alignment));
                    size_left -= size + HEADER_SIZE;
                    allocation_worked = true;
                }
            }
            if !allocation_worked && !allocations.is_empty() {
                if rng.gen::<bool>() && size_left >= MIN_FREE_CHUNK_SIZE_INCLUDING_HEADER {
                    // Realloc a random live allocation to a new random size.
                    let random_index = rng.gen_range(0..allocations.len());
                    let (ptr, allocation_size, alignment) = &mut allocations[random_index];
                    // The new size may use the memory currently occupied by
                    // the chunk being reallocated, thus
                    // `size_left + *allocation_size`.
                    let new_size = random_size(&mut rng, size_left + *allocation_size);
                    let new_ptr = unsafe {
                        guard.allocator.realloc(
                            *ptr,
                            Layout::from_size_align(*allocation_size, *alignment).unwrap(),
                            new_size,
                        )
                    };
                    // On failure the original allocation is still valid and
                    // the bookkeeping is unchanged.
                    if !new_ptr.is_null() {
                        // BUGFIX: keep `size_left` in sync — resizing from
                        // `*allocation_size` to `new_size` changes the amount
                        // of used memory by the difference between the two.
                        // Previously this was never adjusted, so the
                        // accounting drifted by `new - old` on every
                        // successful realloc. Cannot underflow:
                        // `new_size <= size_left + *allocation_size - HEADER_SIZE`.
                        size_left = size_left + *allocation_size - new_size;
                        *allocation_size = new_size;
                        *ptr = new_ptr;
                    }
                } else {
                    // Deallocate a random live allocation.
                    let random_index = rng.gen_range(0..allocations.len());
                    let (ptr, allocation_size, _alignment) = allocations.swap_remove(random_index);
                    unsafe { guard.allocator.dealloc(ptr) }
                    size_left += allocation_size + HEADER_SIZE;
                }
            }
        }
        // Free everything in random order; the allocator must coalesce all
        // chunks back into a single free chunk covering the whole heap.
        allocations.shuffle(&mut rng);
        for (allocation, _size, _alignment) in allocations {
            unsafe { guard.allocator.dealloc(allocation) }
        }
        assert_only_1_free_chunk_which_is_at_heap_start(&mut guard, MEM_SIZE);
    }
}
/// Asserts that the chunk at `addr` is free, has the given content `size`,
/// and is the only chunk in its bin: its `fd`/`bk` links must match exactly
/// the endpoints that `get_expected_fd_and_bk_for_chunk_with_size` computes
/// for a lone chunk of that size, and the neighboring links must point back
/// at this chunk.
fn assert_only_1_free_chunk_in_bin(guard: &mut AllocatorInitGuard, addr: usize, size: usize) {
    let free_chunk = unsafe {
        match Chunk::from_addr(addr) {
            ChunkRef::Used(_) => panic!("first chunk in heap is marked used after dealloc"),
            ChunkRef::Free(free) => free,
        }
    };
    // A free chunk's previous chunk must be in use, otherwise the two would
    // have been merged into one.
    assert_eq!(free_chunk.header.prev_in_use(), true);
    assert_eq!(free_chunk.size(), size);
    let (fd, bk) = get_expected_fd_and_bk_for_chunk_with_size(
        free_chunk.size(),
        free_chunk.content_addr(),
        guard,
    );
    assert_eq!(free_chunk.fd, fd);
    assert_eq!(free_chunk.ptr_to_fd_of_bk, bk);
    // If there is a forward chunk (other-bin case: the bin's fake chunk), its
    // back-link must point at this chunk's `fd` field.
    if let Some(mut fd_ptr) = fd {
        let fd_ref = unsafe { fd_ptr.as_mut() };
        assert_eq!(fd_ref.ptr_to_fd_of_bk, &mut free_chunk.fd as *mut _)
    }
    // The bin entry that `bk` points at must point forward to this chunk.
    assert_eq!(
        unsafe { *bk },
        Some(unsafe { NonNull::new_unchecked(free_chunk as *mut _) })
    );
}
/// Asserts that the heap contains exactly one free chunk, that it sits at the
/// very start of the heap, and that it spans the entire heap (everything but
/// one chunk header).
fn assert_only_1_free_chunk_which_is_at_heap_start(
    guard: &mut AllocatorInitGuard,
    mem_size: usize,
) {
    let heap_start = guard.addr();
    let expected_content_size = mem_size - HEADER_SIZE;
    assert_only_1_free_chunk_in_bin(guard, heap_start, expected_content_size)
}
/// Computes the `fd` and `ptr_to_fd_of_bk` values that a lone free chunk of
/// the given `size` (whose content starts at `content_addr`) is expected to
/// have, depending on which bin it should have been placed in.
///
/// - Smallbin case: the chunk is the only one in its alignment sub-bin, so
///   `fd` is `None` and `bk` points at the sub-bin's head (`fd` field).
/// - Other-bin case: the bin uses a circular list anchored by a fake chunk,
///   so both links go through that fake chunk.
fn get_expected_fd_and_bk_for_chunk_with_size(
    size: usize,
    content_addr: usize,
    guard: &mut AllocatorInitGuard,
) -> (Option<FreeChunkPtr>, *mut Option<FreeChunkPtr>) {
    match unsafe {
        SmallBins::<TEST_SMALLBINS_AMOUNT, TEST_ALIGNMENT_SUB_BINS_AMOUNT>::smallbin_index(size)
    } {
        Some(smallbin_index) => {
            // Which alignment sub-bin the chunk lands in is determined by its
            // content address.
            let alignment_index = unsafe {
                SmallBins::<TEST_SMALLBINS_AMOUNT,TEST_ALIGNMENT_SUB_BINS_AMOUNT>::alignment_index_of_chunk_content_addr(
                    content_addr,
                )
            };
            let sub_bin = &mut guard.allocator.smallbins.small_bins[smallbin_index]
                .alignment_sub_bins[alignment_index];
            // Only chunk in the sub-bin: no forward chunk, back-link is the
            // sub-bin head itself.
            (None, &mut sub_bin.fd as *mut _)
        },
        None => {
            // Too big for the smallbins: lives in the other-bin's circular
            // list, anchored by the allocator's fake chunk.
            (
                Some(unsafe { guard.allocator.fake_chunk_of_other_bin_ptr() }),
                guard.allocator.ptr_to_fd_of_fake_chunk_of_other_bin(),
            )
        },
    }
}
/// RAII helper for tests: owns a block of memory obtained from the global
/// allocator and an `Allocator` initialized on top of it, releasing the
/// memory when dropped.
#[derive(Debug)]
struct AllocatorInitGuard {
    // Start address of the backing memory (0 until `init` is called).
    addr: usize,
    // Layout used to allocate `addr`, needed again for deallocation.
    layout: Layout,
    // The allocator under test, managing the memory at `addr`.
    allocator: Allocator<TEST_SMALLBINS_AMOUNT, TEST_ALIGNMENT_SUB_BINS_AMOUNT>,
}
impl AllocatorInitGuard {
    /// Creates a guard with no backing memory and an empty allocator.
    /// Call `init` before using the allocator.
    const fn empty() -> Self {
        Self {
            addr: 0,
            layout: Layout::new::<u8>(),
            allocator: Allocator::empty(),
        }
    }

    /// Allocates `mem_size` bytes with the allocator's minimum alignment and
    /// initializes the allocator on them.
    fn init(&mut self, mem_size: usize) {
        self.init_with_alignment(mem_size, MIN_ALIGNMENT);
    }

    /// Allocates `mem_size` bytes with the given `alignment` and initializes
    /// the allocator on them.
    ///
    /// Panics if the layout is invalid or the global allocator fails.
    fn init_with_alignment(&mut self, mem_size: usize, alignment: usize) {
        self.layout = Layout::from_size_align(mem_size, alignment).unwrap();
        // SAFETY: callers pass a non-zero `mem_size`, so the layout has a
        // non-zero size as `GlobalAlloc::alloc` requires.
        let ptr = unsafe { std::alloc::alloc(self.layout) };
        // BUGFIX: previously a failed allocation (null pointer) was passed
        // straight to `Allocator::init` and later to `dealloc`, which is
        // undefined behavior. Fail fast instead.
        assert!(!ptr.is_null(), "failed to allocate backing memory for the test heap");
        self.addr = ptr as usize;
        unsafe { self.allocator.init(self.addr, mem_size) }
    }

    /// The start address of the heap's backing memory.
    fn addr(&self) -> usize {
        self.addr
    }
}
impl Drop for AllocatorInitGuard {
    /// Releases the backing memory obtained in `init_with_alignment`.
    fn drop(&mut self) {
        // BUGFIX: a guard created with `empty()` that was never `init`ed
        // still has `addr == 0`; passing a null pointer to
        // `std::alloc::dealloc` is undefined behavior, so skip it.
        if self.addr != 0 {
            // SAFETY: `addr` was allocated with `self.layout` in
            // `init_with_alignment` and is deallocated exactly once, here.
            unsafe { std::alloc::dealloc(self.addr as *mut u8, self.layout) }
        }
    }
}