use crate::*;
use crate::error::ExcessiveSliceRequestedError;
use crate::meta::Meta;
use core::alloc::Layout;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
/// Allocation support: request new memory blocks of a given byte size.
///
/// # Safety
/// NOTE(review): `unsafe trait` — implementors presumably guarantee returned pointers are
/// valid for `size` bytes until freed; confirm the exact contract against the crate-level docs.
///
/// The two provided methods are defined in terms of *each other*: an implementor must
/// override at least one of them, or any call will recurse forever.
pub unsafe trait Alloc : Meta {
    /// Allocate `size` bytes of uninitialized memory.
    ///
    /// Defaults to [`Alloc::alloc_zeroed`] (zeroed memory is trivially valid "uninit" memory).
    fn alloc_uninit(&self, size: usize) -> Result<NonNull<MaybeUninit<u8>>, Self::Error> {
        let zeroed = self.alloc_zeroed(size)?;
        Ok(zeroed.cast())
    }
    /// Allocate `size` bytes of zeroed memory.
    ///
    /// Defaults to [`Alloc::alloc_uninit`] followed by an explicit zeroing pass.
    fn alloc_zeroed(&self, size: usize) -> Result<AllocNN0, Self::Error> {
        // A `size`-byte, align-1 layout; rejects sizes no slice could ever describe.
        let layout = match Layout::from_size_align(size, 1) {
            Ok(layout) => layout,
            Err(_) => return Err(ExcessiveSliceRequestedError { requested: size }.into()),
        };
        let alloc = self.alloc_uninit(size)?;
        // SAFETY: `alloc` was just allocated with at least `layout.size()` bytes.
        let bytes = unsafe { util::slice::from_raw_bytes_layout_mut(alloc, layout) };
        for byte in bytes.iter_mut() { *byte = MaybeUninit::new(0u8); }
        Ok(alloc.cast())
    }
}
/// Deallocation support: return previously allocated blocks to the allocator.
///
/// # Safety
/// NOTE(review): `unsafe trait` — implementors presumably must accept any pointer their
/// paired `Alloc` produced; confirm the exact contract against the crate-level docs.
///
/// The two provided methods are defined in terms of *each other*: an implementor must
/// override at least one of them, or any call will recurse forever.
pub unsafe trait Free : meta::Meta {
    /// Free the (never null) allocation at `ptr`.
    ///
    /// # Safety
    /// `ptr` must be an allocation of this allocator that has not yet been freed.
    unsafe fn free(&self, ptr: NonNull<MaybeUninit<u8>>) {
        // SAFETY: a `NonNull` is a valid (non-null) raw pointer for `free_nullable`;
        // the caller upholds the rest of the contract.
        unsafe { self.free_nullable(ptr.as_ptr()) }
    }
    /// Free the allocation at `ptr`; null is explicitly allowed and is a no-op.
    ///
    /// # Safety
    /// `ptr` must be null, or an allocation of this allocator that has not yet been freed.
    unsafe fn free_nullable(&self, ptr: *mut MaybeUninit<u8>) {
        match NonNull::new(ptr) {
            // SAFETY: non-null here, and the caller upholds the rest of `free`'s contract.
            Some(nonnull) => unsafe { self.free(nonnull) },
            None => {} // freeing null is a no-op
        }
    }
}
/// Reallocation support: resize an existing allocation, possibly moving it.
///
/// # Safety
/// NOTE(review): `unsafe trait` — implementors presumably guarantee that a successful
/// `realloc_*` yields a pointer valid for `new_size` bytes; confirm the exact contract
/// against the crate-level docs.
pub unsafe trait Realloc : Alloc + Free {
/// Whether [`Realloc::realloc_zeroed`] is supported by this allocator.
/// NOTE(review): semantics inferred from the gate in `test::zeroed_realloc` — confirm.
const CAN_REALLOC_ZEROED : bool;
/// Resize the allocation at `ptr` to `new_size` bytes; any grown bytes are uninitialized.
///
/// # Safety
/// `ptr` must be a live allocation of this allocator. On `Ok`, the old pointer is
/// presumably invalidated and the returned pointer must be used instead; on `Err`,
/// the old allocation remains usable (this is how `test::uninit_realloc` uses it).
unsafe fn realloc_uninit(&self, ptr: NonNull<MaybeUninit<u8>>, new_size: usize) -> Result<NonNull<MaybeUninit<u8>>, Self::Error>;
/// Resize the allocation at `ptr` to `new_size` bytes; newly exposed bytes are zeroed
/// (see the asserts in `test::zeroed_realloc`).
///
/// # Safety
/// Same contract as [`Realloc::realloc_uninit`].
unsafe fn realloc_zeroed(&self, ptr: NonNull<MaybeUninit<u8>>, new_size: usize) -> Result<NonNull<MaybeUninit<u8>>, Self::Error>;
}
pub unsafe trait SizeOf : SizeOfDebug {
unsafe fn size_of(&self, ptr: NonNull<MaybeUninit<u8>>) -> usize {
unsafe { SizeOfDebug::size_of_debug(self, ptr) }.unwrap()
}
}
/// Best-effort allocation size queries: may return `None` when the size is unknown.
///
/// # Safety
/// NOTE(review): `unsafe trait` — implementors presumably guarantee any `Some(size)`
/// returned is accurate for the allocation (the `test::size_*` functions assert on it);
/// confirm the exact contract against the crate-level docs.
pub unsafe trait SizeOfDebug : meta::Meta {
/// Query the size of the allocation at `ptr`, or `None` if this allocator cannot tell.
///
/// # Safety
/// `ptr` must denote a live allocation belonging to this allocator.
unsafe fn size_of_debug(&self, ptr: NonNull<MaybeUninit<u8>>) -> Option<usize>;
}
#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl<'a, A: Alloc> Alloc for &'a A {
fn alloc_uninit(&self, size: usize) -> Result<NonNull<MaybeUninit<u8>>, Self::Error> { A::alloc_uninit(self, size) }
fn alloc_zeroed(&self, size: usize) -> Result<NonNull< u8 >, Self::Error> { A::alloc_zeroed(self, size) }
}
// `&A` frees exactly like `A`: forward every method through one level of deref.
#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl<'a, A: Free> Free for &'a A {
    unsafe fn free(&self, ptr: NonNull<MaybeUninit<u8>>) { unsafe { (**self).free(ptr) } }
    unsafe fn free_nullable(&self, ptr: *mut MaybeUninit<u8>) { unsafe { (**self).free_nullable(ptr) } }
}
// `&A` reallocates exactly like `A`: forward the const and every method through one deref.
#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl<'a, A: Realloc> Realloc for &'a A {
    const CAN_REALLOC_ZEROED : bool = A::CAN_REALLOC_ZEROED;
    unsafe fn realloc_uninit(&self, ptr: NonNull<MaybeUninit<u8>>, new_size: usize) -> Result<NonNull<MaybeUninit<u8>>, Self::Error> { unsafe { (**self).realloc_uninit(ptr, new_size) } }
    unsafe fn realloc_zeroed(&self, ptr: NonNull<MaybeUninit<u8>>, new_size: usize) -> Result<NonNull<MaybeUninit<u8>>, Self::Error> { unsafe { (**self).realloc_zeroed(ptr, new_size) } }
}
// `&A` reports sizes exactly like `A`: forward through one level of deref.
#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl<'a, A: SizeOf> SizeOf for &'a A {
    unsafe fn size_of(&self, ptr: NonNull<MaybeUninit<u8>>) -> usize { unsafe { (**self).size_of(ptr) } }
}
// `&A` reports debug sizes exactly like `A`: forward through one level of deref.
#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl<'a, A: SizeOfDebug> SizeOfDebug for &'a A {
    unsafe fn size_of_debug(&self, ptr: NonNull<MaybeUninit<u8>>) -> Option<usize> { unsafe { (**self).size_of_debug(ptr) } }
}
/// Reusable conformance tests for implementations of [`Alloc`] / [`Free`] / [`Realloc`] /
/// [`SizeOfDebug`].
///
/// Each function takes an allocator by value and panics (via `assert!`) when the allocator
/// violates the property the function is named after.
pub mod test {
use super::*;
#[cfg(feature = "std")] use std::io::Write;
// RAII guard pairing an allocator (field 0) with one of its allocations (field 1), so every
// test allocation is freed even when an assert panics mid-test.
#[allow(clippy::upper_case_acronyms)]
struct TTB<A: Free>(A, NonNull<MaybeUninit<u8>>);
impl<A: Free> Drop for TTB<A> {
fn drop(&mut self) {
// SAFETY: `self.1` was obtained from `self.0` in `try_new_uninit`/`try_new_zeroed`
// and is freed exactly once, here.
unsafe { self.0.free(self.1) };
}
}
impl<A: Free> TTB<A> {
/// Allocate `size` uninitialized bytes from `allocator`, freeing them on drop.
pub fn try_new_uninit(allocator: A, size: usize) -> Result<Self, A::Error> where A : Alloc { let alloc = allocator.alloc_uninit(size)?; Ok(Self(allocator, alloc)) }
/// Allocate `size` zeroed bytes from `allocator`, freeing them on drop.
pub fn try_new_zeroed(allocator: A, size: usize) -> Result<Self, A::Error> where A : Alloc { let alloc = allocator.alloc_zeroed(size)?; Ok(Self(allocator, alloc.cast())) }
fn as_ptr(&self) -> *mut MaybeUninit<u8> { self.1.as_ptr() }
fn as_nonnull(&self) -> NonNull<MaybeUninit<u8>> { self.1 }
}
/// Check that allocations are at least as aligned as `A::MAX_ALIGN` / `A::MIN_ALIGN` promise.
pub fn alignment<A: Alloc + Free>(allocator: A) {
let mut align = A::MAX_ALIGN;
// Pass 1: for each power-of-two size from MAX_ALIGN down to 1, an allocation of that size
// must have an address with no bits set below that alignment.
loop {
let unaligned_mask = align.as_usize() - 1;
if let Ok(alloc) = TTB::try_new_uninit(&allocator, align.as_usize()) {
let alloc = alloc.as_ptr();
let addr = alloc as usize;
assert_eq!(0, addr & unaligned_mask, "allocation for size {align:?} @ {alloc:?} had less than expected alignment ({align:?} <= MAX_ALIGN)");
}
let Some(next) = Alignment::new(align.as_usize() >> 1) else { break };
align = next;
}
// Pass 2: OR together up to 100 allocation addresses per size; the trailing zero bits
// common to all of them give the alignment actually observed, which must reach
// min(MAX_ALIGN, size) clamped up to at least MIN_ALIGN.
for size in [1, 2, 4, 8, 16, 32, 64, 128, 256] {
#[cfg(feature = "std")] std::dbg!(size);
let mut addr_bits = 0;
for _ in 0 .. 100 {
if let Ok(alloc) = TTB::try_new_uninit(&allocator, size) {
addr_bits |= alloc.as_ptr() as usize;
}
}
// No allocation of this size succeeded — nothing to measure for this size.
if addr_bits == 0 { continue }
let align = 1 << addr_bits.trailing_zeros(); let expected_align = A::MAX_ALIGN.as_usize().min(size).max(A::MIN_ALIGN.as_usize());
assert!(align >= expected_align);
}
}
/// Probe allocation sizes at and around overflow-prone boundaries (0, `u32::MAX`,
/// `usize::MAX`, …). Any allocation that succeeds gets its last byte written, to verify
/// the memory is actually usable and the size math didn't overflow.
pub fn edge_case_sizes<A: Alloc + Free>(allocator: A) {
let boundaries = if cfg!(target_pointer_width = "64") {
&[0, (u32::MAX/2) as usize, (u32::MAX ) as usize, usize::MAX/2, usize::MAX][..]
} else {
&[0, usize::MAX/2, usize::MAX][..]
};
for boundary in boundaries.iter().copied() {
for offset in -64_isize .. 64_isize {
let Some(size) = boundary.checked_add_signed(offset) else { continue };
#[cfg(feature = "std")] std::dbg!(size);
let Ok(alloc) = TTB::try_new_uninit(&allocator, size) else { continue };
if let Some(last_byte_index) = size.checked_sub(1) {
// Clamp: offsetting a pointer past isize::MAX bytes would be UB for `add`.
let last_byte_index = last_byte_index.min(isize::MAX as usize);
// SAFETY: `last_byte_index < size`, so this stays within the allocation.
let last_byte = unsafe { alloc.as_ptr().add(last_byte_index) };
unsafe { last_byte.write_volatile(MaybeUninit::new(42u8)) };
}
}
}
}
/// `free_nullable(null)` must be a no-op — this simply must not crash.
pub fn nullable<A: Free>(allocator: A) {
unsafe { allocator.free_nullable(core::ptr::null_mut()) }
}
/// Check that reported allocation sizes exactly match the requested sizes (including 0).
pub fn size_exact_alloc<A: Alloc + Free + SizeOfDebug>(allocator: A) {
for size in [0, 1, 3, 7, 15, 31, 63, 127] {
let Ok(alloc) = TTB::try_new_uninit(&allocator, size) else { continue };
// `unwrap_or(size)` makes a `None` (size unknown) pass trivially.
let query_size = unsafe { allocator.size_of_debug(alloc.as_nonnull()) }.unwrap_or(size);
assert_eq!(size, query_size, "allocator returns oversized allocs, use thin::test::size_over_alloc instead");
}
}
/// Like [`size_exact_alloc`], but tolerates zero-size requests reporting 1 byte.
pub fn size_exact_alloc_except_zsts<A: Alloc + Free + SizeOfDebug>(allocator: A) {
for size in [0, 1, 3, 7, 15, 31, 63, 127] {
let Ok(alloc) = TTB::try_new_uninit(&allocator, size) else { continue };
let query_size = unsafe { allocator.size_of_debug(alloc.as_nonnull()) }.unwrap_or(size);
assert_eq!(size.max(1), query_size, "allocator returns oversized allocs, use thin::test::size_over_alloc instead");
}
}
/// Check that reported sizes are never undersized, and that at least one allocation was
/// strictly oversized (otherwise [`size_exact_alloc`] is the test that should be used).
pub fn size_over_alloc<A: Alloc + Free + SizeOfDebug>(allocator: A) {
let mut any_sized = false;
let mut over = false;
for size in [0, 1, 3, 7, 15, 31, 63, 127] {
let Ok(alloc) = TTB::try_new_uninit(&allocator, size) else { continue };
let Some(query_size) = (unsafe { allocator.size_of_debug(alloc.as_nonnull()) }) else { continue };
any_sized = true;
over |= size < query_size;
assert!(size <= query_size, "allocator returns undersized allocs");
}
// Passes vacuously when the allocator never reported a size (`any_sized == false`).
assert!(over || !any_sized, "no allocations were oversized");
}
/// Verify `alloc_uninit` really returns *uninitialized* memory by hunting for a nonzero byte.
///
/// Deliberately unsound: `assume_init` on a freshly allocated, never-written byte reads
/// uninitialized memory, which is UB — hence `unsafe fn` and the `unsound` name. Each probed
/// byte is poisoned with 0xFF so a recycled allocation reads nonzero on a later iteration.
#[allow(clippy::missing_safety_doc)] pub unsafe fn uninit_alloc_unsound<A: Alloc + Free>(allocator: A) {
let mut any = false;
for _ in 0 .. 1000 {
if let Ok(alloc) = TTB::try_new_uninit(&allocator, 1) {
any = true;
let byte = unsafe { &mut *alloc.as_ptr() };
let is_uninit = unsafe { byte.assume_init() } != 0;
byte.write(0xFF);
// A single nonzero byte proves the memory wasn't zeroed — test passes.
if is_uninit { return } }
}
// Only fail if allocations succeeded yet every observed byte was zero.
assert!(!any, "A::alloc_uninit appears to allocate zeroed memory");
}
/// Exercise `realloc_uninit` through many grow/shrink transitions, checking that the
/// previously-written prefix survives every successful reallocation.
pub fn uninit_realloc<A: Realloc>(allocator: A) {
#[cfg(feature = "std")] let log_spam = std::env::var_os("IALLOC_TEST_VERBOSE").is_some();
// A zero-byte alloc must survive a zero-byte realloc (realloc(ptr, 0) must not free).
if let Ok(alloc) = allocator.alloc_uninit(0) {
let alloc = unsafe { allocator.realloc_uninit(alloc, 0) }.expect("allocating 0 bytes succeeds, but reallocating to 0 bytes fails: this allocator likely *freed* and should add explicit checks to ban zero-length (re)allocs!");
unsafe { allocator.free(alloc) };
}
// `size` is mutated below: it tracks the allocation's current size along the realloc chain.
for mut size in 0 ..= 100 {
let Ok(mut alloc) = allocator.alloc_uninit(size) else {
#[cfg(feature = "std")] std::eprintln!("initial allocation of {size} bytes failed");
continue
};
#[cfg(feature = "std")] std::eprintln!("initial allocation of {size} bytes @ {alloc:?}");
// Stamp each byte with its index so preserved data can be verified after each realloc.
for (pos, byte) in unsafe { core::slice::from_raw_parts_mut(alloc.as_ptr(), size) }.iter_mut().enumerate() { *byte = MaybeUninit::new(pos as u8); }
for realloc_size in [
size+1, size+2, size+3, size.saturating_sub(1), size.saturating_sub(2), size.saturating_sub(3),
0, 0, 0, 50, 50, 0, 25, 30, 100, 66, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
] {
// Opt-in verbose logging; the lock is held so the "attempting"/"result" lines stay paired.
#[cfg(feature = "std")] let stdout = log_spam.then(|| {
let mut stdout = std::io::stdout().lock();
let _ = write!(stdout, "attempting to realloc_uninit({alloc:?}, ...) from {size} → {realloc_size} bytes...");
let _ = stdout.flush();
stdout
});
if let Ok(realloc) = unsafe { allocator.realloc_uninit(alloc, realloc_size) } {
#[cfg(feature = "std")] if let Some(mut stdout) = stdout {
let _ = writeln!(stdout, "successful reallocation to {realloc:?}");
let _ = stdout.flush();
}
let prev_size = size;
// On success the old pointer is invalid; switch to the new one.
alloc = realloc.cast();
size = realloc_size;
let slice = unsafe { core::slice::from_raw_parts_mut(alloc.as_ptr(), size) };
for (pos, byte) in slice.iter_mut().enumerate() {
if pos < prev_size {
// Bytes that existed before the realloc must be preserved.
assert_eq!(unsafe { byte.assume_init() }, pos as u8);
} else {
// Newly grown bytes are uninitialized; stamp them for the next round.
*byte = MaybeUninit::new(pos as u8);
}
}
} else {
// Failed realloc: the original allocation stays valid, keep using `alloc` as-is.
#[cfg(feature = "std")] if let Some(mut stdout) = stdout {
let _ = writeln!(stdout, "failed");
let _ = stdout.flush();
}
}
}
unsafe { allocator.free(alloc) };
}
}
/// Verify `alloc_zeroed` returns zeroed memory, poisoning each byte afterwards so a
/// recycled-but-unzeroed block is caught on a later iteration.
pub fn zeroed_alloc<A: Alloc + Free>(allocator: A) {
for _ in 0 .. 1000 {
if let Ok(alloc) = TTB::try_new_zeroed(&allocator, 1) {
let byte : &mut u8 = unsafe { &mut *alloc.as_ptr().cast::<u8>() };
assert!(*byte == 0u8, "A::alloc_zeroed returned unzeroed memory!");
*byte = 0xFF; }
}
}
/// Exercise `realloc_zeroed` through many grow/shrink transitions, checking both data
/// preservation and the zeroing of newly exposed bytes.
pub fn zeroed_realloc<A: Realloc>(allocator: A) {
#[cfg(feature = "std")] let log_spam = std::env::var_os("IALLOC_TEST_VERBOSE").is_some();
if A::CAN_REALLOC_ZEROED {
// A zero-byte alloc must survive a zero-byte realloc (realloc(ptr, 0) must not free).
if let Ok(alloc) = allocator.alloc_zeroed(0) {
let alloc = unsafe { allocator.realloc_zeroed(alloc.cast(), 0) }.expect("allocating 0 bytes succeeds, but reallocating to 0 bytes fails: this allocator likely *freed* and should add explicit checks to ban zero-length (re)allocs!");
unsafe { allocator.free(alloc.cast()) };
}
}
for mut size in 0 ..= 100 {
let Ok(alloc) = allocator.alloc_zeroed(size) else {
#[cfg(feature = "std")] std::eprintln!("initial allocation of {size} bytes failed");
continue
};
#[cfg(feature = "std")] std::eprintln!("initial allocation of {size} bytes @ {alloc:?}");
let mut alloc : NonNull<MaybeUninit<u8>> = alloc.cast();
// Bytes below `min_prev_size` never went out of scope, so their stamps must persist.
// Bytes in [min_prev_size, max_prev_size) may have been shrunk away and re-zeroed at
// some point, so they may hold either their stamp or zero. Bytes at or beyond
// `max_prev_size` were newly exposed by the latest growth and must be zero.
let mut min_prev_size = size;
let mut max_prev_size = size;
// Stamp each byte with its index so preserved data can be verified after each realloc.
for (pos, byte) in unsafe { core::slice::from_raw_parts_mut(alloc.as_ptr(), size) }.iter_mut().enumerate() { *byte = MaybeUninit::new(pos as u8); }
for realloc_size in [
size+1, size+2, size+3, size.saturating_sub(1), size.saturating_sub(2), size.saturating_sub(3),
0, 0, 0, 50, 50, 0, 25, 30, 100, 66, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
] {
// Opt-in verbose logging; the lock is held so the "attempting"/"result" lines stay paired.
#[cfg(feature = "std")] let stdout = log_spam.then(|| {
let mut stdout = std::io::stdout().lock();
let _ = write!(stdout, "attempting to realloc_zeroed({alloc:?}, ...) from {size} → {realloc_size} bytes...");
let _ = stdout.flush();
stdout
});
if let Ok(realloc) = unsafe { allocator.realloc_zeroed(alloc, realloc_size) } {
#[cfg(feature = "std")] if let Some(mut stdout) = stdout {
let _ = writeln!(stdout, "successful reallocation to {realloc:?}");
let _ = stdout.flush();
}
let prev_size = size;
// On success the old pointer is invalid; switch to the new one.
alloc = realloc.cast();
size = realloc_size;
min_prev_size = min_prev_size.min(prev_size);
max_prev_size = max_prev_size.max(prev_size);
let slice = unsafe { core::slice::from_raw_parts_mut(alloc.as_ptr(), size) };
for (pos, byte) in slice.iter_mut().enumerate() {
let byte = unsafe { byte.assume_init() };
if pos < min_prev_size {
assert_eq!(byte, pos as u8);
} else if pos < max_prev_size {
assert!(byte == 0 || byte == pos as u8);
} else {
assert_eq!(byte, 0);
}
}
// Re-stamp the grown tail so the next iteration can check preservation.
for (pos, byte) in slice.iter_mut().enumerate().skip(prev_size) { *byte = MaybeUninit::new(pos as u8); }
} else {
// Failed realloc: the original allocation stays valid, keep using `alloc` as-is.
#[cfg(feature = "std")] if let Some(mut stdout) = stdout {
let _ = writeln!(stdout, "failed");
let _ = stdout.flush();
}
}
}
unsafe { allocator.free(alloc) };
}
}
/// `alloc_uninit(0)` must succeed if and only if `A::ZST_SUPPORTED` is set.
pub fn zst_supported_accurate<A: Alloc + Free>(allocator: A) {
let alloc = TTB::try_new_uninit(&allocator, 0);
let alloc = alloc.as_ref().map(|a| a.as_ptr());
assert_eq!(alloc.is_ok(), A::ZST_SUPPORTED, "alloc = {alloc:?}, ZST_SUPPORTED = {}", A::ZST_SUPPORTED);
}
/// `alloc_uninit(0)` must succeed when `A::ZST_SUPPORTED` is set (unadvertised zero-size
/// support is tolerated).
pub fn zst_supported_conservative<A: Alloc + Free>(allocator: A) {
let alloc = TTB::try_new_uninit(&allocator, 0);
let alloc = alloc.as_ref().map(|a| a.as_ptr());
if A::ZST_SUPPORTED { assert!(alloc.is_ok(), "alloc = {alloc:?}, ZST_SUPPORTED = {}", A::ZST_SUPPORTED) }
}
/// Like [`zst_supported_conservative`], for allocators without [`Free`] — leaks the allocation.
pub fn zst_supported_conservative_leak<A: Alloc>(allocator: A) {
let alloc = allocator.alloc_uninit(0);
if A::ZST_SUPPORTED { assert!(alloc.is_ok(), "alloc = {alloc:?}, ZST_SUPPORTED = {}", A::ZST_SUPPORTED) }
}
}