#![no_std]
#![cfg_attr(
feature = "allocator",
feature(allocator_api, nonnull_slice_from_raw_parts, slice_ptr_get)
)]
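//! A `#![no_std]` memory allocator that keeps small free chunks in
//! size-and-alignment-indexed "smallbins" and larger chunks in a single
//! circular "other" bin, coalescing neighboring free chunks on deallocation.
//!
//! The raw [`Allocator`] type performs no locking; with the `spin` feature,
//! `SpinLockedAllocator` wraps it in a spin lock and implements
//! [`core::alloc::GlobalAlloc`] (and, with the nightly `allocator` feature,
//! [`core::alloc::Allocator`]).
//!
//! A minimal usage sketch (assuming the `spin` feature; the crate path, the
//! `init_heap` helper, and the heap addresses are placeholders supplied by the
//! surrounding environment):
//!
//! ```ignore
//! #[global_allocator]
//! static ALLOCATOR: SpinLockedAllocator = SpinLockedAllocator::empty();
//!
//! fn init_heap(heap_start_addr: usize, heap_size: usize) {
//!     // Safety: the range must be valid, otherwise-unused memory that is
//!     // only ever accessed through the allocator.
//!     unsafe { ALLOCATOR.init(heap_start_addr, heap_size) };
//! }
//! ```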
#[cfg(test)]
#[macro_use]
extern crate std;
mod alignment;
mod bins;
mod chunks;
mod divisible_by_4_usize;
mod smallest_type_which_has_at_least_n_bits;
#[cfg(test)]
mod tests;
use core::{alloc::Layout, ptr::NonNull};
use alignment::*;
use bins::SmallBins;
pub use bins::{DEFAULT_ALIGNMENT_SUB_BINS_AMOUNT, DEFAULT_SMALLBINS_AMOUNT};
use chunks::*;
use smallest_type_which_has_at_least_n_bits::{
SmallestTypeWhichHasAtLeastNBitsStruct, SmallestTypeWhichHasAtLeastNBitsTrait,
};
const USIZE_ALIGNMENT: usize = core::mem::align_of::<usize>();
const USIZE_SIZE: usize = core::mem::size_of::<usize>();
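/// The minimum alignment (and size granularity) used for all chunks: at least
/// 8 bytes, or the alignment of `usize` if that is larger.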
const MIN_ALIGNMENT: usize = if USIZE_ALIGNMENT < 8 {
8
} else {
USIZE_ALIGNMENT
};
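/// The smallest size that a free chunk may occupy, including its header: the
/// `FreeChunk` bookkeeping plus one extra `usize`.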
const MIN_FREE_CHUNK_SIZE_INCLUDING_HEADER: usize = core::mem::size_of::<FreeChunk>() + USIZE_SIZE;
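/// The size of the header that precedes every chunk's content.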
const HEADER_SIZE: usize = core::mem::size_of::<Chunk>();
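/// The core allocator.
///
/// Small free chunks are kept in `SMALLBINS_AMOUNT` smallbins, each of which
/// is further split into `ALIGNMENT_SUB_BINS_AMOUNT` sub-bins by the alignment
/// of the chunk's content address; chunks too large for any smallbin are kept
/// in a single circular list headed by a fake chunk (the "other" bin).
///
/// A `heap_end_addr` of `0` means the allocator has not been initialized yet.
///
/// This type performs no locking; use `SpinLockedAllocator` (behind the `spin`
/// feature) when the allocator needs to be shared between threads.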
#[derive(Debug)]
pub struct Allocator<
const SMALLBINS_AMOUNT: usize = DEFAULT_SMALLBINS_AMOUNT,
const ALIGNMENT_SUB_BINS_AMOUNT: usize = DEFAULT_ALIGNMENT_SUB_BINS_AMOUNT,
> where
SmallestTypeWhichHasAtLeastNBitsStruct<ALIGNMENT_SUB_BINS_AMOUNT>:
SmallestTypeWhichHasAtLeastNBitsTrait,
{
smallbins: SmallBins<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>,
heap_end_addr: usize,
fake_chunk_of_other_bin: FakeFreeChunk,
}
impl<const SMALLBINS_AMOUNT: usize, const ALIGNMENT_SUB_BINS_AMOUNT: usize>
Allocator<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>
where
SmallestTypeWhichHasAtLeastNBitsStruct<ALIGNMENT_SUB_BINS_AMOUNT>:
SmallestTypeWhichHasAtLeastNBitsTrait,
{
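    /// Creates an empty, uninitialized allocator.
    ///
    /// This is a `const fn`, so it can be used to initialize a `static`. All
    /// allocation requests return a null pointer until [`Self::init`] is
    /// called.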
pub const fn empty() -> Self {
Self {
heap_end_addr: 0,
fake_chunk_of_other_bin: FakeFreeChunk {
fd: None,
ptr_to_fd_of_bk: core::ptr::null_mut(),
},
smallbins: SmallBins::new(),
}
}
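    /// Returns `true` if [`Self::init`] has already been called on this
    /// allocator (i.e. the heap end address is non-zero).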
pub fn was_initialized(&self) -> bool {
self.heap_end_addr != 0
}
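    /// Initializes the allocator with the given heap region.
    ///
    /// The start address is aligned up and the end address is aligned down to
    /// `MIN_ALIGNMENT`, and the aligned region is turned into a single free
    /// chunk.
    ///
    /// # Panics
    ///
    /// Panics if the allocator was already initialized, or if the region is
    /// empty after aligning its start and end addresses.
    ///
    /// # Safety
    ///
    /// The given region must be valid, otherwise-unused memory that is only
    /// accessed through this allocator for as long as the allocator is in use.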
pub unsafe fn init(&mut self, heap_start_addr: usize, heap_size: usize) {
if self.was_initialized() {
panic!("the heap was already initialized");
}
let aligned_heap_start_addr = align_up(heap_start_addr, MIN_ALIGNMENT);
let heap_end_addr = heap_start_addr + heap_size;
let aligned_heap_end_addr = align_down(heap_end_addr, MIN_ALIGNMENT);
let aligned_size = aligned_heap_end_addr.saturating_sub(aligned_heap_start_addr);
if aligned_size == 0 {
panic!("heap size is 0 after aligning heap start and end addresses");
}
self.fake_chunk_of_other_bin.fd = Some(self.fake_chunk_of_other_bin_ptr());
self.fake_chunk_of_other_bin.ptr_to_fd_of_bk = self.ptr_to_fd_of_fake_chunk_of_other_bin();
let chunk_size = aligned_size - HEADER_SIZE;
let _ = FreeChunk::create_new_without_updating_next_chunk(
aligned_heap_start_addr,
chunk_size,
self,
);
self.heap_end_addr = aligned_heap_end_addr;
}
unsafe fn fake_chunk_of_other_bin_ptr(&self) -> FreeChunkPtr {
self.fake_chunk_of_other_bin.free_chunk_ptr()
}
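    /// Returns the first free chunk in the "other" bin, or `None` if the bin
    /// is empty (the fake chunk's `fd` is unset or points back at the fake
    /// chunk).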
fn first_free_chunk_in_other_bin(&self) -> Option<FreeChunkPtr> {
let fd_of_fake_chunk = self.fake_chunk_of_other_bin.fd?;
if fd_of_fake_chunk == unsafe { self.fake_chunk_of_other_bin_ptr() } {
return None;
}
Some(fd_of_fake_chunk)
}
fn ptr_to_fd_of_fake_chunk_of_other_bin(&mut self) -> *mut Option<FreeChunkPtr> {
&mut self.fake_chunk_of_other_bin.fd
}
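    /// Rounds a requested size up to the minimum chunk content size and to a
    /// multiple of `MIN_ALIGNMENT`.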
fn prepare_size(size: usize) -> usize {
unsafe {
align_up(
core::cmp::max(size, MIN_FREE_CHUNK_SIZE_INCLUDING_HEADER - HEADER_SIZE),
MIN_ALIGNMENT,
)
}
}
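    /// Chooses the `fd` and `bk` pointers for linking a newly freed chunk of
    /// the given size: into the matching smallbin if the size fits one,
    /// otherwise into the "other" bin.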
unsafe fn get_fd_and_bk_pointers_for_inserting_new_free_chunk(
&mut self,
chunk_size: usize,
alignment_index_of_chunk_content_addr: usize,
) -> (Option<FreeChunkPtr>, *mut Option<FreeChunkPtr>) {
if let Some(smallbin_index) =
SmallBins::<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>::smallbin_index(chunk_size)
{
self.smallbins
.get_fd_and_bk_pointers_for_inserting_to_smallbin(
smallbin_index,
alignment_index_of_chunk_content_addr,
)
} else {
self.get_fd_and_bk_pointers_for_inserting_new_free_chunk_in_other_bin(chunk_size)
}
}
fn get_fd_and_bk_pointers_for_inserting_new_free_chunk_in_other_bin(
&mut self,
chunk_size: usize,
) -> (Option<FreeChunkPtr>, *mut Option<FreeChunkPtr>) {
match self.first_free_chunk_in_other_bin() {
Some(mut first_free_chunk) => {
if chunk_size > unsafe { first_free_chunk.as_mut() }.size() {
(
Some(first_free_chunk),
self.ptr_to_fd_of_fake_chunk_of_other_bin(),
)
} else {
(
Some(unsafe { self.fake_chunk_of_other_bin_ptr() }),
self.fake_chunk_of_other_bin.ptr_to_fd_of_bk,
)
}
}
None => (
Some(unsafe { self.fake_chunk_of_other_bin_ptr() }),
self.ptr_to_fd_of_fake_chunk_of_other_bin(),
),
}
}
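    /// Normalizes a layout: the size via [`Self::prepare_size`] and the
    /// alignment to at least `MIN_ALIGNMENT`.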
fn prepare_layout(layout: Layout) -> (usize, usize) {
let layout_size = Self::prepare_size(layout.size());
let layout_align = core::cmp::max(layout.align(), MIN_ALIGNMENT);
(layout_size, layout_align)
}
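    /// Allocates memory for the given layout, returning a null pointer if the
    /// allocator is not initialized or no fitting chunk is found.
    ///
    /// Chunks are tried in the following order: an optimally sized smallbin
    /// chunk with a matching alignment, then the "other" bin, then larger
    /// (suboptimal) smallbin chunks that are already aligned, and finally
    /// suboptimal smallbin chunks that require splitting off start padding.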
pub unsafe fn alloc(&mut self, layout: core::alloc::Layout) -> *mut u8 {
if !self.was_initialized() {
return core::ptr::null_mut();
}
let (layout_size, layout_align) = Self::prepare_layout(layout);
let alignment_index_if_size_is_smallbin_size =
if SmallBins::<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>::is_smallbin_size(
layout_size,
) {
Some(
SmallBins::<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>::alignment_index(
layout_align,
),
)
} else {
None
};
if let Some(alignment_index) = alignment_index_if_size_is_smallbin_size {
if let Some(ptr) =
self.alloc_from_optimal_chunk(layout_size, layout_align, alignment_index)
{
return ptr.as_ptr();
}
}
if let Some(ptr) = self.alloc_from_other_bin(layout_size, layout_align) {
return ptr.as_ptr();
}
if let Some(alignment_index) = alignment_index_if_size_is_smallbin_size {
if let Some(ptr) = self.alloc_from_aligned_suboptimal_chunks(
layout_size,
layout_align,
alignment_index,
) {
return ptr.as_ptr();
}
if let Some(ptr) = self.alloc_from_unaligned_suboptimal_chunks(
layout_size,
layout_align,
alignment_index,
) {
return ptr.as_ptr();
}
}
core::ptr::null_mut()
}
unsafe fn alloc_from_optimal_chunk(
&mut self,
layout_size: usize,
layout_align: usize,
alignment_index: usize,
) -> Option<NonNull<u8>> {
let mut optimal_chunk =
self.smallbins
.optimal_chunk(layout_size, layout_align, alignment_index)?;
let chunk = optimal_chunk.as_mut();
Some(self.alloc_aligned_no_end_padding(chunk))
}
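    /// Scans the circular "other" bin list for the first chunk that can hold
    /// the allocation, either directly (content already aligned) or after
    /// splitting off start padding to reach the required alignment.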
unsafe fn alloc_from_other_bin(
&mut self,
layout_size: usize,
layout_align: usize,
) -> Option<NonNull<u8>> {
let fake_chunk_of_other_bin_ptr = self.fake_chunk_of_other_bin_ptr();
let mut cur_chunk_ptr = self.fake_chunk_of_other_bin.fd.unwrap();
loop {
if cur_chunk_ptr == fake_chunk_of_other_bin_ptr {
break;
}
let cur_chunk = cur_chunk_ptr.as_mut();
if cur_chunk.size() >= layout_size {
if is_aligned(cur_chunk.content_addr(), layout_align) {
return Some(self.alloc_aligned(layout_size, cur_chunk_ptr.as_mut()));
} else {
let cur_chunk_ref = cur_chunk_ptr.as_mut();
if let Some(aligned_start_addr) =
self.can_alloc_unaligned(layout_size, layout_align, cur_chunk_ref)
{
return Some(self.alloc_unaligned(
layout_size,
cur_chunk_ptr.as_mut(),
aligned_start_addr,
));
}
}
}
cur_chunk_ptr = cur_chunk.fd().unwrap();
}
None
}
unsafe fn alloc_from_aligned_suboptimal_chunks(
&mut self,
layout_size: usize,
layout_align: usize,
alignment_index: usize,
) -> Option<NonNull<u8>> {
let mut aligned_suboptimal_chunk =
self.smallbins
.aligned_suboptimal_chunk(layout_size, layout_align, alignment_index)?;
Some(self.alloc_aligned_split_end_padding_chunk(
layout_size,
aligned_suboptimal_chunk.end_padding,
aligned_suboptimal_chunk.chunk_ptr.as_mut(),
))
}
unsafe fn alloc_from_unaligned_suboptimal_chunks(
&mut self,
layout_size: usize,
layout_align: usize,
alignment_index: usize,
) -> Option<NonNull<u8>> {
let (allocated_chunk, aligned_start_addr) = {
let mut chunks_that_can_allocate_unaligned = self
.smallbins
.unaligned_suboptimal_chunks(layout_size, layout_align, alignment_index)?
.filter_map(|mut chunk_ptr| {
let chunk = chunk_ptr.as_mut();
if let Some(aligned_start_addr) =
self.can_alloc_unaligned(layout_size, layout_align, chunk_ptr.as_mut())
{
Some((chunk, aligned_start_addr))
} else {
None
}
});
chunks_that_can_allocate_unaligned.next()?
};
Some(self.alloc_unaligned(layout_size, allocated_chunk, aligned_start_addr))
}
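    /// Deallocates the memory pointed to by `ptr`, coalescing the freed chunk
    /// with its previous and/or next chunk when those are free.
    ///
    /// # Safety
    ///
    /// `ptr` must have been returned by [`Self::alloc`] or [`Self::realloc`]
    /// of this allocator and must not have been deallocated already.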
pub unsafe fn dealloc(&mut self, ptr: *mut u8) {
let chunk = UsedChunk::from_addr(ptr as usize - HEADER_SIZE);
match (
chunk.prev_chunk_if_free(),
chunk.next_chunk_if_free(self.heap_end_addr),
) {
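            // Neither neighbor is free: mark this chunk free and link it into
            // the appropriate bin.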
(None, None) => {
let (fd, bk) = self.get_fd_and_bk_pointers_for_inserting_new_free_chunk(
chunk.0.size(),
SmallBins::<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>::alignment_index_of_chunk_content_addr(
chunk.content_addr(),
),
);
let _ = chunk.mark_as_free(fd, bk, self.heap_end_addr);
}
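            // Only the next chunk is free: absorb it into this chunk, then
            // link the merged chunk into the appropriate bin.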
(None, Some(next_chunk_free)) => {
chunk.set_size(chunk.0.size() + HEADER_SIZE + next_chunk_free.size());
next_chunk_free.unlink(&mut self.smallbins);
let (fd, bk) = self.get_fd_and_bk_pointers_for_inserting_new_free_chunk(
chunk.0.size(),
SmallBins::<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>::alignment_index_of_chunk_content_addr(
chunk.content_addr(),
),
);
let _ = chunk.mark_as_free_without_updating_next_chunk(fd, bk);
}
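            // Only the previous chunk is free: grow it over this chunk and
            // move it to the bin matching its new size.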
(Some(prev_chunk_free), None) => {
prev_chunk_free.set_size_and_update_bin(
prev_chunk_free.size() + HEADER_SIZE + chunk.0.size(),
self,
);
if let Some(next_chunk_addr) = chunk.0.next_chunk_addr(self.heap_end_addr) {
Chunk::set_prev_in_use_for_chunk_with_addr(next_chunk_addr, false);
}
}
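            // Both neighbors are free: unlink the next chunk and grow the
            // previous chunk over this chunk and the next one.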
(Some(prev_chunk_free), Some(next_chunk_free)) => {
next_chunk_free.unlink(&mut self.smallbins);
prev_chunk_free.set_size_and_update_bin(
prev_chunk_free.size()
+ HEADER_SIZE
+ chunk.0.size()
+ HEADER_SIZE
+ next_chunk_free.size(),
self,
);
}
}
}
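    /// Reallocates the memory pointed to by `ptr` to `new_size` bytes.
    ///
    /// Growing first tries to absorb the next chunk if it is free, and
    /// shrinking is always done in place; otherwise a new region is allocated,
    /// `min(old, new)` bytes are copied, and the old region is freed. Returns
    /// a null pointer (leaving the old allocation untouched) if a required new
    /// allocation fails.
    ///
    /// # Safety
    ///
    /// `ptr` must have been returned by this allocator with the given `layout`
    /// and must not have been deallocated already.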
pub unsafe fn realloc(&mut self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
let previously_requested_size = Self::prepare_size(layout.size());
let new_size = Self::prepare_size(new_size);
let chunk = UsedChunk::from_addr(ptr as usize - HEADER_SIZE);
if self.try_realloc_in_place(chunk, previously_requested_size, new_size) {
return ptr;
}
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
self.realloc_new_region(ptr, layout, new_layout)
}
unsafe fn realloc_new_region(
&mut self,
ptr: *mut u8,
old_layout: Layout,
new_layout: Layout,
) -> *mut u8 {
let new_ptr = self.alloc(new_layout);
if !new_ptr.is_null() {
core::ptr::copy_nonoverlapping(
ptr,
new_ptr,
core::cmp::min(old_layout.size(), new_layout.size()),
);
self.dealloc(ptr);
}
new_ptr
}
unsafe fn try_realloc_in_place(
&mut self,
chunk: UsedChunkRef,
previously_requested_size: usize,
new_size: usize,
) -> bool {
if new_size > previously_requested_size {
self.try_grow_in_place(chunk, new_size)
} else {
self.shrink_in_place(chunk, new_size);
true
}
}
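    /// Tries to grow the chunk in place, absorbing the next chunk (or part of
    /// it) when that chunk is free and large enough; returns whether the grow
    /// succeeded.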
unsafe fn try_grow_in_place(&mut self, chunk: UsedChunkRef, new_size: usize) -> bool {
if chunk.0.size() >= new_size {
return true;
}
let next_chunk_free = match chunk.next_chunk_if_free(self.heap_end_addr) {
Some(next_chunk_free) => next_chunk_free,
None => {
return false;
}
};
let new_end_addr = chunk.content_addr() + new_size;
let next_chunk_end_addr = next_chunk_free.end_addr();
if new_end_addr > next_chunk_end_addr {
return false;
}
let space_left_at_end_of_next_free_chunk = next_chunk_end_addr - new_end_addr;
if space_left_at_end_of_next_free_chunk > MIN_FREE_CHUNK_SIZE_INCLUDING_HEADER {
next_chunk_free.move_and_resize_chunk_without_updating_next_chunk(
new_end_addr,
space_left_at_end_of_next_free_chunk - HEADER_SIZE,
self,
);
chunk.set_size(new_size);
} else {
if let Some(next_chunk_of_next_chunk_addr) =
next_chunk_free.header.next_chunk_addr(self.heap_end_addr)
{
Chunk::set_prev_in_use_for_chunk_with_addr(next_chunk_of_next_chunk_addr, true);
}
next_chunk_free.unlink(&mut self.smallbins);
chunk.set_size(next_chunk_end_addr - chunk.content_addr());
}
true
}
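    /// Shrinks the chunk in place, handing the freed tail to the next chunk if
    /// it is free, or splitting it off as a new free chunk when it is large
    /// enough to stand on its own.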
unsafe fn shrink_in_place(&mut self, chunk: UsedChunkRef, new_size: usize) {
let new_end_addr = chunk.content_addr() + new_size;
match chunk.next_chunk_if_free(self.heap_end_addr) {
Some(next_chunk_free) => {
next_chunk_free.move_and_resize_chunk_without_updating_next_chunk(
new_end_addr,
next_chunk_free.end_addr() - new_end_addr - HEADER_SIZE,
self,
);
chunk.set_size(new_size);
}
None => {
let space_left_at_end = chunk.0.size() - new_size;
if space_left_at_end >= MIN_FREE_CHUNK_SIZE_INCLUDING_HEADER {
let end_padding_chunk_size = space_left_at_end - HEADER_SIZE;
let _ = FreeChunk::create_new_and_update_next_chunk(
new_end_addr,
end_padding_chunk_size,
self,
);
chunk.set_size(new_size);
                } else {
                    // Not enough leftover space to split off a valid free
                    // chunk; leave the chunk at its current size.
                }
}
}
}
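    /// Checks whether an allocation with the given size and alignment can be
    /// placed towards the end of `chunk` while leaving enough room at the
    /// start for a valid free chunk; on success, returns the address at which
    /// the allocated chunk's header would start.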
unsafe fn can_alloc_unaligned(
&self,
layout_size: usize,
layout_align: usize,
chunk: FreeChunkRef,
) -> Option<usize> {
let aligned_content_start_addr = align_down(chunk.end_addr() - layout_size, layout_align);
let aligned_start_addr = aligned_content_start_addr - HEADER_SIZE;
let space_left_at_start = aligned_start_addr.saturating_sub(chunk.addr());
if space_left_at_start < MIN_FREE_CHUNK_SIZE_INCLUDING_HEADER {
return None;
}
Some(aligned_start_addr)
}
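    /// Splits `chunk` so that the part before `aligned_start_addr` remains a
    /// free chunk (re-binned to match its new size) and the rest becomes the
    /// allocated chunk, with any large-enough leftover at the end split off as
    /// end padding.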
unsafe fn alloc_unaligned(
&mut self,
layout_size: usize,
chunk: FreeChunkRef,
aligned_start_addr: usize,
) -> NonNull<u8> {
let aligned_content_start_addr = aligned_start_addr + HEADER_SIZE;
let left_size = chunk.end_addr().saturating_sub(aligned_content_start_addr);
let cur_chunk_new_size = aligned_start_addr - chunk.content_addr();
chunk.set_size_and_update_bin(cur_chunk_new_size, self);
self.alloc_unaligned_after_splitting_start_padding(
layout_size,
aligned_start_addr,
left_size,
)
}
unsafe fn alloc_unaligned_after_splitting_start_padding(
&mut self,
layout_size: usize,
allocated_chunk_addr: usize,
allocated_chunk_size: usize,
) -> NonNull<u8> {
let end_padding = allocated_chunk_size - layout_size;
if end_padding >= MIN_FREE_CHUNK_SIZE_INCLUDING_HEADER {
self.alloc_unaligned_after_splitting_start_padding_split_end_padding_chunk(
layout_size,
end_padding,
allocated_chunk_addr,
)
} else {
self.alloc_unaligned_after_splitting_start_padding_no_end_padding(
allocated_chunk_addr,
allocated_chunk_size,
)
}
}
unsafe fn alloc_unaligned_after_splitting_start_padding_no_end_padding(
&mut self,
allocated_chunk_addr: usize,
allocated_chunk_size: usize,
) -> NonNull<u8> {
let chunk = UsedChunk::create_new(
allocated_chunk_addr,
allocated_chunk_size,
false,
self.heap_end_addr,
);
NonNull::new_unchecked(chunk.content_addr() as *mut _)
}
unsafe fn alloc_unaligned_after_splitting_start_padding_split_end_padding_chunk(
&mut self,
layout_size: usize,
end_padding: usize,
allocated_chunk_addr: usize,
) -> NonNull<u8> {
let end_padding_chunk_start_addr = allocated_chunk_addr + HEADER_SIZE + layout_size;
let end_padding_chunk_size = end_padding - HEADER_SIZE;
let _ = FreeChunk::create_new_without_updating_next_chunk(
end_padding_chunk_start_addr,
end_padding_chunk_size,
self,
);
let allocated_chunk = UsedChunk::create_new_without_updating_next_chunk(
allocated_chunk_addr,
layout_size,
false,
);
NonNull::new_unchecked(allocated_chunk.content_addr() as *mut _)
}
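    /// Allocates from a chunk whose content address already satisfies the
    /// required alignment, splitting off an end padding chunk when the
    /// leftover space is large enough to form a valid free chunk.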
unsafe fn alloc_aligned(&mut self, layout_size: usize, chunk: FreeChunkRef) -> NonNull<u8> {
let cur_chunk_size = chunk.size();
let end_padding = cur_chunk_size - layout_size;
if end_padding >= MIN_FREE_CHUNK_SIZE_INCLUDING_HEADER {
self.alloc_aligned_split_end_padding_chunk(layout_size, end_padding, chunk)
} else {
self.alloc_aligned_no_end_padding(chunk)
}
}
fn alloc_aligned_no_end_padding(&mut self, chunk: FreeChunkRef) -> NonNull<u8> {
chunk.mark_as_used_unlink(self.heap_end_addr, &mut self.smallbins);
unsafe { NonNull::new_unchecked(chunk.content_addr() as *mut u8) }
}
unsafe fn alloc_aligned_split_end_padding_chunk(
&mut self,
layout_size: usize,
end_padding: usize,
chunk: FreeChunkRef,
) -> NonNull<u8> {
let end_padding_chunk_start_addr = chunk.content_addr() + layout_size;
chunk.unlink(&mut self.smallbins);
let cur_chunk_as_used = chunk.mark_as_used_without_updating_freelist_and_next_chunk();
let end_padding_chunk_size = end_padding - HEADER_SIZE;
let _ = FreeChunk::create_new_without_updating_next_chunk(
end_padding_chunk_start_addr,
end_padding_chunk_size,
self,
);
cur_chunk_as_used.set_size(layout_size);
NonNull::new_unchecked(chunk.content_addr() as *mut u8)
}
}
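// `Allocator` stores raw pointers into the heap region it manages, which makes
// it `!Send` by default; since the allocator has exclusive access to that
// region, sending it to another thread is sound.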
unsafe impl<const SMALLBINS_AMOUNT: usize, const ALIGNMENT_SUB_BINS_AMOUNT: usize> Send
for Allocator<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>
where
SmallestTypeWhichHasAtLeastNBitsStruct<ALIGNMENT_SUB_BINS_AMOUNT>:
SmallestTypeWhichHasAtLeastNBitsTrait,
{
}
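/// A spin-lock-protected wrapper around [`Allocator`], available with the
/// `spin` feature.
///
/// It can be shared between threads and used as the global allocator through
/// its [`core::alloc::GlobalAlloc`] implementation (and, with the `allocator`
/// feature, through [`core::alloc::Allocator`]). Like [`Allocator`], it must
/// be initialized with [`SpinLockedAllocator::init`] before it can serve
/// allocations.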
#[cfg(feature = "spin")]
pub struct SpinLockedAllocator<
const SMALLBINS_AMOUNT: usize = DEFAULT_SMALLBINS_AMOUNT,
const ALIGNMENT_SUB_BINS_AMOUNT: usize = DEFAULT_ALIGNMENT_SUB_BINS_AMOUNT,
>(spin::Mutex<Allocator<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>>)
where
SmallestTypeWhichHasAtLeastNBitsStruct<ALIGNMENT_SUB_BINS_AMOUNT>:
SmallestTypeWhichHasAtLeastNBitsTrait;
#[cfg(feature = "spin")]
impl<const SMALLBINS_AMOUNT: usize, const ALIGNMENT_SUB_BINS_AMOUNT: usize>
SpinLockedAllocator<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>
where
SmallestTypeWhichHasAtLeastNBitsStruct<ALIGNMENT_SUB_BINS_AMOUNT>:
SmallestTypeWhichHasAtLeastNBitsTrait,
{
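    /// Creates an empty, uninitialized spin-locked allocator; see
    /// [`Allocator::empty`].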
pub const fn empty() -> Self {
Self(spin::Mutex::new(Allocator::empty()))
}
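    /// Initializes the wrapped allocator with the given heap region; see
    /// [`Allocator::init`] for the panics and safety requirements.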
pub unsafe fn init(&self, heap_start_addr: usize, heap_size: usize) {
let mut allocator = self.0.lock();
allocator.init(heap_start_addr, heap_size);
}
pub fn was_initialized(&self) -> bool {
let allocator = self.0.lock();
allocator.was_initialized()
}
}
#[cfg(feature = "spin")]
unsafe impl<const SMALLBINS_AMOUNT: usize, const ALIGNMENT_SUB_BINS_AMOUNT: usize>
core::alloc::GlobalAlloc for SpinLockedAllocator<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>
where
SmallestTypeWhichHasAtLeastNBitsStruct<ALIGNMENT_SUB_BINS_AMOUNT>:
SmallestTypeWhichHasAtLeastNBitsTrait,
{
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let mut allocator = self.0.lock();
allocator.alloc(layout)
}
unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
let mut allocator = self.0.lock();
allocator.dealloc(ptr)
}
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
let mut allocator = self.0.lock();
allocator.realloc(ptr, layout, new_size)
}
}
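// With the nightly `allocator` feature, the spin-locked allocator also
// implements `core::alloc::Allocator`, reusing the in-place shrink and grow
// paths whenever the existing placement already satisfies the new alignment.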
#[cfg(feature = "allocator")]
unsafe impl<const SMALLBINS_AMOUNT: usize, const ALIGNMENT_SUB_BINS_AMOUNT: usize>
core::alloc::Allocator for SpinLockedAllocator<SMALLBINS_AMOUNT, ALIGNMENT_SUB_BINS_AMOUNT>
where
SmallestTypeWhichHasAtLeastNBitsStruct<ALIGNMENT_SUB_BINS_AMOUNT>:
SmallestTypeWhichHasAtLeastNBitsTrait,
{
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
let mut allocator = self.0.lock();
let ptr =
NonNull::new(unsafe { allocator.alloc(layout) }).ok_or(core::alloc::AllocError)?;
Ok(NonNull::slice_from_raw_parts(ptr, layout.size()))
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, _layout: Layout) {
let mut allocator = self.0.lock();
allocator.dealloc(ptr.as_ptr());
}
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
let mut allocator = self.0.lock();
let chunk_content_addr = ptr.as_ptr() as usize;
if is_aligned(chunk_content_addr, new_layout.align()) {
let chunk = UsedChunk::from_addr(chunk_content_addr - HEADER_SIZE);
allocator.shrink_in_place(chunk, new_layout.size());
return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
}
let ptr = NonNull::new(allocator.realloc_new_region(ptr.as_ptr(), old_layout, new_layout))
.ok_or(core::alloc::AllocError)?;
Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()))
}
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
let mut allocator = self.0.lock();
let chunk_content_addr = ptr.as_ptr() as usize;
if is_aligned(chunk_content_addr, new_layout.align()) {
let chunk = UsedChunk::from_addr(chunk_content_addr - HEADER_SIZE);
if allocator.try_grow_in_place(chunk, new_layout.size()) {
return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
}
}
let ptr = NonNull::new(allocator.realloc_new_region(ptr.as_ptr(), old_layout, new_layout))
.ok_or(core::alloc::AllocError)?;
Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()))
}
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
let result = self.grow(ptr, old_layout, new_layout)?;
let ptr = result.as_mut_ptr() as *mut u8;
ptr.add(old_layout.size())
.write_bytes(0, new_layout.size() - old_layout.size());
Ok(result)
}
}