use super::{BStackAllocator, BStackSlice};
use crate::BStack;
use std::io;
/// On-disk magic written into the allocator header of a fresh stack:
/// "ALFF" tag followed by four version/format bytes.
#[cfg(feature = "set")]
const ALFF_MAGIC: [u8; 8] = *b"ALFF\x00\x01\x01\x00";
/// Prefix of the magic that is validated when opening an existing stack;
/// the two trailing bytes of `ALFF_MAGIC` are allowed to differ.
#[cfg(feature = "set")]
const ALFF_MAGIC_PREFIX: [u8; 6] = *b"ALFF\x00\x01";
/// First-fit, boundary-tag allocator layered on top of a [`BStack`].
///
/// Blocks carry a size header and a size footer; free blocks are kept in a
/// doubly linked free list threaded through their payloads. A persisted
/// recovery flag in the header allows a crashed mutation to be detected and
/// repaired the next time the stack is opened (see `new` / `recovery`).
#[cfg(feature = "set")]
pub struct FirstFitBStackAllocator {
    // Underlying persistent stack holding the header and all blocks.
    stack: BStack,
}
#[cfg(feature = "set")]
impl FirstFitBStackAllocator {
    // ---- On-disk layout ----------------------------------------------------
    // [0, OFFSET_SIZE)                   reserved prefix (never touched here)
    // [OFFSET_SIZE, OFFSET_SIZE + 32)    allocator header:
    //     +0..8   magic (ALFF_MAGIC)
    //     +8..16  recovery flag (bit 0 of the first byte; only 4 bytes written)
    //     +16..24 free-list head: payload offset of first free block, 0 = empty
    //     +24..32 unused/reserved -- TODO confirm
    // Each block: 16-byte header (size as u64 LE, then flags where bit 0 of
    // byte 8 = "free"), payload (multiple of 8, >= MIN_BLOCK_PAYLOAD_SIZE),
    // and an 8-byte footer repeating the size (enables backward coalescing).
    // Free blocks reuse payload[0..8] as `next` and payload[8..16] as `prev`
    // (payload-start offsets, 0 = null), forming a doubly linked free list.
    const OFFSET_SIZE: u64 = 16;
    const HEADER_SIZE: u64 = 32;
    const BLOCK_HEADER_SIZE: u64 = 16;
    const BLOCK_FOOTER_SIZE: u64 = 8;
    const BLOCK_OVERHEAD_SIZE: u64 = Self::BLOCK_HEADER_SIZE + Self::BLOCK_FOOTER_SIZE;
    // Minimum payload: must have room for the two 8-byte free-list pointers.
    const MIN_BLOCK_PAYLOAD_SIZE: u64 = 16;
    // Absolute offset of the free-list head field inside the allocator header.
    const FREE_HEAD_OFFSET: u64 = Self::OFFSET_SIZE + 16;

    /// Opens an allocator over `stack`.
    ///
    /// An empty stack is initialized with the reserved prefix plus a zeroed
    /// header carrying [`ALFF_MAGIC`]. A non-empty stack is validated: it must
    /// be long enough to hold the header and start with [`ALFF_MAGIC_PREFIX`].
    /// If the persisted recovery flag is set, or the stored free-list head is
    /// clearly out of range, a full [`Self::recovery`] scan runs before the
    /// allocator is returned.
    ///
    /// # Errors
    /// `InvalidData` for a too-short stack or bad magic; underlying stack I/O
    /// errors are propagated.
    pub fn new(stack: BStack) -> Result<Self, io::Error> {
        if stack.is_empty()? {
            // Fresh stack: reserved prefix + header, everything zero except
            // the magic (flag clear, free list empty).
            let mut hdr = [0u8; (Self::OFFSET_SIZE + Self::HEADER_SIZE) as usize];
            hdr[Self::OFFSET_SIZE as usize..Self::OFFSET_SIZE as usize + ALFF_MAGIC.len()]
                .copy_from_slice(&ALFF_MAGIC);
            stack.push(hdr)?;
            return Ok(Self { stack });
        }
        let stack_len = stack.len()?;
        if stack_len < Self::OFFSET_SIZE + Self::HEADER_SIZE {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "stack too short to contain allocator header",
            ));
        }
        let header = stack.get(Self::OFFSET_SIZE, Self::OFFSET_SIZE + Self::HEADER_SIZE)?;
        // Only the 6-byte prefix is checked, so the trailing two version
        // bytes of the magic may differ from ALFF_MAGIC.
        if header[..ALFF_MAGIC_PREFIX.len()] != ALFF_MAGIC_PREFIX {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "invalid magic prefix: expected ALFF\\x00\\x01",
            ));
        }
        // Byte 8 (right after the magic) is the persisted "mutation in
        // progress" flag; bit 0 set means a previous process died mid-update
        // and the free list may be inconsistent.
        let mut recovery_needed = header[ALFF_MAGIC.len()] & 1 != 0;
        let free_head = u64::from_le_bytes(
            header[ALFF_MAGIC.len() + 8..ALFF_MAGIC.len() + 16]
                .try_into()
                .unwrap(),
        );
        if free_head != 0 {
            let stack_len = stack.len()?;
            // A non-null head must point at a payload inside the arena.
            if free_head < Self::OFFSET_SIZE + Self::HEADER_SIZE + Self::BLOCK_HEADER_SIZE
                || free_head >= stack_len
            {
                recovery_needed = true;
            }
        }
        let alloc = Self { stack };
        if recovery_needed {
            alloc.recovery()?;
        }
        Ok(alloc)
    }

    /// Persists the "mutation in progress" flag (see [`Self::new`]).
    /// Writes 4 bytes into the 8-byte flag field; only bit 0 is ever read.
    #[inline]
    fn set_recovery_needed(&self) -> io::Result<()> {
        self.stack
            .set(Self::OFFSET_SIZE + 8, 1u32.to_le_bytes().as_slice())
    }

    /// Clears the "mutation in progress" flag.
    #[inline]
    fn clear_recovery_needed(&self) -> io::Result<()> {
        self.stack.set(Self::OFFSET_SIZE + 8, [0u8; 4].as_slice())
    }

    /// Plausibility check for a payload size read from disk. A failed
    /// stack-length read degrades to permissive (`u64::MAX`).
    /// NOTE(review): `self.len()` is presumably provided by the
    /// `BStackAllocator` trait (not visible in this chunk) -- confirm.
    #[inline]
    fn is_impossible_block_size(&self, size: u64) -> bool {
        size < Self::MIN_BLOCK_PAYLOAD_SIZE || size > self.len().unwrap_or(u64::MAX)
    }

    /// Plausibility check for a payload-start offset: must be 8-byte aligned
    /// and lie after the header plus one block header, inside the stack.
    #[inline]
    fn is_impossible_block_start(&self, start: u64) -> bool {
        !start.is_multiple_of(8)
            || start < Self::OFFSET_SIZE + Self::HEADER_SIZE + Self::BLOCK_HEADER_SIZE
            || start >= self.len().unwrap_or(u64::MAX)
    }

    /// Plausibility check for a payload-end offset: must be past at least one
    /// minimal block and leave room for the trailing footer.
    #[inline]
    fn is_impossible_block_end(&self, end: u64) -> bool {
        end < Self::OFFSET_SIZE
            + Self::HEADER_SIZE
            + Self::BLOCK_HEADER_SIZE
            + Self::MIN_BLOCK_PAYLOAD_SIZE
            || end > self.len().unwrap_or(u64::MAX) - Self::BLOCK_FOOTER_SIZE
    }

    /// Rounds a requested length up to allocation granularity: at least
    /// `MIN_BLOCK_PAYLOAD_SIZE`, and a multiple of 8.
    #[inline]
    fn align_len(&self, len: u64) -> u64 {
        len.max(Self::MIN_BLOCK_PAYLOAD_SIZE).next_multiple_of(8)
    }

    /// Removes the free block whose payload starts at `payload_start` from
    /// the doubly linked free list, relinking its neighbours (or the stored
    /// list head when `prev == 0`, i.e. the node was the head).
    fn unlink_from_free_list(&self, payload_start: u64) -> io::Result<()> {
        let mut ptrs = [0u8; 16];
        self.stack.get_into(payload_start, &mut ptrs)?;
        let next = u64::from_le_bytes(ptrs[0..8].try_into().unwrap());
        let prev = u64::from_le_bytes(ptrs[8..16].try_into().unwrap());
        if prev != 0 {
            // prev's `next` pointer is the first word of its payload.
            self.stack.set(prev, next.to_le_bytes())?;
        } else {
            // Node was the list head.
            self.stack.set(Self::FREE_HEAD_OFFSET, next.to_le_bytes())?;
        }
        if next != 0 {
            // next's `prev` pointer is the second word of its payload.
            self.stack.set(next + 8, prev.to_le_bytes())?;
        }
        Ok(())
    }

    /// Marks the block whose payload starts at `block_start` as free,
    /// coalesces it with the physically adjacent free blocks (each validated
    /// via header/footer cross-checks before merging), and pushes the result
    /// onto the front of the free list.
    fn add_to_free_list(&self, block_start: u64) -> io::Result<()> {
        let stack_len = self.stack.len()?;
        let arena_start = Self::OFFSET_SIZE + Self::HEADER_SIZE;
        let block_header_start = block_start - Self::BLOCK_HEADER_SIZE;
        let mut size_buf = [0u8; 8];
        self.stack.get_into(block_header_start, &mut size_buf)?;
        let mut size = u64::from_le_bytes(size_buf);
        let mut result_header_start = block_header_start;
        // Mark free first, so a crash before the list surgery still leaves a
        // recoverable free-marked block for `recovery` to pick up.
        self.stack.set(block_header_start + 8, 1u32.to_le_bytes())?;
        // Forward coalesce: the physically following block, if free & sane.
        let next_header = block_header_start + Self::BLOCK_OVERHEAD_SIZE + size;
        if next_header + Self::BLOCK_HEADER_SIZE <= stack_len {
            let mut next_hdr = [0u8; 16];
            self.stack.get_into(next_header, &mut next_hdr)?;
            let next_size = u64::from_le_bytes(next_hdr[0..8].try_into().unwrap());
            if next_hdr[8] & 1 != 0
                && next_size >= Self::MIN_BLOCK_PAYLOAD_SIZE
                && next_size % 8 == 0
                && next_header + Self::BLOCK_OVERHEAD_SIZE + next_size <= stack_len
            {
                self.unlink_from_free_list(next_header + Self::BLOCK_HEADER_SIZE)?;
                size += next_size + Self::BLOCK_OVERHEAD_SIZE;
            }
        }
        // Backward coalesce: locate the previous block via its footer (the 8
        // bytes immediately before our header).
        if block_header_start > arena_start {
            let mut prev_footer_buf = [0u8; 8];
            self.stack.get_into(
                block_header_start - Self::BLOCK_FOOTER_SIZE,
                &mut prev_footer_buf,
            )?;
            let prev_size = u64::from_le_bytes(prev_footer_buf);
            if prev_size >= Self::MIN_BLOCK_PAYLOAD_SIZE
                && prev_size % 8 == 0
                && let Some(prev_header) = block_header_start
                    .checked_sub(prev_size + Self::BLOCK_OVERHEAD_SIZE)
                    .filter(|&h| h >= arena_start)
            {
                let mut prev_hdr = [0u8; 16];
                self.stack.get_into(prev_header, &mut prev_hdr)?;
                let prev_hdr_size = u64::from_le_bytes(prev_hdr[0..8].try_into().unwrap());
                // Merge only when the header agrees with the footer -- guards
                // against interpreting payload bytes as a footer.
                if prev_hdr[8] & 1 != 0 && prev_hdr_size == prev_size {
                    self.unlink_from_free_list(prev_header + Self::BLOCK_HEADER_SIZE)?;
                    size += prev_size + Self::BLOCK_OVERHEAD_SIZE;
                    result_header_start = prev_header;
                }
            }
        }
        let result_start = result_header_start + Self::BLOCK_HEADER_SIZE;
        // Write the merged size into both boundary tags.
        self.stack.set(result_header_start, size.to_le_bytes())?;
        self.stack.set(result_start + size, size.to_le_bytes())?;
        let mut head_buf = [0u8; 8];
        self.stack.get_into(Self::FREE_HEAD_OFFSET, &mut head_buf)?;
        let next_block = u64::from_le_bytes(head_buf);
        // One 24-byte write starting at the flags field covers: flags
        // (free = 1), `next` = old head, `prev` = 0 (this node becomes head).
        let mut update_buf = [0u8; 24];
        update_buf[0..4].copy_from_slice(&1u32.to_le_bytes());
        update_buf[8..16].copy_from_slice(&next_block.to_le_bytes());
        self.stack
            .set(result_start - Self::BLOCK_HEADER_SIZE + 8, update_buf)?;
        self.stack
            .set(Self::FREE_HEAD_OFFSET, result_start.to_le_bytes())?;
        if next_block != 0 {
            self.stack.set(next_block + 8, result_start.to_le_bytes())?;
        }
        Ok(())
    }

    /// First-fit scan of the free list.
    ///
    /// Returns `(payload_start, block_size)` of the first free block with
    /// `block_size >= size`, or `(0, 0)` when nothing fits.
    ///
    /// # Errors
    /// `InvalidData` when a list entry is not marked free, has an impossible
    /// size, or links to an implausible offset.
    fn find_large_enough_block(&self, size: u64) -> io::Result<(u64, u64)> {
        let mut block_found = 0u64;
        let mut found_size = 0u64;
        let mut head = u64::from_le_bytes(
            self.stack
                .get(Self::FREE_HEAD_OFFSET, Self::FREE_HEAD_OFFSET + 8)?
                .try_into()
                .unwrap(),
        );
        while head != 0 {
            // Read the 16-byte block header plus the `next` pointer at once.
            let size_flags_and_ptr_buf = &mut [0u8; Self::BLOCK_HEADER_SIZE as usize + 8];
            self.stack
                .get_into(head - Self::BLOCK_HEADER_SIZE, size_flags_and_ptr_buf)?;
            let block_size = u64::from_le_bytes(size_flags_and_ptr_buf[0..8].try_into().unwrap());
            let is_free = size_flags_and_ptr_buf[8] & 1 != 0;
            debug_assert!(
                is_free,
                "corrupted free list: block at offset {head} is not marked free"
            );
            if !is_free {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("corrupted free list: block at offset {head} is not marked free"),
                ));
            } else if self.is_impossible_block_size(block_size) || block_size % 8 != 0 {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!(
                        "corrupted free list: block at offset {head} has invalid size {block_size}"
                    ),
                ));
            }
            if block_size >= size {
                // First fit: stop at the first block that is large enough.
                block_found = head;
                found_size = block_size;
                break;
            }
            head = u64::from_le_bytes(
                size_flags_and_ptr_buf
                    [Self::BLOCK_HEADER_SIZE as usize..(Self::BLOCK_HEADER_SIZE as usize + 8)]
                    .try_into()
                    .unwrap(),
            );
            // Validate the link before dereferencing it next iteration.
            if head != 0 && self.is_impossible_block_start(head) {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("corrupted free list: next block offset {head} is invalid"),
                ));
            }
        }
        Ok((block_found, found_size))
    }

    /// Claims `requested_size` bytes out of the free block at payload offset
    /// `found_start` (of size `found_size`).
    ///
    /// When a viable remainder would be left, the block is split: the
    /// allocation is carved from the TAIL of the block, so the shrunk free
    /// block keeps its payload start and therefore its free-list links.
    /// Otherwise the entire block is unlinked and handed out as-is (its
    /// recorded size may exceed `requested_size`).
    ///
    /// `content_buffer` must be `BLOCK_OVERHEAD_SIZE + requested_size` bytes;
    /// bytes `[BLOCK_HEADER_SIZE..BLOCK_HEADER_SIZE + requested_size)` hold
    /// the payload to install (zeroes for `alloc`, copied data for `realloc`).
    fn unlink_block(
        &self,
        found_start: u64,
        found_size: u64,
        requested_size: u64,
        content_buffer: &mut [u8],
    ) -> io::Result<()> {
        if found_size >= requested_size + Self::BLOCK_OVERHEAD_SIZE + Self::MIN_BLOCK_PAYLOAD_SIZE {
            // Split path: the free block shrinks in place to `remaining_size`
            // and the new allocated block occupies the tail.
            let remaining_size = found_size - requested_size - Self::BLOCK_OVERHEAD_SIZE;
            // Footer of the new allocated block, staged in the buffer.
            content_buffer[(requested_size + Self::BLOCK_HEADER_SIZE) as usize
                ..(requested_size + Self::BLOCK_OVERHEAD_SIZE) as usize]
                .copy_from_slice(&requested_size.to_le_bytes());
            // One 24-byte write: the free block's new footer, then the new
            // block's size; its flags bytes stay zero (= allocated).
            let update_buf = &mut [0u8; Self::BLOCK_OVERHEAD_SIZE as usize];
            update_buf[..8].copy_from_slice(&remaining_size.to_le_bytes());
            update_buf[8..16].copy_from_slice(&requested_size.to_le_bytes());
            self.stack.set(found_start + remaining_size, update_buf)?;
            // Payload + footer of the new allocated block.
            self.stack.set(
                found_start + remaining_size + Self::BLOCK_OVERHEAD_SIZE,
                &content_buffer[Self::BLOCK_HEADER_SIZE as usize..],
            )?;
            // Shrink the free block's header size last (write-order matters
            // for the torn-split repair in `recovery`).
            self.stack.set(
                found_start - Self::BLOCK_HEADER_SIZE,
                remaining_size.to_le_bytes().as_slice(),
            )?;
            Ok(())
        } else {
            // Whole-block handout: unlink from the doubly linked free list.
            let mut pointers_buf = [0u8; 16];
            self.stack.get_into(found_start, &mut pointers_buf)?;
            let next = u64::from_le_bytes(pointers_buf[0..8].try_into().unwrap());
            let prev = u64::from_le_bytes(pointers_buf[8..16].try_into().unwrap());
            if prev != 0 {
                self.stack.set(prev, next.to_le_bytes())?;
            } else {
                self.stack.set(Self::FREE_HEAD_OFFSET, next.to_le_bytes())?;
            }
            if next != 0 {
                self.stack.set(next + 8, prev.to_le_bytes())?;
            }
            // Clear the free flag and install the payload in a single write
            // starting at the flags field; the header size is left untouched.
            content_buffer[8..16].copy_from_slice(&[0u8; 8]);
            self.stack.set(
                found_start - Self::BLOCK_HEADER_SIZE + 8,
                &content_buffer[8..Self::BLOCK_HEADER_SIZE as usize + requested_size as usize],
            )?;
            Ok(())
        }
    }

    /// Pops free blocks off the physical end of the stack for as long as the
    /// trailing footer/header pair describes a valid free block, returning
    /// the space to the stack instead of leaving tail blocks on the free
    /// list. The persisted recovery flag brackets the destructive steps.
    fn cascade_discard_free_tail(&self) -> io::Result<()> {
        let arena_start = Self::OFFSET_SIZE + Self::HEADER_SIZE;
        let mut needs_clear = false;
        loop {
            let tail = self.stack.len()?;
            if tail <= arena_start {
                break;
            }
            let mut footer_buf = [0u8; 8];
            self.stack
                .get_into(tail - Self::BLOCK_FOOTER_SIZE, &mut footer_buf)?;
            let sz = u64::from_le_bytes(footer_buf);
            // Locate the header implied by the footer; stop on anything
            // implausible (misaligned, too small, before the arena).
            let Some(hdr) = tail
                .checked_sub(sz + Self::BLOCK_OVERHEAD_SIZE)
                .filter(|&h| h >= arena_start && sz >= Self::MIN_BLOCK_PAYLOAD_SIZE && sz % 8 == 0)
            else {
                break;
            };
            let mut hdr_buf = [0u8; 16];
            self.stack.get_into(hdr, &mut hdr_buf)?;
            let hdr_size = u64::from_le_bytes(hdr_buf[0..8].try_into().unwrap());
            // Header must agree with the footer and be marked free.
            if hdr_buf[8] & 1 == 0 || hdr_size != sz {
                break;
            }
            // Set the flag once, just before the first destructive step.
            if !needs_clear {
                self.set_recovery_needed()?;
                needs_clear = true;
            }
            self.unlink_from_free_list(hdr + Self::BLOCK_HEADER_SIZE)?;
            self.stack.discard(sz + Self::BLOCK_OVERHEAD_SIZE)?;
        }
        if needs_clear {
            self.clear_recovery_needed()?;
        }
        Ok(())
    }

    /// Rebuilds allocator metadata after a detected inconsistency.
    ///
    /// Walks the arena block by block: truncates the stack at the first
    /// malformed header; repairs a torn split (the header still holds the
    /// pre-split size while a consistent inner footer / second header pair
    /// proves the split completed); collects every free-marked block; and
    /// finally rewrites the entire free list in scan order and clears the
    /// recovery flag.
    fn recovery(&self) -> io::Result<()> {
        let arena_start = Self::OFFSET_SIZE + Self::HEADER_SIZE;
        let stack_len = self.stack.len()?;
        let mut pos = arena_start;
        let mut free_blocks: Vec<u64> = Vec::new();
        while pos < stack_len {
            let remaining = stack_len - pos;
            if remaining < Self::BLOCK_OVERHEAD_SIZE {
                // Trailing bytes too short for any block: drop them.
                self.stack.discard(remaining)?;
                break;
            }
            let mut hdr_buf = [0u8; 16];
            self.stack.get_into(pos, &mut hdr_buf)?;
            let mut size = u64::from_le_bytes(hdr_buf[0..8].try_into().unwrap());
            let is_free = hdr_buf[8] & 1 != 0;
            // Validate the advertised size (checked_add guards overflow); on
            // failure, truncate everything from `pos` onward.
            let mut block_total = match size.checked_add(Self::BLOCK_OVERHEAD_SIZE).filter(|&t| {
                size >= Self::MIN_BLOCK_PAYLOAD_SIZE && size % 8 == 0 && pos + t <= stack_len
            }) {
                Some(t) => t,
                None => {
                    self.stack.discard(stack_len - pos)?;
                    break;
                }
            };
            {
                // Torn-split detection: the footer at the advertised end
                // disagrees with the header...
                let mut outer_footer_buf = [0u8; 8];
                self.stack
                    .get_into(pos + Self::BLOCK_HEADER_SIZE + size, &mut outer_footer_buf)?;
                let f = u64::from_le_bytes(outer_footer_buf);
                if f != size
                    && f >= Self::MIN_BLOCK_PAYLOAD_SIZE
                    && f % 8 == 0
                    && let Some(r) = size
                        .checked_sub(f)
                        .and_then(|d| d.checked_sub(Self::BLOCK_OVERHEAD_SIZE))
                        .filter(|&r| r >= Self::MIN_BLOCK_PAYLOAD_SIZE && r % 8 == 0)
                {
                    // ...and an inner footer + second header pair confirms
                    // the split actually completed: shrink this header to the
                    // remainder size `r` so the second block is rediscovered.
                    let inner_footer_pos = pos + Self::BLOCK_HEADER_SIZE + r;
                    let second_hdr_pos = inner_footer_pos + Self::BLOCK_FOOTER_SIZE;
                    if second_hdr_pos + Self::BLOCK_HEADER_SIZE <= stack_len {
                        let mut inner_footer_buf = [0u8; 8];
                        let mut second_size_buf = [0u8; 8];
                        self.stack
                            .get_into(inner_footer_pos, &mut inner_footer_buf)?;
                        self.stack.get_into(second_hdr_pos, &mut second_size_buf)?;
                        if u64::from_le_bytes(inner_footer_buf) == r
                            && u64::from_le_bytes(second_size_buf) == f
                        {
                            self.stack.set(pos, r.to_le_bytes().as_slice())?;
                            size = r;
                            block_total = r + Self::BLOCK_OVERHEAD_SIZE;
                        }
                    }
                }
            }
            if is_free {
                free_blocks.push(pos + Self::BLOCK_HEADER_SIZE);
            }
            pos += block_total;
        }
        // Rewrite every free block's next/prev pointers in scan order.
        let count = free_blocks.len();
        for i in 0..count {
            let curr = free_blocks[i];
            let next = if i + 1 < count { free_blocks[i + 1] } else { 0 };
            let prev = if i > 0 { free_blocks[i - 1] } else { 0 };
            let mut ptr_buf = [0u8; 16];
            ptr_buf[0..8].copy_from_slice(&next.to_le_bytes());
            ptr_buf[8..16].copy_from_slice(&prev.to_le_bytes());
            self.stack.set(curr, ptr_buf)?;
        }
        let new_free_head = free_blocks.first().copied().unwrap_or(0);
        self.stack
            .set(Self::FREE_HEAD_OFFSET, new_free_head.to_le_bytes())?;
        self.clear_recovery_needed()
    }
}
#[cfg(feature = "set")]
impl BStackAllocator for FirstFitBStackAllocator {
    type Error = io::Error;
    type Allocated<'a> = BStackSlice<'a, Self>;

    /// Borrows the underlying stack.
    fn stack(&self) -> &BStack {
        &self.stack
    }

    /// Consumes the allocator, returning the underlying stack.
    fn into_stack(self) -> BStack {
        self.stack
    }

    /// Allocates `len` bytes and returns a slice over the block payload.
    ///
    /// `len == 0` yields the sentinel slice `(start = 0, len = 0)` without
    /// touching the stack. Otherwise the request is rounded up via
    /// `align_len` and served first-fit from the free list (large blocks are
    /// split from their tail), or pushed as a brand-new block at the top of
    /// the stack when nothing fits. The persisted recovery flag brackets the
    /// free-list surgery so a crash mid-update is repaired on next open.
    fn alloc(&self, len: u64) -> io::Result<BStackSlice<'_, Self>> {
        if len == 0 {
            return Ok(unsafe { BStackSlice::from_raw_parts(self, 0, 0) });
        }
        let aligned_len = self.align_len(len);
        let block_found = self.find_large_enough_block(aligned_len)?;
        if block_found.0 != 0 {
            // Reuse a free block; the zeroed buffer doubles as payload
            // content, so freshly allocated memory reads back as zeros.
            let mut zero_buf = vec![0u8; (Self::BLOCK_OVERHEAD_SIZE + aligned_len) as usize];
            self.set_recovery_needed()?;
            self.unlink_block(
                block_found.0,
                block_found.1,
                aligned_len,
                zero_buf.as_mut_slice(),
            )?;
            self.clear_recovery_needed()?;
            // Mirrors unlink_block's split condition: a split places the
            // allocation at the tail of the found block.
            let payload = if block_found.1
                >= aligned_len + Self::BLOCK_OVERHEAD_SIZE + Self::MIN_BLOCK_PAYLOAD_SIZE
            {
                block_found.0 + block_found.1 - aligned_len
            } else {
                block_found.0
            };
            Ok(unsafe { BStackSlice::from_raw_parts(self, payload, len) })
        } else {
            // No fit: append header + zeroed payload + footer at the top.
            let mut block_buf = vec![0u8; (aligned_len + Self::BLOCK_OVERHEAD_SIZE) as usize];
            block_buf[..8].copy_from_slice(&aligned_len.to_le_bytes());
            block_buf[(aligned_len + Self::BLOCK_HEADER_SIZE) as usize..]
                .copy_from_slice(&aligned_len.to_le_bytes());
            let ptr = self.stack.push(&block_buf)? + Self::BLOCK_HEADER_SIZE;
            Ok(unsafe { BStackSlice::from_raw_parts(self, ptr, len) })
        }
    }

    /// Releases `slice`'s block.
    ///
    /// The zero sentinel is a no-op. A block sitting at the physical top of
    /// the stack is discarded outright and the new tail cascaded; any other
    /// block is coalesced onto the free list.
    ///
    /// # Errors
    /// `InvalidInput` when the slice's geometry fails the plausibility checks.
    fn dealloc(&self, slice: BStackSlice<'_, Self>) -> io::Result<()> {
        if slice.is_empty() && slice.start() == 0 {
            return Ok(());
        }
        let aligned_len = self.align_len(slice.len());
        if self.is_impossible_block_start(slice.start())
            || self.is_impossible_block_end(slice.start() + aligned_len)
            || self.is_impossible_block_size(aligned_len)
        {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "invalid slice: start or end offset is impossible",
            ));
        }
        let current_tail = self.stack.len()?;
        // Tail block: return the space to the stack instead of the list.
        if slice.start() + aligned_len == current_tail - Self::BLOCK_FOOTER_SIZE {
            self.stack
                .discard(aligned_len + Self::BLOCK_OVERHEAD_SIZE)?;
            self.cascade_discard_free_tail()?;
            return Ok(());
        }
        self.set_recovery_needed()?;
        self.add_to_free_list(slice.start())?;
        self.clear_recovery_needed()
    }

    /// Resizes `slice` to `new_len`, preserving `min(old, new)` bytes.
    ///
    /// Strategies, tried in order: zero sentinels delegate to
    /// `alloc`/`dealloc`; identical aligned size is reused in place (zeroing
    /// any growth); a block at the physical top of the stack is extended or
    /// shrunk by resizing the stack; a larger-than-advertised underlying
    /// block absorbs the growth; a free physically-following block is merged
    /// in (splitting off a remainder when viable); otherwise the data moves
    /// to a free-list block or a brand-new tail block and the old block is
    /// freed.
    fn realloc<'a>(
        &'a self,
        slice: BStackSlice<'a, Self>,
        new_len: u64,
    ) -> io::Result<BStackSlice<'a, Self>> {
        if slice.is_empty() && slice.start() == 0 {
            return self.alloc(new_len);
        }
        if new_len == 0 {
            self.dealloc(slice)?;
            return Ok(unsafe { BStackSlice::from_raw_parts(self, 0, 0) });
        }
        let aligned_current_len = self.align_len(slice.len());
        if self.is_impossible_block_start(slice.start())
            || self.is_impossible_block_end(slice.start() + aligned_current_len)
            || self.is_impossible_block_size(aligned_current_len)
        {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "invalid slice: start or end offset is impossible",
            ));
        }
        let aligned_new_len = self.align_len(new_len);
        // Same block geometry: reuse in place, zeroing newly exposed bytes.
        if aligned_new_len == aligned_current_len {
            if new_len > slice.len() {
                self.stack
                    .zero(slice.start() + slice.len(), new_len - slice.len())?;
            }
            return Ok(unsafe { BStackSlice::from_raw_parts(self, slice.start(), new_len) });
        }
        let current_tail = self.stack.len()?;
        // Block at the physical top of the stack: resize the stack itself.
        if slice.start() + aligned_current_len == current_tail - Self::BLOCK_FOOTER_SIZE {
            match aligned_new_len.cmp(&aligned_current_len) {
                // Unreachable in practice: equal aligned sizes returned above.
                std::cmp::Ordering::Equal => return Ok(slice),
                std::cmp::Ordering::Greater => {
                    self.stack.extend(aligned_new_len - aligned_current_len)?;
                    // Zero from the end of live data through the old footer.
                    // NOTE(review): bytes gained from `extend` beyond the old
                    // footer are not zeroed here -- presumably `extend`
                    // zero-fills; confirm against BStack's contract.
                    self.stack.zero(
                        slice.start() + slice.len(),
                        aligned_current_len + Self::BLOCK_FOOTER_SIZE - slice.len(),
                    )?;
                    self.stack.set(
                        slice.start() - Self::BLOCK_HEADER_SIZE,
                        aligned_new_len.to_le_bytes(),
                    )?;
                    self.stack.set(
                        slice.start() + aligned_new_len,
                        aligned_new_len.to_le_bytes(),
                    )?;
                    return Ok(unsafe {
                        BStackSlice::from_raw_parts(self, slice.start(), new_len)
                    });
                }
                std::cmp::Ordering::Less => {
                    // Shrink: write the new footer and header, then give the
                    // difference back to the stack.
                    self.stack.set(
                        slice.start() + aligned_new_len,
                        aligned_new_len.to_le_bytes(),
                    )?;
                    self.stack.set(
                        slice.start() - Self::BLOCK_HEADER_SIZE,
                        aligned_new_len.to_le_bytes(),
                    )?;
                    self.stack.discard(aligned_current_len - aligned_new_len)?;
                    return Ok(unsafe {
                        BStackSlice::from_raw_parts(self, slice.start(), new_len)
                    });
                }
            }
        }
        // The real block size may exceed the aligned slice length (whole-
        // block handouts keep their original size), so consult the header.
        let block_size_buf = self.stack.get(
            slice.start() - Self::BLOCK_HEADER_SIZE,
            slice.start() - Self::BLOCK_HEADER_SIZE + 8,
        )?;
        let block_size = u64::from_le_bytes(block_size_buf.try_into().unwrap());
        if block_size >= aligned_new_len {
            if new_len > slice.len() {
                self.stack
                    .zero(slice.start() + slice.len(), new_len - slice.len())?;
            }
            return Ok(unsafe { BStackSlice::from_raw_parts(self, slice.start(), new_len) });
        }
        // Try merging with the physically following block if it is free.
        let next_block = slice.start() + block_size + Self::BLOCK_OVERHEAD_SIZE;
        if next_block <= self.stack.len()? - Self::BLOCK_FOOTER_SIZE - Self::MIN_BLOCK_PAYLOAD_SIZE
        {
            let mut next_hdr_buf = [0u8; 16];
            self.stack
                .get_into(next_block - Self::BLOCK_HEADER_SIZE, &mut next_hdr_buf)?;
            let next_block_size = u64::from_le_bytes(next_hdr_buf[0..8].try_into().unwrap());
            let next_block_is_free = next_hdr_buf[8] & 1 != 0;
            if next_block_is_free
                && next_block_size >= Self::MIN_BLOCK_PAYLOAD_SIZE
                && next_block_size % 8 == 0
                && block_size + Self::BLOCK_OVERHEAD_SIZE + next_block_size >= aligned_new_len
            {
                // Zero the slack between live data and the end of our block.
                if slice.len() < block_size {
                    self.stack
                        .zero(slice.start() + slice.len(), block_size - slice.len())?;
                }
                self.set_recovery_needed()?;
                self.unlink_from_free_list(next_block)?;
                let merged_size = block_size + Self::BLOCK_OVERHEAD_SIZE + next_block_size;
                // Staging buffer spanning from our old footer through the
                // merged block's footer; written at slice.start() + block_size.
                let mut zero_buff = vec![
                    0u8;
                    (next_block_size + Self::BLOCK_OVERHEAD_SIZE + Self::BLOCK_FOOTER_SIZE)
                        as usize
                ];
                if merged_size
                    >= aligned_new_len + Self::BLOCK_OVERHEAD_SIZE + Self::MIN_BLOCK_PAYLOAD_SIZE
                {
                    // Split the merged span: grow in place, and push the
                    // remainder onto the free list as the new head.
                    let remainder_size = merged_size - aligned_new_len - Self::BLOCK_OVERHEAD_SIZE;
                    let new_free_start =
                        slice.start() + aligned_new_len + Self::BLOCK_OVERHEAD_SIZE;
                    let mut head_buf = [0u8; 8];
                    self.stack.get_into(Self::FREE_HEAD_OFFSET, &mut head_buf)?;
                    let old_head = u64::from_le_bytes(head_buf);
                    // Offsets within zero_buff (relative to our old footer).
                    let alloc_footer_off = (aligned_new_len - block_size) as usize;
                    let free_hdr_off = alloc_footer_off + Self::BLOCK_FOOTER_SIZE as usize;
                    let free_payload_off = alloc_footer_off + Self::BLOCK_OVERHEAD_SIZE as usize;
                    let free_footer_off = (next_block_size + Self::BLOCK_OVERHEAD_SIZE) as usize;
                    zero_buff[alloc_footer_off..alloc_footer_off + 8]
                        .copy_from_slice(&aligned_new_len.to_le_bytes());
                    zero_buff[free_hdr_off..free_hdr_off + 8]
                        .copy_from_slice(&remainder_size.to_le_bytes());
                    // Remainder block is free; its `next` is the old list
                    // head, `prev` stays zero because it becomes the head.
                    zero_buff[free_hdr_off + 8..free_hdr_off + 12]
                        .copy_from_slice(&1u32.to_le_bytes());
                    zero_buff[free_payload_off..free_payload_off + 8]
                        .copy_from_slice(&old_head.to_le_bytes());
                    zero_buff[free_footer_off..free_footer_off + 8]
                        .copy_from_slice(&remainder_size.to_le_bytes());
                    // Write order appears deliberate for crash recovery:
                    // first the fully merged size, then the split contents,
                    // then the final (shrunk) size -- matches the torn-split
                    // repair pattern in `recovery`; TODO confirm intent.
                    self.stack.set(
                        slice.start() - Self::BLOCK_HEADER_SIZE,
                        merged_size.to_le_bytes(),
                    )?;
                    self.stack.set(slice.start() + block_size, &zero_buff)?;
                    self.stack.set(
                        slice.start() - Self::BLOCK_HEADER_SIZE,
                        aligned_new_len.to_le_bytes(),
                    )?;
                    self.stack
                        .set(Self::FREE_HEAD_OFFSET, new_free_start.to_le_bytes())?;
                    if old_head != 0 {
                        self.stack.set(old_head + 8, new_free_start.to_le_bytes())?;
                    }
                } else {
                    // Absorb the whole neighbour: our block now spans both.
                    self.stack.set(
                        slice.start() - Self::BLOCK_HEADER_SIZE,
                        merged_size.to_le_bytes(),
                    )?;
                    zero_buff[(next_block_size + Self::BLOCK_OVERHEAD_SIZE) as usize..]
                        .copy_from_slice(&merged_size.to_le_bytes());
                    self.stack.set(slice.start() + block_size, &zero_buff)?;
                }
                self.clear_recovery_needed()?;
                return Ok(unsafe { BStackSlice::from_raw_parts(self, slice.start(), new_len) });
            }
        }
        // Move path: find (or push) a new block, copy data, free the old one.
        let block_found = self.find_large_enough_block(aligned_new_len)?;
        if block_found.0 != 0 {
            let copy_len = slice.len().min(aligned_new_len);
            let mut data_buf = vec![0u8; (Self::BLOCK_OVERHEAD_SIZE + aligned_new_len) as usize];
            self.stack.get_into(
                slice.start(),
                &mut data_buf[Self::BLOCK_HEADER_SIZE as usize
                    ..(copy_len + Self::BLOCK_HEADER_SIZE) as usize],
            )?;
            self.set_recovery_needed()?;
            self.unlink_block(
                block_found.0,
                block_found.1,
                aligned_new_len,
                data_buf.as_mut_slice(),
            )?;
            // Mirrors unlink_block's split decision (see `alloc`).
            let new_payload = if block_found.1
                >= aligned_new_len + Self::BLOCK_OVERHEAD_SIZE + Self::MIN_BLOCK_PAYLOAD_SIZE
            {
                block_found.0 + block_found.1 - aligned_new_len
            } else {
                block_found.0
            };
            self.add_to_free_list(slice.start())?;
            self.clear_recovery_needed()?;
            Ok(unsafe { BStackSlice::from_raw_parts(self, new_payload, new_len) })
        } else {
            // Nothing fits: push a new block at the top with the data copied
            // into its payload, then free the old block.
            let copy_len = (slice.len().min(aligned_new_len)) as usize;
            let mut block_buf = vec![0u8; (aligned_new_len + Self::BLOCK_OVERHEAD_SIZE) as usize];
            block_buf[..8].copy_from_slice(&aligned_new_len.to_le_bytes());
            self.stack.get_into(
                slice.start(),
                &mut block_buf
                    [Self::BLOCK_HEADER_SIZE as usize..Self::BLOCK_HEADER_SIZE as usize + copy_len],
            )?;
            block_buf[(aligned_new_len + Self::BLOCK_HEADER_SIZE) as usize..]
                .copy_from_slice(&aligned_new_len.to_le_bytes());
            self.set_recovery_needed()?;
            let ptr = self.stack.push(&block_buf)? + Self::BLOCK_HEADER_SIZE;
            self.add_to_free_list(slice.start())?;
            self.clear_recovery_needed()?;
            Ok(unsafe { BStackSlice::from_raw_parts(self, ptr, new_len) })
        }
    }
}