#![cfg(feature = "alloc")]
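//! Slice handles and allocators over a [`BStack`]: a [`BStackSlice`] is a
//! `(offset, len)` view tied to the allocator that produced it, with `io`
//! reader/writer adapters and two allocation strategies (a linear bump
//! allocator and a persistent first-fit allocator).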
use crate::BStack;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::io;
use std::ops::Range;
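/// A borrowed `(offset, len)` view into the [`BStack`] behind an allocator.
///
/// The handle itself is plain data (`Copy`); it does not own the bytes, and
/// its offsets are only validated by the underlying stack when the slice is
/// actually read or written.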
pub struct BStackSlice<'a, A: BStackAllocator> {
allocator: &'a A,
offset: u64,
len: u64,
}
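// Clone/Copy are implemented by hand: deriving them would add an unwanted
// `A: Clone`/`A: Copy` bound even though only a `&A` is stored.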
impl<'a, A: BStackAllocator> Clone for BStackSlice<'a, A> {
fn clone(&self) -> Self {
*self
}
}
impl<'a, A: BStackAllocator> Copy for BStackSlice<'a, A> {}
impl<'a, A: BStackAllocator> fmt::Debug for BStackSlice<'a, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BStackSlice")
.field("start", &self.start())
.field("end", &self.end())
.field("len", &self.len())
.finish_non_exhaustive()
}
}
impl<'a, A: BStackAllocator> BStackSlice<'a, A> {
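    /// Wraps an existing `(offset, len)` region of `allocator`'s stack. The
    /// bounds are taken on trust and only checked by the underlying
    /// [`BStack`] on access.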
#[inline]
pub fn new(allocator: &'a A, offset: u64, len: u64) -> Self {
Self {
allocator,
offset,
len,
}
}
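    /// Serializes the handle to 16 bytes: absolute offset then length, both
    /// little-endian. Handy for persisting a slice handle inside the stack
    /// itself.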
#[inline]
pub fn to_bytes(&self) -> [u8; 16] {
let mut out = [0u8; 16];
out[..8].copy_from_slice(&self.offset.to_le_bytes());
out[8..].copy_from_slice(&self.len.to_le_bytes());
out
}
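    /// Inverse of [`BStackSlice::to_bytes`]: reattaches a serialized handle
    /// to `allocator`.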
#[inline]
pub fn from_bytes(allocator: &'a A, bytes: [u8; 16]) -> Self {
let offset = u64::from_le_bytes(bytes[..8].try_into().unwrap());
let len = u64::from_le_bytes(bytes[8..].try_into().unwrap());
Self {
allocator,
offset,
len,
}
}
#[inline]
pub fn start(&self) -> u64 {
self.offset
}
#[inline]
pub fn end(&self) -> u64 {
self.offset + self.len
}
#[inline]
pub fn range(&self) -> Range<u64> {
self.start()..self.end()
}
#[inline]
pub fn len(&self) -> u64 {
self.len
}
#[inline]
pub fn is_empty(&self) -> bool {
self.len == 0
}
#[inline]
pub fn allocator(&self) -> &'a A {
self.allocator
}
#[inline]
pub fn stack(&self) -> &BStack {
self.allocator.stack()
}
#[inline]
pub fn subslice(&self, start: u64, end: u64) -> BStackSlice<'a, A> {
self.subslice_range(start..end)
}
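    /// Returns a narrower view of this slice; `range` is relative to the
    /// slice's own start.
    ///
    /// # Panics
    /// Panics if the range is inverted or extends past the slice's length.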
pub fn subslice_range(&self, range: Range<u64>) -> BStackSlice<'a, A> {
assert!(range.start <= range.end, "range start must be <= end");
assert!(range.end <= self.len, "range end must be <= slice length");
BStackSlice {
allocator: self.allocator,
offset: self.offset + range.start,
len: range.end - range.start,
}
}
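    /// Reads the whole slice into a fresh `Vec<u8>`.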
pub fn read(&self) -> io::Result<Vec<u8>> {
self.stack().get(self.start(), self.end())
}
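    /// Copies bytes from the start of the slice into `buf`, stopping at
    /// whichever of the two is shorter; any remainder of `buf` is left
    /// untouched.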
pub fn read_into(&self, buf: &mut [u8]) -> io::Result<()> {
let n = (buf.len() as u64).min(self.len()) as usize;
self.stack().get_into(self.start(), &mut buf[..n])
}
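    /// Fills `buf` from the slice starting at relative offset `start`,
    /// failing with `InvalidInput` if the requested range does not fit
    /// inside the slice.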
pub fn read_range_into(&self, start: u64, buf: &mut [u8]) -> io::Result<()> {
let end_rel = start + buf.len() as u64;
if end_rel > self.len() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!(
"range [{start}, {end_rel}) exceeds slice length {}",
self.len()
),
));
}
self.stack().get_into(self.start() + start, buf)
}
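    /// Writes `data` at the start of the slice, silently truncating it to
    /// the slice's length (mirroring [`BStackSlice::read_into`]).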
#[cfg(feature = "set")]
pub fn write(&self, data: impl AsRef<[u8]>) -> io::Result<()> {
let data = data.as_ref();
let n = (data.len() as u64).min(self.len()) as usize;
self.stack().set(self.start(), &data[..n])
}
#[cfg(feature = "set")]
pub fn write_range(&self, start: u64, data: impl AsRef<[u8]>) -> io::Result<()> {
let data = data.as_ref();
let end_rel = start + data.len() as u64;
if end_rel > self.len() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!(
"range [{start}, {end_rel}) exceeds slice length {}",
self.len()
),
));
}
self.stack().set(self.start() + start, data)
}
#[cfg(feature = "set")]
pub fn zero(&self) -> io::Result<()> {
self.stack().zero(self.start(), self.len())
}
#[cfg(feature = "set")]
pub fn zero_range(&self, start: u64, n: u64) -> io::Result<()> {
let end_rel = start + n;
if end_rel > self.len() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!(
"range [{start}, {end_rel}) exceeds slice length {}",
self.len()
),
));
}
self.stack().zero(self.start() + start, n)
}
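    /// Returns an [`io::Read`] + [`io::Seek`] cursor positioned at the start
    /// of the slice; use [`BStackSlice::reader_at`] to start elsewhere.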
pub fn reader(&self) -> BStackSliceReader<'a, A> {
BStackSliceReader {
slice: *self,
cursor: 0,
}
}
pub fn reader_at(&self, offset: u64) -> BStackSliceReader<'a, A> {
BStackSliceReader {
slice: *self,
cursor: offset,
}
}
#[cfg(feature = "set")]
pub fn writer(&self) -> BStackSliceWriter<'a, A> {
BStackSliceWriter {
slice: *self,
cursor: 0,
}
}
#[cfg(feature = "set")]
pub fn writer_at(&self, offset: u64) -> BStackSliceWriter<'a, A> {
BStackSliceWriter {
slice: *self,
cursor: offset,
}
}
}
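// Equality, ordering and hashing consider only `(offset, len)`: two handles
// to the same region compare equal regardless of which allocator reference
// they carry.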
impl<'a, A: BStackAllocator> PartialEq for BStackSlice<'a, A> {
fn eq(&self, other: &Self) -> bool {
self.offset == other.offset && self.len == other.len
}
}
impl<'a, A: BStackAllocator> Eq for BStackSlice<'a, A> {}
impl<'a, A: BStackAllocator> Hash for BStackSlice<'a, A> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.offset.hash(state);
self.len.hash(state);
}
}
impl<'a, A: BStackAllocator> PartialOrd for BStackSlice<'a, A> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl<'a, A: BStackAllocator> Ord for BStackSlice<'a, A> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.offset
.cmp(&other.offset)
.then(self.len.cmp(&other.len))
}
}
impl<'a, A: BStackAllocator> From<BStackSlice<'a, A>> for [u8; 16] {
fn from(slice: BStackSlice<'a, A>) -> Self {
slice.to_bytes()
}
}
impl<'a, A: BStackAllocator> From<BStackSlice<'a, A>> for BStackSliceReader<'a, A> {
fn from(slice: BStackSlice<'a, A>) -> Self {
slice.reader()
}
}
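/// A cursor implementing [`io::Read`] and [`io::Seek`] over a
/// [`BStackSlice`]. Cloning the reader also clones its position.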
pub struct BStackSliceReader<'a, A: BStackAllocator> {
slice: BStackSlice<'a, A>,
cursor: u64,
}
impl<'a, A: BStackAllocator> Clone for BStackSliceReader<'a, A> {
fn clone(&self) -> Self {
Self {
slice: self.slice,
cursor: self.cursor,
}
}
}
impl<'a, A: BStackAllocator> fmt::Debug for BStackSliceReader<'a, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BStackSliceReader")
.field("start", &self.slice.start())
.field("end", &self.slice.end())
.field("len", &self.slice.len())
.field("cursor", &self.cursor)
.finish_non_exhaustive()
}
}
impl<'a, A: BStackAllocator> BStackSliceReader<'a, A> {
#[inline]
pub fn position(&self) -> u64 {
self.cursor
}
#[inline]
pub fn slice(&self) -> BStackSlice<'a, A> {
self.slice
}
}
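// Reads advance the cursor and return `Ok(0)` once it is at (or was seeked
// past) the end of the slice.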
impl<'a, A: BStackAllocator> io::Read for BStackSliceReader<'a, A> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if buf.is_empty() || self.cursor >= self.slice.len {
return Ok(0);
}
let available = (self.slice.len - self.cursor) as usize;
let n = buf.len().min(available);
let abs_start = self.slice.offset + self.cursor;
self.slice.stack().get_into(abs_start, &mut buf[..n])?;
self.cursor += n as u64;
Ok(n)
}
}
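// As with `File`, seeking past the end is allowed; subsequent reads just
// return `Ok(0)`. Only seeking before the start is an error.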
impl<'a, A: BStackAllocator> io::Seek for BStackSliceReader<'a, A> {
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
let len = self.slice.len as i128;
let new_pos = match pos {
io::SeekFrom::Start(n) => n as i128,
io::SeekFrom::End(n) => len + n as i128,
io::SeekFrom::Current(n) => self.cursor as i128 + n as i128,
};
if new_pos < 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"seek before beginning of slice",
));
}
self.cursor = new_pos as u64;
Ok(self.cursor)
}
}
impl<'a, A: BStackAllocator> PartialEq for BStackSliceReader<'a, A> {
fn eq(&self, other: &Self) -> bool {
self.slice == other.slice && self.cursor == other.cursor
}
}
impl<'a, A: BStackAllocator> Eq for BStackSliceReader<'a, A> {}
impl<'a, A: BStackAllocator> Hash for BStackSliceReader<'a, A> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.slice.hash(state);
self.cursor.hash(state);
}
}
impl<'a, A: BStackAllocator> PartialOrd for BStackSliceReader<'a, A> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl<'a, A: BStackAllocator> Ord for BStackSliceReader<'a, A> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
let self_pos = self.slice.start() + self.cursor;
let other_pos = other.slice.start() + other.cursor;
self_pos
.cmp(&other_pos)
.then(self.slice.len().cmp(&other.slice.len()))
}
}
impl<'a, A: BStackAllocator> From<BStackSliceReader<'a, A>> for BStackSlice<'a, A> {
fn from(reader: BStackSliceReader<'a, A>) -> Self {
reader.slice()
}
}
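/// The writing counterpart of [`BStackSliceReader`]: an [`io::Write`] +
/// [`io::Seek`] cursor that cannot grow the slice, so writes truncate at the
/// slice's end and a full slice reports `Ok(0)` (which `write_all` surfaces
/// as `WriteZero`).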
#[cfg(feature = "set")]
pub struct BStackSliceWriter<'a, A: BStackAllocator> {
slice: BStackSlice<'a, A>,
cursor: u64,
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> Clone for BStackSliceWriter<'a, A> {
fn clone(&self) -> Self {
Self {
slice: self.slice,
cursor: self.cursor,
}
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> fmt::Debug for BStackSliceWriter<'a, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BStackSliceWriter")
.field("start", &self.slice.start())
.field("end", &self.slice.end())
.field("len", &self.slice.len())
.field("cursor", &self.cursor)
.finish_non_exhaustive()
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> BStackSliceWriter<'a, A> {
#[inline]
pub fn position(&self) -> u64 {
self.cursor
}
#[inline]
pub fn slice(&self) -> BStackSlice<'a, A> {
self.slice
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> io::Write for BStackSliceWriter<'a, A> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if buf.is_empty() || self.cursor >= self.slice.len {
return Ok(0);
}
let available = (self.slice.len - self.cursor) as usize;
let n = buf.len().min(available);
let abs_start = self.slice.offset + self.cursor;
self.slice.stack().set(abs_start, &buf[..n])?;
self.cursor += n as u64;
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> io::Seek for BStackSliceWriter<'a, A> {
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
let len = self.slice.len as i128;
let new_pos = match pos {
io::SeekFrom::Start(n) => n as i128,
io::SeekFrom::End(n) => len + n as i128,
io::SeekFrom::Current(n) => self.cursor as i128 + n as i128,
};
if new_pos < 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"seek before beginning of slice",
));
}
self.cursor = new_pos as u64;
Ok(self.cursor)
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> PartialEq for BStackSliceWriter<'a, A> {
fn eq(&self, other: &Self) -> bool {
self.slice == other.slice && self.cursor == other.cursor
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> Eq for BStackSliceWriter<'a, A> {}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> Hash for BStackSliceWriter<'a, A> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.slice.hash(state);
self.cursor.hash(state);
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> PartialOrd for BStackSliceWriter<'a, A> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> Ord for BStackSliceWriter<'a, A> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
let self_pos = self.slice.start() + self.cursor;
let other_pos = other.slice.start() + other.cursor;
self_pos
.cmp(&other_pos)
.then(self.slice.len().cmp(&other.slice.len()))
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> From<BStackSlice<'a, A>> for BStackSliceWriter<'a, A> {
fn from(slice: BStackSlice<'a, A>) -> Self {
slice.writer()
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> From<BStackSliceWriter<'a, A>> for BStackSlice<'a, A> {
fn from(writer: BStackSliceWriter<'a, A>) -> Self {
writer.slice()
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> From<BStackSliceReader<'a, A>> for BStackSliceWriter<'a, A> {
fn from(reader: BStackSliceReader<'a, A>) -> Self {
BStackSliceWriter {
slice: reader.slice,
cursor: reader.cursor,
}
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> From<BStackSliceWriter<'a, A>> for BStackSliceReader<'a, A> {
fn from(writer: BStackSliceWriter<'a, A>) -> Self {
BStackSliceReader {
slice: writer.slice,
cursor: writer.cursor,
}
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> PartialEq<BStackSliceWriter<'a, A>> for BStackSliceReader<'a, A> {
fn eq(&self, other: &BStackSliceWriter<'a, A>) -> bool {
self.slice == other.slice && self.cursor == other.cursor
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> PartialEq<BStackSliceReader<'a, A>> for BStackSliceWriter<'a, A> {
fn eq(&self, other: &BStackSliceReader<'a, A>) -> bool {
self.slice == other.slice && self.cursor == other.cursor
}
}
impl<'a, A: BStackAllocator> PartialEq<BStackSlice<'a, A>> for BStackSliceReader<'a, A> {
fn eq(&self, other: &BStackSlice<'a, A>) -> bool {
&self.slice == other
}
}
impl<'a, A: BStackAllocator> PartialEq<BStackSliceReader<'a, A>> for BStackSlice<'a, A> {
fn eq(&self, other: &BStackSliceReader<'a, A>) -> bool {
self == &other.slice
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> PartialEq<BStackSlice<'a, A>> for BStackSliceWriter<'a, A> {
fn eq(&self, other: &BStackSlice<'a, A>) -> bool {
&self.slice == other
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> PartialEq<BStackSliceWriter<'a, A>> for BStackSlice<'a, A> {
fn eq(&self, other: &BStackSliceWriter<'a, A>) -> bool {
self == &other.slice
}
}
impl<'a, A: BStackAllocator> PartialOrd<BStackSliceReader<'a, A>> for BStackSlice<'a, A> {
fn partial_cmp(&self, other: &BStackSliceReader<'a, A>) -> Option<std::cmp::Ordering> {
Some(self.cmp(&other.slice()))
}
}
impl<'a, A: BStackAllocator> PartialOrd<BStackSlice<'a, A>> for BStackSliceReader<'a, A> {
fn partial_cmp(&self, other: &BStackSlice<'a, A>) -> Option<std::cmp::Ordering> {
Some(self.slice().cmp(other))
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> PartialOrd<BStackSlice<'a, A>> for BStackSliceWriter<'a, A> {
fn partial_cmp(&self, other: &BStackSlice<'a, A>) -> Option<std::cmp::Ordering> {
Some(self.slice().cmp(other))
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> PartialOrd<BStackSliceWriter<'a, A>> for BStackSliceReader<'a, A> {
fn partial_cmp(&self, other: &BStackSliceWriter<'a, A>) -> Option<std::cmp::Ordering> {
let self_pos = self.slice.start() + self.cursor;
let other_pos = other.slice().start() + other.position();
Some(
self_pos
.cmp(&other_pos)
.then(self.slice.len().cmp(&other.slice().len())),
)
}
}
#[cfg(feature = "set")]
impl<'a, A: BStackAllocator> PartialOrd<BStackSliceReader<'a, A>> for BStackSliceWriter<'a, A> {
fn partial_cmp(&self, other: &BStackSliceReader<'a, A>) -> Option<std::cmp::Ordering> {
let self_pos = self.slice.start() + self.cursor;
let other_pos = other.slice().start() + other.position();
Some(
self_pos
.cmp(&other_pos)
.then(self.slice.len().cmp(&other.slice().len())),
)
}
}
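/// An allocation strategy for carving [`BStackSlice`]s out of a [`BStack`].
///
/// `realloc` may relocate the data (the returned handle can point at a new
/// offset), and `dealloc` defaults to a leaking no-op for allocators that
/// cannot reclaim interior space.
///
/// A minimal usage sketch, marked `ignore` because it assumes some way of
/// obtaining a `BStack`, which this module does not provide:
///
/// ```ignore
/// let allocator = LinearBStackAllocator::new(stack);
/// let slice = allocator.alloc(64)?; // 64 fresh bytes at the tail
/// slice.write(b"hello")?; // needs the `set` feature
/// let slice = allocator.realloc(slice, 128)?; // may move under other allocators
/// allocator.dealloc(slice)?; // reclaimed here because it is the tail slice
/// ```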
pub trait BStackAllocator: Sized {
fn stack(&self) -> &BStack;
fn into_stack(self) -> BStack;
fn alloc(&self, len: u64) -> io::Result<BStackSlice<'_, Self>>;
fn realloc<'a>(
&'a self,
slice: BStackSlice<'a, Self>,
new_len: u64,
) -> io::Result<BStackSlice<'a, Self>>;
fn dealloc(&self, _slice: BStackSlice<'_, Self>) -> io::Result<()> {
Ok(())
}
fn len(&self) -> io::Result<u64> {
self.stack().len()
}
fn is_empty(&self) -> io::Result<bool> {
self.stack().is_empty()
}
}
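/// A bump allocator: `alloc` always appends to the tail of the stack, and
/// only the slice currently at the tail can be resized or actually
/// reclaimed. Deallocating a non-tail slice succeeds but leaks the bytes.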
pub struct LinearBStackAllocator {
stack: BStack,
}
impl LinearBStackAllocator {
pub fn new(stack: BStack) -> Self {
Self { stack }
}
}
impl fmt::Debug for LinearBStackAllocator {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("LinearBStackAllocator")
.finish_non_exhaustive()
}
}
impl From<BStack> for LinearBStackAllocator {
fn from(stack: BStack) -> Self {
Self::new(stack)
}
}
impl From<LinearBStackAllocator> for BStack {
fn from(alloc: LinearBStackAllocator) -> Self {
alloc.into_stack()
}
}
impl BStackAllocator for LinearBStackAllocator {
fn stack(&self) -> &BStack {
&self.stack
}
fn into_stack(self) -> BStack {
self.stack
}
fn alloc(&self, len: u64) -> io::Result<BStackSlice<'_, Self>> {
let offset = self.stack.extend(len)?;
Ok(BStackSlice::new(self, offset, len))
}
fn realloc<'a>(
&'a self,
slice: BStackSlice<'a, Self>,
new_len: u64,
) -> io::Result<BStackSlice<'a, Self>> {
let current_tail = self.stack.len()?;
if slice.end() != current_tail {
return Err(io::Error::new(
io::ErrorKind::Unsupported,
"LinearBStackAllocator::realloc: non-tail slice cannot be resized in place",
));
}
match new_len.cmp(&slice.len()) {
std::cmp::Ordering::Equal => Ok(slice),
std::cmp::Ordering::Greater => {
self.stack.extend(new_len - slice.len())?;
Ok(BStackSlice::new(self, slice.start(), new_len))
}
std::cmp::Ordering::Less => {
self.stack.discard(slice.len() - new_len)?;
Ok(BStackSlice::new(self, slice.start(), new_len))
}
}
}
fn dealloc(&self, slice: BStackSlice<'_, Self>) -> io::Result<()> {
let current_tail = self.stack.len()?;
if slice.end() == current_tail {
self.stack.discard(slice.len())?;
}
Ok(())
}
}
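/// Magic stamped into the first-fit allocator header: `b"ALFF"`, a NUL, and
/// what appear to be version bytes (only the first six bytes are checked on
/// reopen, via `ALFF_MAGIC_PREFIX`).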
#[cfg(feature = "set")]
const ALFF_MAGIC: [u8; 8] = *b"ALFF\x00\x01\x01\x00";
#[cfg(feature = "set")]
const ALFF_MAGIC_PREFIX: [u8; 6] = *b"ALFF\x00\x01";
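/// A first-fit free-list allocator with boundary-tag coalescing whose
/// metadata lives entirely inside the [`BStack`], so a heap can be closed
/// and reopened. Mutating operations raise a recovery flag first and clear
/// it afterwards; [`FirstFitBStackAllocator::new`] runs a full recovery scan
/// if the flag is still set, e.g. after a crash.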
#[cfg(feature = "set")]
pub struct FirstFitBStackAllocator {
stack: BStack,
}
#[cfg(feature = "set")]
impl FirstFitBStackAllocator {
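    // On-stack layout (all integers little-endian):
    //   bytes [0, 16):   reserved, untouched by the allocator (room for one
    //                    serialized `BStackSlice`)
    //   bytes [16, 24):  `ALFF_MAGIC`
    //   bytes [24, 28):  recovery flag (non-zero means a scan is needed)
    //   bytes [32, 40):  `FREE_HEAD_OFFSET`: payload offset of the first free
    //                    block (0 = empty free list)
    // The remaining header bytes are zero padding. Every block is a 16-byte
    // header (8-byte payload size, 8-byte flags with bit 0 = free), the
    // payload, and an 8-byte footer repeating the size. Free blocks reuse
    // their first 16 payload bytes as the next/prev pointers of an intrusive
    // doubly linked free list.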
const OFFSET_SIZE: u64 = 16;
const HEADER_SIZE: u64 = 32;
const BLOCK_HEADER_SIZE: u64 = 16;
const BLOCK_FOOTER_SIZE: u64 = 8;
const BLOCK_OVERHEAD_SIZE: u64 = Self::BLOCK_HEADER_SIZE + Self::BLOCK_FOOTER_SIZE;
const MIN_BLOCK_PAYLOAD_SIZE: u64 = 16;
const FREE_HEAD_OFFSET: u64 = Self::OFFSET_SIZE + 16;
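    /// Opens (or initializes) an allocator on `stack`: an empty stack gets a
    /// fresh header pushed, while a non-empty one is validated against the
    /// magic, with a recovery scan if the recovery flag is set or the stored
    /// free-list head is implausible.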
pub fn new(stack: BStack) -> Result<Self, io::Error> {
if stack.is_empty()? {
let mut hdr = [0u8; (Self::OFFSET_SIZE + Self::HEADER_SIZE) as usize];
hdr[Self::OFFSET_SIZE as usize..Self::OFFSET_SIZE as usize + ALFF_MAGIC.len()]
.copy_from_slice(&ALFF_MAGIC);
stack.push(hdr)?;
return Ok(Self { stack });
}
let stack_len = stack.len()?;
if stack_len < Self::OFFSET_SIZE + Self::HEADER_SIZE {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"stack too short to contain allocator header",
));
}
let header = stack.get(Self::OFFSET_SIZE, Self::OFFSET_SIZE + Self::HEADER_SIZE)?;
if header[..ALFF_MAGIC_PREFIX.len()] != ALFF_MAGIC_PREFIX {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"invalid magic prefix: expected ALFF\\x00\\x01",
));
}
let mut recovery_needed = header[ALFF_MAGIC.len()] & 1 != 0;
let free_head = u64::from_le_bytes(
header[ALFF_MAGIC.len() + 8..ALFF_MAGIC.len() + 16]
.try_into()
.unwrap(),
);
if free_head != 0 {
let stack_len = stack.len()?;
if free_head < Self::OFFSET_SIZE + Self::HEADER_SIZE + Self::BLOCK_HEADER_SIZE
|| free_head >= stack_len
{
recovery_needed = true;
}
}
let alloc = Self { stack };
if recovery_needed {
alloc.recovery()?;
}
Ok(alloc)
}
#[inline]
fn set_recovery_needed(&self) -> io::Result<()> {
self.stack
.set(Self::OFFSET_SIZE + 8, 1u32.to_le_bytes().as_slice())
}
#[inline]
fn clear_recovery_needed(&self) -> io::Result<()> {
self.stack.set(Self::OFFSET_SIZE + 8, [0u8; 4].as_slice())
}
#[inline]
fn is_impossible_block_size(&self, size: u64) -> bool {
size < Self::MIN_BLOCK_PAYLOAD_SIZE || size > self.len().unwrap_or(u64::MAX)
}
#[inline]
fn is_impossible_block_start(&self, start: u64) -> bool {
!start.is_multiple_of(8)
|| start < Self::OFFSET_SIZE + Self::HEADER_SIZE + Self::BLOCK_HEADER_SIZE
|| start >= self.len().unwrap_or(u64::MAX)
}
#[inline]
fn is_impossible_block_end(&self, end: u64) -> bool {
end < Self::OFFSET_SIZE
+ Self::HEADER_SIZE
+ Self::BLOCK_HEADER_SIZE
+ Self::MIN_BLOCK_PAYLOAD_SIZE
|| end > self.len().unwrap_or(u64::MAX) - Self::BLOCK_FOOTER_SIZE
}
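    /// Rounds a requested payload length up to the allocator's granularity:
    /// at least `MIN_BLOCK_PAYLOAD_SIZE` bytes and a multiple of 8.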
#[inline]
fn align_len(&self, len: u64) -> u64 {
len.max(Self::MIN_BLOCK_PAYLOAD_SIZE).next_multiple_of(8)
}
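    /// Unlinks the free block whose payload starts at `payload_start` from
    /// the doubly linked free list, patching its neighbours (or the list
    /// head).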
fn unlink_from_free_list(&self, payload_start: u64) -> io::Result<()> {
let mut ptrs = [0u8; 16];
self.stack.get_into(payload_start, &mut ptrs)?;
let next = u64::from_le_bytes(ptrs[0..8].try_into().unwrap());
let prev = u64::from_le_bytes(ptrs[8..16].try_into().unwrap());
if prev != 0 {
self.stack.set(prev, next.to_le_bytes())?;
} else {
self.stack.set(Self::FREE_HEAD_OFFSET, next.to_le_bytes())?;
}
if next != 0 {
self.stack.set(next + 8, prev.to_le_bytes())?;
}
Ok(())
}
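    /// Marks the block whose payload starts at `block_start` as free,
    /// coalesces it with a free successor and/or predecessor found via the
    /// boundary tags, and pushes the result onto the head of the free list.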
fn add_to_free_list(&self, block_start: u64) -> io::Result<()> {
let stack_len = self.stack.len()?;
let arena_start = Self::OFFSET_SIZE + Self::HEADER_SIZE;
let block_header_start = block_start - Self::BLOCK_HEADER_SIZE;
let mut size_buf = [0u8; 8];
self.stack.get_into(block_header_start, &mut size_buf)?;
let mut size = u64::from_le_bytes(size_buf);
let mut result_header_start = block_header_start;
self.stack.set(block_header_start + 8, 1u32.to_le_bytes())?;
let next_header = block_header_start + Self::BLOCK_OVERHEAD_SIZE + size;
if next_header + Self::BLOCK_HEADER_SIZE <= stack_len {
let mut next_hdr = [0u8; 16];
self.stack.get_into(next_header, &mut next_hdr)?;
let next_size = u64::from_le_bytes(next_hdr[0..8].try_into().unwrap());
if next_hdr[8] & 1 != 0
&& next_size >= Self::MIN_BLOCK_PAYLOAD_SIZE
&& next_size % 8 == 0
&& next_header + Self::BLOCK_OVERHEAD_SIZE + next_size <= stack_len
{
self.unlink_from_free_list(next_header + Self::BLOCK_HEADER_SIZE)?;
size += next_size + Self::BLOCK_OVERHEAD_SIZE;
}
}
if block_header_start > arena_start {
let mut prev_footer_buf = [0u8; 8];
self.stack.get_into(
block_header_start - Self::BLOCK_FOOTER_SIZE,
&mut prev_footer_buf,
)?;
let prev_size = u64::from_le_bytes(prev_footer_buf);
if prev_size >= Self::MIN_BLOCK_PAYLOAD_SIZE
&& prev_size % 8 == 0
&& let Some(prev_header) = block_header_start
.checked_sub(prev_size + Self::BLOCK_OVERHEAD_SIZE)
.filter(|&h| h >= arena_start)
{
let mut prev_hdr = [0u8; 16];
self.stack.get_into(prev_header, &mut prev_hdr)?;
let prev_hdr_size = u64::from_le_bytes(prev_hdr[0..8].try_into().unwrap());
if prev_hdr[8] & 1 != 0 && prev_hdr_size == prev_size {
self.unlink_from_free_list(prev_header + Self::BLOCK_HEADER_SIZE)?;
size += prev_size + Self::BLOCK_OVERHEAD_SIZE;
result_header_start = prev_header;
}
}
}
let result_start = result_header_start + Self::BLOCK_HEADER_SIZE;
self.stack.set(result_header_start, size.to_le_bytes())?;
self.stack.set(result_start + size, size.to_le_bytes())?;
let mut head_buf = [0u8; 8];
self.stack.get_into(Self::FREE_HEAD_OFFSET, &mut head_buf)?;
let next_block = u64::from_le_bytes(head_buf);
let mut update_buf = [0u8; 24];
        update_buf[0..4].copy_from_slice(&1u32.to_le_bytes());
        update_buf[8..16].copy_from_slice(&next_block.to_le_bytes());
        self.stack
            .set(result_start - Self::BLOCK_HEADER_SIZE + 8, update_buf)?;
self.stack
.set(Self::FREE_HEAD_OFFSET, result_start.to_le_bytes())?;
if next_block != 0 {
self.stack.set(next_block + 8, result_start.to_le_bytes())?;
}
Ok(())
}
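    /// Walks the free list and returns `(payload_offset, payload_size)` of
    /// the first block that can hold `size` bytes, or `(0, 0)` if none fits.
    /// Each visited block is validated, and corruption is reported as
    /// `InvalidData`.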
fn find_large_enough_block(&self, size: u64) -> io::Result<(u64, u64)> {
let mut block_found = 0u64;
let mut found_size = 0u64;
let mut head = u64::from_le_bytes(
self.stack
.get(Self::FREE_HEAD_OFFSET, Self::FREE_HEAD_OFFSET + 8)?
.try_into()
.unwrap(),
);
while head != 0 {
let size_flags_and_ptr_buf = &mut [0u8; Self::BLOCK_HEADER_SIZE as usize + 8];
self.stack
.get_into(head - Self::BLOCK_HEADER_SIZE, size_flags_and_ptr_buf)?;
let block_size = u64::from_le_bytes(size_flags_and_ptr_buf[0..8].try_into().unwrap());
let is_free = size_flags_and_ptr_buf[8] & 1 != 0;
debug_assert!(
is_free,
"corrupted free list: block at offset {head} is not marked free"
);
if !is_free {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("corrupted free list: block at offset {head} is not marked free"),
));
} else if self.is_impossible_block_size(block_size) || block_size % 8 != 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"corrupted free list: block at offset {head} has invalid size {block_size}"
),
));
}
if block_size >= size {
block_found = head;
found_size = block_size;
break;
}
head = u64::from_le_bytes(
size_flags_and_ptr_buf
[Self::BLOCK_HEADER_SIZE as usize..(Self::BLOCK_HEADER_SIZE as usize + 8)]
.try_into()
.unwrap(),
);
if head != 0 && self.is_impossible_block_start(head) {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("corrupted free list: next block offset {head} is invalid"),
));
}
}
Ok((block_found, found_size))
}
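    /// Carves an allocation of `requested_size` bytes out of the free block
    /// at `found_start`/`found_size`. If the block is big enough to split,
    /// the allocation is placed at its high end and the shrunken remainder
    /// stays on the free list; otherwise the whole block is unlinked and
    /// handed out. `content_buffer` supplies the bytes written into the new
    /// payload, letting callers fuse the carve with a data copy.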
fn unlink_block(
&self,
found_start: u64,
found_size: u64,
requested_size: u64,
content_buffer: &mut [u8],
) -> io::Result<()> {
if found_size >= requested_size + Self::BLOCK_OVERHEAD_SIZE + Self::MIN_BLOCK_PAYLOAD_SIZE {
let remaining_size = found_size - requested_size - Self::BLOCK_OVERHEAD_SIZE;
content_buffer[(requested_size + Self::BLOCK_HEADER_SIZE) as usize
..(requested_size + Self::BLOCK_OVERHEAD_SIZE) as usize]
.copy_from_slice(&requested_size.to_le_bytes());
let update_buf = &mut [0u8; Self::BLOCK_OVERHEAD_SIZE as usize];
update_buf[..8].copy_from_slice(&remaining_size.to_le_bytes());
update_buf[8..16].copy_from_slice(&requested_size.to_le_bytes());
self.stack.set(found_start + remaining_size, update_buf)?;
self.stack.set(
found_start + remaining_size + Self::BLOCK_OVERHEAD_SIZE,
&content_buffer[Self::BLOCK_HEADER_SIZE as usize..],
)?;
self.stack.set(
found_start - Self::BLOCK_HEADER_SIZE,
remaining_size.to_le_bytes().as_slice(),
)?;
Ok(())
} else {
let mut pointers_buf = [0u8; 16];
self.stack.get_into(found_start, &mut pointers_buf)?;
let next = u64::from_le_bytes(pointers_buf[0..8].try_into().unwrap());
let prev = u64::from_le_bytes(pointers_buf[8..16].try_into().unwrap());
if prev != 0 {
self.stack.set(prev, next.to_le_bytes())?;
} else {
self.stack.set(Self::FREE_HEAD_OFFSET, next.to_le_bytes())?;
}
if next != 0 {
self.stack.set(next + 8, prev.to_le_bytes())?;
}
content_buffer[8..16].copy_from_slice(&[0u8; 8]);
self.stack.set(
found_start - Self::BLOCK_HEADER_SIZE + 8,
&content_buffer[8..Self::BLOCK_HEADER_SIZE as usize + requested_size as usize],
)?;
Ok(())
}
}
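    /// Repeatedly pops free blocks off the tail of the stack, unlinking each
    /// from the free list, so that freed tail space is returned to the stack
    /// instead of lingering on the free list.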
fn cascade_discard_free_tail(&self) -> io::Result<()> {
let arena_start = Self::OFFSET_SIZE + Self::HEADER_SIZE;
let mut needs_clear = false;
loop {
let tail = self.stack.len()?;
if tail <= arena_start {
break;
}
let mut footer_buf = [0u8; 8];
self.stack
.get_into(tail - Self::BLOCK_FOOTER_SIZE, &mut footer_buf)?;
let sz = u64::from_le_bytes(footer_buf);
let Some(hdr) = tail
.checked_sub(sz + Self::BLOCK_OVERHEAD_SIZE)
.filter(|&h| h >= arena_start && sz >= Self::MIN_BLOCK_PAYLOAD_SIZE && sz % 8 == 0)
else {
break;
};
let mut hdr_buf = [0u8; 16];
self.stack.get_into(hdr, &mut hdr_buf)?;
let hdr_size = u64::from_le_bytes(hdr_buf[0..8].try_into().unwrap());
if hdr_buf[8] & 1 == 0 || hdr_size != sz {
break;
}
if !needs_clear {
self.set_recovery_needed()?;
needs_clear = true;
}
self.unlink_from_free_list(hdr + Self::BLOCK_HEADER_SIZE)?;
self.stack.discard(sz + Self::BLOCK_OVERHEAD_SIZE)?;
}
if needs_clear {
self.clear_recovery_needed()?;
}
Ok(())
}
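    /// Rebuilds the allocator's metadata by scanning every block from the
    /// start of the arena: trailing garbage is truncated, headers left
    /// inconsistent by an interrupted split are repaired via the footers,
    /// and the free list is rechained in address order.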
fn recovery(&self) -> io::Result<()> {
let arena_start = Self::OFFSET_SIZE + Self::HEADER_SIZE;
let stack_len = self.stack.len()?;
let mut pos = arena_start;
let mut free_blocks: Vec<u64> = Vec::new();
while pos < stack_len {
let remaining = stack_len - pos;
if remaining < Self::BLOCK_OVERHEAD_SIZE {
self.stack.discard(remaining)?;
break;
}
let mut hdr_buf = [0u8; 16];
self.stack.get_into(pos, &mut hdr_buf)?;
let mut size = u64::from_le_bytes(hdr_buf[0..8].try_into().unwrap());
let is_free = hdr_buf[8] & 1 != 0;
let mut block_total = match size.checked_add(Self::BLOCK_OVERHEAD_SIZE).filter(|&t| {
size >= Self::MIN_BLOCK_PAYLOAD_SIZE && size % 8 == 0 && pos + t <= stack_len
}) {
Some(t) => t,
None => {
self.stack.discard(stack_len - pos)?;
break;
}
};
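            // An interrupted split (see `unlink_block`) writes the inner
            // footer/header pair before shrinking this block's header. If the
            // outer footer disagrees with the header but a consistent inner
            // footer and second header exist, finish the split by shrinking
            // this header to the inner size `r`.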
{
let mut outer_footer_buf = [0u8; 8];
self.stack
.get_into(pos + Self::BLOCK_HEADER_SIZE + size, &mut outer_footer_buf)?;
let f = u64::from_le_bytes(outer_footer_buf);
if f != size
&& f >= Self::MIN_BLOCK_PAYLOAD_SIZE
&& f % 8 == 0
&& let Some(r) = size
.checked_sub(f)
.and_then(|d| d.checked_sub(Self::BLOCK_OVERHEAD_SIZE))
.filter(|&r| r >= Self::MIN_BLOCK_PAYLOAD_SIZE && r % 8 == 0)
{
let inner_footer_pos = pos + Self::BLOCK_HEADER_SIZE + r;
let second_hdr_pos = inner_footer_pos + Self::BLOCK_FOOTER_SIZE;
if second_hdr_pos + Self::BLOCK_HEADER_SIZE <= stack_len {
let mut inner_footer_buf = [0u8; 8];
let mut second_size_buf = [0u8; 8];
self.stack
.get_into(inner_footer_pos, &mut inner_footer_buf)?;
self.stack.get_into(second_hdr_pos, &mut second_size_buf)?;
if u64::from_le_bytes(inner_footer_buf) == r
&& u64::from_le_bytes(second_size_buf) == f
{
self.stack.set(pos, r.to_le_bytes().as_slice())?;
size = r;
block_total = r + Self::BLOCK_OVERHEAD_SIZE;
}
}
}
}
if is_free {
free_blocks.push(pos + Self::BLOCK_HEADER_SIZE);
}
pos += block_total;
}
let count = free_blocks.len();
for i in 0..count {
let curr = free_blocks[i];
let next = if i + 1 < count { free_blocks[i + 1] } else { 0 };
let prev = if i > 0 { free_blocks[i - 1] } else { 0 };
let mut ptr_buf = [0u8; 16];
ptr_buf[0..8].copy_from_slice(&next.to_le_bytes());
ptr_buf[8..16].copy_from_slice(&prev.to_le_bytes());
self.stack.set(curr, ptr_buf)?;
}
let new_free_head = free_blocks.first().copied().unwrap_or(0);
self.stack
.set(Self::FREE_HEAD_OFFSET, new_free_head.to_le_bytes())?;
self.clear_recovery_needed()
}
}
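// Allocation prefers reusing a free-list block and otherwise grows the
// stack. Tail blocks are freed by discarding; interior blocks go onto the
// free list. Multi-step metadata updates are bracketed by the recovery flag
// so that a crash mid-update is repaired on the next open.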
#[cfg(feature = "set")]
impl BStackAllocator for FirstFitBStackAllocator {
fn stack(&self) -> &BStack {
&self.stack
}
fn into_stack(self) -> BStack {
self.stack
}
fn alloc(&self, len: u64) -> io::Result<BStackSlice<'_, Self>> {
let aligned_len = self.align_len(len);
let block_found = self.find_large_enough_block(aligned_len)?;
if block_found.0 != 0 {
let mut zero_buf = vec![0u8; (Self::BLOCK_OVERHEAD_SIZE + aligned_len) as usize];
self.set_recovery_needed()?;
self.unlink_block(
block_found.0,
block_found.1,
aligned_len,
zero_buf.as_mut_slice(),
)?;
self.clear_recovery_needed()?;
let payload = if block_found.1
>= aligned_len + Self::BLOCK_OVERHEAD_SIZE + Self::MIN_BLOCK_PAYLOAD_SIZE
{
block_found.0 + block_found.1 - aligned_len
} else {
block_found.0
};
Ok(BStackSlice::new(self, payload, len))
} else {
let mut block_buf = vec![0u8; (aligned_len + Self::BLOCK_OVERHEAD_SIZE) as usize];
block_buf[..8].copy_from_slice(&aligned_len.to_le_bytes());
block_buf[(aligned_len + Self::BLOCK_HEADER_SIZE) as usize..]
.copy_from_slice(&aligned_len.to_le_bytes());
let ptr = self.stack.push(&block_buf)? + Self::BLOCK_HEADER_SIZE;
Ok(BStackSlice::new(self, ptr, len))
}
}
fn dealloc(&self, slice: BStackSlice<'_, Self>) -> io::Result<()> {
let aligned_len = self.align_len(slice.len());
if self.is_impossible_block_start(slice.start())
|| self.is_impossible_block_end(slice.start() + aligned_len)
|| self.is_impossible_block_size(aligned_len)
{
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"invalid slice: start or end offset is impossible",
));
}
let current_tail = self.stack.len()?;
if slice.start() + aligned_len == current_tail - Self::BLOCK_FOOTER_SIZE {
self.stack
.discard(aligned_len + Self::BLOCK_OVERHEAD_SIZE)?;
self.cascade_discard_free_tail()?;
return Ok(());
}
self.set_recovery_needed()?;
self.add_to_free_list(slice.start())?;
self.clear_recovery_needed()
}
fn realloc<'a>(
&'a self,
slice: BStackSlice<'a, Self>,
new_len: u64,
) -> io::Result<BStackSlice<'a, Self>> {
let aligned_current_len = self.align_len(slice.len());
if self.is_impossible_block_start(slice.start())
|| self.is_impossible_block_end(slice.start() + aligned_current_len)
|| self.is_impossible_block_size(aligned_current_len)
{
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"invalid slice: start or end offset is impossible",
));
}
let aligned_new_len = self.align_len(new_len);
if aligned_new_len == aligned_current_len {
if new_len > slice.len() {
self.stack
.zero(slice.start() + slice.len(), new_len - slice.len())?;
}
return Ok(BStackSlice::new(self, slice.start(), new_len));
}
let current_tail = self.stack.len()?;
if slice.start() + aligned_current_len == current_tail - Self::BLOCK_FOOTER_SIZE {
match aligned_new_len.cmp(&aligned_current_len) {
                std::cmp::Ordering::Equal => return Ok(slice),
                std::cmp::Ordering::Greater => {
self.stack.extend(aligned_new_len - aligned_current_len)?;
self.stack.zero(
slice.start() + slice.len(),
aligned_current_len + Self::BLOCK_FOOTER_SIZE - slice.len(),
)?;
self.stack.set(
slice.start() - Self::BLOCK_HEADER_SIZE,
aligned_new_len.to_le_bytes(),
)?;
self.stack.set(
slice.start() + aligned_new_len,
aligned_new_len.to_le_bytes(),
)?;
return Ok(BStackSlice::new(self, slice.start(), new_len));
}
std::cmp::Ordering::Less => {
self.stack.set(
slice.start() + aligned_new_len,
aligned_new_len.to_le_bytes(),
)?;
self.stack.set(
slice.start() - Self::BLOCK_HEADER_SIZE,
aligned_new_len.to_le_bytes(),
)?;
self.stack.discard(aligned_current_len - aligned_new_len)?;
return Ok(BStackSlice::new(self, slice.start(), new_len));
}
}
}
let block_size_buf = self.stack.get(
slice.start() - Self::BLOCK_HEADER_SIZE,
slice.start() - Self::BLOCK_HEADER_SIZE + 8,
)?;
let block_size = u64::from_le_bytes(block_size_buf.try_into().unwrap());
if block_size >= aligned_new_len {
if new_len > slice.len() {
self.stack
.zero(slice.start() + slice.len(), new_len - slice.len())?;
}
return Ok(BStackSlice::new(self, slice.start(), new_len));
}
let next_block = slice.start() + block_size + Self::BLOCK_OVERHEAD_SIZE;
if next_block <= self.stack.len()? - Self::BLOCK_FOOTER_SIZE - Self::MIN_BLOCK_PAYLOAD_SIZE
{
let mut next_hdr_buf = [0u8; 16];
self.stack
.get_into(next_block - Self::BLOCK_HEADER_SIZE, &mut next_hdr_buf)?;
let next_block_size = u64::from_le_bytes(next_hdr_buf[0..8].try_into().unwrap());
let next_block_is_free = next_hdr_buf[8] & 1 != 0;
if next_block_is_free
&& next_block_size >= Self::MIN_BLOCK_PAYLOAD_SIZE
&& next_block_size % 8 == 0
&& block_size + Self::BLOCK_OVERHEAD_SIZE + next_block_size >= aligned_new_len
{
if slice.len() < block_size {
self.stack
.zero(slice.start() + slice.len(), block_size - slice.len())?;
}
self.set_recovery_needed()?;
self.unlink_from_free_list(next_block)?;
let merged_size = block_size + Self::BLOCK_OVERHEAD_SIZE + next_block_size;
let mut zero_buff = vec![
0u8;
(next_block_size + Self::BLOCK_OVERHEAD_SIZE + Self::BLOCK_FOOTER_SIZE)
as usize
];
if merged_size
>= aligned_new_len + Self::BLOCK_OVERHEAD_SIZE + Self::MIN_BLOCK_PAYLOAD_SIZE
{
let remainder_size = merged_size - aligned_new_len - Self::BLOCK_OVERHEAD_SIZE;
let new_free_start =
slice.start() + aligned_new_len + Self::BLOCK_OVERHEAD_SIZE;
let mut head_buf = [0u8; 8];
self.stack.get_into(Self::FREE_HEAD_OFFSET, &mut head_buf)?;
let old_head = u64::from_le_bytes(head_buf);
let alloc_footer_off = (aligned_new_len - block_size) as usize;
let free_hdr_off = alloc_footer_off + Self::BLOCK_FOOTER_SIZE as usize;
let free_payload_off = alloc_footer_off + Self::BLOCK_OVERHEAD_SIZE as usize;
let free_footer_off = (next_block_size + Self::BLOCK_OVERHEAD_SIZE) as usize;
zero_buff[alloc_footer_off..alloc_footer_off + 8]
.copy_from_slice(&aligned_new_len.to_le_bytes());
zero_buff[free_hdr_off..free_hdr_off + 8]
.copy_from_slice(&remainder_size.to_le_bytes());
                    zero_buff[free_hdr_off + 8..free_hdr_off + 12]
                        .copy_from_slice(&1u32.to_le_bytes());
                    zero_buff[free_payload_off..free_payload_off + 8]
                        .copy_from_slice(&old_head.to_le_bytes());
                    zero_buff[free_footer_off..free_footer_off + 8]
                        .copy_from_slice(&remainder_size.to_le_bytes());
self.stack.set(
slice.start() - Self::BLOCK_HEADER_SIZE,
merged_size.to_le_bytes(),
)?;
self.stack.set(slice.start() + block_size, &zero_buff)?;
self.stack.set(
slice.start() - Self::BLOCK_HEADER_SIZE,
aligned_new_len.to_le_bytes(),
)?;
self.stack
.set(Self::FREE_HEAD_OFFSET, new_free_start.to_le_bytes())?;
if old_head != 0 {
self.stack.set(old_head + 8, new_free_start.to_le_bytes())?;
}
} else {
self.stack.set(
slice.start() - Self::BLOCK_HEADER_SIZE,
merged_size.to_le_bytes(),
)?;
zero_buff[(next_block_size + Self::BLOCK_OVERHEAD_SIZE) as usize..]
.copy_from_slice(&merged_size.to_le_bytes());
self.stack.set(slice.start() + block_size, &zero_buff)?;
}
self.clear_recovery_needed()?;
return Ok(BStackSlice::new(self, slice.start(), new_len));
}
}
let block_found = self.find_large_enough_block(aligned_new_len)?;
if block_found.0 != 0 {
let copy_len = slice.len().min(aligned_new_len);
let mut data_buf = vec![0u8; (Self::BLOCK_OVERHEAD_SIZE + aligned_new_len) as usize];
self.stack.get_into(
slice.start(),
&mut data_buf[Self::BLOCK_HEADER_SIZE as usize
..(copy_len + Self::BLOCK_HEADER_SIZE) as usize],
)?;
self.set_recovery_needed()?;
self.unlink_block(
block_found.0,
block_found.1,
aligned_new_len,
data_buf.as_mut_slice(),
)?;
let new_payload = if block_found.1
>= aligned_new_len + Self::BLOCK_OVERHEAD_SIZE + Self::MIN_BLOCK_PAYLOAD_SIZE
{
block_found.0 + block_found.1 - aligned_new_len
} else {
block_found.0
};
self.add_to_free_list(slice.start())?;
self.clear_recovery_needed()?;
Ok(BStackSlice::new(self, new_payload, new_len))
} else {
let copy_len = (slice.len().min(aligned_new_len)) as usize;
let mut block_buf = vec![0u8; (aligned_new_len + Self::BLOCK_OVERHEAD_SIZE) as usize];
block_buf[..8].copy_from_slice(&aligned_new_len.to_le_bytes());
self.stack.get_into(
slice.start(),
&mut block_buf
[Self::BLOCK_HEADER_SIZE as usize..Self::BLOCK_HEADER_SIZE as usize + copy_len],
)?;
block_buf[(aligned_new_len + Self::BLOCK_HEADER_SIZE) as usize..]
.copy_from_slice(&aligned_new_len.to_le_bytes());
self.set_recovery_needed()?;
let ptr = self.stack.push(&block_buf)? + Self::BLOCK_HEADER_SIZE;
self.add_to_free_list(slice.start())?;
self.clear_recovery_needed()?;
Ok(BStackSlice::new(self, ptr, new_len))
}
}
}