use std::io;
use mem_dbg::MemSize;
use crate::encoding::varint;
/// Append-only byte arena: a single growable `Vec<u8>` from which callers
/// obtain contiguous zeroed regions via [`ByteBlockPool::alloc`].
#[derive(Debug)]
pub struct ByteBlockPool {
    // Backing storage; offsets returned by `alloc` index into this buffer.
    pub(crate) data: Vec<u8>,
}
impl MemSize for ByteBlockPool {
    /// Reports the pool's size for `mem_dbg` accounting.
    ///
    /// NOTE(review): this counts only `data.len()` — it ignores the struct
    /// itself and any excess `Vec` capacity, so it understates real heap use
    /// after `reserve_exact` growth. Presumably this "logical size" is
    /// intentional; confirm before relying on it for memory budgeting.
    fn mem_size_rec(
        &self,
        _flags: mem_dbg::SizeFlags,
        _refs: &mut mem_dbg::HashMap<usize, usize>,
    ) -> usize {
        self.data.len()
    }
}
/// Minimum number of extra bytes reserved whenever the pool's buffer must
/// grow, amortizing reallocation across many small `alloc` calls.
const GROWTH_CHUNK: usize = 32 * 1024;
impl ByteBlockPool {
    /// Creates a pool whose backing buffer is pre-sized to `initial_capacity`.
    pub fn new(initial_capacity: usize) -> Self {
        let data = Vec::with_capacity(initial_capacity);
        Self { data }
    }

    /// Appends `n` zeroed bytes to the pool and returns the offset at which
    /// they begin.
    ///
    /// When the buffer must grow, at least `GROWTH_CHUNK` additional bytes
    /// are reserved so that repeated small allocations do not trigger a
    /// reallocation each time.
    pub fn alloc(&mut self, n: usize) -> usize {
        let start = self.data.len();
        let end = start + n;
        if end > self.data.capacity() {
            self.data.reserve_exact(GROWTH_CHUNK.max(n));
        }
        self.data.resize(end, 0);
        start
    }

    /// Empties the pool while retaining its allocated capacity for reuse.
    pub fn reset(&mut self) {
        self.data.clear();
    }
}
// Slice size (in bytes) for each level of the forwarding chain; slices grow
// as a stream proves itself long-lived.
const LEVEL_SIZE_ARRAY: [usize; 10] = [5, 14, 20, 30, 40, 40, 80, 80, 120, 200];
// Level reached after upgrading a slice of the indexed level; the chain tops
// out at level 9, which maps to itself.
const NEXT_LEVEL_ARRAY: [usize; 10] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 9];
/// Size in bytes of a freshly started (level-0) slice.
pub const FIRST_LEVEL_SIZE: usize = LEVEL_SIZE_ARRAY[0];
/// Allocator for chained, growable byte slices inside a `ByteBlockPool`.
/// Each slice ends with a marker byte (`0x10 | level`) that is overwritten
/// by a 4-byte forwarding address when the slice is upgraded.
pub struct ByteSlicePool;

impl ByteSlicePool {
    /// Allocates a fresh level-0 slice of `size` bytes, stamps the
    /// end-of-slice marker into its last byte, and returns its offset.
    pub fn new_slice(pool: &mut ByteBlockPool, size: usize) -> usize {
        let start = pool.alloc(size);
        // Level-0 marker: flag bit 0x10 with a zero level nibble.
        pool.data[start + size - 1] = 0x10;
        start
    }

    /// Grows the chain when the writer reaches the marker byte at `upto`.
    ///
    /// Allocates the next-level slice, relocates the three data bytes that
    /// the forwarding address will clobber, writes the address, and stamps
    /// the new slice's marker. Returns `(first_writable_offset, usable_len)`.
    pub fn alloc_known_size_slice(pool: &mut ByteBlockPool, upto: usize) -> (usize, usize) {
        // The marker's low nibble records the level of the exhausted slice.
        let level = (pool.data[upto] & 0x0F) as usize;
        let next_level = NEXT_LEVEL_ARRAY[level];
        let size = LEVEL_SIZE_ARRAY[next_level];
        let start = pool.alloc(size);
        // Preserve the three data bytes the 4-byte address is about to cover.
        let saved = [pool.data[upto - 3], pool.data[upto - 2], pool.data[upto - 1]];
        // Little-endian forwarding address occupies `upto - 3 ..= upto`.
        pool.data[upto - 3..=upto].copy_from_slice(&(start as i32).to_le_bytes());
        // The displaced bytes move to the head of the new slice.
        pool.data[start..start + 3].copy_from_slice(&saved);
        pool.data[start + size - 1] = 0x10 | next_level as u8;
        (start + 3, size - 3)
    }

    /// Like [`Self::alloc_known_size_slice`] but returns only the offset at
    /// which writing may resume.
    pub fn alloc_slice(pool: &mut ByteBlockPool, upto: usize) -> usize {
        Self::alloc_known_size_slice(pool, upto).0
    }
}
/// Sequential reader over a forwarded slice chain written via
/// [`ByteSlicePool`].
pub struct ByteSliceReader<'a> {
    pool: &'a ByteBlockPool,
    // Current read position (pool offset).
    upto: usize,
    // First unreadable offset in the current slice: either where the 4-byte
    // forwarding address starts, or `end_index` for the final slice.
    limit: usize,
    // Level of the slice the cursor is currently inside.
    level: usize,
    // Exclusive end of the stream, in pool offsets.
    end_index: usize,
}
impl<'a> ByteSliceReader<'a> {
    /// Creates a reader over the slice chain beginning at `start_index` and
    /// ending at `end_index` (exclusive), both in pool offsets.
    pub fn new(pool: &'a ByteBlockPool, start_index: usize, end_index: usize) -> Self {
        debug_assert!(end_index >= start_index);
        // If the whole stream fits inside the first (level-0) slice there is
        // no forwarding address, so reading may run straight to `end_index`;
        // otherwise stop 4 bytes early, where the forwarding address sits.
        let limit = if start_index + FIRST_LEVEL_SIZE >= end_index {
            end_index
        } else {
            start_index + FIRST_LEVEL_SIZE - 4
        };
        Self {
            pool,
            upto: start_index,
            limit,
            level: 0,
            end_index,
        }
    }

    /// True once the cursor has consumed every byte of the stream.
    pub fn eof(&self) -> bool {
        self.upto == self.end_index
    }

    /// Copies exactly `dest.len()` bytes from the chain into `dest`,
    /// transparently following forwarding addresses between slices.
    ///
    /// The caller must not request more bytes than actually remain in the
    /// stream; otherwise `next_slice` would read past the final slice.
    pub fn read_bytes(&mut self, dest: &mut [u8]) {
        let mut offset = 0;
        let mut remaining = dest.len();
        while remaining > 0 {
            // Bytes still readable in the current slice before its link/end.
            let available = self.limit - self.upto;
            if available < remaining {
                // Drain the current slice, then hop to the next one.
                dest[offset..offset + available]
                    .copy_from_slice(&self.pool.data[self.upto..self.upto + available]);
                offset += available;
                remaining -= available;
                self.next_slice();
            } else {
                // The current slice satisfies the rest of the request.
                dest[offset..offset + remaining]
                    .copy_from_slice(&self.pool.data[self.upto..self.upto + remaining]);
                self.upto += remaining;
                break;
            }
        }
    }

    /// Decodes a variable-length i32 starting at the current position.
    pub fn read_vint(&mut self) -> io::Result<i32> {
        varint::read_vint(self)
    }

    /// Follows the 4-byte little-endian forwarding address stored at `limit`
    /// and repositions the cursor at the start of the next (larger) slice.
    fn next_slice(&mut self) {
        let bytes: [u8; 4] = self.pool.data[self.limit..self.limit + 4]
            .try_into()
            .unwrap();
        let next_index = i32::from_le_bytes(bytes) as usize;
        // Each hop climbs the level ladder (capped at the top level).
        self.level = NEXT_LEVEL_ARRAY[self.level];
        let new_size = LEVEL_SIZE_ARRAY[self.level];
        self.upto = next_index;
        // The final slice of the stream carries no forwarding address, so it
        // may be read through to `end_index`; otherwise stop at the link.
        if next_index + new_size >= self.end_index {
            self.limit = self.end_index;
        } else {
            self.limit = self.upto + new_size - 4;
        }
    }
}
impl io::Read for ByteSliceReader<'_> {
    /// Reads from the slice chain, returning at most one slice's worth of
    /// bytes per call (partial reads are permitted by the `Read` contract).
    ///
    /// Fix: the previous implementation used `end_index - self.upto` as the
    /// remaining byte count, but that span also covers the 4-byte forwarding
    /// addresses of slices still ahead of the cursor, so it overstated the
    /// readable byte count. When a caller's buffer exceeded the true
    /// remainder while links were still ahead, `read_bytes` reached
    /// `end_index` with bytes outstanding and `next_slice` then interpreted
    /// bytes past the stream end as a forwarding address (garbage output or
    /// a panic). Capping each call at the current slice's `limit` never
    /// counts link overhead.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if self.eof() || buf.is_empty() {
            return Ok(0);
        }
        // Cursor parked on a forwarding address (read_bytes and this method
        // both stop exactly at `limit`): hop to the next slice first.
        if self.upto == self.limit {
            self.next_slice();
        }
        let len = buf.len().min(self.limit - self.upto);
        buf[..len].copy_from_slice(&self.pool.data[self.upto..self.upto + len]);
        self.upto += len;
        Ok(len)
    }
}
#[cfg(test)]
mod tests {
    use std::io::Read;

    use super::*;
    use assertables::*;

    /// First allocation starts at offset 0 and the bytes are writable.
    #[test]
    fn test_alloc_and_read() {
        let mut pool = ByteBlockPool::new(1024);
        let start = pool.alloc(10);
        assert_eq!(start, 0);
        assert_eq!(pool.data.len(), 10);
        pool.data[0] = 0xAB;
        pool.data[9] = 0xCD;
        assert_eq!(pool.data[0], 0xAB);
        assert_eq!(pool.data[9], 0xCD);
    }

    /// Bytes appended directly to the backing buffer read back unchanged.
    #[test]
    fn test_append_and_read_roundtrip() {
        let mut pool = ByteBlockPool::new(1024);
        let payload: Vec<u8> = (0..=255).collect();
        let start = pool.data.len();
        pool.data.extend_from_slice(&payload);
        assert_eq!(&pool.data[start..start + 256], payload.as_slice());
    }

    /// A 100 KB allocation (forcing growth) stays addressable end to end.
    #[test]
    fn test_large_data() {
        const TOTAL: usize = 100_000;
        let mut pool = ByteBlockPool::new(1024);
        let base = pool.alloc(TOTAL);
        for (i, byte) in pool.data[base..base + TOTAL].iter_mut().enumerate() {
            *byte = (i % 256) as u8;
        }
        for (i, &byte) in pool.data[base..base + TOTAL].iter().enumerate() {
            assert_eq!(byte, (i % 256) as u8);
        }
    }

    /// `reset` drops the contents but keeps the allocated capacity.
    #[test]
    fn test_reset_preserves_capacity() {
        let mut pool = ByteBlockPool::new(1024);
        pool.alloc(50_000);
        let capacity = pool.data.capacity();
        pool.reset();
        assert_eq!(pool.data.len(), 0);
        assert_eq!(pool.data.capacity(), capacity);
    }

    /// A fresh slice carries the level-0 marker in its last byte.
    #[test]
    fn test_new_slice_writes_level_marker() {
        let mut pool = ByteBlockPool::new(1024);
        let start = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
        assert_eq!(start, 0);
        assert_eq!(pool.data[FIRST_LEVEL_SIZE - 1], 0x10);
    }

    /// Upgrading a level-0 slice yields a level-1 slice (14 bytes, 11 usable).
    #[test]
    fn test_level_progression() {
        let mut pool = ByteBlockPool::new(1024);
        let start = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
        pool.data[start] = 0x42;
        let marker = start + FIRST_LEVEL_SIZE - 1;
        let (data_start, usable) = ByteSlicePool::alloc_known_size_slice(&mut pool, marker);
        assert_eq!(usable, 11);
        assert_gt!(data_start, 0);
    }

    /// Repeated upgrades walk levels 1..=5, each marker recording its level.
    #[test]
    fn test_forwarding_chain() {
        let mut pool = ByteBlockPool::new(4096);
        ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
        let mut marker = FIRST_LEVEL_SIZE - 1;
        for level in 1..=5 {
            let size = LEVEL_SIZE_ARRAY[level];
            let data_start = ByteSlicePool::alloc_slice(&mut pool, marker);
            // Three relocated bytes precede the writable region, so the
            // slice's final byte sits at data_start + size - 3 - 1.
            let marker_pos = data_start + size - 3 - 1;
            assert_eq!(pool.data[marker_pos] & 0x0F, level as u8);
            marker = marker_pos;
        }
    }

    /// A 150-byte stream spanning several slices round-trips via `io::Read`.
    #[test]
    fn test_reader_via_io_read() {
        let payload: Vec<u8> = (0..150).map(|i| (i * 7 + 3) as u8).collect();
        let mut pool = ByteBlockPool::new(4096);
        let start = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
        let mut cursor = start;
        for &byte in &payload {
            // A set 0x10 bit means the cursor sits on a slice marker.
            if pool.data[cursor] & 16 != 0 {
                cursor = ByteSlicePool::alloc_slice(&mut pool, cursor);
            }
            pool.data[cursor] = byte;
            cursor += 1;
        }
        let mut reader = ByteSliceReader::new(&pool, start, cursor);
        let mut out = Vec::new();
        reader.read_to_end(&mut out).unwrap();
        assert_eq!(payload, out);
    }

    /// An empty stream reports EOF immediately.
    #[test]
    fn test_reader_eof_empty_data() {
        let mut pool = ByteBlockPool::new(1024);
        let start = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
        let reader = ByteSliceReader::new(&pool, start, start);
        assert!(reader.eof());
    }

    /// Consecutive allocations are laid out back to back.
    #[test]
    fn test_multiple_allocs() {
        let mut pool = ByteBlockPool::new(32);
        let first = pool.alloc(10);
        let second = pool.alloc(20);
        assert_eq!(first, 0);
        assert_eq!(second, 10);
        assert_eq!(pool.data.len(), 30);
    }
}