use std::cell::Cell;
use std::io;
use std::rc::Rc;
use mem_dbg::MemSize;
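// Blocks are fixed at 32 KiB (1 << 15); the shift and mask convert a global
// pool offset into a (buffer index, in-buffer position) pair.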
const BYTE_BLOCK_SHIFT: usize = 15;
const BYTE_BLOCK_SIZE: usize = 1 << BYTE_BLOCK_SHIFT;
const BYTE_BLOCK_MASK: usize = BYTE_BLOCK_SIZE - 1;
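/// A source of fixed-size byte blocks for a [`ByteBlockPool`]. Implementations
/// decide what happens to recycled blocks: drop them, track them in a shared
/// counter, or buffer them for reuse.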
pub trait Allocator {
fn get_byte_block(&mut self) -> Vec<u8>;
fn recycle_byte_blocks(&mut self, blocks: &mut [Vec<u8>]);
}
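/// Allocates a fresh zeroed block per request and drops recycled blocks
/// without any bookkeeping.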
#[derive(Debug, Default, MemSize)]
#[mem_size_flat]
pub struct DirectAllocator;
impl Allocator for DirectAllocator {
fn get_byte_block(&mut self) -> Vec<u8> {
vec![0u8; BYTE_BLOCK_SIZE]
}
fn recycle_byte_blocks(&mut self, _blocks: &mut [Vec<u8>]) {}
}
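/// Like [`DirectAllocator`], but tracks the total bytes outstanding in a
/// shared counter so callers can observe pool memory usage.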
#[derive(Debug, Clone)]
pub struct DirectTrackingAllocator {
bytes_used: Rc<Cell<usize>>,
}
impl DirectTrackingAllocator {
pub fn new(bytes_used: Rc<Cell<usize>>) -> Self {
Self { bytes_used }
}
}
impl Allocator for DirectTrackingAllocator {
fn get_byte_block(&mut self) -> Vec<u8> {
self.bytes_used.set(self.bytes_used.get() + BYTE_BLOCK_SIZE);
vec![0u8; BYTE_BLOCK_SIZE]
}
    fn recycle_byte_blocks(&mut self, blocks: &mut [Vec<u8>]) {
        // Drop each block immediately so the tracked count matches the
        // memory actually held; callers are left with empty placeholders.
        for block in blocks.iter_mut() {
            *block = Vec::new();
        }
        self.bytes_used
            .set(self.bytes_used.get() - blocks.len() * BYTE_BLOCK_SIZE);
    }
}
const DEFAULT_BUFFERED_BLOCKS: usize = 64;
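/// Buffers up to `max_buffered_blocks` recycled blocks for reuse, avoiding
/// allocation churn when pools are reset frequently. Reused blocks are handed
/// back as-is, so callers that depend on zeroed blocks should reset the pool
/// with `zero_fill = true`.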
#[derive(Debug)]
pub struct RecyclingByteBlockAllocator {
free_byte_blocks: Vec<Vec<u8>>,
max_buffered_blocks: usize,
bytes_used: Rc<Cell<usize>>,
}
impl RecyclingByteBlockAllocator {
pub fn new(max_buffered_blocks: usize, bytes_used: Rc<Cell<usize>>) -> Self {
Self {
free_byte_blocks: Vec::with_capacity(max_buffered_blocks),
max_buffered_blocks,
bytes_used,
}
}
pub fn with_defaults(bytes_used: Rc<Cell<usize>>) -> Self {
Self::new(DEFAULT_BUFFERED_BLOCKS, bytes_used)
}
pub fn num_buffered_blocks(&self) -> usize {
self.free_byte_blocks.len()
}
pub fn bytes_used(&self) -> usize {
self.bytes_used.get()
}
pub fn free_blocks(&mut self, num: usize) -> usize {
let count = num.min(self.free_byte_blocks.len());
self.free_byte_blocks
.truncate(self.free_byte_blocks.len() - count);
self.bytes_used
.set(self.bytes_used.get() - count * BYTE_BLOCK_SIZE);
count
}
}
impl Allocator for RecyclingByteBlockAllocator {
fn get_byte_block(&mut self) -> Vec<u8> {
if let Some(block) = self.free_byte_blocks.pop() {
block
} else {
self.bytes_used.set(self.bytes_used.get() + BYTE_BLOCK_SIZE);
vec![0u8; BYTE_BLOCK_SIZE]
}
}
    fn recycle_byte_blocks(&mut self, blocks: &mut [Vec<u8>]) {
        let space = self.max_buffered_blocks - self.free_byte_blocks.len();
        let to_recycle = space.min(blocks.len());
        for block in blocks.iter_mut().take(to_recycle) {
            self.free_byte_blocks.push(std::mem::take(block));
        }
        // Blocks beyond the buffering capacity are dropped outright so their
        // memory is actually released, not merely uncounted.
        for block in blocks.iter_mut().skip(to_recycle) {
            *block = Vec::new();
        }
        let dropped = blocks.len() - to_recycle;
        self.bytes_used
            .set(self.bytes_used.get() - dropped * BYTE_BLOCK_SIZE);
    }
}
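/// A pool of fixed-size byte blocks, modeled on Lucene's `ByteBlockPool`.
/// `buffer_upto` indexes the current block, `byte_upto` is the write position
/// inside it, and `byte_offset` is the global offset of the current block's
/// first byte; the sentinel values set in [`Self::new`] force a call to
/// [`Self::next_buffer`] before the first write.
///
/// A minimal usage sketch (fenced as `ignore` only because the enclosing
/// crate path is not spelled out here):
///
/// ```ignore
/// let mut pool = ByteBlockPool::new(DirectAllocator);
/// pool.next_buffer(); // the pool starts without a buffer
/// pool.append(b"hello");
/// let mut out = [0u8; 5];
/// pool.read_bytes(0, &mut out);
/// assert_eq!(&out, b"hello");
/// ```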
#[derive(Debug, MemSize)]
pub struct ByteBlockPool<A: Allocator> {
buffers: Vec<Vec<u8>>,
buffer_upto: i32,
byte_upto: usize,
byte_offset: i32,
allocator: A,
}
impl<A: Allocator> ByteBlockPool<A> {
pub fn new(allocator: A) -> Self {
Self {
buffers: Vec::new(),
buffer_upto: -1,
byte_upto: BYTE_BLOCK_SIZE,
byte_offset: -(BYTE_BLOCK_SIZE as i32),
allocator,
}
}
pub fn next_buffer(&mut self) {
let block = self.allocator.get_byte_block();
let new_index = (self.buffer_upto + 1) as usize;
if new_index == self.buffers.len() {
self.buffers.push(block);
} else {
self.buffers[new_index] = block;
}
self.buffer_upto += 1;
self.byte_upto = 0;
self.byte_offset = self
.byte_offset
.checked_add(BYTE_BLOCK_SIZE as i32)
.expect("ByteBlockPool byte_offset overflow");
}
pub fn read_byte(&self, offset: usize) -> u8 {
let buffer_index = offset >> BYTE_BLOCK_SHIFT;
let pos = offset & BYTE_BLOCK_MASK;
self.buffers[buffer_index][pos]
}
pub fn read_bytes(&self, offset: usize, dest: &mut [u8]) {
let mut bytes_left = dest.len();
let mut buffer_index = offset >> BYTE_BLOCK_SHIFT;
let mut pos = offset & BYTE_BLOCK_MASK;
let mut dest_offset = 0;
while bytes_left > 0 {
let chunk = bytes_left.min(BYTE_BLOCK_SIZE - pos);
dest[dest_offset..dest_offset + chunk]
.copy_from_slice(&self.buffers[buffer_index][pos..pos + chunk]);
dest_offset += chunk;
bytes_left -= chunk;
buffer_index += 1;
pos = 0;
}
}
pub fn append(&mut self, data: &[u8]) {
let mut offset = 0;
let mut bytes_left = data.len();
while bytes_left > 0 {
let buffer_left = BYTE_BLOCK_SIZE - self.byte_upto;
            // `<=` lets an exact fit complete without eagerly starting a
            // fresh buffer.
            if bytes_left <= buffer_left {
let buf_idx = self.buffer_upto as usize;
self.buffers[buf_idx][self.byte_upto..self.byte_upto + bytes_left]
.copy_from_slice(&data[offset..offset + bytes_left]);
self.byte_upto += bytes_left;
break;
} else {
if buffer_left > 0 {
let buf_idx = self.buffer_upto as usize;
self.buffers[buf_idx][self.byte_upto..self.byte_upto + buffer_left]
.copy_from_slice(&data[offset..offset + buffer_left]);
}
self.next_buffer();
bytes_left -= buffer_left;
offset += buffer_left;
}
}
}
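    /// Resets the pool for reuse. `zero_fill` clears every written byte,
    /// which slice writers rely on (a zero byte means free space, a non-zero
    /// byte is an end-of-slice marker); `reuse_first` keeps the first block
    /// in place instead of handing it back to the allocator.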
pub fn reset(&mut self, zero_fill: bool, reuse_first: bool) {
if self.buffer_upto == -1 {
return;
}
if zero_fill {
for i in 0..self.buffer_upto as usize {
self.buffers[i].fill(0);
}
let last = self.buffer_upto as usize;
self.buffers[last][..self.byte_upto].fill(0);
}
if self.buffer_upto > 0 || !reuse_first {
let start = if reuse_first { 1 } else { 0 };
let end = (1 + self.buffer_upto) as usize;
self.allocator
.recycle_byte_blocks(&mut self.buffers[start..end]);
}
if reuse_first {
self.buffer_upto = 0;
self.byte_upto = 0;
self.byte_offset = 0;
} else {
self.buffer_upto = -1;
self.byte_upto = BYTE_BLOCK_SIZE;
self.byte_offset = -(BYTE_BLOCK_SIZE as i32);
}
}
    pub fn position(&self) -> usize {
        debug_assert!(self.buffer_upto >= 0, "position() requires at least one buffer");
        (self.buffer_upto as usize) * BYTE_BLOCK_SIZE + self.byte_upto
    }
pub fn get_buffer(&self, index: usize) -> &[u8] {
&self.buffers[index]
}
pub fn byte_offset(&self) -> i32 {
self.byte_offset
}
pub fn num_buffers(&self) -> usize {
self.buffers.len()
}
}
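// Slice sizes by level and the level-progression table, as in Lucene's
// interleaved slice scheme. Every slice ends in a marker byte `0x10 | level`;
// when a writer reaches the marker, the slice is upgraded to the next level
// and the marker position becomes part of a forwarding address.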
const LEVEL_SIZE_ARRAY: [usize; 10] = [5, 14, 20, 30, 40, 40, 80, 80, 120, 200];
const NEXT_LEVEL_ARRAY: [usize; 10] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 9];
pub const FIRST_LEVEL_SIZE: usize = LEVEL_SIZE_ARRAY[0];
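/// Slice allocation routines layered on top of a [`ByteBlockPool`].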
pub struct ByteSlicePool;
impl ByteSlicePool {
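    /// Allocates a fresh level-0 slice of `size` bytes (marker included) and
    /// returns its start offset within the current buffer, advancing to a new
    /// buffer first if needed so that a slice never straddles two blocks.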
pub fn new_slice<A: Allocator>(pool: &mut ByteBlockPool<A>, size: usize) -> usize {
assert!(
size <= BYTE_BLOCK_SIZE,
"slice size {size} exceeds block size {BYTE_BLOCK_SIZE}"
);
if pool.byte_upto > BYTE_BLOCK_SIZE - size {
pool.next_buffer();
}
let upto = pool.byte_upto;
pool.byte_upto += size;
let buf_idx = pool.buffer_upto as usize;
        // End-of-slice marker for level 0.
        pool.buffers[buf_idx][pool.byte_upto - 1] = 0x10;
        upto
}
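    /// Upgrades the slice whose marker byte sits at `(buffer_index, upto)` to
    /// the next level: the last 3 data bytes move to the head of the new
    /// slice, and those 3 bytes plus the marker are overwritten with a 4-byte
    /// little-endian forwarding address. Returns the new write position and
    /// the usable slice length (the trailing marker byte still included, so
    /// callers reserve one byte).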
pub fn alloc_known_size_slice<A: Allocator>(
pool: &mut ByteBlockPool<A>,
buffer_index: usize,
upto: usize,
) -> (usize, usize) {
let level = (pool.buffers[buffer_index][upto] & 0x0F) as usize;
let new_level = NEXT_LEVEL_ARRAY[level];
let new_size = LEVEL_SIZE_ARRAY[new_level];
if pool.byte_upto > BYTE_BLOCK_SIZE - new_size {
pool.next_buffer();
}
let new_upto = pool.byte_upto;
let new_global_offset = new_upto as i32 + pool.byte_offset;
pool.byte_upto += new_size;
let mut temp = [0u8; 3];
temp.copy_from_slice(&pool.buffers[buffer_index][upto - 3..upto]);
let addr_bytes = new_global_offset.to_le_bytes();
pool.buffers[buffer_index][upto - 3..upto + 1].copy_from_slice(&addr_bytes);
let new_buf_idx = pool.buffer_upto as usize;
pool.buffers[new_buf_idx][new_upto..new_upto + 3].copy_from_slice(&temp);
pool.buffers[new_buf_idx][pool.byte_upto - 1] = 0x10 | new_level as u8;
(new_upto + 3, new_size - 3)
}
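    /// Convenience wrapper around [`Self::alloc_known_size_slice`] that
    /// returns only the new write position.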
pub fn alloc_slice<A: Allocator>(
pool: &mut ByteBlockPool<A>,
buffer_index: usize,
upto: usize,
) -> usize {
Self::alloc_known_size_slice(pool, buffer_index, upto).0
}
}
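/// Append-only writer over a chain of byte slices; whenever it runs into an
/// end-of-slice marker it allocates the next, larger slice and continues
/// there transparently.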
#[derive(Debug, MemSize)]
#[mem_size_flat]
pub struct ByteSliceWriter {
buffer_index: usize,
upto: usize,
}
impl ByteSliceWriter {
pub fn new<A: Allocator>(pool: &ByteBlockPool<A>, start_offset: usize) -> Self {
let global = start_offset + pool.byte_offset() as usize;
Self {
buffer_index: global >> BYTE_BLOCK_SHIFT,
upto: global & BYTE_BLOCK_MASK,
}
}
pub fn from_address(addr: usize) -> Self {
Self {
buffer_index: addr >> BYTE_BLOCK_SHIFT,
upto: addr & BYTE_BLOCK_MASK,
}
}
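    /// Writes one byte, first allocating the next slice if the current
    /// position holds a non-zero end-of-slice marker.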
pub fn write_byte<A: Allocator>(&mut self, pool: &mut ByteBlockPool<A>, b: u8) {
if pool.buffers[self.buffer_index][self.upto] != 0 {
let new_offset = ByteSlicePool::alloc_slice(pool, self.buffer_index, self.upto);
let new_buf_idx = pool.buffer_upto as usize;
self.buffer_index = new_buf_idx;
self.upto = new_offset;
}
pool.buffers[self.buffer_index][self.upto] = b;
self.upto += 1;
}
pub fn write_bytes<A: Allocator>(&mut self, pool: &mut ByteBlockPool<A>, data: &[u8]) {
let mut offset = 0;
let end = data.len();
while pool.buffers[self.buffer_index][self.upto] == 0 && offset < end {
pool.buffers[self.buffer_index][self.upto] = data[offset];
self.upto += 1;
offset += 1;
}
while offset < end {
let (slice_offset, slice_length) =
ByteSlicePool::alloc_known_size_slice(pool, self.buffer_index, self.upto);
let new_buf_idx = pool.buffer_upto as usize;
self.buffer_index = new_buf_idx;
let write_length = (slice_length - 1).min(end - offset);
pool.buffers[self.buffer_index][slice_offset..slice_offset + write_length]
.copy_from_slice(&data[offset..offset + write_length]);
self.upto = slice_offset + write_length;
offset += write_length;
}
}
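    /// Writes `i` as a variable-length int: 7 data bits per byte, with the
    /// high bit set on every byte except the last.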
pub fn write_vint<A: Allocator>(&mut self, pool: &mut ByteBlockPool<A>, mut i: i32) {
while (i & !0x7F) != 0 {
self.write_byte(pool, ((i & 0x7F) | 0x80) as u8);
i = ((i as u32) >> 7) as i32;
}
self.write_byte(pool, i as u8);
}
pub fn address(&self) -> usize {
self.upto + self.buffer_index * BYTE_BLOCK_SIZE
}
}
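/// Sequential reader over a slice chain produced by [`ByteSliceWriter`],
/// following the forwarding addresses between slices. On non-final slices,
/// `limit` stops 4 bytes short of the slice end so the forwarding address is
/// never returned as data.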
pub struct ByteSliceReader<'a, A: Allocator> {
pool: &'a ByteBlockPool<A>,
buffer_upto: usize,
upto: usize,
limit: usize,
level: usize,
buffer_offset: usize,
end_index: usize,
}
impl<'a, A: Allocator> ByteSliceReader<'a, A> {
pub fn new(pool: &'a ByteBlockPool<A>, start_index: usize, end_index: usize) -> Self {
debug_assert!(end_index >= start_index);
let buffer_upto = start_index / BYTE_BLOCK_SIZE;
let buffer_offset = buffer_upto * BYTE_BLOCK_SIZE;
let upto = start_index & BYTE_BLOCK_MASK;
let limit = if start_index + FIRST_LEVEL_SIZE >= end_index {
end_index & BYTE_BLOCK_MASK
} else {
upto + FIRST_LEVEL_SIZE - 4
};
Self {
pool,
buffer_upto,
upto,
limit,
level: 0,
buffer_offset,
end_index,
}
}
pub fn eof(&self) -> bool {
self.upto + self.buffer_offset == self.end_index
}
pub fn read_byte(&mut self) -> u8 {
debug_assert!(!self.eof());
if self.upto == self.limit {
self.next_slice();
}
let b = self.pool.buffers[self.buffer_upto][self.upto];
self.upto += 1;
b
}
pub fn read_bytes(&mut self, dest: &mut [u8]) {
let mut offset = 0;
let mut remaining = dest.len();
while remaining > 0 {
let available = self.limit - self.upto;
if available < remaining {
dest[offset..offset + available].copy_from_slice(
&self.pool.buffers[self.buffer_upto][self.upto..self.upto + available],
);
offset += available;
remaining -= available;
self.next_slice();
} else {
dest[offset..offset + remaining].copy_from_slice(
&self.pool.buffers[self.buffer_upto][self.upto..self.upto + remaining],
);
self.upto += remaining;
break;
}
}
}
pub fn skip_bytes(&mut self, mut n: usize) {
while n > 0 {
let available = self.limit - self.upto;
if available < n {
n -= available;
self.next_slice();
} else {
self.upto += n;
break;
}
}
}
pub fn write_to(&mut self, out: &mut dyn io::Write) -> io::Result<usize> {
let mut size = 0;
loop {
if self.limit + self.buffer_offset == self.end_index {
let count = self.limit - self.upto;
out.write_all(&self.pool.buffers[self.buffer_upto][self.upto..self.upto + count])?;
size += count;
self.upto = self.limit;
break;
} else {
let count = self.limit - self.upto;
out.write_all(&self.pool.buffers[self.buffer_upto][self.upto..self.upto + count])?;
size += count;
self.next_slice();
}
}
Ok(size)
}
fn next_slice(&mut self) {
let buf = &self.pool.buffers[self.buffer_upto];
let next_index = i32::from_le_bytes([
buf[self.limit],
buf[self.limit + 1],
buf[self.limit + 2],
buf[self.limit + 3],
]) as usize;
self.level = NEXT_LEVEL_ARRAY[self.level];
let new_size = LEVEL_SIZE_ARRAY[self.level];
self.buffer_upto = next_index / BYTE_BLOCK_SIZE;
self.buffer_offset = self.buffer_upto * BYTE_BLOCK_SIZE;
self.upto = next_index & BYTE_BLOCK_MASK;
if next_index + new_size >= self.end_index {
self.limit = self.end_index - self.buffer_offset;
} else {
self.limit = self.upto + new_size - 4;
}
}
}
impl<A: Allocator> io::Read for ByteSliceReader<'_, A> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if buf.is_empty() || self.eof() {
            return Ok(0);
        }
        // `end_index - position` over-counts the logical bytes remaining when
        // slices are chained (the gap includes forwarding addresses and any
        // interleaved data), so serve at most the rest of the current slice
        // per call; short reads are permitted by the `Read` contract.
        if self.upto == self.limit {
            self.next_slice();
        }
        let len = buf.len().min(self.limit - self.upto);
        self.read_bytes(&mut buf[..len]);
        Ok(len)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use assertables::*;
#[test]
fn test_append_and_read_roundtrip() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let data: Vec<u8> = (0..=255).collect();
pool.append(&data);
let mut result = vec![0u8; 256];
pool.read_bytes(0, &mut result);
assert_eq!(data, result);
}
#[test]
fn test_read_byte() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
pool.append(&[0xAB, 0xCD, 0xEF]);
assert_eq!(pool.read_byte(0), 0xAB);
assert_eq!(pool.read_byte(1), 0xCD);
assert_eq!(pool.read_byte(2), 0xEF);
}
#[test]
fn test_append_across_block_boundary() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let fill = vec![0xAA; BYTE_BLOCK_SIZE - 10];
pool.append(&fill);
assert_eq!(pool.position(), BYTE_BLOCK_SIZE - 10);
let cross: Vec<u8> = (0..20).collect();
pool.append(&cross);
assert_eq!(pool.position(), BYTE_BLOCK_SIZE + 10);
let mut result = vec![0u8; 20];
pool.read_bytes(BYTE_BLOCK_SIZE - 10, &mut result);
assert_eq!(cross, result);
}
#[test]
fn test_large_random_blocks() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let mut items: Vec<Vec<u8>> = Vec::new();
let mut total: usize = 0;
for i in 0..50 {
let size = if i % 2 == 0 { 500 + i * 10 } else { 60000 };
let data: Vec<u8> = (0..size).map(|j| (j % 256) as u8).collect();
pool.append(&data);
total += size;
assert_eq!(pool.position(), total);
items.push(data);
}
let mut pos = 0;
for expected in &items {
let mut actual = vec![0u8; expected.len()];
pool.read_bytes(pos, &mut actual);
assert_eq!(expected, &actual);
pos += expected.len();
}
}
#[test]
fn test_position() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
assert_eq!(pool.position(), 0);
pool.append(&[1, 2, 3]);
assert_eq!(pool.position(), 3);
}
#[test]
fn test_reset_reuse_first() {
let counter = Rc::new(Cell::new(0usize));
let mut pool = ByteBlockPool::new(DirectTrackingAllocator::new(counter.clone()));
pool.next_buffer();
assert_eq!(counter.get(), BYTE_BLOCK_SIZE);
pool.append(&[1, 2, 3]);
pool.reset(true, true);
assert_eq!(counter.get(), BYTE_BLOCK_SIZE);
assert_eq!(pool.position(), 0);
assert_eq!(pool.read_byte(0), 0);
}
#[test]
fn test_reset_no_reuse() {
let counter = Rc::new(Cell::new(0usize));
let mut pool = ByteBlockPool::new(DirectTrackingAllocator::new(counter.clone()));
pool.next_buffer();
assert_eq!(counter.get(), BYTE_BLOCK_SIZE);
pool.reset(false, false);
assert_eq!(counter.get(), 0);
}
#[test]
fn test_tracking_allocator_multi_block() {
let counter = Rc::new(Cell::new(0usize));
let mut pool = ByteBlockPool::new(DirectTrackingAllocator::new(counter.clone()));
pool.next_buffer();
pool.next_buffer();
pool.next_buffer();
assert_eq!(counter.get(), 3 * BYTE_BLOCK_SIZE);
pool.reset(false, true);
assert_eq!(counter.get(), BYTE_BLOCK_SIZE);
}
#[test]
fn test_overflow_detection() {
struct TinyAllocator;
impl Allocator for TinyAllocator {
fn get_byte_block(&mut self) -> Vec<u8> {
Vec::new()
}
fn recycle_byte_blocks(&mut self, _blocks: &mut [Vec<u8>]) {}
}
let mut pool = ByteBlockPool::new(TinyAllocator);
pool.next_buffer();
let mut overflowed = false;
for _ in 0..i32::MAX as usize / BYTE_BLOCK_SIZE + 1 {
let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
pool.next_buffer();
}));
if result.is_err() {
overflowed = true;
break;
}
}
assert!(overflowed, "byte_offset should overflow");
}
#[test]
fn test_new_slice_writes_level_marker() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let offset = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
assert_eq!(offset, 0);
assert_eq!(pool.buffers[0][FIRST_LEVEL_SIZE - 1], 0x10);
}
#[test]
fn test_level_progression() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let offset = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
pool.buffers[0][offset] = 0x42;
let (data_start, usable) =
ByteSlicePool::alloc_known_size_slice(&mut pool, 0, offset + FIRST_LEVEL_SIZE - 1);
assert_eq!(usable, 11);
assert_gt!(data_start, 0);
}
#[test]
fn test_forwarding_chain() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let _offset = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
let mut buf_idx: usize = 0;
let mut upto = FIRST_LEVEL_SIZE - 1;
for (expected_level, &new_size) in LEVEL_SIZE_ARRAY.iter().enumerate().skip(1).take(5) {
let new_upto = ByteSlicePool::alloc_slice(&mut pool, buf_idx, upto);
buf_idx = pool.buffer_upto as usize;
let marker_pos = new_upto + new_size - 3 - 1;
assert_eq!(
pool.buffers[buf_idx][marker_pos] & 0x0F,
expected_level as u8
);
upto = marker_pos;
}
}
#[test]
fn test_cross_block_slice_allocation() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let fill_size = BYTE_BLOCK_SIZE - 3;
pool.byte_upto = fill_size;
let offset = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
assert_eq!(pool.buffer_upto, 1);
        assert_eq!(offset, 0);
    }
#[test]
fn test_writer_single_bytes() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let offset = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
let mut writer = ByteSliceWriter::new(&pool, offset);
writer.write_byte(&mut pool, 0xAA);
writer.write_byte(&mut pool, 0xBB);
let end = writer.address();
let mut reader = ByteSliceReader::new(&pool, offset + pool.byte_offset as usize, end);
assert_eq!(reader.read_byte(), 0xAA);
assert_eq!(reader.read_byte(), 0xBB);
assert!(reader.eof());
}
#[test]
fn test_writer_bulk_write() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let offset = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
let mut writer = ByteSliceWriter::new(&pool, offset);
let data: Vec<u8> = (0..100).collect();
writer.write_bytes(&mut pool, &data);
let end = writer.address();
let start = offset + pool.byte_offset as usize;
let mut reader = ByteSliceReader::new(&pool, start, end);
let mut result = vec![0u8; 100];
reader.read_bytes(&mut result);
assert_eq!(data, result);
assert!(reader.eof());
}
#[test]
fn test_writer_vint_roundtrip() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let offset = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
let mut writer = ByteSliceWriter::new(&pool, offset);
let values = [0, 1, 127, 128, 16383, 16384, 2097151, i32::MAX];
for &v in &values {
writer.write_vint(&mut pool, v);
}
let end = writer.address();
let start = offset + pool.byte_offset as usize;
let mut reader = ByteSliceReader::new(&pool, start, end);
for &expected in &values {
let actual = read_vint(&mut reader);
assert_eq!(expected, actual, "vint mismatch");
}
assert!(reader.eof());
}
#[test]
fn test_two_interleaved_writers() {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let offset1 = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
let mut writer1 = ByteSliceWriter::new(&pool, offset1);
let start1 = offset1 + pool.byte_offset as usize;
let offset2 = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
let mut writer2 = ByteSliceWriter::new(&pool, offset2);
let start2 = offset2 + pool.byte_offset as usize;
for i in 0u8..50 {
writer1.write_byte(&mut pool, i);
writer2.write_byte(&mut pool, 200 + i);
}
let end1 = writer1.address();
let end2 = writer2.address();
let mut reader1 = ByteSliceReader::new(&pool, start1, end1);
for i in 0u8..50 {
assert_eq!(reader1.read_byte(), i);
}
assert!(reader1.eof());
let mut reader2 = ByteSliceReader::new(&pool, start2, end2);
for i in 0u8..50 {
assert_eq!(reader2.read_byte(), 200 + i);
}
assert!(reader2.eof());
}
#[test]
fn test_reader_byte_by_byte() {
let data: Vec<u8> = (0..200).map(|i| (i % 256) as u8).collect();
let (pool, start, end) = write_slice_data(&data);
let mut reader = ByteSliceReader::new(&pool, start, end);
for &expected in &data {
assert!(!reader.eof());
assert_eq!(reader.read_byte(), expected);
}
assert!(reader.eof());
}
#[test]
fn test_reader_bulk_read() {
let data: Vec<u8> = (0..300).map(|i| (i % 256) as u8).collect();
let (pool, start, end) = write_slice_data(&data);
let mut reader = ByteSliceReader::new(&pool, start, end);
let mut result = vec![0u8; data.len()];
reader.read_bytes(&mut result);
assert_eq!(data, result);
assert!(reader.eof());
}
#[test]
fn test_reader_skip_bytes() {
let data: Vec<u8> = (0..100).collect();
let (pool, start, end) = write_slice_data(&data);
let mut reader = ByteSliceReader::new(&pool, start, end);
reader.skip_bytes(10);
assert_eq!(reader.read_byte(), 10);
reader.skip_bytes(20);
assert_eq!(reader.read_byte(), 31);
}
#[test]
fn test_reader_write_to() {
let data: Vec<u8> = (0..500).map(|i| (i % 256) as u8).collect();
let (pool, start, end) = write_slice_data(&data);
let mut reader = ByteSliceReader::new(&pool, start, end);
let mut output = Vec::new();
let bytes_written = reader.write_to(&mut output).unwrap();
assert_len_eq_x!(&data, bytes_written);
assert_eq!(data, output);
}
#[test]
fn test_reader_eof_empty_data() {
let data: Vec<u8> = Vec::new();
let (pool, start, end) = write_slice_data(&data);
let reader = ByteSliceReader::new(&pool, start, end);
assert!(reader.eof());
}
#[test]
fn test_reader_matches_java_write_pattern() {
let data: Vec<u8> = (0..150).map(|i| (i * 7 + 3) as u8).collect();
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let upto_start = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
let mut buf_idx: usize = 0;
let mut upto = upto_start;
for &b in &data {
if (pool.buffers[buf_idx][upto] & 16) != 0 {
upto = ByteSlicePool::alloc_slice(&mut pool, buf_idx, upto);
buf_idx = pool.buffer_upto as usize;
}
pool.buffers[buf_idx][upto] = b;
upto += 1;
}
let end = upto + buf_idx * BYTE_BLOCK_SIZE;
let mut reader = ByteSliceReader::new(&pool, upto_start, end);
for &expected in &data {
assert_eq!(reader.read_byte(), expected);
}
assert!(reader.eof());
}
fn write_slice_data(data: &[u8]) -> (ByteBlockPool<DirectAllocator>, usize, usize) {
let mut pool = ByteBlockPool::new(DirectAllocator);
pool.next_buffer();
let offset = ByteSlicePool::new_slice(&mut pool, FIRST_LEVEL_SIZE);
let start = offset + pool.byte_offset as usize;
let mut writer = ByteSliceWriter::new(&pool, offset);
for &b in data {
writer.write_byte(&mut pool, b);
}
let end = writer.address();
(pool, start, end)
}
fn read_vint<A: Allocator>(reader: &mut ByteSliceReader<A>) -> i32 {
let mut b = reader.read_byte();
let mut result = (b & 0x7F) as i32;
let mut shift = 7;
while (b & 0x80) != 0 {
b = reader.read_byte();
result |= ((b & 0x7F) as i32) << shift;
shift += 7;
}
result
}
#[test]
fn test_recycling_alloc_reuses_blocks() {
let counter = Rc::new(Cell::new(0usize));
let mut pool = ByteBlockPool::new(RecyclingByteBlockAllocator::new(2, counter.clone()));
pool.next_buffer();
pool.next_buffer();
assert_eq!(counter.get(), 2 * BYTE_BLOCK_SIZE);
pool.reset(false, false);
assert_eq!(pool.allocator.num_buffered_blocks(), 2);
assert_eq!(counter.get(), 2 * BYTE_BLOCK_SIZE);
pool.next_buffer();
pool.next_buffer();
assert_eq!(counter.get(), 2 * BYTE_BLOCK_SIZE);
assert_eq!(pool.allocator.num_buffered_blocks(), 0);
}
#[test]
fn test_recycling_exceeds_max_buffered() {
let counter = Rc::new(Cell::new(0usize));
let max_buffered = 2;
let mut pool = ByteBlockPool::new(RecyclingByteBlockAllocator::new(
max_buffered,
counter.clone(),
));
pool.next_buffer();
pool.next_buffer();
pool.next_buffer();
pool.next_buffer();
assert_eq!(counter.get(), 4 * BYTE_BLOCK_SIZE);
pool.reset(false, false);
assert_eq!(pool.allocator.num_buffered_blocks(), max_buffered);
assert_eq!(counter.get(), 2 * BYTE_BLOCK_SIZE);
}
#[test]
fn test_recycling_free_blocks() {
let counter = Rc::new(Cell::new(0usize));
let mut alloc = RecyclingByteBlockAllocator::new(10, counter.clone());
let mut blocks: Vec<Vec<u8>> = (0..5).map(|_| alloc.get_byte_block()).collect();
assert_eq!(counter.get(), 5 * BYTE_BLOCK_SIZE);
alloc.recycle_byte_blocks(&mut blocks);
assert_eq!(alloc.num_buffered_blocks(), 5);
assert_eq!(counter.get(), 5 * BYTE_BLOCK_SIZE);
let freed = alloc.free_blocks(3);
assert_eq!(freed, 3);
assert_eq!(alloc.num_buffered_blocks(), 2);
assert_eq!(counter.get(), 2 * BYTE_BLOCK_SIZE);
let freed = alloc.free_blocks(10);
assert_eq!(freed, 2);
assert_eq!(alloc.num_buffered_blocks(), 0);
assert_eq!(counter.get(), 0);
}
#[test]
fn test_recycling_zeroed_on_reuse() {
let counter = Rc::new(Cell::new(0usize));
let mut pool = ByteBlockPool::new(RecyclingByteBlockAllocator::new(4, counter.clone()));
pool.next_buffer();
pool.append(&[0xFF; 100]);
pool.reset(true, false);
pool.next_buffer();
for i in 0..100 {
assert_eq!(pool.read_byte(i), 0, "byte at offset {i} should be zero");
}
}
#[test]
fn test_recycling_with_defaults() {
let counter = Rc::new(Cell::new(0usize));
let alloc = RecyclingByteBlockAllocator::with_defaults(counter);
assert_eq!(alloc.num_buffered_blocks(), 0);
assert_eq!(alloc.bytes_used(), 0);
}
#[test]
fn test_recycling_data_integrity_across_reuse() {
let counter = Rc::new(Cell::new(0usize));
let mut pool = ByteBlockPool::new(RecyclingByteBlockAllocator::new(4, counter.clone()));
for round in 0u8..3 {
pool.next_buffer();
let data: Vec<u8> = (0..500)
.map(|i| (i as u8).wrapping_add(round * 37))
.collect();
pool.append(&data);
let mut result = vec![0u8; 500];
pool.read_bytes(0, &mut result);
assert_eq!(data, result, "round {round} data mismatch");
pool.reset(true, false);
}
}
}