use parking_lot::Mutex;
use std::collections::VecDeque;
use std::sync::Arc;
use thiserror::Error;
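
/// Errors returned by the buffer and allocation utilities in this module.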
#[derive(Error, Debug)]
pub enum AllocationError {
    #[error("Buffer pool exhausted")]
    PoolExhausted,
    #[error("Invalid buffer size: {0}")]
    InvalidSize(usize),
    #[error("Buffer too small: required {required}, available {available}")]
    BufferTooSmall { required: usize, available: usize },
}
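
/// A thread-safe pool of reusable `Vec<u8>` buffers. Acquired buffers are
/// handed back to the pool on drop, up to `max_buffers`, so steady-state use
/// avoids repeated heap allocation.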
pub struct BufferPool {
    buffers: Arc<Mutex<VecDeque<Vec<u8>>>>,
    buffer_size: usize,
    max_buffers: usize,
}
impl BufferPool {
    /// Creates an empty pool that hands out buffers with at least
    /// `buffer_size` bytes of capacity and retains at most `max_buffers`.
    pub fn new(buffer_size: usize, max_buffers: usize) -> Self {
        Self {
            buffers: Arc::new(Mutex::new(VecDeque::new())),
            buffer_size,
            max_buffers,
        }
    }

    /// Takes a buffer from the pool, or allocates a new one if the pool is
    /// empty. The returned buffer is always empty but keeps its capacity.
    pub fn acquire(&self) -> PooledBuffer {
        let mut buffers = self.buffers.lock();
        let buffer = if let Some(mut buf) = buffers.pop_front() {
            buf.clear();
            buf.reserve(self.buffer_size);
            buf
        } else {
            Vec::with_capacity(self.buffer_size)
        };
        PooledBuffer {
            buffer,
            pool: Arc::clone(&self.buffers),
            max_buffers: self.max_buffers,
        }
    }

    /// Number of buffers currently held in the pool.
    pub fn size(&self) -> usize {
        self.buffers.lock().len()
    }

    /// Capacity hint used when allocating or recycling buffers.
    pub fn buffer_size(&self) -> usize {
        self.buffer_size
    }
}
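
/// A byte buffer checked out of a [`BufferPool`]. Dereferences to `[u8]` and
/// is returned to its pool when dropped.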
pub struct PooledBuffer {
    buffer: Vec<u8>,
    pool: Arc<Mutex<VecDeque<Vec<u8>>>>,
    max_buffers: usize,
}
impl PooledBuffer {
    /// Mutable access to the underlying `Vec<u8>`.
    #[allow(clippy::should_implement_trait)]
    pub fn as_mut(&mut self) -> &mut Vec<u8> {
        &mut self.buffer
    }

    /// Read-only view of the buffer contents.
    #[allow(clippy::should_implement_trait)]
    pub fn as_ref(&self) -> &[u8] {
        &self.buffer
    }

    pub fn len(&self) -> usize {
        self.buffer.len()
    }

    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }
}
impl Drop for PooledBuffer {
    fn drop(&mut self) {
        // Return the buffer to the pool unless the pool is already full.
        let mut pool = self.pool.lock();
        if pool.len() < self.max_buffers {
            let buffer = std::mem::take(&mut self.buffer);
            pool.push_back(buffer);
        }
    }
}
impl std::ops::Deref for PooledBuffer {
    type Target = [u8];
    fn deref(&self) -> &Self::Target {
        &self.buffer
    }
}
impl std::ops::DerefMut for PooledBuffer {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buffer
    }
}
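
/// Generic counterpart of [`BufferPool`] that recycles `Vec<T>` buffers.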
pub struct TypedBufferPool<T> {
    buffers: Arc<Mutex<VecDeque<Vec<T>>>>,
    buffer_capacity: usize,
    max_buffers: usize,
}
impl<T> TypedBufferPool<T> {
    /// Creates an empty pool that hands out `Vec<T>` buffers with at least
    /// `buffer_capacity` elements of capacity, retaining at most `max_buffers`.
    pub fn new(buffer_capacity: usize, max_buffers: usize) -> Self {
        Self {
            buffers: Arc::new(Mutex::new(VecDeque::new())),
            buffer_capacity,
            max_buffers,
        }
    }

    /// Takes a buffer from the pool, or allocates a new one if the pool is
    /// empty. The returned buffer is always empty but keeps its capacity.
    pub fn acquire(&self) -> TypedPooledBuffer<T> {
        let mut buffers = self.buffers.lock();
        let buffer = if let Some(mut buf) = buffers.pop_front() {
            buf.clear();
            buf.reserve(self.buffer_capacity);
            buf
        } else {
            Vec::with_capacity(self.buffer_capacity)
        };
        TypedPooledBuffer {
            buffer,
            pool: Arc::clone(&self.buffers),
            max_buffers: self.max_buffers,
        }
    }

    /// Number of buffers currently held in the pool.
    pub fn size(&self) -> usize {
        self.buffers.lock().len()
    }
}
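
/// A typed buffer checked out of a [`TypedBufferPool`]. Dereferences to `[T]`
/// and is returned to its pool when dropped.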
pub struct TypedPooledBuffer<T> {
    buffer: Vec<T>,
    pool: Arc<Mutex<VecDeque<Vec<T>>>>,
    max_buffers: usize,
}
impl<T> TypedPooledBuffer<T> {
    #[allow(clippy::should_implement_trait)]
    pub fn as_mut(&mut self) -> &mut Vec<T> {
        &mut self.buffer
    }

    #[allow(clippy::should_implement_trait)]
    pub fn as_ref(&self) -> &[T] {
        &self.buffer
    }

    pub fn len(&self) -> usize {
        self.buffer.len()
    }

    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }

    pub fn push(&mut self, value: T) {
        self.buffer.push(value);
    }

    pub fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        self.buffer.extend(iter);
    }
}
impl<T> Drop for TypedPooledBuffer<T> {
    fn drop(&mut self) {
        // Return the buffer to the pool unless the pool is already full.
        let mut pool = self.pool.lock();
        if pool.len() < self.max_buffers {
            let buffer = std::mem::take(&mut self.buffer);
            pool.push_back(buffer);
        }
    }
}
impl<T> std::ops::Deref for TypedPooledBuffer<T> {
    type Target = [T];
    fn deref(&self) -> &Self::Target {
        &self.buffer
    }
}
impl<T> std::ops::DerefMut for TypedPooledBuffer<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buffer
    }
}
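
/// Zero-copy reinterpretation helpers for `Pod` (plain-old-data) slices,
/// backed by `bytemuck`.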
pub struct ZeroCopyConverter;
impl ZeroCopyConverter {
    /// Reinterprets `&[T]` as `&[U]` without copying. Panics (via `bytemuck`)
    /// if the element sizes or alignment make the cast impossible.
    #[inline]
    pub fn cast_slice<T, U>(slice: &[T]) -> &[U]
    where
        T: bytemuck::Pod,
        U: bytemuck::Pod,
    {
        bytemuck::cast_slice(slice)
    }

    /// Mutable version of [`Self::cast_slice`].
    #[inline]
    pub fn cast_slice_mut<T, U>(slice: &mut [T]) -> &mut [U]
    where
        T: bytemuck::Pod,
        U: bytemuck::Pod,
    {
        bytemuck::cast_slice_mut(slice)
    }

    /// Views a byte slice as `&[T]`. Panics if the byte length is not a
    /// multiple of `size_of::<T>()` or the slice is not suitably aligned.
    #[inline]
    pub fn bytes_to_slice<T: bytemuck::Pod>(bytes: &[u8]) -> &[T] {
        bytemuck::cast_slice(bytes)
    }

    /// Views a `&[T]` slice as raw bytes.
    #[inline]
    pub fn slice_to_bytes<T: bytemuck::Pod>(slice: &[T]) -> &[u8] {
        bytemuck::cast_slice(slice)
    }
}
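
/// A fixed-capacity, stack-allocated byte buffer of `N` bytes.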
pub struct StackBuffer<const N: usize> {
    data: [u8; N],
    len: usize,
}
impl<const N: usize> StackBuffer<N> {
    #[inline]
    pub const fn new() -> Self {
        Self {
            data: [0u8; N],
            len: 0,
        }
    }

    #[inline]
    pub const fn capacity(&self) -> usize {
        N
    }

    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Number of bytes that can still be written before the buffer is full.
    #[inline]
    pub const fn remaining(&self) -> usize {
        N - self.len
    }

    /// Appends `bytes`, failing with [`AllocationError::BufferTooSmall`] if
    /// they do not fit in the remaining capacity.
    #[inline]
    pub fn write(&mut self, bytes: &[u8]) -> Result<(), AllocationError> {
        if bytes.len() > self.remaining() {
            return Err(AllocationError::BufferTooSmall {
                required: bytes.len(),
                available: self.remaining(),
            });
        }
        self.data[self.len..self.len + bytes.len()].copy_from_slice(bytes);
        self.len += bytes.len();
        Ok(())
    }

    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        &self.data[..self.len]
    }

    /// Resets the length to zero without touching the underlying bytes.
    #[inline]
    pub fn clear(&mut self) {
        self.len = 0;
    }
}
impl<const N: usize> Default for StackBuffer<N> {
    fn default() -> Self {
        Self::new()
    }
}
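
/// A buffer that uses a fixed 256-byte [`StackBuffer`] (boxed, so the enum
/// itself stays small) for small payloads and switches to a growable
/// `Vec<u8>` once writes exceed that capacity or a larger size hint is given.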
pub enum AdaptiveBuffer {
    Stack(Box<StackBuffer<256>>),
    Heap(Vec<u8>),
}
impl AdaptiveBuffer {
    /// Chooses the fixed buffer for hints of 256 bytes or less, otherwise
    /// pre-allocates a `Vec` with the hinted capacity.
    #[inline]
    pub fn new(size_hint: usize) -> Self {
        if size_hint <= 256 {
            Self::Stack(Box::default())
        } else {
            Self::Heap(Vec::with_capacity(size_hint))
        }
    }

    /// Appends `bytes`, upgrading from the fixed buffer to a heap `Vec` if
    /// the write would not fit.
    pub fn write(&mut self, bytes: &[u8]) -> Result<(), AllocationError> {
        match self {
            Self::Stack(buf) => {
                if buf.remaining() >= bytes.len() {
                    buf.write(bytes)
                } else {
                    // Spill the existing contents plus the new bytes into a Vec.
                    let mut heap = Vec::with_capacity(buf.len() + bytes.len());
                    heap.extend_from_slice(buf.as_slice());
                    heap.extend_from_slice(bytes);
                    *self = Self::Heap(heap);
                    Ok(())
                }
            }
            Self::Heap(vec) => {
                vec.extend_from_slice(bytes);
                Ok(())
            }
        }
    }

    pub fn as_slice(&self) -> &[u8] {
        match self {
            Self::Stack(buf) => buf.as_slice(),
            Self::Heap(vec) => vec.as_slice(),
        }
    }

    pub fn len(&self) -> usize {
        match self {
            Self::Stack(buf) => buf.len(),
            Self::Heap(vec) => vec.len(),
        }
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_buffer_pool() {
        let pool = BufferPool::new(1024, 4);
        let mut buffer1 = pool.acquire();
        buffer1.as_mut().extend_from_slice(&[1, 2, 3]);
        assert_eq!(buffer1.len(), 3);
        drop(buffer1);
        assert_eq!(pool.size(), 1);
        let buffer2 = pool.acquire();
        assert_eq!(buffer2.len(), 0);
    }

    #[test]
    fn test_typed_buffer_pool() {
        let pool = TypedBufferPool::<f32>::new(100, 4);
        let mut buffer1 = pool.acquire();
        buffer1.push(1.0);
        buffer1.push(2.0);
        assert_eq!(buffer1.len(), 2);
        drop(buffer1);
        assert_eq!(pool.size(), 1);
        let buffer2 = pool.acquire();
        assert_eq!(buffer2.len(), 0);
    }

    #[test]
    fn test_zero_copy_converter() {
        let floats: Vec<f32> = vec![1.0, 2.0, 3.0, 4.0];
        let bytes = ZeroCopyConverter::slice_to_bytes(&floats);
        assert_eq!(bytes.len(), 16);
        let floats_back: &[f32] = ZeroCopyConverter::bytes_to_slice(bytes);
        assert_eq!(floats_back, &floats);
    }

    #[test]
    fn test_stack_buffer() {
        let mut buf = StackBuffer::<64>::new();
        assert_eq!(buf.capacity(), 64);
        assert_eq!(buf.len(), 0);
        buf.write(&[1, 2, 3]).unwrap();
        assert_eq!(buf.len(), 3);
        assert_eq!(buf.as_slice(), &[1, 2, 3]);
        buf.clear();
        assert_eq!(buf.len(), 0);
    }

    #[test]
    fn test_stack_buffer_overflow() {
        let mut buf = StackBuffer::<4>::new();
        assert!(buf.write(&[1, 2, 3, 4]).is_ok());
        assert!(buf.write(&[5]).is_err());
    }

    #[test]
    fn test_adaptive_buffer_small() {
        let mut buf = AdaptiveBuffer::new(10);
        buf.write(&[1, 2, 3]).unwrap();
        assert!(matches!(buf, AdaptiveBuffer::Stack(_)));
        assert_eq!(buf.as_slice(), &[1, 2, 3]);
    }

    #[test]
    fn test_adaptive_buffer_large() {
        let mut buf = AdaptiveBuffer::new(512);
        buf.write(&[1, 2, 3]).unwrap();
        assert!(matches!(buf, AdaptiveBuffer::Heap(_)));
        assert_eq!(buf.as_slice(), &[1, 2, 3]);
    }

    #[test]
    fn test_adaptive_buffer_upgrade() {
        let mut buf = AdaptiveBuffer::new(10);
        buf.write(&[1; 100]).unwrap();
        buf.write(&[2; 200]).unwrap();
        assert!(matches!(buf, AdaptiveBuffer::Heap(_)));
        assert_eq!(buf.len(), 300);
    }

    #[test]
    fn test_pooled_buffer_deref() {
        let pool = BufferPool::new(1024, 4);
        let mut buffer = pool.acquire();
        buffer.as_mut().extend_from_slice(&[1, 2, 3, 4]);
        assert_eq!(buffer[0], 1);
        assert_eq!(buffer[3], 4);
    }
}