use parking_lot::Mutex;
use std::ops::{Deref, DerefMut};
use std::sync::{
Arc,
atomic::{AtomicU64, Ordering},
};
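/// Buffer size classes, in bytes: powers of two from 4 KiB to 1 MiB.
/// `acquire` rounds each request up to the smallest class that fits.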
const SIZE_CLASSES: [usize; 9] = [
    4_096, 8_192, 16_384, 32_768, 65_536, 131_072, 262_144, 524_288, 1_048_576,
];
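/// A point-in-time snapshot of the pool's counters, as returned by
/// [`BufferPool::stats`].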
#[derive(Debug, Clone)]
pub struct BufferPoolStats {
pub allocations: u64,
pub recycles: u64,
pub misses: u64,
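    /// Fraction of acquisitions served from a free list:
    /// `recycles / allocations` (0.0 when nothing has been allocated).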
pub recycle_rate: f64,
}
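/// A thread-safe pool of reusable byte buffers, bucketed into the size
/// classes of [`SIZE_CLASSES`].
///
/// `acquire` hands out zero-filled buffers and reuses previously released
/// ones when available; buffers return to the pool automatically when
/// their [`PooledBuffer`] handle is dropped.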
pub struct BufferPool {
    free_lists: [Mutex<Vec<Vec<u8>>>; SIZE_CLASSES.len()],
capacity_per_class: usize,
allocations: AtomicU64,
recycles: AtomicU64,
misses: AtomicU64,
}
impl BufferPool {
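    /// Creates a pool that retains at most `capacity_per_class` free
    /// buffers in each size class; excess releases are simply dropped.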
pub fn new(capacity_per_class: usize) -> Arc<Self> {
Arc::new(Self {
            free_lists: std::array::from_fn(|_| Mutex::new(Vec::new())),
capacity_per_class,
allocations: AtomicU64::new(0),
recycles: AtomicU64::new(0),
misses: AtomicU64::new(0),
})
}
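    /// Acquires a zero-filled buffer of at least `min_size` bytes.
    ///
    /// The request is rounded up to the smallest size class that fits. If a
    /// free buffer exists in that class it is recycled; otherwise a fresh
    /// allocation is made. Requests larger than the biggest size class are
    /// allocated exactly and bypass the pool entirely.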
    pub fn acquire(self: &Arc<Self>, min_size: usize) -> PooledBuffer {
        self.allocations.fetch_add(1, Ordering::Relaxed);
        match SIZE_CLASSES.iter().position(|&cap| cap >= min_size) {
            Some(idx) => {
                let class_size = SIZE_CLASSES[idx];
                // Try to reuse a buffer from this class's free list. Bind the
                // popped value so the lock is released before the match arms.
                let popped = self.free_lists[idx].lock().pop();
                let data = match popped {
                    Some(mut buf) => {
                        self.recycles.fetch_add(1, Ordering::Relaxed);
                        // Zero the buffer before handing it out again.
                        buf.clear();
                        buf.resize(class_size, 0u8);
                        buf
                    }
                    None => {
                        self.misses.fetch_add(1, Ordering::Relaxed);
                        vec![0u8; class_size]
                    }
                };
                PooledBuffer {
                    data: Some(data),
                    pool: Arc::clone(self),
                    size_class: idx,
                }
            }
            None => {
                // Oversized request: allocate exactly and tag the buffer
                // with a sentinel class so `release` never retains it.
                self.misses.fetch_add(1, Ordering::Relaxed);
                PooledBuffer {
                    data: Some(vec![0u8; min_size]),
                    pool: Arc::clone(self),
                    size_class: usize::MAX,
                }
            }
        }
    }
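    /// Returns a buffer to the free list for `size_class`.
    ///
    /// Buffers with an out-of-range class (including the oversize sentinel)
    /// and buffers that would push the free list past `capacity_per_class`
    /// are dropped instead of retained.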
pub fn release(&self, buffer: Vec<u8>, size_class: usize) {
if size_class >= SIZE_CLASSES.len() {
return;
}
let mut list = self.free_lists[size_class].lock();
if list.len() < self.capacity_per_class {
list.push(buffer);
}
}
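    /// Total number of `acquire` calls.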
pub fn allocations(&self) -> u64 {
self.allocations.load(Ordering::Relaxed)
}
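    /// Number of acquisitions served by reusing a pooled buffer.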
pub fn recycles(&self) -> u64 {
self.recycles.load(Ordering::Relaxed)
}
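    /// Number of acquisitions that required a fresh allocation.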
pub fn misses(&self) -> u64 {
self.misses.load(Ordering::Relaxed)
}
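    /// Returns a snapshot of the counters. The counters are read with
    /// relaxed ordering, so the snapshot is not atomic across fields.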
pub fn stats(&self) -> BufferPoolStats {
let allocations = self.allocations();
let recycles = self.recycles();
let misses = self.misses();
let recycle_rate = if allocations == 0 {
0.0
} else {
recycles as f64 / allocations as f64
};
BufferPoolStats {
allocations,
recycles,
misses,
recycle_rate,
}
}
}
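/// A buffer checked out of a [`BufferPool`].
///
/// Dereferences to `[u8]`; the backing storage is returned to its pool
/// when the handle is dropped.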
pub struct PooledBuffer {
data: Option<Vec<u8>>,
pool: Arc<BufferPool>,
size_class: usize,
}
impl Drop for PooledBuffer {
    fn drop(&mut self) {
        // Hand the backing storage back to the pool. `release` decides
        // whether to retain it or drop it on the spot.
        if let Some(buf) = self.data.take() {
            self.pool.release(buf, self.size_class);
        }
    }
}
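// `data` is only `None` transiently during `drop`, so the empty-slice
// fallbacks below are unreachable in practice but keep the impls total.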
impl Deref for PooledBuffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
self.data.as_deref().unwrap_or(&[])
}
}
impl DerefMut for PooledBuffer {
fn deref_mut(&mut self) -> &mut [u8] {
self.data.as_deref_mut().unwrap_or(&mut [])
}
}
impl AsRef<[u8]> for PooledBuffer {
fn as_ref(&self) -> &[u8] {
self.deref()
}
}
impl AsMut<[u8]> for PooledBuffer {
fn as_mut(&mut self) -> &mut [u8] {
self.deref_mut()
}
}
impl PooledBuffer {
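    /// Logical length of the buffer: the full size class for pooled
    /// buffers, or the exact requested size for oversized ones.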
pub fn len(&self) -> usize {
self.data.as_ref().map_or(0, |d| d.len())
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn capacity(&self) -> usize {
self.data.as_ref().map_or(0, |d| d.capacity())
}
pub fn as_slice(&self) -> &[u8] {
self.deref()
}
pub fn as_mut_slice(&mut self) -> &mut [u8] {
self.deref_mut()
}
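    /// Resizes the logical contents, filling any growth with `value`.
    /// Shrinking does not release capacity back to the allocator.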
pub fn resize(&mut self, new_len: usize, value: u8) {
if let Some(ref mut v) = self.data {
v.resize(new_len, value);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_buffer_pool_basic_acquire() {
let pool = BufferPool::new(4);
let buf = pool.acquire(1_000);
assert!(
buf.len() >= 1_000,
"buffer must be at least the requested size"
);
}
#[test]
fn test_buffer_pool_recycle() {
let pool = BufferPool::new(4);
let buf = pool.acquire(4_096);
drop(buf);
let _buf2 = pool.acquire(4_096);
assert!(pool.recycles() >= 1, "second acquire should be a recycle");
}
#[test]
fn test_buffer_pool_size_classes() {
let pool = BufferPool::new(4);
        let cases: &[(usize, usize)] = &[
            (1, SIZE_CLASSES[0]),
            (4_096, SIZE_CLASSES[0]),
            (8_193, SIZE_CLASSES[2]),
            (65_537, SIZE_CLASSES[5]),
        ];
for &(req, expected_cap) in cases {
let buf = pool.acquire(req);
assert_eq!(
buf.capacity(),
expected_cap,
"request {} → expected size-class capacity {}",
req,
expected_cap
);
}
}
#[test]
fn test_buffer_pool_capacity_limit() {
let capacity = 3usize;
let pool = BufferPool::new(capacity);
let buffers: Vec<_> = (0..capacity + 1).map(|_| pool.acquire(4_096)).collect();
drop(buffers);
let free_count = pool.free_lists[0].lock().len();
assert!(
free_count <= capacity,
"free list must not exceed capacity (got {})",
free_count
);
}
#[test]
fn test_pooled_buffer_deref() {
let pool = BufferPool::new(4);
let mut buf = pool.acquire(4_096);
buf[0] = 0xAA;
buf[1] = 0xBB;
assert_eq!(buf[0], 0xAA);
assert_eq!(buf[1], 0xBB);
}
#[test]
fn test_buffer_pool_stats() {
let pool = BufferPool::new(4);
let b1 = pool.acquire(4_096);
assert_eq!(pool.allocations(), 1);
assert_eq!(pool.misses(), 1);
assert_eq!(pool.recycles(), 0);
drop(b1);
let _b2 = pool.acquire(4_096);
assert_eq!(pool.allocations(), 2);
assert_eq!(pool.misses(), 1);
assert_eq!(pool.recycles(), 1);
let stats = pool.stats();
assert_eq!(stats.allocations, 2);
assert_eq!(stats.recycles, 1);
assert!((stats.recycle_rate - 0.5).abs() < f64::EPSILON);
}
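    // Sketch of the oversize path: requests above the largest size class
    // use the `usize::MAX` sentinel, are allocated exactly, and never land
    // on a free list, so they can only ever count as misses.
    #[test]
    fn test_buffer_pool_oversize_not_recycled() {
        let pool = BufferPool::new(4);
        let max_class = *SIZE_CLASSES.last().unwrap();
        let buf = pool.acquire(max_class + 1);
        assert_eq!(buf.len(), max_class + 1);
        drop(buf);
        // Dropping an unclassed buffer must not populate any free list.
        for list in pool.free_lists.iter() {
            assert!(list.lock().is_empty());
        }
        // A second oversize acquire is another miss, never a recycle.
        let _buf2 = pool.acquire(max_class + 1);
        assert_eq!(pool.misses(), 2);
        assert_eq!(pool.recycles(), 0);
    }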
}