use parking_lot::Mutex;
use std::collections::VecDeque;
use std::sync::Arc;
use thiserror::Error;

/// Errors produced by the buffer types in this module.
#[derive(Error, Debug)]
pub enum AllocationError {
    #[error("Buffer pool exhausted")]
    PoolExhausted,

    #[error("Invalid buffer size: {0}")]
    InvalidSize(usize),

    #[error("Buffer too small: required {required}, available {available}")]
    BufferTooSmall { required: usize, available: usize },
}

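/// A pool of reusable byte buffers guarded by a `parking_lot::Mutex`.
///
/// `acquire` never blocks on exhaustion: if no idle buffer is available, a
/// fresh one is allocated, and `max_buffers` only caps how many returned
/// buffers the pool retains.
///
/// Example (mirrors `test_buffer_pool` below; the import path depends on
/// your crate layout, so the snippet is not run as a doctest):
///
/// ```ignore
/// let pool = BufferPool::new(1024, 4);
/// let mut buf = pool.acquire();
/// buf.as_mut().extend_from_slice(&[1, 2, 3]);
/// assert_eq!(&*buf, &[1, 2, 3]);
/// drop(buf);                  // returns the buffer to the pool
/// assert_eq!(pool.size(), 1);
/// ```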
pub struct BufferPool {
    buffers: Arc<Mutex<VecDeque<Vec<u8>>>>,
    buffer_size: usize,
    max_buffers: usize,
}

impl BufferPool {
    /// Creates a pool that hands out buffers of `buffer_size` bytes and
    /// retains at most `max_buffers` returned buffers.
    pub fn new(buffer_size: usize, max_buffers: usize) -> Self {
        Self {
            buffers: Arc::new(Mutex::new(VecDeque::new())),
            buffer_size,
            max_buffers,
        }
    }

    /// Acquires a buffer, allocating a new one if the pool is empty.
    /// Reused buffers are cleared but keep their capacity.
    pub fn acquire(&self) -> PooledBuffer {
        let mut buffers = self.buffers.lock();

        let buffer = if let Some(mut buf) = buffers.pop_front() {
            buf.clear();
            buf.reserve(self.buffer_size);
            buf
        } else {
            Vec::with_capacity(self.buffer_size)
        };

        PooledBuffer {
            buffer,
            pool: Arc::clone(&self.buffers),
            max_buffers: self.max_buffers,
        }
    }

    /// Number of idle buffers currently held by the pool.
    pub fn size(&self) -> usize {
        self.buffers.lock().len()
    }

    /// The capacity, in bytes, that buffers are allocated with.
    pub fn buffer_size(&self) -> usize {
        self.buffer_size
    }
}

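/// An RAII guard over a pooled `Vec<u8>`.
///
/// Dereferences to `[u8]`; on drop, the buffer is returned to its pool
/// unless the pool already holds `max_buffers` idle buffers, in which case
/// it is simply freed.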
pub struct PooledBuffer {
    buffer: Vec<u8>,
    pool: Arc<Mutex<VecDeque<Vec<u8>>>>,
    max_buffers: usize,
}

impl PooledBuffer {
    /// Mutable access to the underlying `Vec<u8>`.
    #[allow(clippy::should_implement_trait)]
    pub fn as_mut(&mut self) -> &mut Vec<u8> {
        &mut self.buffer
    }

    /// The written bytes as a slice.
    #[allow(clippy::should_implement_trait)]
    pub fn as_ref(&self) -> &[u8] {
        &self.buffer
    }

    pub fn len(&self) -> usize {
        self.buffer.len()
    }

    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }
}

impl Drop for PooledBuffer {
    fn drop(&mut self) {
        let mut pool = self.pool.lock();
        if pool.len() < self.max_buffers {
            // Take the Vec out of `self`, leaving an empty (non-allocating)
            // Vec behind, and hand it back to the pool.
            let buffer = std::mem::take(&mut self.buffer);
            pool.push_back(buffer);
        }
    }
}

impl std::ops::Deref for PooledBuffer {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        &self.buffer
    }
}

impl std::ops::DerefMut for PooledBuffer {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buffer
    }
}

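/// A generic variant of [`BufferPool`] that pools `Vec<T>` instead of
/// `Vec<u8>`. `buffer_capacity` is measured in elements, not bytes.
///
/// Example (mirrors `test_typed_buffer_pool` below; the import path depends
/// on your crate layout, so the snippet is not run as a doctest):
///
/// ```ignore
/// let pool = TypedBufferPool::<f32>::new(100, 4);
/// let mut buf = pool.acquire();
/// buf.push(1.0);
/// buf.push(2.0);
/// assert_eq!(&*buf, &[1.0, 2.0]);
/// drop(buf);                  // returned to the pool
/// assert_eq!(pool.size(), 1);
/// ```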
pub struct TypedBufferPool<T> {
    buffers: Arc<Mutex<VecDeque<Vec<T>>>>,
    buffer_capacity: usize,
    max_buffers: usize,
}

impl<T> TypedBufferPool<T> {
    pub fn new(buffer_capacity: usize, max_buffers: usize) -> Self {
        Self {
            buffers: Arc::new(Mutex::new(VecDeque::new())),
            buffer_capacity,
            max_buffers,
        }
    }

    pub fn acquire(&self) -> TypedPooledBuffer<T> {
        let mut buffers = self.buffers.lock();

        let buffer = if let Some(mut buf) = buffers.pop_front() {
            buf.clear();
            buf.reserve(self.buffer_capacity);
            buf
        } else {
            Vec::with_capacity(self.buffer_capacity)
        };

        TypedPooledBuffer {
            buffer,
            pool: Arc::clone(&self.buffers),
            max_buffers: self.max_buffers,
        }
    }

    pub fn size(&self) -> usize {
        self.buffers.lock().len()
    }
}

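/// RAII guard over a pooled `Vec<T>`; the typed counterpart of
/// [`PooledBuffer`]. Dereferences to `[T]` and returns its buffer to the
/// pool on drop, subject to the same `max_buffers` cap.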
pub struct TypedPooledBuffer<T> {
    buffer: Vec<T>,
    pool: Arc<Mutex<VecDeque<Vec<T>>>>,
    max_buffers: usize,
}

impl<T> TypedPooledBuffer<T> {
    #[allow(clippy::should_implement_trait)]
    pub fn as_mut(&mut self) -> &mut Vec<T> {
        &mut self.buffer
    }

    #[allow(clippy::should_implement_trait)]
    pub fn as_ref(&self) -> &[T] {
        &self.buffer
    }

    pub fn len(&self) -> usize {
        self.buffer.len()
    }

    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }

    pub fn push(&mut self, value: T) {
        self.buffer.push(value);
    }

    pub fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        self.buffer.extend(iter);
    }
}

impl<T> Drop for TypedPooledBuffer<T> {
    fn drop(&mut self) {
        let mut pool = self.pool.lock();
        if pool.len() < self.max_buffers {
            let buffer = std::mem::take(&mut self.buffer);
            pool.push_back(buffer);
        }
    }
}

impl<T> std::ops::Deref for TypedPooledBuffer<T> {
    type Target = [T];

    fn deref(&self) -> &Self::Target {
        &self.buffer
    }
}

impl<T> std::ops::DerefMut for TypedPooledBuffer<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buffer
    }
}

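/// Zero-copy reinterpretation of slices between `Pod` types, built on
/// `bytemuck`. Note that `bytemuck::cast_slice` panics if the byte length
/// or alignment of the input does not fit the output element type, so
/// byte-to-`T` casts should only be applied to suitably produced data.
///
/// Example (mirrors `test_zero_copy_converter` below; the import path
/// depends on your crate layout, so the snippet is not run as a doctest):
///
/// ```ignore
/// let floats: Vec<f32> = vec![1.0, 2.0, 3.0, 4.0];
/// let bytes = ZeroCopyConverter::slice_to_bytes(&floats);
/// assert_eq!(bytes.len(), 16); // 4 floats x 4 bytes each
/// let back: &[f32] = ZeroCopyConverter::bytes_to_slice(bytes);
/// assert_eq!(back, floats.as_slice());
/// ```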
pub struct ZeroCopyConverter;

impl ZeroCopyConverter {
    /// Reinterprets `&[T]` as `&[U]` without copying.
    #[inline]
    pub fn cast_slice<T, U>(slice: &[T]) -> &[U]
    where
        T: bytemuck::Pod,
        U: bytemuck::Pod,
    {
        bytemuck::cast_slice(slice)
    }

    /// Mutable version of [`Self::cast_slice`].
    #[inline]
    pub fn cast_slice_mut<T, U>(slice: &mut [T]) -> &mut [U]
    where
        T: bytemuck::Pod,
        U: bytemuck::Pod,
    {
        bytemuck::cast_slice_mut(slice)
    }

    /// Views raw bytes as a slice of `T`.
    #[inline]
    pub fn bytes_to_slice<T: bytemuck::Pod>(bytes: &[u8]) -> &[T] {
        bytemuck::cast_slice(bytes)
    }

    /// Views a slice of `T` as its raw bytes.
    #[inline]
    pub fn slice_to_bytes<T: bytemuck::Pod>(slice: &[T]) -> &[u8] {
        bytemuck::cast_slice(slice)
    }
}

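/// A fixed-size byte buffer with a write cursor and no heap allocation.
///
/// Writes that would exceed the remaining capacity fail with
/// [`AllocationError::BufferTooSmall`] instead of spilling to the heap.
///
/// Example (mirrors `test_stack_buffer` below; the import path depends on
/// your crate layout, so the snippet is not run as a doctest):
///
/// ```ignore
/// let mut buf = StackBuffer::<64>::new();
/// buf.write(&[1, 2, 3]).unwrap();
/// assert_eq!(buf.as_slice(), &[1, 2, 3]);
/// assert_eq!(buf.remaining(), 61);
/// ```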
pub struct StackBuffer<const N: usize> {
    data: [u8; N],
    len: usize,
}

impl<const N: usize> StackBuffer<N> {
    #[inline]
    pub const fn new() -> Self {
        Self {
            data: [0u8; N],
            len: 0,
        }
    }

    #[inline]
    pub const fn capacity(&self) -> usize {
        N
    }

    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

    #[inline]
    pub const fn remaining(&self) -> usize {
        N - self.len
    }

    /// Appends `bytes`, failing with `BufferTooSmall` if they do not fit.
    #[inline]
    pub fn write(&mut self, bytes: &[u8]) -> Result<(), AllocationError> {
        if bytes.len() > self.remaining() {
            return Err(AllocationError::BufferTooSmall {
                required: bytes.len(),
                available: self.remaining(),
            });
        }

        self.data[self.len..self.len + bytes.len()].copy_from_slice(bytes);
        self.len += bytes.len();
        Ok(())
    }

    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        &self.data[..self.len]
    }

    #[inline]
    pub fn clear(&mut self) {
        self.len = 0;
    }
}

impl<const N: usize> Default for StackBuffer<N> {
    fn default() -> Self {
        Self::new()
    }
}

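/// A buffer that starts small and transparently upgrades to the heap.
///
/// Writes totalling at most 256 bytes stay in the fixed-capacity `Stack`
/// variant (a boxed [`StackBuffer<256>`], which keeps the enum itself
/// small); the first write that would overflow it copies the accumulated
/// bytes into a `Vec<u8>` (`Heap`), and subsequent writes append there.
///
/// Example (mirrors `test_adaptive_buffer_upgrade` below; the import path
/// depends on your crate layout, so the snippet is not run as a doctest):
///
/// ```ignore
/// let mut buf = AdaptiveBuffer::new(10);   // small hint: Stack variant
/// buf.write(&[1; 100]).unwrap();
/// buf.write(&[2; 200]).unwrap();           // 300 bytes: upgrades to Heap
/// assert!(matches!(buf, AdaptiveBuffer::Heap(_)));
/// assert_eq!(buf.len(), 300);
/// ```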
pub enum AdaptiveBuffer {
    Stack(Box<StackBuffer<256>>),
    Heap(Vec<u8>),
}

impl AdaptiveBuffer {
    #[inline]
    pub fn new(size_hint: usize) -> Self {
        if size_hint <= 256 {
            Self::Stack(Box::default())
        } else {
            Self::Heap(Vec::with_capacity(size_hint))
        }
    }

    pub fn write(&mut self, bytes: &[u8]) -> Result<(), AllocationError> {
        match self {
            Self::Stack(buf) => {
                if buf.remaining() >= bytes.len() {
                    buf.write(bytes)
                } else {
                    // Overflow: copy the existing contents plus the new
                    // bytes into a heap Vec and upgrade in place.
                    let mut heap = Vec::with_capacity(buf.len() + bytes.len());
                    heap.extend_from_slice(buf.as_slice());
                    heap.extend_from_slice(bytes);
                    *self = Self::Heap(heap);
                    Ok(())
                }
            }
            Self::Heap(vec) => {
                vec.extend_from_slice(bytes);
                Ok(())
            }
        }
    }

    pub fn as_slice(&self) -> &[u8] {
        match self {
            Self::Stack(buf) => buf.as_slice(),
            Self::Heap(vec) => vec.as_slice(),
        }
    }

    pub fn len(&self) -> usize {
        match self {
            Self::Stack(buf) => buf.len(),
            Self::Heap(vec) => vec.len(),
        }
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_buffer_pool() {
        let pool = BufferPool::new(1024, 4);

        let mut buffer1 = pool.acquire();
        buffer1.as_mut().extend_from_slice(&[1, 2, 3]);
        assert_eq!(buffer1.len(), 3);

        drop(buffer1);

        assert_eq!(pool.size(), 1);

        let buffer2 = pool.acquire();
        assert_eq!(buffer2.len(), 0);
    }

    #[test]
    fn test_typed_buffer_pool() {
        let pool = TypedBufferPool::<f32>::new(100, 4);

        let mut buffer1 = pool.acquire();
        buffer1.push(1.0);
        buffer1.push(2.0);
        assert_eq!(buffer1.len(), 2);

        drop(buffer1);

        assert_eq!(pool.size(), 1);

        let buffer2 = pool.acquire();
        assert_eq!(buffer2.len(), 0);
    }

    #[test]
    fn test_zero_copy_converter() {
        let floats: Vec<f32> = vec![1.0, 2.0, 3.0, 4.0];
        let bytes = ZeroCopyConverter::slice_to_bytes(&floats);
        assert_eq!(bytes.len(), 16);

        let floats_back: &[f32] = ZeroCopyConverter::bytes_to_slice(bytes);
        assert_eq!(floats_back, &floats);
    }

    #[test]
    fn test_stack_buffer() {
        let mut buf = StackBuffer::<64>::new();
        assert_eq!(buf.capacity(), 64);
        assert_eq!(buf.len(), 0);

        buf.write(&[1, 2, 3]).unwrap();
        assert_eq!(buf.len(), 3);
        assert_eq!(buf.as_slice(), &[1, 2, 3]);

        buf.clear();
        assert_eq!(buf.len(), 0);
    }

    #[test]
    fn test_stack_buffer_overflow() {
        let mut buf = StackBuffer::<4>::new();
        assert!(buf.write(&[1, 2, 3, 4]).is_ok());
        assert!(buf.write(&[5]).is_err());
    }

    #[test]
    fn test_adaptive_buffer_small() {
        let mut buf = AdaptiveBuffer::new(10);
        buf.write(&[1, 2, 3]).unwrap();

        assert!(matches!(buf, AdaptiveBuffer::Stack(_)));
        assert_eq!(buf.as_slice(), &[1, 2, 3]);
    }

    #[test]
    fn test_adaptive_buffer_large() {
        let mut buf = AdaptiveBuffer::new(512);
        buf.write(&[1, 2, 3]).unwrap();

        assert!(matches!(buf, AdaptiveBuffer::Heap(_)));
        assert_eq!(buf.as_slice(), &[1, 2, 3]);
    }

    #[test]
    fn test_adaptive_buffer_upgrade() {
        let mut buf = AdaptiveBuffer::new(10);
        buf.write(&[1; 100]).unwrap();
        buf.write(&[2; 200]).unwrap();

        assert!(matches!(buf, AdaptiveBuffer::Heap(_)));
        assert_eq!(buf.len(), 300);
    }

    #[test]
    fn test_pooled_buffer_deref() {
        let pool = BufferPool::new(1024, 4);
        let mut buffer = pool.acquire();

        buffer.as_mut().extend_from_slice(&[1, 2, 3, 4]);

        assert_eq!(buffer[0], 1);
        assert_eq!(buffer[3], 4);
    }
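
    // Retention cap: `max_buffers` bounds how many returned buffers the
    // pool keeps, so dropping more buffers than the cap frees the surplus
    // rather than pooling it.
    #[test]
    fn test_pool_retention_capped() {
        let pool = BufferPool::new(1024, 4);

        // Hold six buffers at once so none are returned before the drop.
        let buffers: Vec<_> = (0..6).map(|_| pool.acquire()).collect();
        drop(buffers);

        // Only `max_buffers` (4) of the six buffers are retained.
        assert_eq!(pool.size(), 4);
    }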
}