use std::{
    mem,
    sync::{
        atomic::{AtomicUsize, Ordering},
        Arc, Weak,
    },
};

use crossbeam_queue::SegQueue;

const DEFAULT_BUFFER_CAPACITY: usize = 1024;

/// A pooled, fixed-capacity byte buffer.
///
/// `available` tracks how many leading bytes of `data` are currently in use.
/// When the buffer is dropped, its backing storage is returned to the owning
/// pool if that pool is still alive.
#[derive(Debug)]
pub struct Buffer {
    data: Box<[u8]>,
    available: usize,
    pool: Weak<BufferPool>,
}

impl Buffer {
    fn new(data: Box<[u8]>, pool: &Arc<BufferPool>) -> Self {
        let available = data.len();

        Self {
            data,
            available,
            pool: Arc::downgrade(pool),
        }
    }

    fn new_with_available(data: Box<[u8]>, pool: &Arc<BufferPool>, available: usize) -> Self {
        let available = available.min(data.len());

        Self {
            data,
            available,
            pool: Arc::downgrade(pool),
        }
    }

    /// Total size of the backing storage in bytes.
    pub fn capacity(&self) -> usize {
        self.data.len()
    }

    /// Number of bytes currently in use.
    pub fn len(&self) -> usize {
        self.available
    }

    pub fn is_empty(&self) -> bool {
        self.available == 0
    }

    /// The in-use portion of the buffer.
    pub fn data(&self) -> &[u8] {
        &self.data[..self.available]
    }

    /// Mutable access to the in-use portion of the buffer.
    pub fn data_mut(&mut self) -> &mut [u8] {
        &mut self.data[..self.available]
    }

    /// Sets the in-use length, clamped to the buffer's capacity.
    pub fn resize(&mut self, len: usize) {
        self.available = len.min(self.capacity());
    }
}

impl Drop for Buffer {
    fn drop(&mut self) {
        // Hand the backing storage back to the pool, but only while the pool
        // is alive and its configured capacity still matches this buffer.
        // Recycling unconditionally would re-enter `drop` without end whenever
        // `recycle_buffer` rejects a buffer after a capacity change.
        if let Some(pool) = self.pool.upgrade() {
            if self.data.len() == pool.get_buffer_capacity() {
                pool.recycle_buffer(Arc::new(Buffer::new(mem::take(&mut self.data), &pool)));
            }
        }
    }
}

/// A lock-free pool of reusable byte buffers.
///
/// Buffers are handed out as `Arc<Buffer>` and return themselves to the pool
/// when the last reference is dropped.
pub struct BufferPool {
    queue: SegQueue<Arc<Buffer>>,
    buffer_capacity: AtomicUsize,
}

impl BufferPool {
    /// Creates a pool whose buffers hold `buffer_capacity` bytes; a capacity
    /// of zero falls back to `DEFAULT_BUFFER_CAPACITY`. The pool is returned
    /// inside an `Arc` so that buffers can keep a weak back-reference to it.
    pub fn new(buffer_capacity: usize) -> Arc<Self> {
        let buffer_capacity = if buffer_capacity == 0 {
            DEFAULT_BUFFER_CAPACITY
        } else {
            buffer_capacity
        };

        Arc::new(Self {
            queue: SegQueue::new(),
            buffer_capacity: AtomicUsize::new(buffer_capacity),
        })
    }

    /// Number of recycled buffers currently waiting in the pool.
    pub fn available(&self) -> usize {
        self.queue.len()
    }

    /// Returns a buffer sized to the pool's current capacity, reusing a
    /// recycled buffer when possible and allocating a new one otherwise.
    pub fn get_buffer(self: &Arc<Self>) -> Arc<Buffer> {
        let buffer_capacity = self.buffer_capacity.load(Ordering::Acquire);
        if let Some(mut buffer) = self.queue.pop() {
            // Reuse the pooled buffer only if it still matches the configured
            // capacity and nothing else holds a reference to it.
            if buffer_capacity == buffer.capacity() {
                if let Some(buffer_mut) = Arc::get_mut(&mut buffer) {
                    buffer_mut.resize(buffer_capacity);
                    return buffer;
                }
            }
        }

        Arc::new(Buffer::new(vec![0u8; buffer_capacity].into_boxed_slice(), self))
    }

    /// Returns a buffer with its in-use length set to `len`, growing the
    /// pool's buffer capacity first if `len` exceeds it.
    pub fn get_buffer_with_length(self: &Arc<Self>, len: usize) -> Arc<Buffer> {
        let mut buffer_capacity = self.buffer_capacity.load(Ordering::Acquire);

        if len > buffer_capacity {
            self.set_buffer_capacity(len);
            buffer_capacity = len;
        }

        if let Some(mut buffer) = self.queue.pop() {
            if buffer_capacity == buffer.capacity() {
                if let Some(buffer_mut) = Arc::get_mut(&mut buffer) {
                    buffer_mut.resize(len);
                    return buffer;
                }
            }
        }

        Arc::new(Buffer::new_with_available(
            vec![0u8; buffer_capacity].into_boxed_slice(),
            self,
            len,
        ))
    }

    /// Returns a buffer to the pool. Buffers whose capacity no longer matches
    /// the pool's configuration are discarded instead of being queued.
    pub fn recycle_buffer(&self, buffer: Arc<Buffer>) {
        if buffer.capacity() == self.buffer_capacity.load(Ordering::Acquire) {
            self.queue.push(buffer);
        }
    }

    pub fn get_buffer_capacity(&self) -> usize {
        self.buffer_capacity.load(Ordering::Relaxed)
    }

    /// Sets the capacity for future buffers and drains any pooled buffers
    /// that were allocated with the old capacity.
    pub fn set_buffer_capacity(&self, buffer_capacity: usize) {
        self.buffer_capacity.store(buffer_capacity, Ordering::Release);
        while self.queue.pop().is_some() {}
    }
}
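
// A minimal usage sketch, assuming this file is built as part of a crate that
// depends on `crossbeam-queue`. The test names and scenarios below are
// illustrative additions, not part of the original API.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn buffer_is_recycled_on_drop() {
        let pool = BufferPool::new(16);
        assert_eq!(pool.available(), 0);

        // Dropping the only strong reference hands the storage back to the pool.
        let buffer = pool.get_buffer();
        assert_eq!(buffer.capacity(), 16);
        drop(buffer);
        assert_eq!(pool.available(), 1);

        // The recycled buffer is reused for the next request.
        let buffer = pool.get_buffer();
        assert_eq!(pool.available(), 0);
        assert_eq!(buffer.len(), 16);
    }

    #[test]
    fn capacity_change_drains_stale_buffers() {
        let pool = BufferPool::new(16);
        drop(pool.get_buffer());
        assert_eq!(pool.available(), 1);

        // Changing the capacity clears the queue, so stale buffers are not reused.
        pool.set_buffer_capacity(32);
        assert_eq!(pool.available(), 0);
        assert_eq!(pool.get_buffer().capacity(), 32);
    }
}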