// polyfill_rs/buffer_pool.rs

1//! Buffer pooling for reducing allocation overhead
2//!
3//! This module provides a buffer pool for reusing memory allocations
//! across multiple HTTP requests, reducing allocator pressure and improving performance.
5
6use std::sync::Arc;
7use tokio::sync::Mutex;
8
/// A pool of reusable buffers for HTTP response bodies
pub struct BufferPool {
    // Shared stack of idle buffers. tokio's async Mutex is used because the
    // pool is accessed from async request handlers.
    buffers: Arc<Mutex<Vec<Vec<u8>>>>,
    // Capacity each freshly allocated buffer starts with.
    buffer_size: usize,
    // Upper bound on how many idle buffers are retained in the pool.
    max_pool_size: usize,
}
15
16impl BufferPool {
17    /// Create a new buffer pool
18    /// 
19    /// # Arguments
20    /// * `buffer_size` - Initial size of each buffer (e.g., 512KB for typical market data)
21    /// * `max_pool_size` - Maximum number of buffers to keep in the pool
22    pub fn new(buffer_size: usize, max_pool_size: usize) -> Self {
23        Self {
24            buffers: Arc::new(Mutex::new(Vec::with_capacity(max_pool_size))),
25            buffer_size,
26            max_pool_size,
27        }
28    }
29
30    /// Get a buffer from the pool, or create a new one if pool is empty
31    pub async fn get(&self) -> Vec<u8> {
32        let mut buffers = self.buffers.lock().await;
33        
34        match buffers.pop() {
35            Some(mut buffer) => {
36                buffer.clear();
37                buffer
38            }
39            None => {
40                // Pool is empty, create a new buffer
41                Vec::with_capacity(self.buffer_size)
42            }
43        }
44    }
45
46    /// Return a buffer to the pool
47    pub async fn return_buffer(&self, mut buffer: Vec<u8>) {
48        let mut buffers = self.buffers.lock().await;
49        
50        // Only return to pool if we're under the size limit
51        if buffers.len() < self.max_pool_size {
52            buffer.clear();
53            // Shrink if buffer grew too large
54            if buffer.capacity() > self.buffer_size * 2 {
55                buffer.shrink_to(self.buffer_size);
56            }
57            buffers.push(buffer);
58        }
59        // Otherwise, let the buffer be dropped
60    }
61
62    /// Get the current number of buffers in the pool
63    pub async fn size(&self) -> usize {
64        let buffers = self.buffers.lock().await;
65        buffers.len()
66    }
67
68    /// Pre-allocate buffers in the pool
69    pub async fn prewarm(&self, count: usize) {
70        let mut buffers = self.buffers.lock().await;
71        for _ in 0..count.min(self.max_pool_size) {
72            buffers.push(Vec::with_capacity(self.buffer_size));
73        }
74    }
75}
76
77impl Default for BufferPool {
78    fn default() -> Self {
79        // Default: 512KB buffers, pool of 10
80        Self::new(512 * 1024, 10)
81    }
82}
83
#[cfg(test)]
mod tests {
    use super::*;

    /// A buffer fetched from an empty pool has at least the configured
    /// capacity, and returning it makes the pool hold exactly one buffer.
    #[tokio::test]
    async fn test_buffer_pool_get_and_return() {
        let pool = BufferPool::new(1024, 5);

        let buffer = pool.get().await;
        // `Vec::with_capacity(n)` guarantees *at least* n, not exactly n,
        // so asserting equality was fragile; check the lower bound instead.
        assert!(buffer.capacity() >= 1024);

        pool.return_buffer(buffer).await;
        assert_eq!(pool.size().await, 1);
    }

    /// Prewarming an empty pool creates exactly the requested count.
    #[tokio::test]
    async fn test_buffer_pool_prewarm() {
        let pool = BufferPool::new(1024, 5);
        pool.prewarm(3).await;
        assert_eq!(pool.size().await, 3);
    }

    /// Returning more buffers than `max_pool_size` drops the excess.
    #[tokio::test]
    async fn test_buffer_pool_max_size() {
        let pool = BufferPool::new(1024, 2);

        let buf1 = pool.get().await;
        let buf2 = pool.get().await;
        let buf3 = pool.get().await;

        pool.return_buffer(buf1).await;
        pool.return_buffer(buf2).await;
        pool.return_buffer(buf3).await; // This should be dropped, not added to pool

        assert_eq!(pool.size().await, 2); // Max size is 2
    }
}
121