// adze_stack_pool_core — lib.rs
//! Small stack pool used by the parser to amortize allocations.
#![forbid(unsafe_op_in_unsafe_fn)]
#![deny(missing_docs)]
#![cfg_attr(feature = "strict_api", deny(unreachable_pub))]
#![cfg_attr(not(feature = "strict_api"), warn(unreachable_pub))]
#![cfg_attr(feature = "strict_docs", deny(missing_docs))]
#![cfg_attr(not(feature = "strict_docs"), allow(missing_docs))]

use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;

/// A pool of reusable stacks to reduce allocation overhead.
///
/// Stacks are handed out by [`StackPool::acquire`] and returned with
/// [`StackPool::release`]; returned stacks are cleared before being cached,
/// so no stale elements leak between users. Interior mutability is provided
/// by `RefCell`, which makes the pool single-threaded (`!Sync`); see the
/// thread-local helpers at the bottom of this module for per-thread use.
///
/// # Examples
///
/// ```
/// use adze_stack_pool_core::StackPool;
///
/// let pool: StackPool<u32> = StackPool::new(4);
/// let mut stack = pool.acquire();
/// stack.push(42);
/// pool.release(stack);
///
/// let reused = pool.acquire();
/// assert!(reused.is_empty()); // cleared on reuse
/// assert_eq!(pool.stats().reuse_count, 1);
/// ```
pub struct StackPool<T: Clone> {
    /// Pool of available stacks ready for reuse (FIFO: acquire pops the
    /// front, release pushes the back).
    available: RefCell<VecDeque<Vec<T>>>,
    /// Maximum number of stacks to keep in the pool; releases beyond this
    /// limit drop the stack instead of caching it.
    max_pool_size: usize,
    /// Statistics for monitoring pool performance.
    stats: RefCell<PoolStats>,
}
37
/// Statistics for stack pool usage.
///
/// All counters are monotonically increasing until [`StackPool::reset_stats`]
/// zeroes them. In the current implementation `pool_hits` and `reuse_count`
/// are incremented together and therefore track the same value.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub struct PoolStats {
    /// Total number of stacks allocated (incremented on every pool miss).
    pub total_allocations: usize,
    /// Number of times a pooled stack was reused.
    pub reuse_count: usize,
    /// Number of direct pool hits.
    pub pool_hits: usize,
    /// Number of misses requiring new allocation.
    pub pool_misses: usize,
    /// Maximum observed pool depth (high-water mark of cached stacks).
    pub max_pool_depth: usize,
}
52
53impl<T: Clone> std::fmt::Debug for StackPool<T> {
54    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
55        f.debug_struct("StackPool")
56            .field("available", &self.available.borrow().len())
57            .field("max_pool_size", &self.max_pool_size)
58            .field("stats", &*self.stats.borrow())
59            .finish()
60    }
61}
62
63impl<T: Clone> StackPool<T> {
64    /// Create a new stack pool with the specified maximum size.
65    ///
66    /// # Examples
67    ///
68    /// ```
69    /// use adze_stack_pool_core::StackPool;
70    ///
71    /// let pool: StackPool<i32> = StackPool::new(8);
72    /// assert_eq!(pool.stats().total_allocations, 0);
73    /// ```
74    #[must_use]
75    pub fn new(max_pool_size: usize) -> Self {
76        StackPool {
77            available: RefCell::new(VecDeque::with_capacity(max_pool_size)),
78            max_pool_size,
79            stats: RefCell::new(PoolStats::default()),
80        }
81    }
82
83    /// Acquire a stack from the pool, or allocate a new one if pool is empty.
84    ///
85    /// # Examples
86    ///
87    /// ```
88    /// use adze_stack_pool_core::StackPool;
89    ///
90    /// let pool: StackPool<u32> = StackPool::new(4);
91    /// let stack = pool.acquire();
92    /// assert_eq!(stack.capacity(), 256);
93    /// assert_eq!(pool.stats().pool_misses, 1);
94    /// ```
95    #[must_use]
96    pub fn acquire(&self) -> Vec<T> {
97        let mut pool = self.available.borrow_mut();
98        let mut stats = self.stats.borrow_mut();
99
100        if let Some(mut stack) = pool.pop_front() {
101            stack.clear();
102            stats.pool_hits += 1;
103            stats.reuse_count += 1;
104            stack
105        } else {
106            stats.pool_misses += 1;
107            stats.total_allocations += 1;
108            Vec::with_capacity(256)
109        }
110    }
111
112    /// Acquire a stack with at least the requested capacity.
113    #[must_use]
114    pub fn acquire_with_capacity(&self, capacity: usize) -> Vec<T> {
115        let mut pool = self.available.borrow_mut();
116        let mut stats = self.stats.borrow_mut();
117
118        if let Some(pos) = pool.iter().position(|s| s.capacity() >= capacity) {
119            let mut stack = pool.remove(pos).unwrap();
120            stack.clear();
121            stats.pool_hits += 1;
122            stats.reuse_count += 1;
123            stack
124        } else {
125            stats.pool_misses += 1;
126            stats.total_allocations += 1;
127            Vec::with_capacity(capacity)
128        }
129    }
130
131    /// Return a stack to the pool for reuse.
132    ///
133    /// # Examples
134    ///
135    /// ```
136    /// use adze_stack_pool_core::StackPool;
137    ///
138    /// let pool: StackPool<u32> = StackPool::new(4);
139    /// let stack = pool.acquire();
140    /// pool.release(stack);
141    /// assert_eq!(pool.stats().max_pool_depth, 1);
142    /// ```
143    pub fn release(&self, mut stack: Vec<T>) {
144        let mut pool = self.available.borrow_mut();
145
146        if stack.capacity() <= 4096 && pool.len() < self.max_pool_size {
147            stack.clear();
148            pool.push_back(stack);
149
150            let mut stats = self.stats.borrow_mut();
151            stats.max_pool_depth = stats.max_pool_depth.max(pool.len());
152        }
153    }
154
155    /// Clone a stack, potentially using a pooled stack for the destination.
156    ///
157    /// # Examples
158    ///
159    /// ```
160    /// use adze_stack_pool_core::StackPool;
161    ///
162    /// let pool: StackPool<u32> = StackPool::new(4);
163    /// let original = vec![1, 2, 3];
164    /// let cloned = pool.clone_stack(&original);
165    /// assert_eq!(cloned, vec![1, 2, 3]);
166    /// ```
167    #[must_use]
168    pub fn clone_stack(&self, source: &[T]) -> Vec<T> {
169        let mut dest = self.acquire_with_capacity(source.len());
170        dest.extend_from_slice(source);
171        dest
172    }
173
174    /// Get current pool statistics.
175    ///
176    /// # Examples
177    ///
178    /// ```
179    /// use adze_stack_pool_core::StackPool;
180    ///
181    /// let pool: StackPool<u32> = StackPool::new(4);
182    /// let _ = pool.acquire();
183    /// let stats = pool.stats();
184    /// assert_eq!(stats.total_allocations, 1);
185    /// assert_eq!(stats.pool_misses, 1);
186    /// ```
187    #[must_use]
188    pub fn stats(&self) -> PoolStats {
189        *self.stats.borrow()
190    }
191
192    /// Reset statistics.
193    pub fn reset_stats(&self) {
194        *self.stats.borrow_mut() = PoolStats::default();
195    }
196
197    /// Clear the pool, releasing all cached stacks.
198    pub fn clear(&self) {
199        self.available.borrow_mut().clear();
200    }
201}
202
thread_local! {
    /// Thread-local stack pool for single-threaded parsing.
    ///
    /// `None` until [`init_thread_local_pool`] runs or
    /// [`get_thread_local_pool`] lazily installs a default pool. `Rc` (not
    /// `Arc`) is intentional: the pool never crosses threads.
    static STACK_POOL: RefCell<Option<Rc<StackPool<u32>>>> = const { RefCell::new(None) };
}
207
208/// Initialize the thread-local stack pool.
209pub fn init_thread_local_pool(max_size: usize) {
210    STACK_POOL.with(|pool| {
211        *pool.borrow_mut() = Some(Rc::new(StackPool::new(max_size)));
212    });
213}
214
215/// Get the thread-local stack pool, initializing if necessary.
216#[must_use]
217pub fn get_thread_local_pool() -> Rc<StackPool<u32>> {
218    STACK_POOL.with(|pool| {
219        let mut pool_ref = pool.borrow_mut();
220        if pool_ref.is_none() {
221            *pool_ref = Some(Rc::new(StackPool::new(64)));
222        }
223
224        pool_ref.as_ref().unwrap().clone()
225    })
226}
227
#[cfg(test)]
mod tests {
    use super::*;

    // Release then reacquire must return an emptied stack and count one hit.
    #[test]
    fn pool_tracks_reuse_via_release_and_reacquire() {
        let pool: StackPool<u32> = StackPool::new(2);

        let mut stack = pool.acquire();
        stack.push(1);
        pool.release(stack);

        let reused = pool.acquire();
        assert!(reused.is_empty());
        assert_eq!(pool.stats().pool_hits, 1);
        assert_eq!(pool.stats().reuse_count, 1);
    }

    // First-fit scan should skip the too-small stack and reuse the larger one.
    #[test]
    fn acquires_with_capacity_can_reuse_matching_or_larger_stack() {
        let pool: StackPool<u32> = StackPool::new(2);

        let stack_small = Vec::with_capacity(16);
        let stack_medium = Vec::with_capacity(128);

        pool.release(stack_small);
        pool.release(stack_medium);

        let acquired = pool.acquire_with_capacity(64);
        assert!(acquired.capacity() >= 128);

        let stats = pool.stats();
        assert_eq!(stats.pool_hits, 1);
    }

    // Stacks above the 4096-capacity retention limit must not be pooled.
    #[test]
    fn pool_ignores_oversized_stacks() {
        let pool: StackPool<u32> = StackPool::new(1);

        let oversized = vec![0u32; 4097];
        pool.release(oversized);

        assert_eq!(pool.stats().max_pool_depth, 0);
    }

    // Runs on its own test thread, so the thread-local pool starts fresh here.
    #[test]
    fn thread_local_pool_defaults_and_reuses() {
        init_thread_local_pool(3);

        let pool = get_thread_local_pool();
        let stack = pool.acquire();
        assert_eq!(stack.capacity(), 256);

        pool.release(stack);

        let stats = pool.stats();
        assert_eq!(stats.total_allocations, 1);
    }

    #[test]
    fn clone_stack_copies_contents() {
        let pool: StackPool<u32> = StackPool::new(4);

        let original = vec![1, 2, 3, 4];
        let cloned = pool.clone_stack(&original);

        assert_eq!(cloned, original);
    }

    // --- Mutation-catching tests ---
    // These pin exact boundary values and counter sequences so that a mutated
    // comparison operator or dropped increment fails loudly.

    // capacity == 4096 is inside the retention limit (`<=`, not `<`).
    #[test]
    fn release_accepts_stack_at_capacity_boundary() {
        let pool: StackPool<u32> = StackPool::new(2);
        let stack: Vec<u32> = Vec::with_capacity(4096);
        pool.release(stack);
        assert_eq!(pool.stats().max_pool_depth, 1);
    }

    // capacity == 4097 is just past the limit and must be dropped.
    #[test]
    fn release_rejects_stack_just_over_capacity_boundary() {
        let pool: StackPool<u32> = StackPool::new(2);
        let stack: Vec<u32> = Vec::with_capacity(4097);
        pool.release(stack);
        assert_eq!(pool.stats().max_pool_depth, 0);
    }

    // A second release past max_pool_size must be dropped, not cached.
    #[test]
    fn pool_full_rejects_additional_release() {
        let pool: StackPool<u32> = StackPool::new(1);
        pool.release(Vec::with_capacity(8));
        pool.release(Vec::with_capacity(8));
        assert_eq!(pool.stats().max_pool_depth, 1);
    }

    // reset_stats must zero every field, including max_pool_depth.
    #[test]
    fn reset_stats_zeroes_all_fields() {
        let pool: StackPool<u32> = StackPool::new(4);
        let s = pool.acquire();
        pool.release(s);
        let _ = pool.acquire();

        pool.reset_stats();
        let stats = pool.stats();
        assert_eq!(stats.total_allocations, 0);
        assert_eq!(stats.reuse_count, 0);
        assert_eq!(stats.pool_hits, 0);
        assert_eq!(stats.pool_misses, 0);
        assert_eq!(stats.max_pool_depth, 0);
    }

    #[test]
    fn acquire_from_empty_pool_always_misses() {
        let pool: StackPool<u32> = StackPool::new(4);
        let _ = pool.acquire();
        assert_eq!(pool.stats().pool_misses, 1);
        assert_eq!(pool.stats().pool_hits, 0);
        assert_eq!(pool.stats().total_allocations, 1);
    }

    #[test]
    fn acquire_with_capacity_from_empty_pool_misses() {
        let pool: StackPool<u32> = StackPool::new(4);
        let s = pool.acquire_with_capacity(64);
        assert!(s.capacity() >= 64);
        assert_eq!(pool.stats().pool_misses, 1);
        assert_eq!(pool.stats().pool_hits, 0);
    }

    // After clear() the next acquire must miss (the cache really emptied).
    #[test]
    fn clear_empties_the_pool() {
        let pool: StackPool<u32> = StackPool::new(4);
        let s1 = pool.acquire();
        let s2 = pool.acquire();
        pool.release(s1);
        pool.release(s2);
        pool.clear();
        pool.reset_stats();

        let _ = pool.acquire();
        assert_eq!(pool.stats().pool_hits, 0);
        assert_eq!(pool.stats().pool_misses, 1);
    }

    #[test]
    fn default_acquire_capacity_is_256() {
        let pool: StackPool<u32> = StackPool::new(4);
        let s = pool.acquire();
        assert_eq!(s.capacity(), 256);
    }
}