// oxihuman_core/thread_local_pool.rs
#![allow(dead_code)]
4
5use std::collections::VecDeque;
8
#[allow(dead_code)]
/// A single-owner free-list pool that recycles `T` values to avoid repeated
/// allocation. It performs no internal synchronization, so it is intended to
/// be owned by one thread (e.g. inside a `thread_local!`).
///
/// Invariant: the pool never holds more than `capacity` idle objects.
/// Releases beyond that limit drop the object and bump `overflow_count`.
pub struct ThreadLocalPool<T> {
    /// Idle objects available for reuse, handed out FIFO.
    free: VecDeque<T>,
    /// Maximum number of idle objects retained.
    capacity: usize,
    /// Total number of `acquire` calls (pool hits and fresh creations alike).
    alloc_count: u64,
    /// Number of objects successfully returned to the pool by `release`.
    recycle_count: u64,
    /// Number of objects dropped by `release` because the pool was full.
    overflow_count: u64,
}

#[allow(dead_code)]
impl<T> ThreadLocalPool<T> {
    /// Creates an empty pool that retains at most `capacity` idle objects.
    pub fn new(capacity: usize) -> Self {
        Self {
            free: VecDeque::with_capacity(capacity),
            capacity,
            alloc_count: 0,
            recycle_count: 0,
            overflow_count: 0,
        }
    }

    /// Pre-populates the pool with up to `count` objects built by `factory`.
    ///
    /// Stops as soon as the pool holds `capacity` idle objects, so the
    /// capacity invariant is preserved even when the pool is already
    /// partially filled when `warm_up` is called.
    pub fn warm_up(&mut self, count: usize, factory: impl Fn() -> T) {
        for _ in 0..count {
            // Re-check the live fill level each iteration rather than
            // clamping `count` up front; `free` may be non-empty on entry.
            if self.free.len() >= self.capacity {
                break;
            }
            self.free.push_back(factory());
        }
    }

    /// Takes the oldest pooled object if one is available, otherwise builds
    /// a fresh one with `factory`. Every call is counted in `alloc_count`.
    pub fn acquire(&mut self, factory: impl FnOnce() -> T) -> T {
        self.alloc_count += 1;
        self.free.pop_front().unwrap_or_else(factory)
    }

    /// Returns `obj` to the pool for later reuse.
    ///
    /// If the pool is already at capacity, `obj` is dropped and
    /// `overflow_count` is incremented instead.
    pub fn release(&mut self, obj: T) {
        if self.free.len() < self.capacity {
            self.free.push_back(obj);
            self.recycle_count += 1;
        } else {
            self.overflow_count += 1;
        }
    }

    /// Number of idle objects currently held.
    pub fn free_count(&self) -> usize {
        self.free.len()
    }

    /// Maximum number of idle objects this pool retains.
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Total number of `acquire` calls so far.
    pub fn alloc_count(&self) -> u64 {
        self.alloc_count
    }

    /// Number of objects returned to the pool by `release`.
    pub fn recycle_count(&self) -> u64 {
        self.recycle_count
    }

    /// Number of objects dropped by `release` because the pool was full.
    pub fn overflow_count(&self) -> u64 {
        self.overflow_count
    }

    /// `true` when no idle objects are held.
    pub fn is_empty(&self) -> bool {
        self.free.is_empty()
    }

    /// `true` when the pool holds `capacity` (or more) idle objects.
    pub fn is_full(&self) -> bool {
        self.free.len() >= self.capacity
    }

    /// Removes and returns every idle object, leaving the pool empty.
    /// Counters are not reset.
    pub fn drain(&mut self) -> Vec<T> {
        self.free.drain(..).collect()
    }
}
89
90pub fn new_thread_local_pool<T>(capacity: usize) -> ThreadLocalPool<T> {
91 ThreadLocalPool::new(capacity)
92}
93
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn new_pool_empty() {
        let pool: ThreadLocalPool<i32> = new_thread_local_pool(4);
        assert_eq!(pool.free_count(), 0);
        assert!(pool.is_empty());
    }

    #[test]
    fn acquire_creates_when_empty() {
        let mut pool: ThreadLocalPool<i32> = new_thread_local_pool(4);
        // Empty pool: the factory supplies the value.
        assert_eq!(pool.acquire(|| 42), 42);
        assert_eq!(pool.alloc_count(), 1);
    }

    #[test]
    fn release_and_reuse() {
        let mut pool: ThreadLocalPool<i32> = new_thread_local_pool(4);
        pool.release(99);
        // The recycled value, not the factory's, comes back out.
        assert_eq!(pool.acquire(|| 0), 99);
        assert_eq!(pool.recycle_count(), 1);
    }

    #[test]
    fn overflow_when_full() {
        let mut pool: ThreadLocalPool<i32> = new_thread_local_pool(1);
        pool.release(1);
        pool.release(2);
        assert_eq!(pool.overflow_count(), 1);
        assert_eq!(pool.free_count(), 1);
    }

    #[test]
    fn warm_up_fills_pool() {
        let mut pool: ThreadLocalPool<Vec<u8>> = new_thread_local_pool(4);
        pool.warm_up(3, || Vec::new());
        assert_eq!(pool.free_count(), 3);
    }

    #[test]
    fn capacity_respected() {
        let pool: ThreadLocalPool<i32> = new_thread_local_pool(8);
        assert_eq!(pool.capacity(), 8);
    }

    #[test]
    fn is_full_detection() {
        let mut pool: ThreadLocalPool<i32> = new_thread_local_pool(2);
        for v in [1, 2] {
            pool.release(v);
        }
        assert!(pool.is_full());
    }

    #[test]
    fn drain_empties_pool() {
        let mut pool: ThreadLocalPool<i32> = new_thread_local_pool(4);
        pool.release(1);
        pool.release(2);
        assert_eq!(pool.drain().len(), 2);
        assert!(pool.is_empty());
    }

    #[test]
    fn warm_up_capped_at_capacity() {
        let mut pool: ThreadLocalPool<i32> = new_thread_local_pool(2);
        // Requesting more than capacity must clamp to capacity.
        pool.warm_up(10, || 0);
        assert_eq!(pool.free_count(), 2);
    }

    #[test]
    fn alloc_count_increments() {
        let mut pool: ThreadLocalPool<i32> = new_thread_local_pool(4);
        pool.acquire(|| 1);
        pool.acquire(|| 2);
        assert_eq!(pool.alloc_count(), 2);
    }
}