syncpool/lib.rs
//! # SyncPool
//! A simple and thread-safe object pool for reusing heavy objects allocated on the heap.
//!
//! ## What this crate is for
//! Inspired by Go's `sync.Pool`, this crate provides a multithreading-friendly
//! library for recycling and reusing heavy, heap-based objects, so that overall
//! allocation and memory pressure are reduced, and performance improves as a result.
//!
//!
//! ## What this crate is NOT for
//! There is no such thing as a silver bullet when designing a multithreaded project;
//! the programmer has to judge each use case on a case-by-case basis.
//!
//! As shown by a few (hundred) benchmarks we have run, it is quite clear that the
//! library can reliably beat the allocator when all of the following hold:
//!
//! - The object is large enough that it makes sense for it to live on the heap.
//! - The clean-up required to sanitize a used element before putting it back into the
//!   pool is simple and fast to run (see the sketch below).
//! - The estimate of the maximum number of elements simultaneously checked out during
//!   the program run is good enough, i.e. the parallelism is deterministic; otherwise,
//!   when the pool is starving (i.e. it doesn't have enough elements left to hand out),
//!   performance will suffer because new elements have to be created (and heap-allocated).
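//!
//! To illustrate the second point: clearing a collection drops its contents but keeps
//! its capacity, so sanitizing a recycled element can be as cheap as a few `clear`
//! calls. A minimal sketch (the `Vec<String>` payload and the pool size are arbitrary):
//!
//! ```rust
//! use syncpool::prelude::*;
//!
//! let mut pool: SyncPool<Vec<String>> = SyncPool::with_size(16);
//!
//! let mut buf = pool.get();
//! buf.push(String::from("some work item"));
//!
//! // Cheap sanitization: the contents are dropped, but the capacity is kept,
//! // so a later `get` can hand the buffer back out without reallocating.
//! buf.clear();
//! pool.put(buf);
//! ```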
//!
//! If your struct is small enough to live on the stack without blowing it, or if it's
//! not in the middle of your hottest code path, you most likely won't need this library
//! to labor for you -- allocators nowadays work quite marvelously, and stack allocation
//! is cheaper still.
//!
//!
//! ## Example
//! ```rust
//! extern crate syncpool;
//!
//! use std::collections::HashMap;
//! use std::sync::mpsc;
//! use std::thread;
//! use std::time::Duration;
//! use syncpool::prelude::*;
//!
//! /// For simplicity of illustration, we define the shared pool in the simplest but
//! /// unsafe way: as a `static mut`. Safer implementations exist (see the sketch after
//! /// this example) but may require some detour depending on the business logic and
//! /// project structure.
//! static mut POOL: Option<SyncPool<ComplexStruct>> = None;
//!
//! /// Number of elements sent through the pipeline in this example.
//! const COUNT: usize = 128;
//!
//! /// The complex data struct for illustration. Usually such a heavy element would also
//! /// contain other nested structs, and should almost always be placed on the heap. If
//! /// your struct is *not* heavy enough to live on the heap, you most likely won't
//! /// need this library -- the allocator will do a better job on the stack. The only
//! /// requirement on the struct is that it implements the `Default` trait, which can be
//! /// derived in most cases, or implemented easily.
//! #[derive(Default, Debug)]
//! struct ComplexStruct {
//!     id: usize,
//!     name: String,
//!     body: Vec<String>,
//!     flags: Vec<usize>,
//!     children: Vec<usize>,
//!     index: HashMap<usize, String>,
//!     rev_index: HashMap<String, usize>,
//! }
//!
//! fn main() {
//!     // Must initialize the pool first
//!     unsafe { POOL.replace(SyncPool::with_size(COUNT / 2)); }
//!
//!     // use a channel to create a concurrent pipeline.
//!     let (tx, rx) = mpsc::sync_channel(64);
//!
//!     // data producer loop
//!     thread::spawn(move || {
//!         let producer = unsafe { POOL.as_mut().unwrap() };
//!
//!         for i in 0..COUNT {
//!             // take a pre-initialized element from the pool; we won't allocate in
//!             // this call since the boxed element is already placed on the heap, and
//!             // here we only reuse it.
//!             let mut content: Box<ComplexStruct> = producer.get();
//!             content.id = i;
//!
//!             // simulate the busy/heavy calculations we're doing in this time period,
//!             // usually involving the `content` object.
//!             thread::sleep(Duration::from_nanos(32));
//!
//!             // done with the work, send the result out.
//!             tx.send(content).unwrap_or_default();
//!         }
//!     });
//!
//!     // data consumer logic
//!     let handler = thread::spawn(move || {
//!         let consumer = unsafe { POOL.as_mut().unwrap() };
//!
//!         // `content` has the type `Box<ComplexStruct>`
//!         for content in rx {
//!             println!("Receiving struct with id: {}", content.id);
//!             consumer.put(content);
//!         }
//!     });
//!
//!     // wait for the receiver to finish and print the result.
//!     handler.join().unwrap_or_default();
//!
//!     println!("All done...");
//! }
//! ```
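//!
//! As noted in the comment above, the `static mut` shortcut can be avoided. One
//! dependency-free alternative is to let a single owner hold the pool and lend out
//! `&mut` borrows; sharing across threads then goes through whatever synchronization
//! or initialization scheme fits your project. A minimal, single-threaded sketch of
//! the owned-borrow style (the `Message` type and `process` function are purely
//! illustrative):
//!
//! ```rust
//! use syncpool::prelude::*;
//!
//! #[derive(Default)]
//! struct Message {
//!     id: usize,
//!     body: Vec<u8>,
//! }
//!
//! // Borrow the pool instead of reaching for a global.
//! fn process(pool: &mut SyncPool<Message>, id: usize) -> Box<Message> {
//!     let mut msg = pool.get();
//!     msg.id = id;
//!     msg
//! }
//!
//! let mut pool = SyncPool::with_size(16);
//! let msg = process(&mut pool, 42);
//! assert_eq!(msg.id, 42);
//! pool.put(msg);
//! ```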
//!
//! In addition, if you prefer to use a constructor to create and initialize the element,
//! you may opt for the `with_builder` API:
//!
//! ```rust
//! use syncpool::prelude::*;
//! use std::vec;
//!
//! struct BigStruct {
//!     a: u32,
//!     b: u32,
//!     c: Vec<u8>,
//! }
//!
//! impl BigStruct {
//!     fn new() -> Self {
//!         BigStruct {
//!             a: 1,
//!             b: 42,
//!             c: vec::from_elem(0u8, 0x1_000_000),
//!         }
//!     }
//! }
//!
//! let mut pool = SyncPool::with_builder(BigStruct::new);
//!
//! println!("Pool created...");
//!
//! let big_box = pool.get();
//!
//! assert_eq!(big_box.a, 1);
//! assert_eq!(big_box.b, 42);
//! assert_eq!(big_box.c.len(), 0x1_000_000);
//!
//! pool.put(big_box);
//! ```
//!
//! On occasion the struct is too large to be constructed on the stack first (e.g. a large
//! buffer), in which case you can use the `with_packer` API instead: it initializes an
//! object that has already been created on the heap, so you don't need to build the object
//! on the stack and then move it to the heap (with a performance boost coming for free).
//!
//! Note that when using the `with_packer` API, you have to make sure that all fields are
//! properly initialized. The provided placeholder object is well-aligned, but its fields
//! may contain undefined values if not initialized correctly, e.g. a field of type
//! `NonNull<T>` or `MaybeUninit<T>`.
//!
//! ```rust
//! use syncpool::prelude::*;
//! use std::vec;
//!
//! struct BigStruct {
//!     a: u32,
//!     b: u32,
//!     c: Vec<u8>,
//! }
//!
//! impl BigStruct {
//!     fn initializer(mut self: Box<Self>) -> Box<Self> {
//!         self.a = 1;
//!         self.b = 42;
//!         self.c = vec::from_elem(0u8, 0x1_000_000);
//!         self
//!     }
//! }
//!
//! let mut pool = SyncPool::with_packer(BigStruct::initializer);
//!
//! println!("Pool created...");
//!
//! let big_box = pool.get();
//!
//! assert_eq!(big_box.a, 1);
//! assert_eq!(big_box.b, 42);
//! assert_eq!(big_box.c.len(), 0x1_000_000);
//!
//! pool.put(big_box);
//! ```
//!
//! You can find more complex (i.e. practical) use cases in the [examples](https://github.com/Chopinsky/byte_buffer/tree/master/sync_pool/examples)
//! folder.
//!

mod boxed;
mod bucket;
mod pool;
mod utils;

pub use crate::{
    boxed::{default_box, make_box, raw_box, raw_box_zeroed},
    pool::{PoolManager, PoolState, SyncPool},
};

pub mod prelude {
    pub use crate::boxed::*;
    pub use crate::{PoolManager, PoolState, SyncPool};
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn check() {
        let mut pool: SyncPool<[u8; 32]> = SyncPool::with_size(12);

        for _ in 0..32 {
            let ary = pool.get();
            assert_eq!(ary.len(), 32);
            pool.put(ary);
        }

        assert!(pool.len() > 0);
    }
}