//! A thread-safe object pool with automatic return and attach/detach semantics.
//!
//! The goal of an object pool is to reuse objects that are expensive to allocate or that are allocated frequently.
//!
//! A common use case is buffered IO: you create a `Pool` of size `n` containing `Vec<u8>` buffers
//! that can be passed to calls such as `file.read_to_end(&mut buff)`.
//!
//! ## Warning
//!
//! Objects in the pool are not automatically reset: they are returned to the pool exactly as they were left.
//! After pulling an object you may want to call `object.reset()`, `object.clear()`,
//! or whatever the equivalent is for the type you are pooling (the usage example below clears its buffer first).
//!
//! # Examples
//!
//! ## Creating a Pool
//!
//! The general pool creation looks like this
//! ```
//!  let pool: Pool<T> = Pool::new(capacity, || T::new());
//! ```
//! For example, a pool of 32 `Vec<u8>` buffers, each with a capacity of 4096:
//! ```
//!  let pool: Pool<Vec<u8>> = Pool::new(32, || Vec::with_capacity(4096));
//! ```
//!
//! ## Using a Pool
//!
//! Basic usage for pulling from the pool
//! ```
//! let pool: Pool<Vec<u8>> = Pool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.try_pull().unwrap(); // try_pull() returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using
//! some_file.read_to_end(&mut reusable_buff);
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//! Pull from the pool and `detach()`
//! ```
//! let pool: Pool<Vec<u8>> = Pool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.try_pull().unwrap(); // try_pull() returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using
//! let mut s = String::from_utf8(reusable_buff.detach(Vec::new())).unwrap();
//! s.push_str("hello, world!");
//! reusable_buff.attach(s.into_bytes()); // reattach the buffer before the reusable goes out of scope
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
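//!
//! `detach()` is useful when an API needs to take the pooled object by value (as
//! `String::from_utf8` does above), while `attach()` puts an object back so that
//! the pool slot is not left holding only the cheap placeholder.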
//!
//! ## Using Across Threads
//!
//! You simply wrap the pool in a [`std::sync::Arc`]
//! ```
//! let pool: Arc<Pool<T>> = Arc::new(Pool::new(cap, || T::new()));
//! ```
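//!
//! A rough sketch of sharing one pool between worker threads (assuming a pool of
//! `Vec<u8>` buffers and four workers; note that moving the `Arc` into
//! `std::thread::spawn` requires the pool's lifetime parameter to be `'static`):
//! ```
//! use std::sync::Arc;
//!
//! let pool: Arc<Pool<'static, Vec<u8>>> = Arc::new(Pool::new(8, || Vec::with_capacity(4096)));
//!
//! let handles: Vec<_> = (0..4)
//!     .map(|_| {
//!         let pool = Arc::clone(&pool);
//!         std::thread::spawn(move || {
//!             let mut buff = pool.pull(); // blocks (spins) until a buffer is free
//!             buff.clear();               // buffers are NOT reset automatically
//!             buff.extend_from_slice(b"some bytes");
//!         })
//!     })
//!     .collect();
//!
//! for handle in handles {
//!     handle.join().unwrap();
//! }
//! ```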
//!
//! [`std::sync::Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html

use std::mem;
use std::ops::{
    Deref, DerefMut,
};
use std::marker::PhantomData;

use parking_lot::{
    Mutex, MutexGuard
};
use crossbeam::utils::Backoff;

/// A fixed-capacity object pool; each slot is guarded by its own `Mutex`.
pub struct Pool<'a, T> {
    inner: Vec<Mutex<T>>,
    lifetime: PhantomData<&'a T>,
}

impl<'a, T> Pool<'a, T> {
    /// Creates a pool of `cap` objects, each produced by calling `init`.
    pub fn new<F>(cap: usize, init: F) -> Pool<'a, T>
        where F: Fn() -> T {
        let mut inner = Vec::with_capacity(cap);

        for _ in 0..cap {
            inner.push(Mutex::new(init()));
        }

        Pool {
            inner,
            lifetime: PhantomData
        }
    }

    /// Tries to take an object from the pool without blocking.
    ///
    /// Returns `None` if every object in the pool is currently in use.
    pub fn try_pull(&self) -> Option<Reusable<T>> {
        for entry in &self.inner {
            let entry_guard = match entry.try_lock() {
                Some(v) => v,
                _ => { continue; }
            };

            return Some(Reusable {
                data: entry_guard,
            });
        }

        None
    }

    /// Takes an object from the pool, busy-waiting with `Backoff::snooze` until one becomes free.
    pub fn pull(&self) -> Reusable<T> {
        let backoff = Backoff::new();
        loop {
            match self.try_pull() {
                Some(r) => return r,
                None => {
                    backoff.snooze();
                }
            };
        }
    }
}

// Test-only helper: counts the total number of bytes held across all pooled buffers.
impl<'a> Pool<'a, Vec<u8>> {
    pub fn count(&self) -> u64 {
        let mut count: u64 = 0;

        for entry in &self.inner {
            count += entry.lock().len() as u64;
        }

        count
    }
}


/// A handle to a pooled object.
///
/// It dereferences to the underlying object and returns it to the pool when dropped,
/// since dropping releases the slot's `MutexGuard`.
pub struct Reusable<'a, T> {
    data: MutexGuard<'a, T>,
}

impl<'a, T> Reusable<'a, T> {
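    /// Moves the pooled object out of the handle, leaving `replacement` in its place.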
    pub fn detach(&mut self, replacement: T) -> T {
        mem::replace(&mut *self.data, replacement)
    }

    /// Puts `data` back into the handle, returning whatever was there before.
    pub fn attach(&mut self, data: T) -> T {
        mem::replace(&mut *self.data, data)
    }
}

impl<'a, T> Deref for Reusable<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.data.deref()
    }
}


impl<'a, T> DerefMut for Reusable<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.data.deref_mut()
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use lazy_static::lazy_static;
    use super::*;

    #[test]
    fn round_trip() {
        lazy_static! {
            static ref POOL: Arc<Pool<'static, Vec<u8>>> = Arc::new(Pool::new(10, || Vec::with_capacity(1)));
        }

        let mut handles = Vec::new();
        for _ in 0..10 {
            let tmp = POOL.clone();
            handles.push(std::thread::spawn(move || {
                for i in 0..1_000_000 {
                    let mut reusable = tmp.pull();
                    if i % 2 == 0 {
                        let mut vec = reusable.detach(Vec::new());
                        vec.push(i as u8);
                        reusable.attach(vec);
                    } else {
                        reusable.push(i as u8);
                    }
                }
            }));
        }

        // wait for every writer thread to finish before counting
        for handle in handles {
            handle.join().unwrap();
        }

        assert_eq!(POOL.count(), 10_000_000)
    }
}