infinity_pool/pinned/
pool_managed.rs

1use std::fmt;
2use std::iter::FusedIterator;
3use std::marker::PhantomData;
4use std::mem::MaybeUninit;
5use std::ptr::NonNull;
6use std::sync::{Arc, Mutex};
7
8use crate::{
9    ERR_POISONED_LOCK, PooledMut, RawOpaquePool, RawOpaquePoolIterator, RawOpaquePoolSend,
10};
11
12/// A thread-safe pool of reference-counted objects of type `T`.
13///
14/// All values in the pool remain pinned for their entire lifetime.
15///
16/// The pool automatically expands its capacity when needed.
17#[doc = include_str!("../../doc/snippets/managed_pool_lifetimes.md")]
18///
19/// # Thread safety
20///
21/// The pool is thread-safe.
22///
23/// # Example
24///
25/// ```rust
26/// use infinity_pool::PinnedPool;
27///
28/// let mut pool = PinnedPool::<String>::new();
29///
30/// // Insert an object into the pool
31/// let handle = pool.insert("Hello, Pinned!".to_string());
32///
33/// // Access the object through the handle
34/// assert_eq!(*handle, "Hello, Pinned!");
35///
36/// // The object is automatically removed when the handle is dropped
37/// ```
38///
39/// # Pool clones are functionally equivalent
40///
41/// ```rust
42/// use infinity_pool::PinnedPool;
43///
44/// let mut pool1 = PinnedPool::<i32>::new();
45/// let pool2 = pool1.clone();
46///
47/// assert_eq!(pool1.len(), pool2.len());
48/// let _handle = pool1.insert(42);
49/// assert_eq!(pool1.len(), pool2.len());
50/// ```
51///
52/// The pool is thread-safe (`Send` and `Sync`) and requires `T: Send`.
pub struct PinnedPool<T: Send + 'static> {
    // We require 'static from any inserted values because the pool
    // does not enforce any Rust lifetime semantics, only reference counts.
    //
    // The pool type itself is just a handle around the inner opaque pool,
    // which is reference-counted and mutex-guarded. The inner pool
    // will only ever be dropped once all items have been removed from
    // it and no more `PinnedPool` instances exist that point to it.
    //
    // This also implies that `DropPolicy` has no meaning for this
    // pool configuration, as the pool can never be dropped if it has
    // contents (as dropping the handles of pooled objects will remove
    // them from the pool, while keeping the pool alive until then).
    inner: Arc<Mutex<RawOpaquePoolSend>>,

    // Marks this pool as logically containing `T` values even though the
    // inner pool stores them opaquely; carries no runtime data.
    _phantom: PhantomData<T>,
}
70
71impl<T> PinnedPool<T>
72where
73    T: Send + 'static,
74{
75    /// Creates a new pool for objects of type `T`.
76    #[must_use]
77    pub fn new() -> Self {
78        let inner = RawOpaquePool::with_layout_of::<T>();
79
80        // SAFETY: All insertion methods require `T: Send`.
81        let inner = unsafe { RawOpaquePoolSend::new(inner) };
82
83        Self {
84            inner: Arc::new(Mutex::new(inner)),
85            _phantom: PhantomData,
86        }
87    }
88
89    #[doc = include_str!("../../doc/snippets/pool_len.md")]
90    #[must_use]
91    #[inline]
92    pub fn len(&self) -> usize {
93        self.inner.lock().expect(ERR_POISONED_LOCK).len()
94    }
95
96    #[doc = include_str!("../../doc/snippets/pool_capacity.md")]
97    #[must_use]
98    #[inline]
99    pub fn capacity(&self) -> usize {
100        self.inner.lock().expect(ERR_POISONED_LOCK).capacity()
101    }
102
103    #[doc = include_str!("../../doc/snippets/pool_is_empty.md")]
104    #[must_use]
105    #[inline]
106    pub fn is_empty(&self) -> bool {
107        self.inner.lock().expect(ERR_POISONED_LOCK).is_empty()
108    }
109
110    #[doc = include_str!("../../doc/snippets/pool_reserve.md")]
111    #[inline]
112    pub fn reserve(&mut self, additional: usize) {
113        self.inner
114            .lock()
115            .expect(ERR_POISONED_LOCK)
116            .reserve(additional);
117    }
118
119    #[doc = include_str!("../../doc/snippets/pool_shrink_to_fit.md")]
120    #[inline]
121    pub fn shrink_to_fit(&mut self) {
122        self.inner.lock().expect(ERR_POISONED_LOCK).shrink_to_fit();
123    }
124
125    #[doc = include_str!("../../doc/snippets/pool_insert.md")]
126    ///
127    /// # Example
128    ///
129    /// ```rust
130    /// use infinity_pool::PinnedPool;
131    ///
132    /// let mut pool = PinnedPool::<String>::new();
133    ///
134    /// // Insert an object into the pool
135    /// let mut handle = pool.insert("Hello".to_string());
136    ///
137    /// // Mutate the object via the unique handle
138    /// handle.push_str(", World!");
139    /// assert_eq!(&*handle, "Hello, World!");
140    ///
141    /// // Transform the unique handle into a shared handle
142    /// let shared_handle = handle.into_shared();
143    ///
144    /// // After transformation, you can only immutably dereference the object
145    /// assert_eq!(&*shared_handle, "Hello, World!");
146    /// // shared_handle.push_str("!"); // This would not compile
147    ///
148    /// // The object is removed when the handle is dropped
149    /// drop(shared_handle); // Explicitly drop to remove from pool
150    /// assert_eq!(pool.len(), 0);
151    /// ```
152    #[inline]
153    #[must_use]
154    pub fn insert(&mut self, value: T) -> PooledMut<T> {
155        let inner = self.inner.lock().expect(ERR_POISONED_LOCK).insert(value);
156
157        PooledMut::new(inner, Arc::clone(&self.inner))
158    }
159
160    #[doc = include_str!("../../doc/snippets/pool_insert_with.md")]
161    ///
162    /// # Example
163    ///
164    /// ```rust
165    /// use std::mem::MaybeUninit;
166    ///
167    /// use infinity_pool::PinnedPool;
168    ///
169    /// struct DataBuffer {
170    ///     id: u32,
171    ///     data: MaybeUninit<[u8; 1024]>, // Large buffer to skip initializing
172    /// }
173    ///
174    /// let mut pool = PinnedPool::<DataBuffer>::new();
175    ///
176    /// // Initialize only the id, leaving data uninitialized for performance
177    /// let handle = unsafe {
178    ///     pool.insert_with(|uninit: &mut MaybeUninit<DataBuffer>| {
179    ///         let ptr = uninit.as_mut_ptr();
180    ///         // SAFETY: Writing to the id field within allocated space
181    ///         unsafe {
182    ///             std::ptr::addr_of_mut!((*ptr).id).write(42);
183    ///             // data field is intentionally left uninitialized
184    ///         }
185    ///     })
186    /// };
187    ///
188    /// // ID is accessible, data remains uninitialized
189    /// let id = unsafe { std::ptr::addr_of!((*handle).id).read() };
190    /// assert_eq!(id, 42);
191    /// ```
192    ///
193    /// # Safety
194    #[doc = include_str!("../../doc/snippets/safety_closure_must_initialize_object.md")]
195    #[inline]
196    #[must_use]
197    pub unsafe fn insert_with<F>(&mut self, f: F) -> PooledMut<T>
198    where
199        F: FnOnce(&mut MaybeUninit<T>),
200    {
201        // SAFETY: Forwarding safety guarantees from caller.
202        let inner = unsafe { self.inner.lock().expect(ERR_POISONED_LOCK).insert_with(f) };
203
204        PooledMut::new(inner, Arc::clone(&self.inner))
205    }
206
207    /// Calls a closure with an iterator over all objects in the pool.
208    ///
209    /// The iterator only yields pointers to the objects, not references, because the pool
210    /// does not have the authority to create references to its contents as user code may
211    /// concurrently be holding a conflicting exclusive reference via `PooledMut<T>`.
212    ///
213    /// Therefore, obtaining actual references to pool contents via iteration is only possible
214    /// by using the pointer to create such references in unsafe code and relies on the caller
215    /// guaranteeing that no conflicting exclusive references exist.
216    ///
217    /// # Mutual exclusion
218    ///
219    /// The pool is locked for the entire duration of the closure, ensuring that objects
220    /// cannot be removed while iteration is in progress. This guarantees that all pointers
221    /// yielded by the iterator remain valid for the duration of the closure.
222    ///
223    /// # Examples
224    ///
225    /// ```
226    /// # use infinity_pool::PinnedPool;
227    /// let mut pool = PinnedPool::<u32>::new();
228    /// let _handle1 = pool.insert(42u32);
229    /// let _handle2 = pool.insert(100u32);
230    ///
231    /// // Safe iteration with guaranteed pointer validity
232    /// pool.with_iter(|iter| {
233    ///     for ptr in iter {
234    ///         // SAFETY: We know these are u32 pointers from this pool
235    ///         let value = unsafe { ptr.as_ref() };
236    ///         println!("Value: {}", value);
237    ///     }
238    /// });
239    ///
240    /// // Collect values safely
241    /// let values: Vec<u32> =
242    ///     pool.with_iter(|iter| iter.map(|ptr| unsafe { *ptr.as_ref() }).collect());
243    /// ```
244    pub fn with_iter<F, R>(&self, f: F) -> R
245    where
246        F: FnOnce(PinnedPoolIterator<'_, T>) -> R,
247    {
248        let guard = self.inner.lock().expect(ERR_POISONED_LOCK);
249        let iter = PinnedPoolIterator::new(&guard);
250        f(iter)
251    }
252}
253
254impl<T> Clone for PinnedPool<T>
255where
256    T: Send,
257{
258    #[inline]
259    fn clone(&self) -> Self {
260        Self {
261            inner: Arc::clone(&self.inner),
262            _phantom: PhantomData,
263        }
264    }
265}
266
267impl<T> Default for PinnedPool<T>
268where
269    T: Send,
270{
271    fn default() -> Self {
272        Self::new()
273    }
274}
275
276impl<T> fmt::Debug for PinnedPool<T>
277where
278    T: Send,
279{
280    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
281        f.debug_struct("PinnedPool")
282            .field("inner", &self.inner)
283            .finish()
284    }
285}
286
/// Iterator over all objects in a pinned pool.
///
/// The iterator only yields pointers to the objects, not references, because the pool
/// does not have the authority to create references to its contents as user code may
/// concurrently be holding a conflicting exclusive reference via `PooledMut<T>`.
///
/// Therefore, obtaining actual references to pool contents via iteration is only possible
/// by using the pointer to create such references in unsafe code and relies on the caller
/// guaranteeing that no conflicting exclusive references exist.
#[derive(Debug)]
pub struct PinnedPoolIterator<'p, T: 'static> {
    // Untyped iterator over the underlying opaque pool; pointers it yields
    // are cast to `NonNull<T>` by this wrapper.
    raw_iter: RawOpaquePoolIterator<'p>,
    // Records the item type `T` that pointers are cast to; no runtime data.
    _phantom: PhantomData<T>,
}
301
302impl<'p, T> PinnedPoolIterator<'p, T> {
303    fn new(pool: &'p RawOpaquePoolSend) -> Self {
304        Self {
305            raw_iter: pool.iter(),
306            _phantom: PhantomData,
307        }
308    }
309}
310
311impl<T> Iterator for PinnedPoolIterator<'_, T> {
312    type Item = NonNull<T>;
313
314    fn next(&mut self) -> Option<Self::Item> {
315        self.raw_iter.next().map(NonNull::cast::<T>)
316    }
317
318    fn size_hint(&self) -> (usize, Option<usize>) {
319        self.raw_iter.size_hint()
320    }
321}
322
323impl<T> DoubleEndedIterator for PinnedPoolIterator<'_, T> {
324    fn next_back(&mut self) -> Option<Self::Item> {
325        self.raw_iter.next_back().map(NonNull::cast::<T>)
326    }
327}
328
impl<T> ExactSizeIterator for PinnedPoolIterator<'_, T> {
    // Exact length is available because the raw iterator tracks the
    // remaining object count precisely.
    fn len(&self) -> usize {
        self.raw_iter.len()
    }
}
334
335impl<T> FusedIterator for PinnedPoolIterator<'_, T> {}
336
#[cfg(test)]
mod tests {
    use std::mem::MaybeUninit;

    use super::*;

    #[test]
    fn new_pool_is_empty() {
        let pool = PinnedPool::<u32>::new();

        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
        assert_eq!(pool.capacity(), 0);
    }

    #[test]
    fn default_pool_is_empty() {
        let pool: PinnedPool<String> = PinnedPool::default();

        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
        assert_eq!(pool.capacity(), 0);
    }

    #[test]
    fn insert_and_length() {
        let mut pool = PinnedPool::<u64>::new();

        let _h1 = pool.insert(10);
        assert_eq!(pool.len(), 1);
        assert!(!pool.is_empty());

        let _h2 = pool.insert(20);
        assert_eq!(pool.len(), 2);
    }

    #[test]
    fn capacity_grows_when_needed() {
        let mut pool = PinnedPool::<u64>::new();

        assert_eq!(pool.capacity(), 0);

        let _handle = pool.insert(123_u64);

        // Should have some capacity now
        assert!(pool.capacity() > 0);
        let initial_capacity = pool.capacity();

        // Fill up the pool to force capacity expansion
        #[expect(
            clippy::collection_is_never_read,
            reason = "handles are used for ownership"
        )]
        let mut handles = Vec::new();
        for i in 1..initial_capacity {
            handles.push(pool.insert(i as u64));
        }

        // One more insert must expand capacity: after it the pool holds
        // `initial_capacity + 1` live objects, and capacity is never less
        // than the live-object count, so strict growth is guaranteed.
        let _handle = pool.insert(999_u64);

        assert!(pool.capacity() > initial_capacity);
    }

    #[test]
    fn reserve_creates_capacity() {
        let mut pool = PinnedPool::<u8>::new();

        pool.reserve(100);
        assert!(pool.capacity() >= 100);

        let initial_capacity = pool.capacity();
        pool.reserve(50); // Should not increase capacity
        assert_eq!(pool.capacity(), initial_capacity);

        pool.reserve(200); // Should increase capacity
        assert!(pool.capacity() >= 200);
    }

    #[test]
    fn shrink_to_fit_removes_unused_capacity() {
        let mut pool = PinnedPool::<u8>::new();

        // Reserve more than we need
        pool.reserve(100);

        // Insert only a few items
        let _handle1 = pool.insert(1_u8);
        let _handle2 = pool.insert(2_u8);

        // Shrink should not panic
        pool.shrink_to_fit();

        // Pool should still work normally
        assert_eq!(pool.len(), 2);
        let _handle3 = pool.insert(3_u8);
        assert_eq!(pool.len(), 3);
    }

    #[test]
    fn shrink_to_fit_with_zero_items_shrinks_to_zero_capacity() {
        let mut pool = PinnedPool::<u8>::new();

        // Add some items to create capacity
        let handle1 = pool.insert(1_u8);
        let handle2 = pool.insert(2_u8);
        let handle3 = pool.insert(3_u8);

        // Verify we have capacity
        assert!(pool.capacity() > 0);

        // Remove all items by dropping handles
        drop(handle1);
        drop(handle2);
        drop(handle3);

        assert!(pool.is_empty());

        pool.shrink_to_fit();

        // Testing implementation detail: empty pool should shrink capacity to zero
        // This may become untrue with future algorithm changes, at which point
        // we will need to adjust the tests.
        assert_eq!(pool.capacity(), 0);
    }

    #[test]
    fn handle_provides_access_to_object() {
        let mut pool = PinnedPool::<u64>::new();

        let handle = pool.insert(12345_u64);

        assert_eq!(*handle, 12345);
    }

    #[test]
    fn multiple_handles_to_same_type() {
        let mut pool = PinnedPool::<String>::new();

        let handle1 = pool.insert("hello".to_string());
        let handle2 = pool.insert("world".to_string());

        assert_eq!(pool.len(), 2);

        assert_eq!(&*handle1, "hello");
        assert_eq!(&*handle2, "world");

        // Dropping handles should remove items from pool
        drop(handle1);
        assert_eq!(pool.len(), 1);

        drop(handle2);
        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
    }

    #[test]
    fn handle_drop_removes_objects_both_exclusive_and_shared() {
        let mut pool = PinnedPool::<String>::new();

        // Test exclusive handle drop
        let exclusive_handle = pool.insert("exclusive".to_string());
        assert_eq!(pool.len(), 1);
        drop(exclusive_handle);
        assert_eq!(pool.len(), 0);

        // Test shared handle drop
        let mut_handle = pool.insert("shared".to_string());
        let shared_handle = mut_handle.into_shared();
        assert_eq!(pool.len(), 1);

        // Verify shared handle works
        assert_eq!(&*shared_handle, "shared");

        // Drop the shared handle should remove from pool
        drop(shared_handle);
        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
    }

    #[test]
    fn insert_with_closure() {
        let mut pool = PinnedPool::<u64>::new();

        // SAFETY: we correctly initialize the value
        let handle = unsafe {
            pool.insert_with(|u: &mut MaybeUninit<u64>| {
                u.write(42);
            })
        };

        assert_eq!(pool.len(), 1);
        assert_eq!(*handle, 42);
    }

    #[test]
    fn clone_behavior() {
        let mut p1 = PinnedPool::<u32>::new();
        let mut p2 = p1.clone();

        let _h1 = p1.insert(100);
        assert_eq!(p2.len(), 1);

        let _h2 = p2.insert(200);
        assert_eq!(p1.len(), 2);

        // Both can iterate and see the same contents
        let mut values1: Vec<u32> = p1.with_iter(|iter| {
            iter.map(|ptr| {
                // SAFETY: The iterator yields valid NonNull<T> pointers for items still in the pool.
                unsafe { *ptr.as_ref() }
            })
            .collect()
        });
        let mut values2: Vec<u32> = p2.with_iter(|iter| {
            iter.map(|ptr| {
                // SAFETY: The iterator yields valid NonNull<T> pointers for items still in the pool.
                unsafe { *ptr.as_ref() }
            })
            .collect()
        });
        values1.sort_unstable();
        values2.sort_unstable();
        assert_eq!(values1, vec![100, 200]);
        assert_eq!(values2, vec![100, 200]);
    }

    #[test]
    fn lifecycle_handles_keep_pool_alive() {
        let handle = {
            let mut pool = PinnedPool::<String>::new();
            pool.insert("persist".to_string())
        }; // pool dropped here

        assert_eq!(&*handle, "persist");

        // Dropping handle now removes from pool (implicitly tested by absence of panic)
        drop(handle);
    }

    #[test]
    fn lifecycle_pool_clone_keeps_inner_alive() {
        let mut pool = PinnedPool::<String>::new();

        // Insert & clone
        let handle = pool.insert("data".to_string());
        let pool_clone = pool.clone();

        // Drop original pool; handle + clone should keep inner alive
        drop(pool); // original handle dropped

        // Validate access still works
        assert_eq!(&*handle, "data");
        assert_eq!(pool_clone.len(), 1);

        // Drop last handle; pool should now be empty
        drop(handle);
        assert_eq!(pool_clone.len(), 0);
    }

    #[test]
    fn pooled_mut_mutation_reflected() {
        let mut pool = PinnedPool::<String>::new();

        let mut handle = pool.insert("hello".to_string());
        handle.push_str(" world");

        assert_eq!(&*handle, "hello world");
        assert_eq!(pool.len(), 1);

        // Iteration sees updated value
        pool.with_iter(|iter| {
            let vals: Vec<String> = iter
                .map(|p| {
                    // SAFETY: Iterator guarantees each pointer is a valid &String for the duration of closure.
                    unsafe { p.as_ref().clone() }
                })
                .collect();
            assert_eq!(vals, vec!["hello world".to_string()]);
        });
    }

    #[test]
    fn reserve_and_shrink_to_fit_shared() {
        let mut p1 = PinnedPool::<u8>::new();
        let mut p2 = p1.clone();

        // Reserve capacity via one handle
        p1.reserve(50);
        assert!(p1.capacity() >= 50);
        assert_eq!(p1.capacity(), p2.capacity());

        // Add and then drop items to create unused capacity
        let h = p2.insert(99);
        assert_eq!(p1.len(), 1);
        drop(h);
        assert_eq!(p2.len(), 0);

        // Shrink and ensure both views updated
        p1.shrink_to_fit();
        assert_eq!(p1.capacity(), p2.capacity());
    }

    #[test]
    fn with_iter_empty_pool() {
        let pool = PinnedPool::<u32>::new();
        let count = pool.with_iter(|iter| iter.count());
        assert_eq!(count, 0);
    }

    #[test]
    fn with_iter_collect_values() {
        let mut pool = PinnedPool::<u32>::new();

        let _handles: Vec<_> = [10, 20, 30].into_iter().map(|v| pool.insert(v)).collect();

        let mut collected: Vec<u32> = pool.with_iter(|iter| {
            iter.map(|p| {
                // SAFETY: Iterator yields valid NonNull<u32> pointers for items alive in pool.
                unsafe { *p.as_ref() }
            })
            .collect()
        });

        collected.sort_unstable();
        assert_eq!(collected, vec![10, 20, 30]);
    }

    #[test]
    fn with_iter_double_ended() {
        let mut pool = PinnedPool::<i32>::new();
        let _handles: Vec<_> = [1, 2, 3].into_iter().map(|v| pool.insert(v)).collect();

        pool.with_iter(|mut iter| {
            assert_eq!(iter.len(), 3);

            let _front = iter.next();
            assert_eq!(iter.len(), 2);

            let _back = iter.next_back();
            assert_eq!(iter.len(), 1);

            let _remaining = iter.next();
            assert!(iter.next().is_none());
            assert!(iter.next_back().is_none());
        });
    }

    #[test]
    fn with_iter_fused_behavior() {
        let pool = PinnedPool::<u32>::new();
        pool.with_iter(|mut iter| {
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
            assert!(iter.next_back().is_none());
        });
    }

    #[test]
    fn iter_size_hint_and_exact_size() {
        let mut pool = PinnedPool::<u32>::new();

        // Empty pool
        pool.with_iter(|iter| {
            assert_eq!(iter.size_hint(), (0, Some(0)));
            assert_eq!(iter.len(), 0);
        });

        // Add some items
        let _handle1 = pool.insert(100_u32);
        let _handle2 = pool.insert(200_u32);

        pool.with_iter(|mut iter| {
            assert_eq!(iter.size_hint(), (2, Some(2)));
            assert_eq!(iter.len(), 2);

            // Consume one item
            let first_item = iter.next();
            assert!(first_item.is_some());
            assert_eq!(iter.size_hint(), (1, Some(1)));
            assert_eq!(iter.len(), 1);

            // Consume another
            let second_item = iter.next();
            assert!(second_item.is_some());
            assert_eq!(iter.size_hint(), (0, Some(0)));
            assert_eq!(iter.len(), 0);

            // Should be exhausted now
            assert_eq!(iter.next(), None);
            assert_eq!(iter.size_hint(), (0, Some(0)));
            assert_eq!(iter.len(), 0);
        });
    }

    #[test]
    fn thread_safety() {
        use std::sync::Arc;
        use std::thread;

        let mut pool = PinnedPool::<String>::new();

        // Insert some initial data
        let handle1 = pool.insert("Thread test 1".to_string());
        let handle2 = pool.insert("Thread test 2".to_string());

        let pool = Arc::new(pool);
        let pool_clone = Arc::clone(&pool);

        // Spawn a thread that can access the pool
        let thread_handle = thread::spawn(move || {
            // Should be able to read the length
            assert!(pool_clone.len() >= 2);

            // Should be able to check capacity
            assert!(pool_clone.capacity() >= 2);

            // Should be able to check if empty
            assert!(!pool_clone.is_empty());
        });

        // Wait for thread to complete
        thread_handle.join().unwrap();

        // Original handles should still be valid
        assert_eq!(&*handle1, "Thread test 1");
        assert_eq!(&*handle2, "Thread test 2");
    }
}
765}