// infinity_pool/pinned/pool_local.rs

1use std::any::type_name;
2use std::cell::RefCell;
3use std::fmt;
4use std::iter::FusedIterator;
5use std::marker::PhantomData;
6use std::mem::MaybeUninit;
7use std::ptr::NonNull;
8use std::rc::Rc;
9
10use crate::{LocalPooledMut, RawOpaquePool, RawOpaquePoolIterator};
11
/// A single-threaded pool of reference-counted objects of type `T`.
///
/// All values in the pool remain pinned for their entire lifetime.
///
/// The pool automatically expands its capacity when needed.
#[doc = include_str!("../../doc/snippets/local_pool_lifetimes.md")]
///
/// # Thread safety
///
/// The pool is single-threaded.
///
/// # Example: unique object ownership
///
/// ```rust
/// use std::fmt::Display;
///
/// use infinity_pool::LocalPinnedPool;
///
/// let mut pool = LocalPinnedPool::new();
///
/// // Insert an object into the pool, returning a unique handle to it.
/// let mut handle = pool.insert("Hello, world!".to_string());
///
/// // A unique handle grants the same access as a `&mut` reference to the object.
/// handle.push_str(" Welcome to Infinity Pool!");
///
/// println!("Updated value: {}", &*handle);
///
/// // The object is removed when the handle is dropped.
/// ```
///
/// # Example: shared object ownership
///
/// ```rust
/// use std::fmt::Display;
///
/// use infinity_pool::LocalPinnedPool;
///
/// let mut pool = LocalPinnedPool::new();
///
/// // Insert an object into the pool, returning a unique handle to it.
/// let handle = pool.insert("Hello, world!".to_string());
///
/// // The unique handle can be converted into a shared handle,
/// // allowing multiple clones of the handle to be created.
/// let shared_handle = handle.into_shared();
/// let shared_handle_clone = shared_handle.clone();
///
/// // Shared handles grant the same access as `&` shared references to the object.
/// println!("Shared access to value: {}", &*shared_handle);
///
/// // The object is removed when the last shared handle is dropped.
/// ```
///
/// # Clones of the pool are functionally equivalent
///
/// ```rust
/// use infinity_pool::LocalPinnedPool;
///
/// let mut pool1 = LocalPinnedPool::new();
/// let pool2 = pool1.clone();
///
/// assert_eq!(pool1.len(), pool2.len());
///
/// _ = pool1.insert(42_i32);
///
/// assert_eq!(pool1.len(), pool2.len());
/// ```
pub struct LocalPinnedPool<T: 'static> {
    // We require 'static from any inserted values because the pool
    // does not enforce any Rust lifetime semantics, only reference counts.
    //
    // The pool type itself is just a handle around the inner opaque pool,
    // which is reference-counted and refcell-guarded. The inner pool
    // will only ever be dropped once all items have been removed from
    // it and no more `LocalPinnedPool` instances exist that point to it.
    //
    // This also implies that `DropPolicy` has no meaning for this
    // pool configuration, as the pool can never be dropped if it has
    // contents (as dropping the handles of pooled objects will remove
    // them from the pool, while keeping the pool alive until then).
    inner: Rc<RefCell<RawOpaquePool>>,

    // Ties the pool to its item type `T` without storing a `T` directly;
    // all storage lives in the untyped inner pool.
    _phantom: PhantomData<T>,
}
97
98impl<T> LocalPinnedPool<T>
99where
100    T: 'static,
101{
102    /// Creates a new pool for objects of type `T`.
103    #[must_use]
104    pub fn new() -> Self {
105        let inner = RawOpaquePool::with_layout_of::<T>();
106
107        Self {
108            inner: Rc::new(RefCell::new(inner)),
109            _phantom: PhantomData,
110        }
111    }
112
113    #[doc = include_str!("../../doc/snippets/pool_len.md")]
114    #[must_use]
115    #[inline]
116    pub fn len(&self) -> usize {
117        self.inner.borrow().len()
118    }
119
120    #[doc = include_str!("../../doc/snippets/pool_capacity.md")]
121    #[must_use]
122    #[inline]
123    pub fn capacity(&self) -> usize {
124        self.inner.borrow().capacity()
125    }
126
127    #[doc = include_str!("../../doc/snippets/pool_is_empty.md")]
128    #[must_use]
129    #[inline]
130    pub fn is_empty(&self) -> bool {
131        self.inner.borrow().is_empty()
132    }
133
134    #[doc = include_str!("../../doc/snippets/pool_reserve.md")]
135    #[inline]
136    pub fn reserve(&self, additional: usize) {
137        self.inner.borrow_mut().reserve(additional);
138    }
139
140    #[doc = include_str!("../../doc/snippets/pool_shrink_to_fit.md")]
141    #[inline]
142    pub fn shrink_to_fit(&self) {
143        self.inner.borrow_mut().shrink_to_fit();
144    }
145
146    #[doc = include_str!("../../doc/snippets/pool_insert.md")]
147    #[inline]
148    #[must_use]
149    #[cfg_attr(test, mutants::skip)] // All mutations are unviable - skip them to save time.
150    pub fn insert(&self, value: T) -> LocalPooledMut<T> {
151        let inner = self.inner.borrow_mut().insert(value);
152
153        LocalPooledMut::new(inner, Rc::clone(&self.inner))
154    }
155
156    #[doc = include_str!("../../doc/snippets/pool_insert_with.md")]
157    ///
158    /// # Example
159    ///
160    /// ```rust
161    /// use std::mem::MaybeUninit;
162    /// use std::ptr;
163    ///
164    /// use infinity_pool::LocalPinnedPool;
165    ///
166    /// struct DataBuffer {
167    ///     id: u32,
168    ///     data: MaybeUninit<[u8; 1024]>,
169    /// }
170    ///
171    /// let mut pool = LocalPinnedPool::new();
172    ///
173    /// // Initialize only the id, leaving data uninitialized for performance.
174    /// let handle = unsafe {
175    ///     pool.insert_with(|uninit: &mut MaybeUninit<DataBuffer>| {
176    ///         let ptr = uninit.as_mut_ptr();
177    ///
178    ///         // SAFETY: We are writing to a correctly located field within the object.
179    ///         unsafe {
180    ///             ptr::addr_of_mut!((*ptr).id).write(42);
181    ///         }
182    ///     })
183    /// };
184    ///
185    /// assert_eq!(handle.id, 42);
186    /// ```
187    ///
188    /// # Safety
189    #[doc = include_str!("../../doc/snippets/safety_closure_must_initialize_object.md")]
190    #[inline]
191    #[must_use]
192    pub unsafe fn insert_with<F>(&self, f: F) -> LocalPooledMut<T>
193    where
194        F: FnOnce(&mut MaybeUninit<T>),
195    {
196        // SAFETY: Forwarding safety guarantees from caller.
197        let inner = unsafe { self.inner.borrow_mut().insert_with(f) };
198
199        LocalPooledMut::new(inner, Rc::clone(&self.inner))
200    }
201
202    /// Calls a closure with an iterator over all objects in the pool.
203    ///
204    /// The iterator only yields pointers to the objects, not references, because the pool
205    /// does not have the authority to create references to its contents as user code may
206    /// concurrently be holding a conflicting exclusive reference via `LocalPooledMut<T>`.
207    ///
208    /// Therefore, obtaining actual references to pool contents via iteration is only possible
209    /// by using the pointer to create such references in unsafe code and relies on the caller
210    /// guaranteeing that no conflicting exclusive references exist.
211    ///
212    /// # Mutual exclusion
213    ///
214    /// The pool is borrowed for the entire duration of the closure, ensuring that objects
215    /// cannot be removed while iteration is in progress. This guarantees that all pointers
216    /// yielded by the iterator remain valid for the duration of the closure.
217    ///
218    /// # Examples
219    ///
220    /// ```
221    /// # use infinity_pool::LocalPinnedPool;
222    /// let mut pool = LocalPinnedPool::<u32>::new();
223    /// let _handle1 = pool.insert(42u32);
224    /// let _handle2 = pool.insert(100u32);
225    ///
226    /// let values: Vec<u32> = pool.with_iter(|iter| {
227    ///     // SAFETY: We ensure that no conflicting references to the pooled objects
228    ///     // exist. Simply look up - we just inserted the values, so there is nothing
229    ///     // else that could have a conflicting exclusive reference to them.
230    ///     iter.map(|ptr| unsafe { *ptr.as_ref() }).collect()
231    /// });
232    ///
233    /// assert_eq!(values.iter().sum::<u32>(), 142);
234    pub fn with_iter<F, R>(&self, f: F) -> R
235    where
236        F: FnOnce(LocalPinnedPoolIterator<'_, T>) -> R,
237    {
238        let guard = self.inner.borrow();
239        let iter = LocalPinnedPoolIterator::new(&guard);
240        f(iter)
241    }
242}
243
244impl<T> Clone for LocalPinnedPool<T> {
245    #[inline]
246    fn clone(&self) -> Self {
247        Self {
248            inner: Rc::clone(&self.inner),
249            _phantom: PhantomData,
250        }
251    }
252}
253
impl<T> Default for LocalPinnedPool<T> {
    /// Creates an empty pool; equivalent to [`LocalPinnedPool::new`].
    fn default() -> Self {
        Self::new()
    }
}
259
260#[cfg_attr(coverage_nightly, coverage(off))] // No API contract to test.
261impl<T> fmt::Debug for LocalPinnedPool<T> {
262    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
263        f.debug_struct(type_name::<Self>())
264            .field("inner", &self.inner)
265            .finish()
266    }
267}
268
/// Iterator over all objects in a local pinned pool.
///
/// The iterator only yields pointers to the objects, not references, because the pool
/// does not have the authority to create references to its contents as user code may
/// concurrently be holding a conflicting exclusive reference via `LocalPooledMut<T>`.
///
/// Therefore, obtaining actual references to pool contents via iteration is only possible
/// by using the pointer to create such references in unsafe code and relies on the caller
/// guaranteeing that no conflicting exclusive references exist.
#[derive(Debug)]
pub struct LocalPinnedPoolIterator<'p, T: 'static> {
    // Untyped iterator over the borrowed inner pool; yields type-erased pointers
    // that this wrapper casts to `NonNull<T>`.
    raw_iter: RawOpaquePoolIterator<'p>,
    // Records the item type `T` without storing a `T`.
    _phantom: PhantomData<T>,
}
283
284impl<'p, T> LocalPinnedPoolIterator<'p, T> {
285    fn new(pool: &'p RawOpaquePool) -> Self {
286        Self {
287            raw_iter: pool.iter(),
288            _phantom: PhantomData,
289        }
290    }
291}
292
293impl<T> Iterator for LocalPinnedPoolIterator<'_, T> {
294    type Item = NonNull<T>;
295
296    fn next(&mut self) -> Option<Self::Item> {
297        self.raw_iter.next().map(NonNull::cast::<T>)
298    }
299
300    fn size_hint(&self) -> (usize, Option<usize>) {
301        self.raw_iter.size_hint()
302    }
303}
304
305impl<T> DoubleEndedIterator for LocalPinnedPoolIterator<'_, T> {
306    fn next_back(&mut self) -> Option<Self::Item> {
307        self.raw_iter.next_back().map(NonNull::cast::<T>)
308    }
309}
310
311impl<T> ExactSizeIterator for LocalPinnedPoolIterator<'_, T> {
312    fn len(&self) -> usize {
313        self.raw_iter.len()
314    }
315}
316
317impl<T> FusedIterator for LocalPinnedPoolIterator<'_, T> {}
318
#[cfg(test)]
#[cfg_attr(coverage_nightly, coverage(off))]
mod tests {
    use std::mem::MaybeUninit;

    use static_assertions::{assert_impl_all, assert_not_impl_any};

    use super::*;
    use crate::SendAndSync;

    // The pool is single-threaded by design; it must never become Send or Sync.
    assert_not_impl_any!(LocalPinnedPool<SendAndSync>: Send, Sync);

    assert_impl_all!(LocalPinnedPoolIterator<'_, i32>: Iterator, DoubleEndedIterator, ExactSizeIterator, FusedIterator);
    assert_not_impl_any!(LocalPinnedPoolIterator<'_, SendAndSync>: Send, Sync);

    #[test]
    fn new_pool_is_empty() {
        let pool = LocalPinnedPool::<u32>::new();

        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
        assert_eq!(pool.capacity(), 0);
    }

    #[test]
    fn default_pool_is_empty() {
        let pool: LocalPinnedPool<String> = LocalPinnedPool::default();

        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
        assert_eq!(pool.capacity(), 0);
    }

    #[test]
    fn insert_and_length() {
        let pool = LocalPinnedPool::<u64>::new();

        let _h1 = pool.insert(10);
        assert_eq!(pool.len(), 1);
        assert!(!pool.is_empty());

        let _h2 = pool.insert(20);
        assert_eq!(pool.len(), 2);
    }

    #[test]
    fn capacity_grows_when_needed() {
        let pool = LocalPinnedPool::<u64>::new();

        assert_eq!(pool.capacity(), 0);

        let _handle = pool.insert(123_u64);

        // Should have some capacity now
        assert!(pool.capacity() > 0);
        let initial_capacity = pool.capacity();

        // Fill up the pool to force capacity expansion
        #[expect(
            clippy::collection_is_never_read,
            reason = "handles are used for ownership"
        )]
        let mut handles = Vec::new();
        for i in 1..initial_capacity {
            handles.push(pool.insert(i as u64));
        }

        // One more insert should expand capacity
        let _handle = pool.insert(999_u64);

        assert!(pool.capacity() >= initial_capacity);
    }

    #[test]
    fn reserve_creates_capacity() {
        let pool = LocalPinnedPool::<u8>::new();

        pool.reserve(100);
        assert!(pool.capacity() >= 100);

        let initial_capacity = pool.capacity();
        pool.reserve(50); // Should not increase capacity
        assert_eq!(pool.capacity(), initial_capacity);

        pool.reserve(200); // Should increase capacity
        assert!(pool.capacity() >= 200);
    }

    #[test]
    fn shrink_to_fit_removes_unused_capacity() {
        let pool = LocalPinnedPool::<u8>::new();

        // Reserve more than we need
        pool.reserve(100);

        // Insert only a few items
        let _handle1 = pool.insert(1_u8);
        let _handle2 = pool.insert(2_u8);

        // Shrink should not panic
        pool.shrink_to_fit();

        // Pool should still work normally
        assert_eq!(pool.len(), 2);
        let _handle3 = pool.insert(3_u8);
        assert_eq!(pool.len(), 3);
    }

    #[test]
    fn shrink_to_fit_with_zero_items_shrinks_to_zero_capacity() {
        let pool = LocalPinnedPool::<u8>::new();

        // Add some items to create capacity
        let handle1 = pool.insert(1_u8);
        let handle2 = pool.insert(2_u8);
        let handle3 = pool.insert(3_u8);

        // Verify we have capacity
        assert!(pool.capacity() > 0);

        // Remove all items by dropping handles
        drop(handle1);
        drop(handle2);
        drop(handle3);

        assert!(pool.is_empty());

        pool.shrink_to_fit();

        // Testing implementation detail: empty pool should shrink capacity to zero
        // This may become untrue with future algorithm changes, at which point
        // we will need to adjust the tests.
        assert_eq!(pool.capacity(), 0);
    }

    #[test]
    fn handle_provides_access_to_object() {
        let pool = LocalPinnedPool::<u64>::new();

        let handle = pool.insert(12345_u64);

        assert_eq!(*handle, 12345);
    }

    #[test]
    fn multiple_handles_to_same_type() {
        let pool = LocalPinnedPool::<String>::new();

        let handle1 = pool.insert("hello".to_string());
        let handle2 = pool.insert("world".to_string());

        assert_eq!(pool.len(), 2);

        assert_eq!(&*handle1, "hello");
        assert_eq!(&*handle2, "world");

        // Dropping handles should remove items from pool
        drop(handle1);
        assert_eq!(pool.len(), 1);

        drop(handle2);
        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
    }

    #[test]
    fn handle_drop_removes_objects_both_exclusive_and_shared() {
        let pool = LocalPinnedPool::<String>::new();

        // Test exclusive handle drop
        let exclusive_handle = pool.insert("exclusive".to_string());
        assert_eq!(pool.len(), 1);
        drop(exclusive_handle);
        assert_eq!(pool.len(), 0);

        // Test shared handle drop
        let mut_handle = pool.insert("shared".to_string());
        let shared_handle = mut_handle.into_shared();
        assert_eq!(pool.len(), 1);

        // Verify shared handle works
        assert_eq!(&*shared_handle, "shared");

        // Drop the shared handle should remove from pool
        drop(shared_handle);
        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
    }

    #[test]
    fn insert_with_closure() {
        let pool = LocalPinnedPool::<u64>::new();

        // SAFETY: we correctly initialize the value
        let handle = unsafe {
            pool.insert_with(|u: &mut MaybeUninit<u64>| {
                u.write(42);
            })
        };

        assert_eq!(pool.len(), 1);
        assert_eq!(*handle, 42);
    }

    #[test]
    fn clone_behavior() {
        let p1 = LocalPinnedPool::<u32>::new();
        let p2 = p1.clone();

        let _h1 = p1.insert(100);
        assert_eq!(p2.len(), 1);

        let _h2 = p2.insert(200);
        assert_eq!(p1.len(), 2);

        // Both can iterate and see the same contents
        let mut values1: Vec<u32> = p1.with_iter(|iter| {
            iter.map(|ptr| {
                // SAFETY: Iterator yields valid NonNull<T> for items alive in pool
                unsafe { *ptr.as_ref() }
            })
            .collect()
        });
        let mut values2: Vec<u32> = p2.with_iter(|iter| {
            iter.map(|ptr| {
                // SAFETY: Iterator yields valid NonNull<T> for items alive in pool
                unsafe { *ptr.as_ref() }
            })
            .collect()
        });
        // Iteration order is not part of the contract, so compare sorted.
        values1.sort_unstable();
        values2.sort_unstable();
        assert_eq!(values1, vec![100, 200]);
        assert_eq!(values2, vec![100, 200]);
    }

    #[test]
    fn lifecycle_handles_keep_pool_alive() {
        let handle = {
            let pool = LocalPinnedPool::<String>::new();
            pool.insert("persist".to_string())
        }; // pool dropped here

        assert_eq!(&*handle, "persist");
        drop(handle); // drop removes from pool
    }

    #[test]
    fn lifecycle_pool_clone_keeps_inner_alive() {
        let pool = LocalPinnedPool::<String>::new();

        // Insert & clone
        let handle = pool.insert("data".to_string());
        let pool_clone = pool.clone();

        // Drop original
        drop(pool);

        assert_eq!(&*handle, "data");
        assert_eq!(pool_clone.len(), 1);

        drop(handle);
        assert_eq!(pool_clone.len(), 0);
    }

    #[test]
    fn pooled_mut_mutation_reflected() {
        let pool = LocalPinnedPool::<String>::new();

        let mut handle = pool.insert("hello".to_string());
        handle.push_str(" world");

        assert_eq!(&*handle, "hello world");
        assert_eq!(pool.len(), 1);

        // The mutation made through the handle must be visible via iteration too.
        pool.with_iter(|iter| {
            let vals: Vec<String> = iter
                .map(|p| {
                    // SAFETY: Iterator guarantees pointer validity for duration of closure.
                    unsafe { p.as_ref().clone() }
                })
                .collect();
            assert_eq!(vals, vec!["hello world".to_string()]);
        });
    }

    #[test]
    fn multiple_handles_and_drop() {
        let pool = LocalPinnedPool::<u32>::new();

        // Insert multiple objects
        let h1 = pool.insert(1);
        let h2 = pool.insert(2);
        let h3 = pool.insert(3);

        assert_eq!(pool.len(), 3);
        assert_eq!((*h1, *h2, *h3), (1, 2, 3));

        drop(h2);
        assert_eq!(pool.len(), 2);

        // Dropping one handle must not disturb the other objects.
        assert_eq!(*h1, 1);
        assert_eq!(*h3, 3);

        drop(h1);
        drop(h3);
        assert_eq!(pool.len(), 0);
    }

    #[test]
    fn reserve_and_shrink_to_fit_shared() {
        let p1 = LocalPinnedPool::<u8>::new();
        let p2 = p1.clone();

        // Reserve via first handle
        p1.reserve(50);
        assert!(p1.capacity() >= 50);
        assert_eq!(p1.capacity(), p2.capacity());

        let h = p2.insert(99);
        assert_eq!(p1.len(), 1);
        drop(h);
        assert_eq!(p2.len(), 0);

        p1.shrink_to_fit();
        assert_eq!(p1.capacity(), p2.capacity());
    }

    #[test]
    fn with_iter_empty_pool() {
        let pool = LocalPinnedPool::<u32>::new();
        let count = pool.with_iter(|iter| iter.count());
        assert_eq!(count, 0);
    }

    #[test]
    fn with_iter_collect_values() {
        let pool = LocalPinnedPool::<u32>::new();

        let _handles: Vec<_> = [10, 20, 30].into_iter().map(|v| pool.insert(v)).collect();

        let mut collected: Vec<u32> = pool.with_iter(|iter| {
            iter.map(|p| {
                // SAFETY: Iterator yields valid NonNull<u32> pointers for items alive in pool.
                unsafe { *p.as_ref() }
            })
            .collect()
        });

        // Iteration order is not guaranteed; compare sorted.
        collected.sort_unstable();
        assert_eq!(collected, vec![10, 20, 30]);
    }

    #[test]
    fn with_iter_double_ended() {
        let pool = LocalPinnedPool::<i32>::new();
        let _handles: Vec<_> = [1, 2, 3].into_iter().map(|v| pool.insert(v)).collect();

        pool.with_iter(|mut iter| {
            assert_eq!(iter.len(), 3);

            let _front = iter.next();
            assert_eq!(iter.len(), 2);

            let _back = iter.next_back();
            assert_eq!(iter.len(), 1);

            let _remaining = iter.next();
            assert!(iter.next().is_none());
            assert!(iter.next_back().is_none());
        });
    }

    #[test]
    fn with_iter_fused_behavior() {
        let pool = LocalPinnedPool::<u32>::new();
        // Exhausted iterator must keep returning None from both ends (FusedIterator).
        pool.with_iter(|mut iter| {
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
            assert!(iter.next_back().is_none());
        });
    }

    #[test]
    fn iter_size_hint_and_exact_size() {
        let pool = LocalPinnedPool::<u32>::new();

        // Empty pool
        pool.with_iter(|iter| {
            assert_eq!(iter.size_hint(), (0, Some(0)));
            assert_eq!(iter.len(), 0);
        });

        // Add some items
        let _handle1 = pool.insert(100_u32);
        let _handle2 = pool.insert(200_u32);

        pool.with_iter(|mut iter| {
            assert_eq!(iter.size_hint(), (2, Some(2)));
            assert_eq!(iter.len(), 2);

            // Consume one item
            let first_item = iter.next();
            assert!(first_item.is_some());
            assert_eq!(iter.size_hint(), (1, Some(1)));
            assert_eq!(iter.len(), 1);

            // Consume another
            let second_item = iter.next();
            assert!(second_item.is_some());
            assert_eq!(iter.size_hint(), (0, Some(0)));
            assert_eq!(iter.len(), 0);

            // Should be exhausted now
            assert_eq!(iter.next(), None);
            assert_eq!(iter.size_hint(), (0, Some(0)));
            assert_eq!(iter.len(), 0);
        });
    }

    #[test]
    fn non_send_types() {
        // Custom non-Send type with raw pointer
        struct NonSendType(*const u8);
        // SAFETY: Only used in single-threaded local test environment, never shared across threads
        unsafe impl Sync for NonSendType {}

        // LocalPinnedPool should work with non-Send types since it is single-threaded
        use std::cell::RefCell;
        use std::rc::Rc;

        // Test with Rc (not Send)
        let rc_pool = LocalPinnedPool::<Rc<String>>::new();
        let rc_handle = rc_pool.insert(Rc::new("Non-Send data".to_string()));
        assert_eq!(rc_pool.len(), 1);
        assert_eq!(&**rc_handle, "Non-Send data");

        // Test with RefCell (not Send)
        let refcell_pool = LocalPinnedPool::<RefCell<i32>>::new();
        let refcell_handle = refcell_pool.insert(RefCell::new(42));
        assert_eq!(refcell_pool.len(), 1);
        assert_eq!(*refcell_handle.borrow(), 42);

        // Test with custom non-Send type
        let custom_pool = LocalPinnedPool::<NonSendType>::new();
        let raw_ptr = 0x1234 as *const u8;
        let non_send_handle = custom_pool.insert(NonSendType(raw_ptr));
        assert_eq!(custom_pool.len(), 1);
        assert_eq!(non_send_handle.0, raw_ptr);

        // Test iteration with non-Send types
        rc_pool.with_iter(|iter| {
            let values: Vec<String> = iter
                .map(|ptr| {
                    // SAFETY: Iterator yields valid NonNull<T> pointers for items alive in pool
                    unsafe { ptr.as_ref().as_ref().clone() }
                })
                .collect();
            assert_eq!(values, vec!["Non-Send data"]);
        });

        // Test nested non-Send types
        let nested_pool = LocalPinnedPool::<Rc<RefCell<Vec<i32>>>>::new();
        let nested_handle = nested_pool.insert(Rc::new(RefCell::new(vec![1, 2, 3])));
        assert_eq!(nested_pool.len(), 1);
        assert_eq!(*nested_handle.borrow(), vec![1, 2, 3]);
    }
}