// infinity_pool/opaque/pool_raw.rs

1use std::alloc::Layout;
2use std::iter::{self, FusedIterator};
3use std::mem::{MaybeUninit, size_of};
4use std::ptr::NonNull;
5
6use crate::opaque::slab::SlabIterator;
7use crate::{
8    DropPolicy, RawOpaquePoolBuilder, RawPooled, RawPooledMut, Slab, SlabLayout, VacancyTracker,
9};
10
/// A pool of objects with uniform memory layout.
///
/// Stores objects of any type that match a [`Layout`] defined at pool creation
/// time. All values in the pool remain pinned for their entire lifetime.
///
/// The pool automatically expands its capacity when needed.
#[doc = include_str!("../../doc/snippets/raw_pool_is_potentially_thread_safe.md")]
///
/// # Example: unique object ownership
///
/// ```rust
/// use infinity_pool::RawOpaquePool;
///
/// let mut pool = RawOpaquePool::with_layout_of::<String>();
///
/// // Insert an object into the pool, returning a unique handle to it.
/// let mut handle = pool.insert("Hello, world!".to_string());
///
/// // A unique handle allows us to create exclusive references to the target object.
/// // SAFETY: We promise to keep the pool alive for the duration of this reference.
/// let value_mut = unsafe { handle.as_mut() };
/// value_mut.push_str(" Welcome to Infinity Pool!");
///
/// println!("Updated value: {value_mut}");
///
/// // This is optional - we could also just drop the pool.
/// // SAFETY: We promise that this handle really is for an object present in this pool.
/// unsafe {
///     pool.remove(handle);
/// }
/// ```
///
/// # Example: shared object ownership
///
/// ```rust
/// use infinity_pool::RawOpaquePool;
///
/// let mut pool = RawOpaquePool::with_layout_of::<String>();
///
/// // Insert an object into the pool, returning a unique handle to it.
/// let handle = pool.insert("Hello, world!".to_string());
///
/// // The unique handle can be converted into a shared handle,
/// // allowing multiple copies of the handle to be created.
/// let shared_handle = handle.into_shared();
/// let shared_handle_copy = shared_handle;
///
/// // Shared handles allow only shared references to be created.
/// // SAFETY: We promise to keep the pool alive for the duration of this reference.
/// let value_ref = unsafe { shared_handle.as_ref() };
///
/// println!("Shared access to value: {value_ref}");
///
/// // This is optional - we could also just drop the pool.
/// // SAFETY: We promise that the object has not already been removed
/// // via a different shared handle - look up to verify that.
/// unsafe {
///     pool.remove(shared_handle);
/// }
/// ```
#[derive(Debug)]
pub struct RawOpaquePool {
    /// The layout of each slab in the pool, determined based on the object
    /// layout provided at pool creation time.
    slab_layout: SlabLayout,

    /// The slabs that make up the pool's memory capacity. Automatically extended
    /// with new slabs as needed. Shrinking is supported but must be manually commanded.
    slabs: Vec<Slab>,

    /// Drop policy that determines how the pool handles remaining items when dropped.
    drop_policy: DropPolicy,

    /// Number of items currently in the pool. We track this explicitly to avoid repeatedly
    /// summing across slabs when calculating the length.
    length: usize,

    /// Tracks which slabs have vacancies, acting as a cache for fast insertion.
    /// Guaranteed 100% accurate - we update the tracker whenever there is a status change.
    vacancy_tracker: VacancyTracker,
}
96
impl RawOpaquePool {
    #[doc = include_str!("../../doc/snippets/pool_builder.md")]
    #[cfg_attr(test, mutants::skip)] // Gets mutated to alternate version of itself.
    pub fn builder() -> RawOpaquePoolBuilder {
        RawOpaquePoolBuilder::new()
    }

    /// Creates a new instance of the pool with the specified layout.
    ///
    /// Shorthand for a builder that keeps all other options at their default values.
    ///
    /// # Panics
    ///
    /// Panics if the layout is zero-sized.
    #[must_use]
    pub fn with_layout(object_layout: Layout) -> Self {
        Self::builder().layout(object_layout).build()
    }

    /// Creates a new instance of the pool with the layout of `T`.
    ///
    /// Shorthand for a builder that keeps all other options at their default values.
    #[must_use]
    pub fn with_layout_of<T: Sized>() -> Self {
        // Unlike `with_layout`, which panics at runtime, a zero-sized `T`
        // is rejected at compile time here.
        const {
            assert!(
                size_of::<T>() > 0,
                "cannot create a pool of zero-sized objects"
            );
        };

        Self::builder().layout_of::<T>().build()
    }

    /// Creates a new pool for objects of the specified layout.
    ///
    /// # Panics
    ///
    /// Panics if the object layout has zero size.
    #[must_use]
    pub(crate) fn new_inner(object_layout: Layout, drop_policy: DropPolicy) -> Self {
        let slab_layout = SlabLayout::new(object_layout);

        Self {
            slab_layout,
            slabs: Vec::new(),
            drop_policy,
            length: 0,
            vacancy_tracker: VacancyTracker::new(),
        }
    }

    #[doc = include_str!("../../doc/snippets/opaque_pool_layout.md")]
    #[must_use]
    #[inline]
    pub fn object_layout(&self) -> Layout {
        self.slab_layout.object_layout()
    }

    #[doc = include_str!("../../doc/snippets/pool_len.md")]
    #[must_use]
    #[inline]
    pub fn len(&self) -> usize {
        self.length
    }

    #[doc = include_str!("../../doc/snippets/pool_capacity.md")]
    #[must_use]
    #[inline]
    pub fn capacity(&self) -> usize {
        // Wrapping here would imply capacity is greater than virtual memory,
        // which is impossible because we can never create that many slabs.
        self.slabs
            .len()
            .wrapping_mul(self.slab_layout.capacity().get())
    }

    #[doc = include_str!("../../doc/snippets/pool_is_empty.md")]
    #[must_use]
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.length == 0
    }

    #[doc = include_str!("../../doc/snippets/pool_reserve.md")]
    pub fn reserve(&mut self, additional: usize) {
        let required_capacity = self
            .len()
            .checked_add(additional)
            .expect("requested capacity exceeds size of virtual memory");

        // Existing capacity already suffices - vacant slots anywhere in the
        // pool count toward the reservation, not just trailing ones.
        if self.capacity() >= required_capacity {
            return;
        }

        // Calculate how many additional slabs we need
        let current_slabs = self.slabs.len();
        let required_slabs = required_capacity.div_ceil(self.slab_layout.capacity().get());
        let additional_slabs = required_slabs.saturating_sub(current_slabs);

        self.slabs.extend(
            iter::repeat_with(|| Slab::new(self.slab_layout, self.drop_policy))
                .take(additional_slabs),
        );

        // The tracker must learn about the new (fully vacant) slabs.
        self.vacancy_tracker.update_slab_count(self.slabs.len());
    }

    #[doc = include_str!("../../doc/snippets/pool_shrink_to_fit.md")]
    #[cfg_attr(test, mutants::skip)] // Vacant slot cache mutation - hard to test. Revisit later.
    pub fn shrink_to_fit(&mut self) {
        // Find the last non-empty slab by scanning from the end.
        // Only trailing empty slabs can be released - slabs before an occupied
        // one must stay because handles index slabs by position.
        let new_len = self
            .slabs
            .iter()
            .enumerate()
            .rev()
            .find_map(|(idx, slab)| {
                if !slab.is_empty() {
                    // Cannot wrap because that would imply we have more slabs than the size
                    // of virtual memory, which is impossible.
                    Some(idx.wrapping_add(1))
                } else {
                    None
                }
            })
            .unwrap_or(0);

        if new_len == self.slabs.len() {
            // Nothing to do.
            return;
        }

        // Truncate the slabs vector to remove empty slabs from the end.
        self.slabs.truncate(new_len);

        self.vacancy_tracker.update_slab_count(self.slabs.len());
    }

    #[doc = include_str!("../../doc/snippets/pool_insert.md")]
    ///
    /// # Panics
    #[doc = include_str!("../../doc/snippets/panic_on_pool_t_layout_mismatch.md")]
    #[inline]
    #[cfg_attr(test, mutants::skip)] // All mutations are unviable - skip them to save time.
    pub fn insert<T: 'static>(&mut self, value: T) -> RawPooledMut<T> {
        assert_eq!(
            Layout::new::<T>(),
            self.object_layout(),
            "layout of T does not match object layout of the pool"
        );

        // SAFETY: We just verified that T's layout matches the pool's layout.
        unsafe { self.insert_unchecked(value) }
    }

    #[doc = include_str!("../../doc/snippets/pool_insert.md")]
    /// # Safety
    #[doc = include_str!("../../doc/snippets/safety_pool_t_layout_must_match.md")]
    #[inline]
    pub unsafe fn insert_unchecked<T: 'static>(&mut self, value: T) -> RawPooledMut<T> {
        // Implement insert() in terms of insert_with() to reduce logic duplication.
        // SAFETY: Forwarding safety requirements to the caller.
        unsafe {
            self.insert_with_unchecked(|uninit: &mut MaybeUninit<T>| {
                uninit.write(value);
            })
        }
    }

    #[doc = include_str!("../../doc/snippets/pool_insert_with.md")]
    ///
    /// # Example
    ///
    /// ```rust
    /// use std::mem::MaybeUninit;
    /// use std::ptr;
    ///
    /// use infinity_pool::RawOpaquePool;
    ///
    /// struct DataBuffer {
    ///     id: u32,
    ///     data: MaybeUninit<[u8; 1024]>,
    /// }
    ///
    /// let mut pool = RawOpaquePool::with_layout_of::<DataBuffer>();
    ///
    /// // Initialize only the id, leaving data uninitialized for performance.
    /// let handle = unsafe {
    ///     pool.insert_with(|uninit: &mut MaybeUninit<DataBuffer>| {
    ///         let ptr = uninit.as_mut_ptr();
    ///
    ///         // SAFETY: We are writing to a correctly located field within the object.
    ///         unsafe {
    ///             ptr::addr_of_mut!((*ptr).id).write(42);
    ///         }
    ///     })
    /// };
    ///
    /// // SAFETY: We promise that the pool is not dropped while we hold this reference.
    /// let item = unsafe { handle.as_ref() };
    /// assert_eq!(item.id, 42);
    /// ```
    ///
    /// # Panics
    #[doc = include_str!("../../doc/snippets/panic_on_pool_t_layout_mismatch.md")]
    ///
    /// # Safety
    #[doc = include_str!("../../doc/snippets/safety_closure_must_initialize_object.md")]
    #[inline]
    pub unsafe fn insert_with<T, F>(&mut self, f: F) -> RawPooledMut<T>
    where
        F: FnOnce(&mut MaybeUninit<T>),
        T: 'static,
    {
        assert_eq!(
            Layout::new::<T>(),
            self.object_layout(),
            "layout of T does not match object layout of the pool"
        );

        // SAFETY: We just verified that T's layout matches the pool's layout.
        unsafe { self.insert_with_unchecked(f) }
    }

    #[doc = include_str!("../../doc/snippets/pool_insert_with.md")]
    ///
    /// This unchecked variant of the method skips the layout verification step, requiring
    /// the caller to ensure that the object has a matching layout with the pool.
    ///
    /// # Safety
    #[doc = include_str!("../../doc/snippets/safety_pool_t_layout_must_match.md")]
    #[doc = include_str!("../../doc/snippets/safety_closure_must_initialize_object.md")]
    pub unsafe fn insert_with_unchecked<T, F>(&mut self, f: F) -> RawPooledMut<T>
    where
        F: FnOnce(&mut MaybeUninit<T>),
        T: 'static,
    {
        let slab_index = self.index_of_slab_to_insert_into();

        // SAFETY: We just received knowledge that there is a slab with a vacant slot at this index.
        let slab = unsafe { self.slabs.get_unchecked_mut(slab_index) };

        // SAFETY: Forwarding guarantee from caller that T's layout matches the pool's layout
        // and that the closure properly initializes the value.
        //
        // We ensure that the slab is not full via the logic that gets us `slab_index`,
        // with the logic always guaranteeing that there is a vacancy in that slab.
        let slab_handle = unsafe { slab.insert_with_unchecked(f) };

        // Update our tracked length since we just inserted an object.
        // This can never overflow since that would mean the pool is greater than virtual memory.
        self.length = self.length.wrapping_add(1);

        // We invalidate the "slab with vacant slot" cache here if this is the last vacant slot.
        if slab.is_full() {
            // SAFETY: We are currently operating on the slab, so it must be an existing slab.
            // The mechanism that adds slabs is responsible for updating vacancy tracker bounds.
            unsafe {
                self.vacancy_tracker.update_slab_status(slab_index, false);
            }
        }

        // The pool itself does not care about the type T but for the convenience of the caller
        // we imbue the RawPooledMut with the type information, to reduce required casting by caller.
        RawPooledMut::new(slab_index, slab_handle)
    }

    #[doc = include_str!("../../doc/snippets/raw_pool_remove.md")]
    pub unsafe fn remove<T: ?Sized>(&mut self, handle: impl Into<RawPooled<T>>) {
        let handle = handle.into();

        // SAFETY: Caller guarantees the handle is valid for this pool.
        let slab = unsafe { self.slabs.get_unchecked_mut(handle.slab_index()) };

        // SAFETY: Caller guarantees the handle is valid for this pool.
        unsafe {
            slab.remove(handle.slab_handle());
        }

        // Update our tracked length since we just removed an object.
        // This cannot wrap around because we just removed an object,
        // so the value must be at least 1 before subtraction.
        self.length = self.length.wrapping_sub(1);

        // `len == capacity - 1` after removal means the slab was full before it,
        // so its vacancy status just flipped and the tracker must be informed.
        if slab.len() == self.slab_layout.capacity().get().wrapping_sub(1) {
            // We removed from a full slab.
            // This means we have a vacant slot where there was not one before.

            // SAFETY: We are currently operating on the slab, so it must be an existing slab.
            // The mechanism that adds slabs is responsible for updating vacancy tracker bounds.
            unsafe {
                self.vacancy_tracker
                    .update_slab_status(handle.slab_index(), true);
            }
        }
    }

    #[doc = include_str!("../../doc/snippets/raw_pool_remove_unpin.md")]
    #[must_use]
    pub unsafe fn remove_unpin<T: Unpin>(&mut self, handle: impl Into<RawPooled<T>>) -> T {
        let handle = handle.into();

        // Compile-time guard: a zero-sized `T` could never have been inserted,
        // so extracting one by value is statically rejected.
        const {
            assert!(
                size_of::<T>() > 0,
                "cannot extract zero-sized types from pool"
            );
        };

        // SAFETY: Caller guarantees the handle is valid for this pool.
        let slab = unsafe { self.slabs.get_unchecked_mut(handle.slab_index()) };

        // SAFETY: Caller guarantees the handle is valid for this pool.
        let value = unsafe { slab.remove_unpin::<T>(handle.slab_handle()) };

        // Update our tracked length since we just removed an object.
        // This cannot wrap around because we just removed an object,
        // so the value must be at least 1 before subtraction.
        self.length = self.length.wrapping_sub(1);

        // `len == capacity - 1` after removal means the slab was full before it,
        // so its vacancy status just flipped and the tracker must be informed.
        if slab.len() == self.slab_layout.capacity().get().wrapping_sub(1) {
            // We removed from a full slab.
            // This means we have a vacant slot where there was not one before.

            // SAFETY: We are currently operating on the slab, so it must be an existing slab.
            // The mechanism that adds slabs is responsible for updating vacancy tracker bounds.
            unsafe {
                self.vacancy_tracker
                    .update_slab_status(handle.slab_index(), true);
            }
        }

        value
    }

    #[doc = include_str!("../../doc/snippets/raw_pool_iter.md")]
    #[must_use]
    #[inline]
    pub fn iter(&self) -> RawOpaquePoolIterator<'_> {
        RawOpaquePoolIterator::new(self)
    }

    /// Returns the index of a slab that is guaranteed to have at least one
    /// vacant slot. Adds a new slab if needed.
    #[must_use]
    fn index_of_slab_to_insert_into(&mut self) -> usize {
        if let Some(index) = self.vacancy_tracker.next_vacancy() {
            // There is a vacancy, so use it.
            return index;
        }

        // If we got here, there are no vacancies and we need to extend the pool.
        debug_assert_eq!(self.len(), self.capacity());

        self.slabs
            .push(Slab::new(self.slab_layout, self.drop_policy));

        self.vacancy_tracker.update_slab_count(self.slabs.len());

        // This can never wrap around because we just added a slab, so len() is at least 1.
        self.slabs.len().wrapping_sub(1)
    }
}
460
/// Iterator over all objects in a raw opaque pool.
///
/// This iterator yields untyped pointers to objects stored across all slabs in the pool.
/// Since the pool can contain objects of different types (as long as they have the same layout),
/// the iterator returns `NonNull<()>` and leaves type casting to the caller.
///
/// # Thread safety
///
/// The type is single-threaded.
#[derive(Debug)]
pub struct RawOpaquePoolIterator<'p> {
    pool: &'p RawOpaquePool,

    // Current slab index for forward iteration.
    // This is the index of the next slab we will take items from.
    // If iterator is exhausted, will point to undefined value.
    current_front_slab_index: usize,

    // Current slab index for backward iteration.
    // This is the index of the next slab we will take items from.
    // If iterator is exhausted, will point to undefined value.
    current_back_slab_index: usize,

    // Iterator for the current front slab (if any).
    current_front_slab_iter: Option<SlabIterator<'p>>,

    // Iterator for the current back slab (if any).
    current_back_slab_iter: Option<SlabIterator<'p>>,

    // Total number of items already yielded (from either end).
    // Compared against `pool.len()` to decide when iteration is exhausted.
    yielded_count: usize,
}
493
494impl<'p> RawOpaquePoolIterator<'p> {
495    fn new(pool: &'p RawOpaquePool) -> Self {
496        Self {
497            pool,
498            current_front_slab_index: 0,
499            // This is allowed to wrap - if the iterator is exhausted, we point to undefined value.
500            current_back_slab_index: pool.slabs.len().wrapping_sub(1),
501            current_front_slab_iter: None,
502            current_back_slab_iter: None,
503            yielded_count: 0,
504        }
505    }
506}
507
impl Iterator for RawOpaquePoolIterator<'_> {
    type Item = NonNull<()>;

    fn next(&mut self) -> Option<Self::Item> {
        // Exhaustion is governed solely by `self.len()` (pool length minus
        // items already yielded); this shared budget also prevents the front
        // and back cursors from ever yielding the same item twice between them.
        while self.len() > 0 {
            // If no current iterator, get one for the current slab.
            // NOTE: the closure captures only the `pool` and index fields
            // (disjoint captures), so it does not conflict with the mutable
            // borrow of `current_front_slab_iter`.
            let slab_iter = self.current_front_slab_iter.get_or_insert_with(|| {
                self.pool.slabs
                    .get(self.current_front_slab_index)
                    .expect("iterator has items remaining, so there must still be a slab to get them from")
                    .iter()
            });

            // Try to get the next item from current iterator
            if let Some(item) = slab_iter.next() {
                // Will never wrap because that would mean we have more
                // items than we have virtual memory.
                self.yielded_count = self.yielded_count.wrapping_add(1);
                return Some(item);
            }

            // No more items from this slab, move to next
            // This is allowed to wrap - if the iterator is exhausted, we point to undefined value.
            self.current_front_slab_index = self.current_front_slab_index.wrapping_add(1);
            self.current_front_slab_iter = None;
        }

        None
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact count is known, so lower and upper bounds coincide.
        let remaining = self.len();
        (remaining, Some(remaining))
    }
}
543
impl DoubleEndedIterator for RawOpaquePoolIterator<'_> {
    fn next_back(&mut self) -> Option<Self::Item> {
        // Mirror of `next()`: walks slabs from the back, sharing the same
        // yielded-count budget with the forward cursor so the two can never
        // jointly yield more items than the pool contains.
        while self.len() > 0 {
            // If no current iterator, get one for the current slab.
            let slab_iter = self.current_back_slab_iter.get_or_insert_with(|| {
                self.pool.slabs
                    .get(self.current_back_slab_index)
                    .expect("iterator has items remaining, so there must still be a slab to get them from")
                    .iter()
            });

            // Try to get the next item from current iterator
            if let Some(item) = slab_iter.next_back() {
                // Will never wrap because that would mean we have more
                // items than we have virtual memory.
                self.yielded_count = self.yielded_count.wrapping_add(1);
                return Some(item);
            }

            // No more items from this slab, move to next
            // This is allowed to wrap - if the iterator is exhausted, we point to undefined value.
            self.current_back_slab_index = self.current_back_slab_index.wrapping_sub(1);
            self.current_back_slab_iter = None;
        }

        None
    }
}
572
573impl ExactSizeIterator for RawOpaquePoolIterator<'_> {
574    fn len(&self) -> usize {
575        // Total objects in pool minus those we've already yielded
576        // Will not wrap because we cannot yield more items than exist in the pool.
577        self.pool.len().wrapping_sub(self.yielded_count)
578    }
579}
580
// Fused: once `len()` reaches zero we keep returning `None` forever,
// satisfying the `FusedIterator` contract.
impl FusedIterator for RawOpaquePoolIterator<'_> {}
583
584impl<'p> IntoIterator for &'p RawOpaquePool {
585    type Item = NonNull<()>;
586    type IntoIter = RawOpaquePoolIterator<'p>;
587
588    fn into_iter(self) -> Self::IntoIter {
589        self.iter()
590    }
591}
592
593#[cfg(test)]
594#[allow(
595    clippy::indexing_slicing,
596    clippy::multiple_unsafe_ops_per_block,
597    clippy::undocumented_unsafe_blocks,
598    reason = "tests focus on succinct code and do not need to tick all the boxes"
599)]
600#[cfg_attr(coverage_nightly, coverage(off))]
601mod tests {
602    use std::alloc::Layout;
603    use std::mem::MaybeUninit;
604
605    use static_assertions::{assert_impl_all, assert_not_impl_any};
606
607    use super::*;
608
    // We are nominally single-threaded.
    assert_not_impl_any!(RawOpaquePool: Send, Sync);

    // The iterator implements the full suite of iterator traits but, like the
    // pool, stays single-threaded.
    assert_impl_all!(RawOpaquePoolIterator<'_>: Iterator, DoubleEndedIterator, ExactSizeIterator, FusedIterator);
    assert_not_impl_any!(RawOpaquePoolIterator<'_>: Send, Sync);

    assert_impl_all!(&RawOpaquePool: IntoIterator);
616
    #[test]
    fn new_pool_is_empty() {
        let pool = RawOpaquePool::with_layout_of::<u64>();

        // A fresh pool has no items and no slabs allocated yet.
        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
        assert_eq!(pool.capacity(), 0);
        assert_eq!(pool.object_layout(), Layout::new::<u64>());
    }

    #[test]
    fn with_layout_results_in_pool_with_correct_layout() {
        let layout = Layout::new::<i64>();
        let pool = RawOpaquePool::with_layout(layout);

        assert_eq!(pool.object_layout(), layout);
        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
        assert_eq!(pool.capacity(), 0);
    }

    #[test]
    fn instance_creation_through_builder_succeeds() {
        let pool = RawOpaquePool::builder()
            .layout_of::<i64>()
            .drop_policy(DropPolicy::MustNotDropContents)
            .build();

        assert_eq!(pool.object_layout(), Layout::new::<i64>());
        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
        assert_eq!(pool.capacity(), 0);
    }
650
    #[test]
    fn insert_and_length() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        let _handle1 = pool.insert(42_u32);
        assert_eq!(pool.len(), 1);
        assert!(!pool.is_empty());

        let _handle2 = pool.insert(100_u32);
        assert_eq!(pool.len(), 2);
    }

    #[test]
    fn capacity_grows_with_slabs() {
        let mut pool = RawOpaquePool::with_layout_of::<u64>();

        assert_eq!(pool.capacity(), 0);

        let _handle = pool.insert(123_u64);

        // Should have at least one slab's worth of capacity now
        assert!(pool.capacity() > 0);
        let initial_capacity = pool.capacity();

        // Fill up the slab to force creation of a new one
        for i in 1..initial_capacity {
            let _handle = pool.insert(i as u64);
        }

        // One more insert should create a new slab
        let _handle = pool.insert(999_u64);

        assert!(pool.capacity() >= initial_capacity * 2);
    }

    #[test]
    fn reserve_creates_capacity() {
        let mut pool = RawOpaquePool::with_layout_of::<u8>();

        pool.reserve(100);
        assert!(pool.capacity() >= 100);

        let initial_capacity = pool.capacity();
        pool.reserve(50); // Should not increase capacity
        assert_eq!(pool.capacity(), initial_capacity);

        pool.reserve(200); // Should increase capacity
        assert!(pool.capacity() >= 200);
    }
700
    #[test]
    fn insert_with_closure() {
        let mut pool = RawOpaquePool::with_layout_of::<u64>();

        // The closure-based insertion path must initialize the slot in place.
        let handle = unsafe {
            pool.insert_with(|uninit: &mut MaybeUninit<u64>| {
                uninit.write(42);
            })
        };

        assert_eq!(pool.len(), 1);

        let value = unsafe { pool.remove_unpin(handle) };
        assert_eq!(value, 42);
    }

    #[test]
    fn remove_decreases_length() {
        let mut pool = RawOpaquePool::with_layout_of::<String>();

        let handle1 = pool.insert("hello".to_string());
        let handle2 = pool.insert("world".to_string());

        assert_eq!(pool.len(), 2);

        unsafe {
            pool.remove(handle1);
        }
        assert_eq!(pool.len(), 1);

        unsafe {
            pool.remove(handle2);
        }
        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
    }

    #[test]
    fn remove_unpin_returns_value() {
        let mut pool = RawOpaquePool::with_layout_of::<i32>();

        let handle = pool.insert(-456_i32);

        // remove_unpin moves the value back out of the pool.
        let value = unsafe { pool.remove_unpin(handle) };
        assert_eq!(value, -456);
        assert_eq!(pool.len(), 0);
    }
748
    #[test]
    fn shrink_to_fit_removes_empty_slabs() {
        let mut pool = RawOpaquePool::with_layout_of::<u8>();

        // Add some items.
        let mut handles = Vec::new();
        for i in 0..10 {
            handles.push(pool.insert(u8::try_from(i).unwrap()));
        }

        // Remove all items.
        for handle in handles {
            unsafe {
                pool.remove(handle);
            }
        }

        assert!(pool.is_empty());

        pool.shrink_to_fit();

        // We have white-box knowledge that an empty pool will shrink to zero.
        // This may become untrue with future algorithm changes, at which point
        // we will need to adjust the tests.
        assert_eq!(pool.capacity(), 0);
    }

    #[test]
    fn handle_provides_access_to_object() {
        let mut pool = RawOpaquePool::with_layout_of::<u64>();

        let handle = pool.insert(12345_u64);

        assert_eq!(unsafe { *handle.as_ref() }, 12345);

        // Access the value through the handle's pointer
        let ptr = handle.ptr();

        let value = unsafe { ptr.as_ref() };

        assert_eq!(*value, 12345);
    }

    #[test]
    fn shared_handles_are_copyable() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        // Copy and clone must both produce handles to the same object.
        let handle1 = pool.insert(789_u32).into_shared();
        let handle2 = handle1;
        #[expect(clippy::clone_on_copy, reason = "intentional, testing cloning")]
        let handle3 = handle1.clone();

        unsafe {
            assert_eq!(*handle1.as_ref(), *handle2.as_ref());
            assert_eq!(*handle1.as_ref(), *handle3.as_ref());
            assert_eq!(*handle2.as_ref(), *handle3.as_ref());
        }
    }
807
    #[test]
    fn multiple_removals_and_insertions() {
        let mut pool = RawOpaquePool::with_layout_of::<usize>();

        // Insert, remove, insert again to test slot reuse
        let handle1 = pool.insert(1_usize);
        unsafe {
            pool.remove(handle1);
        }

        let handle2 = pool.insert(2_usize);

        assert_eq!(pool.len(), 1);

        let value = unsafe { pool.remove_unpin(handle2) };
        assert_eq!(value, 2);
    }

    #[test]
    fn remove_with_shared_handle() {
        let mut pool = RawOpaquePool::with_layout_of::<i64>();

        // remove() accepts shared handles as well as unique ones.
        let handle = pool.insert(999_i64).into_shared();

        assert_eq!(pool.len(), 1);

        unsafe {
            pool.remove(handle);
        }

        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
    }

    #[test]
    fn remove_unpin_with_shared_handle() {
        let mut pool = RawOpaquePool::with_layout_of::<i32>();

        let handle = pool.insert(42_i32).into_shared();

        assert_eq!(pool.len(), 1);

        let value = unsafe { pool.remove_unpin(handle) };

        assert_eq!(value, 42);
        assert_eq!(pool.len(), 0);
    }
855
    #[test]
    #[should_panic]
    fn insert_panics_if_provided_type_with_wrong_layout() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        // Try to insert a u64 into a pool configured for u32
        let _handle = pool.insert(123_u64);
    }

    #[test]
    #[should_panic]
    fn insert_with_panics_if_provided_type_with_wrong_layout() {
        let mut pool = RawOpaquePool::with_layout_of::<u16>();

        // Try to insert a u32 into a pool configured for u16
        let _handle = unsafe {
            pool.insert_with(|uninit: &mut MaybeUninit<u32>| {
                uninit.write(456);
            })
        };
    }
877
    #[test]
    fn iter_empty_pool() {
        let pool = RawOpaquePool::with_layout_of::<u32>();

        // An empty pool's iterator reports zero length before and after next().
        let mut iter = pool.iter();
        assert_eq!(iter.size_hint(), (0, Some(0)));
        assert_eq!(iter.len(), 0);

        assert_eq!(iter.next(), None);
        assert_eq!(iter.size_hint(), (0, Some(0)));
        assert_eq!(iter.len(), 0);
    }

    #[test]
    fn iter_single_item() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        let _handle = pool.insert(42_u32);

        let mut iter = pool.iter();
        assert_eq!(iter.len(), 1);

        // First item should be the object we inserted
        let ptr = iter.next().unwrap();

        let value = unsafe { ptr.cast::<u32>().as_ref() };
        assert_eq!(*value, 42);

        // No more items
        assert_eq!(iter.next(), None);
        assert_eq!(iter.len(), 0);
    }

    #[test]
    fn iter_multiple_items_single_slab() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        // Insert multiple items that should fit in a single slab
        let _handle1 = pool.insert(100_u32);
        let _handle2 = pool.insert(200_u32);
        let _handle3 = pool.insert(300_u32);

        let values: Vec<u32> = pool
            .iter()
            .map(|ptr| unsafe { *ptr.cast::<u32>().as_ref() })
            .collect();

        // Should get all values in order of their slot indices
        assert_eq!(values, vec![100, 200, 300]);
    }
928
929    #[test]
930    fn iter_multiple_items_multiple_slabs() {
931        let mut pool = RawOpaquePool::with_layout_of::<u8>();
932
933        // Insert enough items to span multiple slabs
934        #[expect(
935            clippy::collection_is_never_read,
936            reason = "handles are used for ownership"
937        )]
938        let mut handles = Vec::new();
939        for i in 0..50 {
940            handles.push(pool.insert(u8::try_from(i).unwrap()));
941        }
942
943        let values: Vec<u8> = pool
944            .iter()
945            .map(|ptr| unsafe { *ptr.cast::<u8>().as_ref() })
946            .collect();
947
948        // Should get all values we inserted
949        assert_eq!(values.len(), 50);
950        for (i, &value) in values.iter().enumerate() {
951            assert_eq!(value, u8::try_from(i).unwrap());
952        }
953    }
954
955    #[test]
956    fn iter_with_gaps() {
957        let mut pool = RawOpaquePool::with_layout_of::<u32>();
958
959        // Insert items
960        let _handle1 = pool.insert(100_u32);
961        let handle2 = pool.insert(200_u32);
962        let _handle3 = pool.insert(300_u32);
963
964        // Remove the middle item to create a gap
965        unsafe {
966            pool.remove(handle2);
967        }
968
969        let values: Vec<u32> = pool
970            .iter()
971            .map(|ptr| unsafe { *ptr.cast::<u32>().as_ref() })
972            .collect();
973
974        // Should get only the remaining values
975        assert_eq!(values, vec![100, 300]);
976    }
977
978    #[test]
979    fn iter_with_empty_slabs() {
980        let mut pool = RawOpaquePool::with_layout_of::<u64>();
981
982        // Force creation of multiple slabs by inserting many items
983        let mut handles = Vec::new();
984        for i in 0_u64..20 {
985            handles.push(pool.insert(i));
986        }
987
988        // Remove all items from some slabs to create empty slabs
989        for handle in handles.drain(5..15) {
990            unsafe {
991                pool.remove(handle);
992            }
993        }
994
995        let values: Vec<u64> = pool
996            .iter()
997            .map(|ptr| unsafe { *ptr.cast::<u64>().as_ref() })
998            .collect();
999
1000        // Should get values from non-empty slabs only
1001        let expected: Vec<u64> = (0..5_u64).chain(15..20_u64).collect();
1002        assert_eq!(values, expected);
1003    }
1004
1005    #[test]
1006    fn iter_size_hint() {
1007        let mut pool = RawOpaquePool::with_layout_of::<u32>();
1008
1009        // Empty pool
1010        let iter = pool.iter();
1011        assert_eq!(iter.size_hint(), (0, Some(0)));
1012        assert_eq!(iter.len(), 0);
1013
1014        // Add some items
1015        let _handle1 = pool.insert(100_u32);
1016        let _handle2 = pool.insert(200_u32);
1017
1018        let mut iter = pool.iter();
1019        assert_eq!(iter.size_hint(), (2, Some(2)));
1020        assert_eq!(iter.len(), 2);
1021
1022        // Consume one item
1023        let first_item = iter.next();
1024        assert!(first_item.is_some());
1025        assert_eq!(iter.size_hint(), (1, Some(1)));
1026        assert_eq!(iter.len(), 1);
1027
1028        // Consume another
1029        let second_item = iter.next();
1030        assert!(second_item.is_some());
1031        assert_eq!(iter.size_hint(), (0, Some(0)));
1032        assert_eq!(iter.len(), 0);
1033
1034        // Should be exhausted now
1035        assert_eq!(iter.next(), None);
1036        assert_eq!(iter.size_hint(), (0, Some(0)));
1037        assert_eq!(iter.len(), 0);
1038    }
1039
1040    #[test]
1041    fn iter_double_ended_basic() {
1042        let mut pool = RawOpaquePool::with_layout_of::<u32>();
1043
1044        // Insert items
1045        let _handle1 = pool.insert(100_u32);
1046        let _handle2 = pool.insert(200_u32);
1047        let _handle3 = pool.insert(300_u32);
1048
1049        let mut iter = pool.iter();
1050
1051        // Iterate from the back
1052        let last_ptr = iter.next_back().unwrap();
1053        let last_value = unsafe { *last_ptr.cast::<u32>().as_ref() };
1054        assert_eq!(last_value, 300);
1055
1056        let middle_ptr = iter.next_back().unwrap();
1057        let middle_value = unsafe { *middle_ptr.cast::<u32>().as_ref() };
1058        assert_eq!(middle_value, 200);
1059
1060        let first_ptr = iter.next_back().unwrap();
1061        let first_value = unsafe { *first_ptr.cast::<u32>().as_ref() };
1062        assert_eq!(first_value, 100);
1063
1064        // Should be exhausted
1065        assert_eq!(iter.next_back(), None);
1066        assert_eq!(iter.next(), None);
1067    }
1068
1069    #[test]
1070    fn iter_double_ended_mixed_directions() {
1071        let mut pool = RawOpaquePool::with_layout_of::<u32>();
1072
1073        // Insert 5 items
1074        let _handle1 = pool.insert(100_u32);
1075        let _handle2 = pool.insert(200_u32);
1076        let _handle3 = pool.insert(300_u32);
1077        let _handle4 = pool.insert(400_u32);
1078        let _handle5 = pool.insert(500_u32);
1079
1080        let mut iter = pool.iter();
1081        assert_eq!(iter.len(), 5);
1082
1083        // Get first from front
1084        let first_ptr = iter.next().unwrap();
1085        let first_value = unsafe { *first_ptr.cast::<u32>().as_ref() };
1086        assert_eq!(first_value, 100);
1087        assert_eq!(iter.len(), 4);
1088
1089        // Get last from back
1090        let last_ptr = iter.next_back().unwrap();
1091        let last_value = unsafe { *last_ptr.cast::<u32>().as_ref() };
1092        assert_eq!(last_value, 500);
1093        assert_eq!(iter.len(), 3);
1094
1095        // Get second from front
1096        let second_ptr = iter.next().unwrap();
1097        let second_value = unsafe { *second_ptr.cast::<u32>().as_ref() };
1098        assert_eq!(second_value, 200);
1099        assert_eq!(iter.len(), 2);
1100
1101        // Get fourth from back
1102        let fourth_ptr = iter.next_back().unwrap();
1103        let fourth_value = unsafe { *fourth_ptr.cast::<u32>().as_ref() };
1104        assert_eq!(fourth_value, 400);
1105        assert_eq!(iter.len(), 1);
1106
1107        // Get middle item
1108        let middle_ptr = iter.next().unwrap();
1109        let middle_value = unsafe { *middle_ptr.cast::<u32>().as_ref() };
1110        assert_eq!(middle_value, 300);
1111        assert_eq!(iter.len(), 0);
1112
1113        // Should be exhausted
1114        assert_eq!(iter.next(), None);
1115        assert_eq!(iter.next_back(), None);
1116        assert_eq!(iter.len(), 0);
1117    }
1118
1119    #[test]
1120    fn iter_fused_behavior() {
1121        let mut pool = RawOpaquePool::with_layout_of::<u32>();
1122
1123        // Test with empty pool
1124        let mut iter = pool.iter();
1125        assert_eq!(iter.next(), None);
1126        assert_eq!(iter.next(), None); // Should still be None
1127        assert_eq!(iter.next_back(), None);
1128        assert_eq!(iter.next_back(), None); // Should still be None
1129
1130        // Test with some items
1131        let _handle1 = pool.insert(100_u32);
1132        let _handle2 = pool.insert(200_u32);
1133
1134        let mut iter = pool.iter();
1135
1136        // Consume all items
1137        let first = iter.next();
1138        assert!(first.is_some());
1139        let second = iter.next();
1140        assert!(second.is_some());
1141
1142        // Now iterator should be exhausted
1143        assert_eq!(iter.next(), None);
1144        assert_eq!(iter.next(), None); // FusedIterator guarantee: still None
1145        assert_eq!(iter.next(), None); // Still None
1146        assert_eq!(iter.next_back(), None); // Should also be None from back
1147        assert_eq!(iter.next_back(), None); // Still None from back
1148
1149        // Test bidirectional exhaustion
1150        let mut iter = pool.iter();
1151
1152        // Consume from both ends until exhausted
1153        iter.next(); // Consume from front
1154        iter.next_back(); // Consume from back
1155
1156        // Now should be exhausted
1157        assert_eq!(iter.next(), None);
1158        assert_eq!(iter.next_back(), None);
1159        assert_eq!(iter.next(), None); // FusedIterator guarantee
1160        assert_eq!(iter.next_back(), None); // FusedIterator guarantee
1161    }
1162
1163    #[test]
1164    fn iter_across_multiple_slabs_with_gaps() {
1165        let mut pool = RawOpaquePool::with_layout_of::<usize>();
1166
1167        // Create a pattern: insert many items, remove some to create gaps across slabs
1168        let mut handles = Vec::new();
1169        for i in 0_usize..30 {
1170            handles.push(pool.insert(i));
1171        }
1172
1173        // Remove every third item to create gaps across slabs
1174        let mut to_remove = Vec::new();
1175        for (index, _) in handles.iter().enumerate().step_by(3) {
1176            to_remove.push(index);
1177        }
1178
1179        // Remove in reverse order to maintain indices
1180        for &index in to_remove.iter().rev() {
1181            unsafe {
1182                pool.remove(handles.swap_remove(index));
1183            }
1184        }
1185
1186        let values: Vec<usize> = pool
1187            .iter()
1188            .map(|ptr| unsafe { *ptr.cast::<usize>().as_ref() })
1189            .collect();
1190
1191        // Should get all non-removed values
1192        let expected: Vec<usize> = (0_usize..30).filter(|&i| i % 3 != 0).collect();
1193        assert_eq!(values, expected);
1194    }
1195
1196    #[test]
1197    fn into_iterator_trait_works() {
1198        let mut pool = RawOpaquePool::with_layout_of::<u32>();
1199
1200        let _handle1 = pool.insert(100_u32);
1201        let _handle2 = pool.insert(200_u32);
1202        let _handle3 = pool.insert(300_u32);
1203
1204        // Test using for-in loop (which uses IntoIterator)
1205        let mut values = Vec::new();
1206        for ptr in &pool {
1207            let value = unsafe { *ptr.cast::<u32>().as_ref() };
1208            values.push(value);
1209        }
1210
1211        assert_eq!(values, vec![100, 200, 300]);
1212    }
1213}