// infinity_pool/opaque/pool_raw.rs

use std::alloc::Layout;
use std::iter::{self, FusedIterator};
use std::mem::MaybeUninit;
use std::ptr::NonNull;

use crate::opaque::slab::SlabIterator;
use crate::{
    DropPolicy, RawOpaquePoolBuilder, RawPooled, RawPooledMut, Slab, SlabLayout, VacancyTracker,
};

/// A pool of objects with uniform memory layout.
///
/// Stores objects of any type whose layout matches the [`Layout`] defined at
/// pool creation time. All values in the pool remain pinned for their entire
/// lifetime.
///
/// The pool automatically expands its capacity when needed.
#[doc = include_str!("../../doc/snippets/raw_pool_is_potentially_send.md")]
///
/// # Example
///
/// ```rust
/// use infinity_pool::RawOpaquePool;
///
/// fn work_with_displayable<T: std::fmt::Display + 'static + Unpin>(value: T) {
///     let mut pool = RawOpaquePool::with_layout_of::<T>();
///
///     // Insert an object into the pool
///     let handle = pool.insert(value);
///
///     // Access the object through the handle
///     // SAFETY: The handle refers to a live object in this pool.
///     let stored_value = unsafe { handle.ptr().as_ref() };
///     println!("Stored: {}", stored_value);
///
///     // Explicitly remove the object from the pool
///     pool.remove_mut(handle);
/// }
///
/// work_with_displayable("Hello, world!");
/// work_with_displayable(42);
/// ```
#[derive(Debug)]
pub struct RawOpaquePool {
    /// The layout of each slab in the pool, determined based on the object
    /// layout provided at pool creation time.
    slab_layout: SlabLayout,

    /// The slabs that make up the pool's memory capacity. The pool is automatically
    /// extended with new slabs as needed; shrinking is supported but only happens
    /// when explicitly requested via `shrink_to_fit()`.
    slabs: Vec<Slab>,

    /// Drop policy that determines how the pool handles remaining items when dropped.
    drop_policy: DropPolicy,

    /// Number of items currently in the pool. We track this explicitly to avoid repeatedly
    /// summing across slabs when calculating the length.
    length: usize,

    /// Tracks which slabs have vacancies, acting as a cache for fast insertion.
    /// Guaranteed 100% accurate - we update the tracker whenever a slab's status changes.
    vacancy_tracker: VacancyTracker,
}

impl RawOpaquePool {
    #[doc = include_str!("../../doc/snippets/pool_builder.md")]
    #[cfg_attr(test, mutants::skip)] // Gets mutated to alternate version of itself.
    pub fn builder() -> RawOpaquePoolBuilder {
        RawOpaquePoolBuilder::new()
    }

    /// Creates a new instance of the pool with the specified layout.
    ///
    /// Shorthand for a builder that keeps all other options at their default values.
    ///
    /// # Panics
    ///
    /// Panics if the layout is zero-sized.
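    ///
    /// # Example
    ///
    /// A minimal sketch (mirroring `with_layout_results_in_pool_with_correct_layout`
    /// in the tests below):
    ///
    /// ```rust
    /// use std::alloc::Layout;
    ///
    /// use infinity_pool::RawOpaquePool;
    ///
    /// let layout = Layout::new::<i64>();
    /// let pool = RawOpaquePool::with_layout(layout);
    ///
    /// assert_eq!(pool.object_layout(), layout);
    /// assert!(pool.is_empty());
    /// ```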
    #[must_use]
    pub fn with_layout(object_layout: Layout) -> Self {
        Self::builder().layout(object_layout).build()
    }

    /// Creates a new instance of the pool with the layout of `T`.
    ///
    /// Shorthand for a builder that keeps all other options at their default values.
    ///
    /// # Panics
    ///
    /// Panics if `T` is a zero-sized type.
    #[must_use]
    pub fn with_layout_of<T: Sized>() -> Self {
        Self::builder().layout_of::<T>().build()
    }

    /// Creates a new pool for objects of the specified layout.
    ///
    /// # Panics
    ///
    /// Panics if the object layout has zero size.
    #[must_use]
    pub(crate) fn new_inner(object_layout: Layout, drop_policy: DropPolicy) -> Self {
        let slab_layout = SlabLayout::new(object_layout);

        Self {
            slab_layout,
            slabs: Vec::new(),
            drop_policy,
            length: 0,
            vacancy_tracker: VacancyTracker::new(),
        }
    }

    #[doc = include_str!("../../doc/snippets/opaque_pool_layout.md")]
    #[must_use]
    #[inline]
    pub fn object_layout(&self) -> Layout {
        self.slab_layout.object_layout()
    }

    #[doc = include_str!("../../doc/snippets/pool_len.md")]
    #[must_use]
    #[inline]
    pub fn len(&self) -> usize {
        self.length
    }

    #[doc = include_str!("../../doc/snippets/pool_capacity.md")]
    #[must_use]
    #[inline]
    pub fn capacity(&self) -> usize {
        // Wrapping here would imply capacity is greater than virtual memory,
        // which is impossible because we can never create that many slabs.
        self.slabs
            .len()
            .wrapping_mul(self.slab_layout.capacity().get())
    }

    #[doc = include_str!("../../doc/snippets/pool_is_empty.md")]
    #[must_use]
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.length == 0
    }

    #[doc = include_str!("../../doc/snippets/pool_reserve.md")]
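    ///
    /// # Example
    ///
    /// A minimal sketch (mirroring `reserve_creates_capacity` in the tests below):
    ///
    /// ```rust
    /// use infinity_pool::RawOpaquePool;
    ///
    /// let mut pool = RawOpaquePool::with_layout_of::<u8>();
    ///
    /// pool.reserve(100);
    /// assert!(pool.capacity() >= 100);
    ///
    /// // A smaller reservation is already satisfied, so capacity is unchanged.
    /// let capacity_before = pool.capacity();
    /// pool.reserve(50);
    /// assert_eq!(pool.capacity(), capacity_before);
    /// ```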
    pub fn reserve(&mut self, additional: usize) {
        let required_capacity = self
            .len()
            .checked_add(additional)
            .expect("requested capacity exceeds size of virtual memory");

        if self.capacity() >= required_capacity {
            return;
        }

        // Calculate how many additional slabs we need
        let current_slabs = self.slabs.len();
        let required_slabs = required_capacity.div_ceil(self.slab_layout.capacity().get());
        let additional_slabs = required_slabs.saturating_sub(current_slabs);

        self.slabs.extend(
            iter::repeat_with(|| Slab::new(self.slab_layout, self.drop_policy))
                .take(additional_slabs),
        );

        self.vacancy_tracker.update_slab_count(self.slabs.len());
    }

    #[doc = include_str!("../../doc/snippets/pool_shrink_to_fit.md")]
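    ///
    /// # Example
    ///
    /// A minimal sketch (mirroring `shrink_to_fit_removes_empty_slabs` in the
    /// tests below):
    ///
    /// ```rust
    /// use infinity_pool::RawOpaquePool;
    ///
    /// let mut pool = RawOpaquePool::with_layout_of::<u8>();
    ///
    /// let handle = pool.insert(42_u8);
    /// assert!(pool.capacity() > 0);
    ///
    /// pool.remove_mut(handle);
    ///
    /// // Trailing empty slabs are released; an empty pool can drop its
    /// // capacity back to zero (white-box knowledge of the current algorithm).
    /// pool.shrink_to_fit();
    /// assert_eq!(pool.capacity(), 0);
    /// ```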
    #[cfg_attr(test, mutants::skip)] // Vacant slot cache mutation - hard to test. Revisit later.
    pub fn shrink_to_fit(&mut self) {
        // Find the last non-empty slab by scanning from the end
        let new_len = self
            .slabs
            .iter()
            .enumerate()
            .rev()
            .find_map(|(idx, slab)| {
                if !slab.is_empty() {
                    // Cannot wrap because that would imply we have more slabs than the size
                    // of virtual memory, which is impossible.
                    Some(idx.wrapping_add(1))
                } else {
                    None
                }
            })
            .unwrap_or(0);

        if new_len == self.slabs.len() {
            // Nothing to do.
            return;
        }

        // Truncate the slabs vector to remove empty slabs from the end.
        self.slabs.truncate(new_len);

        self.vacancy_tracker.update_slab_count(self.slabs.len());
    }

    #[doc = include_str!("../../doc/snippets/pool_insert.md")]
    ///
    /// # Panics
    #[doc = include_str!("../../doc/snippets/panic_on_pool_t_layout_mismatch.md")]
    ///
    /// # Example
    ///
    /// ```rust
    /// use std::alloc::Layout;
    ///
    /// use infinity_pool::RawOpaquePool;
    ///
    /// let mut pool = RawOpaquePool::with_layout(Layout::new::<String>());
    ///
    /// // Insert an object into the pool
    /// let mut handle = pool.insert("Hello".to_string());
    ///
    /// // Mutate the object via the unique handle
    /// // SAFETY: The handle is valid and points to a properly initialized String
    /// unsafe {
    ///     handle.as_mut().push_str(", Raw Opaque World!");
    ///     assert_eq!(handle.as_ref(), "Hello, Raw Opaque World!");
    /// }
    ///
    /// // Transform the unique handle into a shared handle
    /// let shared_handle = handle.into_shared();
    ///
    /// // After transformation, you can only immutably dereference the object
    /// // SAFETY: The shared handle is valid and points to a properly initialized String
    /// unsafe {
    ///     assert_eq!(shared_handle.as_ref(), "Hello, Raw Opaque World!");
    ///     // shared_handle.as_mut(); // This would not compile
    /// }
    ///
    /// // Explicitly remove the object from the pool
    /// // SAFETY: The handle belongs to this pool and references a valid object
    /// unsafe {
    ///     pool.remove(shared_handle);
    /// }
    /// assert_eq!(pool.len(), 0);
    /// ```
    #[inline]
    pub fn insert<T>(&mut self, value: T) -> RawPooledMut<T> {
        assert_eq!(
            Layout::new::<T>(),
            self.object_layout(),
            "layout of T does not match object layout of the pool"
        );

        // SAFETY: We just verified that T's layout matches the pool's layout.
        unsafe { self.insert_unchecked(value) }
    }

    #[doc = include_str!("../../doc/snippets/pool_insert.md")]
    ///
    /// # Safety
    #[doc = include_str!("../../doc/snippets/safety_pool_t_layout_must_match.md")]
    #[inline]
    pub unsafe fn insert_unchecked<T>(&mut self, value: T) -> RawPooledMut<T> {
        // Implement insert() in terms of insert_with() to reduce logic duplication.
        // SAFETY: Forwarding safety requirements to the caller.
        unsafe {
            self.insert_with_unchecked(|uninit: &mut MaybeUninit<T>| {
                uninit.write(value);
            })
        }
    }

    #[doc = include_str!("../../doc/snippets/pool_insert_with.md")]
    ///
    /// # Example
    ///
    /// ```rust
    /// use std::mem::MaybeUninit;
    ///
    /// use infinity_pool::RawOpaquePool;
    ///
    /// struct DataBuffer {
    ///     id: u32,
    ///     data: MaybeUninit<[u8; 1024]>, // Large buffer to skip initializing
    /// }
    ///
    /// let mut pool = RawOpaquePool::with_layout_of::<DataBuffer>();
    ///
    /// // Initialize only the id, leaving data uninitialized for performance
    /// let handle = unsafe {
    ///     pool.insert_with(|uninit: &mut MaybeUninit<DataBuffer>| {
    ///         let ptr = uninit.as_mut_ptr();
    ///         // SAFETY: Writing to the id field within allocated space
    ///         unsafe {
    ///             std::ptr::addr_of_mut!((*ptr).id).write(42);
    ///             // data field is intentionally left uninitialized
    ///         }
    ///     })
    /// };
    ///
    /// // ID is accessible, data remains uninitialized
    /// let id = unsafe { std::ptr::addr_of!(handle.ptr().as_ref().id).read() };
    /// assert_eq!(id, 42);
    /// ```
    ///
    /// # Panics
    #[doc = include_str!("../../doc/snippets/panic_on_pool_t_layout_mismatch.md")]
    ///
    /// # Safety
    #[doc = include_str!("../../doc/snippets/safety_closure_must_initialize_object.md")]
    #[inline]
    pub unsafe fn insert_with<T, F>(&mut self, f: F) -> RawPooledMut<T>
    where
        F: FnOnce(&mut MaybeUninit<T>),
    {
        assert_eq!(
            Layout::new::<T>(),
            self.object_layout(),
            "layout of T does not match object layout of the pool"
        );

        // SAFETY: We just verified that T's layout matches the pool's layout.
        unsafe { self.insert_with_unchecked(f) }
    }

    #[doc = include_str!("../../doc/snippets/pool_insert_with.md")]
    ///
    /// # Example
    ///
    /// ```rust
    /// use std::mem::MaybeUninit;
    ///
    /// use infinity_pool::RawOpaquePool;
    ///
    /// struct DataBuffer {
    ///     id: u32,
    ///     data: MaybeUninit<[u8; 1024]>, // Large buffer to skip initializing
    /// }
    ///
    /// let mut pool = RawOpaquePool::with_layout_of::<DataBuffer>();
    ///
    /// // Initialize only the id, leaving data uninitialized for performance
    /// let handle = unsafe {
    ///     pool.insert_with_unchecked(|uninit: &mut MaybeUninit<DataBuffer>| {
    ///         let ptr = uninit.as_mut_ptr();
    ///         // SAFETY: Writing to the id field within allocated space
    ///         unsafe {
    ///             std::ptr::addr_of_mut!((*ptr).id).write(42);
    ///             // data field is intentionally left uninitialized
    ///         }
    ///     })
    /// };
    ///
    /// // ID is accessible, data remains uninitialized
    /// let id = unsafe { std::ptr::addr_of!(handle.ptr().as_ref().id).read() };
    /// assert_eq!(id, 42);
    /// ```
    ///
    /// # Safety
    #[doc = include_str!("../../doc/snippets/safety_pool_t_layout_must_match.md")]
    #[doc = include_str!("../../doc/snippets/safety_closure_must_initialize_object.md")]
    pub unsafe fn insert_with_unchecked<T, F>(&mut self, f: F) -> RawPooledMut<T>
    where
        F: FnOnce(&mut MaybeUninit<T>),
    {
        let slab_index = self.index_of_slab_to_insert_into();

        // SAFETY: index_of_slab_to_insert_into() guarantees that a slab with a
        // vacant slot exists at this index.
        let slab = unsafe { self.slabs.get_unchecked_mut(slab_index) };

        // SAFETY: Forwarding guarantee from caller that T's layout matches the pool's layout
        // and that the closure properly initializes the value.
        //
        // We ensure that the slab is not full via the logic that gets us `slab_index`,
        // which always guarantees that there is a vacancy in that slab.
        let slab_handle = unsafe { slab.insert_with(f) };

        // Update our tracked length since we just inserted an object.
        // This can never overflow since that would mean the pool is greater than virtual memory.
        self.length = self.length.wrapping_add(1);

        // We invalidate the "slab with vacant slot" cache here if this was the last vacant slot.
        if slab.is_full() {
            // SAFETY: We are currently operating on the slab, so it must be an existing slab.
            // The mechanism that adds slabs is responsible for updating vacancy tracker bounds.
            unsafe {
                self.vacancy_tracker.update_slab_status(slab_index, false);
            }
        }

        // The pool itself does not care about the type T, but for the convenience of the
        // caller we imbue the RawPooledMut with type information, reducing the casting
        // required at the call site.
        RawPooledMut::new(slab_index, slab_handle)
    }

    #[doc = include_str!("../../doc/snippets/raw_pool_remove_mut.md")]
    #[inline]
    pub fn remove_mut<T: ?Sized>(&mut self, handle: RawPooledMut<T>) {
        // SAFETY: The provided handle is a unique handle, which guarantees that the object
        // has not been removed yet (because doing so consumes the unique handle).
        unsafe {
            self.remove(handle.into_shared());
        }
    }

    #[doc = include_str!("../../doc/snippets/raw_pool_remove.md")]
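    ///
    /// # Example
    ///
    /// A minimal sketch (mirroring `remove_with_shared_handle` in the tests below):
    ///
    /// ```rust
    /// use infinity_pool::RawOpaquePool;
    ///
    /// let mut pool = RawOpaquePool::with_layout_of::<i64>();
    ///
    /// let handle = pool.insert(999_i64).into_shared();
    ///
    /// // SAFETY: The handle belongs to this pool and the object
    /// // has not been removed yet.
    /// unsafe {
    ///     pool.remove(handle);
    /// }
    ///
    /// assert!(pool.is_empty());
    /// ```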
    pub unsafe fn remove<T: ?Sized>(&mut self, handle: RawPooled<T>) {
        let slab = self
            .slabs
            .get_mut(handle.slab_index())
            .expect("the RawPooled did not point to an object in this pool");

        // SAFETY: Forwarding guarantees from caller.
        unsafe {
            slab.remove(handle.slab_handle());
        }

        // Update our tracked length since we just removed an object.
        // This cannot wrap around because we just removed an object,
        // so the value must be at least 1 before subtraction.
        self.length = self.length.wrapping_sub(1);

        if slab.len() == self.slab_layout.capacity().get().wrapping_sub(1) {
            // We removed from a full slab.
            // This means we have a vacant slot where there was not one before.

            // SAFETY: We are currently operating on the slab, so it must be an existing slab.
            // The mechanism that adds slabs is responsible for updating vacancy tracker bounds.
            unsafe {
                self.vacancy_tracker
                    .update_slab_status(handle.slab_index(), true);
            }
        }
    }

    #[doc = include_str!("../../doc/snippets/raw_pool_remove_mut_unpin.md")]
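    ///
    /// # Example
    ///
    /// A minimal sketch (mirroring `remove_unpin_returns_value` in the tests below):
    ///
    /// ```rust
    /// use infinity_pool::RawOpaquePool;
    ///
    /// let mut pool = RawOpaquePool::with_layout_of::<i32>();
    ///
    /// let handle = pool.insert(-456_i32);
    ///
    /// // The unique handle proves the object is still present,
    /// // so no unsafe code is needed to take it back out.
    /// let value = pool.remove_mut_unpin(handle);
    /// assert_eq!(value, -456);
    /// assert!(pool.is_empty());
    /// ```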
    #[must_use]
    #[inline]
    pub fn remove_mut_unpin<T: Unpin>(&mut self, handle: RawPooledMut<T>) -> T {
        // SAFETY: The provided handle is a unique handle, which guarantees that the object
        // has not been removed yet (because doing so consumes the unique handle).
        unsafe { self.remove_unpin(handle.into_shared()) }
    }

    #[doc = include_str!("../../doc/snippets/raw_pool_remove_unpin.md")]
    #[must_use]
    pub unsafe fn remove_unpin<T: Unpin>(&mut self, handle: RawPooled<T>) -> T {
        // We would prefer to check for `RawPooled<()>` specifically, but that would
        // imply specialization, `T: 'static`, or TypeId shenanigans. Checking the size
        // is good enough: type-erasing a handle is the only way to obtain a handle to
        // a ZST in the first place, because the slab does not even support ZSTs.
        assert_ne!(
            size_of::<T>(),
            0,
            "cannot remove_unpin() through a type-erased handle"
        );

        let slab = self
            .slabs
            .get_mut(handle.slab_index())
            .expect("the RawPooled did not point to an existing object in the pool");

        // SAFETY: The RawPooled<T> guarantees the type T is correct for this pool slot.
        let value = unsafe { slab.remove_unpin::<T>(handle.slab_handle()) };

        // Update our tracked length since we just removed an object.
        // This cannot wrap around because we just removed an object,
        // so the value must be at least 1 before subtraction.
        self.length = self.length.wrapping_sub(1);

        if slab.len() == self.slab_layout.capacity().get().wrapping_sub(1) {
            // We removed from a full slab.
            // This means we have a vacant slot where there was not one before.

            // SAFETY: We are currently operating on the slab, so it must be an existing slab.
            // The mechanism that adds slabs is responsible for updating vacancy tracker bounds.
            unsafe {
                self.vacancy_tracker
                    .update_slab_status(handle.slab_index(), true);
            }
        }

        value
    }

    #[doc = include_str!("../../doc/snippets/raw_pool_iter.md")]
    #[must_use]
    #[inline]
    pub fn iter(&self) -> RawOpaquePoolIterator<'_> {
        RawOpaquePoolIterator::new(self)
    }

    /// Returns the index of a slab with at least one vacant slot,
    /// adding a new slab if needed.
    #[must_use]
    fn index_of_slab_to_insert_into(&mut self) -> usize {
        if let Some(index) = self.vacancy_tracker.next_vacancy() {
            // There is a vacancy, so use it.
            return index;
        }

        // If we got here, there are no vacancies and we need to extend the pool.
        debug_assert_eq!(self.len(), self.capacity());

        self.slabs
            .push(Slab::new(self.slab_layout, self.drop_policy));

        self.vacancy_tracker.update_slab_count(self.slabs.len());

        // This can never wrap around because we just added a slab, so len() is at least 1.
        self.slabs.len().wrapping_sub(1)
    }
}

/// Iterator over all objects in a raw opaque pool.
///
/// This iterator yields untyped pointers to objects stored across all slabs in the pool.
/// Since the pool can contain objects of different types (as long as they have the same layout),
/// the iterator returns `NonNull<()>` and leaves type casting to the caller.
///
/// # Thread safety
///
/// The type is single-threaded.
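///
/// # Example
///
/// A minimal sketch of casting the yielded pointers back to a concrete type
/// (mirroring `iter_multiple_items_single_slab` in the tests below):
///
/// ```rust
/// use infinity_pool::RawOpaquePool;
///
/// let mut pool = RawOpaquePool::with_layout_of::<u32>();
///
/// let _a = pool.insert(100_u32);
/// let _b = pool.insert(200_u32);
///
/// let values: Vec<u32> = pool
///     .iter()
///     // SAFETY: Every object in this pool was inserted as a u32.
///     .map(|ptr| unsafe { *ptr.cast::<u32>().as_ref() })
///     .collect();
///
/// assert_eq!(values, vec![100, 200]);
/// ```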
#[derive(Debug)]
pub struct RawOpaquePoolIterator<'p> {
    pool: &'p RawOpaquePool,

    // Current slab index for forward iteration.
    // This is the index of the next slab we will take items from.
    // Once the iterator is exhausted, it may hold a meaningless out-of-range value.
    current_front_slab_index: usize,

    // Current slab index for backward iteration.
    // This is the index of the next slab we will take items from.
    // Once the iterator is exhausted, it may hold a meaningless out-of-range value.
    current_back_slab_index: usize,

    // Iterator for the current front slab (if any).
    current_front_slab_iter: Option<SlabIterator<'p>>,

    // Iterator for the current back slab (if any).
    current_back_slab_iter: Option<SlabIterator<'p>>,

    // Total number of items already yielded.
    yielded_count: usize,
}

impl<'p> RawOpaquePoolIterator<'p> {
    fn new(pool: &'p RawOpaquePool) -> Self {
        Self {
            pool,
            current_front_slab_index: 0,
            // This is allowed to wrap - once the iterator is exhausted, the index is never used.
            current_back_slab_index: pool.slabs.len().wrapping_sub(1),
            current_front_slab_iter: None,
            current_back_slab_iter: None,
            yielded_count: 0,
        }
    }
}

impl Iterator for RawOpaquePoolIterator<'_> {
    type Item = NonNull<()>;

    fn next(&mut self) -> Option<Self::Item> {
        while self.len() > 0 {
            // If there is no current iterator, get one for the current slab.
            let slab_iter = self.current_front_slab_iter.get_or_insert_with(|| {
                self.pool.slabs
                    .get(self.current_front_slab_index)
                    .expect("iterator has items remaining, so there must still be a slab to get them from")
                    .iter()
            });

            // Try to get the next item from the current iterator.
            if let Some(item) = slab_iter.next() {
                // Will never wrap because that would mean we have more
                // items than we have virtual memory.
                self.yielded_count = self.yielded_count.wrapping_add(1);
                return Some(item);
            }

            // No more items in this slab - move on to the next one.
            // This is allowed to wrap - once the iterator is exhausted, the index is never used.
            self.current_front_slab_index = self.current_front_slab_index.wrapping_add(1);
            self.current_front_slab_iter = None;
        }

        None
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.len();
        (remaining, Some(remaining))
    }
}

impl DoubleEndedIterator for RawOpaquePoolIterator<'_> {
    fn next_back(&mut self) -> Option<Self::Item> {
        while self.len() > 0 {
            // If there is no current iterator, get one for the current slab.
            let slab_iter = self.current_back_slab_iter.get_or_insert_with(|| {
                self.pool.slabs
                    .get(self.current_back_slab_index)
                    .expect("iterator has items remaining, so there must still be a slab to get them from")
                    .iter()
            });

            // Try to get the next item from the current iterator.
            if let Some(item) = slab_iter.next_back() {
                // Will never wrap because that would mean we have more
                // items than we have virtual memory.
                self.yielded_count = self.yielded_count.wrapping_add(1);
                return Some(item);
            }

            // No more items in this slab - move on to the previous one.
            // This is allowed to wrap - once the iterator is exhausted, the index is never used.
            self.current_back_slab_index = self.current_back_slab_index.wrapping_sub(1);
            self.current_back_slab_iter = None;
        }

        None
    }
}

impl ExactSizeIterator for RawOpaquePoolIterator<'_> {
    fn len(&self) -> usize {
        // Total objects in pool minus those we've already yielded
        // Will not wrap because we cannot yield more items than exist in the pool.
        self.pool.len().wrapping_sub(self.yielded_count)
    }
}

// Once we return None, we will keep returning None.
impl FusedIterator for RawOpaquePoolIterator<'_> {}

impl<'p> IntoIterator for &'p RawOpaquePool {
    type Item = NonNull<()>;
    type IntoIter = RawOpaquePoolIterator<'p>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

#[cfg(test)]
#[allow(
    clippy::indexing_slicing,
    clippy::multiple_unsafe_ops_per_block,
    clippy::undocumented_unsafe_blocks,
    reason = "tests focus on succinct code and do not need to tick all the boxes"
)]
mod tests {
    use std::alloc::Layout;
    use std::mem::MaybeUninit;

    use static_assertions::{assert_impl_all, assert_not_impl_any};

    use super::*;

    assert_impl_all!(RawOpaquePoolIterator<'_>: Iterator, DoubleEndedIterator, ExactSizeIterator, FusedIterator);
    assert_not_impl_any!(RawOpaquePoolIterator<'_>: Send, Sync);

    assert_impl_all!(&RawOpaquePool: IntoIterator);

    #[test]
    fn new_pool_is_empty() {
        let pool = RawOpaquePool::with_layout_of::<u64>();

        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
        assert_eq!(pool.capacity(), 0);
        assert_eq!(pool.object_layout(), Layout::new::<u64>());
    }

    #[test]
    fn with_layout_results_in_pool_with_correct_layout() {
        let layout = Layout::new::<i64>();
        let pool = RawOpaquePool::with_layout(layout);

        assert_eq!(pool.object_layout(), layout);
        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
        assert_eq!(pool.capacity(), 0);
    }

    #[test]
    fn instance_creation_through_builder_succeeds() {
        let pool = RawOpaquePool::builder()
            .layout_of::<i64>()
            .drop_policy(DropPolicy::MustNotDropContents)
            .build();

        assert_eq!(pool.object_layout(), Layout::new::<i64>());
        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
        assert_eq!(pool.capacity(), 0);
    }

    #[test]
    fn insert_and_length() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        let _handle1 = pool.insert(42_u32);
        assert_eq!(pool.len(), 1);
        assert!(!pool.is_empty());

        let _handle2 = pool.insert(100_u32);
        assert_eq!(pool.len(), 2);
    }

    #[test]
    fn capacity_grows_with_slabs() {
        let mut pool = RawOpaquePool::with_layout_of::<u64>();

        assert_eq!(pool.capacity(), 0);

        let _handle = pool.insert(123_u64);

        // Should have at least one slab's worth of capacity now
        assert!(pool.capacity() > 0);
        let initial_capacity = pool.capacity();

        // Fill up the slab to force creation of a new one
        for i in 1..initial_capacity {
            let _handle = pool.insert(i as u64);
        }

        // One more insert should create a new slab
        let _handle = pool.insert(999_u64);

        assert!(pool.capacity() >= initial_capacity * 2);
    }

    #[test]
    fn reserve_creates_capacity() {
        let mut pool = RawOpaquePool::with_layout_of::<u8>();

        pool.reserve(100);
        assert!(pool.capacity() >= 100);

        let initial_capacity = pool.capacity();
        pool.reserve(50); // Should not increase capacity
        assert_eq!(pool.capacity(), initial_capacity);

        pool.reserve(200); // Should increase capacity
        assert!(pool.capacity() >= 200);
    }

    #[test]
    fn insert_with_closure() {
        let mut pool = RawOpaquePool::with_layout_of::<u64>();

        let handle = unsafe {
            pool.insert_with(|uninit: &mut MaybeUninit<u64>| {
                uninit.write(42);
            })
        };

        assert_eq!(pool.len(), 1);

        let value = pool.remove_mut_unpin(handle);
        assert_eq!(value, 42);
    }

    #[test]
    fn remove_decreases_length() {
        let mut pool = RawOpaquePool::with_layout_of::<String>();

        let handle1 = pool.insert("hello".to_string());
        let handle2 = pool.insert("world".to_string());

        assert_eq!(pool.len(), 2);

        pool.remove_mut(handle1);
        assert_eq!(pool.len(), 1);

        pool.remove_mut(handle2);
        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
    }

    #[test]
    fn remove_unpin_returns_value() {
        let mut pool = RawOpaquePool::with_layout_of::<i32>();

        let handle = pool.insert(-456_i32);

        let value = pool.remove_mut_unpin(handle);
        assert_eq!(value, -456);
        assert_eq!(pool.len(), 0);
    }

    #[test]
    fn shrink_to_fit_removes_empty_slabs() {
        let mut pool = RawOpaquePool::with_layout_of::<u8>();

        // Add some items.
        let mut handles = Vec::new();
        for i in 0..10 {
            handles.push(pool.insert(u8::try_from(i).unwrap()));
        }

        // Remove all items.
        for handle in handles {
            pool.remove_mut(handle);
        }

        assert!(pool.is_empty());

        pool.shrink_to_fit();

        // We have white-box knowledge that an empty pool will shrink to zero.
        // This may become untrue with future algorithm changes, at which point
        // we will need to adjust the tests.
        assert_eq!(pool.capacity(), 0);
    }

    #[test]
    fn handle_provides_access_to_object() {
        let mut pool = RawOpaquePool::with_layout_of::<u64>();

        let handle = pool.insert(12345_u64);

        assert_eq!(unsafe { *handle.as_ref() }, 12345);

        // Access the value through the handle's pointer
        let ptr = handle.ptr();

        let value = unsafe { ptr.as_ref() };

        assert_eq!(*value, 12345);
    }

    #[test]
    fn shared_handles_are_copyable() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        let handle1 = pool.insert(789_u32).into_shared();
        let handle2 = handle1;
        #[expect(clippy::clone_on_copy, reason = "intentional, testing cloning")]
        let handle3 = handle1.clone();

        unsafe {
            assert_eq!(*handle1.as_ref(), *handle2.as_ref());
            assert_eq!(*handle1.as_ref(), *handle3.as_ref());
            assert_eq!(*handle2.as_ref(), *handle3.as_ref());
        }
    }

    #[test]
    fn multiple_removals_and_insertions() {
        let mut pool = RawOpaquePool::with_layout_of::<usize>();

        // Insert, remove, insert again to test slot reuse
        let handle1 = pool.insert(1_usize);
        pool.remove_mut(handle1);

        let handle2 = pool.insert(2_usize);

        assert_eq!(pool.len(), 1);

        let value = pool.remove_mut_unpin(handle2);
        assert_eq!(value, 2);
    }

    #[test]
    fn remove_with_shared_handle() {
        let mut pool = RawOpaquePool::with_layout_of::<i64>();

        let handle = pool.insert(999_i64).into_shared();

        assert_eq!(pool.len(), 1);

        unsafe {
            pool.remove(handle);
        }

        assert_eq!(pool.len(), 0);
        assert!(pool.is_empty());
    }

    #[test]
    fn remove_unpin_with_shared_handle() {
        let mut pool = RawOpaquePool::with_layout_of::<i32>();

        let handle = pool.insert(42_i32).into_shared();

        assert_eq!(pool.len(), 1);

        let value = unsafe { pool.remove_unpin(handle) };

        assert_eq!(value, 42);
        assert_eq!(pool.len(), 0);
    }

    #[test]
    #[should_panic]
    fn remove_unpin_panics_on_zero_sized_type() {
        // We need to use a type that is not zero-sized for the pool itself,
        // but we create a handle that gets type-erased to a ZST.
        let mut pool = RawOpaquePool::with_layout_of::<u8>();

        let handle = pool.insert(123_u8);

        let erased_handle: RawPooled<()> = handle.into_shared().erase();

        // This should panic because size_of::<()>() == 0
        unsafe {
            #[expect(unused_must_use, reason = "impossible to use a unit value")]
            pool.remove_unpin(erased_handle);
        }
    }

    #[test]
    #[should_panic]
    fn insert_panics_if_provided_type_with_wrong_layout() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        // Try to insert a u64 into a pool configured for u32
        let _handle = pool.insert(123_u64);
    }

    #[test]
    #[should_panic]
    fn insert_with_panics_if_provided_type_with_wrong_layout() {
        let mut pool = RawOpaquePool::with_layout_of::<u16>();

        // Try to insert a u32 into a pool configured for u16
        let _handle = unsafe {
            pool.insert_with(|uninit: &mut MaybeUninit<u32>| {
                uninit.write(456);
            })
        };
    }

    #[test]
    fn iter_empty_pool() {
        let pool = RawOpaquePool::with_layout_of::<u32>();

        let mut iter = pool.iter();
        assert_eq!(iter.size_hint(), (0, Some(0)));
        assert_eq!(iter.len(), 0);

        assert_eq!(iter.next(), None);
        assert_eq!(iter.size_hint(), (0, Some(0)));
        assert_eq!(iter.len(), 0);
    }

    #[test]
    fn iter_single_item() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        let _handle = pool.insert(42_u32);

        let mut iter = pool.iter();
        assert_eq!(iter.len(), 1);

        // First item should be the object we inserted
        let ptr = iter.next().expect("should have one item");

        let value = unsafe { ptr.cast::<u32>().as_ref() };
        assert_eq!(*value, 42);

        // No more items
        assert_eq!(iter.next(), None);
        assert_eq!(iter.len(), 0);
    }

    #[test]
    fn iter_multiple_items_single_slab() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        // Insert multiple items that should fit in a single slab
        let _handle1 = pool.insert(100_u32);
        let _handle2 = pool.insert(200_u32);
        let _handle3 = pool.insert(300_u32);

        let values: Vec<u32> = pool
            .iter()
            .map(|ptr| unsafe { *ptr.cast::<u32>().as_ref() })
            .collect();

        // Should get all values in order of their slot indices
        assert_eq!(values, vec![100, 200, 300]);
    }

    #[test]
    fn iter_multiple_items_multiple_slabs() {
        let mut pool = RawOpaquePool::with_layout_of::<u8>();

        // Insert enough items to span multiple slabs
        #[allow(
            clippy::collection_is_never_read,
            reason = "handles are used for ownership"
        )]
        let mut handles = Vec::new();
        for i in 0..50 {
            handles.push(pool.insert(u8::try_from(i).unwrap()));
        }

        let values: Vec<u8> = pool
            .iter()
            .map(|ptr| unsafe { *ptr.cast::<u8>().as_ref() })
            .collect();

        // Should get all values we inserted
        assert_eq!(values.len(), 50);
        for (i, &value) in values.iter().enumerate() {
            assert_eq!(value, u8::try_from(i).unwrap());
        }

        // Clean up
        for handle in handles {
            pool.remove_mut(handle);
        }
    }

    #[test]
    fn iter_with_gaps() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        // Insert items
        let _handle1 = pool.insert(100_u32);
        let handle2 = pool.insert(200_u32);
        let _handle3 = pool.insert(300_u32);

        // Remove the middle item to create a gap
        pool.remove_mut(handle2);

        let values: Vec<u32> = pool
            .iter()
            .map(|ptr| unsafe { *ptr.cast::<u32>().as_ref() })
            .collect();

        // Should get only the remaining values
        assert_eq!(values, vec![100, 300]);
    }

    #[test]
    fn iter_with_empty_slabs() {
        let mut pool = RawOpaquePool::with_layout_of::<u64>();

        // Force creation of multiple slabs by inserting many items
        let mut handles = Vec::new();
        for i in 0_u64..20 {
            handles.push(pool.insert(i));
        }

        // Remove all items from some slabs to create empty slabs
        for handle in handles.drain(5..15) {
            pool.remove_mut(handle);
        }

        let values: Vec<u64> = pool
            .iter()
            .map(|ptr| unsafe { *ptr.cast::<u64>().as_ref() })
            .collect();

        // Should get values from non-empty slabs only
        let expected: Vec<u64> = (0..5_u64).chain(15..20_u64).collect();
        assert_eq!(values, expected);
    }

    #[test]
    fn iter_size_hint() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        // Empty pool
        let iter = pool.iter();
        assert_eq!(iter.size_hint(), (0, Some(0)));
        assert_eq!(iter.len(), 0);

        // Add some items
        let _handle1 = pool.insert(100_u32);
        let _handle2 = pool.insert(200_u32);

        let mut iter = pool.iter();
        assert_eq!(iter.size_hint(), (2, Some(2)));
        assert_eq!(iter.len(), 2);

        // Consume one item
        let first_item = iter.next();
        assert!(first_item.is_some());
        assert_eq!(iter.size_hint(), (1, Some(1)));
        assert_eq!(iter.len(), 1);

        // Consume another
        let second_item = iter.next();
        assert!(second_item.is_some());
        assert_eq!(iter.size_hint(), (0, Some(0)));
        assert_eq!(iter.len(), 0);

        // Should be exhausted now
        assert_eq!(iter.next(), None);
        assert_eq!(iter.size_hint(), (0, Some(0)));
        assert_eq!(iter.len(), 0);
    }

    #[test]
    fn iter_double_ended_basic() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        // Insert items
        let _handle1 = pool.insert(100_u32);
        let _handle2 = pool.insert(200_u32);
        let _handle3 = pool.insert(300_u32);

        let mut iter = pool.iter();

        // Iterate from the back
        let last_ptr = iter.next_back().expect("should have last item");
        let last_value = unsafe { *last_ptr.cast::<u32>().as_ref() };
        assert_eq!(last_value, 300);

        let middle_ptr = iter.next_back().expect("should have middle item");
        let middle_value = unsafe { *middle_ptr.cast::<u32>().as_ref() };
        assert_eq!(middle_value, 200);

        let first_ptr = iter.next_back().expect("should have first item");
        let first_value = unsafe { *first_ptr.cast::<u32>().as_ref() };
        assert_eq!(first_value, 100);

        // Should be exhausted
        assert_eq!(iter.next_back(), None);
        assert_eq!(iter.next(), None);
    }

    #[test]
    fn iter_double_ended_mixed_directions() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        // Insert 5 items
        let _handle1 = pool.insert(100_u32);
        let _handle2 = pool.insert(200_u32);
        let _handle3 = pool.insert(300_u32);
        let _handle4 = pool.insert(400_u32);
        let _handle5 = pool.insert(500_u32);

        let mut iter = pool.iter();
        assert_eq!(iter.len(), 5);

        // Get first from front
        let first_ptr = iter.next().expect("should have first item");
        let first_value = unsafe { *first_ptr.cast::<u32>().as_ref() };
        assert_eq!(first_value, 100);
        assert_eq!(iter.len(), 4);

        // Get last from back
        let last_ptr = iter.next_back().expect("should have last item");
        let last_value = unsafe { *last_ptr.cast::<u32>().as_ref() };
        assert_eq!(last_value, 500);
        assert_eq!(iter.len(), 3);

        // Get second from front
        let second_ptr = iter.next().expect("should have second item");
        let second_value = unsafe { *second_ptr.cast::<u32>().as_ref() };
        assert_eq!(second_value, 200);
        assert_eq!(iter.len(), 2);

        // Get fourth from back
        let fourth_ptr = iter.next_back().expect("should have fourth item");
        let fourth_value = unsafe { *fourth_ptr.cast::<u32>().as_ref() };
        assert_eq!(fourth_value, 400);
        assert_eq!(iter.len(), 1);

        // Get middle item
        let middle_ptr = iter.next().expect("should have middle item");
        let middle_value = unsafe { *middle_ptr.cast::<u32>().as_ref() };
        assert_eq!(middle_value, 300);
        assert_eq!(iter.len(), 0);

        // Should be exhausted
        assert_eq!(iter.next(), None);
        assert_eq!(iter.next_back(), None);
        assert_eq!(iter.len(), 0);
    }

    #[test]
    fn iter_fused_behavior() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        // Test with empty pool
        let mut iter = pool.iter();
        assert_eq!(iter.next(), None);
        assert_eq!(iter.next(), None); // Should still be None
        assert_eq!(iter.next_back(), None);
        assert_eq!(iter.next_back(), None); // Should still be None

        // Test with some items
        let _handle1 = pool.insert(100_u32);
        let _handle2 = pool.insert(200_u32);

        let mut iter = pool.iter();

        // Consume all items
        let first = iter.next();
        assert!(first.is_some());
        let second = iter.next();
        assert!(second.is_some());

        // Now iterator should be exhausted
        assert_eq!(iter.next(), None);
        assert_eq!(iter.next(), None); // FusedIterator guarantee: still None
        assert_eq!(iter.next(), None); // Still None
        assert_eq!(iter.next_back(), None); // Should also be None from back
        assert_eq!(iter.next_back(), None); // Still None from back

        // Test bidirectional exhaustion
        let mut iter = pool.iter();

        // Consume from both ends until exhausted
        iter.next(); // Consume from front
        iter.next_back(); // Consume from back

        // Now should be exhausted
        assert_eq!(iter.next(), None);
        assert_eq!(iter.next_back(), None);
        assert_eq!(iter.next(), None); // FusedIterator guarantee
        assert_eq!(iter.next_back(), None); // FusedIterator guarantee
    }

    #[test]
    fn iter_across_multiple_slabs_with_gaps() {
        let mut pool = RawOpaquePool::with_layout_of::<usize>();

        // Create a pattern: insert many items, remove some to create gaps across slabs
        let mut handles = Vec::new();
        for i in 0_usize..30 {
            handles.push(pool.insert(i));
        }

        // Remove every third item to create gaps across slabs
        let mut to_remove = Vec::new();
        for (index, _) in handles.iter().enumerate().step_by(3) {
            to_remove.push(index);
        }

        // Remove in reverse order to maintain indices
        for &index in to_remove.iter().rev() {
            pool.remove_mut(handles.swap_remove(index));
        }

        let values: Vec<usize> = pool
            .iter()
            .map(|ptr| unsafe { *ptr.cast::<usize>().as_ref() })
            .collect();

        // Should get all non-removed values
        let expected: Vec<usize> = (0_usize..30).filter(|&i| i % 3 != 0).collect();
        assert_eq!(values, expected);
    }

    #[test]
    fn into_iterator_trait_works() {
        let mut pool = RawOpaquePool::with_layout_of::<u32>();

        let _handle1 = pool.insert(100_u32);
        let _handle2 = pool.insert(200_u32);
        let _handle3 = pool.insert(300_u32);

        // Test using for-in loop (which uses IntoIterator)
        let mut values = Vec::new();
        for ptr in &pool {
            let value = unsafe { *ptr.cast::<u32>().as_ref() };
            values.push(value);
        }

        assert_eq!(values, vec![100, 200, 300]);
    }
}