//! `pool.rs` — single-threaded arena (`Pool`) from the `shared_arena` crate.
1use std::cell::Cell;
2use std::marker::PhantomData;
3use std::mem::MaybeUninit;
4use std::ptr::NonNull;
5use std::rc::Rc;
6
7use crate::block::Block;
8use crate::common::{Pointer, BLOCK_PER_PAGE};
9use crate::page::pool::{drop_page, PagePool};
10use crate::ArenaRc;
11
/// A pointer to a `T` stored in a `Pool`
///
/// `PoolBox` implements [`DerefMut`] so it is directly mutable
/// (without mutex or other synchronization methods).
///
/// It is not cloneable and cannot be sent to other threads.
///
/// ```
/// # use shared_arena::{PoolBox, Pool};
/// let pool = Pool::new();
/// let mut my_opt: PoolBox<Option<i32>> = pool.alloc(Some(10));
///
/// assert!(my_opt.is_some());
/// assert_eq!(my_opt.take(), Some(10));
/// assert!(my_opt.is_none());
/// ```
///
/// [`DerefMut`]: https://doc.rust-lang.org/std/ops/trait.DerefMut.html
///
pub struct PoolBox<T> {
    // Pointer to the arena block holding the value and its counter.
    block: NonNull<Block<T>>,
    // `*mut ()` is neither `Send` nor `Sync`, so a `PoolBox` stays on
    // the thread that created it.
    _marker: PhantomData<*mut ()>,
}
38
impl<T> PoolBox<T> {
    /// Takes ownership of `block`, setting its reference counter to 1.
    ///
    /// Expects an unused block (counter == 0) obtained from
    /// `Pool::find_place`.
    fn new(mut block: NonNull<Block<T>>) -> PoolBox<T> {
        // PoolBox is not Send, so we can make the counter non-atomic
        let counter_mut = unsafe { block.as_mut() }.counter.get_mut();

        // See ArenaBox<T>::new for why we touch the counter
        debug_assert!(
            *counter_mut == 0,
            "PoolBox: Counter not zero {}",
            counter_mut
        );
        *counter_mut = 1;
        PoolBox {
            block,
            _marker: PhantomData,
        }
    }
}
58
59impl<T> std::ops::Deref for PoolBox<T> {
60    type Target = T;
61    fn deref(&self) -> &T {
62        unsafe { &*self.block.as_ref().value.get() }
63    }
64}
65
66impl<T> std::ops::DerefMut for PoolBox<T> {
67    fn deref_mut(&mut self) -> &mut T {
68        unsafe { &mut *self.block.as_ref().value.get() }
69    }
70}
71
/// Drop the PoolBox<T>
///
/// The value pointed by this PoolBox is also dropped
impl<T> Drop for PoolBox<T> {
    fn drop(&mut self) {
        // PoolBox is not Send, so we can make the counter non-atomic
        let counter_mut = unsafe { self.block.as_mut() }.counter.get_mut();

        // See ArenaBox<T>::new for why we touch the counter
        assert!(
            *counter_mut == 1,
            "PoolBox: Counter != 1 on drop {}",
            counter_mut
        );
        *counter_mut = 0;

        // Drops the inner value; presumably also releases the block
        // back to its page — confirm in `Block::drop_block`.
        Block::drop_block(self.block)
    }
}
92
/// A single threaded arena
///
/// It produces only `PoolBox` and `ArenaRc` which cannot be sent
/// to other threads.
///
/// [`ArenaRc`]: ./struct.ArenaRc.html
/// [`PoolBox`]: ./struct.PoolBox.html
///
pub struct Pool<T: Sized> {
    // Head of the list of pages that still contain free blocks.
    // The Rc is handed to each page in `PagePool::make_list` —
    // presumably so a page can re-link itself when blocks are freed;
    // confirm in the page module.
    free: Rc<Pointer<PagePool<T>>>,
    // Head of the list of every page owned by this pool.
    page_list: Pointer<PagePool<T>>,
    // Number of pages currently allocated.
    npages: Cell<usize>,
    // `*mut ()` is neither `Send` nor `Sync`: pins the Pool to one thread.
    _marker: PhantomData<*mut ()>,
}
107
108impl<T: Sized> Pool<T> {
109    /// Constructs a new `Pool` capable of holding exactly 63 elements
110    ///
111    /// The Pool will reallocate itself if there is not enough space
112    /// when allocating (with alloc* functions)
113    ///
114    /// ## Example
115    ///
116    /// ```
117    /// # use shared_arena::Pool;
118    /// let arena = Pool::new();
119    /// # arena.alloc(1);
120    /// ```
121    pub fn new() -> Pool<T> {
122        Self::with_capacity(1)
123    }
124
    /// Constructs a new `Pool` capable of holding at least `cap` elements
    ///
    /// Because the arena allocate by page of 63 elements, it might be able to
    /// hold more elements than `cap`.
    ///
    /// The Pool will reallocate itself if there is not enough space
    /// when allocating (with alloc* functions)
    ///
    /// ## Example
    ///
    /// ```
    /// # use shared_arena::Pool;
    /// let arena = Pool::with_capacity(2048);
    /// # arena.alloc(1);
    /// ```
    pub fn with_capacity(cap: usize) -> Pool<T> {
        // Ceiling division: smallest page count that holds `cap` blocks
        // (at least one page, even for cap == 0).
        let npages = ((cap.max(1) - 1) / BLOCK_PER_PAGE) + 1;
        let free = Rc::new(Cell::new(std::ptr::null_mut()));

        // Build the linked list of pages; each page receives a handle on
        // `free` (see `PagePool::make_list`).
        let (mut first, _) = PagePool::make_list(npages, &free);
        let first_ref = unsafe { first.as_mut() };

        // Both lists (free pages and all pages) start at the first page.
        free.set(first_ref);

        Pool {
            npages: Cell::new(npages),
            free,
            page_list: Cell::new(first_ref),
            _marker: PhantomData,
        }
    }
156
    /// Grows the pool: allocates as many new pages as currently exist
    /// (doubling), capped at 900_000 pages per call.
    fn alloc_new_page(&self) -> NonNull<PagePool<T>> {
        let len = self.npages.get();

        let to_allocate = len.max(1).min(900_000);

        let (first, mut last) = PagePool::make_list(to_allocate, &self.free);

        // Splice the freshly-built sublist at the head of both the free
        // list and the full page list.
        let last_ref = unsafe { last.as_mut() };
        last_ref.next_free.set(self.free.get());
        last_ref.next.set(self.page_list.get());

        let first_ptr = first.as_ptr();
        self.free.set(first_ptr);
        self.page_list.set(first_ptr);

        self.npages.set(len + to_allocate);

        first
    }
176
    /// Returns an unused block ready to be written to, growing the pool
    /// when every page is full.
    fn find_place(&self) -> NonNull<Block<T>> {
        loop {
            while let Some(page) = unsafe { self.free.get().as_mut() } {
                if let Some(block) = page.acquire_free_block() {
                    return block;
                }

                // This page is full: unlink it from the free list.
                let next = page.next_free.get();

                self.free.set(next);
                page.in_free_list = false;
            }
            // The free list is exhausted: allocate more pages and retry.
            self.alloc_new_page();
        }
    }
192
193    /// Writes a value in the arena, and returns an [`PoolBox`]
194    /// pointing to that value.
195    ///
196    /// ## Example
197    ///
198    /// ```
199    /// # use shared_arena::{PoolBox, Pool};
200    /// let arena = Pool::new();
201    /// let my_num: PoolBox<u8> = arena.alloc(0xFF);
202    ///
203    /// assert_eq!(*my_num, 255);
204    /// ```
205    ///
206    /// [`PoolBox`]: ./struct.PoolBox.html
207    pub fn alloc(&self, value: T) -> PoolBox<T> {
208        let block = self.find_place();
209
210        unsafe {
211            let ptr = block.as_ref().value.get();
212            ptr.write(value);
213        }
214
215        PoolBox::new(block)
216    }
217
218    /// Finds an empty space in the arena and calls the function `initializer`
219    /// with its argument pointing to that space.
220    /// It returns an [`PoolBox`] pointing to the newly initialized value.
221    ///
222    /// The difference with [`alloc`] is that it has the benefit of
223    /// avoiding intermediate copies of the value.
224    ///
225    /// ## Safety
226    ///
227    /// It is the caller responsability to initialize properly the value.  
228    /// `initializer` must return `&T`, this is a way to ensure that
229    /// its parameter `&mut MaybeUninit<T>` has been "consumed".
230    ///
231    /// If `initializer` returns a different reference than its parameter,
232    /// the function will panic.
233    ///
234    /// When the [`PoolBox`] is dropped, the value is also
235    /// dropped. If the value is not initialized correctly, it will
236    /// drop an unitialized value, which is undefined behavior.
237    ///
238    /// ## Example
239    ///
240    /// ```
241    /// # use shared_arena::Pool;
242    /// # use std::ptr;
243    /// # use core::mem::MaybeUninit;
244    /// struct MyData {
245    ///     a: usize
246    /// }
247    ///
248    /// fn initialize_data<'a>(uninit: &'a mut MaybeUninit<MyData>, source: &MyData) -> &'a MyData {
249    ///     unsafe {
250    ///         let ptr = uninit.as_mut_ptr();
251    ///         ptr::copy(source, ptr, 1);
252    ///         &*ptr
253    ///     }
254    /// }
255    ///
256    /// let arena = Pool::<MyData>::new();
257    /// let source = MyData { a: 101 };
258    ///
259    /// let data = arena.alloc_with(|uninit| {
260    ///     initialize_data(uninit, &source)
261    /// });
262    /// assert!(data.a == 101);
263    /// ```
264    ///
265    /// [`PoolBox`]: ./struct.PoolBox.html
266    /// [`alloc`]: struct.Pool.html#method.alloc
267    /// [`MaybeUninit`]: https://doc.rust-lang.org/std/mem/union.MaybeUninit.html
268    pub fn alloc_with<F>(&self, initializer: F) -> PoolBox<T>
269    where
270        F: Fn(&mut MaybeUninit<T>) -> &T,
271    {
272        let block = self.find_place();
273        let result = PoolBox::new(block);
274
275        unsafe {
276            let ptr = block.as_ref().value.get();
277            let reference = initializer(&mut *(ptr as *mut std::mem::MaybeUninit<T>));
278            debug_assert_eq!(
279                ptr as *const T, reference as *const T,
280                "`initializer` must return a reference of its parameter"
281            );
282        }
283
284        result
285    }
286
287    /// Writes a value in the arena, and returns an [`ArenaRc`]
288    /// pointing to that value.
289    ///
290    /// ## Example
291    ///
292    /// ```
293    /// # use shared_arena::{ArenaRc, Pool};
294    /// let arena = Pool::new();
295    /// let my_num: ArenaRc<u8> = arena.alloc_rc(0xFF);
296    ///
297    /// assert_eq!(*my_num, 255);
298    /// ```
299    ///
300    /// [`ArenaRc`]: ./struct.ArenaRc.html
301    pub fn alloc_rc(&self, value: T) -> ArenaRc<T> {
302        let block = self.find_place();
303
304        unsafe {
305            let ptr = block.as_ref().value.get();
306            ptr.write(value);
307        }
308
309        ArenaRc::new(block)
310    }
311
312    /// Finds an empty space in the arena and calls the function `initializer`
313    /// with its argument pointing to that space.
314    /// It returns an [`ArenaRc`] pointing to the newly initialized value.
315    ///
316    /// The difference with [`alloc_rc`] is that it has the benefit of
317    /// avoiding intermediate copies of the value.
318    ///
319    /// ## Safety
320    ///
321    /// It is the caller responsability to initialize properly the value.  
322    /// `initializer` must return `&T`, this is a way to ensure that
323    /// its parameter `&mut MaybeUninit<T>` has been "consumed".
324    ///
325    /// If `initializer` returns a different reference than its parameter,
326    /// the function will panic.
327    ///
328    /// When all [`ArenaRc`] pointing that value are dropped, the value
329    /// is also dropped. If the value is not initialized correctly, it will
330    /// drop an unitialized value, which is undefined behavior.
331    ///
332    /// ## Example
333    ///
334    /// ```
335    /// # use shared_arena::Pool;
336    /// # use std::ptr;
337    /// # use core::mem::MaybeUninit;
338    /// struct MyData {
339    ///     a: usize
340    /// }
341    ///
342    /// fn initialize_data<'a>(uninit: &'a mut MaybeUninit<MyData>, source: &MyData) -> &'a MyData {
343    ///     unsafe {
344    ///         let ptr = uninit.as_mut_ptr();
345    ///         ptr::copy(source, ptr, 1);
346    ///         &*ptr
347    ///     }
348    /// }
349    ///
350    /// let arena = Pool::<MyData>::new();
351    /// let source = MyData { a: 101 };
352    ///
353    /// let data = arena.alloc_rc_with(|uninit| {
354    ///     initialize_data(uninit, &source)
355    /// });
356    /// assert!(data.a == 101);
357    /// ```
358    ///
359    /// [`ArenaRc`]: ./struct.ArenaRc.html
360    /// [`alloc_rc`]: #method.alloc_rc
361    /// [`MaybeUninit`]: https://doc.rust-lang.org/std/mem/union.MaybeUninit.html
362    pub fn alloc_rc_with<F>(&self, initializer: F) -> ArenaRc<T>
363    where
364        F: Fn(&mut MaybeUninit<T>) -> &T,
365    {
366        let block = self.find_place();
367        let result = ArenaRc::new(block);
368
369        unsafe {
370            let ptr = block.as_ref().value.get();
371            let reference = initializer(&mut *(ptr as *mut std::mem::MaybeUninit<T>));
372            debug_assert_eq!(
373                ptr as *const T, reference as *const T,
374                "`initializer` must return a reference of its parameter"
375            );
376        }
377
378        result
379    }
380
    /// Returns a tuple of non-free and free spaces in the arena
    ///
    /// This is a slow function and it should not be called in a hot
    /// path.
    ///
    /// ## Example
    ///
    /// ```
    /// # use shared_arena::Pool;
    /// let arena = Pool::new();
    /// let item = arena.alloc(1);
    /// let (used, free) = arena.stats();
    /// assert!(used == 1 && free == 62);
    /// ```
    pub fn stats(&self) -> (usize, usize) {
        let mut next = self.page_list.get();
        let mut used = 0;
        let mut npages = 0;

        // Walk the full page list, counting occupied blocks per page.
        while let Some(next_ref) = unsafe { next.as_mut() } {
            let next_next = next_ref.next.get();

            // A zero bit in the bitfield marks an occupied block.
            let bitfield = next_ref.bitfield;
            let zeros = bitfield.count_zeros() as usize;
            used += zeros;
            next = next_next;

            npages += 1;
        }

        debug_assert!(npages == self.npages.get());

        let free = (npages * BLOCK_PER_PAGE) - used;

        (used, free)
    }
417
418    #[cfg(target_pointer_width = "64")]
419    #[cfg(test)]
420    pub(crate) fn size_lists(&self) -> (usize, usize) {
421        let mut next = self.page_list.get();
422        let mut size = 0;
423        while let Some(next_ref) = unsafe { next.as_mut() } {
424            next = next_ref.next.get();
425            size += 1;
426        }
427
428        let mut next = self.free.get();
429        let mut free = 0;
430        while let Some(next_ref) = unsafe { next.as_mut() } {
431            next = next_ref.next_free.get();
432            free += 1;
433        }
434
435        (size, free)
436    }
437
    /// Shrinks the capacity of the arena as much as possible.
    ///
    /// It will drop all pages that are unused (no ArenaRc or PoolBox
    /// points to it).  
    /// If there is still one or more references to a page, the page
    /// won't be dropped.
    ///
    /// This is a slow function and it should not be called in a hot
    /// path.
    ///
    /// The dedicated memory will be deallocated during this call.
    ///
    /// ## Example
    ///
    /// ```
    /// # use shared_arena::Pool;
    /// let mut arena = Pool::with_capacity(2048);
    /// let mut values = Vec::new();
    ///
    /// assert_eq!(arena.stats(), (0, 2079));
    ///
    /// for _ in 0..80 {
    ///     values.push(arena.alloc(0xFF));
    /// }
    ///
    /// arena.shrink_to_fit();
    ///
    /// let (used, free) = arena.stats();
    /// assert!(used == 80 && free == 46);
    ///
    /// ```
    pub fn shrink_to_fit(&mut self) {
        let mut current: &Pointer<PagePool<T>> = &self.free;

        let mut to_drop = vec![];

        // First pass: walk the free list and unlink every page whose
        // bitfield is all ones (i.e. every block free), collecting it.
        while let Some(current_value) = unsafe { current.get().as_mut() } {
            let next = &current_value.next_free;
            let next_value = next.get();

            if current_value.bitfield == !0 {
                current.set(next_value);
                to_drop.push(current_value as *const _ as *mut PagePool<T>);
            } else {
                current = next;
            }
        }

        let mut current: &Pointer<PagePool<T>> = &self.page_list;

        // Loop on the full list
        // We remove the pages from it
        while let Some(current_value) = unsafe { current.get().as_mut() } {
            let next = &current_value.next;
            let next_value = next.get();

            if to_drop.contains(&(current_value as *const _ as *mut PagePool<T>)) {
                current.set(next_value);
            } else {
                current = next;
            }
        }

        self.npages.set(self.npages.get() - to_drop.len());

        // Free the collected pages only after both lists no longer
        // reference them.
        for page in to_drop.iter().rev() {
            drop_page(*page)
        }
    }
507
508    #[allow(dead_code)]
509    #[cfg(test)]
510    pub(crate) fn display_list(&self) {
511        let mut full = vec![];
512
513        let mut next = self.page_list.get();
514        while let Some(next_ref) = unsafe { next.as_mut() } {
515            full.push(next);
516            next = next_ref.next.get();
517        }
518
519        let mut list_free = vec![];
520
521        let mut next = self.page_list.get();
522        while let Some(next_ref) = unsafe { next.as_mut() } {
523            list_free.push(next);
524            next = next_ref.next_free.get();
525        }
526
527        println!("FULL {} {:#?}", full.len(), full);
528        println!("FREE {} {:#?}", list_free.len(), list_free);
529    }
530}
531
532impl<T> Default for Pool<T> {
533    fn default() -> Self {
534        Pool::new()
535    }
536}
537
/// Frees every page owned by the pool.
impl<T> Drop for Pool<T> {
    fn drop(&mut self) {
        let mut next = self.page_list.get();

        while let Some(next_ref) = unsafe { next.as_mut() } {
            // Read the `next` link before freeing the page it lives in.
            let next_next = next_ref.next.get();
            drop_page(next);
            next = next_next;
        }
    }
}
549
impl<T> std::fmt::Debug for Pool<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Per-page occupancy summary used only for this Debug output.
        struct Page {
            free: usize,
            used: usize,
        }

        impl std::fmt::Debug for Page {
            fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
                write!(f, "Page {{ free: {} used: {} }}", self.free, self.used)
            }
        }

        let npages = self.npages.get();

        let mut vec = Vec::with_capacity(npages);

        let mut next = self.page_list.get();

        // Walk the full page list and record each page's occupancy
        // (a zero bit in the bitfield marks an occupied block).
        while let Some(next_ref) = unsafe { next.as_mut() } {
            let used = next_ref.bitfield.count_zeros() as usize;
            vec.push(Page {
                used,
                free: BLOCK_PER_PAGE - used,
            });

            next = next_ref.next.get();
        }

        let blocks_used: usize = vec.iter().map(|p| p.used).sum();
        let blocks_free: usize = vec.iter().map(|p| p.free).sum();

        f.debug_struct("Pool")
            .field("blocks_free", &blocks_free)
            .field("blocks_used", &blocks_used)
            .field("npages", &npages)
            .field("pages", &vec)
            .finish()
    }
}
590
/// Doc-tests that must fail to compile.
/// `compile_fail` is supported in doc comments only, hence this dummy fn.
///
/// Fails because Pool doesn't implement Sync, which Arc requires
/// ```compile_fail
/// use shared_arena::Pool;
/// use std::sync::Arc;
///
/// let arena: Arc<Pool<i32>> = Arc::new(Pool::new());
///
/// std::thread::spawn(move || {
///     std::mem::drop(arena)
/// });
/// ```
///
/// ```compile_fail
/// use shared_arena::Pool;
/// use std::sync::Arc;
///
/// let arena: Arc<Pool<i32>> = Arc::new(Pool::new());
///
/// std::thread::spawn(move || {
///     arena.alloc(1);
/// });
/// arena.alloc(2);
/// ```
#[allow(dead_code)]
fn arena_fail() {} // grcov_ignore
619
#[cfg(test)]
mod tests {
    use super::Pool;
    use std::mem::MaybeUninit;
    use std::ptr;

    // A fully-unused pool shrinks down to zero pages.
    #[cfg(target_pointer_width = "64")]
    #[test]
    fn arena_shrink() {
        let mut arena = Pool::<usize>::with_capacity(1000);
        assert_eq!(arena.stats(), (0, 1008));
        arena.shrink_to_fit();
        assert_eq!(arena.stats(), (0, 0));
    }

    // shrink_to_fit keeps pages that still hold live allocations.
    #[cfg(target_pointer_width = "64")]
    #[test]
    fn arena_shrink2() {
        let mut arena = Pool::<usize>::with_capacity(1000);

        println!("A");
        let _a = arena.alloc(1);
        arena.shrink_to_fit();
        assert_eq!(arena.stats(), (1, 62));

        println!("A1");
        let _a = arena.alloc(1);
        arena.shrink_to_fit();
        assert_eq!(arena.stats(), (2, 61));

        println!("A2");
        let mut values = Vec::with_capacity(64);
        for _ in 0..64 {
            values.push(arena.alloc(1));
        }

        println!("A3");
        assert_eq!(arena.stats(), (66, 60));
        println!("A32");
        arena.shrink_to_fit();
        println!("A33");
        assert_eq!(arena.stats(), (66, 60));

        println!("A4");
        std::mem::drop(values);

        println!("A5");
        assert_eq!(arena.stats(), (2, 124));
        println!("A6");
        arena.shrink_to_fit();
        println!("A7");
        assert_eq!(arena.stats(), (2, 61));
    }

    // Tracks the lengths of the page list and the free list across
    // allocations, drops and shrinks.
    #[cfg(target_pointer_width = "64")]
    #[test]
    fn arena_size() {
        let mut arena = Pool::<usize>::with_capacity(1000);

        assert_eq!(arena.size_lists(), (16, 16));
        let a = arena.alloc(1);
        assert_eq!(arena.size_lists(), (16, 16));

        let mut values = Vec::with_capacity(539);
        for _ in 0..539 {
            values.push(arena.alloc(1));
        }
        assert_eq!(arena.size_lists(), (16, 8));

        arena.shrink_to_fit();

        assert_eq!(arena.size_lists(), (9, 1));

        values.truncate(503);
        arena.shrink_to_fit();

        assert_eq!(arena.size_lists(), (8, 0));

        std::mem::drop(a);
        for _ in 0..62 {
            values.remove(0);
        }

        assert_eq!(arena.size_lists(), (8, 1));

        arena.shrink_to_fit();

        assert_eq!(arena.size_lists(), (7, 0));

        values.clear();

        assert_eq!(arena.size_lists(), (7, 7));

        arena.shrink_to_fit();

        assert_eq!(arena.size_lists(), (0, 0));

        {
            let _a = arena.alloc(1);
            println!("LA3",);
            assert_eq!(arena.size_lists(), (1, 1));

            println!("{:?}", arena);
            arena.display_list();
        }

        assert_eq!(arena.size_lists(), (1, 1));
        arena.shrink_to_fit();
        assert_eq!(arena.size_lists(), (0, 0));

        let mut values = Vec::with_capacity(126);
        for _ in 0..126 {
            values.push(arena.alloc(1));
        }
        assert_eq!(arena.size_lists(), (2, 1));

        values.remove(0);
        assert_eq!(arena.size_lists(), (2, 2));

        values.push(arena.alloc(1));
        assert_eq!(arena.size_lists(), (2, 2));
    }

    // alloc_with / alloc_rc_with run the initializer in place.
    #[test]
    fn alloc_with_initializer() {
        struct MyData {
            a: usize,
        }

        fn initialize_data<'d>(uninit: &'d mut MaybeUninit<MyData>, source: &MyData) -> &'d MyData {
            unsafe {
                let ptr = uninit.as_mut_ptr();
                ptr::copy(source, ptr, 1);
                &*ptr
            }
        }

        let arena = Pool::<MyData>::new();

        let source = MyData { a: 101 };
        let data = arena.alloc_with(|uninit| initialize_data(uninit, &source));
        assert!(data.a == 101);

        let source = MyData { a: 102 };
        let data = arena.alloc_rc_with(|uninit| initialize_data(uninit, &source));
        assert!(data.a == 102);
    }

    // Returning a foreign reference from the initializer must panic
    // (debug assertion in alloc_with).
    #[test]
    #[should_panic]
    fn alloc_with_panic() {
        let arena = Pool::<usize>::new();
        const SOURCE: usize = 10;

        let _ = arena.alloc_with(|_| &SOURCE);
    } // grcov_ignore

    // Same as above, for the ArenaRc variant.
    #[test]
    #[should_panic]
    fn alloc_rc_with_panic() {
        let arena = Pool::<usize>::new();
        const SOURCE: usize = 10;

        let _ = arena.alloc_rc_with(|_| &SOURCE);
    } // grcov_ignore

    // Smoke test covering the four allocation entry points.
    #[test]
    fn alloc_fns() {
        let arena = Pool::<usize>::new();

        use std::ptr;

        let a = arena.alloc_with(|place| unsafe {
            ptr::copy(&101, place.as_mut_ptr(), 1);
            &*place.as_mut_ptr()
        });
        assert!(*a == 101);

        let a = arena.alloc_rc_with(|place| unsafe {
            ptr::copy(&102, place.as_mut_ptr(), 1);
            &*place.as_mut_ptr()
        });
        assert!(*a == 102);

        let a = arena.alloc(103);
        assert!(*a == 103);

        let a = arena.alloc_rc(104);
        assert!(*a == 104);
    }

    // Boxes and Rcs must stay valid after the pool itself is dropped.
    #[test]
    fn drop_arena_with_valid_allocated() {
        let (a, b, c, d) = {
            let arena = Pool::<usize>::new();

            use std::ptr;

            let a = arena.alloc_with(|place| unsafe {
                ptr::copy(&101, place.as_mut_ptr(), 1);
                &*place.as_mut_ptr()
            });
            let b = arena.alloc_rc_with(|place| unsafe {
                ptr::copy(&102, place.as_mut_ptr(), 1);
                &*place.as_mut_ptr()
            });
            let c = arena.alloc(103);
            let d = arena.alloc_rc(104);

            (a, b, c, d)
        };

        assert_eq!((*a, *b, *c, *d), (101, 102, 103, 104))
    }

    // Dropping a block whose page tag is invalid (all bits set) must panic.
    #[test]
    #[should_panic]
    #[cfg(target_pointer_width = "64")]
    fn invalid_block() {
        use std::cell::UnsafeCell;
        use std::ptr::NonNull;
        use std::sync::atomic::AtomicUsize;

        let mut block = super::Block {
            value: UnsafeCell::new(1),
            counter: AtomicUsize::new(1),
            page: crate::block::PageTaggedPtr { data: !0 },
        };

        super::Block::drop_block(NonNull::from(&mut block));
    } // grcov_ignore
}
851}