kappendlist/
lib.rs

1#![allow(dead_code)]
2use std::alloc::{GlobalAlloc, Layout, System};
3use std::cell::UnsafeCell;
4use std::fmt::{self, Debug};
5use std::iter::FromIterator;
6use std::mem::MaybeUninit;
7use std::ops::Index;
8
9// Must be a power of 2
10const CHUNK_SIZE: usize = 16;
11const CHUNK_MASK: usize = CHUNK_SIZE - 1;
12
13/// A list that can be appended to while elements are borrowed
14///
15/// Uses pointer tagging to track allocated chunks of a fixed size.
16///
17/// Indexing is O(1)
18pub struct BaseAppendList<T, V> {
19    inner: UnsafeCell<Inner<T>>,
20    _variant: std::marker::PhantomData<V>,
21}
22
23impl<T, V> Default for BaseAppendList<T, V> {
24    #[inline]
25    fn default() -> Self {
26        Self {
27            inner: Default::default(),
28            _variant: std::marker::PhantomData,
29        }
30    }
31}
32
33pub type AppendList<T> = BaseAppendList<T, variants::Index>;
34pub type AppendListMut<T> = BaseAppendList<T, variants::PushMut>;
35
36fn chunks_needed_maintaining_invariant(total_chunk_count: usize) -> usize {
37    // let initial_count = self.chunks.len();
38    let initial_count = 0;
39    let mut new_chunk_count = initial_count;
40
41    // Need to allocate more chunks
42    // In a geometric series, 2^(n+1) = Sum(2^n, 0, n) + 1
43    // or generally, r^(n+1) = (r-1) * Sum(r^n, 0, n) + 1
44    // So in order to double the number of previous chunks allocated,
45    // allocate `len + 1` more.
46    // So `new_len = len + len + 1 = len * 2 + 1 = (len << 1) + 1`
47    // Could also probably do this with some kind of log2() but meh
48    while new_chunk_count < total_chunk_count {
49        new_chunk_count <<= 1;
50        new_chunk_count += 1;
51    }
52    new_chunk_count - initial_count
53}
54
55pub mod variants {
56    pub struct PushMut;
57    pub struct Index;
58}
59
60impl<T, V> BaseAppendList<T, V> {
61    #[inline]
62    pub fn new() -> Self {
63        Self::default()
64    }
65
66    /// Get an item from the list, if it is in bounds
67    ///
68    /// Returns `None` if the `index` is out-of-bounds. Note that you can also
69    /// index with `[]`, which will panic on out-of-bounds.
70    #[inline]
71    pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
72        self.inner.get_mut().get_mut(index)
73    }
74
75    ///// Get an item from the list, if it is in bounds
76    /////
77    ///// Returns `None` if the `index` is out-of-bounds. Note that you can also
78    ///// index with `[]`, which will panic on out-of-bounds.
79    //#[inline]
80    //pub fn expand_and_get_mut(&self, index: usize) -> &mut T
81    //where
82    //    T: Default,
83    //{
84    //    unsafe { (&mut *self.inner.get()).expand_and_get_mut(index) }
85    //}
86
87    #[inline(always)]
88    fn unsafe_inner(&self) -> &mut Inner<T> {
89        unsafe { &mut *self.inner.get() }
90    }
91
92    /// Get an iterator over the list
93    #[inline]
94    pub fn iter_mut(&mut self) -> IterMut<T> {
95        self.inner.get_mut().iter_mut()
96    }
97
98    #[inline]
99    pub fn drain_all<'a>(&'a mut self) -> Drain<'a, T> {
100        self.inner.get_mut().drain_all()
101    }
102
103    #[inline]
104    pub fn len(&self) -> usize {
105        self.unsafe_inner().len()
106    }
107
108    #[inline]
109    pub fn capacity(&self) -> usize {
110        self.unsafe_inner().capacity()
111        // self.unsafe_inner().chunks.len() * CHUNK_SIZE
112    }
113
114    #[inline]
115    pub fn is_empty(&self) -> bool {
116        self.len() == 0
117    }
118
119    #[inline]
120    pub fn extend<I: IntoIterator<Item = T>>(&self, iter: I) {
121        self.unsafe_inner().extend(iter)
122    }
123}
124
125impl<T> BaseAppendList<T, variants::PushMut> {
126    /// Append an item to the end
127    ///
128    /// Note that this does not require `mut`.
129    #[inline]
130    pub fn push(&self, item: T) -> &mut T {
131        self.unsafe_inner().push(item)
132    }
133}
134
135impl<T> BaseAppendList<T, variants::Index> {
136    /// Append an item to the end
137    ///
138    /// Note that this does not require `mut`.
139    #[inline]
140    pub fn push(&self, item: T) -> &T {
141        self.unsafe_inner().push(item)
142    }
143
144    /// Get an item from the list, if it is in bounds
145    ///
146    /// Returns `None` if the `index` is out-of-bounds. Note that you can also
147    /// index with `[]`, which will panic on out-of-bounds.
148    #[inline]
149    pub fn get<'a>(&'a self, index: usize) -> Option<&'a T> {
150        unsafe { (&mut *self.inner.get()).get(index) }
151    }
152
153    /// Get an iterator over the list
154    #[inline]
155    pub fn iter(&self) -> Iter<T> {
156        self.unsafe_inner().iter()
157    }
158}
159
160impl<T> std::ops::Index<usize> for BaseAppendList<T, variants::Index> {
161    type Output = T;
162
163    fn index(&self, idx: usize) -> &T {
164        self.get(idx).unwrap()
165    }
166}
167
168struct Inner<T> {
169    len: usize,
170    chunks: Vec<Chunk<T, CHUNK_SIZE>>,
171}
172
173pub struct Chunk<T, const CHUNK_SIZE: usize>(*mut [MaybeUninit<T>; CHUNK_SIZE]);
174
175impl<T, const CHUNK_SIZE: usize> Chunk<T, CHUNK_SIZE> {
176    pub unsafe fn system_alloc(count: usize) -> impl Iterator<Item = Self> {
177        Self::alloc(count, |layout| System.alloc(layout))
178    }
179    pub unsafe fn alloc(
180        count: usize,
181        alloc: impl FnOnce(Layout) -> *mut u8,
182    ) -> impl Iterator<Item = Self> {
183        // The memory layout of arrays is guaranteed to be compatible with
184        // putting them next to eachother contiguously.
185        // MaybeUninit has the same memory layout as T
186        let first_chunk = alloc(Layout::array::<T>(CHUNK_SIZE * count).unwrap())
187            as *mut [MaybeUninit<T>; CHUNK_SIZE];
188        assert!(count <= isize::MAX as usize);
189        std::iter::once(Chunk(tag_ptr(first_chunk, 1)))
190            .chain((1..count).map(move |i| Chunk(first_chunk.offset(i as isize))))
191    }
192    #[inline]
193    pub fn tag(&self) -> u64 {
194        get_ptr_tag(self.0)
195    }
196    #[inline]
197    pub fn as_slice_mut(&mut self) -> &mut [MaybeUninit<T>; CHUNK_SIZE] {
198        unsafe { &mut *self.ptr() }
199    }
200    #[inline]
201    pub unsafe fn as_slice_mut_unsafe(&self) -> &mut [MaybeUninit<T>; CHUNK_SIZE] {
202        &mut *self.ptr()
203    }
204    #[inline]
205    pub fn as_slice(&self) -> &[MaybeUninit<T>; CHUNK_SIZE] {
206        unsafe { &*self.ptr() }
207    }
208    #[inline]
209    pub fn ptr(&self) -> *mut [MaybeUninit<T>; CHUNK_SIZE] {
210        untagged(self.0)
211    }
212
213    #[inline]
214    pub unsafe fn system_dealloc(chunks: &[Chunk<T, CHUNK_SIZE>]) {
215        Self::dealloc(chunks, |ptr, layout| System.dealloc(ptr, layout))
216    }
217
218    pub unsafe fn dealloc(
219        chunks: &[Chunk<T, CHUNK_SIZE>],
220        mut dealloc: impl FnMut(*mut u8, Layout),
221    ) {
222        if chunks.is_empty() {
223            return;
224        }
225        assert_eq!(
226            chunks[0].tag(),
227            1,
228            "First chunk wasn't tagged with allocation"
229        );
230        let mut chunk_count = 0;
231        for chunk in chunks.iter().rev() {
232            chunk_count += 1;
233            if chunk.tag() == 1 {
234                let layout = Layout::array::<T>(CHUNK_SIZE * chunk_count).unwrap();
235                dealloc(chunk.ptr().cast(), layout);
236                chunk_count = 0;
237            }
238        }
239        assert_eq!(
240            chunk_count, 0,
241            "Chunks which weren't terminated by a tagged chunk were found"
242        );
243    }
244}
245
246// impl<T> Drop for Chunk<T> {
247//     fn drop(&mut self) {
248//         if self.tag() == 1 {
249//             unsafe {
250//                 System.dealloc(self.0.cast(), layout);
251//             }
252//         }
253//     }
254// }
255
256impl<T> Default for Inner<T> {
257    fn default() -> Self {
258        Self {
259            len: 0,
260            chunks: Default::default(),
261        }
262    }
263}
264
265pub const TAG_MASK: u64 = 0b111;
266#[inline(always)]
267pub fn modify_ptr<T>(p: *mut T, f: impl FnOnce(u64) -> u64) -> *mut T {
268    f(p as u64) as *mut T
269}
270#[inline(always)]
271pub fn get_ptr_tag<T>(p: *mut T) -> u64 {
272    p as u64 & TAG_MASK
273}
274#[inline(always)]
275pub fn untagged<T>(p: *mut T) -> *mut T {
276    modify_ptr(p, |v| v & (!TAG_MASK))
277}
278#[inline(always)]
279pub fn tag_ptr<T>(p: *mut T, tag: u64) -> *mut T {
280    modify_ptr(p, |v| v | (tag & TAG_MASK))
281}
282
283impl<T> Inner<T> {
284    /// In test builds, check all of the unsafe invariants
285    ///
286    /// In release builds, no-op
287    fn check_invariants(&self) {
288        // TODO?
289        // #[cfg(test)]
290        // {
291        //     if self.len.get() > 0 {
292        //     } else {
293        //     }
294        // }
295    }
296
297    #[inline(always)]
298    pub fn capacity(&self) -> usize {
299        self.chunks.len() * CHUNK_SIZE
300    }
301
302    #[inline(always)]
303    unsafe fn get_chunk(&self, chunk_index: usize) -> &mut [MaybeUninit<T>; CHUNK_SIZE] {
304        self.chunks.get_unchecked(chunk_index).as_slice_mut_unsafe()
305    }
306
307    /// Append an item to the end
308    ///
309    /// Note that this does not require `mut`.
310    pub fn push(&mut self, item: T) -> &mut T {
311        self.check_invariants();
312
313        let new_index = self.len;
314        let chunk_index = new_index / CHUNK_SIZE;
315        let index_in_chunk = new_index & CHUNK_MASK;
316
317        if chunk_index >= self.chunks.len() {
318            // Need to allocate more chunks
319            // In a geometric series, 2^(n+1) = Sum(2^n, 0, n) + 1
320            // or generally, r^(n+1) = (r-1) * Sum(r^n, 0, n) + 1
321            // So in order to double the number of previous chunks allocated,
322            // allocate `len + 1` more.
323            self.allocate_chunks(self.chunks.len() + 1);
324        }
325        debug_assert!(chunk_index < self.chunks.len());
326        let chunk = unsafe { self.chunks.get_unchecked(chunk_index).as_slice_mut_unsafe() };
327        chunk[index_in_chunk].write(item);
328
329        self.len += 1;
330
331        self.check_invariants();
332
333        unsafe { chunk[index_in_chunk].assume_init_mut() }
334    }
335
336    fn allocate_chunks(&mut self, count: usize) {
337        if count == 0 {
338            return;
339        }
340        self.chunks.extend(unsafe { Chunk::system_alloc(count) });
341    }
342
343    fn chunks_needed_maintaining_invariant(&self, total_chunk_count: usize) -> usize {
344        let mut new_chunk_count = self.chunks.len();
345        // Need to allocate more chunks
346        // In a geometric series, 2^(n+1) = Sum(2^n, 0, n) + 1
347        // or generally, r^(n+1) = (r-1) * Sum(r^n, 0, n) + 1
348        // So in order to double the number of previous chunks allocated,
349        // allocate `len + 1` more.
350        // So `new_len = len + len + 1 = len * 2 + 1 = (len << 1) + 1`
351        // Could also probably do this with some kind of log2() but meh
352        while new_chunk_count < total_chunk_count {
353            new_chunk_count <<= 1;
354            new_chunk_count += 1;
355        }
356        new_chunk_count - self.chunks.len()
357    }
358
359    /// Get the length of the list
360    pub fn len(&self) -> usize {
361        self.check_invariants();
362        self.len
363    }
364
365    /// Get an item from the list, if it is in bounds
366    ///
367    /// Returns `None` if the `index` is out-of-bounds. Note that you can also
368    /// index with `[]`, which will panic on out-of-bounds.
369    pub fn get<'a>(&'a self, index: usize) -> Option<&'a T> {
370        self.check_invariants();
371
372        if index >= self.len {
373            return None;
374        }
375        let chunk_index = index / CHUNK_SIZE;
376        let index_in_chunk = index & CHUNK_MASK;
377        Some(unsafe { self.get_chunk(chunk_index)[index_in_chunk].assume_init_ref() })
378    }
379
380    /// Get an item from the list, if it is in bounds
381    ///
382    /// Returns `None` if the `index` is out-of-bounds. Note that you can also
383    /// index with `[]`, which will panic on out-of-bounds.
384    // pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
385    pub fn get_mut<'a>(&'a mut self, index: usize) -> Option<&'a mut T> {
386        self.check_invariants();
387        if index >= self.len {
388            return None;
389        }
390        let chunk_index = index / CHUNK_SIZE;
391        let index_in_chunk = index & CHUNK_MASK;
392        Some(unsafe { self.get_chunk(chunk_index)[index_in_chunk].assume_init_mut() })
393    }
394
395    /// Get an item from the list, if it is in bounds
396    ///
397    /// Returns `None` if the `index` is out-of-bounds. Note that you can also
398    /// index with `[]`, which will panic on out-of-bounds.
399    #[inline(always)]
400    fn get_unchecked_move(&mut self, index: usize) -> T {
401        let chunk_index = index / CHUNK_SIZE;
402        let index_in_chunk = index & CHUNK_MASK;
403        unsafe { self.get_chunk(chunk_index)[index_in_chunk].assume_init_read() }
404    }
405
406    /// Get an item from the list, if it is in bounds
407    ///
408    /// Returns `None` if the `index` is out-of-bounds. Note that you can also
409    /// index with `[]`, which will panic on out-of-bounds.
410    pub fn expand_and_get_mut(&mut self, index: usize) -> &mut T
411    where
412        T: Default,
413    {
414        self.check_invariants();
415        if index >= self.len {
416            if index >= self.capacity() {
417                let min_chunks_needed = index / CHUNK_SIZE;
418                self.allocate_chunks(self.chunks_needed_maintaining_invariant(min_chunks_needed));
419            }
420            self.extend(std::iter::repeat_with(Default::default).take(index + 1 - self.len));
421        }
422        assert!(index < self.capacity());
423        let chunk_index = index / CHUNK_SIZE;
424        let index_in_chunk = index & CHUNK_MASK;
425        unsafe { self.get_chunk(chunk_index)[index_in_chunk].assume_init_mut() }
426    }
427
428    /// Get an iterator over the list
429    pub fn iter<'a>(&'a self) -> Iter<'a, T> {
430        self.check_invariants();
431        Iter {
432            list: self,
433            index: 0,
434        }
435    }
436
437    /// Get an iterator over the list
438    pub fn iter_mut<'a>(&'a mut self) -> IterMut<'a, T> {
439        self.check_invariants();
440        IterMut {
441            list: self,
442            index: 0,
443        }
444    }
445
446    pub fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
447        // TODO use size hint to reserve
448        self.check_invariants();
449        for x in iter {
450            self.push(x);
451        }
452    }
453
454    pub fn drain_all<'a>(&'a mut self) -> Drain<'a, T> {
455        self.check_invariants();
456        let len = self.len;
457        self.len = 0;
458        Drain {
459            list: self,
460            index: 0,
461            len,
462        }
463    }
464}
465
466impl<T> Drop for Inner<T> {
467    fn drop(&mut self) {
468        let mut remaining = self.len;
469        // Drop the individual elements
470        // The last one will be truncated via .take() by counting how many are
471        // lefts, and .take() works fine if it's greater than the number of
472        // elements in the iterator.
473        for chunk in self.chunks.iter_mut() {
474            // Iterates at most CHUNK_SIZE elements.
475            for elem in chunk.as_slice_mut().iter_mut().take(remaining) {
476                unsafe {
477                    elem.assume_init_drop();
478                }
479                remaining -= 1;
480            }
481        }
482        // Deallocate the actual array chunks
483        unsafe {
484            Chunk::system_dealloc(self.chunks.as_slice());
485        }
486    }
487}
488
489#[inline]
490pub const fn floor_log2(x: usize) -> usize {
491    const BITS_PER_BYTE: usize = 8;
492
493    BITS_PER_BYTE * std::mem::size_of::<usize>() - (x.leading_zeros() as usize) - 1
494}
495
496#[inline]
497pub const fn ceil_log2(x: usize) -> usize {
498    const BITS_PER_BYTE: usize = 8;
499
500    BITS_PER_BYTE * std::mem::size_of::<usize>() - (x.leading_zeros() as usize)
501}
502
503impl<T> Index<usize> for Inner<T> {
504    type Output = T;
505
506    fn index(&self, index: usize) -> &Self::Output {
507        self.get(index).expect("Inner indexed beyond its length")
508    }
509}
510
511impl<T, V> std::iter::Extend<T> for BaseAppendList<T, V> {
512    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
513        BaseAppendList::extend(self, iter)
514    }
515}
516
517impl<T, V> FromIterator<T> for BaseAppendList<T, V> {
518    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
519        let list = Self::default();
520        list.extend(iter);
521        list
522    }
523}
524
525impl<'a, T> IntoIterator for &'a BaseAppendList<T, variants::Index> {
526    type Item = &'a T;
527    type IntoIter = Iter<'a, T>;
528
529    fn into_iter(self) -> Self::IntoIter {
530        self.iter()
531    }
532}
533
534impl<T: PartialEq> PartialEq for BaseAppendList<T, variants::Index> {
535    fn eq(&self, other: &BaseAppendList<T, variants::Index>) -> bool {
536        let mut s = self.iter();
537        let mut o = other.iter();
538
539        loop {
540            match (s.next(), o.next()) {
541                (Some(a), Some(b)) if a == b => {}
542                (None, None) => return true,
543                _ => return false,
544            }
545        }
546    }
547}
548
549impl<T: Debug> Debug for BaseAppendList<T, variants::Index> {
550    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
551        fmt.debug_list().entries(self.iter()).finish()
552    }
553}
554
555pub struct Drain<'a, T> {
556    list: &'a mut Inner<T>,
557    index: usize,
558    len: usize,
559}
560
561impl<T> Iterator for Drain<'_, T> {
562    type Item = T;
563
564    #[inline(always)]
565    fn next(&mut self) -> Option<Self::Item> {
566        if self.index >= self.len {
567            return None;
568        }
569        Some(self.list.get_unchecked_move(self.index.post_increment()))
570    }
571
572    #[inline(always)]
573    fn size_hint(&self) -> (usize, Option<usize>) {
574        let remaining = self.len - self.index;
575        (remaining, Some(remaining))
576    }
577}
578
579trait PostIncrement {
580    fn post_increment(&mut self) -> Self;
581}
582
583impl PostIncrement for usize {
584    #[inline(always)]
585    fn post_increment(&mut self) -> Self {
586        let res = *self;
587        *self += 1;
588        res
589    }
590}
591
592pub struct Iter<'a, T> {
593    list: &'a Inner<T>,
594    index: usize,
595}
596
597impl<'a, T> Iterator for Iter<'a, T> {
598    type Item = &'a T;
599
600    #[inline(always)]
601    fn next(&mut self) -> Option<Self::Item> {
602        self.list.get(self.index.post_increment())
603    }
604
605    #[inline(always)]
606    fn size_hint(&self) -> (usize, Option<usize>) {
607        let remaining = self.list.len() - self.index;
608        (remaining, Some(remaining))
609    }
610}
611
612pub struct IterMut<'a, T> {
613    list: &'a mut Inner<T>,
614    index: usize,
615}
616
617impl<'a, T> Iterator for IterMut<'a, T> {
618    type Item = &'a mut T;
619
620    #[inline(always)]
621    fn next<'b>(&'b mut self) -> Option<&'a mut T> {
622        unsafe { &mut *(self.list as *mut Inner<T>) }.get_mut(self.index.post_increment())
623    }
624
625    #[inline(always)]
626    fn size_hint(&self) -> (usize, Option<usize>) {
627        let remaining = self.list.len() - self.index;
628        (remaining, Some(remaining))
629    }
630}
631
632#[cfg(test)]
633mod test {
634    use super::*;
635
636    #[test]
637    fn from_iterator() {
638        let l: AppendList<i32> = (0..100).collect();
639
640        for i in 0..100 {
641            assert_eq!(l[i], i as i32);
642        }
643    }
644
645    #[test]
646    fn iterator() {
647        let l: AppendList<i32> = (0..100).collect();
648        let mut i1 = l.iter();
649        let mut i2 = l.into_iter();
650
651        for item in 0..100 {
652            assert_eq!(i1.next(), Some(&item));
653            assert_eq!(i2.next(), Some(&item));
654        }
655
656        assert_eq!(i1.next(), None);
657        assert_eq!(i2.next(), None);
658    }
659
660    #[test]
661    fn equality() {
662        let a = AppendList::new();
663        let b = AppendList::new();
664
665        assert_eq!(a, b);
666
667        a.push("foo");
668
669        assert_ne!(a, b);
670
671        b.push("foo");
672
673        assert_eq!(a, b);
674
675        a.push("bar");
676        a.push("baz");
677
678        assert_ne!(a, b);
679    }
680
681    #[test]
682    fn iterator_size_hint() {
683        let l: AppendList<i32> = AppendList::new();
684        let mut i = l.iter();
685        assert_eq!(i.size_hint(), (0, Some(0)));
686
687        l.push(1);
688        assert_eq!(i.size_hint(), (1, Some(1)));
689
690        l.push(2);
691        assert_eq!(i.size_hint(), (2, Some(2)));
692
693        i.next();
694        assert_eq!(i.size_hint(), (1, Some(1)));
695
696        l.push(3);
697        assert_eq!(i.size_hint(), (2, Some(2)));
698
699        i.next();
700        assert_eq!(i.size_hint(), (1, Some(1)));
701
702        i.next();
703        assert_eq!(i.size_hint(), (0, Some(0)));
704    }
705
706    // #[test]
707    // fn chunk_sizes_make_sense() {
708    //     assert_eq!(chunk_size(0), FIRST_CHUNK_SIZE);
709
710    //     let mut index = 0;
711
712    //     for chunk in 0..20 {
713    //         // Each chunk starts just after the previous one ends
714    //         assert_eq!(chunk_start(chunk), index);
715    //         index += chunk_size(chunk);
716    //     }
717    // }
718
719    // #[test]
720    // fn index_chunk_matches_up() {
721    //     for index in 0..1_000_000 {
722    //         let chunk_id = index_chunk(index);
723
724    //         // Each index happens after its chunk start and before its chunk end
725    //         assert!(index >= chunk_start(chunk_id));
726    //         assert!(index < chunk_start(chunk_id) + chunk_size(chunk_id));
727    //     }
728    // }
729
730    #[test]
731    fn empty_list() {
732        let n: AppendList<usize> = AppendList::new();
733
734        assert_eq!(n.len(), 0);
735        assert_eq!(n.get(0), None);
736
737        let d: AppendList<usize> = AppendList::default();
738
739        assert_eq!(d.len(), 0);
740        assert_eq!(d.get(0), None);
741    }
742
743    #[test]
744    fn thousand_item_list() {
745        // test_big_list(1_000);
746        // test_big_list(1_024);
747        test_big_list(1_025);
748        // test_big_list(0);
749        // test_big_list(1);
750        // test_big_list(15);
751        // test_big_list(16);
752    }
753
754    #[test]
755    #[ignore]
756    fn million_item_list() {
757        test_big_list(1_000_000);
758    }
759
760    fn test_big_list(size: usize) {
761        let l = AppendList::new();
762        let mut refs: Vec<&usize> = Vec::new();
763
764        assert!(l.unsafe_inner().chunks.is_empty());
765        for i in 0..size {
766            assert_eq!(l.len(), i);
767
768            refs.push(l.push(i));
769            assert_eq!(l.len(), i + 1);
770
771            // refs.push(&l[i]);
772            if size < 5_000 {
773                // The number of chunks makes sense.
774                assert_eq!(
775                    l.unsafe_inner().chunks.len(),
776                    chunks_needed_maintaining_invariant((l.len() + CHUNK_SIZE - 1) / CHUNK_SIZE)
777                );
778                // The number of leading chunks with tag 1 is the log of the number of
779                // chunks (based on how many times you'd have to reallocate)
780                assert_eq!(
781                    l.unsafe_inner()
782                        .chunks
783                        .iter()
784                        .filter(|x| x.tag() == 1)
785                        .count(),
786                    ceil_log2(l.unsafe_inner().chunks.len())
787                );
788                // Tags are only 0 or 1
789                assert_eq!(
790                    l.unsafe_inner()
791                        .chunks
792                        .iter()
793                        .filter(|x| match x.tag() {
794                            0..=1 => false,
795                            _ => true,
796                        })
797                        .count(),
798                    0
799                );
800            }
801        }
802
803        for i in 0..size {
804            assert_eq!(Some(refs[i]), l.get(i));
805            assert_eq!(Some(refs[i] as *const _), l.get(i).map(|x| x as *const _));
806        }
807        let mut l = l;
808        for (i, x) in l.drain_all().enumerate() {
809            assert_eq!(x, i);
810            // NOTE uncommenting this should fail
811            // assert_eq!(x, *refs[i]);
812        }
813        assert_eq!(l.len(), 0);
814        assert!(l.is_empty());
815        assert_eq!(
816            l.capacity(),
817            chunks_needed_maintaining_invariant((size + CHUNK_SIZE - 1) / CHUNK_SIZE) * CHUNK_SIZE
818        );
819        assert_eq!(l.capacity() % CHUNK_SIZE, 0);
820        // capacity = (2^n - 1) * CHUNK_SIZE
821        // => capacity / CHUNK_SIZE = 2^n - 1
822        // => log2(capacity / CHUNK_SIZE + 1) = n
823        assert_eq!(
824            l.capacity() / CHUNK_SIZE,
825            (1 << ceil_log2(size / CHUNK_SIZE + 1)) - 1
826        );
827        // The number of chunks makes sense.
828        assert_eq!(
829            l.unsafe_inner().chunks.len(),
830            chunks_needed_maintaining_invariant((size + CHUNK_SIZE - 1) / CHUNK_SIZE)
831        );
832        // The number of leading chunks with tag 1 is the log of the number of
833        // chunks (based on how many times you'd have to reallocate)
834        assert_eq!(
835            l.unsafe_inner()
836                .chunks
837                .iter()
838                .filter(|x| x.tag() == 1)
839                .count(),
840            ceil_log2(l.unsafe_inner().chunks.len())
841        );
842        // Tags are only 0 or 1
843        assert_eq!(
844            l.unsafe_inner()
845                .chunks
846                .iter()
847                .filter(|x| match x.tag() {
848                    0..=1 => false,
849                    _ => true,
850                })
851                .count(),
852            0
853        );
854
855        l.push(1);
856        // {
857        //     let x = l.push(1);
858        //     let r = l.get(0).unwrap();
859        //     assert_eq!(*r, 1);
860        //     *x = 2;
861        //     assert_eq!(*r, 2);
862        //     *x = 1;
863        //     assert_eq!(*r, 1);
864        // }
865        assert_eq!(l.drain_all().collect::<Vec<_>>(), vec![1]);
866        // NOTE uncommenting this should fail
867        // assert_eq!(*refs[0], 0);
868    }
869}