appendvec/lib.rs

//! A concurrent append-only container of immutable values, where reads return
//! stable references and can happen concurrently with a write.

#![forbid(
    missing_docs,
    unsafe_op_in_unsafe_fn,
    clippy::missing_safety_doc,
    clippy::multiple_unsafe_ops_per_block
)]
#![cfg_attr(not(test), forbid(clippy::undocumented_unsafe_blocks))]

mod str;

use crossbeam_utils::CachePadded;
use std::mem::MaybeUninit;
use std::ops::{Index, Range};
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use std::sync::{Mutex, MutexGuard};
pub use str::AppendStr;

/// A concurrent append-only [`Vec`]-like container.
///
/// Unlike [`Vec`], this collection provides the following features:
/// - Reads return stable references, i.e. a reference to an item stays valid
///   even when more values are appended to the collection.
/// - Lock-free reads can happen while a write operation (append) is ongoing.
/// - However, multiple writes don't happen concurrently: under the hood a write
///   lock is held during each [`push()`](Self::push) operation.
///
/// This makes this collection useful for scenarios such as a caching system,
/// where a large number of readers are continuously reading items while new
/// items are occasionally added to the collection. In such cases, wrapping a
/// regular [`Vec`] inside a [`RwLock`](std::sync::RwLock) would be less
/// efficient, as readers and writers would block each other every time a value
/// is added to the collection.
///
/// The drawbacks are that this collection offers only a simple API (only
/// appends and indexing are supported), it takes a bit more space in memory
/// than [`Vec`] (a few hundred bytes of fixed overhead), and each operation
/// uses atomics (which involves some hardware synchronization).
///
/// Under the hood, this is implemented as a series of buckets with power-of-two
/// sizes. Each bucket is allocated only when needed (i.e. when all the previous
/// buckets are full). This ensures that the memory overhead of this collection
/// remains bounded by a factor of two, similarly to a regular [`Vec`].
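/// For example, with the default layout the buckets hold 2, 2, 4, 8, 16, ...
/// items, so each newly allocated bucket doubles the total capacity, much like
/// a [`Vec`] reallocation would.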
///
/// Because the values are spread over multiple buckets in memory, it's not
/// possible to obtain a slice covering a sequence of items.
///
/// This container is [`Send`] and [`Sync`] if and only if `T` is both [`Send`]
/// and [`Sync`]. Indeed, sharing or sending an [`AppendVec`] allows retrieving
/// shared references to `T` and moving values of type `T` across threads
/// (where the value is pushed vs. where it is dropped).
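///
/// A minimal usage example:
///
/// ```
/// use appendvec::AppendVec;
///
/// let container = AppendVec::new();
/// let index = container.push("hello");
/// assert_eq!(container[index], "hello");
/// assert_eq!(container.len(), 1);
/// ```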
pub struct AppendVec<T> {
    /// Length of the collection.
    len: CachePadded<AtomicUsize>,
    /// Base-2 logarithm of the size of the first bucket.
    bucket_offset: u32,
    /// Pointers to allocated buckets of growing size, or null for
    /// not-yet-allocated buckets. [`bucket_len()`] gives the constant size of
    /// each bucket.
    buckets: [AtomicPtr<T>; usize::BITS as usize],
    /// Lock held during [`push()`](Self::push) operations.
    write_lock: Mutex<()>,
}

// SAFETY: Sending an AppendVec allows (1) retrieving a &T on another thread and
// (2) dropping T on another thread.
unsafe impl<T: Send + Sync> Send for AppendVec<T> {}
// SAFETY: Sharing an AppendVec allows (1) retrieving a &T on another thread and
// (2) importing T from another thread, via the push() method.
unsafe impl<T: Send + Sync> Sync for AppendVec<T> {}

impl<T> Default for AppendVec<T> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T> AppendVec<T> {
    /// Base-2 logarithm of the size of `T`, rounded up to a power of two.
    const ITEM_SIZE_LOG2: u32 = std::mem::size_of::<T>().next_power_of_two().ilog2();
    /// Maximum number of items, chosen so that offsets in bytes within a
    /// bucket always fit in an `isize`.
    const MAX_LEN: usize = !0 >> (Self::ITEM_SIZE_LOG2 + 1);

    /// Creates a new, empty collection.
    ///
    /// ```
    /// use appendvec::AppendVec;
    ///
    /// let mut container = AppendVec::new();
    /// let index = container.push_mut(42);
    /// assert_eq!(container[index], 42);
    /// ```
    pub fn new() -> Self {
        Self {
            len: CachePadded::new(AtomicUsize::new(0)),
            bucket_offset: 1,
            buckets: [const { AtomicPtr::new(std::ptr::null_mut()) }; usize::BITS as usize],
            write_lock: Mutex::new(()),
        }
    }

    /// Creates a new, empty collection, pre-allocating space for at least
    /// `capacity` items.
    ///
    /// The requested `capacity` items are guaranteed to be allocated
    /// contiguously in a single bucket.
    ///
    /// # Panics
    ///
    /// This function panics if the capacity exceeds the maximum allocation size
    /// for a collection of items of type `T`.
    ///
    /// ```
    /// use appendvec::AppendVec;
    ///
    /// let mut container = AppendVec::with_capacity(42);
    /// for i in 0..42 {
    ///     let index = container.push_mut(i);
    ///     assert_eq!(container[index], i);
    /// }
    /// ```
    #[allow(clippy::needless_range_loop)]
    pub fn with_capacity(capacity: usize) -> Self {
        assert!(
            capacity <= Self::MAX_LEN,
            "AppendVec: requested capacity is too large for the given type ({capacity}, {})",
            Self::MAX_LEN
        );

        let mut buckets = [std::ptr::null_mut(); usize::BITS as usize];
        let bucket_offset = if capacity == 0 {
            1
        } else {
            let bucket_offset = capacity.next_power_of_two().ilog2();
            debug_assert_eq!(bucketize(capacity - 1, bucket_offset).0, 0);

            let bucket_len = 1 << bucket_offset;
            let allocated = Box::<[T]>::new_uninit_slice(bucket_len);
            let bucket_ptr = Box::into_raw(allocated) as *mut MaybeUninit<T> as *mut T;
            buckets[0] = bucket_ptr;

            bucket_offset
        };

        Self {
            len: CachePadded::new(AtomicUsize::new(0)),
            bucket_offset,
            buckets: buckets.map(AtomicPtr::new),
            write_lock: Mutex::new(()),
        }
    }

    /// Returns the length of this collection.
    ///
    /// Given that writes can happen concurrently, beware of
    /// [TOCTOU](https://en.wikipedia.org/wiki/Time-of-check_to_time-of-use)
    /// bugs! The value returned here is only a lower bound of the collection
    /// size.
    ///
    /// To know the index of an added item, use the return value of the
    /// [`push()`](Self::push) function.
    ///
    /// ```
    /// use appendvec::AppendVec;
    /// use std::thread;
    ///
    /// let container = AppendVec::with_capacity(42);
    /// thread::scope(|s| {
    ///     s.spawn(|| {
    ///         for i in 0..42 {
    ///             let index = container.push(i);
    ///             // There is only one writer thread.
    ///             assert_eq!(index, i);
    ///         }
    ///     });
    ///     s.spawn(|| {
    ///         loop {
    ///             let l = container.len();
    ///             if l != 0 {
    ///                 // The unique writer thread pushes values in order.
    ///                 assert_eq!(container[l - 1], l - 1);
    ///             }
    ///             if l == 42 {
    ///                 break;
    ///             }
    ///         }
    ///     });
    /// });
    /// ```
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.len.load(Ordering::Acquire)
    }

    /// Returns an estimate of the length of this collection, without performing
    /// any synchronization (i.e. using [`Relaxed`](Ordering::Relaxed)
    /// ordering).
    ///
    /// # Safety
    ///
    /// Passing the result of this function (minus one) to
    /// [`get_unchecked()`](Self::get_unchecked) without any other form of
    /// synchronization is unsound, as the write(s) of the appended value(s)
    /// cannot be assumed to have *happened before* the increment of the length
    /// observed here.
    ///
    /// If you don't know what ["happens
    /// before"](https://doc.rust-lang.org/stable/nomicon/atomics.html) entails,
    /// you should probably not use this function, and use the regular
    /// [`len()`](Self::len) function instead.
    ///
    /// ```
    /// use appendvec::AppendVec;
    /// use std::sync::Barrier;
    /// use std::thread;
    ///
    /// let container = AppendVec::with_capacity(42);
    /// let barrier = Barrier::new(2);
    /// thread::scope(|s| {
    ///     s.spawn(|| {
    ///         for i in 0..42 {
    ///             container.push(i);
    ///         }
    ///         // Synchronizes all the writes with the reader thread.
    ///         barrier.wait();
    ///     });
    ///     s.spawn(|| {
    ///         // Wait for all values to be appended to the container, and synchronize.
    ///         barrier.wait();
    ///         // SAFETY: After the synchronization barrier, no more values are appended in the
    ///         // writer thread.
    ///         let len = unsafe { container.len_unsynchronized() };
    ///         for i in 0..len {
    ///             // SAFETY: The writer thread wrote until the container length before the
    ///             // synchronization barrier.
    ///             let value = unsafe { container.get_unchecked(i) };
    ///             assert_eq!(*value, i);
    ///         }
    ///     });
    /// });
    /// ```
    pub unsafe fn len_unsynchronized(&self) -> usize {
        self.len.load(Ordering::Relaxed)
    }

    /// Adds the given item to this collection and returns the index at
    /// which it was added.
    ///
    /// Internally, a write lock is held during this function call, i.e. other
    /// writers must wait for this function to complete. However, readers are
    /// free to read items in the meantime.
    ///
    /// Note that even though this internally appends the item at the end of the
    /// collection, and the write lock is held during the operation, there is no
    /// guarantee that the returned index will remain the last one (i.e. equal
    /// to `self.len() - 1`), as a concurrent write may happen immediately
    /// afterwards. Beware of
    /// [TOCTOU](https://en.wikipedia.org/wiki/Time-of-check_to_time-of-use)
    /// bugs!
    ///
    /// See also [`push_mut()`](Self::push_mut), which is more efficient if
    /// you hold a mutable reference to this collection.
    ///
    /// # Panics
    ///
    /// This function panics if this collection has reached the maximum
    /// allocation size for items of type `T`.
    ///
    /// ```
    /// use appendvec::AppendVec;
    ///
    /// // The container isn't mutable; we use concurrent interior mutability via the push()
    /// // function.
    /// let container = AppendVec::with_capacity(42);
    /// for i in 0..42 {
    ///     let index = container.push(i);
    ///     assert_eq!(container[index], i);
    /// }
    /// ```
    pub fn push(&self, t: T) -> usize {
        let guard = self.write_lock.lock().unwrap();

        let index = self.len.load(Ordering::Relaxed);
        if index == Self::MAX_LEN {
            // Drop the guard before panicking to avoid poisoning the Mutex.
            drop(guard);
            panic!("AppendVec is full: cannot push");
        }

        let (bucket, bucket_index) = bucketize(index, self.bucket_offset);
        let bucket_ptr = self.get_bucket_ptr(bucket, &guard);

        // SAFETY:
        // - bucket_index * size_of::<T>() fits in an isize, as promised by the
        //   bucketize() function, with an input index <= Self::MAX_LEN,
        // - the entire range between bucket_ptr and ptr is derived from one allocation
        //   of bucket_len(bucket) items, as 0 <= bucket_index < bucket_len(bucket).
        let ptr = unsafe { bucket_ptr.add(bucket_index) };
        // SAFETY:
        // - ptr is properly aligned, non-null with correct provenance, because it's
        //   derived from bucket_ptr which is itself aligned as it was allocated from a
        //   boxed slice of Ts,
        // - ptr is valid for exclusive writes:
        //   - the Release store on the length just below ensures that no other thread
        //     is reading the value at the given index (via safe APIs),
        //   - the write lock ensures that no other thread is ever obtaining the same
        //     index, i.e. no other thread is ever writing to this index.
        unsafe { std::ptr::write(ptr, t) };
        self.len.store(index + 1, Ordering::Release);

        drop(guard);

        index
    }

    /// Adds the given item to this collection and returns the index at
    /// which it was added.
    ///
    /// Contrary to [`push()`](Self::push), no write lock is held internally
    /// because this function already takes an exclusive mutable reference
    /// to this collection.
    ///
    /// # Panics
    ///
    /// This function panics if this collection has reached the maximum
    /// allocation size for items of type `T`.
    ///
    /// ```
    /// use appendvec::AppendVec;
    ///
    /// let mut container = AppendVec::with_capacity(42);
    /// for i in 0..42 {
    ///     let index = container.push_mut(i);
    ///     assert_eq!(container[index], i);
    /// }
    /// ```
    pub fn push_mut(&mut self, t: T) -> usize {
        let index = self.len.load(Ordering::Relaxed);
        assert_ne!(index, Self::MAX_LEN, "AppendVec is full: cannot push");

        let (bucket, bucket_index) = bucketize(index, self.bucket_offset);
        let bucket_ptr = self.get_bucket_ptr_mut(bucket);

        // SAFETY:
        // - bucket_index * size_of::<T>() fits in an isize, as promised by the
        //   bucketize() function, with an input index <= Self::MAX_LEN,
        // - the entire range between bucket_ptr and ptr is derived from one allocation
        //   of bucket_len(bucket) items, as 0 <= bucket_index < bucket_len(bucket).
        let ptr = unsafe { bucket_ptr.add(bucket_index) };
        // SAFETY:
        // - ptr is properly aligned, non-null with correct provenance, because it's
        //   derived from bucket_ptr which is itself aligned as it was allocated from a
        //   boxed slice of Ts,
        // - ptr is valid for exclusive writes as this function takes an exclusive `&mut
        //   self` parameter.
        unsafe { std::ptr::write(ptr, t) };
        self.len.store(index + 1, Ordering::Release);

        index
    }

    /// Obtains a reference to the item at the given index without performing
    /// bounds checks.
    ///
    /// # Safety
    ///
    /// The passed `index` must be less than the length of the collection, i.e.
    /// a call to [`push()`](Self::push) that returned `index` must have
    /// *happened before* this function call.
    ///
    /// If you don't know what ["happens
    /// before"](https://doc.rust-lang.org/stable/nomicon/atomics.html) entails,
    /// you should probably not use this function, and use the regular indexing
    /// syntax instead.
    ///
    /// ```
    /// use appendvec::AppendVec;
    /// use std::sync::Barrier;
    /// use std::thread;
    ///
    /// let container = AppendVec::with_capacity(42);
    /// let barrier = Barrier::new(2);
    /// thread::scope(|s| {
    ///     s.spawn(|| {
    ///         for i in 0..42 {
    ///             container.push(i);
    ///         }
    ///         // Synchronizes all the writes with the reader thread.
    ///         barrier.wait();
    ///     });
    ///     s.spawn(|| {
    ///         // Wait for all values to be appended to the container, and synchronize.
    ///         barrier.wait();
    ///         // SAFETY: After the synchronization barrier, no more values are appended in the
    ///         // writer thread.
    ///         let len = unsafe { container.len_unsynchronized() };
    ///         for i in 0..len {
    ///             // SAFETY: The writer thread wrote until the container length before the
    ///             // synchronization barrier.
    ///             let value = unsafe { container.get_unchecked(i) };
    ///             assert_eq!(*value, i);
    ///         }
    ///     });
    /// });
    /// ```
    pub unsafe fn get_unchecked(&self, index: usize) -> &T {
        let (bucket, bucket_index) = bucketize(index, self.bucket_offset);

        let bucket_ptr = self.buckets[bucket].load(Ordering::Relaxed) as *const T;
        debug_assert_ne!(bucket_ptr, std::ptr::null());

        // SAFETY:
        // - bucket_index * size_of::<T>() fits in an isize, as promised by the caller
        //   and the checks within push() and push_mut() that ensure that at most
        //   Self::MAX_LEN + 1 items are stored in this collection,
        // - the entire range between bucket_ptr and ptr is derived from one allocation
        //   of bucket_len(bucket) items, as 0 <= bucket_index < bucket_len(bucket).
        let ptr = unsafe { bucket_ptr.add(bucket_index) };
        // SAFETY:
        // - ptr is properly aligned, non-null with correct provenance, because it's
        //   derived from bucket_ptr which is itself aligned as it was allocated from a
        //   boxed slice of Ts,
        // - ptr is valid for reads: the only write at the given index has happened
        //   before this read as promised by the caller.
        unsafe { &*ptr }
    }

    /// Obtains an iterator over this collection.
    ///
    /// Note that once this iterator has been created, it will
    /// not iterate over items added afterwards, even on the same thread. This
    /// is to minimize the number of atomic operations.
    ///
    /// ```
    /// use appendvec::AppendVec;
    /// use std::thread;
    ///
    /// let container = AppendVec::with_capacity(42);
    /// thread::scope(|s| {
    ///     s.spawn(|| {
    ///         for i in 0..42 {
    ///             let index = container.push(i);
    ///             assert_eq!(index, i);
    ///         }
    ///     });
    ///     s.spawn(|| {
    ///         loop {
    ///             let it = container.iter();
    ///             let len = it.len();
    ///             for (i, value) in it.enumerate() {
    ///                 assert_eq!(*value, i);
    ///             }
    ///             if len == 42 {
    ///                 break;
    ///             }
    ///         }
    ///     });
    /// });
    /// ```
    pub fn iter(&self) -> AppendVecIter<'_, T> {
        AppendVecIter {
            inner: self,
            bucket_offset: self.bucket_offset,
            len: self.len.load(Ordering::Acquire),
            index: 0,
            bucket_ptr: std::ptr::null(),
        }
    }

    /// Internal function to retrieve the pointer to the given bucket,
    /// allocating it if it's null.
    fn get_bucket_ptr_mut(&mut self, bucket: usize) -> *mut T {
        // SAFETY: The caller passed a mutable reference to self, which confirms
        // exclusive access.
        unsafe { self.get_bucket_ptr_raw(bucket) }
    }

    /// Internal function to retrieve the pointer to the given bucket,
    /// allocating it if it's null.
    fn get_bucket_ptr(&self, bucket: usize, _guard: &MutexGuard<()>) -> *mut T {
        // SAFETY: The caller passed a mutex guard, which confirms exclusive access.
        unsafe { self.get_bucket_ptr_raw(bucket) }
    }

    /// Internal function to retrieve the pointer to the given bucket,
    /// allocating it if it's null.
    ///
    /// # Safety
    ///
    /// The caller must ensure exclusive write access to this object (as a new
    /// bucket may be allocated), by holding a mutable reference to self or
    /// the write lock.
    unsafe fn get_bucket_ptr_raw(&self, bucket: usize) -> *mut T {
        let ptr = self.buckets[bucket].load(Ordering::Relaxed);
        if !ptr.is_null() {
            ptr
        } else {
            let bucket_len = bucket_len(bucket, self.bucket_offset);
            let allocated = Box::<[T]>::new_uninit_slice(bucket_len);
            let bucket_ptr = Box::into_raw(allocated) as *mut MaybeUninit<T> as *mut T;
            self.buckets[bucket].store(bucket_ptr, Ordering::Relaxed);
            bucket_ptr
        }
    }
}

impl<T: Copy + Default> AppendVec<T> {
    /// Adds the given contiguous slice of items to this collection, and returns
    /// the range at which they were added.
    ///
    /// The items are guaranteed to be pushed contiguously, so that indexing by
    /// the returned range retrieves them back as a contiguous slice. If the
    /// current bucket doesn't have enough remaining capacity to accommodate
    /// this contiguous slice, the rest of that bucket is padded with default
    /// items, hence the [`Default`] requirement for `T`.
    ///
    /// Note that there is currently also a [`Copy`] requirement for performance
    /// reasons when copying the input slice into this collection.
    ///
    /// See also [`push()`](Self::push).
    ///
    /// # Panics
    ///
    /// This function panics if this collection has reached the maximum
    /// allocation size for items of type `T`.
    ///
    /// ```
    /// use appendvec::AppendVec;
    ///
    /// let container = AppendVec::new();
    /// for i in 0..42 {
    ///     let blob = vec![123; i];
    ///     let index = container.push_slice(blob.as_slice());
    ///     assert_eq!(&container[index], blob.as_slice());
    /// }
    /// ```
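    ///
    /// A sketch of the padding behavior, assuming the default bucket layout of
    /// 2, 2, 4, 8, ... items:
    ///
    /// ```
    /// use appendvec::AppendVec;
    ///
    /// let container = AppendVec::new();
    /// // Neither of the first two buckets (2 items each) can hold 3 contiguous
    /// // items, so they are padded with defaults and the slice lands at the
    /// // start of the third bucket.
    /// let range = container.push_slice(&[7, 7, 7]);
    /// assert_eq!(range.start, 4);
    /// assert_eq!(&container[range], &[7, 7, 7]);
    /// ```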
    pub fn push_slice(&self, slice: &[T]) -> Range<usize> {
        if slice.is_empty() {
            return 0..0;
        }

        let guard = self.write_lock.lock().unwrap();

        let mut index = self.len.load(Ordering::Relaxed);
        if slice.len() > Self::MAX_LEN - index {
            // Drop the guard before panicking to avoid poisoning the Mutex.
            drop(guard);
            panic!("AppendVec is full: cannot push");
        }

        loop {
            let (bucket, bucket_index) = bucketize(index, self.bucket_offset);
            let bucket_len = bucket_len(bucket, self.bucket_offset);
            if slice.len() <= bucket_len - bucket_index {
                break;
            }

            let bucket_ptr = self.get_bucket_ptr(bucket, &guard);
            for i in bucket_index..bucket_len {
                // SAFETY:
                // - i * size_of::<T>() fits in an isize, as promised by the bucket_len()
                //   function, with an input index <= Self::MAX_LEN,
                // - the entire range between bucket_ptr and ptr is derived from one allocation
                //   of bucket_len items, as 0 <= i < bucket_len.
                let ptr = unsafe { bucket_ptr.add(i) };
                // SAFETY:
                // - ptr is properly aligned, non-null with correct provenance, because it's
                //   derived from bucket_ptr which is itself aligned as it was allocated from a
                //   boxed slice of Ts,
                // - ptr is valid for exclusive writes:
                //   - the Release store on the length just below ensures that no other thread
                //     is reading the value at the given index (via safe APIs),
                //   - the write lock ensures that no other thread is ever obtaining the same
                //     index, i.e. no other thread is ever writing to this index.
                unsafe { std::ptr::write(ptr, T::default()) };
            }

            index += bucket_len - bucket_index;
        }

        let (bucket, bucket_index) = bucketize(index, self.bucket_offset);
        debug_assert!(slice.len() <= bucket_len(bucket, self.bucket_offset) - bucket_index);
        let bucket_ptr = self.get_bucket_ptr(bucket, &guard);

        // SAFETY:
        // - bucket_index * size_of::<T>() fits in an isize, as promised by the
        //   bucketize() function, with an input index <= Self::MAX_LEN,
        // - the entire range between bucket_ptr and ptr is derived from one allocation
        //   of bucket_len(bucket) items, as 0 <= bucket_index < bucket_len(bucket).
        let ptr = unsafe { bucket_ptr.add(bucket_index) };
        // SAFETY:
        // - slice.as_ptr() is valid for reading slice.len() items of type T,
        // - ptr is valid for writing slice.len() items of type T, indeed bucket_index +
        //   slice.len() <= bucket_len(bucket),
        // - slice.as_ptr() is properly aligned, as it directly derives from a slice,
        // - ptr is properly aligned, as it derives from bucket_ptr which is properly
        //   aligned,
        // - the memory regions don't overlap, as the input slice is passed as an
        //   external shared reference parameter,
        // - T is Copy.
        unsafe { std::ptr::copy_nonoverlapping(slice.as_ptr(), ptr, slice.len()) };
        self.len.store(index + slice.len(), Ordering::Release);

        drop(guard);

        index..index + slice.len()
    }

    /// Adds the given contiguous slice of items to this collection, and returns
    /// the range at which they were added.
    ///
    /// The items are guaranteed to be pushed contiguously, so that indexing by
    /// the returned range retrieves them back as a contiguous slice. If the
    /// current bucket doesn't have enough remaining capacity to accommodate
    /// this contiguous slice, the rest of that bucket is padded with default
    /// items, hence the [`Default`] requirement for `T`.
    ///
    /// Note that there is currently also a [`Copy`] requirement for performance
    /// reasons when copying the input slice into this collection.
    ///
    /// Contrary to [`push_slice()`](Self::push_slice), no write lock is held
    /// internally because this function already takes an exclusive mutable
    /// reference to this collection.
    ///
    /// # Panics
    ///
    /// This function panics if this collection has reached the maximum
    /// allocation size for items of type `T`.
    ///
    /// ```
    /// use appendvec::AppendVec;
    ///
    /// let mut container = AppendVec::new();
    /// for i in 0..42 {
    ///     let blob = vec![123; i];
    ///     let index = container.push_slice_mut(blob.as_slice());
    ///     assert_eq!(&container[index], blob.as_slice());
    /// }
    /// ```
    pub fn push_slice_mut(&mut self, slice: &[T]) -> Range<usize> {
        if slice.is_empty() {
            return 0..0;
        }

        let mut index = self.len.load(Ordering::Relaxed);
        assert!(
            slice.len() <= Self::MAX_LEN - index,
            "AppendVec is full: cannot push"
        );

        loop {
            let (bucket, bucket_index) = bucketize(index, self.bucket_offset);
            let bucket_len = bucket_len(bucket, self.bucket_offset);
            if slice.len() <= bucket_len - bucket_index {
                break;
            }

            let bucket_ptr = self.get_bucket_ptr_mut(bucket);
            for i in bucket_index..bucket_len {
                // SAFETY:
                // - i * size_of::<T>() fits in an isize, as promised by the bucket_len()
                //   function, with an input index <= Self::MAX_LEN,
                // - the entire range between bucket_ptr and ptr is derived from one allocation
                //   of bucket_len items, as 0 <= i < bucket_len.
                let ptr = unsafe { bucket_ptr.add(i) };
                // SAFETY:
                // - ptr is properly aligned, non-null with correct provenance, because it's
                //   derived from bucket_ptr which is itself aligned as it was allocated from a
                //   boxed slice of Ts,
                // - ptr is valid for exclusive writes as this function takes an exclusive `&mut
                //   self` parameter.
                unsafe { std::ptr::write(ptr, T::default()) };
            }

            index += bucket_len - bucket_index;
        }

        let (bucket, bucket_index) = bucketize(index, self.bucket_offset);
        debug_assert!(slice.len() <= bucket_len(bucket, self.bucket_offset) - bucket_index);
        let bucket_ptr = self.get_bucket_ptr_mut(bucket);

        // SAFETY:
        // - bucket_index * size_of::<T>() fits in an isize, as promised by the
        //   bucketize() function, with an input index <= Self::MAX_LEN,
        // - the entire range between bucket_ptr and ptr is derived from one allocation
        //   of bucket_len(bucket) items, as 0 <= bucket_index < bucket_len(bucket).
        let ptr = unsafe { bucket_ptr.add(bucket_index) };
        // SAFETY:
        // - slice.as_ptr() is valid for reading slice.len() items of type T,
        // - ptr is valid for writing slice.len() items of type T, indeed bucket_index +
        //   slice.len() <= bucket_len(bucket),
        // - slice.as_ptr() is properly aligned, as it directly derives from a slice,
        // - ptr is properly aligned, as it derives from bucket_ptr which is properly
        //   aligned,
        // - the memory regions don't overlap, as the input slice is passed as an
        //   external shared reference parameter,
        // - T is Copy.
        unsafe { std::ptr::copy_nonoverlapping(slice.as_ptr(), ptr, slice.len()) };
        self.len.store(index + slice.len(), Ordering::Release);

        index..index + slice.len()
    }
}

impl<T> Drop for AppendVec<T> {
    fn drop(&mut self) {
        let len = self.len.load(Ordering::Acquire);
        if len == 0 {
            return;
        }

        let (max_bucket, max_index) = bucketize(len - 1, self.bucket_offset);
        for bucket in 0..=max_bucket {
            if bucket != 0 && bucket < self.bucket_offset as usize {
                continue;
            }

            let bucket_len = bucket_len(bucket, self.bucket_offset);
            let bucket_items = if bucket != max_bucket {
                bucket_len
            } else {
                max_index + 1
            };

            let ptr: *mut T = self.buckets[bucket].load(Ordering::Relaxed);
            let slice: *mut [T] = std::ptr::slice_from_raw_parts_mut(ptr, bucket_items);
            // SAFETY:
            // - slice starts at the aligned and non-null ptr,
            // - slice is valid for reads, as all items until len have been written to
            //   before, as ensured by the Acquire load on self.len,
            // - slice is valid for writes, as this function takes an exclusive `&mut self`
            //   parameter,
            // - slice is valid for dropping, as it is a part of the leaked boxed slice of
            //   this bucket,
            // - nothing else is accessing the `slice` while `drop_in_place` is executing,
            //   as this function takes an exclusive `&mut self` parameter.
            unsafe { std::ptr::drop_in_place(slice) };

            // SAFETY:
            // - ptr has been allocated with the global allocator, as it is derived from a
            //   leaked boxed slice,
            // - T has the same alignment as what ptr was allocated with, because ptr
            //   derives from a boxed slice of Ts,
            // - ptr was allocated with size_of::<T>() * bucket_len bytes,
            // - the length is zero, therefore lower than or equal to the capacity,
            // - the first 0 values are properly initialized values of type T,
            // - the allocated size in bytes isn't larger than isize::MAX, because that's
            //   derived from a leaked boxed slice.
            let vec: Vec<T> = unsafe { Vec::from_raw_parts(ptr, 0, bucket_len) };
            drop(vec);
        }
    }
}

impl<T> Index<usize> for AppendVec<T> {
    type Output = T;

    /// Obtains a reference to the item at the given index.
    ///
    /// # Panics
    ///
    /// The passed `index` must be less than the length of the collection, i.e.
    /// a call to [`push()`](Self::push) (or its variants) that returned `index`
    /// must have happened before this function call. Otherwise, this
    /// function panics.
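    ///
    /// For example:
    ///
    /// ```
    /// use appendvec::AppendVec;
    ///
    /// let container = AppendVec::new();
    /// let index = container.push(7);
    /// assert_eq!(container[index], 7);
    /// ```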
    fn index(&self, index: usize) -> &Self::Output {
        assert!(index < self.len.load(Ordering::Acquire));
        let (bucket, bucket_index) = bucketize(index, self.bucket_offset);

        let bucket_ptr = self.buckets[bucket].load(Ordering::Relaxed) as *const T;
        debug_assert_ne!(bucket_ptr, std::ptr::null());

        // SAFETY:
        // - the assertion at the beginning of this function, with the Acquire load on
        //   the length, ensures that an item was added at the given index before the
        //   rest of this function is executed,
        // - bucket_index * size_of::<T>() fits in an isize, as the checks within push()
        //   and its variants ensure that at most Self::MAX_LEN + 1 items are stored in
        //   this collection,
        // - the entire range between bucket_ptr and ptr is derived from one allocation
        //   of bucket_len(bucket) items, as 0 <= bucket_index < bucket_len(bucket).
        let ptr = unsafe { bucket_ptr.add(bucket_index) };
        // SAFETY:
        // - ptr is properly aligned, non-null with correct provenance, because it's
        //   derived from bucket_ptr which is itself aligned as it was allocated from a
        //   boxed slice of Ts,
        // - ptr is valid for reads: the only write at the given index has happened
        //   before this read as ensured by the assertion with an Acquire load on the
        //   length at the beginning of this function.
        unsafe { &*ptr }
    }
}

impl<T> Index<Range<usize>> for AppendVec<T> {
    type Output = [T];

    /// Obtains a slice of the items at the given range of indices.
    ///
    /// # Panics
    ///
    /// The passed `index` range:
    /// - must be correctly ordered, i.e. `index.start <= index.end`,
    /// - must be within the bounds of the collection, i.e. a call to
    ///   [`push_slice()`](Self::push_slice) (or its variants) that returned a
    ///   superset of `index` must have happened before this function call,
    /// - must be contained in a single contiguous bucket; this is always the
    ///   case when passing a subset of a range returned by a previous call to
    ///   [`push_slice()`](Self::push_slice).
    ///
    /// Otherwise, this function panics.
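    ///
    /// For example, a range returned by [`push_slice()`](Self::push_slice) can
    /// be used directly:
    ///
    /// ```
    /// use appendvec::AppendVec;
    ///
    /// let container = AppendVec::new();
    /// let range = container.push_slice(&[1, 2, 3]);
    /// assert_eq!(&container[range], &[1, 2, 3]);
    /// ```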
    fn index(&self, index: Range<usize>) -> &Self::Output {
        if index.start == index.end {
            return &[];
        }

        assert!(index.start <= index.end);
        let index_len = index.end - index.start;

        assert!(index.end <= self.len.load(Ordering::Acquire));
        let (bucket, bucket_index) = bucketize(index.start, self.bucket_offset);
        assert!(index_len <= bucket_len(bucket, self.bucket_offset) - bucket_index);

        let bucket_ptr = self.buckets[bucket].load(Ordering::Relaxed) as *const T;
        debug_assert_ne!(bucket_ptr, std::ptr::null());

        // SAFETY:
        // - the assertion comparing `index.end` with `self.len`, with the Acquire load
        //   on the length, ensures that items were added until the given index before
        //   the rest of this function is executed,
        // - bucket_index * size_of::<T>() fits in an isize, as the checks within push()
        //   and its variants ensure that at most Self::MAX_LEN + 1 items are stored in
        //   this collection,
        // - the entire range between bucket_ptr and ptr is derived from one allocation
        //   of bucket_len(bucket) items, as 0 <= bucket_index < bucket_len(bucket).
        let ptr = unsafe { bucket_ptr.add(bucket_index) };
        // SAFETY:
        // - ptr is non-null, as this collection has an invariant that all buckets until
        //   the length are non-null, further confirmed by a debug assertion,
        //   - note that the index range isn't empty in this code branch,
        // - ptr is aligned as it derives from the aligned bucket_ptr,
        // - ptr is valid for reads of index_len items of type T, as confirmed by the
        //   assertion that bucket_index + index_len <= bucket_len(bucket),
        // - ptr points to index_len initialized values of type T, as the collection
        //   maintains the invariant that all items until `self.len` are initialized,
        // - the memory isn't mutated for the whole output lifetime, which is bound by
        //   the lifetime of this collection,
        // - index_len * size_of::<T>() fits in an isize, as the checks within push()
        //   and its variants ensure that at most Self::MAX_LEN + 1 items are stored in
        //   this collection.
        unsafe { std::slice::from_raw_parts(ptr, index_len) }
    }
}

/// Iterator over an [`AppendVec`].
///
/// Note that once this iterator has been created via the [`AppendVec::iter()`]
/// function, it will not iterate over items added afterwards, even on the same
/// thread. This is to minimize the number of atomic operations, and to allow
/// implementing [`ExactSizeIterator`].
///
/// This is [`Send`] and [`Sync`] if and only if `T` is [`Sync`], as sharing
/// or sending this iterator allows retrieving shared references to `T`.
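///
/// A sketch of single-threaded iteration:
///
/// ```
/// use appendvec::AppendVec;
///
/// let mut container = AppendVec::new();
/// container.push_mut(1);
/// container.push_mut(2);
/// let items: Vec<i32> = container.iter().copied().collect();
/// assert_eq!(items, [1, 2]);
/// ```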
pub struct AppendVecIter<'a, T> {
    inner: &'a AppendVec<T>,
    bucket_offset: u32,
    len: usize,
    index: usize,
    bucket_ptr: *const T,
}

// SAFETY: Sending an AppendVecIter allows retrieving a &T on another thread.
unsafe impl<T: Sync> Send for AppendVecIter<'_, T> {}
// SAFETY: One cannot do much by sharing an AppendVecIter, but at most it would
// allow retrieving a &T on another thread.
unsafe impl<T: Sync> Sync for AppendVecIter<'_, T> {}

impl<'a, T> Iterator for AppendVecIter<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<Self::Item> {
        if self.index == self.len {
            None
        } else {
            let (bucket, bucket_index) = bucketize(self.index, self.bucket_offset);
            self.index += 1;

            if bucket_index == 0 {
                self.bucket_ptr = self.inner.buckets[bucket].load(Ordering::Relaxed) as *const T;
                debug_assert_ne!(self.bucket_ptr, std::ptr::null());
            }

            // SAFETY:
            // - the Acquire load in iter() ensures that all indices before self.len,
            //   including self.index, have been pushed before the iterator was created,
            // - bucket_index * size_of::<T>() fits in an isize, as the checks within push()
            //   and push_mut() ensure that at most Self::MAX_LEN + 1 items are stored in
            //   this collection,
            // - the entire range between self.bucket_ptr and ptr is derived from one
            //   allocation of bucket_len(bucket) items, as 0 <= bucket_index <
            //   bucket_len(bucket).
            let ptr = unsafe { self.bucket_ptr.add(bucket_index) };
            // SAFETY:
            // - ptr is properly aligned, non-null with correct provenance, because it's
            //   derived from self.bucket_ptr which is itself aligned as it was allocated
            //   from a boxed slice of Ts,
            // - ptr is valid for reads: the only write at the given index has happened
            //   before this read as ensured by the Acquire load on the length in iter().
            unsafe { Some(&*ptr) }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.len - self.index;
        (remaining, Some(remaining))
    }
}

impl<T> ExactSizeIterator for AppendVecIter<'_, T> {}

/// Decomposes the given `index` into the bucket that contains it and the index
/// within that bucket.
///
/// This function guarantees that the returned (`bucket`, `bucket_index`)
/// satisfy:
/// - 0 <= `bucket` < [`usize::BITS`],
/// - if `bucket` != 0, then `bucket` >= `bucket_offset`,
/// - if `bucket` == 0, then 0 <= `bucket_index` < `1 << bucket_offset`,
/// - otherwise, 0 <= `bucket_index` < `1 << bucket`.
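///
/// For example, with `bucket_offset == 1`, indices 0..=1 map to bucket 0,
/// indices 2..=3 to bucket 1, indices 4..=7 to bucket 2, and so on.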
const fn bucketize(index: usize, bucket_offset: u32) -> (usize, usize) {
    let mut bucket = (usize::BITS - 1).saturating_sub(index.leading_zeros());
    if bucket < bucket_offset {
        bucket = 0;
    }

    let bucket_index = if bucket == 0 {
        index
    } else {
        index - (1 << bucket)
    };

    (bucket as usize, bucket_index)
}

/// Returns the number of items held in the given `bucket`.
///
/// This is `2^bucket`, except for the first bucket which is of size
/// `2^bucket_offset` instead of 1. This formula ensures that the sum of the
/// sizes of all buckets before bucket `n` is `2^n` (for `n >= bucket_offset`).
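///
/// For example, with `bucket_offset == 2` the sizes are 4 (bucket 0), 4
/// (bucket 2), 8 (bucket 3), 16 (bucket 4), and so on; buckets strictly
/// between 0 and `bucket_offset` are never used.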
const fn bucket_len(bucket: usize, bucket_offset: u32) -> usize {
    if bucket == 0 {
        1 << bucket_offset
    } else if bucket < bucket_offset as usize {
        panic!("Invalid bucket between 0 and bucket_offset");
    } else {
        1 << bucket
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use std::ops::Deref;
    use std::thread;

    #[test]
    fn test_item_size_log2() {
        assert_eq!(AppendVec::<u8>::ITEM_SIZE_LOG2, 0);
        assert_eq!(AppendVec::<u16>::ITEM_SIZE_LOG2, 1);
        assert_eq!(AppendVec::<u32>::ITEM_SIZE_LOG2, 2);
        assert_eq!(AppendVec::<u64>::ITEM_SIZE_LOG2, 3);

        // The implementation uses the next power of 2.
        assert_eq!(AppendVec::<[u8; 2]>::ITEM_SIZE_LOG2, 1);
        assert_eq!(AppendVec::<[u8; 3]>::ITEM_SIZE_LOG2, 2);
        assert_eq!(AppendVec::<[u8; 4]>::ITEM_SIZE_LOG2, 2);
        assert_eq!(AppendVec::<[u8; 5]>::ITEM_SIZE_LOG2, 3);
        assert_eq!(AppendVec::<[u8; 6]>::ITEM_SIZE_LOG2, 3);
        assert_eq!(AppendVec::<[u8; 7]>::ITEM_SIZE_LOG2, 3);
    }

    #[test]
    fn test_max_len() {
        assert_eq!(AppendVec::<u8>::MAX_LEN, usize::MAX >> 1);
        assert_eq!(AppendVec::<u16>::MAX_LEN, usize::MAX >> 2);
        assert_eq!(AppendVec::<u32>::MAX_LEN, usize::MAX >> 3);
        assert_eq!(AppendVec::<u64>::MAX_LEN, usize::MAX >> 4);

        // The implementation uses the next power of 2.
        assert_eq!(AppendVec::<[u8; 2]>::MAX_LEN, usize::MAX >> 2);
        assert_eq!(AppendVec::<[u8; 3]>::MAX_LEN, usize::MAX >> 3);
        assert_eq!(AppendVec::<[u8; 4]>::MAX_LEN, usize::MAX >> 3);
        assert_eq!(AppendVec::<[u8; 5]>::MAX_LEN, usize::MAX >> 4);
        assert_eq!(AppendVec::<[u8; 6]>::MAX_LEN, usize::MAX >> 4);
        assert_eq!(AppendVec::<[u8; 7]>::MAX_LEN, usize::MAX >> 4);
    }

    #[test]
    fn test_bucketize() {
        assert_eq!(bucketize(0, 1), (0, 0));
        assert_eq!(bucketize(1, 1), (0, 1));
        assert_eq!(bucketize(2, 1), (1, 0));
        assert_eq!(bucketize(3, 1), (1, 1));
        assert_eq!(bucketize(4, 1), (2, 0));
        assert_eq!(bucketize(5, 1), (2, 1));
        assert_eq!(bucketize(6, 1), (2, 2));
        assert_eq!(bucketize(7, 1), (2, 3));
        assert_eq!(bucketize(8, 1), (3, 0));
        assert_eq!(bucketize(9, 1), (3, 1));
        assert_eq!(bucketize(10, 1), (3, 2));

        assert_eq!(bucketize(0, 2), (0, 0));
        assert_eq!(bucketize(1, 2), (0, 1));
        assert_eq!(bucketize(2, 2), (0, 2));
        assert_eq!(bucketize(3, 2), (0, 3));
        assert_eq!(bucketize(4, 2), (2, 0));
        assert_eq!(bucketize(5, 2), (2, 1));
        assert_eq!(bucketize(6, 2), (2, 2));
        assert_eq!(bucketize(7, 2), (2, 3));
        assert_eq!(bucketize(8, 2), (3, 0));
        assert_eq!(bucketize(9, 2), (3, 1));
        assert_eq!(bucketize(10, 2), (3, 2));

        assert_eq!(bucketize(0, 3), (0, 0));
        assert_eq!(bucketize(1, 3), (0, 1));
        assert_eq!(bucketize(2, 3), (0, 2));
        assert_eq!(bucketize(3, 3), (0, 3));
        assert_eq!(bucketize(4, 3), (0, 4));
        assert_eq!(bucketize(5, 3), (0, 5));
        assert_eq!(bucketize(6, 3), (0, 6));
        assert_eq!(bucketize(7, 3), (0, 7));
        assert_eq!(bucketize(8, 3), (3, 0));
        assert_eq!(bucketize(9, 3), (3, 1));
        assert_eq!(bucketize(10, 3), (3, 2));
    }

    #[test]
    fn test_bucket_len() {
        assert_eq!(bucket_len(0, 1), 2);
        assert_eq!(bucket_len(1, 1), 2);
        assert_eq!(bucket_len(2, 1), 4);
        assert_eq!(bucket_len(3, 1), 8);
        assert_eq!(bucket_len(4, 1), 16);
        assert_eq!(bucket_len(5, 1), 32);

        assert_eq!(bucket_len(0, 2), 4);
        assert_eq!(bucket_len(2, 2), 4);
        assert_eq!(bucket_len(3, 2), 8);
        assert_eq!(bucket_len(4, 2), 16);
        assert_eq!(bucket_len(5, 2), 32);

        assert_eq!(bucket_len(0, 3), 8);
        assert_eq!(bucket_len(3, 3), 8);
        assert_eq!(bucket_len(4, 3), 16);
        assert_eq!(bucket_len(5, 3), 32);
    }

    #[test]
    #[should_panic(expected = "Invalid bucket between 0 and bucket_offset")]
    fn test_invalid_bucket_len() {
        let _ = bucket_len(1, 2);
    }

    #[test]
    #[should_panic(expected = "AppendVec: requested capacity is too large for the given type")]
    fn test_with_overlarge_capacity() {
        let _ = AppendVec::<u8>::with_capacity(usize::MAX);
    }

    #[test]
    fn test_push_index() {
        let v = AppendVec::new();
        for i in 0..100 {
            assert_eq!(v.push(i), i);
        }
        for i in 0..100 {
            assert_eq!(v[i], i);
        }
    }

    #[test]
    fn test_push_mut_index() {
        let mut v = AppendVec::new();
        for i in 0..100 {
            assert_eq!(v.push_mut(i), i);
        }
        for i in 0..100 {
            assert_eq!(v[i], i);
        }
    }

    #[test]
    fn test_push_slice_index() {
        let v = AppendVec::new();
        for len in 0..10 {
            let mut prev = 0..0;
            for i in 0..100 {
                let data = vec![i; len];
                let index = v.push_slice(&data);
                assert!(index.start >= prev.end);
                assert_eq!(index.end - index.start, len);
                prev = index.clone();
                assert_eq!(&v[index], &data);
            }
        }
    }

    #[test]
    fn test_push_slice_mut_index() {
        let mut v = AppendVec::new();
        for len in 0..10 {
            let mut prev = 0..0;
            for i in 0..100 {
                let data = vec![i; len];
                let index = v.push_slice_mut(&data);
                assert!(index.start >= prev.end);
                assert_eq!(index.end - index.start, len);
                prev = index.clone();
                assert_eq!(&v[index], &data);
            }
        }
    }

    #[test]
    fn test_push_slice_padding() {
        for len in 1..100 {
            let v = AppendVec::new();
            let range = v.push_slice(&vec![42; len]);
            assert_eq!(range.end - range.start, len);

            let bucket_start = if len <= 2 {
                0
            } else {
                let bucket = (len - 1).ilog2() + 1;
                1 << bucket
            };
            assert_eq!(range.start, bucket_start);
            for i in 0..range.start {
                assert_eq!(v[i], 0);
            }
        }
    }

    const NUM_READERS: usize = 4;
    const NUM_WRITERS: usize = 4;
    #[cfg(not(miri))]
    const NUM_ITEMS: usize = 1_000_000;
    #[cfg(miri)]
    const NUM_ITEMS: usize = 100;

    #[test]
    fn test_push_index_concurrent_reads() {
        let v: AppendVec<Box<usize>> = AppendVec::new();
        thread::scope(|s| {
            for _ in 0..NUM_READERS {
                s.spawn(|| {
                    loop {
                        let len = v.len();
                        if len > 0 {
                            let last = len - 1;
                            assert_eq!(*v[last].deref(), last);
                            if len == NUM_ITEMS {
                                break;
                            }
                        }
                    }
                });
            }
            s.spawn(|| {
                for j in 0..NUM_ITEMS {
                    assert_eq!(v.push(Box::new(j)), j);
                }
            });
        });
    }

    #[test]
    fn test_push_index_concurrent_writes() {
        let v: AppendVec<Box<usize>> = AppendVec::new();
        thread::scope(|s| {
            s.spawn(|| {
                loop {
                    let len = v.len();
                    if len > 0 {
                        let last = len - 1;
                        assert!(*v[last].deref() <= last);
                        if len == NUM_WRITERS * NUM_ITEMS {
                            break;
                        }
                    }
                }
            });
            for _ in 0..NUM_WRITERS {
                s.spawn(|| {
                    for j in 0..NUM_ITEMS {
                        assert!(v.push(Box::new(j)) >= j);
                    }
                });
            }
        });
    }

    #[test]
    fn test_push_index_concurrent_readwrites() {
        let v: AppendVec<Box<usize>> = AppendVec::new();
        thread::scope(|s| {
            for _ in 0..NUM_READERS {
                s.spawn(|| {
                    loop {
                        let len = v.len();
                        if len > 0 {
                            let last = len - 1;
                            assert!(*v[last].deref() <= last);
                            if len == NUM_WRITERS * NUM_ITEMS {
                                break;
                            }
                        }
                    }
                });
            }
            for _ in 0..NUM_WRITERS {
                s.spawn(|| {
                    for j in 0..NUM_ITEMS {
                        assert!(v.push(Box::new(j)) >= j);
                    }
                });
            }
        });
    }

    #[test]
    fn test_iter() {
        let v: AppendVec<Box<usize>> = AppendVec::new();
        thread::scope(|s| {
            for _ in 0..NUM_READERS {
                s.spawn(|| {
                    loop {
                        let iter = v.iter();
                        let len = iter.len();
                        for (i, x) in iter.enumerate() {
                            assert_eq!(i, **x);
                        }
                        if len == NUM_ITEMS {
                            break;
                        }
                    }
                });
            }
            s.spawn(|| {
                for j in 0..NUM_ITEMS {
                    assert_eq!(v.push(Box::new(j)), j);
                }
            });
        });
    }

    #[test]
    fn test_iter_with_some_capacity() {
        let v: AppendVec<Box<usize>> = AppendVec::with_capacity(NUM_ITEMS / 3);
        thread::scope(|s| {
            for _ in 0..NUM_READERS {
                s.spawn(|| {
                    loop {
                        let iter = v.iter();
                        let len = iter.len();
                        for (i, x) in iter.enumerate() {
                            assert_eq!(i, **x);
                        }
                        if len == NUM_ITEMS {
                            break;
                        }
                    }
                });
            }
            s.spawn(|| {
                for j in 0..NUM_ITEMS {
                    assert_eq!(v.push(Box::new(j)), j);
                }
            });
        });
    }
1280
1281    #[test]
1282    fn test_push_slice_index_concurrent_reads() {
1283        const SLICE_LEN: usize = 7;
1284
1285        let v: AppendVec<usize> = AppendVec::new();
1286        thread::scope(|s| {
1287            for _ in 0..NUM_READERS {
1288                s.spawn(|| {
1289                    loop {
1290                        let len = v.len();
1291                        if len > 0 {
1292                            let last = len - SLICE_LEN;
1293                            let slice = &v[last..len];
1294                            assert!(slice[0] * SLICE_LEN <= last);
1295                            for i in 1..SLICE_LEN {
1296                                assert_eq!(slice[i], slice[0]);
1297                            }
1298                            if len >= SLICE_LEN * NUM_ITEMS {
1299                                break;
1300                            }
1301                        }
1302                    }
1303                });
1304            }
1305            s.spawn(|| {
1306                for j in 0..NUM_ITEMS {
1307                    assert!(v.push_slice(&[j; SLICE_LEN]).start >= SLICE_LEN * j);
1308                }
1309            });
1310        });
1311    }
1312
    #[test]
    fn test_push_slice_index_concurrent_writes() {
        const SLICE_LEN: usize = 7;

        let v: AppendVec<usize> = AppendVec::new();
        thread::scope(|s| {
            s.spawn(|| {
                loop {
                    let len = v.len();
                    if len > 0 {
                        let last = len - SLICE_LEN;
                        let slice = &v[last..len];
                        assert!(slice[0] * SLICE_LEN <= last);
                        for i in 1..SLICE_LEN {
                            assert_eq!(slice[i], slice[0]);
                        }
                        if len >= NUM_WRITERS * SLICE_LEN * NUM_ITEMS {
                            break;
                        }
                    }
                }
            });
            for _ in 0..NUM_WRITERS {
                s.spawn(|| {
                    for j in 0..NUM_ITEMS {
                        assert!(v.push_slice(&[j; SLICE_LEN]).start >= SLICE_LEN * j);
                    }
                });
            }
        });
    }

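    // Many readers and many writers at once: every observed slice must hold
    // SLICE_LEN copies of a single value, whichever writer produced it.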
    #[test]
    fn test_push_slice_index_concurrent_readwrites() {
        const SLICE_LEN: usize = 7;

        let v: AppendVec<usize> = AppendVec::new();
        thread::scope(|s| {
            for _ in 0..NUM_READERS {
                s.spawn(|| {
                    loop {
                        let len = v.len();
                        if len > 0 {
                            let last = len - SLICE_LEN;
                            let slice = &v[last..len];
                            assert!(slice[0] * SLICE_LEN <= last);
                            for i in 1..SLICE_LEN {
                                assert_eq!(slice[i], slice[0]);
                            }
                            if len >= NUM_WRITERS * SLICE_LEN * NUM_ITEMS {
                                break;
                            }
                        }
                    }
                });
            }
            for _ in 0..NUM_WRITERS {
                s.spawn(|| {
                    for j in 0..NUM_ITEMS {
                        assert!(v.push_slice(&[j; SLICE_LEN]).start >= SLICE_LEN * j);
                    }
                });
            }
        });
    }

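    // Same read/write workload as above, starting from a small pre-allocated
    // capacity so that most of the growth happens under concurrency.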
    #[test]
    fn test_with_some_capacity() {
        const SLICE_LEN: usize = 7;

        let v: AppendVec<usize> = AppendVec::with_capacity(SLICE_LEN);
        thread::scope(|s| {
            for _ in 0..NUM_READERS {
                s.spawn(|| {
                    loop {
                        let len = v.len();
                        if len > 0 {
                            let last = len - SLICE_LEN;
                            let slice = &v[last..len];
                            assert!(slice[0] * SLICE_LEN <= last);
                            for i in 1..SLICE_LEN {
                                assert_eq!(slice[i], slice[0]);
                            }
                            if len >= NUM_WRITERS * SLICE_LEN * NUM_ITEMS {
                                break;
                            }
                        }
                    }
                });
            }
            for _ in 0..NUM_WRITERS {
                s.spawn(|| {
                    for j in 0..NUM_ITEMS {
                        assert!(v.push_slice(&[j; SLICE_LEN]).start >= SLICE_LEN * j);
                    }
                });
            }
        });
    }

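    // With the full capacity reserved up front, no growth should be needed
    // during the concurrent phase, and the final length is checked exactly.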
    #[test]
    fn test_with_enough_capacity() {
        const SLICE_LEN: usize = 7;

        let v: AppendVec<usize> = AppendVec::with_capacity(NUM_WRITERS * SLICE_LEN * NUM_ITEMS);
        thread::scope(|s| {
            for _ in 0..NUM_READERS {
                s.spawn(|| {
                    loop {
                        let len = v.len();
                        if len > 0 {
                            let last = len - SLICE_LEN;
                            let slice = &v[last..len];
                            assert!(slice[0] * SLICE_LEN <= last);
                            for i in 1..SLICE_LEN {
                                assert_eq!(slice[i], slice[0]);
                            }
                            if len == NUM_WRITERS * SLICE_LEN * NUM_ITEMS {
                                break;
                            }
                        }
                    }
                });
            }
            for _ in 0..NUM_WRITERS {
                s.spawn(|| {
                    for j in 0..NUM_ITEMS {
                        assert!(v.push_slice(&[j; SLICE_LEN]).start >= SLICE_LEN * j);
                    }
                });
            }
        });
        assert_eq!(v.len(), NUM_WRITERS * SLICE_LEN * NUM_ITEMS);
    }

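    // Same single-writer scenario as the indexing tests, but readers access
    // the last element via the unsafe get_unchecked() accessor.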
    #[test]
    fn test_get_unchecked_concurrent_reads() {
        let v: AppendVec<Box<usize>> = AppendVec::new();
        thread::scope(|s| {
            for _ in 0..NUM_READERS {
                s.spawn(|| {
                    loop {
                        let len = v.len();
                        if len > 0 {
                            let last = len - 1;
                            // SAFETY: `last < v.len()`, because `len > 0` and
                            // the length of an AppendVec never decreases.
                            let x = unsafe { v.get_unchecked(last) };
                            assert_eq!(*x.deref(), last);
                            if len == NUM_ITEMS {
                                break;
                            }
                        }
                    }
                });
            }
            s.spawn(|| {
                for j in 0..NUM_ITEMS {
                    assert_eq!(v.push(Box::new(j)), j);
                }
            });
        });
    }

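    // One reader uses get_unchecked() while NUM_WRITERS threads push
    // concurrently; writers interleave, so only the bound value <= index holds.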
    #[test]
    fn test_get_unchecked_concurrent_writes() {
        let v: AppendVec<Box<usize>> = AppendVec::new();
        thread::scope(|s| {
            s.spawn(|| {
                loop {
                    let len = v.len();
                    if len > 0 {
                        let last = len - 1;
                        // SAFETY: `last < v.len()`, because `len > 0` and the
                        // length of an AppendVec never decreases.
                        let x = unsafe { v.get_unchecked(last) };
                        assert!(*x.deref() <= last);
                        if len == NUM_WRITERS * NUM_ITEMS {
                            break;
                        }
                    }
                }
            });
            for _ in 0..NUM_WRITERS {
                s.spawn(|| {
                    for j in 0..NUM_ITEMS {
                        assert!(v.push(Box::new(j)) >= j);
                    }
                });
            }
        });
    }

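    // Combined case: many get_unchecked() readers race with many writers.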
    #[test]
    fn test_get_unchecked_concurrent_readwrites() {
        let v: AppendVec<Box<usize>> = AppendVec::new();
        thread::scope(|s| {
            for _ in 0..NUM_READERS {
                s.spawn(|| {
                    loop {
                        let len = v.len();
                        if len > 0 {
                            let last = len - 1;
                            // SAFETY: `last < v.len()`, because `len > 0` and
                            // the length of an AppendVec never decreases.
                            let x = unsafe { v.get_unchecked(last) };
                            assert!(*x.deref() <= last);
                            if len == NUM_WRITERS * NUM_ITEMS {
                                break;
                            }
                        }
                    }
                });
            }
            for _ in 0..NUM_WRITERS {
                s.spawn(|| {
                    for j in 0..NUM_ITEMS {
                        assert!(v.push(Box::new(j)) >= j);
                    }
                });
            }
        });
    }
}