Skip to main content

virtual_buffer/concurrent/
vec.rs

1//! A concurrent, in-place growable vector.
2
3use crate::{
4    Allocation, SizedTypeProperties, align_up, assert_unsafe_precondition, page_size,
5    vec::{
6        GrowthStrategy, TryReserveError,
7        TryReserveErrorKind::{AllocError, CapacityOverflow},
8        capacity_overflow, handle_error,
9    },
10};
11use core::{
12    alloc::Layout,
13    borrow::{Borrow, BorrowMut},
14    cell::UnsafeCell,
15    cmp, fmt,
16    hash::{Hash, Hasher},
17    iter::FusedIterator,
18    marker::PhantomData,
19    mem::{ManuallyDrop, MaybeUninit},
20    ops::{Deref, DerefMut, Index, IndexMut},
21    panic::RefUnwindSafe,
22    ptr,
23    slice::{self, SliceIndex},
24    sync::atomic::{
25        AtomicBool, AtomicUsize,
26        Ordering::{Acquire, Relaxed, Release},
27    },
28};
29
30pub mod raw;
31
/// Used to build a new vector.
///
/// This struct is created by the [`builder`] method on [`Vec`].
///
/// [`builder`]: Vec::builder
#[derive(Clone, Copy, Debug)]
pub struct VecBuilder {
    // Hard upper bound on the number of elements; fixed when the builder is created.
    max_capacity: usize,
    // Initially committed capacity requested via `capacity()`; defaults to 0.
    capacity: usize,
    // Strategy used to pick the new capacity when the vector grows.
    growth_strategy: GrowthStrategy,
}
43
impl VecBuilder {
    /// Creates a builder with the given maximum capacity, zero initial capacity, and the
    /// default growth strategy.
    #[inline]
    const fn new(max_capacity: usize) -> Self {
        VecBuilder {
            max_capacity,
            capacity: 0,
            growth_strategy: GrowthStrategy::new(),
        }
    }

    /// The built `Vec` will have the minimum capacity required for `capacity` elements.
    ///
    /// The capacity can be greater due to the alignment to the [page size].
    ///
    /// [page size]: crate#pages
    #[inline]
    pub const fn capacity(&mut self, capacity: usize) -> &mut Self {
        self.capacity = capacity;

        self
    }

    /// The built `Vec` will have the given `growth_strategy`.
    ///
    /// # Panics
    ///
    /// Panics if `growth_strategy` isn't valid per the documentation of [`GrowthStrategy`].
    #[inline]
    #[track_caller]
    pub const fn growth_strategy(&mut self, growth_strategy: GrowthStrategy) -> &mut Self {
        // Validate eagerly so an invalid strategy panics at the builder call site, not at
        // build time.
        growth_strategy.validate();

        self.growth_strategy = growth_strategy;

        self
    }

    /// Builds the `Vec`.
    ///
    /// # Panics
    ///
    /// - Panics if the `max_capacity` would exceed `isize::MAX` bytes.
    /// - Panics if the `capacity` is greater than the `max_capacity`.
    /// - Panics if [reserving] the allocation returns an error.
    ///
    /// [reserving]: crate#reserving
    #[must_use]
    #[track_caller]
    pub fn build<T>(&self) -> Vec<T> {
        match self.try_build() {
            Ok(vec) => vec,
            Err(err) => handle_error(err),
        }
    }

    /// Tries to build the `Vec`, returning an error when allocation fails.
    ///
    /// # Errors
    ///
    /// - Returns an error if the `max_capacity` would exceed `isize::MAX` bytes.
    /// - Returns an error if the `capacity` is greater than the `max_capacity`.
    /// - Returns an error if [reserving] the allocation returns an error.
    ///
    /// [reserving]: crate#reserving
    pub fn try_build<T>(&self) -> Result<Vec<T>, TryReserveError> {
        Ok(Vec {
            inner: RawVec {
                // NOTE(review): `RawVecInner::new`'s safety contract lives in the `raw`
                // module; presumably the zero-sized header layout (`Layout::new::<()>()`)
                // plus `T`'s size/align satisfies it here — confirm against
                // `raw::RawVecInner::new`.
                inner: unsafe {
                    RawVecInner::new(
                        self.max_capacity,
                        self.capacity,
                        self.growth_strategy,
                        Layout::new::<()>(),
                        size_of::<T>(),
                        align_of::<T>(),
                    )
                }?,
                marker: PhantomData,
            },
        })
    }
}
126
/// A concurrent, in-place growable vector.
///
/// This type behaves similarly to the standard library `Vec` except that it is guaranteed to never
/// reallocate, and as such can support concurrent reads while also supporting growth. The vector
/// grows similarly to the standard library vector, but instead of reallocating, it commits more
/// memory.
///
/// All operations are lock-free. In order to support this, each element consists of a `T` and an
/// `AtomicBool` used to denote whether the element has been initialized. That means that there is
/// a memory overhead equal to `align_of::<T>()`. You can avoid this overhead if you have a byte of
/// memory to spare in your `T` or by packing the bit into an existing atomic field in `T` by using
/// [`RawVec`].
///
/// If you don't specify a [growth strategy], exponential growth with a growth factor of 2 is used,
/// which is the same strategy that the standard library `Vec` uses.
pub struct Vec<T> {
    /// ```compile_fail,E0597
    /// let vec = virtual_buffer::concurrent::vec::Vec::<&'static str>::new(1);
    /// {
    ///     let s = "oh no".to_owned();
    ///     vec.push(&s);
    /// }
    /// dbg!(vec);
    /// ```
    // The compile_fail doctest above guards variance/lifetime soundness: pushing a borrow
    // that doesn't outlive the vector must not compile.
    inner: RawVec<Slot<T>>,
}
153
// SAFETY: `Vec` is an owned collection, which makes it safe to send to another thread as long as
// its element is safe to send to another thread.
unsafe impl<T: Send> Send for Vec<T> {}
157
// SAFETY: `Vec` allows pushing through a shared reference, which allows a shared `Vec` to be used
// to send elements to another thread (hence `T: Send`). Additionally, `Vec` allows getting a
// reference to any element from any thread (hence `T: Sync`). Therefore, it is safe to share
// `Vec` between threads as long as the element is both sendable and shareable.
unsafe impl<T: Send + Sync> Sync for Vec<T> {}
163
impl Vec<()> {
    /// Returns a builder for a new `Vec`.
    ///
    /// `max_capacity` is the maximum capacity the vector can ever have. The vector is guaranteed
    /// to never exceed this capacity. The capacity can be excessively huge, as none of the memory
    /// is [committed] until you push elements into the vector or reserve capacity.
    ///
    /// This function is implemented on `Vec<()>` so that you don't have to import the `VecBuilder`
    /// type too. The type parameter has no relation to the built `Vec`.
    ///
    /// [committed]: crate#committing
    #[inline]
    #[must_use]
    pub const fn builder(max_capacity: usize) -> VecBuilder {
        // All configuration happens on the returned builder; `build::<T>()` picks the
        // element type.
        VecBuilder::new(max_capacity)
    }
}
181
182impl<T> Vec<T> {
183    /// Creates a new `Vec`.
184    ///
185    /// `max_capacity` is the maximum capacity the vector can ever have. The vector is guaranteed
186    /// to never exceed this capacity. The capacity can be excessively huge, as none of the memory
187    /// is [committed] until you push elements into the vector or reserve capacity.
188    ///
189    /// See the [`builder`] function for more creation options.
190    ///
191    /// # Panics
192    ///
193    /// - Panics if the `max_capacity` would exceed `isize::MAX` bytes.
194    /// - Panics if [reserving] the allocation returns an error.
195    ///
196    /// [committed]: crate#committing
197    /// [`builder`]: Self::builder
198    /// [reserving]: crate#reserving
199    #[must_use]
200    #[track_caller]
201    pub fn new(max_capacity: usize) -> Self {
202        Vec {
203            inner: unsafe { RawVec::new(max_capacity) },
204        }
205    }
206
207    /// Creates a dangling `Vec`.
208    ///
209    /// This is useful as a placeholder value to defer allocation until later or if no allocation
210    /// is needed.
211    #[inline]
212    #[must_use]
213    pub const fn dangling() -> Self {
214        Vec {
215            inner: RawVec::dangling(),
216        }
217    }
218
219    /// Returns the maximum capacity that was used when creating the `Vec`.
220    #[inline]
221    #[must_use]
222    pub fn max_capacity(&self) -> usize {
223        self.inner.max_capacity()
224    }
225
226    /// Returns the total number of elements the vector can hold without [committing] more memory.
227    ///
228    /// [committing]: crate#committing
229    #[inline]
230    #[must_use]
231    pub fn capacity(&self) -> usize {
232        self.inner.capacity()
233    }
234
235    /// Returns the number of elements in the vector.
236    ///
237    /// This number may exceed [`capacity`], doesn't correspond to the number of initialized
238    /// elements, and also doesn't synchronize with setting the capacity.
239    ///
240    /// [`capacity`]: Self::capacity
241    #[inline]
242    #[must_use]
243    pub fn len(&self) -> usize {
244        self.inner.len()
245    }
246
247    /// Returns `true` if the vector contains no elements.
248    ///
249    /// This counts elements that failed to be pushed due to an error.
250    #[inline]
251    #[must_use]
252    pub fn is_empty(&self) -> bool {
253        self.inner.is_empty()
254    }
255
256    /// Appends an element to the end of the vector. Returns the index of the inserted element.
257    #[inline]
258    #[track_caller]
259    pub fn push(&self, value: T) -> usize {
260        self.push_with(|_| value)
261    }
262
263    /// Appends an element to the end of the vector.
264    ///
265    /// `f` is called with the index of the element and the result is written to the element.
266    /// Returns the index of the element.
267    #[inline]
268    #[track_caller]
269    pub fn push_with(&self, f: impl FnOnce(usize) -> T) -> usize {
270        let (index, slot) = self.inner.push();
271
272        // SAFETY: `RawVec::push` guarantees that the slot is unique for each push.
273        unsafe { &mut *slot.value.get() }.write(f(index));
274
275        // SAFETY: We have written the element above.
276        unsafe { slot.occupied.store(true, Release) };
277
278        index
279    }
280
281    /// Appends an element to the end of the vector. Returns the index of the inserted element.
282    #[inline]
283    #[track_caller]
284    pub fn push_mut(&mut self, value: T) -> usize {
285        self.push_with_mut(|_| value)
286    }
287
288    /// Appends an element to the end of the vector.
289    ///
290    /// `f` is called with the index of the element and the result is written to the element.
291    /// Returns the index of the element.
292    #[inline]
293    #[track_caller]
294    pub fn push_with_mut(&mut self, f: impl FnOnce(usize) -> T) -> usize {
295        let (index, slot) = self.inner.push_mut();
296
297        slot.value.get_mut().write(f(index));
298
299        // SAFETY: We have written the element above.
300        unsafe { *slot.occupied.get_mut() = true };
301
302        index
303    }
304
305    /// Returns a reference to an element of the vector.
306    #[inline]
307    #[must_use]
308    pub fn get(&self, index: usize) -> Option<&T> {
309        self.inner.as_capacity().get(index)?.value()
310    }
311
312    /// Returns a mutable reference to an element of the vector.
313    #[inline]
314    #[must_use]
315    pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
316        self.inner.as_mut_capacity().get_mut(index)?.value_mut()
317    }
318
319    /// Returns a reference to an element of the vector without doing any checks.
320    ///
321    /// # Safety
322    ///
323    /// `index` must have been previously acquired through [`push`] or [`push_mut`] on `self`.
324    ///
325    /// [`push`]: Self::push
326    /// [`push_mut`]: Self::push_mut
327    #[inline]
328    #[must_use]
329    pub unsafe fn get_unchecked(&self, index: usize) -> &T {
330        // SAFETY: The caller must ensure that the index is in bounds.
331        let slot = unsafe { self.inner.as_capacity().get_unchecked(index) };
332
333        let occupied = slot.occupied.load(Acquire);
334
335        assert_unsafe_precondition!(
336            occupied,
337            "`Vec::get_unchecked` requires that `index` refers to an initialized element",
338        );
339
340        // SAFETY: The caller must ensure that the slot has been initialized. The `Acquire`
341        // ordering above synchronizes with the `Release` ordering in `push`, making sure that the
342        // write is visible here.
343        unsafe { slot.value_unchecked() }
344    }
345
346    /// Returns a mutable reference to an element of the vector without doing any checks.
347    ///
348    /// # Safety
349    ///
350    /// `index` must have been previously acquired through [`push`] or [`push_mut`] on `self`.
351    ///
352    /// [`push`]: Self::push
353    /// [`push_mut`]: Self::push_mut
354    #[inline]
355    #[must_use]
356    pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut T {
357        // SAFETY: The caller must ensure that the index is in bounds.
358        let slot = unsafe { self.inner.as_mut_capacity().get_unchecked_mut(index) };
359
360        assert_unsafe_precondition!(
361            *slot.occupied.get_mut(),
362            "`Vec::get_unchecked` requires that `index` refers to an initialized element",
363        );
364
365        // SAFETY: The caller must ensure that the slot has been initialized.
366        unsafe { slot.value_unchecked_mut() }
367    }
368
369    /// Returns an iterator that yields references to elements of the vector.
370    #[inline]
371    pub fn iter(&self) -> Iter<'_, T> {
372        Iter::new(self)
373    }
374
375    /// Returns an iterator that yields mutable references to elements of the vector.
376    #[inline]
377    pub fn iter_mut(&mut self) -> IterMut<'_, T> {
378        IterMut {
379            inner: self.inner.iter_mut(),
380        }
381    }
382
383    /// Reserves capacity for at least `additional` more elements.
384    ///
385    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
386    /// standard library vector.
387    ///
388    /// The new capacity is at least `self.len() + additional`. If this capacity is below the one
389    /// calculated using the [growth strategy], the latter is used. Does nothing if the capacity is
390    /// already sufficient.
391    ///
392    /// # Panics
393    ///
394    /// - Panics if `self.len() + additional` would exceed `self.max_capacity()`.
395    /// - Panics if [committing] the new capacity fails.
396    ///
397    /// [reserving virtual memory]: crate#reserving
398    /// [growth strategy]: GrowthStrategy
399    /// [committing]: crate#committing
400    #[track_caller]
401    pub fn reserve(&self, additional: usize) {
402        self.inner.reserve(additional);
403    }
404
405    /// Reserves the minimum capacity required for `additional` more elements.
406    ///
407    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
408    /// standard library vector.
409    ///
410    /// The new capacity is at least `self.len() + additional`. The [growth strategy] is ignored,
411    /// but the new capacity can still be greater due to the alignment to the [page size]. Does
412    /// nothing if the capacity is already sufficient.
413    ///
414    /// # Panics
415    ///
416    /// - Panics if `self.len() + additional` would exceed `self.max_capacity()`.
417    /// - Panics if [committing] the new capacity fails.
418    ///
419    /// [reserving virtual memory]: crate#reserving
420    /// [growth strategy]: GrowthStrategy
421    /// [page size]: crate#pages
422    /// [committing]: crate#committing
423    #[track_caller]
424    pub fn reserve_exact(&self, additional: usize) {
425        self.inner.reserve_exact(additional);
426    }
427
428    /// Tries to reserve capacity for at least `additional` more elements, returning an error on
429    /// failure.
430    ///
431    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
432    /// standard library vector.
433    ///
434    /// The new capacity is at least `self.len() + additional`. If this capacity is below the one
435    /// calculated using the [growth strategy], the latter is used. Does nothing if the capacity is
436    /// already sufficient.
437    ///
438    /// # Errors
439    ///
440    /// - Returns an error if `self.len() + additional` would exceed `self.max_capacity()`.
441    /// - Returns an error if [committing] the new capacity fails.
442    ///
443    /// [reserving virtual memory]: crate#reserving
444    /// [growth strategy]: GrowthStrategy
445    /// [committing]: crate#committing
446    pub fn try_reserve(&self, additional: usize) -> Result<(), TryReserveError> {
447        self.inner.try_reserve(additional)
448    }
449
450    /// Tries to reserve the minimum capacity required for `additional` more elements, returning an
451    /// error on failure.
452    ///
453    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
454    /// standard library vector.
455    ///
456    /// The new capacity is at least `self.len() + additional`. The [growth strategy] is ignored,
457    /// but the new capacity can still be greater due to the alignment to the [page size]. Does
458    /// nothing if the capacity is already sufficient.
459    ///
460    /// # Errors
461    ///
462    /// - Returns an error if `self.len() + additional` would exceed `self.max_capacity()`.
463    /// - Returns an error if [committing] the new capacity fails.
464    ///
465    /// [reserving virtual memory]: crate#reserving
466    /// [growth strategy]: GrowthStrategy
467    /// [page size]: crate#pages
468    /// [committing]: crate#committing
469    pub fn try_reserve_exact(&self, additional: usize) -> Result<(), TryReserveError> {
470        self.inner.try_reserve_exact(additional)
471    }
472}
473
// Identity conversion: lets generic code take `impl AsRef<Vec<T>>`.
impl<T> AsRef<Vec<T>> for Vec<T> {
    #[inline]
    fn as_ref(&self) -> &Vec<T> {
        self
    }
}
480
// Identity conversion: lets generic code take `impl AsMut<Vec<T>>`.
impl<T> AsMut<Vec<T>> for Vec<T> {
    #[inline]
    fn as_mut(&mut self) -> &mut Vec<T> {
        self
    }
}
487
488impl<T: fmt::Debug> fmt::Debug for Vec<T> {
489    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
490        fmt::Debug::fmt(&self.inner, f)
491    }
492}
493
494impl<T: PartialEq<U>, U> PartialEq<Vec<U>> for Vec<T> {
495    #[inline]
496    fn eq(&self, other: &Vec<U>) -> bool {
497        self.inner == other.inner
498    }
499}
500
501impl<T: PartialEq<U>, U> PartialEq<&[U]> for Vec<T> {
502    #[inline]
503    fn eq(&self, other: &&[U]) -> bool {
504        *self == **other
505    }
506}
507
508impl<T: PartialEq<U>, U> PartialEq<&mut [U]> for Vec<T> {
509    #[inline]
510    fn eq(&self, other: &&mut [U]) -> bool {
511        *self == **other
512    }
513}
514
515impl<T: PartialEq<U>, U> PartialEq<Vec<U>> for &[T] {
516    #[inline]
517    fn eq(&self, other: &Vec<U>) -> bool {
518        **self == *other
519    }
520}
521
522impl<T: PartialEq<U>, U> PartialEq<Vec<U>> for &mut [T] {
523    #[inline]
524    fn eq(&self, other: &Vec<U>) -> bool {
525        **self == *other
526    }
527}
528
529impl<T: PartialEq<U>, U> PartialEq<[U]> for Vec<T> {
530    #[inline]
531    fn eq(&self, other: &[U]) -> bool {
532        let len = self.len();
533
534        if len != other.len() {
535            return false;
536        }
537
538        #[allow(clippy::needless_range_loop)]
539        for index in 0..len {
540            let Some(elem) = self.get(index) else {
541                return false;
542            };
543
544            if *elem != other[index] {
545                return false;
546            }
547        }
548
549        true
550    }
551}
552
553impl<T: PartialEq<U>, U> PartialEq<Vec<U>> for [T] {
554    #[inline]
555    fn eq(&self, other: &Vec<U>) -> bool {
556        let len = other.len();
557
558        if len != self.len() {
559            return false;
560        }
561
562        #[allow(clippy::needless_range_loop)]
563        for index in 0..len {
564            let Some(elem) = other.get(index) else {
565                return false;
566            };
567
568            if self[index] != *elem {
569                return false;
570            }
571        }
572
573        true
574    }
575}
576
// Only available with the `alloc` feature, since `Cow` lives in the `alloc` crate.
#[cfg(feature = "alloc")]
impl<T: PartialEq<U> + Clone, U> PartialEq<Vec<U>> for alloc::borrow::Cow<'_, [T]> {
    #[inline]
    fn eq(&self, other: &Vec<U>) -> bool {
        // Deref the `Cow` to `[T]` and reuse the slice-vs-vector comparison.
        **self == *other
    }
}
584
585impl<T: PartialEq<U>, U, const N: usize> PartialEq<[U; N]> for Vec<T> {
586    #[inline]
587    fn eq(&self, other: &[U; N]) -> bool {
588        *self == *other.as_slice()
589    }
590}
591
592impl<T: PartialEq<U>, U, const N: usize> PartialEq<&[U; N]> for Vec<T> {
593    #[inline]
594    fn eq(&self, other: &&[U; N]) -> bool {
595        *self == *other.as_slice()
596    }
597}
598
// Equality is delegated element-wise, so it is a full equivalence whenever `T: Eq`.
impl<T: Eq> Eq for Vec<T> {}
600
601impl<T> Extend<T> for Vec<T> {
602    #[inline]
603    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
604        for elem in iter {
605            self.push_mut(elem);
606        }
607    }
608}
609
610impl<'a, T: Copy> Extend<&'a T> for Vec<T> {
611    #[inline]
612    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
613        for &elem in iter {
614            self.push_mut(elem);
615        }
616    }
617}
618
619impl<T: Hash> Hash for Vec<T> {
620    #[inline]
621    fn hash<H: Hasher>(&self, state: &mut H) {
622        Hash::hash(&*self.inner, state);
623    }
624}
625
626impl<T> Index<usize> for Vec<T> {
627    type Output = T;
628
629    #[inline]
630    #[track_caller]
631    fn index(&self, index: usize) -> &Self::Output {
632        if let Some(value) = self.get(index) {
633            value
634        } else {
635            invalid_index(index)
636        }
637    }
638}
639
640impl<T> IndexMut<usize> for Vec<T> {
641    #[inline]
642    #[track_caller]
643    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
644        if let Some(value) = self.get_mut(index) {
645            value
646        } else {
647            invalid_index(index)
648        }
649    }
650}
651
/// Shared panic path for `Index`/`IndexMut`; kept cold and out of line so the happy path
/// stays small, with `#[track_caller]` pointing the panic at the indexing site.
#[cold]
#[inline(never)]
#[track_caller]
fn invalid_index(index: usize) -> ! {
    panic!("index {index} is out of bounds or not yet initialized");
}
658
659impl<'a, T> IntoIterator for &'a Vec<T> {
660    type Item = &'a T;
661
662    type IntoIter = Iter<'a, T>;
663
664    #[inline]
665    fn into_iter(self) -> Self::IntoIter {
666        self.iter()
667    }
668}
669
670impl<'a, T> IntoIterator for &'a mut Vec<T> {
671    type Item = &'a mut T;
672
673    type IntoIter = IterMut<'a, T>;
674
675    #[inline]
676    fn into_iter(self) -> Self::IntoIter {
677        self.iter_mut()
678    }
679}
680
impl<T> IntoIterator for Vec<T> {
    type Item = T;

    type IntoIter = IntoIter<T>;

    /// Consumes the vector, yielding its initialized elements by value.
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}
691
692impl<T: PartialOrd> PartialOrd for Vec<T> {
693    #[inline]
694    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
695        PartialOrd::partial_cmp(&self.inner, &other.inner)
696    }
697}
698
699impl<T: Ord> Ord for Vec<T> {
700    #[inline]
701    fn cmp(&self, other: &Self) -> cmp::Ordering {
702        Ord::cmp(&self.inner, &other.inner)
703    }
704}
705
/// A single element slot: an initialized-flag plus possibly-uninitialized storage.
struct Slot<T> {
    // `true` once `value` has been written. Set with `Release` in `Vec::push_with` so
    // that a reader observing `true` with `Acquire` also observes the written value.
    occupied: AtomicBool,
    // The element itself; only valid to read once `occupied` is `true`.
    value: UnsafeCell<MaybeUninit<T>>,
}
710
// While `Slot` does contain interior mutability, this is never exposed to the user and no
// invariants can be broken, so a `&Slot` crossing an unwind boundary is fine.
impl<T: RefUnwindSafe> RefUnwindSafe for Slot<T> {}
714
impl<T> Slot<T> {
    /// Returns a reference to the value, or `None` if the slot hasn't been initialized
    /// (or the initializing write isn't visible to this thread yet).
    #[inline(always)]
    fn value(&self) -> Option<&T> {
        if self.occupied.load(Acquire) {
            // SAFETY: We checked that the slot has been initialized above. The `Acquire` ordering
            // above synchronizes with the `Release` ordering in `push`, making sure that the write
            // is visible here.
            Some(unsafe { self.value_unchecked() })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the value, or `None` if the slot hasn't been
    /// initialized. `&mut self` guarantees exclusive access, so no atomics are needed.
    #[inline(always)]
    fn value_mut(&mut self) -> Option<&mut T> {
        if *self.occupied.get_mut() {
            // SAFETY: We checked that the slot has been initialized above.
            Some(unsafe { self.value_unchecked_mut() })
        } else {
            None
        }
    }

    /// Returns a reference to the value without checking the `occupied` flag.
    ///
    /// # Safety
    ///
    /// The slot must have been initialized, and that write must be visible to the calling
    /// thread (e.g. via an `Acquire` load of `occupied` that observed `true`).
    #[inline(always)]
    unsafe fn value_unchecked(&self) -> &T {
        // SAFETY: The caller must ensure that access to the cell's inner value is synchronized.
        let value = unsafe { &*self.value.get() };

        // SAFETY: The caller must ensure that the slot has been initialized.
        unsafe { value.assume_init_ref() }
    }

    /// Returns a mutable reference to the value without checking the `occupied` flag.
    ///
    /// # Safety
    ///
    /// The slot must have been initialized.
    #[inline(always)]
    unsafe fn value_unchecked_mut(&mut self) -> &mut T {
        // SAFETY: The caller must ensure that the slot has been initialized.
        unsafe { self.value.get_mut().assume_init_mut() }
    }
}
753
754impl<T: Clone> Clone for Slot<T> {
755    #[inline]
756    fn clone(&self) -> Self {
757        let value = self.value();
758
759        Slot {
760            occupied: AtomicBool::new(value.is_some()),
761            value: UnsafeCell::new(if let Some(value) = value {
762                MaybeUninit::new(value.clone())
763            } else {
764                MaybeUninit::uninit()
765            }),
766        }
767    }
768}
769
770impl<T: fmt::Debug> fmt::Debug for Slot<T> {
771    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
772        fmt::Debug::fmt(&self.value(), f)
773    }
774}
775
impl<T> Drop for Slot<T> {
    /// Drops the contained value only if it was ever initialized.
    fn drop(&mut self) {
        if *self.occupied.get_mut() {
            // SAFETY: We checked that the slot has been initialized above. We own the value, and
            // the slot is being dropped, which ensures that the value can't be accessed again.
            unsafe { self.value.get_mut().assume_init_drop() };
        }
    }
}
785
786impl<T: PartialEq<U>, U> PartialEq<Slot<U>> for Slot<T> {
787    #[allow(clippy::match_same_arms)]
788    #[inline]
789    fn eq(&self, other: &Slot<U>) -> bool {
790        match (self.value(), other.value()) {
791            (Some(value), Some(other_value)) => *value == *other_value,
792            (Some(_), None) => false,
793            (None, Some(_)) => false,
794            (None, None) => true,
795        }
796    }
797}
798
// Slot equality delegates to the contained values, so it is a full equivalence for `T: Eq`.
impl<T: Eq> Eq for Slot<T> {}
800
801impl<T: Hash> Hash for Slot<T> {
802    #[inline]
803    fn hash<H: Hasher>(&self, state: &mut H) {
804        self.value().hash(state);
805    }
806}
807
808impl<T: PartialOrd> PartialOrd for Slot<T> {
809    #[inline]
810    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
811        PartialOrd::partial_cmp(&self.value(), &other.value())
812    }
813}
814
815impl<T: Ord> Ord for Slot<T> {
816    #[inline]
817    fn cmp(&self, other: &Self) -> cmp::Ordering {
818        Ord::cmp(&self.value(), &other.value())
819    }
820}
821
/// An iterator that yields references to elements of a vector.
///
/// This struct is created by the [`iter`] method on [`Vec`].
///
/// [`iter`]: Vec::iter
pub struct Iter<'a, T> {
    // Front cursor; a type-erased `*const Slot<T>` (see `Iter::start()`).
    // Invariant: `start <= end`, both derived from the same allocation in `Iter::new`.
    start: *const (),
    // One-past-the-end cursor over the committed slots.
    end: *const (),
    // Ties the iterator's lifetime to the borrow of the `Vec` it reads from.
    marker: PhantomData<&'a T>,
}
832
// SAFETY: `Iter<'a, T>` is equivalent to `&'a [T]`, so it is `Send` exactly when a shared
// slice would be, i.e. when `T: Sync`.
unsafe impl<T: Sync> Send for Iter<'_, T> {}
835
// SAFETY: `Iter<'a, T>` is equivalent to `&'a [T]`, so sharing it shares only `&T` access.
unsafe impl<T: Sync> Sync for Iter<'_, T> {}
838
impl<T> Iter<'_, T> {
    /// Creates an iterator over the first `min(len, capacity)` slots of `vec`.
    #[inline]
    fn new(vec: &Vec<T>) -> Self {
        let start = vec.inner.as_ptr();

        // `len` may exceed `capacity` (see `Vec::len`), so clamp to the committed slots.
        let len = cmp::min(vec.len(), vec.capacity());

        // SAFETY: The `Acquire` ordering in `RawVec::capacity` synchronizes with the `Release`
        // ordering when setting the capacity, making sure that the newly committed memory is
        // visible here.
        let end = unsafe { start.add(len) };

        Iter {
            start: start.cast(),
            end: end.cast(),
            marker: PhantomData,
        }
    }

    /// The front cursor, restored to its element type.
    #[inline]
    fn start(&self) -> *const Slot<T> {
        self.start.cast()
    }

    /// The one-past-the-end cursor, restored to its element type.
    #[inline]
    fn end(&self) -> *const Slot<T> {
        self.end.cast()
    }

    /// The number of slots (not necessarily initialized elements) remaining.
    #[inline]
    fn len(&self) -> usize {
        // SAFETY:
        // - By our invariant, `self.end` is always greater than or equal to `self.start`.
        // - `start` and `end` were both created from the same object in `Iter::new`.
        // - `Vec::new` ensures that the allocation size doesn't exceed `isize::MAX` bytes.
        // - We know that the allocation doesn't wrap around the address space.
        unsafe { self.end().offset_from_unsigned(self.start()) }
    }

    /// Views the remaining slots as a slice.
    fn as_slice(&self) -> &[Slot<T>] {
        // SAFETY: `start` points at `len` consecutive committed slots (established in
        // `Iter::new`), and the borrow recorded in `marker` keeps the vector alive for the
        // returned lifetime.
        unsafe { slice::from_raw_parts(self.start(), self.len()) }
    }
}
882
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Adapter that prints only the initialized elements among the remaining slots.
        struct Elements<'a, T>(&'a [Slot<T>]);

        impl<T: fmt::Debug> fmt::Debug for Elements<'_, T> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list()
                    .entries(self.0.iter().filter_map(Slot::value))
                    .finish()
            }
        }

        f.debug_tuple("Iter")
            .field(&Elements(self.as_slice()))
            .finish()
    }
}
900
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;

    /// Advances from the front, skipping slots that haven't been initialized.
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            if self.start == self.end {
                break None;
            }

            let start = self.start();

            // SAFETY: We checked that there are still elements remaining above.
            self.start = unsafe { start.add(1) }.cast();

            // SAFETY: Same as the previous.
            let slot = unsafe { &*start };

            // Unoccupied slots are skipped rather than yielded, hence the loop.
            if let Some(value) = slot.value() {
                break Some(value);
            }
        }
    }

    /// Lower bound is 0 because any number of the remaining slots may be uninitialized.
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (0, Some(self.len()))
    }
}
930
impl<T> DoubleEndedIterator for Iter<'_, T> {
    /// Advances from the back, skipping slots that haven't been initialized.
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        loop {
            if self.start == self.end {
                break None;
            }

            // SAFETY: We checked that there are still elements remaining above.
            let end = unsafe { self.end().sub(1) };

            self.end = end.cast();

            // SAFETY: Same as the previous.
            let slot = unsafe { &*end };

            // Unoccupied slots are skipped rather than yielded, hence the loop.
            if let Some(value) = slot.value() {
                break Some(value);
            }
        }
    }
}
953
// Once `start == end`, `next`/`next_back` keep returning `None` without moving the cursors.
impl<T> FusedIterator for Iter<'_, T> {}
955
/// An iterator that yields mutable references to elements of a vector.
///
/// This struct is created by the [`iter_mut`] method on [`Vec`].
///
/// [`iter_mut`]: Vec::iter_mut
pub struct IterMut<'a, T> {
    // Exclusive access lets us iterate the slots as a plain slice; unoccupied slots are
    // filtered out in `next`/`next_back`.
    inner: slice::IterMut<'a, Slot<T>>,
}
964
965impl<'a, T> Iterator for IterMut<'a, T> {
966    type Item = &'a mut T;
967
968    #[inline]
969    fn next(&mut self) -> Option<Self::Item> {
970        loop {
971            let slot = self.inner.next()?;
972
973            if let Some(value) = slot.value_mut() {
974                break Some(value);
975            }
976        }
977    }
978
979    #[inline]
980    fn size_hint(&self) -> (usize, Option<usize>) {
981        (0, Some(self.inner.len()))
982    }
983}
984
985impl<T> DoubleEndedIterator for IterMut<'_, T> {
986    #[inline]
987    fn next_back(&mut self) -> Option<Self::Item> {
988        loop {
989            let slot = self.inner.next_back()?;
990
991            if let Some(value) = slot.value_mut() {
992                break Some(value);
993            }
994        }
995    }
996}
997
998impl<T> FusedIterator for IterMut<'_, T> {}
999
/// An iterator that moves out of a vector.
///
/// This struct is created by the [`into_iter`] method on [`Vec`].
///
/// [`into_iter`]: Vec::into_iter
pub struct IntoIter<T> {
    // Cursor to the first slot not yet yielded by `next`. Invariant: `start <= end`.
    start: *mut (),
    // Cursor one past the last slot not yet yielded by `next_back`.
    end: *mut (),
    // Never read, but kept alive so the memory backing the remaining slots isn't released
    // before the iterator (and its `Drop`) is done with them.
    #[allow(dead_code)]
    allocation: Allocation,
    // Ties the otherwise type-erased cursors to the element type `T`.
    marker: PhantomData<T>,
}
1012
// SAFETY: We own the collection, and synchronization to it is ensured using mutable references.
// The raw cursors are only ever mutated through `&mut self`, so moving the iterator to another
// thread is safe whenever `T` itself is `Send`.
unsafe impl<T: Send> Send for IntoIter<T> {}
1015
// SAFETY: We own the collection, and synchronization to it is ensured using mutable references.
// Shared access (`as_slice`, `Debug`) only reads, so `&IntoIter<T>` is safe to share whenever
// `T` is `Sync`.
unsafe impl<T: Sync> Sync for IntoIter<T> {}
1018
impl<T> IntoIter<T> {
    /// Takes ownership of `vec`'s allocation and remaining elements without dropping them.
    #[inline]
    pub(super) fn new(vec: Vec<T>) -> Self {
        let mut vec = ManuallyDrop::new(vec.inner);

        // SAFETY: `vec` is wrapped in a `ManuallyDrop` such that a double-free can't happen even
        // if a panic was possible below.
        let allocation = unsafe { ptr::read(&vec.inner.allocation) };

        let start = vec.as_mut_ptr();

        // The length may overshoot the capacity (see `RawVec::len`), so clamp it: only slots
        // backed by committed memory exist.
        let len = cmp::min(vec.len_mut(), vec.capacity_mut());

        // SAFETY: The ownership synchronizes with setting the capacity, making sure that the newly
        // committed memory is visible here.
        let end = unsafe { start.add(len) };

        IntoIter {
            start: start.cast(),
            end: end.cast(),
            allocation,
            marker: PhantomData,
        }
    }

    // Returns the `start` cursor typed as a slot pointer.
    #[inline]
    fn start(&self) -> *mut Slot<T> {
        self.start.cast()
    }

    // Returns the `end` cursor typed as a slot pointer.
    #[inline]
    fn end(&self) -> *mut Slot<T> {
        self.end.cast()
    }

    // Number of slots (occupied or not) remaining between the cursors.
    #[inline]
    fn len(&self) -> usize {
        // SAFETY:
        // - By our invariant, `self.end` is always greater than or equal to `self.start`.
        // - `start` and `end` were both created from the same object in `IntoIter::new`.
        // - `Vec::new` ensures that the allocation size doesn't exceed `isize::MAX` bytes.
        // - We know that the allocation doesn't wrap around the address space.
        unsafe { self.end().offset_from_unsigned(self.start()) }
    }

    // Returns the remaining slots as a slice.
    fn as_slice(&self) -> &[Slot<T>] {
        // SAFETY: `start` and `len` delimit the remaining, committed slots of the allocation we
        // own, as established in `IntoIter::new` and maintained by the cursor updates.
        unsafe { slice::from_raw_parts(self.start(), self.len()) }
    }
}
1068
1069impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
1070    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1071        struct Elements<'a, T>(&'a [Slot<T>]);
1072
1073        impl<T: fmt::Debug> fmt::Debug for Elements<'_, T> {
1074            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1075                f.debug_list()
1076                    .entries(self.0.iter().filter_map(Slot::value))
1077                    .finish()
1078            }
1079        }
1080
1081        f.debug_tuple("IntoIter")
1082            .field(&Elements(self.as_slice()))
1083            .finish()
1084    }
1085}
1086
1087impl<T> Drop for IntoIter<T> {
1088    fn drop(&mut self) {
1089        let elements = ptr::slice_from_raw_parts_mut(self.start(), self.len());
1090
1091        // SAFETY: We own the collection, and it is being dropped, which ensures that the elements
1092        // can't be accessed again.
1093        unsafe { elements.drop_in_place() };
1094    }
1095}
1096
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Scan forward, moving out of occupied slots and skipping vacant ones.
        loop {
            if self.start == self.end {
                break None;
            }

            let start = self.start();

            // SAFETY: We checked that there are still elements remaining above.
            self.start = unsafe { start.add(1) }.cast();

            // SAFETY: Same as the previous.
            let slot = unsafe { &*start };

            if let Some(value) = slot.value() {
                // SAFETY: We own the collection, and have just incremented the `start` pointer
                // past this slot such that this element can't be accessed again.
                break Some(unsafe { ptr::read(value) });
            }
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Any number of the remaining slots may be vacant, so only the upper bound is known.
        (0, Some(self.len()))
    }
}
1128
1129impl<T> DoubleEndedIterator for IntoIter<T> {
1130    #[inline]
1131    fn next_back(&mut self) -> Option<Self::Item> {
1132        loop {
1133            if self.start == self.end {
1134                break None;
1135            }
1136
1137            // SAFETY: We checked that there are still elements remaining above.
1138            let end = unsafe { self.end().sub(1) };
1139
1140            self.end = end.cast();
1141
1142            // SAFETY: Same as the previous.
1143            let slot = unsafe { &*end };
1144
1145            if let Some(value) = slot.value() {
1146                // SAFETY: We own the collection, and have just decremented the `end` pointer such
1147                // that this element can't be accessed again.
1148                break Some(unsafe { ptr::read(value) });
1149            }
1150        }
1151    }
1152}
1153
1154impl<T> FusedIterator for IntoIter<T> {}
1155
/// A low-level, concurrent, in-place growable vector.
///
/// This is meant to be a low-level primitive used to implement data structures such as the
/// concurrent [`Vec`]. Unlike the concurrent `Vec`, this type stores `T`s directly, and as such
/// doesn't have any memory overhead and derefs to `[T]`. Methods such as [`Vec::get`] are not
/// provided because you should use the slice equivalent. This added flexibility comes at the cost
/// of a bit of unsafety: when constructing a `RawVec<T>`, you must ensure that `T` is zeroable,
/// and pushing and accessing elements requires you to handle initialization of the element
/// yourself.
///
/// If you don't specify a [growth strategy], exponential growth with a growth factor of 2 is used,
/// which is the same strategy that the standard library `Vec` uses.
///
/// [growth strategy]: GrowthStrategy
pub struct RawVec<T> {
    inner: RawVecInner,
    // Ties the element type `T` to the otherwise type-erased `RawVecInner`.
    marker: PhantomData<T>,
}
1171
// Type-erased core of `RawVec`; element size/alignment are passed in by the typed wrapper.
struct RawVecInner {
    // Pointer to the first element, offset past the (optional) header within `allocation`.
    elements: *mut (),
    // Number of elements the committed portion of the memory can hold.
    capacity: AtomicUsize,
    // Number of claimed elements; may transiently exceed `capacity` (see `RawVec::len`).
    len: AtomicUsize,
    // Upper bound on `capacity`, fixed at construction.
    max_capacity: usize,
    // Policy used to pick a new capacity when growing non-exactly.
    growth_strategy: GrowthStrategy,
    // The reserved virtual memory region backing `elements`.
    allocation: Allocation,
}
1180
impl RawVec<()> {
    /// Returns a builder for a new `RawVec`.
    ///
    /// `max_capacity` is the maximum capacity the vector can ever have. The vector is guaranteed
    /// to never exceed this capacity. The capacity can be excessively huge, as none of the memory
    /// is [committed] until you push elements into the vector or reserve capacity.
    ///
    /// This function is implemented on `RawVec<()>` so that you don't have to import the
    /// `VecBuilder` type too. The type parameter has no relation to the built `RawVec`.
    ///
    /// [committed]: crate#committing
    #[inline]
    #[must_use]
    pub const fn builder(max_capacity: usize) -> raw::VecBuilder {
        // `RawVec<()>` serves purely as a namespace here; the builder decides the element type.
        raw::VecBuilder::new(max_capacity)
    }
}
1198
impl<T> RawVec<T> {
    /// Creates a new `RawVec`.
    ///
    /// `max_capacity` is the maximum capacity the vector can ever have. The vector is guaranteed
    /// to never exceed this capacity. The capacity can be excessively huge, as none of the memory
    /// is [committed] until you push elements into the vector or reserve capacity.
    ///
    /// See the [`builder`] function for more creation options.
    ///
    /// # Safety
    ///
    /// `T` must be zeroable.
    ///
    /// # Panics
    ///
    /// - Panics if the `max_capacity` would exceed `isize::MAX` bytes.
    /// - Panics if [reserving] the allocation returns an error.
    ///
    /// [committed]: crate#committing
    /// [`builder`]: Self::builder
    /// [reserving]: crate#reserving
    #[must_use]
    #[track_caller]
    pub unsafe fn new(max_capacity: usize) -> Self {
        // SAFETY: The caller must ensure that `T` is zeroable (see `# Safety` above); that
        // obligation is forwarded to `build`.
        unsafe { RawVec::builder(max_capacity).build() }
    }

    /// Creates a dangling `RawVec`.
    ///
    /// This is useful as a placeholder value to defer allocation until later or if no allocation
    /// is needed.
    #[inline]
    #[must_use]
    pub const fn dangling() -> Self {
        RawVec {
            inner: RawVecInner::dangling(align_of::<T>()),
            marker: PhantomData,
        }
    }

    /// Returns a slice of the entire vector.
    ///
    /// This returns a slice with a length equal to the vector's capacity.
    #[inline]
    #[must_use]
    pub fn as_capacity(&self) -> &[T] {
        // SAFETY: The `Acquire` ordering in `RawVec::capacity` synchronizes with the `Release`
        // ordering when setting the capacity, making sure that the newly committed memory is
        // visible here. The constructor of `RawVec` must ensure that `T` is zeroable.
        unsafe { slice::from_raw_parts(self.as_ptr(), self.capacity()) }
    }

    /// Returns a mutable slice of the entire vector.
    ///
    /// This returns a slice with a length equal to the vector's capacity.
    #[inline]
    #[must_use]
    pub fn as_mut_capacity(&mut self) -> &mut [T] {
        // SAFETY: The mutable reference synchronizes with setting the capacity, making sure that
        // the newly committed memory is visible here. The constructor of `RawVec` must ensure that
        // `T` is zeroable.
        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.capacity_mut()) }
    }

    /// Returns a slice of the vector's elements.
    ///
    /// This returns a slice with a length equal to the minimum of the vector's length and its
    /// capacity. As such, it is slightly less efficient than [`as_capacity`].
    ///
    /// This does the same thing as [`RawVec::deref`].
    ///
    /// [`as_capacity`]: Self::as_capacity
    #[inline]
    #[must_use]
    pub fn as_slice(&self) -> &[T] {
        let len = cmp::min(self.len(), self.capacity());

        // SAFETY: The `Acquire` ordering in `RawVec::capacity` synchronizes with the `Release`
        // ordering when setting the capacity, making sure that the newly committed memory is
        // visible here. The constructor of `RawVec` must ensure that `T` is zeroable.
        unsafe { slice::from_raw_parts(self.as_ptr(), len) }
    }

    /// Returns a mutable slice of the vector's elements.
    ///
    /// This returns a slice with a length equal to the minimum of the vector's length and its
    /// capacity. As such, it is slightly less efficient than [`as_mut_capacity`].
    ///
    /// This does the same thing as [`RawVec::deref_mut`].
    ///
    /// [`as_mut_capacity`]: Self::as_mut_capacity
    #[inline]
    #[must_use]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        let len = cmp::min(self.len_mut(), self.capacity_mut());

        // SAFETY: The mutable reference synchronizes with setting the capacity, making sure that
        // the newly committed memory is visible here. The constructor of `RawVec` must ensure that
        // `T` is zeroable.
        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), len) }
    }

    /// Returns a pointer to the vector's buffer.
    ///
    /// This method is guaranteed to never materialize a reference to the underlying data. This,
    /// coupled with the fact that the vector never reallocates, means that the returned pointer
    /// stays valid until the vector is dropped.
    #[inline]
    #[must_use]
    pub fn as_ptr(&self) -> *const T {
        self.inner.elements.cast()
    }

    /// Returns a mutable pointer to the vector's buffer.
    ///
    /// This method is guaranteed to never materialize a reference to the underlying data. This,
    /// coupled with the fact that the vector never reallocates, means that the returned pointer
    /// stays valid until the vector is dropped.
    #[inline]
    #[must_use]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.inner.elements.cast()
    }

    /// Returns the maximum capacity that was used when creating the `RawVec`.
    #[inline]
    #[must_use]
    pub fn max_capacity(&self) -> usize {
        self.inner.max_capacity
    }

    /// Returns the total number of elements the vector can hold without [committing] more memory.
    ///
    /// [committing]: crate#committing
    #[inline]
    #[must_use]
    pub fn capacity(&self) -> usize {
        self.inner.capacity.load(Acquire)
    }

    // Non-atomic capacity read; exclusive access makes the `Acquire` ordering unnecessary.
    #[inline]
    fn capacity_mut(&mut self) -> usize {
        *self.inner.capacity.get_mut()
    }

    /// Returns the number of elements in the vector.
    ///
    /// This number may exceed [`capacity`]. It also doesn't synchronize with setting the capacity.
    /// As such, this must not be used in conjunction with [`as_ptr`] to unsafely access any
    /// element unless you compared the length to the capacity.
    ///
    /// [`capacity`]: Self::capacity
    /// [`as_ptr`]: Self::as_ptr
    #[inline]
    #[must_use]
    pub fn len(&self) -> usize {
        self.inner.len.load(Relaxed)
    }

    // Non-atomic length read; exclusive access makes atomic ordering unnecessary.
    #[inline]
    fn len_mut(&mut self) -> usize {
        *self.inner.len.get_mut()
    }

    /// Returns `true` if the vector contains no elements.
    ///
    /// This counts elements that failed to be pushed due to an error.
    #[inline]
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Appends an element to the end of the vector.
    ///
    /// Returns the index of the inserted element as well as the element itself. The element is
    /// zeroed; you must initialize it yourself.
    #[inline]
    #[track_caller]
    pub fn push(&self) -> (usize, &T) {
        if T::IS_ZST {
            // ZSTs occupy no memory, so only the length is tracked; the CAS loop keeps it from
            // ever exceeding `max_capacity`.
            let mut len = self.inner.len.load(Relaxed);

            loop {
                if len == self.inner.max_capacity {
                    capacity_overflow();
                }

                match self
                    .inner
                    .len
                    .compare_exchange(len, len + 1, Relaxed, Relaxed)
                {
                    Ok(_) => {
                        // SAFETY: `T` is a ZST, which means that all elements live at the same
                        // address. The constructor of `RawVec` must ensure that `T` is zeroable.
                        let slot = unsafe { &*self.as_ptr() };

                        break (len, slot);
                    }
                    Err(new_len) => len = new_len,
                }
            }
        } else {
            // This cannot overflow because `self.inner.max_capacity` can never exceed `isize::MAX`
            // bytes, and because `RawVec::grow_one` decrements the length back if the length
            // overshot `self.inner.max_capacity`.
            let len = self.inner.len.fetch_add(1, Relaxed);

            if len >= self.inner.capacity.load(Acquire) {
                // SAFETY: The length was incremented above, which `grow_one` relies on when it
                // decrements the length back after overshooting `max_capacity`.
                unsafe { self.grow_one(len) };
            }

            // SAFETY: We made sure that the index is in bounds above. The `Acquire` ordering above
            // synchronizes with the `Release` ordering when setting the capacity, making sure that
            // the newly committed memory is visible here. The constructor of `RawVec` must ensure
            // that `T` is zeroable.
            let slot = unsafe { &*self.as_ptr().add(len) };

            (len, slot)
        }
    }

    // Cold path of `push`: commits enough memory for index `len`.
    #[cold]
    #[inline(never)]
    #[track_caller]
    unsafe fn grow_one(&self, len: usize) {
        // SAFETY: Forwarded from the caller; `size_of::<T>()` is this vector's element size.
        unsafe { self.inner.grow_one(len, size_of::<T>()) }
    }

    /// Appends an element to the end of the vector.
    ///
    /// Returns the index of the inserted element as well as the element itself. The element is
    /// zeroed; you must initialize it yourself.
    #[inline]
    #[track_caller]
    pub fn push_mut(&mut self) -> (usize, &mut T) {
        let len = self.len_mut();

        if len >= self.capacity_mut() {
            // SAFETY: We hold a mutable reference, so no other thread can race the growth.
            unsafe { self.grow_one_mut() };
        }

        *self.inner.len.get_mut() = len + 1;

        // SAFETY: We made sure the index is in bounds above. The mutable reference synchronizes
        // with setting the capacity, making sure that the newly committed memory is visible here.
        // The constructor of `RawVec` must ensure that `T` is zeroable.
        let slot = unsafe { &mut *self.as_mut_ptr().add(len) };

        (len, slot)
    }

    // Cold path of `push_mut`: commits enough memory for one more element.
    #[cold]
    #[inline(never)]
    #[track_caller]
    unsafe fn grow_one_mut(&mut self) {
        // SAFETY: Forwarded from the caller; `size_of::<T>()` is this vector's element size.
        unsafe { self.inner.grow_one_mut(size_of::<T>()) }
    }

    /// Reserves capacity for at least `additional` more elements.
    ///
    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
    /// standard library vector.
    ///
    /// The new capacity is at least `self.len() + additional`. If this capacity is below the one
    /// calculated using the [growth strategy], the latter is used. Does nothing if the capacity is
    /// already sufficient.
    ///
    /// # Panics
    ///
    /// - Panics if `self.len() + additional` would exceed `self.max_capacity()`.
    /// - Panics if [committing] the new capacity fails.
    ///
    /// [reserving virtual memory]: crate#reserving
    /// [growth strategy]: GrowthStrategy
    /// [committing]: crate#committing
    #[track_caller]
    pub fn reserve(&self, additional: usize) {
        // SAFETY: `size_of::<T>()` is the element size this vector's allocation was created with.
        unsafe { self.inner.reserve(additional, size_of::<T>()) };
    }

    /// Reserves the minimum capacity required for `additional` more elements.
    ///
    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
    /// standard library vector.
    ///
    /// The new capacity is at least `self.len() + additional`. The [growth strategy] is ignored,
    /// but the new capacity can still be greater due to the alignment to the [page size]. Does
    /// nothing if the capacity is already sufficient.
    ///
    /// # Panics
    ///
    /// - Panics if `self.len() + additional` would exceed `self.max_capacity()`.
    /// - Panics if [committing] the new capacity fails.
    ///
    /// [reserving virtual memory]: crate#reserving
    /// [growth strategy]: GrowthStrategy
    /// [page size]: crate#pages
    /// [committing]: crate#committing
    #[track_caller]
    pub fn reserve_exact(&self, additional: usize) {
        // SAFETY: `size_of::<T>()` is the element size this vector's allocation was created with.
        unsafe { self.inner.reserve_exact(additional, size_of::<T>()) };
    }

    /// Tries to reserve capacity for at least `additional` more elements, returning an error on
    /// failure.
    ///
    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
    /// standard library vector.
    ///
    /// The new capacity is at least `self.len() + additional`. If this capacity is below the one
    /// calculated using the [growth strategy], the latter is used. Does nothing if the capacity is
    /// already sufficient.
    ///
    /// # Errors
    ///
    /// - Returns an error if `self.len() + additional` would exceed `self.max_capacity()`.
    /// - Returns an error if [committing] the new capacity fails.
    ///
    /// [reserving virtual memory]: crate#reserving
    /// [growth strategy]: GrowthStrategy
    /// [committing]: crate#committing
    pub fn try_reserve(&self, additional: usize) -> Result<(), TryReserveError> {
        // SAFETY: `size_of::<T>()` is the element size this vector's allocation was created with.
        unsafe { self.inner.try_reserve(additional, size_of::<T>()) }
    }

    /// Tries to reserve the minimum capacity required for `additional` more elements, returning an
    /// error on failure.
    ///
    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
    /// standard library vector.
    ///
    /// The new capacity is at least `self.len() + additional`. The [growth strategy] is ignored,
    /// but the new capacity can still be greater due to the alignment to the [page size]. Does
    /// nothing if the capacity is already sufficient.
    ///
    /// # Errors
    ///
    /// - Returns an error if `self.len() + additional` would exceed `self.max_capacity()`.
    /// - Returns an error if [committing] the new capacity fails.
    ///
    /// [reserving virtual memory]: crate#reserving
    /// [growth strategy]: GrowthStrategy
    /// [page size]: crate#pages
    /// [committing]: crate#committing
    pub fn try_reserve_exact(&self, additional: usize) -> Result<(), TryReserveError> {
        // SAFETY: `size_of::<T>()` is the element size this vector's allocation was created with.
        unsafe { self.inner.try_reserve_exact(additional, size_of::<T>()) }
    }
}
1549
impl RawVecInner {
    // Reserves the virtual memory allocation and commits the initial `capacity`.
    //
    // NOTE(review): callers presumably guarantee that the element type described by
    // `elem_size`/`elem_align` is zeroable, mirroring `RawVec::new` — confirm at the call sites.
    unsafe fn new(
        max_capacity: usize,
        capacity: usize,
        growth_strategy: GrowthStrategy,
        header_layout: Layout,
        elem_size: usize,
        elem_align: usize,
    ) -> Result<Self, TryReserveError> {
        let size = max_capacity
            .checked_mul(elem_size)
            .ok_or(CapacityOverflow)?;

        #[allow(clippy::cast_possible_wrap)]
        if size > isize::MAX as usize || capacity > max_capacity {
            return Err(CapacityOverflow.into());
        }

        // Nothing to reserve: ZST elements with no header get a dangling allocation and the
        // maximum capacity right away.
        if size == 0 && header_layout.size() == 0 {
            let allocation = Allocation::dangling(elem_align);

            return Ok(RawVecInner {
                elements: allocation.ptr().cast(),
                capacity: AtomicUsize::new(max_capacity),
                len: AtomicUsize::new(0),
                max_capacity,
                growth_strategy,
                allocation,
            });
        }

        // This can't overflow because `Layout`'s size can't exceed `isize::MAX`.
        let elements_offset = align_up(header_layout.size(), elem_align);

        // This can't overflow because `elements_offset` can be at most `1 << 63` and `size` can be
        // at most `isize::MAX`, totaling `usize::MAX`.
        let size = elements_offset + size;

        let page_size = page_size();
        let size = align_up(size, page_size);

        // `align_up` wrapped around to zero: the page-padded size is unrepresentable.
        if size == 0 {
            return Err(CapacityOverflow.into());
        }

        let align = cmp::max(header_layout.align(), elem_align);

        // The minimum additional size required to fulfill the alignment requirement of the header
        // and element. The virtual memory allocation is only guaranteed to be aligned to the page
        // size, so if the alignment is greater than the page size, we must pad the allocation.
        let align_size = cmp::max(align, page_size) - page_size;

        let size = align_size.checked_add(size).ok_or(CapacityOverflow)?;

        let allocation = Allocation::new(size).map_err(AllocError)?;
        let aligned_ptr = allocation.ptr().map_addr(|addr| align_up(addr, align));

        // This can't overflow because the size is already allocated.
        let initial_size = align_up(elements_offset + capacity * elem_size, page_size);

        if initial_size != 0 {
            allocation
                .commit(aligned_ptr, initial_size)
                .map_err(AllocError)?;
        }

        let elements = aligned_ptr.wrapping_add(elements_offset).cast();

        // The committed pages may hold more whole elements than requested; expose all of them.
        let capacity = if elem_size == 0 {
            max_capacity
        } else {
            (initial_size - elements_offset) / elem_size
        };

        Ok(RawVecInner {
            elements,
            capacity: AtomicUsize::new(capacity),
            len: AtomicUsize::new(0),
            max_capacity,
            growth_strategy,
            allocation,
        })
    }

    // A placeholder with no backing memory: zero capacity and zero `max_capacity`.
    #[inline]
    const fn dangling(elem_align: usize) -> Self {
        let allocation = Allocation::dangling(elem_align);

        RawVecInner {
            elements: allocation.ptr().cast(),
            capacity: AtomicUsize::new(0),
            len: AtomicUsize::new(0),
            max_capacity: 0,
            growth_strategy: GrowthStrategy::new(),
            allocation,
        }
    }

    #[inline]
    #[track_caller]
    unsafe fn reserve(&self, additional: usize, elem_size: usize) {
        let len = self.len.load(Relaxed);

        if self.needs_to_grow(len, additional) {
            // SAFETY: Forwarded from the caller.
            unsafe { self.reserve_slow(len, additional, elem_size) };
        }
    }

    // Out-of-line slow path of `reserve`; panics via `handle_error` on failure.
    #[cold]
    #[track_caller]
    unsafe fn reserve_slow(&self, len: usize, additional: usize, elem_size: usize) {
        if let Err(err) = unsafe { self.grow(len, additional, elem_size) } {
            handle_error(err);
        }
    }

    // Panicking wrapper around `try_reserve_exact`.
    #[track_caller]
    unsafe fn reserve_exact(&self, additional: usize, elem_size: usize) {
        if let Err(err) = unsafe { self.try_reserve_exact(additional, elem_size) } {
            handle_error(err);
        }
    }

    unsafe fn try_reserve(
        &self,
        additional: usize,
        elem_size: usize,
    ) -> Result<(), TryReserveError> {
        let len = self.len.load(Relaxed);

        if self.needs_to_grow(len, additional) {
            // SAFETY: Forwarded from the caller.
            unsafe { self.grow(len, additional, elem_size) }
        } else {
            Ok(())
        }
    }

    unsafe fn try_reserve_exact(
        &self,
        additional: usize,
        elem_size: usize,
    ) -> Result<(), TryReserveError> {
        let len = self.len.load(Relaxed);

        if self.needs_to_grow(len, additional) {
            // SAFETY: Forwarded from the caller.
            unsafe { self.grow_exact(len, additional, elem_size) }
        } else {
            Ok(())
        }
    }

    #[inline]
    fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
        let capacity = self.capacity.load(Relaxed);
        // The length can transiently overshoot the capacity (see `RawVec::push`), so clamp it
        // before computing the remaining headroom to avoid underflow.
        let len = cmp::min(len, capacity);

        additional > capacity - len
    }

    #[track_caller]
    unsafe fn grow_one(&self, len: usize, elem_size: usize) {
        if let Err(err) = unsafe { self.grow(len, 1, elem_size) } {
            if len >= self.max_capacity {
                // This can't overflow because `grow_one` must be called after the length was
                // incremented. It is sound to decrement because it's impossible for an element to
                // be initialized while the length exceeds `self.max_capacity`. Note that this is
                // *not* the case when `grow` fails in general: it can happen that committing more
                // memory fails on this thread but succeeds on another, resulting in us
                // decrementing the length while it is below `self.capacity`, which would lead to
                // two threads getting the same index in `push`. That's why we must not decrement
                // the length unless it overshot `self.max_capacity`. We decrement the length in
                // order to prevent an overflow in `push`.
                self.len.fetch_sub(1, Relaxed);
            }

            handle_error(err);
        }
    }

    // Exclusive-access counterpart of `grow_one`; no length rollback is needed because
    // `push_mut` only increments the length after growing succeeds.
    #[track_caller]
    unsafe fn grow_one_mut(&mut self, elem_size: usize) {
        let len = *self.len.get_mut();

        if let Err(err) = unsafe { self.grow(len, 1, elem_size) } {
            handle_error(err);
        }
    }

    // Grows using the growth strategy (amortized growth).
    unsafe fn grow(
        &self,
        len: usize,
        additional: usize,
        elem_size: usize,
    ) -> Result<(), TryReserveError> {
        unsafe { self.grow_inner(len, additional, false, elem_size) }
    }

    // Grows to exactly the required capacity (modulo page-size rounding).
    unsafe fn grow_exact(
        &self,
        len: usize,
        additional: usize,
        elem_size: usize,
    ) -> Result<(), TryReserveError> {
        unsafe { self.grow_inner(len, additional, true, elem_size) }
    }

    unsafe fn grow_inner(
        &self,
        len: usize,
        additional: usize,
        exact: bool,
        elem_size: usize,
    ) -> Result<(), TryReserveError> {
        debug_assert!(additional > 0);

        // Growth is impossible for ZSTs: `new` fixes their capacity at `max_capacity`, so
        // needing to grow means the request overflows it.
        if elem_size == 0 {
            return Err(CapacityOverflow.into());
        }

        let required_capacity = len.checked_add(additional).ok_or(CapacityOverflow)?;

        if required_capacity > self.max_capacity {
            return Err(CapacityOverflow.into());
        }

        let old_capacity = self.capacity.load(Acquire);

        if old_capacity >= required_capacity {
            // Another thread beat us to it.
            return Ok(());
        }

        let mut new_capacity = required_capacity;

        if !exact {
            new_capacity = cmp::max(new_capacity, self.growth_strategy.grow(old_capacity));
            new_capacity = cmp::min(new_capacity, self.max_capacity);
        }

        // Offset of the first element from the start of the allocation (header plus padding).
        let elements_offset = self.elements.addr() - self.allocation.ptr().addr();

        let page_size = page_size();

        // These can't overflow because the size is already allocated.
        let old_size = align_up(elements_offset + old_capacity * elem_size, page_size);
        let new_size = align_up(elements_offset + new_capacity * elem_size, page_size);

        let ptr = self.allocation.ptr().wrapping_add(old_size);
        let size = new_size - old_size;

        self.allocation.commit(ptr, size).map_err(AllocError)?;

        // The newly committed pages may hold more whole elements than requested; expose them
        // all, still capped at `max_capacity`.
        let new_capacity = (new_size - elements_offset) / elem_size;
        let new_capacity = cmp::min(new_capacity, self.max_capacity);

        if let Err(capacity) =
            self.capacity
                .compare_exchange(old_capacity, new_capacity, Release, Acquire)
        {
            // We lost the race, but the winner must have updated the capacity like we wanted to.
            debug_assert!(capacity >= new_capacity);
        }

        Ok(())
    }
}
1816
// Identity `AsRef`, so generic code can take `impl AsRef<RawVec<T>>`.
impl<T> AsRef<RawVec<T>> for RawVec<T> {
    #[inline]
    fn as_ref(&self) -> &RawVec<T> {
        self
    }
}
1823
// Reflexive `AsMut`, letting generic code accept `impl AsMut<RawVec<T>>`.
impl<T> AsMut<RawVec<T>> for RawVec<T> {
    #[inline]
    fn as_mut(&mut self) -> &mut RawVec<T> {
        self
    }
}
1830
1831impl<T> AsRef<[T]> for RawVec<T> {
1832    #[inline]
1833    fn as_ref(&self) -> &[T] {
1834        self
1835    }
1836}
1837
1838impl<T> AsMut<[T]> for RawVec<T> {
1839    #[inline]
1840    fn as_mut(&mut self) -> &mut [T] {
1841        self
1842    }
1843}
1844
1845impl<T> Borrow<[T]> for RawVec<T> {
1846    #[inline]
1847    fn borrow(&self) -> &[T] {
1848        self
1849    }
1850}
1851
1852impl<T> BorrowMut<[T]> for RawVec<T> {
1853    #[inline]
1854    fn borrow_mut(&mut self) -> &mut [T] {
1855        self
1856    }
1857}
1858
1859impl<T: fmt::Debug> fmt::Debug for RawVec<T> {
1860    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1861        fmt::Debug::fmt(&**self, f)
1862    }
1863}
1864
1865impl<T> Deref for RawVec<T> {
1866    type Target = [T];
1867
1868    /// This does the same thing as [`RawVec::as_slice`].
1869    #[inline]
1870    fn deref(&self) -> &Self::Target {
1871        self.as_slice()
1872    }
1873}
1874
1875impl<T> DerefMut for RawVec<T> {
1876    /// This does the same thing as [`RawVec::as_mut_slice`].
1877    #[inline]
1878    fn deref_mut(&mut self) -> &mut Self::Target {
1879        self.as_mut_slice()
1880    }
1881}
1882
impl<T> Drop for RawVec<T> {
    fn drop(&mut self) {
        // Clamp `len` to `capacity`: only slots within the committed capacity
        // can hold initialized elements, so anything beyond it must not be
        // dropped. NOTE(review): this assumes the length counter can transiently
        // exceed the committed capacity (e.g. after a failed/racing push) —
        // confirm against the push/reserve implementation.
        let len = cmp::min(self.len_mut(), self.capacity_mut());
        let elements = ptr::slice_from_raw_parts_mut(self.as_mut_ptr(), len);

        // SAFETY: We own the collection, and it is being dropped, which ensures that the elements
        // can't be accessed again.
        unsafe { elements.drop_in_place() };
    }
}
1893
1894impl<T: PartialEq<U>, U> PartialEq<RawVec<U>> for RawVec<T> {
1895    #[inline]
1896    fn eq(&self, other: &RawVec<U>) -> bool {
1897        **self == **other
1898    }
1899}
1900
1901impl<T: PartialEq<U>, U> PartialEq<&[U]> for RawVec<T> {
1902    #[inline]
1903    fn eq(&self, other: &&[U]) -> bool {
1904        **self == **other
1905    }
1906}
1907
1908impl<T: PartialEq<U>, U> PartialEq<&mut [U]> for RawVec<T> {
1909    #[inline]
1910    fn eq(&self, other: &&mut [U]) -> bool {
1911        **self == **other
1912    }
1913}
1914
1915impl<T: PartialEq<U>, U> PartialEq<RawVec<U>> for &[T] {
1916    #[inline]
1917    fn eq(&self, other: &RawVec<U>) -> bool {
1918        **self == **other
1919    }
1920}
1921
1922impl<T: PartialEq<U>, U> PartialEq<RawVec<U>> for &mut [T] {
1923    #[inline]
1924    fn eq(&self, other: &RawVec<U>) -> bool {
1925        **self == **other
1926    }
1927}
1928
1929impl<T: PartialEq<U>, U> PartialEq<[U]> for RawVec<T> {
1930    #[inline]
1931    fn eq(&self, other: &[U]) -> bool {
1932        **self == *other
1933    }
1934}
1935
1936impl<T: PartialEq<U>, U> PartialEq<RawVec<U>> for [T] {
1937    #[inline]
1938    fn eq(&self, other: &RawVec<U>) -> bool {
1939        *self == **other
1940    }
1941}
1942
1943#[cfg(feature = "alloc")]
1944impl<T: PartialEq<U> + Clone, U> PartialEq<RawVec<U>> for alloc::borrow::Cow<'_, [T]> {
1945    #[inline]
1946    fn eq(&self, other: &RawVec<U>) -> bool {
1947        **self == **other
1948    }
1949}
1950
1951impl<T: PartialEq<U>, U, const N: usize> PartialEq<[U; N]> for RawVec<T> {
1952    #[inline]
1953    fn eq(&self, other: &[U; N]) -> bool {
1954        **self == *other
1955    }
1956}
1957
1958impl<T: PartialEq<U>, U, const N: usize> PartialEq<&[U; N]> for RawVec<T> {
1959    #[inline]
1960    fn eq(&self, other: &&[U; N]) -> bool {
1961        **self == **other
1962    }
1963}
1964
// Marker impl: equality is total whenever the element type's is.
impl<T: Eq> Eq for RawVec<T> {}
1966
1967impl<T> Extend<T> for RawVec<T> {
1968    #[inline]
1969    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
1970        for elem in iter {
1971            let (_, slot) = self.push_mut();
1972            *slot = elem;
1973        }
1974    }
1975}
1976
1977impl<'a, T: Copy> Extend<&'a T> for RawVec<T> {
1978    #[inline]
1979    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
1980        for &elem in iter {
1981            let (_, slot) = self.push_mut();
1982            *slot = elem;
1983        }
1984    }
1985}
1986
1987impl<T: Hash> Hash for RawVec<T> {
1988    #[inline]
1989    fn hash<H: Hasher>(&self, state: &mut H) {
1990        Hash::hash(&**self, state);
1991    }
1992}
1993
1994impl<T, I: SliceIndex<[T]>> Index<I> for RawVec<T> {
1995    type Output = I::Output;
1996
1997    #[inline]
1998    fn index(&self, index: I) -> &Self::Output {
1999        Index::index(&**self, index)
2000    }
2001}
2002
2003impl<T, I: SliceIndex<[T]>> IndexMut<I> for RawVec<T> {
2004    #[inline]
2005    fn index_mut(&mut self, index: I) -> &mut Self::Output {
2006        IndexMut::index_mut(&mut **self, index)
2007    }
2008}
2009
2010impl<'a, T> IntoIterator for &'a RawVec<T> {
2011    type Item = &'a T;
2012
2013    type IntoIter = slice::Iter<'a, T>;
2014
2015    #[inline]
2016    fn into_iter(self) -> Self::IntoIter {
2017        self.iter()
2018    }
2019}
2020
2021impl<'a, T> IntoIterator for &'a mut RawVec<T> {
2022    type Item = &'a mut T;
2023
2024    type IntoIter = slice::IterMut<'a, T>;
2025
2026    #[inline]
2027    fn into_iter(self) -> Self::IntoIter {
2028        self.iter_mut()
2029    }
2030}
2031
impl<T> IntoIterator for RawVec<T> {
    type Item = T;

    type IntoIter = raw::IntoIter<T>;

    /// Consumes the vector and iterates over its elements by value.
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        raw::IntoIter::new(self)
    }
}
2042
2043impl<T: PartialOrd> PartialOrd for RawVec<T> {
2044    #[inline]
2045    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
2046        PartialOrd::partial_cmp(&**self, &**other)
2047    }
2048}
2049
2050impl<T: Ord> Ord for RawVec<T> {
2051    #[inline]
2052    fn cmp(&self, other: &Self) -> cmp::Ordering {
2053        Ord::cmp(&**self, &**other)
2054    }
2055}