// virtual_buffer/vec.rs
1//! An in-place growable vector.
2
3use self::TryReserveErrorKind::{AllocError, CapacityOverflow};
4use crate::{Allocation, Error, SizedTypeProperties, align_up, is_aligned, page_size};
5use core::{
6    alloc::Layout,
7    borrow::{Borrow, BorrowMut},
8    cmp, fmt,
9    hash::{Hash, Hasher},
10    iter::FusedIterator,
11    marker::PhantomData,
12    mem::ManuallyDrop,
13    ops::{Deref, DerefMut, Index, IndexMut},
14    panic::UnwindSafe,
15    ptr,
16    slice::{self, SliceIndex},
17};
18
/// Used to create a new vector.
///
/// This struct is created by the [`builder`] method on [`Vec`].
///
/// [`builder`]: Vec::builder
#[derive(Clone, Copy, Debug)]
pub struct VecBuilder {
    // Hard upper bound on the built vector's capacity; fixed at creation.
    max_capacity: usize,
    // Initial capacity to commit up front. Defaults to 0.
    capacity: usize,
    // Strategy used when the built vector must grow. Defaults to `GrowthStrategy::new()`.
    growth_strategy: GrowthStrategy,
    // Layout of an optional header placed before the elements. Defaults to a zero-sized layout,
    // meaning no header.
    header_layout: Layout,
}
31
32impl VecBuilder {
33    #[inline]
34    const fn new(max_capacity: usize) -> Self {
35        VecBuilder {
36            max_capacity,
37            capacity: 0,
38            growth_strategy: GrowthStrategy::new(),
39            header_layout: Layout::new::<()>(),
40        }
41    }
42
43    /// The built `Vec` will have the minimum capacity required for `capacity` elements.
44    ///
45    /// The capacity can be greater due to the alignment to the [page size].
46    ///
47    /// [page size]: crate#pages
48    #[inline]
49    pub const fn capacity(&mut self, capacity: usize) -> &mut Self {
50        self.capacity = capacity;
51
52        self
53    }
54
55    /// The built `Vec` will have the given `growth_strategy`.
56    ///
57    /// # Panics
58    ///
59    /// Panics if `growth_strategy` isn't valid per the documentation of [`GrowthStrategy`].
60    #[inline]
61    #[track_caller]
62    pub const fn growth_strategy(&mut self, growth_strategy: GrowthStrategy) -> &mut Self {
63        growth_strategy.validate();
64
65        self.growth_strategy = growth_strategy;
66
67        self
68    }
69
70    /// The built `Vec` will have a header with the given `header_layout`.
71    ///
72    /// `header_layout.size()` bytes will be allocated before the start of the vector's elements,
73    /// with the start aligned to `header_layout.align()`. You can use [`Vec::as_ptr`] and offset
74    /// backwards to access the header.
75    ///
76    /// # Panics
77    ///
78    /// Panics if `header_layout` is not padded to its alignment.
79    #[inline]
80    #[track_caller]
81    pub const fn header(&mut self, header_layout: Layout) -> &mut Self {
82        assert!(is_aligned(header_layout.size(), header_layout.align()));
83
84        self.header_layout = header_layout;
85
86        self
87    }
88
89    /// Builds the `Vec`.
90    ///
91    /// # Panics
92    ///
93    /// - Panics if the `max_capacity` would exceed `isize::MAX` bytes.
94    /// - Panics if the `capacity` is greater than the `max_capacity`.
95    /// - Panics if [reserving] the allocation returns an error.
96    ///
97    /// [reserving]: crate#reserving
98    #[must_use]
99    #[track_caller]
100    pub fn build<T>(&self) -> Vec<T> {
101        match self.try_build() {
102            Ok(vec) => vec,
103            Err(err) => handle_error(err),
104        }
105    }
106
107    /// Tries to build the `Vec`, returning an error when allocation fails.
108    ///
109    /// # Errors
110    ///
111    /// - Returns an error if the `max_capacity` would exceed `isize::MAX` bytes.
112    /// - Returns an error if the `capacity` is greater than the `max_capacity`.
113    /// - Returns an error if [reserving] the allocation returns an error.
114    ///
115    /// [reserving]: crate#reserving
116    pub fn try_build<T>(&self) -> Result<Vec<T>, TryReserveError> {
117        Ok(Vec {
118            inner: unsafe {
119                VecInner::new(
120                    self.max_capacity,
121                    self.capacity,
122                    self.growth_strategy,
123                    self.header_layout,
124                    size_of::<T>(),
125                    align_of::<T>(),
126                )
127            }?,
128            marker: PhantomData,
129        })
130    }
131}
132
/// An in-place growable vector.
///
/// This type behaves similarly to the standard library `Vec` except that it is guaranteed to never
/// reallocate. The vector grows similarly to the standard library vector, but instead of
/// reallocating, it commits more memory.
///
/// If you don't specify a [growth strategy], exponential growth with a growth factor of 2 is used,
/// which is the same strategy that the standard library `Vec` uses.
///
/// [growth strategy]: GrowthStrategy
pub struct Vec<T> {
    // Type-erased implementation; the element type only lives in `marker`.
    inner: VecInner,
    marker: PhantomData<T>,
}
147
// Type-erased core of `Vec`. Element size/alignment are passed in explicitly by the typed wrapper.
struct VecInner {
    // Pointer to the first element; offset past the header (if any) within the allocation.
    elements: *mut (),
    // Number of elements that fit in the currently committed memory. Invariant:
    // `capacity <= max_capacity`.
    capacity: usize,
    // Number of initialized elements. Invariant: `len <= capacity`; only updated after the
    // corresponding elements have been written.
    len: usize,
    // Upper bound on `capacity`, fixed at creation; the whole range is reserved up front.
    max_capacity: usize,
    growth_strategy: GrowthStrategy,
    // The backing virtual-memory allocation; keeps the reserved range alive.
    allocation: Allocation,
}
156
// SAFETY: `Vec` is an owned collection, which makes it safe to send to another thread as long as
// its element type is safe to send to another thread.
unsafe impl<T: Send> Send for Vec<T> {}
160
// SAFETY: `Vec` doesn't allow modifications through a shared reference, so a shared `Vec` can't be
// used to send elements to another thread. However, `Vec` allows getting a reference to any
// element from any thread. Therefore, it is safe to share `Vec` between threads as long as the
// element is shareable.
unsafe impl<T: Sync> Sync for Vec<T> {}
166
167impl Vec<()> {
168    /// Returns a builder for a new `Vec`.
169    ///
170    /// `max_capacity` is the maximum capacity the vector can ever have. The vector is guaranteed
171    /// to never exceed this capacity. The capacity can be excessively huge, as none of the memory
172    /// is [committed] until you push elements into the vector or reserve capacity.
173    ///
174    /// This function is implemented on `Vec<()>` so that you don't have to import the `VecBuilder`
175    /// type too. The type parameter has no relation to the built `Vec`.
176    ///
177    /// [committed]: crate#committing
178    #[inline]
179    #[must_use]
180    pub const fn builder(max_capacity: usize) -> VecBuilder {
181        VecBuilder::new(max_capacity)
182    }
183}
184
impl<T> Vec<T> {
    /// Creates a new `Vec`.
    ///
    /// `max_capacity` is the maximum capacity the vector can ever have. The vector is guaranteed
    /// to never exceed this capacity. The capacity can be excessively huge, as none of the memory
    /// is [committed] until you push elements into the vector or reserve capacity.
    ///
    /// See the [`builder`] function for more creation options.
    ///
    /// # Panics
    ///
    /// - Panics if the `max_capacity` would exceed `isize::MAX` bytes.
    /// - Panics if [reserving] the allocation returns an error.
    ///
    /// [committed]: crate#committing
    /// [`builder`]: Self::builder
    /// [reserving]: crate#reserving
    #[must_use]
    #[track_caller]
    pub fn new(max_capacity: usize) -> Self {
        VecBuilder::new(max_capacity).build()
    }

    /// Creates a dangling `Vec`.
    ///
    /// This is useful as a placeholder value to defer allocation until later or if no allocation
    /// is needed.
    #[inline]
    #[must_use]
    pub const fn dangling() -> Self {
        Vec {
            inner: VecInner::dangling(align_of::<T>()),
            marker: PhantomData,
        }
    }

    /// Returns a slice of the entire vector.
    #[inline]
    #[must_use]
    pub fn as_slice(&self) -> &[T] {
        // SAFETY: The first `len` elements are always initialized.
        unsafe { slice::from_raw_parts(self.as_ptr(), self.len()) }
    }

    /// Returns a mutable slice of the entire vector.
    #[inline]
    #[must_use]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        // SAFETY: The first `len` elements are always initialized, and `&mut self` guarantees
        // exclusive access.
        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len()) }
    }

    /// Returns a pointer to the vector's buffer.
    ///
    /// This method is guaranteed to never materialize a reference to the underlying data. This,
    /// coupled with the fact that the vector never reallocates, means that the returned pointer
    /// stays valid until the vector is dropped.
    #[inline]
    #[must_use]
    pub fn as_ptr(&self) -> *const T {
        self.inner.elements.cast()
    }

    /// Returns a mutable pointer to the vector's buffer.
    ///
    /// This method is guaranteed to never materialize a reference to the underlying data. This,
    /// coupled with the fact that the vector never reallocates, means that the returned pointer
    /// stays valid until the vector is dropped.
    #[inline]
    #[must_use]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.inner.elements.cast()
    }

    /// Returns the maximum capacity that was used when creating the `Vec`.
    #[inline]
    #[must_use]
    pub fn max_capacity(&self) -> usize {
        self.inner.max_capacity
    }

    /// Returns the total number of elements the vector can hold without [committing] more memory.
    ///
    /// [committing]: crate#committing
    #[inline]
    #[must_use]
    pub fn capacity(&self) -> usize {
        self.inner.capacity
    }

    /// Returns the number of elements in the vector.
    #[inline]
    #[must_use]
    pub fn len(&self) -> usize {
        self.inner.len
    }

    /// Returns `true` if the vector contains no elements.
    #[inline]
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Appends an element to the end of the vector.
    #[inline]
    #[track_caller]
    pub fn push(&mut self, value: T) {
        let len = self.len();

        if len == self.capacity() {
            self.grow_one();
        }

        // SAFETY: `len < capacity` holds at this point: either it held on entry, or `grow_one`
        // above increased the capacity (it panics when growing fails).
        unsafe { self.as_mut_ptr().add(len).write(value) };

        // SAFETY: We have written the element above.
        unsafe { self.inner.len = len + 1 };
    }

    // Cold, out-of-line growth path of `push`; panics if growing fails. Keeping this out of
    // `push` keeps the hot path small.
    #[cold]
    #[inline(never)]
    #[track_caller]
    fn grow_one(&mut self) {
        unsafe { self.inner.grow_one(size_of::<T>()) };
    }

    /// Reserves capacity for at least `additional` more elements.
    ///
    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
    /// standard library vector.
    ///
    /// The new capacity is at least `self.len() + additional`. If this capacity is below the one
    /// calculated using the [growth strategy], the latter is used. Does nothing if the capacity is
    /// already sufficient.
    ///
    /// # Panics
    ///
    /// - Panics if `self.len() + additional` would exceed `self.max_capacity()`.
    /// - Panics if [committing] the new capacity fails.
    ///
    /// [reserving virtual memory]: crate#reserving
    /// [growth strategy]: GrowthStrategy
    /// [committing]: crate#committing
    #[track_caller]
    pub fn reserve(&mut self, additional: usize) {
        unsafe { self.inner.reserve(additional, size_of::<T>()) };
    }

    /// Reserves the minimum capacity required for `additional` more elements.
    ///
    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
    /// standard library vector.
    ///
    /// The new capacity is at least `self.len() + additional`. The [growth strategy] is ignored,
    /// but the new capacity can still be greater due to the alignment to the [page size]. Does
    /// nothing if the capacity is already sufficient.
    ///
    /// # Panics
    ///
    /// - Panics if `self.len() + additional` would exceed `self.max_capacity()`.
    /// - Panics if [committing] the new capacity fails.
    ///
    /// [reserving virtual memory]: crate#reserving
    /// [growth strategy]: GrowthStrategy
    /// [page size]: crate#pages
    /// [committing]: crate#committing
    #[track_caller]
    pub fn reserve_exact(&mut self, additional: usize) {
        unsafe { self.inner.reserve_exact(additional, size_of::<T>()) };
    }

    /// Tries to reserve capacity for at least `additional` more elements, returning an error on
    /// failure.
    ///
    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
    /// standard library vector.
    ///
    /// The new capacity is at least `self.len() + additional`. If this capacity is below the one
    /// calculated using the [growth strategy], the latter is used. Does nothing if the capacity is
    /// already sufficient.
    ///
    /// # Errors
    ///
    /// - Returns an error if `self.len() + additional` would exceed `self.max_capacity()`.
    /// - Returns an error if [committing] the new capacity fails.
    ///
    /// [reserving virtual memory]: crate#reserving
    /// [growth strategy]: GrowthStrategy
    /// [committing]: crate#committing
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        unsafe { self.inner.try_reserve(additional, size_of::<T>()) }
    }

    /// Tries to reserve the minimum capacity required for `additional` more elements, returning an
    /// error on failure.
    ///
    /// Not to be confused with [reserving virtual memory]; this method is named so as to match the
    /// standard library vector.
    ///
    /// The new capacity is at least `self.len() + additional`. The [growth strategy] is ignored,
    /// but the new capacity can still be greater due to the alignment to the [page size]. Does
    /// nothing if the capacity is already sufficient.
    ///
    /// # Errors
    ///
    /// - Returns an error if `self.len() + additional` would exceed `self.max_capacity()`.
    /// - Returns an error if [committing] the new capacity fails.
    ///
    /// [reserving virtual memory]: crate#reserving
    /// [growth strategy]: GrowthStrategy
    /// [page size]: crate#pages
    /// [committing]: crate#committing
    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        unsafe { self.inner.try_reserve_exact(additional, size_of::<T>()) }
    }
}
401
impl VecInner {
    // Reserves virtual memory for `max_capacity` elements (plus header and alignment padding) and
    // commits enough pages for `capacity` elements up front.
    //
    // NOTE(review): the `unsafe` contract is undocumented; every caller passes
    // `size_of::<T>()`/`align_of::<T>()` for `elem_size`/`elem_align` — presumably that is the
    // requirement. Confirm and document.
    unsafe fn new(
        max_capacity: usize,
        capacity: usize,
        growth_strategy: GrowthStrategy,
        header_layout: Layout,
        elem_size: usize,
        elem_align: usize,
    ) -> Result<Self, TryReserveError> {
        let size = max_capacity
            .checked_mul(elem_size)
            .ok_or(CapacityOverflow)?;

        #[allow(clippy::cast_possible_wrap)]
        if size > isize::MAX as usize || capacity > max_capacity {
            return Err(CapacityOverflow.into());
        }

        // Nothing needs to be allocated: zero-sized elements (or a zero max capacity) and no
        // header. Use a dangling allocation and expose the whole capacity immediately.
        if size == 0 && header_layout.size() == 0 {
            let allocation = Allocation::dangling(elem_align);

            return Ok(VecInner {
                elements: allocation.ptr().cast(),
                capacity: max_capacity,
                len: 0,
                max_capacity,
                growth_strategy,
                allocation,
            });
        }

        // This can't overflow because `Layout`'s size can't exceed `isize::MAX`.
        let elements_offset = align_up(header_layout.size(), elem_align);

        // This can't overflow because `elements_offset` can be at most `1 << 63` and `size` can be
        // at most `isize::MAX`, totaling `usize::MAX`.
        let size = elements_offset + size;

        let page_size = page_size();
        let size = align_up(size, page_size);

        // The unaligned size was nonzero here, so a result of zero means `align_up` wrapped
        // around: the page-aligned size doesn't fit in a `usize`.
        if size == 0 {
            return Err(CapacityOverflow.into());
        }

        let align = cmp::max(header_layout.align(), elem_align);

        // The minimum additional size required to fulfill the alignment requirement of the header
        // and element. The virtual memory allocation is only guaranteed to be aligned to the page
        // size, so if the alignment is greater than the page size, we must pad the allocation.
        let align_size = cmp::max(align, page_size) - page_size;

        let size = align_size.checked_add(size).ok_or(CapacityOverflow)?;

        let allocation = Allocation::new(size).map_err(AllocError)?;
        let aligned_ptr = allocation.ptr().map_addr(|addr| align_up(addr, align));

        // This can't overflow because the size is already allocated.
        let initial_size = align_up(elements_offset + capacity * elem_size, page_size);

        if initial_size != 0 {
            allocation
                .commit(aligned_ptr, initial_size)
                .map_err(AllocError)?;
        }

        let elements = aligned_ptr.wrapping_add(elements_offset).cast();

        // The last committed page may have spare room; expose all of it as capacity.
        let capacity = if elem_size == 0 {
            max_capacity
        } else {
            (initial_size - elements_offset) / elem_size
        };

        Ok(VecInner {
            elements,
            capacity,
            len: 0,
            max_capacity,
            growth_strategy,
            allocation,
        })
    }

    // A placeholder that owns no memory; `max_capacity` is 0, so it can never grow.
    #[inline]
    const fn dangling(elem_align: usize) -> Self {
        let allocation = Allocation::dangling(elem_align);

        VecInner {
            elements: allocation.ptr().cast(),
            capacity: 0,
            len: 0,
            max_capacity: 0,
            growth_strategy: GrowthStrategy::new(),
            allocation,
        }
    }

    // Panicking reserve using the growth strategy; the common no-grow case stays inline.
    #[inline]
    #[track_caller]
    unsafe fn reserve(&mut self, additional: usize, elem_size: usize) {
        if self.needs_to_grow(additional) {
            unsafe { self.reserve_slow(additional, elem_size) };
        }
    }

    // Out-of-line slow path of `reserve`.
    #[cold]
    #[track_caller]
    unsafe fn reserve_slow(&mut self, additional: usize, elem_size: usize) {
        if let Err(err) = unsafe { self.grow(additional, elem_size) } {
            handle_error(err);
        }
    }

    // Panicking exact reserve: grows to the minimum required capacity only.
    #[track_caller]
    unsafe fn reserve_exact(&mut self, additional: usize, elem_size: usize) {
        if let Err(err) = unsafe { self.try_reserve_exact(additional, elem_size) } {
            handle_error(err);
        }
    }

    // Fallible reserve using the growth strategy.
    unsafe fn try_reserve(
        &mut self,
        additional: usize,
        elem_size: usize,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(additional) {
            unsafe { self.grow(additional, elem_size) }
        } else {
            Ok(())
        }
    }

    // Fallible exact reserve: grows to the minimum required capacity only.
    unsafe fn try_reserve_exact(
        &mut self,
        additional: usize,
        elem_size: usize,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(additional) {
            unsafe { self.grow_exact(additional, elem_size) }
        } else {
            Ok(())
        }
    }

    // The subtraction can't underflow because of the `len <= capacity` invariant.
    #[inline]
    fn needs_to_grow(&self, additional: usize) -> bool {
        additional > self.capacity - self.len
    }

    // Grows the capacity by at least one element, panicking on failure. Used by `Vec::push`.
    #[track_caller]
    unsafe fn grow_one(&mut self, elem_size: usize) {
        if let Err(err) = unsafe { self.grow(1, elem_size) } {
            handle_error(err);
        }
    }

    unsafe fn grow(&mut self, additional: usize, elem_size: usize) -> Result<(), TryReserveError> {
        unsafe { self.grow_inner(additional, false, elem_size) }
    }

    unsafe fn grow_exact(
        &mut self,
        additional: usize,
        elem_size: usize,
    ) -> Result<(), TryReserveError> {
        unsafe { self.grow_inner(additional, true, elem_size) }
    }

    // Commits the pages between the old and new capacity. With `exact`, grows only to the minimum
    // required capacity; otherwise takes the maximum of that and the growth strategy's proposal.
    unsafe fn grow_inner(
        &mut self,
        additional: usize,
        exact: bool,
        elem_size: usize,
    ) -> Result<(), TryReserveError> {
        debug_assert!(additional > 0);

        // Zero-sized elements start out with `capacity == max_capacity` (see `new`), so any
        // request to grow necessarily exceeds the maximum capacity.
        if elem_size == 0 {
            return Err(CapacityOverflow.into());
        }

        let required_capacity = self.len.checked_add(additional).ok_or(CapacityOverflow)?;

        if required_capacity > self.max_capacity {
            return Err(CapacityOverflow.into());
        }

        let old_capacity = self.capacity;

        let mut new_capacity = required_capacity;

        if !exact {
            new_capacity = cmp::max(new_capacity, self.growth_strategy.grow(old_capacity));
            new_capacity = cmp::min(new_capacity, self.max_capacity);
        }

        // Offset of the elements from the start of the allocation (header plus alignment
        // padding).
        let elements_offset = self.elements.addr() - self.allocation.ptr().addr();

        let page_size = page_size();

        // These can't overflow because the size is already allocated.
        let old_size = align_up(elements_offset + old_capacity * elem_size, page_size);
        let new_size = align_up(elements_offset + new_capacity * elem_size, page_size);

        let ptr = self.allocation.ptr().wrapping_add(old_size);
        let size = new_size - old_size;

        self.allocation.commit(ptr, size).map_err(AllocError)?;

        // Expose any spare room in the last committed page, but never exceed `max_capacity`.
        let new_capacity = (new_size - elements_offset) / elem_size;
        let new_capacity = cmp::min(new_capacity, self.max_capacity);

        self.capacity = new_capacity;

        Ok(())
    }
}
619
620impl<T> AsRef<Vec<T>> for Vec<T> {
621    #[inline]
622    fn as_ref(&self) -> &Vec<T> {
623        self
624    }
625}
626
627impl<T> AsMut<Vec<T>> for Vec<T> {
628    #[inline]
629    fn as_mut(&mut self) -> &mut Vec<T> {
630        self
631    }
632}
633
634impl<T> AsRef<[T]> for Vec<T> {
635    #[inline]
636    fn as_ref(&self) -> &[T] {
637        self
638    }
639}
640
641impl<T> AsMut<[T]> for Vec<T> {
642    #[inline]
643    fn as_mut(&mut self) -> &mut [T] {
644        self
645    }
646}
647
648impl<T> Borrow<[T]> for Vec<T> {
649    #[inline]
650    fn borrow(&self) -> &[T] {
651        self
652    }
653}
654
655impl<T> BorrowMut<[T]> for Vec<T> {
656    #[inline]
657    fn borrow_mut(&mut self) -> &mut [T] {
658        self
659    }
660}
661
662impl<T: fmt::Debug> fmt::Debug for Vec<T> {
663    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
664        fmt::Debug::fmt(&**self, f)
665    }
666}
667
668impl<T> Deref for Vec<T> {
669    type Target = [T];
670
671    #[inline]
672    fn deref(&self) -> &Self::Target {
673        self.as_slice()
674    }
675}
676
677impl<T> DerefMut for Vec<T> {
678    #[inline]
679    fn deref_mut(&mut self) -> &mut Self::Target {
680        self.as_mut_slice()
681    }
682}
683
impl<T> Drop for Vec<T> {
    fn drop(&mut self) {
        let elements = ptr::slice_from_raw_parts_mut(self.as_mut_ptr(), self.len());

        // SAFETY: We own the collection, and it is being dropped, which ensures that the elements
        // can't be accessed again.
        unsafe { elements.drop_in_place() };

        // NOTE(review): the backing memory is presumably released by `Allocation`'s own `Drop` —
        // confirm in the `Allocation` implementation.
    }
}
693
694impl<T: PartialEq<U>, U> PartialEq<Vec<U>> for Vec<T> {
695    #[inline]
696    fn eq(&self, other: &Vec<U>) -> bool {
697        **self == **other
698    }
699}
700
701impl<T: PartialEq<U>, U> PartialEq<&[U]> for Vec<T> {
702    #[inline]
703    fn eq(&self, other: &&[U]) -> bool {
704        **self == **other
705    }
706}
707
708impl<T: PartialEq<U>, U> PartialEq<&mut [U]> for Vec<T> {
709    #[inline]
710    fn eq(&self, other: &&mut [U]) -> bool {
711        **self == **other
712    }
713}
714
715impl<T: PartialEq<U>, U> PartialEq<Vec<U>> for &[T] {
716    #[inline]
717    fn eq(&self, other: &Vec<U>) -> bool {
718        **self == **other
719    }
720}
721
722impl<T: PartialEq<U>, U> PartialEq<Vec<U>> for &mut [T] {
723    #[inline]
724    fn eq(&self, other: &Vec<U>) -> bool {
725        **self == **other
726    }
727}
728
729impl<T: PartialEq<U>, U> PartialEq<[U]> for Vec<T> {
730    #[inline]
731    fn eq(&self, other: &[U]) -> bool {
732        **self == *other
733    }
734}
735
736impl<T: PartialEq<U>, U> PartialEq<Vec<U>> for [T] {
737    #[inline]
738    fn eq(&self, other: &Vec<U>) -> bool {
739        *self == **other
740    }
741}
742
743#[cfg(feature = "alloc")]
744impl<T: PartialEq<U> + Clone, U> PartialEq<Vec<U>> for alloc::borrow::Cow<'_, [T]> {
745    #[inline]
746    fn eq(&self, other: &Vec<U>) -> bool {
747        **self == **other
748    }
749}
750
751impl<T: PartialEq<U>, U, const N: usize> PartialEq<[U; N]> for Vec<T> {
752    #[inline]
753    fn eq(&self, other: &[U; N]) -> bool {
754        **self == *other
755    }
756}
757
758impl<T: PartialEq<U>, U, const N: usize> PartialEq<&[U; N]> for Vec<T> {
759    #[inline]
760    fn eq(&self, other: &&[U; N]) -> bool {
761        **self == **other
762    }
763}
764
765impl<T: Eq> Eq for Vec<T> {}
766
767impl<T> Extend<T> for Vec<T> {
768    #[inline]
769    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
770        for elem in iter {
771            self.push(elem);
772        }
773    }
774}
775
776impl<'a, T: Copy> Extend<&'a T> for Vec<T> {
777    #[inline]
778    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
779        for &elem in iter {
780            self.push(elem);
781        }
782    }
783}
784
785impl<T: Hash> Hash for Vec<T> {
786    #[inline]
787    fn hash<H: Hasher>(&self, state: &mut H) {
788        Hash::hash(&**self, state);
789    }
790}
791
792impl<T, I: SliceIndex<[T]>> Index<I> for Vec<T> {
793    type Output = I::Output;
794
795    #[inline]
796    fn index(&self, index: I) -> &Self::Output {
797        Index::index(&**self, index)
798    }
799}
800
801impl<T, I: SliceIndex<[T]>> IndexMut<I> for Vec<T> {
802    #[inline]
803    fn index_mut(&mut self, index: I) -> &mut Self::Output {
804        IndexMut::index_mut(&mut **self, index)
805    }
806}
807
808impl<'a, T> IntoIterator for &'a Vec<T> {
809    type Item = &'a T;
810
811    type IntoIter = slice::Iter<'a, T>;
812
813    #[inline]
814    fn into_iter(self) -> Self::IntoIter {
815        self.iter()
816    }
817}
818
819impl<'a, T> IntoIterator for &'a mut Vec<T> {
820    type Item = &'a mut T;
821
822    type IntoIter = slice::IterMut<'a, T>;
823
824    #[inline]
825    fn into_iter(self) -> Self::IntoIter {
826        self.iter_mut()
827    }
828}
829
830impl<T> IntoIterator for Vec<T> {
831    type Item = T;
832
833    type IntoIter = IntoIter<T>;
834
835    #[inline]
836    fn into_iter(self) -> Self::IntoIter {
837        IntoIter::new(self)
838    }
839}
840
841impl<T: PartialOrd> PartialOrd for Vec<T> {
842    #[inline]
843    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
844        PartialOrd::partial_cmp(&**self, &**other)
845    }
846}
847
848impl<T: Ord> Ord for Vec<T> {
849    #[inline]
850    fn cmp(&self, other: &Self) -> cmp::Ordering {
851        Ord::cmp(&**self, &**other)
852    }
853}
854
/// An iterator that moves out of a vector.
///
/// This struct is created by the [`into_iter`] method on [`Vec`].
///
/// [`into_iter`]: Vec::into_iter
pub struct IntoIter<T> {
    // Pointer to the next element to yield from the front. Invariant: `start <= end`.
    start: *const T,
    // One past the last remaining element. For zero-sized `T`, the remaining length is instead
    // encoded as the address difference `end - start` (see `IntoIter::new` and
    // `ExactSizeIterator::len`).
    end: *const T,
    // Keeps the backing virtual-memory allocation alive while iterating; never read directly.
    #[allow(dead_code)]
    allocation: Allocation,
    marker: PhantomData<T>,
}
867
// SAFETY: We own the collection, and synchronization to it is ensured using mutable references.
unsafe impl<T: Send> Send for IntoIter<T> {}

// SAFETY: We own the collection, and synchronization to it is ensured using mutable references.
unsafe impl<T: Sync> Sync for IntoIter<T> {}

// We own the collection, so this should be no different than for `Vec`.
impl<T: UnwindSafe> UnwindSafe for IntoIter<T> {}
876
impl<T> IntoIter<T> {
    #[inline]
    fn new(vec: Vec<T>) -> Self {
        // Disable `vec`'s destructor: ownership of the elements and the allocation moves into
        // the iterator.
        let mut vec = ManuallyDrop::new(vec);

        // SAFETY: `vec` is wrapped in a `ManuallyDrop` such that a double-free can't happen even
        // if a panic was possible below.
        let allocation = unsafe { ptr::read(&vec.inner.allocation) };

        let start = vec.as_mut_ptr();

        let end = if T::IS_ZST {
            // For zero-sized elements, encode the remaining length in the address of `end`.
            start.cast::<u8>().wrapping_add(vec.len()).cast::<T>()
        } else {
            // SAFETY: The modifier of `vec.inner.len` ensures that it is only done after writing
            // the new elements.
            unsafe { start.add(vec.len()) }
        };

        IntoIter {
            start,
            end,
            allocation,
            marker: PhantomData,
        }
    }

    /// Returns the remaining items of this iterator as a slice.
    #[inline]
    #[must_use]
    pub fn as_slice(&self) -> &[T] {
        // SAFETY: `start..start + len()` covers exactly the remaining, initialized elements.
        unsafe { slice::from_raw_parts(self.start, self.len()) }
    }

    /// Returns the remaining items of this iterator as a mutable slice.
    #[inline]
    #[must_use]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        // SAFETY: Same as `as_slice`, and `&mut self` guarantees exclusive access.
        unsafe { slice::from_raw_parts_mut(self.start.cast_mut(), self.len()) }
    }
}
918
impl<T> AsRef<[T]> for IntoIter<T> {
    #[inline]
    fn as_ref(&self) -> &[T] {
        self.as_slice()
    }
}

impl<T> AsMut<[T]> for IntoIter<T> {
    #[inline]
    fn as_mut(&mut self) -> &mut [T] {
        self.as_mut_slice()
    }
}

impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Shows only the elements that haven't been yielded yet.
        f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
    }
}
938
impl<T> Drop for IntoIter<T> {
    fn drop(&mut self) {
        // Only the elements that haven't been yielded yet remain to be dropped.
        let elements = ptr::slice_from_raw_parts_mut(self.start.cast_mut(), self.len());

        // SAFETY: We own the collection, and it is being dropped, which ensures that the elements
        // can't be accessed again.
        unsafe { elements.drop_in_place() };
    }
}
948
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.start == self.end {
            return None;
        }

        let ptr = if T::IS_ZST {
            // `end` encodes the remaining length for zero-sized elements, so count one off by
            // stepping its address down. Reading from `start` is fine: every read of a ZST
            // produces the same (zero-sized) value.
            self.end = self.end.cast::<u8>().wrapping_sub(1).cast::<T>();

            self.start
        } else {
            let old = self.start;

            // SAFETY: We checked that there are still elements remaining above.
            self.start = unsafe { old.add(1) };

            old
        };

        // SAFETY: We own the collection, and have just incremented the `start` pointer such that
        // this element can't be accessed again.
        Some(unsafe { ptr.read() })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.len();

        (len, Some(len))
    }

    #[inline]
    fn count(self) -> usize {
        // Consumes the iterator; the remaining elements are dropped by `Drop` when `self` goes
        // out of scope.
        self.len()
    }
}
988
impl<T> DoubleEndedIterator for IntoIter<T> {
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.start == self.end {
            return None;
        }

        let ptr = if T::IS_ZST {
            // `end` encodes the remaining length for zero-sized elements; stepping its address
            // down counts one element off. Reading from `start` is equivalent, as every read of
            // a ZST produces the same (zero-sized) value.
            self.end = self.end.cast::<u8>().wrapping_sub(1).cast::<T>();

            self.start
        } else {
            // SAFETY: We checked that there are still elements remaining above.
            self.end = unsafe { self.end.sub(1) };

            self.end
        };

        // SAFETY: We own the collection, and have just decremented the `end` pointer such that
        // this element can't be accessed again.
        Some(unsafe { ptr.read() })
    }
}
1012
1013impl<T> ExactSizeIterator for IntoIter<T> {
1014    #[inline]
1015    fn len(&self) -> usize {
1016        if T::IS_ZST {
1017            self.end.addr().wrapping_sub(self.start.addr())
1018        } else {
1019            // SAFETY:
1020            // - By our invariant, `self.end` is always greater than or equal to `self.start`.
1021            // - `start` and `end` were both created from the same object in `IntoIter::new`.
1022            // - `Vec::new` ensures that the allocation size doesn't exceed `isize::MAX` bytes.
1023            // - We know that the allocation doesn't wrap around the address space.
1024            unsafe { self.end.offset_from_unsigned(self.start) }
1025        }
1026    }
1027}
1028
// Once `start == end`, `next` keeps returning `None` without moving either pointer, so the
// iterator is genuinely fused.
impl<T> FusedIterator for IntoIter<T> {}
1030
/// The strategy to employ when growing a vector.
///
/// Note that arithmetic overflow is not a concern for any of the strategies: if the final result
/// of the calculation (in infinite precision) exceeds `max_capacity`, `max_capacity` is used as
/// the new capacity.
///
/// The constraints documented on each field are enforced by [`VecBuilder::growth_strategy`],
/// which panics when they are violated.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
// Non-exhaustive so that new strategies can be added without a breaking change.
#[non_exhaustive]
pub enum GrowthStrategy {
    /// The current capacity is multiplied by `numerator`, then divided by `denominator`.
    ///
    /// If the new capacity results in the last committed [page] having spare room for more
    /// elements, those elements are added to the new capacity.
    ///
    /// [page]: crate#pages
    Exponential {
        /// The number to multiply the current capacity by. Must be greater than `denominator`.
        numerator: usize,

        /// The number to divide the multiplied number by. Must be nonzero.
        denominator: usize,
    },

    /// `elements` is added to the current capacity.
    ///
    /// If the new capacity results in the last committed [page] having spare room for more
    /// elements, those elements are added to the new capacity.
    ///
    /// [page]: crate#pages
    Linear {
        /// The number of elements to add to the current capacity. Must be nonzero.
        elements: usize,
    },
}
1064
1065impl Default for GrowthStrategy {
1066    #[inline]
1067    fn default() -> Self {
1068        Self::new()
1069    }
1070}
1071
1072impl GrowthStrategy {
1073    /// Returns the default growth strategy: exponential growth with a growth factor of 2.
1074    #[inline]
1075    #[must_use]
1076    pub const fn new() -> Self {
1077        Self::Exponential {
1078            numerator: 2,
1079            denominator: 1,
1080        }
1081    }
1082
1083    #[track_caller]
1084    pub(crate) const fn validate(&self) {
1085        match *self {
1086            Self::Exponential {
1087                numerator,
1088                denominator,
1089            } => {
1090                assert!(numerator > denominator);
1091                assert!(denominator != 0);
1092            }
1093            Self::Linear { elements } => {
1094                assert!(elements != 0);
1095            }
1096        }
1097    }
1098
1099    pub(crate) fn grow(&self, old_capacity: usize) -> usize {
1100        match *self {
1101            GrowthStrategy::Exponential {
1102                numerator,
1103                denominator,
1104            } => saturating_mul_div(old_capacity, numerator, denominator),
1105            GrowthStrategy::Linear { elements } => old_capacity.saturating_add(elements),
1106        }
1107    }
1108}
1109
// An unsigned integer twice as wide as `usize`, so that a product of two `usize` values can
// always be represented exactly.
#[cfg(target_pointer_width = "64")]
type DoubleUsize = u128;
#[cfg(target_pointer_width = "32")]
type DoubleUsize = u64;
#[cfg(target_pointer_width = "16")]
type DoubleUsize = u32;

/// Computes `val * numerator / denominator` in double-width arithmetic, clamping the result to
/// `usize::MAX` instead of overflowing.
///
/// Panics if `denominator` is zero (ruled out by `GrowthStrategy::validate`).
fn saturating_mul_div(val: usize, numerator: usize, denominator: usize) -> usize {
    let product = val as DoubleUsize * numerator as DoubleUsize;
    let quotient = product / denominator as DoubleUsize;

    usize::try_from(quotient).unwrap_or(usize::MAX)
}
1122
1123#[cold]
1124#[track_caller]
1125pub(crate) fn handle_error(err: TryReserveError) -> ! {
1126    match err.kind {
1127        CapacityOverflow => capacity_overflow(),
1128        AllocError(err) => handle_alloc_error(err),
1129    }
1130}
1131
/// Panics with a capacity-overflow message.
///
/// Marked `#[cold]` for consistency with `handle_error` and `handle_alloc_error`: this is
/// equally a panic-only path, and the hint keeps it out of callers' hot code layout.
#[cold]
#[inline(never)]
#[track_caller]
pub(crate) fn capacity_overflow() -> ! {
    panic!("capacity overflow");
}
1137
1138// Dear Clippy, `Error` is 4 bytes.
1139#[allow(clippy::needless_pass_by_value)]
1140#[cold]
1141#[inline(never)]
1142#[track_caller]
1143pub(crate) fn handle_alloc_error(err: Error) -> ! {
1144    panic!("allocation failed: {err}");
1145}
1146
/// Error that can happen when trying to [reserve] or [commit] memory for a vector.
///
/// [reserve]: crate#reserving
/// [commit]: crate#committing
#[derive(Debug)]
pub struct TryReserveError {
    // What went wrong; see `TryReserveErrorKind`. Surfaced outside the crate through the
    // `Display` and `Error::source` impls below.
    pub(crate) kind: TryReserveErrorKind,
}
1155
1156impl From<TryReserveErrorKind> for TryReserveError {
1157    #[inline]
1158    fn from(kind: TryReserveErrorKind) -> Self {
1159        TryReserveError { kind }
1160    }
1161}
1162
/// The cause of a [`TryReserveError`].
#[derive(Debug)]
pub(crate) enum TryReserveErrorKind {
    /// The computed capacity exceeded the collection's maximum.
    CapacityOverflow,
    /// The operating system returned an error while allocating.
    AllocError(Error),
}
1168
1169impl fmt::Display for TryReserveError {
1170    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1171        match self.kind {
1172            CapacityOverflow => f.write_str(
1173                "memory allocation failed because the computed capacity exceeded the collection's \
1174                maximum",
1175            ),
1176            AllocError(_) => f.write_str(
1177                "memory allocation failed because the operating system returned an error",
1178            ),
1179        }
1180    }
1181}
1182
1183impl core::error::Error for TryReserveError {
1184    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
1185        match &self.kind {
1186            TryReserveErrorKind::CapacityOverflow => None,
1187            TryReserveErrorKind::AllocError(err) => Some(err),
1188        }
1189    }
1190}