// shared_vector/vector.rs
1use core::fmt::Debug;
2use core::ops::{Deref, DerefMut, Index, IndexMut};
3use core::ptr::NonNull;
4use core::{mem, ptr};
5use core::ops::RangeBounds;
6
7use crate::alloc::{AllocError, Allocator, Global};
8use crate::drain::Drain;
9use crate::raw::{
10    self, buffer_layout, AtomicRefCount, BufferSize, Header, HeaderBuffer, RefCount, VecHeader, move_data,
11};
12use crate::shared::{AtomicSharedVector, SharedVector};
13use crate::splice::Splice;
14use crate::{grow_amortized, DefaultRefCount};
15
/// A heap allocated, mutable contiguous buffer containing elements of type `T`, with manual deallocation.
///
/// <svg width="280" height="120" viewBox="0 0 74.08 31.75" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg"><defs><linearGradient id="a"><stop offset="0" stop-color="#491c9c"/><stop offset="1" stop-color="#d54b27"/></linearGradient><linearGradient xlink:href="#a" id="b" gradientUnits="userSpaceOnUse" x1="6.27" y1="34.86" x2="87.72" y2="13.24" gradientTransform="translate(-2.64 -18.48)"/></defs><rect width="10.57" height="10.66" x="7.94" y="18.45" ry="1.37" fill="#3dbdaa"/><circle cx="12.7" cy="18.48" r=".79" fill="#666"/><path d="M12.7 18.48c0-3.93 7.14-1.28 7.14-5.25" fill="none" stroke="#999" stroke-width=".86" stroke-linecap="round"/><rect width="68.79" height="10.58" x="2.65" y="2.68" ry="1.37" fill="url(#b)"/><rect width="15.35" height="9.51" x="3.18" y="3.21" ry=".9" fill="#78a2d4"/><rect width="9.26" height="9.51" x="19.85" y="3.2" ry=".9" fill="#eaa577"/><rect width="9.26" height="9.51" x="29.64" y="3.22" ry=".9" fill="#eaa577"/><rect width="9.26" height="9.51" x="39.43" y="3.22" ry=".9" fill="#eaa577"/><rect width="9.26" height="9.51" x="49.22" y="3.21" ry=".9" fill="#eaa577"/><circle cx="62.84" cy="7.97" r=".66" fill="#eaa577"/><circle cx="64.7" cy="7.97" r=".66" fill="#eaa577"/><circle cx="66.55" cy="7.97" r=".66" fill="#eaa577"/></svg>
///
/// See also `Vector<T, A>`.
///
/// This container is similar to this crate's `Vector` data structure with two key differences:
/// - It does not store an allocator field. Instead, all methods that require interacting with an allocator are
///   marked unsafe and take the allocator as parameter.
/// - `RawVector`'s `Drop` implementation does not automatically deallocate the memory. Instead the memory
///   must be manually deallocated via the `deallocate` method. Dropping a raw vector without deallocating it
///   silently leaks the memory.
///
/// `Vector<T, A>` is implemented as a thin wrapper around this type.
///
/// # Use cases
///
/// In most cases, `Vector<T, A>` is more appropriate. However in some situations it can be beneficial to not
/// store the allocator in the container. In complex data structures that contain many vectors, for example,
/// it may be preferable to store the allocator once at the root of the data structure than multiple times
/// in each of the internally managed vectors.
pub struct RawVector<T> {
    // Pointer to the first element; dangling when `header.cap == 0` (unallocated state).
    pub(crate) data: NonNull<T>,
    // Length and capacity of the buffer, counted in elements.
    pub(crate) header: VecHeader,
}
41
42impl<T> RawVector<T> {
43    /// Creates an empty, unallocated raw vector.
44    pub fn new() -> Self {
45        RawVector { data: NonNull::dangling(), header: VecHeader { len: 0, cap: 0 } }
46    }
47
    /// Creates an empty pre-allocated vector with a given storage capacity.
    ///
    /// Does not allocate memory if `cap` is zero.
    pub fn try_with_capacity<A: Allocator>(allocator: &A, cap: usize) -> Result<RawVector<T>, AllocError> {
        if cap == 0 {
            return Ok(RawVector::new());
        }

        unsafe {
            // Allocate a single buffer holding a header followed by the elements.
            // Note that the returned `cap` (shadowing the requested one) is the
            // capacity actually recorded in the header.
            let (base_ptr, cap) = raw::allocate_header_buffer::<T, A>(cap, allocator)?;
            // NOTE(review): the header type used for layout purposes is
            // `Header<DefaultRefCount, A>` even though `RawVector` does no
            // reference counting — presumably to keep the layout compatible with
            // the shared vector types; confirm against the `raw` module.
            let data = NonNull::new_unchecked(raw::data_ptr::<raw::Header<DefaultRefCount, A>, T>(
                base_ptr.cast(),
            ));
            Ok(RawVector {
                data,
                header: VecHeader { cap: cap as BufferSize, len: 0 },
            })
        }
    }
67
68    pub fn try_from_slice<A: Allocator>(allocator: &A, data: &[T]) -> Result<Self, AllocError>
69    where
70        T: Clone,
71    {
72        let mut v = Self::try_with_capacity(allocator, data.len())?;
73        unsafe {
74            v.extend_from_slice(allocator, data);
75        }
76
77        Ok(v)
78    }
79
80    /// Creates a raw vector with `n` clones of `elem`.
81    pub fn try_from_elem<A: Allocator>(allocator: &A, elem: T, n: usize) -> Result<Self, AllocError>
82    where
83        T: Clone,
84    {
85        if n == 0 {
86            return Ok(Self::new());
87        }
88
89        let mut v = Self::try_with_capacity(allocator, n)?;
90        unsafe {
91            for _ in 0..(n - 1) {
92                v.push(allocator, elem.clone())
93            }
94
95            v.push(allocator, elem);
96        }
97
98        Ok(v)
99    }
100
101    /// Clears and deallocates this raw vector, leaving it in its unallocated state.
102    ///
103    /// It is safe (no-op) to call `deallocate` on a vector that is already in its unallocated state.
104    ///
105    /// # Safety
106    ///
107    /// The provided allocator must be the one currently used by this raw vector.
108    pub unsafe fn deallocate<A: Allocator>(&mut self, allocator: &A) {
109        if self.header.cap == 0 {
110            return;
111        }
112
113        self.clear();
114
115        self.deallocate_buffer(allocator);
116
117        self.data = NonNull::dangling();
118        self.header.cap = 0;
119        self.header.len = 0;
120    }
121
122    #[inline]
123    /// Returns `true` if the vector contains no elements.
124    pub fn is_empty(&self) -> bool {
125        self.header.len == 0
126    }
127
128    #[inline]
129    /// Returns the number of elements in the vector, also referred to as its ‘length’.
130    pub fn len(&self) -> usize {
131        self.header.len as usize
132    }
133
134    #[inline]
135    /// Returns the total number of elements the vector can hold without reallocating.
136    pub fn capacity(&self) -> usize {
137        self.header.cap as usize
138    }
139
140    /// Returns number of elements that can be added without reallocating.
141    #[inline]
142    pub fn remaining_capacity(&self) -> usize {
143        (self.header.cap - self.header.len) as usize
144    }
145
146    #[inline]
147    fn data_ptr(&self) -> *mut T {
148        self.data.as_ptr()
149    }
150
151    #[inline]
152    pub fn as_slice(&self) -> &[T] {
153        unsafe { core::slice::from_raw_parts(self.data_ptr(), self.len()) }
154    }
155
156    #[inline]
157    pub fn as_mut_slice(&mut self) -> &mut [T] {
158        unsafe { core::slice::from_raw_parts_mut(self.data_ptr(), self.len()) }
159    }
160
161    /// Clears the vector, removing all values.
162    pub fn clear(&mut self) {
163        unsafe {
164            raw::clear(self.data_ptr(), &mut self.header)
165        }
166    }
167
168    unsafe fn base_ptr<A: Allocator>(&self, _allocator: &A) -> NonNull<u8> {
169        debug_assert!(self.header.cap > 0);
170        raw::header_from_data_ptr::<Header<DefaultRefCount, A>, T>(self.data).cast()
171    }
172
    /// Appends an element to the back of a collection.
    ///
    /// # Safety
    ///
    /// The provided allocator must be the one currently used by this raw vector.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `u32::MAX` bytes.
    #[inline]
    pub unsafe fn push<A: Allocator>(&mut self, allocator: &A, val: T) {
        // Only grow when full; the cold reallocation path is kept out of the
        // hot path in `try_realloc_additional`.
        if self.header.len == self.header.cap {
            self.try_realloc_additional(allocator, 1).unwrap();
        }

        raw::push_assuming_capacity(self.data_ptr(), &mut self.header, val);
    }
190
191    /// Appends an element if there is sufficient spare capacity, otherwise an error is returned
192    /// with the element.
193    ///
194    /// Unlike push this method will not reallocate when there’s insufficient capacity.
195    /// The caller should use reserve or try_reserve to ensure that there is enough capacity.
196    #[inline]
197    pub fn push_within_capacity(&mut self, val: T) -> Result<(), T> {
198        if self.header.len == self.header.cap {
199            return Err(val);
200        }
201
202        unsafe {
203            let dst = self.data_ptr().add(self.header.len as usize);
204            self.header.len += 1;
205            ptr::write(dst, val);
206        }
207
208        Ok(())
209    }
210
    /// Removes the last element from the vector and returns it, or `None` if it is empty.
    #[inline]
    pub fn pop(&mut self) -> Option<T> {
        // SAFETY: `data_ptr` and `header` describe this vector's buffer;
        // `raw::pop` handles the length bookkeeping.
        unsafe {
            raw::pop(self.data_ptr(), &mut self.header)
        }
    }
218
    /// Removes and returns the element at position `index` within the vector,
    /// shifting all elements after it to the left.
    ///
    /// Note: because this shifts over the remaining elements, it is an
    /// `O(len - index)` operation. If ordering does not matter, prefer
    /// `swap_remove`.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    pub fn remove(&mut self, index: usize) -> T {
        #[cold]
        #[inline(never)]
        #[track_caller]
        fn assert_failed(index: usize, len: usize) -> ! {
            panic!("removal index (is {index}) should be < len (is {len})");
        }

        let len = self.len();
        if index >= len {
            assert_failed(index, len);
        }
        unsafe {
            // infallible
            let ret;
            {
                // the place we are taking from.
                let ptr = self.as_mut_ptr().add(index);
                // copy it out, unsafely having a copy of the value on
                // the stack and in the vector at the same time.
                ret = ptr::read(ptr);

                // Shift everything down to fill in that spot.
                ptr::copy(ptr.add(1), ptr, len - index - 1);
            }
            self.header.len = len as u32 - 1;
            ret
        }
    }
255
256    /// Removes an element from the vector and returns it.
257    ///
258    /// The removed element is replaced by the last element of the vector.
259    ///
260    /// # Panics
261    ///
262    /// Panics if index is out of bounds.
263    #[inline]
264    pub fn swap_remove(&mut self, idx: usize) -> T {
265        let len = self.len();
266        assert!(idx < len);
267
268        unsafe {
269            let ptr = self.data_ptr().add(idx);
270            let item = ptr::read(ptr);
271
272            let last_idx = len - 1;
273            if idx != last_idx {
274                let last_ptr = self.data_ptr().add(last_idx);
275                ptr::write(ptr, ptr::read(last_ptr));
276            }
277
278            self.header.len -= 1;
279
280            item
281        }
282    }
283
    /// Inserts an element at position `index` within the vector, shifting all
    /// elements after it to the right.
    ///
    /// # Safety
    ///
    /// The provided allocator must be the one currently used by this raw vector.
    ///
    /// # Panics
    ///
    /// Panics if `index > len`.
    pub unsafe fn insert<A: Allocator>(&mut self, allocator: &A, index: usize, element: T) {
        #[cold]
        #[inline(never)]
        fn assert_failed(index: usize, len: usize) -> ! {
            panic!("insertion index (is {index}) should be <= len (is {len})");
        }

        unsafe {
            // space for the new element
            if self.header.len == self.header.cap {
                self.try_reserve(allocator, 1).unwrap();
            }

            let len = self.len();

            // infallible
            // The spot to put the new value
            {
                // NOTE(review): the offset is computed before `index` is
                // validated; for `index > len` the `add(index)` below is
                // out of bounds before `assert_failed` fires — confirm this
                // matches the intended contract.
                let p = self.as_mut_ptr().add(index);
                if index < len {
                    // Shift everything over to make space. (Duplicating the
                    // `index`th element into two consecutive places.)
                    ptr::copy(p, p.add(1), len - index);
                } else if index == len {
                    // No elements need shifting.
                } else {
                    assert_failed(index, len);
                }
                // Write it in, overwriting the first copy of the `index`th
                // element.
                ptr::write(p, element);
            }
            self.header.len += 1;
        }
    }
325
    /// Clones and appends the contents of the slice to the back of a collection.
    ///
    /// # Safety
    ///
    /// The provided allocator must be the one currently used by this raw vector.
    ///
    /// # Panics
    ///
    /// Panics if the reservation for the additional elements fails.
    pub unsafe fn extend_from_slice<A: Allocator>(&mut self, allocator: &A, slice: &[T])
    where
        T: Clone,
    {
        self.try_reserve(allocator, slice.len()).unwrap();
        unsafe {
            // SAFETY: the reservation above guarantees room for
            // `slice.len()` more elements.
            raw::extend_from_slice_assuming_capacity(self.data_ptr(), &mut self.header, slice);
        }
    }
340
    /// Moves all the elements of `other` into `self`, leaving `other` empty.
    ///
    /// # Safety
    ///
    /// The provided allocator must be the one currently used by this raw vector.
    pub unsafe fn append<A: Allocator>(&mut self, allocator: &A, other: &mut Self)
    where
        T: Clone,
    {
        // NOTE(review): the `T: Clone` bound looks unnecessary here — the
        // elements are moved via `move_data`, not cloned. Confirm and
        // consider relaxing the bound.
        if other.is_empty() {
            return;
        }

        self.try_reserve(allocator, other.len()).unwrap();

        unsafe {
            // Moves the elements over and updates both headers
            // (`other` is left empty).
            move_data(other.data_ptr(), &mut other.header, self.data_ptr(), &mut self.header);
        }
    }
360
    /// Appends the contents of an iterator to the back of a collection.
    ///
    /// # Safety
    ///
    /// The provided allocator must be the one currently used by this raw vector.
    pub unsafe fn extend<A: Allocator>(&mut self, allocator: &A, data: impl IntoIterator<Item = T>) {
        let mut iter = data.into_iter();
        let (min, max) = iter.size_hint();
        // Reserve using the upper bound of the size hint when there is one,
        // so that well-behaved iterators take the no-realloc fast path below.
        self.try_reserve(allocator, max.unwrap_or(min)).unwrap();
        unsafe {
            // Fast path: write items directly into the spare capacity.
            self.extend_within_capacity(&mut iter);

            // Slow path: the size hint under-estimated; push the remaining
            // items one by one (each push may reallocate).
            for item in iter {
                self.push(allocator, item);
            }
        }
    }
378
    /// Writes items from `iter` into the spare capacity, stopping when the
    /// capacity is exhausted. The iterator may be left partially consumed.
    unsafe fn extend_within_capacity(&mut self, iter: &mut impl Iterator<Item = T>) {
        let n = self.remaining_capacity() as BufferSize;
        if n == 0 {
            return;
        }

        // First free slot, one past the last initialized element.
        let mut ptr = self.data_ptr().add(self.len());
        let mut count = 0;

        unsafe {
            for item in iter {
                ptr::write(ptr, item);
                ptr = ptr.add(1);
                count += 1;
                if count == n {
                    break;
                }
            }
            // The length is committed once at the end: if the iterator panics
            // mid-way, the already-written items are leaked rather than
            // double-dropped.
            self.header.len += count;
        }
    }
400
401    /// Allocate a clone of this buffer.
402    ///
403    /// The provided allocator does not need to be the one this raw vector was created with.
404    /// The returned raw vector is considered to be created with the provided allocator.
405    pub fn clone_buffer<A: Allocator>(&self, allocator: &A) -> Self
406    where
407        T: Clone,
408    {
409        self.clone_buffer_with_capacity(allocator, self.len())
410    }
411
    /// Allocate a clone of this buffer with a different capacity
    ///
    /// The capacity must be at least as large as the buffer's length.
    pub fn clone_buffer_with_capacity<A: Allocator>(&self, allocator: &A, cap: usize) -> Self
    where
        T: Clone,
    {
        // `max(len)` keeps the call sound even if the caller passes a `cap`
        // smaller than the length, despite the documented requirement above.
        let mut clone =
            Self::try_with_capacity(allocator, cap.max(self.len())).unwrap();

        unsafe {
            // SAFETY: the new buffer holds at least `self.len()` elements.
            raw::extend_from_slice_assuming_capacity(clone.data_ptr(), &mut clone.header, self.as_slice());
        }

        clone
    }
428
    // Note: Marking this #[inline(never)] is a pretty large regression in the push benchmark.
    /// Grows the buffer (amortized) so that at least `additional` more
    /// elements fit.
    #[cold]
    unsafe fn try_realloc_additional<A: Allocator>(&mut self, allocator: &A, additional: usize) -> Result<(), AllocError> {
        let new_cap = grow_amortized(self.len(), additional);
        // A result smaller than the current length presumably indicates an
        // overflow in the amortized growth computation; report it as an
        // allocation failure rather than corrupting the buffer.
        if new_cap < self.len() {
            return Err(AllocError);
        }

        self.try_realloc_with_capacity(allocator, new_cap)
    }
439
440    pub fn truncate(&mut self, new_len: usize) {
441        let old_len = self.header.len as usize;
442        if old_len <= new_len {
443            return;
444        }
445
446        unsafe {
447            let mut elt = self.data_ptr().add(new_len);
448            let end = self.data_ptr().add(old_len);
449            while elt < end {
450                ptr::drop_in_place(elt);
451                elt = elt.add(1)
452            }
453        }
454
455        self.header.len = new_len as u32;
456    }
457
458    #[cold]
459    unsafe fn try_realloc_with_capacity<A: Allocator>(&mut self, allocator: &A, new_cap: usize) -> Result<(), AllocError> {
460        type R = DefaultRefCount;
461
462        if new_cap == 0 {
463            self.deallocate(allocator);
464            return Ok(());
465        }
466
467        let new_layout = buffer_layout::<Header<R, A>, T>(new_cap).unwrap();
468
469        let old_len = self.len();
470        if old_len > new_cap {
471            self.truncate(new_cap);
472        }
473
474        let new_alloc = if self.header.cap == 0 {
475            allocator.allocate(new_layout)
476        } else {
477            let old_cap = self.capacity();
478            let old_ptr = self.base_ptr(allocator);
479            let old_layout = buffer_layout::<Header<R, A>, T>(old_cap).unwrap();
480            let new_layout = buffer_layout::<Header<R, A>, T>(new_cap).unwrap();
481
482            if new_layout.size() >= old_layout.size() {
483                allocator.grow(old_ptr, old_layout, new_layout)
484            } else {
485                allocator.shrink(old_ptr, old_layout, new_layout)
486            }
487        };
488
489        // If the new capacity is lower than the length, we already dropped
490        // the elements after the new capacity, so make sure to write the
491        // length before propagating a potential allocation error.
492        self.header.len = self.header.len.min(new_cap as u32);
493
494        // If allocation failed, we return an error here.
495        let new_alloc = new_alloc?;
496
497        // The data and capacity, however must be only written once we know that
498        // the grow/shrink operation suceeded.
499        let new_data_ptr = crate::raw::data_ptr::<Header<R, A>, T>(new_alloc.cast());
500        self.data = NonNull::new_unchecked(new_data_ptr);
501        self.header.cap = new_cap as u32;
502
503        Ok(())
504    }
505
506    /// Attempts to move this raw vector into another allocator.
507    ///
508    /// All of the data is copied over into a new allocation in `new_allocator` after which
509    /// the old allocation is deallocated from `old_allocator`.
510    ///
511    /// If this method succeeds, `new_allocator` becomes the allocator currently used by this
512    /// raw vector.
513    ///
514    /// # Safety
515    ///
516    /// The provided `old_allocator` must be the one currently used by this raw vector.
517    ///
518    /// # Error
519    ///
520    /// If reallocation fails:
521    ///  - The vector remains in its current state, still associated to the old allocator.
522    ///  - An allocation error is returned.
523    #[cold]
524    pub unsafe fn try_reallocate_in<OldA: Allocator, NewA: Allocator>(
525        &mut self,
526        old_allocator: &OldA,
527        new_allocator: &NewA,
528        new_cap: usize,
529    ) -> Result<(), AllocError> {
530        type R = DefaultRefCount;
531
532        let new_layout = buffer_layout::<Header<R, NewA>, T>(new_cap).unwrap();
533
534        if new_cap == 0 {
535            self.deallocate(old_allocator);
536            return Ok(());
537        }
538
539        let new_alloc = if new_cap > 0 {
540            Some(new_allocator.allocate(new_layout)?)
541        } else {
542            None
543        };
544
545        let old_len = self.len();
546        if old_len > new_cap {
547            self.truncate(new_cap);
548        }
549
550        let old_cap = self.capacity();
551
552        let old_base_ptr = if old_cap > 0 {
553            Some(self.base_ptr(old_allocator))
554        } else {
555            None
556        };
557
558        self.data = if let Some(new_alloc) = new_alloc {
559            let new_data_ptr = crate::raw::data_ptr::<Header<R, NewA>, T>(new_alloc.cast());
560
561            let copy_size = old_len.min(new_cap);
562            if copy_size > 0 {
563                let old_data_ptr = self.data_ptr();
564                ptr::copy_nonoverlapping(old_data_ptr, new_data_ptr, copy_size);
565            }
566
567            NonNull::new_unchecked(new_data_ptr)
568        } else {
569            NonNull::dangling()
570        };
571
572        self.header.cap = new_cap as u32;
573        self.header.len = self.header.len.min(new_cap as u32);
574
575        if let Some(old_base_ptr) = old_base_ptr {
576            let old_layout = buffer_layout::<Header<R, OldA>, T>(old_cap).unwrap();
577            old_allocator.deallocate(old_base_ptr, old_layout);
578        }
579
580        Ok(())
581    }
582
    // Deallocates the memory, does not drop the vector's content.
    // Resets the vector to the unallocated state.
    unsafe fn deallocate_buffer<A: Allocator>(&mut self, allocator: &A) {
        // Recover the layout and base pointer of the full (header + elements)
        // allocation, then hand it back to the allocator.
        let layout = buffer_layout::<Header<DefaultRefCount, A>, T>(self.capacity()).unwrap();
        let ptr = self.base_ptr(allocator);

        allocator.deallocate(ptr, layout);

        // Back to the unallocated state.
        self.header.cap = 0;
        self.header.len = 0;
        self.data = NonNull::dangling();
    }
594
595    /// Tries to reserve at least enough space for `additional` extra items.
596    ///
597    /// # Safety
598    ///
599    /// The provided allocator must be the one currently used by this raw vector.
600    #[inline]
601    pub unsafe fn try_reserve<A: Allocator>(&mut self, allocator: &A, additional: usize) -> Result<(), AllocError> {
602        if self.remaining_capacity() < additional {
603            self.try_realloc_additional(allocator, additional)?;
604        }
605
606        Ok(())
607    }
608
    /// Tries to reserve the minimum capacity for at least `additional` elements to be inserted in the given vector.
    ///
    /// Unlike `try_reserve`, this will not deliberately over-allocate to speculatively avoid frequent allocations.
    /// After calling `try_reserve_exact`, capacity will be greater than or equal to `self.len() + additional`.
    /// Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it requests. Therefore, capacity can not
    /// be relied upon to be precisely minimal. Prefer `try_reserve` if future insertions are expected.
    ///
    /// # Safety
    ///
    /// The provided allocator must be the one currently used by this raw vector.
    pub unsafe fn try_reserve_exact<A: Allocator>(&mut self, allocator: &A, additional: usize) -> Result<(), AllocError> {
        if self.remaining_capacity() >= additional {
            return Ok(());
        }

        self.try_realloc_with_capacity(allocator, self.len() + additional)
    }
629
630    /// Shrinks the capacity of the vector with a lower bound.
631    ///
632    /// The capacity will remain at least as large as both the length and the supplied value.
633    /// If the current capacity is less than the lower limit, this is a no-op.
634    ///
635    /// # Safety
636    ///
637    /// The provided allocator must be the one currently used by this raw vector.
638    pub unsafe fn shrink_to<A: Allocator>(&mut self, allocator: &A, min_capacity: usize)
639    {
640        let min_capacity = min_capacity.max(self.len());
641        if self.capacity() <= min_capacity {
642            return;
643        }
644
645        self.try_realloc_with_capacity(allocator, min_capacity).unwrap();
646    }
647
648    /// Shrinks the capacity of the vector as much as possible.
649    ///
650    /// # Safety
651    ///
652    /// The provided allocator must be the one currently used by this raw vector.
653    pub unsafe fn shrink_to_fit<A: Allocator>(&mut self, allocator: &A)
654    {
655        self.shrink_to(allocator, self.len())
656    }
657
    /// Removes the specified range from the vector in bulk, returning all
    /// removed elements as an iterator. If the iterator is dropped before
    /// being fully consumed, it drops the remaining removed elements.
    ///
    /// The returned iterator keeps a mutable borrow on the vector to optimize
    /// its implementation.
    ///
    /// # Panics
    ///
    /// Panics if the starting point is greater than the end point or if
    /// the end point is greater than the length of the vector.
    ///
    /// # Leaking
    ///
    /// If the returned iterator goes out of scope without being dropped (due to
    /// [`mem::forget`], for example), the vector may have lost and leaked
    /// elements arbitrarily, including elements outside the range.
    ///
    pub fn drain<R>(&mut self, range: R) -> Drain<'_, T>
    where
        R: RangeBounds<usize>,
    {
        // Memory safety
        //
        // When the Drain is first created, it shortens the length of
        // the source vector to make sure no uninitialized or moved-from elements
        // are accessible at all if the Drain's destructor never gets to run.
        //
        // Drain will ptr::read out the values to remove.
        // When finished, remaining tail of the vec is copied back to cover
        // the hole, and the vector length is restored to the new length.
        //
        use core::ops::Bound::*;
        let len = self.len();
        // NOTE(review): `*n + 1` can overflow for an inclusive end bound of
        // `usize::MAX` (std's `Vec::drain` routes this through `slice::range`,
        // which panics). Confirm the wrap-around behavior here is acceptable.
        let end = match range.end_bound() {
            Included(n) => *n + 1,
            Excluded(n) => *n,
            Unbounded => len
        };
        let start = match range.start_bound() {
            Included(n) => *n,
            Excluded(n) => *n+1,
            Unbounded => 0
        };
        assert!(end <= len);
        assert!(start <= end);

        unsafe {
            // Set self.vec length's to start, to be safe in case Drain is leaked
            self.header.len = start as u32;
            let range_slice = core::slice::from_raw_parts(self.data.as_ptr().add(start), end - start);
            Drain {
                tail_start: end,
                tail_len: len - end,
                iter: range_slice.iter(),
                vec: NonNull::from(self),
            }
        }
    }
717
    /// Creates a splicing iterator that replaces the specified range in the vector
    /// with the given `replace_with` iterator and yields the removed items.
    /// `replace_with` does not need to be the same length as `range`.
    ///
    /// `range` is removed even if the iterator is not consumed until the end.
    ///
    /// It is unspecified how many elements are removed from the vector
    /// if the `Splice` value is leaked.
    ///
    /// The input iterator `replace_with` is only consumed when the `Splice` value is dropped.
    ///
    /// This is optimal if:
    ///
    /// * The tail (elements in the vector after `range`) is empty,
    /// * or `replace_with` yields fewer or equal elements than `range`’s length
    /// * or the lower bound of its `size_hint()` is exact.
    ///
    /// Otherwise, a temporary vector is allocated and the tail is moved twice.
    ///
    /// # Safety
    ///
    /// The provided allocator must be the one currently used by this raw vector.
    ///
    /// # Panics
    ///
    /// Panics if the starting point is greater than the end point or if
    /// the end point is greater than the length of the vector.
    ///
    pub unsafe fn splice<'l, A, R, I>(
        &'l mut self,
        allocator: &'l A,
        range: R,
        replace_with: I
    ) -> Splice<'l, <I as IntoIterator>::IntoIter, A>
    where
        A: Allocator,
        R: RangeBounds<usize>,
        I: IntoIterator<Item = T>,
    {
        // The heavy lifting (removal, insertion, tail management) happens in
        // `Splice`'s iterator/drop implementations.
        Splice {
            drain: self.drain(range),
            replace_with: replace_with.into_iter(),
            allocator,
        }
    }
759
760    /// Retains only the elements specified by the predicate.
761    ///
762    /// In other words, remove all elements `e` for which `f(&e)` returns `false`.
763    /// This method operates in place, visiting each element exactly once in the
764    /// original order, and preserves the order of the retained elements.
765    pub fn retain<F>(&mut self, mut f: F)
766    where
767        F: FnMut(&T) -> bool,
768    {
769        self.retain_mut(|elem| f(elem));
770    }
771
    /// Retains only the elements specified by the predicate, passing a mutable reference to it.
    ///
    /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`.
    /// This method operates in place, visiting each element exactly once in the
    /// original order, and preserves the order of the retained elements.
    pub fn retain_mut<F>(&mut self, mut f: F)
    where
        F: FnMut(&mut T) -> bool,
    {
        let original_len = self.len();
        // Avoid double drop if the drop guard is not executed,
        // since we may make some holes during the process.
        self.header.len = 0;

        // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked]
        //      |<-              processed len   ->| ^- next to check
        //                  |<-  deleted cnt     ->|
        //      |<-              original_len                          ->|
        // Kept: Elements which predicate returns true on.
        // Hole: Moved or dropped element slot.
        // Unchecked: Unchecked valid elements.
        //
        // This drop guard will be invoked when predicate or `drop` of element panicked.
        // It shifts unchecked elements to cover holes and `set_len` to the correct length.
        // In cases when predicate and `drop` never panic, it will be optimized out.
        struct BackshiftOnDrop<'a, T> {
            v: &'a mut RawVector<T>,
            processed_len: usize,
            deleted_cnt: usize,
            original_len: usize,
        }

        impl<T> Drop for BackshiftOnDrop<'_, T> {
            fn drop(&mut self) {
                if self.deleted_cnt > 0 {
                    // SAFETY: Trailing unchecked items must be valid since we never touch them.
                    unsafe {
                        ptr::copy(
                            self.v.as_ptr().add(self.processed_len),
                            self.v.as_mut_ptr().add(self.processed_len - self.deleted_cnt),
                            self.original_len - self.processed_len,
                        );
                    }
                }
                // SAFETY: After filling holes, all items are in contiguous memory.
                self.v.header.len = (self.original_len - self.deleted_cnt) as u32;
            }
        }

        let mut g = BackshiftOnDrop { v: self, processed_len: 0, deleted_cnt: 0, original_len };

        // Core scan: `DELETED` selects whether holes already exist (and thus
        // whether kept elements need to be shifted down into them).
        fn process_loop<F, T, const DELETED: bool>(
            original_len: usize,
            f: &mut F,
            g: &mut BackshiftOnDrop<'_, T>,
        ) where
            F: FnMut(&mut T) -> bool,
        {
            while g.processed_len != original_len {
                // SAFETY: Unchecked element must be valid.
                let cur = unsafe { &mut *g.v.as_mut_ptr().add(g.processed_len) };
                if !f(cur) {
                    // Advance early to avoid double drop if `drop_in_place` panicked.
                    g.processed_len += 1;
                    g.deleted_cnt += 1;
                    // SAFETY: We never touch this element again after dropped.
                    unsafe { ptr::drop_in_place(cur) };
                    // We already advanced the counter.
                    if DELETED {
                        continue;
                    } else {
                        break;
                    }
                }
                if DELETED {
                    // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with current element.
                    // We use copy for move, and never touch this element again.
                    unsafe {
                        let hole_slot = g.v.as_mut_ptr().add(g.processed_len - g.deleted_cnt);
                        ptr::copy_nonoverlapping(cur, hole_slot, 1);
                    }
                }
                g.processed_len += 1;
            }
        }

        // Stage 1: Nothing was deleted.
        process_loop::<F, T, false>(original_len, &mut f, &mut g);

        // Stage 2: Some elements were deleted.
        process_loop::<F, T, true>(original_len, &mut f, &mut g);

        // All items are processed. This can be optimized to `set_len` by LLVM.
        drop(g);
    }
867
868    /// Transfers ownership of this raw vector's contents to the one that is returned, and leaves
869    /// this one empty and unallocated.
870    pub fn take(&mut self) -> Self {
871        mem::replace(self, RawVector::new())
872    }
873}
874
875impl<T: PartialEq<T>> PartialEq<RawVector<T>> for RawVector<T> {
876    fn eq(&self, other: &Self) -> bool {
877        self.as_slice() == other.as_slice()
878    }
879}
880
881impl<T: PartialEq<T>> PartialEq<&[T]> for RawVector<T> {
882    fn eq(&self, other: &&[T]) -> bool {
883        self.as_slice() == *other
884    }
885}
886
887impl<T: Eq> Eq for RawVector<T> {
888
889}
890
891impl<T> AsRef<[T]> for RawVector<T> {
892    fn as_ref(&self) -> &[T] {
893        self.as_slice()
894    }
895}
896
897impl<T> AsMut<[T]> for RawVector<T> {
898    fn as_mut(&mut self) -> &mut [T] {
899        self.as_mut_slice()
900    }
901}
902
903impl<T> Default for RawVector<T> {
904    fn default() -> Self {
905        Self::new()
906    }
907}
908
909impl<'a, T> IntoIterator for &'a RawVector<T> {
910    type Item = &'a T;
911    type IntoIter = core::slice::Iter<'a, T>;
912    fn into_iter(self) -> core::slice::Iter<'a, T> {
913        self.as_slice().iter()
914    }
915}
916
917impl<'a, T> IntoIterator for &'a mut RawVector<T> {
918    type Item = &'a mut T;
919    type IntoIter = core::slice::IterMut<'a, T>;
920    fn into_iter(self) -> core::slice::IterMut<'a, T> {
921        self.as_mut_slice().iter_mut()
922    }
923}
924
925impl<T, I> Index<I> for RawVector<T>
926where
927    I: core::slice::SliceIndex<[T]>,
928{
929    type Output = <I as core::slice::SliceIndex<[T]>>::Output;
930    fn index(&self, index: I) -> &Self::Output {
931        self.as_slice().index(index)
932    }
933}
934
935impl<T, I> IndexMut<I> for RawVector<T>
936where
937    I: core::slice::SliceIndex<[T]>,
938{
939    fn index_mut(&mut self, index: I) -> &mut Self::Output {
940        self.as_mut_slice().index_mut(index)
941    }
942}
943
944impl<T> Deref for RawVector<T> {
945    type Target = [T];
946    fn deref(&self) -> &[T] {
947        self.as_slice()
948    }
949}
950
951impl<T> DerefMut for RawVector<T> {
952    fn deref_mut(&mut self) -> &mut [T] {
953        self.as_mut_slice()
954    }
955}
956
957impl<T: Debug> Debug for RawVector<T> {
958    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
959        self.as_slice().fmt(f)
960    }
961}
962
963impl<T: core::hash::Hash> core::hash::Hash for RawVector<T> {
964    fn hash<H>(&self, state: &mut H) where H: core::hash::Hasher {
965        self.as_slice().hash(state)
966    }
967}
968
969
970/// A heap allocated, mutable contiguous buffer containing elements of type `T`.
971///
972/// <svg width="280" height="120" viewBox="0 0 74.08 31.75" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg"><defs><linearGradient id="a"><stop offset="0" stop-color="#491c9c"/><stop offset="1" stop-color="#d54b27"/></linearGradient><linearGradient xlink:href="#a" id="b" gradientUnits="userSpaceOnUse" x1="6.27" y1="34.86" x2="87.72" y2="13.24" gradientTransform="translate(-2.64 -18.48)"/></defs><rect width="10.57" height="10.66" x="7.94" y="18.45" ry="1.37" fill="#3dbdaa"/><circle cx="12.7" cy="18.48" r=".79" fill="#666"/><path d="M12.7 18.48c0-3.93 7.14-1.28 7.14-5.25" fill="none" stroke="#999" stroke-width=".86" stroke-linecap="round"/><rect width="68.79" height="10.58" x="2.65" y="2.68" ry="1.37" fill="url(#b)"/><rect width="15.35" height="9.51" x="3.18" y="3.21" ry=".9" fill="#78a2d4"/><rect width="9.26" height="9.51" x="19.85" y="3.2" ry=".9" fill="#eaa577"/><rect width="9.26" height="9.51" x="29.64" y="3.22" ry=".9" fill="#eaa577"/><rect width="9.26" height="9.51" x="39.43" y="3.22" ry=".9" fill="#eaa577"/><rect width="9.26" height="9.51" x="49.22" y="3.21" ry=".9" fill="#eaa577"/><circle cx="62.84" cy="7.97" r=".66" fill="#eaa577"/><circle cx="64.7" cy="7.97" r=".66" fill="#eaa577"/><circle cx="66.55" cy="7.97" r=".66" fill="#eaa577"/></svg>
973///
974/// Similar in principle to a `Vec<T>`.
975/// It can be converted for free into a reference counted `SharedVector<T>` or `AtomicSharedVector<T>`.
976///
977/// Unique and shared vectors expose similar functionality. `Vector` takes advantage of
978/// the guaranteed uniqueness at the type level to provide overall faster operations than its
979/// shared counterparts, while its memory layout makes it very cheap to convert to a shared vector
980/// (involving no allocation or copy).
981///
982/// # Internal representation
983///
984/// `Vector` stores its length and capacity inline and points to the first element of the
985/// allocated buffer. Room for a header is left uninitialized before the elements so that the
986/// vector can be converted into a `SharedVector` or `AtomicSharedVector` without reallocating
987/// the storage.
988///
989/// Internally, `Vector` is built on top of `RawVector`.
pub struct Vector<T, A: Allocator = Global> {
    /// Inline length/capacity header plus pointer to the first element.
    pub(crate) raw: RawVector<T>,
    /// The allocator that owns this vector's buffer; used for every (re/de)allocation.
    pub(crate) allocator: A,
}
994
impl<T> Vector<T, Global> {
    /// Creates an empty vector.
    ///
    /// This does not allocate memory.
    pub fn new() -> Vector<T, Global> {
        Vector {
            raw: RawVector::new(),
            allocator: Global,
        }
    }



    /// Creates an empty pre-allocated vector with a given storage capacity.
    ///
    /// Does not allocate memory if `cap` is zero.
    pub fn with_capacity(cap: usize) -> Vector<T, Global> {
        Self::try_with_capacity(cap).unwrap()
    }

    /// Creates an empty pre-allocated vector with a given storage capacity.
    ///
    /// Does not allocate memory if `cap` is zero.
    pub fn try_with_capacity(cap: usize) -> Result<Vector<T, Global>, AllocError> {
        Vector::try_with_capacity_in(cap, Global)
    }

    /// Creates a vector containing clones of the elements of `data`.
    ///
    /// # Panics
    ///
    /// Panics if the allocation fails.
    pub fn from_slice(data: &[T]) -> Self
    where
        T: Clone,
    {
        Vector { raw: RawVector::try_from_slice(&Global, data).unwrap(), allocator: Global }
    }

    /// Creates a vector with `n` clones of `elem`.
    pub fn from_elem(elem: T, n: usize) -> Vector<T, Global>
    where
        T: Clone,
    {
        Vector { raw: RawVector::try_from_elem(&Global, elem, n).unwrap(), allocator: Global }
    }
}
1037
impl<T, A: Allocator> Vector<T, A> {
    /// Creates an empty vector without allocating memory.
    pub fn new_in(allocator: A) -> Self {
        Self::try_with_capacity_in(0, allocator).unwrap()
    }

    /// Creates an empty pre-allocated vector with a given storage capacity.
    ///
    /// Does not allocate memory if `cap` is zero.
    pub fn with_capacity_in(cap: usize, allocator: A) -> Self {
        Self::try_with_capacity_in(cap, allocator).unwrap()
    }

    /// Creates an empty pre-allocated vector with a given storage capacity.
    ///
    /// Does not allocate memory if `cap` is zero.
    pub fn try_with_capacity_in(cap: usize, allocator: A) -> Result<Vector<T, A>, AllocError> {
        let raw = RawVector::try_with_capacity(&allocator, cap)?;

        Ok(Vector { raw, allocator })
    }

    /// Attempts to move this vector into another allocator.
    ///
    /// NOTE(review): the `reallocate_in` test below shows the vector is truncated to
    /// `new_cap` when `new_cap < self.len()` (excess elements are dropped) — worth
    /// documenting explicitly once confirmed against `RawVector::try_reallocate_in`.
    pub fn try_reallocate_in<NewA: Allocator>(&mut self, new_allocator: NewA, new_cap: usize) -> Result<Vector<T, NewA>, AllocError> {
        unsafe {
            self.raw.try_reallocate_in(&self.allocator, &new_allocator, new_cap)?;
        }

        // `mem::take` leaves `self.raw` empty and unallocated, so dropping
        // `self` afterwards does not free the buffer we just moved out.
        Ok(Vector {
            raw: mem::take(&mut self.raw),
            allocator: new_allocator,
        })
    }

    #[inline(always)]
    /// Returns `true` if the vector contains no elements.
    pub fn is_empty(&self) -> bool {
        self.raw.is_empty()
    }

    #[inline(always)]
    /// Returns the number of elements in the vector, also referred to as its ‘length’.
    pub fn len(&self) -> usize {
        self.raw.len()
    }

    #[inline(always)]
    /// Returns the total number of elements the vector can hold without reallocating.
    pub fn capacity(&self) -> usize {
        self.raw.capacity()
    }

    /// Returns number of elements that can be added without reallocating.
    #[inline(always)]
    pub fn remaining_capacity(&self) -> usize {
        self.raw.remaining_capacity()
    }

    /// Returns a reference to the underlying allocator.
    #[inline(always)]
    pub fn allocator(&self) -> &A {
        &self.allocator
    }

    /// Borrows the elements as a slice.
    #[inline(always)]
    pub fn as_slice(&self) -> &[T] {
        self.raw.as_slice()
    }

    /// Borrows the elements as a mutable slice.
    #[inline(always)]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        self.raw.as_mut_slice()
    }

    /// Clears the vector, removing all values.
    pub fn clear(&mut self) {
        // Delegates element dropping and length reset to `raw::clear`.
        unsafe {
            raw::clear(self.raw.data_ptr(), &mut self.raw.header)
        }
    }

    /// Converts this vector's buffer into a ref-counted `HeaderBuffer` in place,
    /// writing a `Header` (len/cap, refcount of 1, the allocator) into the room
    /// reserved before the elements. No allocation or copy is performed.
    ///
    /// # Safety
    ///
    /// The buffer must be allocated (`self.raw.header.cap != 0`), otherwise there is
    /// no header room to write into.
    unsafe fn into_header_buffer<R>(mut self) -> HeaderBuffer<T, R, A>
    where
        R: RefCount,
    {
        debug_assert!(self.raw.header.cap != 0);
        unsafe {
            let mut header = raw::header_from_data_ptr(self.raw.data);

            // NOTE(review): the struct docs say the header room is left uninitialized;
            // assignment through `&mut` drops the previous value at that location, which
            // is only sound if `Header`'s drop glue is trivial. `ptr::write` would avoid
            // the question entirely — TODO confirm.
            *header.as_mut() = raw::Header {
                vec: VecHeader {
                    len: self.raw.header.len,
                    cap: self.raw.header.cap,
                },
                ref_count: R::new(1),
                // Move the allocator out bitwise; `mem::forget(self)` below prevents
                // a double drop of it (and of the buffer) via `Vector`'s destructor.
                allocator: ptr::read(&mut self.allocator),
            };

            mem::forget(self);

            HeaderBuffer::from_raw(header)
        }
    }

    /// Make this vector immutable.
    ///
    /// This operation is cheap, the underlying storage does not need
    /// to be reallocated.
    #[inline]
    pub fn into_shared(self) -> SharedVector<T, A>
    where
        A: Allocator + Clone,
    {
        // An unallocated vector has no header room to reuse; build a fresh
        // empty shared vector instead.
        if self.raw.header.cap == 0 {
            return SharedVector::try_with_capacity_in(0, self.allocator.clone()).unwrap();
        }
        unsafe {
            let inner = self.into_header_buffer::<DefaultRefCount>();
            SharedVector { inner }
        }
    }

    /// Make this vector immutable.
    ///
    /// This operation is cheap, the underlying storage does not need
    /// to be reallocated.
    #[inline]
    pub fn into_shared_atomic(self) -> AtomicSharedVector<T, A>
    where
        A: Allocator + Clone,
    {
        if self.raw.header.cap == 0 {
            return AtomicSharedVector::try_with_capacity_in(0, self.allocator.clone()).unwrap();
        }
        unsafe {
            let inner = self.into_header_buffer::<AtomicRefCount>();
            AtomicSharedVector { inner }
        }
    }

    /// Appends an element to the back of a collection.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `u32::MAX` bytes.
    #[inline(always)]
    pub fn push(&mut self, val: T) {
        // SAFETY: `self.allocator` is the allocator this vector's buffer was
        // allocated with (paired at construction).
        unsafe {
            self.raw.push(&self.allocator, val);
        }
    }

    /// Appends an element if there is sufficient spare capacity, otherwise an error is returned
    /// with the element.
    ///
    /// Unlike push this method will not reallocate when there’s insufficient capacity.
    /// The caller should use reserve or try_reserve to ensure that there is enough capacity.
    #[inline(always)]
    pub fn push_within_capacity(&mut self, val: T) -> Result<(), T> {
        self.raw.push_within_capacity(val)
    }

    /// Removes the last element from the vector and returns it, or `None` if it is empty.
    #[inline(always)]
    pub fn pop(&mut self) -> Option<T> {
        self.raw.pop()
    }

    /// Removes and returns the element at position `index` within the vector,
    /// shifting all elements after it to the left.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    #[inline(always)]
    pub fn remove(&mut self, index: usize) -> T {
        self.raw.remove(index)
    }

    /// Removes an element from the vector and returns it.
    ///
    /// The removed element is replaced by the last element of the vector.
    ///
    /// # Panics
    ///
    /// Panics if index is out of bounds.
    #[inline(always)]
    pub fn swap_remove(&mut self, idx: usize) -> T {
        self.raw.swap_remove(idx)
    }

    /// Inserts an element at position `index` within the vector, shifting all
    /// elements after it to the right.
    ///
    /// # Panics
    ///
    /// Panics if `index > len`.
    #[inline(always)]
    pub fn insert(&mut self, index: usize, element: T) {
       unsafe { self.raw.insert(&self.allocator, index, element) }
    }

    /// Clones and appends the contents of the slice to the back of a collection.
    #[inline(always)]
    pub fn extend_from_slice(&mut self, data: &[T])
    where
        T: Clone,
    {
        unsafe {
            self.raw.extend_from_slice(&self.allocator, data)
        }
    }

    /// Moves all the elements of `other` into `self`, leaving `other` empty.
    ///
    /// NOTE(review): elements are moved, so the `T: Clone` bound looks unnecessary
    /// here — possibly inherited from `RawVector::append`'s signature. TODO confirm.
    #[inline(always)]
    pub fn append(&mut self, other: &mut Self)
    where
        T: Clone,
    {
        unsafe {
            self.raw.append(&self.allocator, &mut other.raw)
        }
    }

    /// Appends the contents of an iterator to the back of a collection.
    #[inline(always)]
    pub fn extend(&mut self, data: impl IntoIterator<Item = T>) {
        unsafe {
            self.raw.extend(&self.allocator, data)
        }
    }

    /// Allocates a clone of this buffer.
    #[inline(always)]
    pub fn clone_buffer(&self) -> Self
    where
        T: Clone,
        A: Clone,
    {
        Vector {
            raw: self.raw.clone_buffer(&self.allocator),
            allocator: self.allocator.clone(),
        }
    }

    /// Allocate a clone of this buffer with a different capacity
    ///
    /// The capacity must be at least as large as the buffer's length.
    #[inline(always)]
    pub fn clone_buffer_with_capacity(&self, cap: usize) -> Self
    where
        T: Clone,
        A: Clone,
    {
        Vector {
            raw: self.raw.clone_buffer_with_capacity(&self.allocator, cap),
            allocator: self.allocator.clone(),
        }
    }

    /// Reserves capacity for at least `additional` more elements.
    ///
    /// # Panics
    ///
    /// Panics if the allocation fails.
    #[inline(always)]
    pub fn reserve(&mut self, additional: usize) {
        unsafe {
            self.raw.try_reserve(&self.allocator, additional).unwrap()
        }
    }

    /// Tries to reserve capacity for at least `additional` more elements,
    /// returning an error if the allocation fails.
    #[inline(always)]
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), AllocError> {
        unsafe {
            self.raw.try_reserve(&self.allocator, additional)
        }
    }

    /// Reserves the minimum capacity for at least `additional` elements to be inserted in the given vector.
    ///
    /// Unlike `reserve`, this will not deliberately over-allocate to speculatively avoid frequent allocations.
    /// After calling `reserve_exact`, capacity will be greater than or equal to `self.len() + additional`.
    /// Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it requests. Therefore, capacity can not
    /// be relied upon to be precisely minimal. Prefer `reserve` if future insertions are expected.
    ///
    /// # Panics
    ///
    /// Panics if the allocation fails.
    ///
    /// NOTE(review): the `T: Clone` bound looks unnecessary — `try_reserve_exact`
    /// below has none. TODO confirm.
    pub fn reserve_exact(&mut self, additional: usize)
    where
        T: Clone,
    {
        self.try_reserve_exact(additional).unwrap();
    }

    /// Tries to reserve the minimum capacity for at least `additional` elements to be inserted in the given vector.
    ///
    /// Unlike `try_reserve`, this will not deliberately over-allocate to speculatively avoid frequent allocations.
    /// After calling `try_reserve_exact`, capacity will be greater than or equal to `self.len() + additional` if
    /// it returns `Ok(())`.
    /// Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it requests. Therefore, capacity can not
    /// be relied upon to be precisely minimal. Prefer `try_reserve` if future insertions are expected.
    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), AllocError> {
        unsafe {
            self.raw.try_reserve_exact(&self.allocator, additional)
        }
    }

    /// Shrinks the capacity of the vector with a lower bound.
    ///
    /// The capacity will remain at least as large as both the length and the supplied value.
    /// If the current capacity is less than the lower limit, this is a no-op.
    #[inline(always)]
    pub fn shrink_to(&mut self, min_capacity: usize)
    where
        T: Clone,
    {
        unsafe {
            self.raw.shrink_to(&self.allocator, min_capacity)
        }
    }

    /// Shrinks the capacity of the vector as much as possible.
    #[inline(always)]
    pub fn shrink_to_fit(&mut self)
    where
        T: Clone,
    {
        unsafe {
            self.raw.shrink_to_fit(&self.allocator)
        }
    }

    /// Removes the specified range from the vector in bulk, returning all
    /// removed elements as an iterator. If the iterator is dropped before
    /// being fully consumed, it drops the remaining removed elements.
    ///
    /// The returned iterator keeps a mutable borrow on the vector to optimize
    /// its implementation.
    ///
    /// # Panics
    ///
    /// Panics if the starting point is greater than the end point or if
    /// the end point is greater than the length of the vector.
    ///
    /// # Leaking
    ///
    /// If the returned iterator goes out of scope without being dropped (due to
    /// [`mem::forget`], for example), the vector may have lost and leaked
    /// elements arbitrarily, including elements outside the range.
    ///
    pub fn drain<R>(&mut self, range: R) -> Drain<'_, T>
    where
        R: RangeBounds<usize>,
    {
        self.raw.drain(range)
    }

    /// Creates a splicing iterator that replaces the specified range in the vector
    /// with the given `replace_with` iterator and yields the removed items.
    /// `replace_with` does not need to be the same length as `range`.
    ///
    /// `range` is removed even if the iterator is not consumed until the end.
    ///
    /// It is unspecified how many elements are removed from the vector
    /// if the `Splice` value is leaked.
    ///
    /// The input iterator `replace_with` is only consumed when the `Splice` value is dropped.
    ///
    /// This is optimal if:
    ///
    /// * The tail (elements in the vector after `range`) is empty,
    /// * or `replace_with` yields fewer or equal elements than `range`’s length
    /// * or the lower bound of its `size_hint()` is exact.
    ///
    /// Otherwise, a temporary vector is allocated and the tail is moved twice.
    ///
    /// # Panics
    ///
    /// Panics if the starting point is greater than the end point or if
    /// the end point is greater than the length of the vector.
    ///
    pub fn splice<R, I>(
        &mut self,
        range: R,
        replace_with: I
    ) -> Splice<'_, <I as IntoIterator>::IntoIter, A>
    where
        R: RangeBounds<usize>,
        I: IntoIterator<Item = T>,
    {
        unsafe { self.raw.splice(&self.allocator, range, replace_with) }
    }

    /// Retains only the elements specified by the predicate.
    ///
    /// In other words, remove all elements `e` for which `f(&e)` returns `false`.
    /// This method operates in place, visiting each element exactly once in the
    /// original order, and preserves the order of the retained elements.
    pub fn retain<F>(&mut self, f: F)
    where
        F: FnMut(&T) -> bool,
    {
        self.raw.retain(f)
    }

    /// Retains only the elements specified by the predicate, passing a mutable reference to it.
    ///
    /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`.
    /// This method operates in place, visiting each element exactly once in the
    /// original order, and preserves the order of the retained elements.
    pub fn retain_mut<F>(&mut self, f: F)
    where
        F: FnMut(&mut T) -> bool,
    {
        self.raw.retain_mut(f)
    }

    /// Transfers this vector's contents to the returned one, leaving this
    /// vector empty and unallocated. The allocator is cloned.
    #[inline(always)]
    pub fn take(&mut self) -> Self
    where
        A: Clone,
    {
        let other = Vector {
            raw: self.raw.take(),
            allocator: self.allocator.clone(),
        };

        other
    }
}
1468
impl<T, A: Allocator> Drop for Vector<T, A> {
    fn drop(&mut self) {
        // SAFETY: `self.allocator` is the allocator that allocated `self.raw`'s
        // buffer (the two are always paired at construction).
        unsafe {
            self.raw.deallocate(&self.allocator)
        }
    }
}
1476
1477impl<T: Clone, A: Allocator + Clone> Clone for Vector<T, A> {
1478    fn clone(&self) -> Self {
1479        self.clone_buffer()
1480    }
1481}
1482
1483impl<T: PartialEq<T>, A: Allocator> PartialEq<Vector<T, A>> for Vector<T, A> {
1484    fn eq(&self, other: &Self) -> bool {
1485        self.as_slice() == other.as_slice()
1486    }
1487}
1488
1489impl<T: PartialEq<T>, A: Allocator> PartialEq<&[T]> for Vector<T, A> {
1490    fn eq(&self, other: &&[T]) -> bool {
1491        self.as_slice() == *other
1492    }
1493}
1494
1495impl<T, A: Allocator> AsRef<[T]> for Vector<T, A> {
1496    fn as_ref(&self) -> &[T] {
1497        self.as_slice()
1498    }
1499}
1500
1501impl<T, A: Allocator> AsMut<[T]> for Vector<T, A> {
1502    fn as_mut(&mut self) -> &mut [T] {
1503        self.as_mut_slice()
1504    }
1505}
1506
1507impl<T> Default for Vector<T, Global> {
1508    fn default() -> Self {
1509        Self::new()
1510    }
1511}
1512
1513impl<'a, T, A: Allocator> IntoIterator for &'a Vector<T, A> {
1514    type Item = &'a T;
1515    type IntoIter = core::slice::Iter<'a, T>;
1516    fn into_iter(self) -> core::slice::Iter<'a, T> {
1517        self.as_slice().iter()
1518    }
1519}
1520
1521impl<'a, T, A: Allocator> IntoIterator for &'a mut Vector<T, A> {
1522    type Item = &'a mut T;
1523    type IntoIter = core::slice::IterMut<'a, T>;
1524    fn into_iter(self) -> core::slice::IterMut<'a, T> {
1525        self.as_mut_slice().iter_mut()
1526    }
1527}
1528
1529impl<T, A: Allocator, I> Index<I> for Vector<T, A>
1530where
1531    I: core::slice::SliceIndex<[T]>,
1532{
1533    type Output = <I as core::slice::SliceIndex<[T]>>::Output;
1534    fn index(&self, index: I) -> &Self::Output {
1535        self.as_slice().index(index)
1536    }
1537}
1538
1539impl<T, A: Allocator, I> IndexMut<I> for Vector<T, A>
1540where
1541    I: core::slice::SliceIndex<[T]>,
1542{
1543    fn index_mut(&mut self, index: I) -> &mut Self::Output {
1544        self.as_mut_slice().index_mut(index)
1545    }
1546}
1547
1548impl<T, A: Allocator> Deref for Vector<T, A> {
1549    type Target = [T];
1550    fn deref(&self) -> &[T] {
1551        self.as_slice()
1552    }
1553}
1554
1555impl<T, A: Allocator> DerefMut for Vector<T, A> {
1556    fn deref_mut(&mut self) -> &mut [T] {
1557        self.as_mut_slice()
1558    }
1559}
1560
1561impl<T: Debug, A: Allocator> Debug for Vector<T, A> {
1562    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
1563        self.as_slice().fmt(f)
1564    }
1565}
1566
1567impl<T: Clone, A: Allocator + Clone> From<SharedVector<T, A>> for Vector<T, A> {
1568    fn from(shared: SharedVector<T, A>) -> Self {
1569        shared.into_unique()
1570    }
1571}
1572
1573impl<T: Clone, A: Allocator + Clone> From<AtomicSharedVector<T, A>> for Vector<T, A> {
1574    fn from(shared: AtomicSharedVector<T, A>) -> Self {
1575        shared.into_unique()
1576    }
1577}
1578
1579impl<T: core::hash::Hash, A: Allocator> core::hash::Hash for Vector<T, A> {
1580    fn hash<H>(&self, state: &mut H) where H: core::hash::Hasher {
1581        self.as_slice().hash(state)
1582    }
1583}
1584
#[test]
fn bump_alloc() {
    use blink_alloc::BlinkAlloc;

    let allocator = BlinkAlloc::new();

    // Scope ensures the vectors are dropped before the bump allocator.
    {
        let mut v1: Vector<u32, &BlinkAlloc> = Vector::try_with_capacity_in(4, &allocator).unwrap();
        v1.push(0);
        v1.push(1);
        v1.push(2);
        assert_eq!(v1.capacity(), 4);
        assert_eq!(v1.as_slice(), &[0, 1, 2]);

        // let mut v2 = crate::vector!(@ &allocator [10, 11]);
        let mut v2 = crate::vector!([10, 11] in &allocator);
        assert_eq!(v2.capacity(), 2);

        assert_eq!(v2.as_slice(), &[10, 11]);

        // Pushing past the initial capacity of 4 forces a reallocation from
        // the bump allocator.
        v1.push(3);
        v1.push(4);

        assert_eq!(v1.as_slice(), &[0, 1, 2, 3, 4]);

        assert!(v1.capacity() > 4);

        v2.push(12);
        v2.push(13);
        v2.push(14);

        // Conversion to a shared vector must work with a borrowed allocator too.
        let v2 = v2.into_shared();

        assert_eq!(v1.as_slice(), &[0, 1, 2, 3, 4]);
        assert_eq!(v2.as_slice(), &[10, 11, 12, 13, 14]);
    }
}
1622
#[test]
fn basic_unique() {
    // Boxed values so that leaks/double-drops show up under sanitizers/miri.
    fn num(val: u32) -> Box<u32> {
        Box::new(val)
    }

    let mut a = Vector::with_capacity(256);

    a.push(num(0));
    a.push(num(1));
    a.push(num(2));

    let a = a.into_shared();

    assert_eq!(a.len(), 3);

    assert_eq!(a.as_slice(), &[num(0), num(1), num(2)]);

    // Freshly converted shared vector has a single reference.
    assert!(a.is_unique());

    let b = Vector::from_slice(&[num(0), num(1), num(2), num(3), num(4)]);

    assert_eq!(b.as_slice(), &[num(0), num(1), num(2), num(3), num(4)]);

    let c = a.clone_buffer();
    assert!(!c.ptr_eq(&a));

    // A new reference makes the buffer shared until it is dropped.
    let a2 = a.new_ref();
    assert!(a2.ptr_eq(&a));
    assert!(!a.is_unique());
    assert!(!a2.is_unique());

    mem::drop(a2);

    assert!(a.is_unique());

    let _ = c.clone_buffer();
    let _ = b.clone_buffer();

    let mut d = Vector::with_capacity(64);
    d.extend_from_slice(&[num(0), num(1), num(2)]);
    // Extending with an empty slice must be a no-op.
    d.extend_from_slice(&[]);
    d.extend_from_slice(&[num(3), num(4)]);

    assert_eq!(d.as_slice(), &[num(0), num(1), num(2), num(3), num(4)]);
}
1669
#[test]
fn shrink() {
    // Shrinking an empty vector with a non-zero lower bound must not panic.
    let mut vector: Vector<u32> = Vector::with_capacity(32);
    vector.shrink_to(8);
}
1675
#[test]
fn zst() {
    // Zero-sized element types must still track their length correctly.
    let mut v = Vector::new();
    for _ in 0..4 {
        v.push(());
    }

    assert_eq!(v.len(), 4);
}
1686
#[test]
fn dyn_allocator() {
    // Vectors can be parameterized over a trait-object allocator.
    let allocator: &dyn Allocator = &Global;
    let mut vec = crate::vector!([1u32, 2, 3] in allocator);

    vec.push(4);

    assert_eq!(&vec[..], &[1, 2, 3, 4]);
}
1696
#[test]
// NOTE(review): test name has a typo ("borrowd" → "borrowed").
fn borrowd_dyn_alloc() {
    // A container storing a vector tied to a borrowed trait-object allocator.
    struct DataStructure<'a> {
        data: Vector<u32, &'a dyn Allocator>,
    }

    impl DataStructure<'static> {
        // Uses the global allocator as a 'static trait object.
        fn new() -> DataStructure<'static> {
            DataStructure {
                data: Vector::new_in(&Global as &'static dyn Allocator)
            }
        }
    }

    impl<'a> DataStructure<'a> {
        fn new_in(allocator: &'a dyn Allocator) -> DataStructure<'a> {
            DataStructure { data: Vector::new_in(allocator) }
        }

        fn push(&mut self, val: u32) {
            self.data.push(val);
        }
    }

    let mut ds1 = DataStructure::new();
    ds1.push(1);

    // Same container, but borrowing a locally owned allocator.
    let alloc = Global;
    let mut ds2 = DataStructure::new_in(&alloc);
    ds2.push(2);

}
1729
#[test]
fn splice1() {
    // Repeatedly splicing at the front of the vector must not panic or leak.
    let mut vec = Vector::new();
    for val in [1, 2] {
        vec.splice(0..0, vec![Box::new(val); 5].into_iter());
    }
}
1736
#[test]
fn drain1() {
    // Fuzzer-derived regression test; the large constants are intentional and
    // must be kept verbatim.
    let mut vectors: [Vector<Box<u32>>; 4] = [
        Vector::new(),
        Vector::new(),
        Vector::new(),
        Vector::new(),
    ];
    // Shrinking an empty vector with a huge lower bound is a no-op.
    vectors[0].shrink_to(3906369431118283232);
    vectors[2].extend_from_slice(&[Box::new(1), Box::new(2), Box::new(3)]);
    let vec = &mut vectors[2];
    let len = vec.len();
    // Reduce the huge fuzzer inputs into a valid in-bounds range.
    let start = if len > 0 { 16059518370053021184 % len } else { 0 };
    let end = 16059518370053021185.min(len);
    vectors[2].drain(start..end);
}
1753
#[test]
fn reallocate_in() {
    pub use crate::alloc::Global;
    use std::rc::Rc;

    let alloc1 = Global;
    let alloc2 = Global;

    let mut vec = Vector::new_in(alloc1);
    // Rc strong counts are used to observe exactly how many elements are alive.
    let val = Rc::new(());

    for _ in 0..8 {
        vec.push(val.clone());
    }

    assert_eq!(Rc::strong_count(&val), 9);

    // Reallocating with a smaller capacity truncates: 4 elements survive,
    // the other 4 are dropped (9 -> 5 strong refs).
    let mut vec = vec.try_reallocate_in(alloc2, 4).unwrap();

    assert_eq!(vec.len(), 4);
    assert_eq!(vec.capacity(), 4);
    assert_eq!(Rc::strong_count(&val), 5);

    for _ in 0..12 {
        vec.push(val.clone());
    }

    assert_eq!(vec.len(), 16);
    assert!(vec.capacity() >= 16);
    assert_eq!(Rc::strong_count(&val), 17);

    // Reallocating to capacity 0 drops every element.
    let vec = vec.try_reallocate_in(alloc1, 0).unwrap();

    assert_eq!(vec.len(), 0);
    assert_eq!(vec.capacity(), 0);
    assert_eq!(Rc::strong_count(&val), 1);
}
1791
#[test]
fn extend_within_capacity() {
    use std::sync::atomic::{AtomicUsize, Ordering};
    // Counts every Foo dropped so far, so we can assert exactly when the
    // elements left behind by extend_within_capacity are destroyed.
    static COUNTER: AtomicUsize = AtomicUsize::new(0);

    struct Foo;
    impl Drop for Foo {
        fn drop(&mut self) {
            COUNTER.fetch_add(1, Ordering::SeqCst);
        }
    }

    let values = [
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
        Foo,
    ];

    let n = values.len();
    let cap;

    let mut vector = RawVector::new();
    unsafe {
        vector.try_reserve(&Global, 4).unwrap();
        cap = vector.capacity();
        // What's important here is that we exercise the code path
        // where we extend within a capacity that is inferior to the
        // amount of elements that we need to push. So if the default
        // capacity grows and this fails, just increase the number of
        // elements in values.
        assert!(cap < n);

        let mut iter = values.into_iter();

        // Moves at most `cap` elements out of `iter` into the vector.
        vector.extend_within_capacity(&mut iter);

        // Nothing has been dropped yet: `cap` elements now live in the
        // vector and the remaining `n - cap` are still owned by `iter`.
        assert_eq!(COUNTER.load(Ordering::SeqCst), 0);
    }

    // `iter` went out of scope at the end of the unsafe block, dropping the
    // `n - cap` elements it still held.
    assert_eq!(COUNTER.load(Ordering::SeqCst), n - cap);

    vector.clear();

    // clear() dropped the `cap` elements stored in the vector: all `n` gone.
    assert_eq!(COUNTER.load(Ordering::SeqCst), n);

    // RawVector does not free its buffer on drop; release it explicitly.
    unsafe {
        vector.deallocate(&Global);
    }
}