// memvec/mem_vec.rs

1use crate::memory::{Memory, MemoryLayoutError};
2use core::{
3    cmp::Ordering,
4    hash::Hash,
5    marker::PhantomData,
6    mem::MaybeUninit,
7    ops::{Deref, DerefMut, Index, IndexMut},
8    ptr,
9    slice::{self, SliceIndex},
10};
11/// A memory-backed vector that provides a Vec-like interface over memory-mapped storage.
12///
13/// `MemVec<T, A>` is a frontend that wraps any `Memory` backend (such as memory-mapped files
14/// or anonymous memory mappings) and exposes the familiar `Vec<T>` API. This allows you to
15/// work with persistent or high-performance memory using the same methods you know from
16/// standard vectors.
17///
18/// # Type Parameters
19///
20/// * `T` - The element type. Must implement `Copy` for zero-drop semantics.
21/// * `A` - The memory backend implementing the `Memory` trait (e.g., `VecFile`, `MmapAnon`).
22///
23/// # Examples
24///
25/// ```rust
26/// use memvec::{MemVec, MmapAnon};
27///
28/// #[derive(Copy, Clone)]
29/// struct Point { x: i32, y: i32 }
30///
31/// // Create with anonymous memory mapping
32/// let mmap = MmapAnon::with_size(1024)?;
33/// let mut vec = unsafe { MemVec::<Point, _>::try_from_memory(mmap).unwrap() };
34///
35/// // Use like a regular Vec
36/// vec.push(Point { x: 1, y: 2 });
37/// vec.push(Point { x: 3, y: 4 });
38/// assert_eq!(vec.len(), 2);
39/// # Ok::<(), Box<dyn std::error::Error>>(())
40/// ```
41///
42/// # Safety
43///
44/// `MemVec` requires `unsafe` construction via `try_from_memory()` because it assumes
45/// the memory backend contains valid representations of `T`. The memory layout and
46/// alignment must be compatible with the target type.
47///
48/// # Performance
49///
50/// `MemVec` provides zero-copy access to the underlying memory. Operations like indexing,
51/// iteration, and slicing work directly on the memory-mapped data without additional
52/// copying or serialization overhead.
53///
54/// Many methods are identical to `std::vec::Vec` - see the Vec documentation for details
55/// on specific method behavior.
pub struct MemVec<'a, T: Copy, A: 'a + Memory> {
    // Memory backend; owns both the element bytes and the stored length.
    mem: A,
    // Ties the element type `T` (and lifetime `'a`) to the container without
    // actually storing a `T`.
    _marker: PhantomData<&'a T>,
}
60
impl<'a, T: Copy, A: 'a + Memory> MemVec<'a, T, A> {
    /// Create a new memory-backed vector.
    ///
    /// On failure the backend is returned alongside the error so it is not lost.
    ///
    /// # Safety
    /// The memory must represent valid len and bytes representations of T.
    pub unsafe fn try_from_memory(mem: A) -> Result<Self, (A, MemoryLayoutError)> {
        // `align_to` splits the byte buffer into (misaligned prefix, aligned Ts, tail).
        // A non-empty prefix means the backend's base address is not aligned for `T`.
        let (prefix, _, _suffix) = mem.deref().align_to::<T>();
        if !prefix.is_empty() {
            return Err((mem, MemoryLayoutError::MisalignedMemory));
        }
        // assert_eq!(_suffix.len(), 0);

        let vec = Self {
            mem,
            _marker: PhantomData,
        };
        // The stored length must fit in the aligned element capacity, otherwise
        // element reads would run past the buffer.
        if vec.len() > vec.capacity() {
            let mem = vec.into_mem();
            return Err((mem, MemoryLayoutError::CapacityExceeded));
        }
        Ok(vec)
    }

    /// Consumes the vector and returns the underlying memory backend.
    pub fn into_mem(self) -> A {
        self.mem
    }
    /// Returns a shared reference to the underlying memory backend.
    pub fn as_mem(&self) -> &A {
        &self.mem
    }
    /// Returns a mutable reference to the underlying memory backend.
    pub fn as_mem_mut(&mut self) -> &mut A {
        &mut self.mem
    }
}
93
94// std::vec::Vec methods
95impl<'a, T: Copy, A: 'a + Memory> MemVec<'a, T, A> {
    /// Reinterprets the whole backing byte buffer as a slice of `T`.
    ///
    /// The returned slice spans the full capacity, not just the first `len`
    /// elements.
    fn as_buf(&self) -> &[T] {
        unsafe {
            // SAFETY: `try_from_memory` verified alignment, and the caller of
            // that constructor promised the bytes are valid `T`s.
            let (prefix, slice, _suffix) = self.mem.deref().align_to::<T>();
            debug_assert_eq!(prefix.len(), 0);
            // debug_assert_eq!(_suffix.len(), 0);
            slice
        }
    }

    /// Mutable counterpart of `as_buf`: the full-capacity buffer as `&mut [T]`.
    fn as_buf_mut(&mut self) -> &mut [T] {
        unsafe {
            // SAFETY: same alignment/validity invariants as `as_buf`.
            let (prefix, slice, _suffix) = self.mem.deref_mut().align_to_mut::<T>();
            debug_assert_eq!(prefix.len(), 0);
            // debug_assert_eq!(_suffix.len(), 0);
            slice
        }
    }
113
114    /// Returns the number of elements the vector can hold without reallocating.
115    #[inline]
116    pub fn capacity(&self) -> usize {
117        self.as_buf().len()
118    }
119
120    /// Reserves capacity for at least `additional` more elements to be inserted
121    /// in the given `Vec<T>`. The collection may reserve more space to
122    /// speculatively avoid frequent reallocations. After calling `reserve`,
123    /// capacity will be greater than or equal to `self.len() + additional`.
124    /// Does nothing if capacity is already sufficient.
125    #[inline]
126    pub fn reserve(&mut self, additional: usize) {
127        self.try_reserve(additional).expect("reserve failed");
128    }
129
130    /// Tries to reserve capacity for at least `additional` more elements to be inserted
131    /// in the given `Vec<T>`. The collection may reserve more space to speculatively avoid
132    /// frequent reallocations. After calling `try_reserve`, capacity will be greater
133    /// than or equal to `self.len() + additional` if it returns `Ok(())`.
134    /// Does nothing if capacity is already sufficient.
135    ///
136    /// # Errors
137    ///
138    /// If the capacity overflows, or the allocator reports a failure, then an error
139    /// is returned.
140    pub fn try_reserve(&mut self, additional: usize) -> Result<(), A::Error> {
141        let len = self.len();
142        if self.needs_to_grow(len, additional) {
143            self.grow_amortized(len, additional)
144        } else {
145            Ok(())
146        }
147    }
148
149    /// Reserves the minimum capacity for at least `additional` more elements to
150    /// be inserted in the given `Vec<T>`. Unlike [`reserve`], this will not
151    /// deliberately over-allocate to speculatively avoid frequent allocations.
152    /// After calling `reserve_exact`, capacity will be greater than or equal to
153    /// `self.len() + additional`. Does nothing if the capacity is already
154    /// sufficient.
155    ///
156    /// Note that the allocator may give the collection more space than it
157    /// requests. Therefore, capacity can not be relied upon to be precisely
158    /// minimal. Prefer [`reserve`] if future insertions are expected.
159    ///
160    /// [`reserve`]: MemVec::reserve
161    pub fn reserve_exact(&mut self, additional: usize) {
162        self.try_reserve_exact(additional).expect("reserve failed");
163    }
164
165    /// Tries to reserve the minimum capacity for at least `additional`
166    /// elements to be inserted in the given `Vec<T>`. Unlike [`try_reserve`],
167    /// this will not deliberately over-allocate to speculatively avoid frequent
168    /// allocations. After calling `try_reserve_exact`, capacity will be greater
169    /// than or equal to `self.len() + additional` if it returns `Ok(())`.
170    /// Does nothing if the capacity is already sufficient.
171    ///
172    /// Note that the allocator may give the collection more space than it
173    /// requests. Therefore, capacity can not be relied upon to be precisely
174    /// minimal. Prefer [`try_reserve`] if future insertions are expected.
175    ///
176    /// [`try_reserve`]: MemVec::try_reserve
177    ///
178    /// # Errors
179    ///
180    /// If the capacity overflows, or the allocator reports a failure, then an error
181    /// is returned.
182    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), A::Error> {
183        let len = self.len();
184        if self.needs_to_grow(len, additional) {
185            self.grow_exact(len, additional)
186        } else {
187            Ok(())
188        }
189    }
190
191    /// Shrinks the capacity of the vector as much as possible.
192    ///
193    /// It will drop down as close as possible to the length but the allocator
194    /// may still inform the vector that there is space for a few more elements.
195    pub fn shrink_to_fit(&mut self) {
196        // The capacity is never less than the length, and there's nothing to do when
197        // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit`
198        // by only calling it with a greater capacity.
199        let len = self.mem.len();
200        if self.capacity() > len {
201            self.mem
202                .shrink_to(len * core::mem::size_of::<T>())
203                .expect("shrink_to failed");
204        }
205    }
206
207    /// Shrinks the capacity of the vector with a lower bound.
208    ///
209    /// The capacity will remain at least as large as both the length
210    /// and the supplied value.
211    ///
212    /// If the current capacity is less than the lower limit, this is a no-op.
213    pub fn shrink_to(&mut self, min_capacity: usize) {
214        if self.capacity() > min_capacity {
215            let new_cap = core::cmp::max(self.len(), min_capacity);
216            self.mem
217                .shrink_to(new_cap * core::mem::size_of::<T>())
218                .expect("shrink_to failed");
219        }
220    }
221
    /// Shortens the vector, keeping the first `len` elements and dropping
    /// the rest.
    ///
    /// If `len` is greater than the vector's current length, this has no
    /// effect.
    ///
    /// Note that this method has no effect on the allocated capacity
    /// of the vector.
    pub fn truncate(&mut self, len: usize) {
        // Requests longer than the current length are a no-op, matching `Vec::truncate`.
        if len > self.len() {
            return;
        }
        unsafe {
            // Number of tail elements being cut off.
            let remaining_len = self.mem.len() - len;
            let s = ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
            // Shrink the stored length *before* dropping, so a panicking drop
            // cannot leave the vector exposing already-dropped elements.
            *self.mem.len_mut() = len;
            // `T: Copy` means the drop is a no-op, but this mirrors `Vec::truncate`.
            ptr::drop_in_place(s);
        }
    }
245
    /// Extracts a slice containing the entire vector.
    ///
    /// Equivalent to `&s[..]`.
    pub fn as_slice(&self) -> &[T] {
        let len = self.mem.len();
        // SAFETY: `len <= capacity` is checked at construction and maintained
        // by every length-changing method, so the range is in-bounds.
        unsafe { self.as_buf().get_unchecked(..len) }
    }

    /// Extracts a mutable slice of the entire vector.
    ///
    /// Equivalent to `&mut s[..]`.
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        let len = self.mem.len();
        // SAFETY: same `len <= capacity` invariant as `as_slice`.
        unsafe { self.as_buf_mut().get_unchecked_mut(..len) }
    }
261
262    /// Returns a raw pointer to the vector's buffer, or a dangling raw pointer
263    /// valid for zero sized reads if the vector didn't allocate.
264    ///
265    /// The caller must ensure that the vector outlives the pointer this
266    /// function returns, or else it will end up pointing to garbage.
267    /// Modifying the vector may cause its buffer to be reallocated,
268    /// which would also make any pointers to it invalid.
269    ///
270    /// The caller must also ensure that the memory the pointer (non-transitively) points to
271    /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
272    /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
273    ///
274    /// [`as_mut_ptr`]: MemVec::as_mut_ptr
275    #[inline]
276    pub fn as_ptr(&self) -> *const T {
277        self.mem.as_ptr() as *const _
278    }
279
280    /// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
281    /// raw pointer valid for zero sized reads if the vector didn't allocate.
282    ///
283    /// The caller must ensure that the vector outlives the pointer this
284    /// function returns, or else it will end up pointing to garbage.
285    /// Modifying the vector may cause its buffer to be reallocated,
286    /// which would also make any pointers to it invalid.
287    #[inline]
288    pub fn as_mut_ptr(&mut self) -> *mut T {
289        self.mem.as_mut_ptr() as *mut _
290    }
291
292    /// Forces the length of the vector to `new_len`.
293    ///
294    /// This is a low-level operation that maintains none of the normal
295    /// invariants of the type. Normally changing the length of a vector
296    /// is done using one of the safe operations instead, such as
297    /// [`truncate`] or [`clear`].
298    ///
299    /// [`truncate`]: MemVec::truncate
300    /// [`clear`]: MemVec::clear
301    ///
302    /// # Safety
303    ///
304    /// - `new_len` must be less than or equal to [`capacity()`].
305    /// - The elements at `old_len..new_len` must be initialized.
306    ///
307    /// [`capacity()`]: MemVec::capacity
308    pub unsafe fn set_len(&mut self, len: usize) {
309        #[cold]
310        #[inline(never)]
311        fn assert_failed(len: usize, cap: usize) -> ! {
312            panic!("`set_len` len (is {len}) should be <= cap (is {cap})");
313        }
314        let cap = self.capacity();
315        if len > cap {
316            assert_failed(len, cap);
317        }
318        *self.mem.len_mut() = len;
319    }
320
    /// Removes an element from the vector and returns it.
    ///
    /// The removed element is replaced by the last element of the vector.
    ///
    /// This does not preserve ordering, but is *O*(1).
    /// If you need to preserve the element order, use [`remove`] instead.
    ///
    /// [`remove`]: MemVec::remove
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    #[inline]
    pub fn swap_remove(&mut self, index: usize) -> T {
        // Cold, out-of-line panic path, mirroring `Vec::swap_remove`.
        #[cold]
        #[inline(never)]
        fn assert_failed(index: usize, len: usize) -> ! {
            panic!("swap_remove index (is {index}) should be < len (is {len})");
        }

        let len = self.len();
        if index >= len {
            assert_failed(index, len);
        }
        unsafe {
            // We replace self[index] with the last element. Note that if the
            // bounds check above succeeds there must be a last element (which
            // can be self[index] itself).
            let value = ptr::read(self.as_ptr().add(index));
            let base_ptr = self.as_mut_ptr();
            ptr::copy(base_ptr.add(len - 1), base_ptr.add(index), 1);
            self.set_len(len - 1);
            value
        }
    }
356
    /// Inserts an element at position `index` within the vector, shifting all
    /// elements after it to the right.
    ///
    /// # Panics
    ///
    /// Panics if `index > len`, or if growing the backend fails.
    pub fn insert(&mut self, index: usize, element: T) {
        // Cold, out-of-line panic path, mirroring `Vec::insert`.
        #[cold]
        #[inline(never)]
        fn assert_failed(index: usize, len: usize) -> ! {
            panic!("insertion index (is {index}) should be <= len (is {len})");
        }

        let len = self.len();
        if index > len {
            assert_failed(index, len);
        }

        // space for the new element
        if len == self.capacity() {
            self.reserve(1);
        }

        unsafe {
            // infallible
            // The spot to put the new value
            {
                // Pointer is taken *after* any reallocation in `reserve`.
                let p = self.as_mut_ptr().add(index);
                // Shift everything over to make space. (Duplicating the
                // `index`th element into two consecutive places.)
                ptr::copy(p, p.offset(1), len - index);
                // Write it in, overwriting the first copy of the `index`th
                // element.
                ptr::write(p, element);
            }
            self.set_len(len + 1);
        }
    }
395
    /// Removes and returns the element at position `index` within the vector,
    /// shifting all elements after it to the left.
    ///
    /// Note: Because this shifts over the remaining elements, it has a
    /// worst-case performance of *O*(*n*). If you don't need the order of elements
    /// to be preserved, use [`swap_remove`] instead. If you'd like to remove
    /// elements from the beginning of the `Vec`, consider using
    /// [`VecDeque::pop_front`] instead.
    ///
    /// [`swap_remove`]: MemVec::swap_remove
    /// [`VecDeque::pop_front`]: std::collections::VecDeque::pop_front
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    #[track_caller]
    pub fn remove(&mut self, index: usize) -> T {
        // Cold, out-of-line panic path, mirroring `Vec::remove`.
        #[cold]
        #[inline(never)]
        #[track_caller]
        fn assert_failed(index: usize, len: usize) -> ! {
            panic!("removal index (is {index}) should be < len (is {len})");
        }

        let len = self.len();
        if index >= len {
            assert_failed(index, len);
        }
        unsafe {
            // infallible
            let ret;
            {
                // the place we are taking from.
                let ptr = self.as_mut_ptr().add(index);
                // copy it out, unsafely having a copy of the value on
                // the stack and in the vector at the same time.
                ret = ptr::read(ptr);

                // Shift everything down to fill in that spot.
                ptr::copy(ptr.offset(1), ptr, len - index - 1);
            }
            self.set_len(len - 1);
            ret
        }
    }
441
    /// Retains only the elements specified by the predicate.
    ///
    /// In other words, remove all elements `e` for which `f(&e)` returns `false`.
    /// This method operates in place, visiting each element exactly once in the
    /// original order, and preserves the order of the retained elements.
    pub fn retain<F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> bool,
    {
        // Delegate to `retain_mut`, downgrading the `&mut T` it hands out to `&T`.
        self.retain_mut(|elem| f(elem));
    }
453
    /// Retains only the elements specified by the predicate, passing a mutable reference to it.
    ///
    /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`.
    /// This method operates in place, visiting each element exactly once in the
    /// original order, and preserves the order of the retained elements.
    ///
    /// Ported from `Vec::retain_mut`, including its panic-safety drop guard.
    pub fn retain_mut<F>(&mut self, mut f: F)
    where
        F: FnMut(&mut T) -> bool,
    {
        let original_len = self.len();
        // Avoid double drop if the drop guard is not executed,
        // since we may make some holes during the process.
        unsafe { self.set_len(0) };

        // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked]
        //      |<-              processed len   ->| ^- next to check
        //                  |<-  deleted cnt     ->|
        //      |<-              original_len                          ->|
        // Kept: Elements which predicate returns true on.
        // Hole: Moved or dropped element slot.
        // Unchecked: Unchecked valid elements.
        //
        // This drop guard will be invoked when predicate or `drop` of element panicked.
        // It shifts unchecked elements to cover holes and `set_len` to the correct length.
        // In cases when predicate and `drop` never panic, it will be optimized out.
        struct BackshiftOnDrop<'a, 'v, T: Copy, A: Memory> {
            v: &'a mut MemVec<'v, T, A>,
            processed_len: usize,
            deleted_cnt: usize,
            original_len: usize,
        }

        impl<T: Copy, A: Memory> Drop for BackshiftOnDrop<'_, '_, T, A> {
            fn drop(&mut self) {
                if self.deleted_cnt > 0 {
                    // SAFETY: Trailing unchecked items must be valid since we never touch them.
                    unsafe {
                        ptr::copy(
                            self.v.as_ptr().add(self.processed_len),
                            self.v
                                .as_mut_ptr()
                                .add(self.processed_len - self.deleted_cnt),
                            self.original_len - self.processed_len,
                        );
                    }
                }
                // SAFETY: After filling holes, all items are in contiguous memory.
                unsafe {
                    self.v.set_len(self.original_len - self.deleted_cnt);
                }
            }
        }

        let mut g = BackshiftOnDrop {
            v: self,
            processed_len: 0,
            deleted_cnt: 0,
            original_len,
        };

        // Shared loop body, monomorphized for the "nothing deleted yet" and
        // "at least one hole exists" phases via the `DELETED` const parameter.
        fn process_loop<F, T: Copy, A: Memory, const DELETED: bool>(
            original_len: usize,
            f: &mut F,
            g: &mut BackshiftOnDrop<'_, '_, T, A>,
        ) where
            F: FnMut(&mut T) -> bool,
        {
            while g.processed_len != original_len {
                // SAFETY: Unchecked element must be valid.
                let cur = unsafe { &mut *g.v.as_mut_ptr().add(g.processed_len) };
                if !f(cur) {
                    // Advance early to avoid double drop if `drop_in_place` panicked.
                    g.processed_len += 1;
                    g.deleted_cnt += 1;
                    // SAFETY: We never touch this element again after dropped.
                    unsafe { ptr::drop_in_place(cur) };
                    // We already advanced the counter.
                    if DELETED {
                        continue;
                    } else {
                        break;
                    }
                }
                if DELETED {
                    // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with current element.
                    // We use copy for move, and never touch this element again.
                    unsafe {
                        let hole_slot = g.v.as_mut_ptr().add(g.processed_len - g.deleted_cnt);
                        ptr::copy_nonoverlapping(cur, hole_slot, 1);
                    }
                }
                g.processed_len += 1;
            }
        }

        // Stage 1: Nothing was deleted.
        process_loop::<F, T, A, false>(original_len, &mut f, &mut g);

        // Stage 2: Some elements were deleted.
        process_loop::<F, T, A, true>(original_len, &mut f, &mut g);

        // All item are processed. This can be optimized to `set_len` by LLVM.
        drop(g);
    }
558
    /// Removes all but the first of consecutive elements in the vector that resolve to the same
    /// key.
    ///
    /// If the vector is sorted, this removes all duplicates.
    #[inline]
    pub fn dedup_by_key<F, K>(&mut self, mut key: F)
    where
        F: FnMut(&mut T) -> K,
        K: PartialEq,
    {
        // Delegate to `dedup_by`, comparing the derived keys.
        self.dedup_by(|a, b| key(a) == key(b))
    }
571
    /// Removes all but the first of consecutive elements in the vector satisfying a given equality
    /// relation.
    ///
    /// The `same_bucket` function is passed references to two elements from the vector and
    /// must determine if the elements compare equal. The elements are passed in opposite order
    /// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is removed.
    ///
    /// If the vector is sorted, this removes all duplicates.
    ///
    /// Ported from `Vec::dedup_by`, including its panic-safety gap-fill guard.
    pub fn dedup_by<F>(&mut self, mut same_bucket: F)
    where
        F: FnMut(&mut T, &mut T) -> bool,
    {
        let len = self.len();
        // Zero or one element can contain no consecutive duplicates.
        if len <= 1 {
            return;
        }

        /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
        struct FillGapOnDrop<'a, 'b, T: Copy, A: Memory> {
            /* Offset of the element we want to check if it is duplicate */
            read: usize,

            /* Offset of the place where we want to place the non-duplicate
             * when we find it. */
            write: usize,

            /* The Vec that would need correction if `same_bucket` panicked */
            vec: &'a mut MemVec<'b, T, A>,
        }

        impl<'a, 'b, T: Copy, A: Memory> Drop for FillGapOnDrop<'a, 'b, T, A> {
            fn drop(&mut self) {
                /* This code gets executed when `same_bucket` panics */
                /* SAFETY: invariant guarantees that `read - write`
                 * and `len - read` never overflow and that the copy is always
                 * in-bounds. */
                unsafe {
                    let ptr = self.vec.as_mut_ptr();
                    let len = self.vec.len();

                    /* How many items were left when `same_bucket` panicked.
                     * Basically vec[read..].len() */
                    let items_left = len.wrapping_sub(self.read);

                    /* Pointer to first item in vec[write..write+items_left] slice */
                    let dropped_ptr = ptr.add(self.write);
                    /* Pointer to first item in vec[read..] slice */
                    let valid_ptr = ptr.add(self.read);

                    /* Copy `vec[read..]` to `vec[write..write+items_left]`.
                     * The slices can overlap, so `copy_nonoverlapping` cannot be used */
                    ptr::copy(valid_ptr, dropped_ptr, items_left);

                    /* How many items have been already dropped
                     * Basically vec[read..write].len() */
                    let dropped = self.read.wrapping_sub(self.write);

                    self.vec.set_len(len - dropped);
                }
            }
        }

        let mut gap = FillGapOnDrop {
            read: 1,
            write: 1,
            vec: self,
        };
        let ptr = gap.vec.as_mut_ptr();

        /* Drop items while going through Vec, it should be more efficient than
         * doing slice partition_dedup + truncate */
        /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
         * are always in-bounds and read_ptr never aliases prev_ptr */
        unsafe {
            while gap.read < len {
                let read_ptr = ptr.add(gap.read);
                let prev_ptr = ptr.add(gap.write.wrapping_sub(1));

                if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
                    // Increase `gap.read` now since the drop may panic.
                    gap.read += 1;
                    /* We have found duplicate, drop it in-place */
                    ptr::drop_in_place(read_ptr);
                } else {
                    let write_ptr = ptr.add(gap.write);

                    /* Because `read_ptr` can be equal to `write_ptr`, we either
                     * have to use `copy` or conditional `copy_nonoverlapping`.
                     * Looks like the first option is faster. */
                    ptr::copy(read_ptr, write_ptr, 1);

                    /* We have filled that place, so go further */
                    gap.write += 1;
                    gap.read += 1;
                }
            }

            /* Technically we could let `gap` clean up with its Drop, but
             * when `same_bucket` is guaranteed to not panic, this bloats a little
             * the codegen, so we just do it manually */
            gap.vec.set_len(gap.write);
            core::mem::forget(gap);
        }
    }
676
677    /// Appends an element to the back of a collection.
678    ///
679    /// # Panics
680    ///
681    /// Panics if the new capacity exceeds `isize::MAX` bytes.
682    #[inline]
683    pub fn push(&mut self, value: T) {
684        if self.len() == self.capacity() {
685            self.reserve_for_push(self.len()).unwrap();
686        }
687        unsafe {
688            let end = self.as_mut_ptr().add(self.len());
689            ptr::write(end, value);
690            *self.mem.len_mut() += 1;
691        }
692    }
693
694    /// Removes the last element from a vector and returns it, or [`None`] if it
695    /// is empty.
696    #[inline]
697    pub fn pop(&mut self) -> Option<T> {
698        if self.mem.len() == 0 {
699            None
700        } else {
701            unsafe {
702                *self.mem.len_mut() -= 1;
703                Some(ptr::read(self.as_mut_ptr().add(self.len())))
704            }
705        }
706    }
707
708    // #[inline]
709    // unsafe fn append_elements(&mut self, other: *const [T]) {
710    //     let count = unsafe { (*other).len() };
711    //     self.reserve(count);
712    //     let len = self.len();
713    //     unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
714    //     *self.mem.len_mut() += count;
715    // }
716
717    // drain
718
    /// Clears the vector, removing all values.
    ///
    /// Note that this method has no effect on the allocated capacity
    /// of the vector.
    #[inline]
    pub fn clear(&mut self) {
        // Truncating to zero resets the stored length without touching capacity.
        self.truncate(0)
    }
727
    /// Returns the number of elements in the vector, also referred to
    /// as its 'length'.
    ///
    /// The length is stored in (and read from) the memory backend itself.
    #[inline]
    pub fn len(&self) -> usize {
        self.mem.len()
    }

    /// Returns `true` if the vector contains no elements.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
740
741    /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
742    ///
743    /// If `new_len` is greater than `len`, the `Vec` is extended by the
744    /// difference, with each additional slot filled by calling the closure `f`.
745    /// The return values from `f` will end up in the `Vec` in the order they
746    /// have been generated.
747    ///
748    /// If `new_len` is less than `len`, the `Vec` is simply truncated.
749    ///
750    /// This method uses a closure to create new values on every push. If you
751    /// want to use the [`Default`] trait to generate values, you can
752    /// pass [`Default::default`] as the second argument.
753    pub fn resize_with<F>(&mut self, new_len: usize, f: F)
754    where
755        F: FnMut() -> T,
756    {
757        let len = self.len();
758        if new_len > len {
759            self.extend_with(new_len - len, ExtendFunc(f));
760        } else {
761            self.truncate(new_len);
762        }
763    }
764
    /// Returns the remaining spare capacity of the vector as a slice of
    /// `MaybeUninit<T>`.
    ///
    /// The returned slice can be used to fill the vector with data (e.g. by
    /// reading from a file) before marking the data as initialized using the
    /// [`set_len`] method.
    ///
    /// [`set_len`]: MemVec::set_len
    #[inline]
    pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<T>] {
        // Note:
        // This method is not implemented in terms of `split_at_spare_mut`,
        // to prevent invalidation of pointers to the buffer.
        unsafe {
            // SAFETY: `len <= capacity`, so the range `len..capacity` lies
            // within the allocation; the slots may be uninitialized, which is
            // exactly what `MaybeUninit<T>` expresses.
            core::slice::from_raw_parts_mut(
                self.as_mut_ptr().add(self.len()) as *mut MaybeUninit<T>,
                self.capacity() - self.len(),
            )
        }
    }
785}
786
/// Source of values for `extend_with`: `next` for every slot but the last,
/// `last` (by value) for the final one.
trait ExtendWith<T> {
    fn next(&mut self) -> T;
    fn last(self) -> T;
}

/// `ExtendWith` backed by a closure: every slot is filled by calling it.
struct ExtendFunc<F>(F);

impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
    fn next(&mut self) -> T {
        (self.0)()
    }
    fn last(mut self) -> T {
        // The final element comes from the same closure call.
        self.next()
    }
}
801
// Cold, out-of-line panic helper for arithmetic overflow while computing a new
// capacity. `#[cold]`/`#[inline(never)]` match the other panic helpers in this
// file (`assert_failed`) and keep the happy paths of the growth code small.
// Returns `usize` so it can be used directly with `Option::unwrap_or_else`.
#[cold]
#[inline(never)]
fn capacity_overflow() -> usize {
    panic!("capacity overflow");
}
805
806impl<'a, T: Copy + std::cmp::PartialEq, A: 'a + Memory> MemVec<'a, T, A> {
807    /// Removes consecutive repeated elements in the vector according to the
808    /// [`PartialEq`] trait implementation.
809    ///
810    /// If the vector is sorted, this removes all duplicates.
811    #[inline]
812    pub fn dedup(&mut self) {
813        self.dedup_by(|a, b| a == b)
814    }
815}
816
817/// port ofRawVec utilities
818impl<'a, T: Copy, A: 'a + Memory> MemVec<'a, T, A> {
819    pub(crate) const MIN_NON_ZERO_CAP: usize = if core::mem::size_of::<T>() == 1 {
820        8
821    } else if core::mem::size_of::<T>() <= 1024 {
822        4
823    } else {
824        1
825    };
826
827    fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
828        additional > self.capacity().wrapping_sub(len)
829    }
830
831    fn reserve_for_push(&mut self, len: usize) -> Result<(), A::Error> {
832        self.grow_amortized(len, 1)
833    }
834
835    fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), A::Error> {
836        // This is ensured by the calling contexts.
837        debug_assert!(additional > 0);
838
839        // if core::mem::size_of::<T>() == 0 {
840        //     // Since we return a capacity of `usize::MAX` when `elem_size` is
841        //     // 0, getting to here necessarily means the `RawVec` is overfull.
842        //     return Error(CapacityOverflow.into());
843        // }
844
845        // Nothing we can really do about these checks, sadly.
846        let required_cap = len
847            .checked_add(additional)
848            .unwrap_or_else(capacity_overflow);
849
850        // This guarantees exponential growth. The doubling cannot overflow
851        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
852        let cap = core::cmp::max(self.capacity() * 2, required_cap);
853        let cap = core::cmp::max(Self::MIN_NON_ZERO_CAP, cap);
854        self.mem.reserve(cap * core::mem::size_of::<T>())
855    }
856
857    // The constraints on this method are much the same as those on
858    // `grow_amortized`, but this method is usually instantiated less often so
859    // it's less critical.
860    fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), A::Error> {
861        // if core::mem::size_of::<T>() == 0 {
862        //     // Since we return a capacity of `usize::MAX` when the type size is
863        //     // 0, getting to here necessarily means the `RawVec` is overfull.
864        //     return Error(CapacityOverflow.into());
865        // }
866
867        let cap = len
868            .checked_add(additional)
869            .unwrap_or_else(capacity_overflow);
870        self.mem.reserve(cap * core::mem::size_of::<T>())
871    }
872
873    /// Extend the vector by `n` values, using the given generator.
874    fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
875        self.reserve(n);
876
877        unsafe {
878            let mut ptr = self.as_mut_ptr().add(self.len());
879            // Write all elements except the last one
880            for _ in 1..n {
881                ptr::write(ptr, value.next());
882                ptr = ptr.offset(1);
883                // Increment the length in every step in case next() panics
884                *self.mem.len_mut() += 1;
885            }
886
887            if n > 0 {
888                // We can write the last element directly without cloning needlessly
889                core::ptr::write(ptr, value.last());
890                *self.mem.len_mut() += 1;
891            }
892
893            // len set by scope guard
894        }
895    }
896}
897
898impl<'a, T: Copy, A: 'a + Memory> Deref for MemVec<'a, T, A> {
899    type Target = [T];
900    fn deref(&self) -> &Self::Target {
901        self.as_slice()
902    }
903}
904
905impl<'a, T: Copy, A: 'a + Memory> DerefMut for MemVec<'a, T, A> {
906    fn deref_mut(&mut self) -> &mut Self::Target {
907        self.as_mut_slice()
908    }
909}
910
911impl<'a, T: Copy + Hash, A: Memory> Hash for MemVec<'a, T, A> {
912    #[inline]
913    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
914        Hash::hash(&**self, state)
915    }
916}
917
918impl<'a, T: Copy, I: SliceIndex<[T]>, A: Memory> Index<I> for MemVec<'a, T, A> {
919    type Output = I::Output;
920
921    #[inline]
922    fn index(&self, index: I) -> &Self::Output {
923        Index::index(&**self, index)
924    }
925}
926
927impl<'a, T: Copy, I: SliceIndex<[T]>, A: Memory> IndexMut<I> for MemVec<'a, T, A> {
928    #[inline]
929    fn index_mut(&mut self, index: I) -> &mut Self::Output {
930        IndexMut::index_mut(&mut **self, index)
931    }
932}
933
934impl<'a, 'm, T: Copy, A: Memory> IntoIterator for &'a MemVec<'m, T, A> {
935    type Item = &'a T;
936    type IntoIter = slice::Iter<'a, T>;
937
938    fn into_iter(self) -> slice::Iter<'a, T> {
939        self.iter()
940    }
941}
942
943impl<'a, 'm, T: Copy, A: Memory> IntoIterator for &'a mut MemVec<'m, T, A> {
944    type Item = &'a mut T;
945    type IntoIter = slice::IterMut<'a, T>;
946
947    fn into_iter(self) -> slice::IterMut<'a, T> {
948        self.iter_mut()
949    }
950}
951
// NOTE: the `Extend` impls below are disabled — they were copied from std's
// `Vec` internals and rely on `iter.as_slice()`/`forget_remaining_elements`,
// which are specialization details not available outside the standard
// library; a stable implementation would simply loop over `iter` and `push`.
// impl<'a, T: Copy + Hash, A: Memory> Extend<T> for MemVec<'a, T, A> {
953//     #[inline]
954//     fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
955//         unsafe {
956//             self.append_elements(iter.as_slice() as _);
957//         }
958//         iter.forget_remaining_elements();
959//     }
960
961//     // #[inline]
962//     // fn extend_one(&mut self, item: T) {
963//     //     self.push(item);
964//     // }
965
966//     // #[inline]
967//     // fn extend_reserve(&mut self, additional: usize) {
968//     //     self.reserve(additional);
969//     // }
970// }
971
972// impl<'a, T: Copy + 'a, A: Memory> Extend<&'a T> for MemVec<'a, T, A> {
973//     fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
974//         unsafe {
975//             self.append_elements(iter.as_slice() as _);
976//         }
977//         iter.forget_remaining_elements();
978//     }
979
980//     // #[inline]
981//     // fn extend_one(&mut self, &item: &'a T) {
982//     //     self.push(item);
983//     // }
984
985//     // #[inline]
986//     // fn extend_reserve(&mut self, additional: usize) {
987//     //     self.reserve(additional);
988//     // }
989// }
990
991impl<'a, T: Copy + PartialEq, A: Memory> PartialEq for MemVec<'a, T, A> {
992    fn eq(&self, other: &Self) -> bool {
993        self.as_slice() == other.as_slice()
994    }
995}
996
997impl<'a, T: Copy + PartialOrd, A: Memory> PartialOrd for MemVec<'a, T, A> {
998    #[inline]
999    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1000        PartialOrd::partial_cmp(&**self, &**other)
1001    }
1002}
1003
// Marker impl: with `T: Eq`, slice equality is a total equivalence relation.
impl<'a, T: Copy + Eq, A: Memory> Eq for MemVec<'a, T, A> {}
1005
1006impl<'a, T: Ord + Copy, A: Memory> Ord for MemVec<'a, T, A> {
1007    #[inline]
1008    fn cmp(&self, other: &Self) -> Ordering {
1009        Ord::cmp(&**self, &**other)
1010    }
1011}
1012
// No `Drop` impl is needed: `T: Copy` guarantees elements have no destructors.
1014
1015impl<'a, T: core::fmt::Debug + Copy, A: Memory> core::fmt::Debug for MemVec<'a, T, A> {
1016    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
1017        core::fmt::Debug::fmt(&**self, f)
1018    }
1019}
1020
1021impl<'a, T: Copy, A: Memory> AsRef<[T]> for MemVec<'a, T, A> {
1022    fn as_ref(&self) -> &[T] {
1023        self
1024    }
1025}
1026
1027impl<'a, T: Copy, A: Memory> AsMut<[T]> for MemVec<'a, T, A> {
1028    fn as_mut(&mut self) -> &mut [T] {
1029        self
1030    }
1031}