allocator_api2/
raw_vec.rs

1use core::alloc::LayoutError;
2use core::mem::{self, ManuallyDrop, MaybeUninit};
3use core::ops::Drop;
4use core::ptr::{self, NonNull};
5use core::slice;
6use core::{cmp, fmt};
7
8use super::{
9    alloc::{Allocator, Global, Layout},
10    assume,
11    boxed::Box,
12};
13
14#[cfg(not(no_global_oom_handling))]
15use super::alloc::handle_alloc_error;
16
/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TryReserveError {
    // The concrete reason for the failure; exposed read-only via `kind()`.
    kind: TryReserveErrorKind,
}
22
23impl TryReserveError {
24    /// Details about the allocation that caused the error
25    pub fn kind(&self) -> TryReserveErrorKind {
26        self.kind.clone()
27    }
28}
29
/// Details of the allocation that caused a `TryReserveError`
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum TryReserveErrorKind {
    /// Error due to the computed capacity exceeding the collection's maximum
    /// (usually `isize::MAX` bytes).
    CapacityOverflow,

    /// The memory allocator returned an error
    AllocError {
        /// The layout of allocation request that failed
        layout: Layout,

        // Hidden unit field that keeps this variant from being constructed or
        // exhaustively matched outside the crate (poor-man's `#[non_exhaustive]`).
        #[doc(hidden)]
        non_exhaustive: (),
    },
}
46
47use TryReserveErrorKind::*;
48
49impl From<TryReserveErrorKind> for TryReserveError {
50    #[inline(always)]
51    fn from(kind: TryReserveErrorKind) -> Self {
52        Self { kind }
53    }
54}
55
56impl From<LayoutError> for TryReserveErrorKind {
57    /// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
58    #[inline(always)]
59    fn from(_: LayoutError) -> Self {
60        TryReserveErrorKind::CapacityOverflow
61    }
62}
63
64impl fmt::Display for TryReserveError {
65    fn fmt(
66        &self,
67        fmt: &mut core::fmt::Formatter<'_>,
68    ) -> core::result::Result<(), core::fmt::Error> {
69        fmt.write_str("memory allocation failed")?;
70        let reason = match self.kind {
71            TryReserveErrorKind::CapacityOverflow => {
72                " because the computed capacity exceeded the collection's maximum"
73            }
74            TryReserveErrorKind::AllocError { .. } => {
75                " because the memory allocator returned an error"
76            }
77        };
78        fmt.write_str(reason)
79    }
80}
81
// `core::error::Error` is only available on newer toolchains (gated here by the
// `fresh-rust` feature); otherwise fall back to `std::error::Error` when `std`
// is enabled. At most one of these impls is compiled.
#[cfg(feature = "fresh-rust")]
impl core::error::Error for TryReserveError {}

#[cfg(all(not(feature = "fresh-rust"), feature = "std"))]
impl std::error::Error for TryReserveError {}
87
/// Whether `allocate_in` should request uninitialized or zeroed memory.
#[cfg(not(no_global_oom_handling))]
enum AllocInit {
    /// The contents of the new memory are uninitialized.
    Uninitialized,
    /// The new memory is guaranteed to be zeroed.
    Zeroed,
}
95
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `NonNull::dangling()` on zero-sized types.
/// * Produces `NonNull::dangling()` on zero-length allocations.
/// * Avoids freeing `NonNull::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::NonNull` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to use the largest available capacity.
///
/// This type does not in anyway inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of a zero-sized types is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
    // Start of the allocation, or `NonNull::dangling()` when unallocated.
    ptr: NonNull<T>,
    // Number of `T`s the allocation holds; 0 means "unallocated". For ZSTs
    // this stays 0 and `capacity()` reports `usize::MAX` instead.
    cap: usize,
    // The allocator used for every (de)allocation of this buffer.
    alloc: A,
}
124
// Safety: RawVec owns both T and A, so sending is safe if
// sending is safe for T and A.
// (The raw `NonNull<T>` field suppresses the auto-impl, hence this manual one.)
unsafe impl<T, A: Allocator> Send for RawVec<T, A>
where
    T: Send,
    A: Send,
{
}
133
// Safety: RawVec owns both T and A, so sharing is safe if
// sharing is safe for T and A.
// (The raw `NonNull<T>` field suppresses the auto-impl, hence this manual one.)
unsafe impl<T, A: Allocator> Sync for RawVec<T, A>
where
    T: Sync,
    A: Sync,
{
}
142
impl<T> RawVec<T, Global> {
    /// Creates the biggest possible `RawVec` (on the system heap)
    /// without allocating. If `T` has positive size, then this makes a
    /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
    /// `RawVec` with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    #[must_use]
    pub const fn new() -> Self {
        // Delegates to the allocator-parameterized constructor with `Global`.
        Self::new_in(Global)
    }

    /// Creates a `RawVec` (on the system heap) with exactly the
    /// capacity and alignment requirements for a `[T; capacity]`. This is
    /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
    /// zero-sized. Note that if `T` is zero-sized this means you will
    /// *not* get a `RawVec` with the requested capacity.
    ///
    /// # Panics
    ///
    /// Panics if the requested capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[must_use]
    #[inline(always)]
    pub fn with_capacity(capacity: usize) -> Self {
        // Uninitialized memory: the caller is responsible for writing before reading.
        Self::with_capacity_in(capacity, Global)
    }

    /// Like `with_capacity`, but guarantees the buffer is zeroed.
    #[cfg(not(no_global_oom_handling))]
    #[must_use]
    #[inline(always)]
    pub fn with_capacity_zeroed(capacity: usize) -> Self {
        Self::with_capacity_zeroed_in(capacity, Global)
    }
}
182
impl<T, A: Allocator> RawVec<T, A> {
    // Tiny Vecs are dumb. Skip to:
    // - 8 if the element size is 1, because any heap allocators is likely
    //   to round up a request of less than 8 bytes to at least 8 bytes.
    // - 4 if elements are moderate-sized (<= 1 KiB).
    // - 1 otherwise, to avoid wasting too much space for very short Vecs.
    pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
        8
    } else if mem::size_of::<T>() <= 1024 {
        4
    } else {
        1
    };

    /// Like `new`, but parameterized over the choice of allocator for
    /// the returned `RawVec`.
    #[inline(always)]
    pub const fn new_in(alloc: A) -> Self {
        // `cap: 0` means "unallocated". zero-sized types are ignored.
        Self {
            ptr: NonNull::dangling(),
            cap: 0,
            alloc,
        }
    }

    /// Like `with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
    }

    /// Like `with_capacity_zeroed`, but parameterized over the choice
    /// of allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
    }

    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (See description of type for details.)
    ///
    /// # Safety
    ///
    /// * `len` must be greater than or equal to the most recently requested capacity, and
    /// * `len` must be less than or equal to `self.capacity()`.
    ///
    /// Note, that the requested capacity and `self.capacity()` could differ, as
    /// an allocator could overallocate and return a greater memory block than requested.
    #[inline(always)]
    pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
        // Sanity-check one half of the safety requirement (we cannot check the other half).
        debug_assert!(
            len <= self.capacity(),
            "`len` must be smaller than or equal to `self.capacity()`"
        );

        // `ManuallyDrop` prevents our own `Drop` from freeing the buffer we
        // are about to hand over to the `Box`.
        let me = ManuallyDrop::new(self);
        // SAFETY: the caller guarantees `len <= self.capacity()`, so the slice
        // stays within the allocation; `ptr::read` moves the allocator out of
        // `me`, which is never dropped, so it is not read twice.
        unsafe {
            let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
            Box::<[MaybeUninit<T>], A>::from_raw_in(slice, ptr::read(&me.alloc))
        }
    }

    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
        // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
        if mem::size_of::<T>() == 0 || capacity == 0 {
            Self::new_in(alloc)
        } else {
            // We avoid `unwrap_or_else` here because it bloats the amount of
            // LLVM IR generated.
            let layout = match Layout::array::<T>(capacity) {
                Ok(layout) => layout,
                Err(_) => capacity_overflow(),
            };
            match alloc_guard(layout.size()) {
                Ok(_) => {}
                Err(_) => capacity_overflow(),
            }
            let result = match init {
                AllocInit::Uninitialized => alloc.allocate(layout),
                AllocInit::Zeroed => alloc.allocate_zeroed(layout),
            };
            let ptr = match result {
                Ok(ptr) => ptr,
                Err(_) => handle_alloc_error(layout),
            };

            // Allocators currently return a `NonNull<[u8]>` whose length
            // matches the size requested. If that ever changes, the capacity
            // here should change to `ptr.len() / mem::size_of::<T>()`.
            Self {
                // SAFETY: a successful allocation always yields a non-null pointer.
                ptr: unsafe { NonNull::new_unchecked(ptr.cast().as_ptr()) },
                cap: capacity,
                alloc,
            }
        }
    }

    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
    ///
    /// # Safety
    ///
    /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
    /// `capacity`.
    /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
    /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
    /// guaranteed.
    #[inline(always)]
    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
        Self {
            // SAFETY: the caller guarantees `ptr` came from an allocation,
            // so it is non-null.
            ptr: unsafe { NonNull::new_unchecked(ptr) },
            cap: capacity,
            alloc,
        }
    }

    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// `NonNull::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
    /// be careful.
    #[inline(always)]
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        if mem::size_of::<T>() == 0 {
            usize::MAX
        } else {
            self.cap
        }
    }

    /// Returns a shared reference to the allocator backing this `RawVec`.
    #[inline(always)]
    pub fn allocator(&self) -> &A {
        &self.alloc
    }

    /// Returns the live allocation as `(pointer, layout)`, or `None` when
    /// nothing was ever allocated (ZSTs or `cap == 0`).
    #[inline(always)]
    fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
        if mem::size_of::<T>() == 0 || self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            // SAFETY: `Layout::array` succeeded when this buffer was
            // allocated with `self.cap` elements, so it cannot fail now.
            unsafe {
                let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
                Some((self.ptr.cast(), layout))
            }
        }
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already have enough capacity, will
    /// reallocate enough space plus comfortable slack space to get amortized
    /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
    /// itself to panic.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn reserve(&mut self, len: usize, additional: usize) {
        // Callers expect this function to be very cheap when there is already sufficient capacity.
        // Therefore, we move all the resizing and error-handling logic from grow_amortized and
        // handle_reserve behind a call, while making sure that this function is likely to be
        // inlined as just a comparison and a call if the comparison fails.
        // NOTE(review): `#[cold]` combined with `#[inline(always)]` is
        // contradictory (std uses `#[inline(never)]` here) — confirm intent.
        #[cold]
        #[inline(always)]
        fn do_reserve_and_handle<T, A: Allocator>(
            slf: &mut RawVec<T, A>,
            len: usize,
            additional: usize,
        ) {
            handle_reserve(slf.grow_amortized(len, additional));
        }

        if self.needs_to_grow(len, additional) {
            do_reserve_and_handle(self, len, additional);
        }
    }

    /// A specialized version of `reserve()` used only by the hot and
    /// oft-instantiated `Vec::push()`, which does its own capacity check.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn reserve_for_push(&mut self, len: usize) {
        handle_reserve(self.grow_amortized(len, 1));
    }

    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    #[inline(always)]
    pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) {
            self.grow_amortized(len, additional)
        } else {
            Ok(())
        }
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already, will reallocate the
    /// minimum possible amount of memory necessary. Generally this will be
    /// exactly the amount of memory necessary, but in principle the allocator
    /// is free to give back more than we asked for.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe code
    /// *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn reserve_exact(&mut self, len: usize, additional: usize) {
        handle_reserve(self.try_reserve_exact(len, additional));
    }

    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    #[inline(always)]
    pub fn try_reserve_exact(
        &mut self,
        len: usize,
        additional: usize,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) {
            self.grow_exact(len, additional)
        } else {
            Ok(())
        }
    }

    /// Shrinks the buffer down to the specified capacity. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn shrink_to_fit(&mut self, cap: usize) {
        handle_reserve(self.shrink(cap));
    }
}
460
impl<T, A: Allocator> RawVec<T, A> {
    /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
    /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
    #[inline(always)]
    fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
        // `wrapping_sub` avoids a panic when `len > capacity()`: the
        // subtraction wraps to a huge value and this returns `false`, which
        // matches the documented "may fail to actually allocate" caveat on
        // the `reserve` family.
        additional > self.capacity().wrapping_sub(len)
    }

    #[inline(always)]
    fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
        // Allocators currently return a `NonNull<[u8]>` whose length matches
        // the size requested. If that ever changes, the capacity here should
        // change to `ptr.len() / mem::size_of::<T>()`.
        // SAFETY: `ptr` comes from a successful allocation, so it is non-null.
        self.ptr = unsafe { NonNull::new_unchecked(ptr.cast().as_ptr()) };
        self.cap = cap;
    }

    // This method is usually instantiated many times. So we want it to be as
    // small as possible, to improve compile times. But we also want as much of
    // its contents to be statically computable as possible, to make the
    // generated code run faster. Therefore, this method is carefully written
    // so that all of the code that depends on `T` is within it, while as much
    // of the code that doesn't depend on `T` as possible is in functions that
    // are non-generic over `T`.
    #[inline(always)]
    fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        // This is ensured by the calling contexts.
        debug_assert!(additional > 0);

        if mem::size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when `elem_size` is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        // Nothing we can really do about these checks, sadly.
        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;

        // This guarantees exponential growth. The doubling cannot overflow
        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
        let cap = cmp::max(self.cap * 2, required_cap);
        let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);

        let new_layout = Layout::array::<T>(cap);

        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }

    // The constraints on this method are much the same as those on
    // `grow_amortized`, but this method is usually instantiated less often so
    // it's less critical.
    #[inline(always)]
    fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if mem::size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when the type size is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
        let new_layout = Layout::array::<T>(cap);

        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }

    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
        assert!(
            cap <= self.capacity(),
            "Tried to shrink to a larger capacity"
        );

        // ZSTs and unallocated buffers have nothing to shrink.
        let (ptr, layout) = if let Some(mem) = self.current_memory() {
            mem
        } else {
            return Ok(());
        };

        // NOTE(review): when `cap == 0` this requests a shrink to a zero-sized
        // layout instead of deallocating outright; afterwards
        // `current_memory()` returns `None`, so the zero-sized block is never
        // explicitly deallocated — confirm this matches the allocator contract.
        let ptr = unsafe {
            // `Layout::array` cannot overflow here because it would have
            // overflowed earlier when capacity was larger.
            let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
            self.alloc
                .shrink(ptr, layout, new_layout)
                .map_err(|_| AllocError {
                    layout: new_layout,
                    non_exhaustive: (),
                })?
        };
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }
}
561
// This function is outside `RawVec` to minimize compile times. See the comment
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
//
// Grows an existing allocation to `new_layout` (or makes a fresh allocation
// when there is none), mapping layout and allocator failures into
// `TryReserveError`.
#[inline(always)]
fn finish_grow<A>(
    new_layout: Result<Layout, LayoutError>,
    current_memory: Option<(NonNull<u8>, Layout)>,
    alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
    A: Allocator,
{
    // Check for the error here to minimize the size of `RawVec::grow_*`.
    let new_layout = new_layout.map_err(|_| CapacityOverflow)?;

    alloc_guard(new_layout.size())?;

    let memory = if let Some((ptr, old_layout)) = current_memory {
        debug_assert_eq!(old_layout.align(), new_layout.align());
        // SAFETY: `ptr`/`old_layout` describe the live allocation (they come
        // from `current_memory()`), and alignment equality is asserted above.
        unsafe {
            // The allocator checks for alignment equality
            assume(old_layout.align() == new_layout.align());
            alloc.grow(ptr, old_layout, new_layout)
        }
    } else {
        alloc.allocate(new_layout)
    };

    memory.map_err(|_| {
        AllocError {
            layout: new_layout,
            non_exhaustive: (),
        }
        .into()
    })
}
599
impl<T, A: Allocator> Drop for RawVec<T, A> {
    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
    #[inline(always)]
    fn drop(&mut self) {
        // `current_memory()` is `None` for ZSTs and never-allocated buffers,
        // so a dangling pointer is never deallocated.
        if let Some((ptr, layout)) = self.current_memory() {
            // SAFETY: `ptr` was allocated by `self.alloc` with exactly `layout`.
            unsafe { self.alloc.deallocate(ptr, layout) }
        }
    }
}
609
610// Central function for reserve error handling.
611#[cfg(not(no_global_oom_handling))]
612#[inline(always)]
613fn handle_reserve(result: Result<(), TryReserveError>) {
614    match result.map_err(|e| e.kind()) {
615        Err(CapacityOverflow) => capacity_overflow(),
616        Err(AllocError { layout, .. }) => handle_alloc_error(layout),
617        Ok(()) => { /* yay */ }
618    }
619}
620
621// We need to guarantee the following:
622// * We don't ever allocate `> isize::MAX` byte-size objects.
623// * We don't overflow `usize::MAX` and actually allocate too little.
624//
625// On 64-bit we just need to check for overflow since trying to allocate
626// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
627// an extra guard for this in case we're running on a platform which can use
628// all 4GB in user-space, e.g., PAE or x32.
629
630#[inline(always)]
631fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
632    if usize::BITS < 64 && alloc_size > isize::MAX as usize {
633        Err(CapacityOverflow.into())
634    } else {
635        Ok(())
636    }
637}
638
// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
fn capacity_overflow() -> ! {
    // Diverges unconditionally; callers rely on the `!` return type.
    panic!("capacity overflow");
}