//! slop_alloc/raw_buffer.rs — a `RawVec`-style raw allocation buffer built on
//! this crate's `Allocator` trait.
1use core::{
2    alloc::Layout,
3    marker::PhantomData,
4    mem::{self, ManuallyDrop},
5    ptr::{self, NonNull},
6};
7
8use thiserror::Error;
9
10use crate::Allocator;
11
/// A low-level buffer of `capacity` uninitialized `T`s managed through an
/// allocator `A`.
///
/// Owns the allocation (pointer + capacity + allocator) but never the element
/// values: it allocates and frees memory, and leaves initializing and
/// dropping elements to the caller (see the `Drop` impl, which frees memory
/// without dropping contents).
#[derive(Debug)]
pub struct RawBuffer<T, A: Allocator> {
    // Type-erased pointer/capacity/allocator triple; `T` only enters through
    // the layout math done by the typed wrapper methods.
    inner: RawBufferInner<A>,
    // Expresses logical ownership of `T` values (variance / drop-check)
    // without storing any `T` directly.
    _marker: PhantomData<T>,
}
17
/// The untyped core of `RawBuffer`: a raw allocation described by a byte
/// pointer, an element capacity, and the owning allocator.
///
/// Keeping this part free of `T` lets most of the plumbing avoid per-element
/// monomorphization; element size/alignment are passed in where needed.
#[derive(Debug)]
struct RawBufferInner<A> {
    // Start of the allocation, or a dangling align-valued pointer when
    // `cap == 0` (see `new_in`); never null.
    ptr: NonNull<u8>,
    // Capacity in *elements*, not bytes. `cap == 0` means "unallocated".
    cap: usize,
    // Allocator that produced `ptr`; deallocation must go through it.
    alloc: A,
}
24
25#[derive(Copy, Clone, PartialEq, Eq, Debug, Error)]
26pub enum TryReserveError {
27    /// Error due to the computed capacity exceeding the collection's maximum
28    /// (usually `isize::MAX` bytes).
29    #[error("capacity overflow")]
30    CapacityOverflow,
31
32    /// The memory allocator returned an error
33    #[error("allocation error for layout {:?}", layout)]
34    AllocError {
35        /// The layout of allocation request that failed
36        layout: Layout,
37    },
38}
39
40impl<T, A: Allocator> RawBuffer<T, A> {
41    /// Like `new`, but parameterized over the choice of allocator for
42    /// the returned `RawVec`.
43    #[inline]
44    pub const fn new_in(alloc: A) -> Self {
45        Self { inner: RawBufferInner::new_in(alloc, align_of::<T>()), _marker: PhantomData }
46    }
47
48    #[inline]
49    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
50        Self { inner: RawBufferInner::with_capacity_in::<T>(capacity, alloc), _marker: PhantomData }
51    }
52
53    #[inline]
54    pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
55        match RawBufferInner::try_with_capacity_in::<T>(capacity, alloc) {
56            Ok(inner) => Ok(Self { inner, _marker: PhantomData }),
57            Err(e) => Err(e),
58        }
59    }
60
61    #[must_use = "losing the pointer will leak memory"]
62    pub fn into_raw_parts(self) -> (*mut T, usize, A) {
63        let me = ManuallyDrop::new(self);
64        let capacity = me.capacity();
65        let ptr = me.ptr();
66        let alloc = unsafe { ptr::read(me.allocator()) };
67        (ptr, capacity, alloc)
68    }
69
70    /// Reconstitutes a `RawBuffer` from a pointer, capacity, and allocator.
71    ///
72    /// # Safety
73    ///
74    /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
75    /// `capacity`.
76    /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
77    /// systems). For ZSTs capacity is ignored.
78    /// If the `ptr` and `capacity` come from a `RawBuffer` created via `alloc`, then this is
79    /// guaranteed.
80    #[inline]
81    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
82        // SAFETY: Precondition passed to the caller
83        unsafe {
84            let ptr = ptr.cast();
85            Self {
86                inner: RawBufferInner::from_raw_parts_in(ptr, capacity, alloc),
87                _marker: PhantomData,
88            }
89        }
90    }
91
92    /// Gets a raw pointer to the start of the allocation. Note that this is
93    /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
94    /// be careful.
95    #[inline]
96    pub fn ptr(&self) -> *mut T {
97        self.inner.ptr()
98    }
99
100    #[inline]
101    pub fn non_null(&self) -> NonNull<T> {
102        self.inner.non_null()
103    }
104
105    /// Gets the capacity of the allocation.
106    ///
107    /// This will always be `usize::MAX` if `T` is zero-sized.
108    #[inline]
109    pub fn capacity(&self) -> usize {
110        self.inner.capacity(size_of::<T>())
111    }
112
113    /// Returns a shared reference to the allocator backing this `RawVec`.
114    #[inline]
115    pub const fn allocator(&self) -> &A {
116        self.inner.allocator()
117    }
118
119    /// # Safety
120    ///
121    /// TODO
122    #[inline]
123    pub unsafe fn allocator_mut(&mut self) -> &mut A {
124        &mut self.inner.alloc
125    }
126}
127
impl<A: Allocator> RawBufferInner<A> {
    /// Creates an unallocated buffer whose pointer is a dangling,
    /// `align`-addressed sentinel.
    ///
    /// Like `new`, but parameterized over the choice of allocator for
    /// the returned `RawVec`.
    #[inline]
    const fn new_in(alloc: A, align: usize) -> Self {
        // SAFETY(review): builds a dangling but well-aligned `NonNull` from
        // the alignment value; sound only because a type's alignment is never
        // zero. The transmute (rather than an `as` cast) keeps this usable in
        // `const fn`, where int-to-pointer casts are rejected — consider
        // `NonNull::new_unchecked(ptr::without_provenance_mut(align))` once
        // MSRV allows. TODO confirm.
        let ptr = unsafe { core::mem::transmute::<usize, NonNull<u8>>(align) };
        // `cap: 0` means "unallocated". zero-sized types are ignored.
        Self { ptr, cap: 0, alloc }
    }

    /// Infallible allocation: delegates to `try_allocate_in` and funnels any
    /// error into the central diverging handler (`handle_error`).
    #[inline]
    fn with_capacity_in<T>(capacity: usize, alloc: A) -> Self {
        match Self::try_allocate_in::<T>(capacity, alloc) {
            Ok(this) => this,
            Err(err) => handle_error(err),
        }
    }

    /// Attempts to allocate space for `capacity` elements of `T`.
    ///
    /// Returns an unallocated buffer — no allocator call — when the requested
    /// byte size is zero (zero-sized `T` or `capacity == 0`).
    fn try_allocate_in<T>(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
        // We avoid `unwrap_or_else` here because it bloats the amount of
        // LLVM IR generated.
        let layout = Layout::array::<T>(capacity).map_err(|_| TryReserveError::CapacityOverflow)?;

        // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
        if layout.size() == 0 {
            return Ok(Self::new_in(alloc, layout.align()));
        }

        // Reject sizes above `isize::MAX` on sub-64-bit targets.
        alloc_guard(layout.size())?;

        // NOTE(review): this crate's `Allocator::allocate` is evidently an
        // `unsafe fn`; its precondition is not visible from this file —
        // confirm `layout` (nonzero size) satisfies it.
        let result = unsafe { alloc.allocate(layout) };

        let ptr = match result {
            Ok(ptr) => ptr,
            Err(_) => return Err(TryReserveError::AllocError { layout }),
        };

        // Allocators currently return a `NonNull<[u8]>` whose length
        // matches the size requested. If that ever changes, the capacity
        // here should change to `ptr.len() / mem::size_of::<T>()`.
        Ok(Self { ptr: ptr.cast(), cap: capacity, alloc })
    }

    /// Typed raw view of the base pointer (dangling when unallocated).
    #[inline]
    fn ptr<T>(&self) -> *mut T {
        self.non_null::<T>().as_ptr()
    }

    /// Typed `NonNull` view of the base pointer (dangling when unallocated).
    #[inline]
    fn non_null<T>(&self) -> NonNull<T> {
        self.ptr.cast()
    }

    /// Capacity in elements for an element of `elem_size` bytes;
    /// `usize::MAX` for zero-sized elements.
    #[inline]
    fn capacity(&self, elem_size: usize) -> usize {
        if elem_size == 0 {
            usize::MAX
        } else {
            self.cap
        }
    }

    /// # Safety
    ///
    /// `ptr` must be non-null, and together with `cap` must describe an
    /// allocation owned by `alloc`, as documented on
    /// `RawBuffer::from_raw_parts_in`.
    #[inline]
    unsafe fn from_raw_parts_in(ptr: *mut u8, cap: usize, alloc: A) -> Self {
        Self { ptr: unsafe { NonNull::new_unchecked(ptr) }, cap, alloc }
    }

    /// Shared reference to the owning allocator.
    #[inline]
    const fn allocator(&self) -> &A {
        &self.alloc
    }

    /// Returns the pointer and full *byte* layout of the current allocation,
    /// or `None` if nothing was ever allocated (zero-sized elements or
    /// `cap == 0`), matching the "don't allocate for size 0" rule above.
    #[inline]
    fn current_memory(&self, elem_layout: Layout) -> Option<(NonNull<u8>, Layout)> {
        if elem_layout.size() == 0 || self.cap == 0 {
            None
        } else {
            // We could use Layout::array here which ensures the absence of isize and usize
            // overflows and could hypothetically handle differences between stride and
            // size, but this memory has already been allocated so we know it can't
            // overflow and currently Rust does not support such types. So we can do
            // better by skipping some checks and avoid an unwrap.
            unsafe {
                let alloc_size = elem_layout.size().unchecked_mul(self.cap);
                let layout = Layout::from_size_align_unchecked(alloc_size, elem_layout.align());
                Some((self.ptr, layout))
            }
        }
    }

    /// Fallible allocation entry point; see `try_allocate_in`.
    #[inline]
    fn try_with_capacity_in<T>(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
        Self::try_allocate_in::<T>(capacity, alloc)
    }

    /// # Safety
    ///
    /// This function deallocates the owned allocation, but does not update `ptr` or `cap` to
    /// prevent double-free or use-after-free. Essentially, do not do anything with the caller
    /// after this function returns.
    /// Ideally this function would take `self` by move, but it cannot because it exists to be
    /// called from a `Drop` impl.
    unsafe fn deallocate(&mut self, elem_layout: Layout) {
        if let Some((ptr, layout)) = self.current_memory(elem_layout) {
            // SAFETY: `current_memory` only returns `Some` for a live
            // allocation made by `self.alloc` with exactly this byte layout.
            unsafe {
                self.alloc.deallocate(ptr, layout);
            }
        }
    }
}
238
239impl<T, A: Allocator> Drop for RawBuffer<T, A> {
240    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
241    fn drop(&mut self) {
242        // SAFETY: We are in a Drop impl, self.inner will not be used again.
243        unsafe {
244            let layout =
245                Layout::from_size_align_unchecked(mem::size_of::<T>(), mem::align_of::<T>());
246            self.inner.deallocate(layout)
247        }
248    }
249}
250
251// Central function for reserve error handling.
252#[cold]
253fn handle_error(e: TryReserveError) -> ! {
254    match e {
255        TryReserveError::CapacityOverflow => capacity_overflow(),
256        TryReserveError::AllocError { layout } => handle_alloc_error(layout),
257    }
258}
259
// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
// Note: panicking with a string literal keeps the panic payload a
// `&'static str` (no allocation on the unwind path).
#[inline(never)]
fn capacity_overflow() -> ! {
    panic!("capacity overflow");
}
267
/// Diverges with an allocation-failure panic.
///
/// Declared `const` so it is reachable from compile-time evaluation. Const
/// panics cannot format arguments, which is why `layout` cannot appear in the
/// message; a `panic!` with a string literal is valid in `const fn` directly,
/// so no wrapper function is needed.
#[cold]
pub const fn handle_alloc_error(_layout: Layout) -> ! {
    panic!("allocation failed");
}
276
277// We need to guarantee the following:
278// * We don't ever allocate `> isize::MAX` byte-size objects.
279// * We don't overflow `usize::MAX` and actually allocate too little.
280//
281// On 64-bit we just need to check for overflow since trying to allocate
282// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
283// an extra guard for this in case we're running on a platform which can use
284// all 4GB in user-space, e.g., PAE or x32.
285#[inline]
286fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
287    if usize::BITS < 64 && alloc_size > isize::MAX as usize {
288        Err(TryReserveError::CapacityOverflow)
289    } else {
290        Ok(())
291    }
292}