// virtual_buffer/concurrent/vec/raw.rs

1//! A low-level, concurrent, in-place growable vector.
2
3use super::{RawVec, RawVecInner};
4use crate::{
5    Allocation, SizedTypeProperties, is_aligned,
6    vec::{GrowthStrategy, TryReserveError, handle_error},
7};
8use core::{
9    alloc::Layout, cmp, fmt, iter::FusedIterator, marker::PhantomData, mem::ManuallyDrop,
10    panic::UnwindSafe, ptr, slice,
11};
12
/// Used to build a new vector.
///
/// This struct is created by the [`builder`] method on [`RawVec`].
///
/// [`builder`]: RawVec::builder
#[derive(Clone, Copy, Debug)]
pub struct VecBuilder {
    // Upper bound on the number of elements the built vector can ever hold; `build` fails if
    // this would exceed `isize::MAX` bytes.
    max_capacity: usize,
    // Initial capacity requested for the built vector; must not exceed `max_capacity`.
    capacity: usize,
    // Strategy used when the built vector grows its committed region.
    growth_strategy: GrowthStrategy,
    // Layout of the optional header allocated before the vector's first element; defaults to a
    // zero-sized layout, i.e. no header.
    header_layout: Layout,
}
25
26impl VecBuilder {
27    #[inline]
28    pub(super) const fn new(max_capacity: usize) -> Self {
29        VecBuilder {
30            max_capacity,
31            capacity: 0,
32            growth_strategy: GrowthStrategy::new(),
33            header_layout: Layout::new::<()>(),
34        }
35    }
36
37    /// The built `RawVec` will have the minimum capacity required for `capacity` elements.
38    ///
39    /// The capacity can be greater due to the alignment to the [page size].
40    ///
41    /// [page size]: crate#pages
42    #[inline]
43    pub const fn capacity(&mut self, capacity: usize) -> &mut Self {
44        self.capacity = capacity;
45
46        self
47    }
48
49    /// The built `RawVec` will have the given `growth_strategy`.
50    ///
51    /// # Panics
52    ///
53    /// Panics if `growth_strategy` isn't valid per the documentation of [`GrowthStrategy`].
54    #[inline]
55    #[track_caller]
56    pub const fn growth_strategy(&mut self, growth_strategy: GrowthStrategy) -> &mut Self {
57        growth_strategy.validate();
58
59        self.growth_strategy = growth_strategy;
60
61        self
62    }
63
64    /// The built `RawVec` will have a header with the given `header_layout`.
65    ///
66    /// `header_layout.size()` bytes will be allocated before the start of the vector's elements,
67    /// with the start aligned to `header_layout.align()`. You can use [`RawVec::as_ptr`] and
68    /// offset backwards to access the header.
69    ///
70    /// # Panics
71    ///
72    /// Panics if `header_layout` is not padded to its alignment.
73    #[inline]
74    #[track_caller]
75    pub const fn header(&mut self, header_layout: Layout) -> &mut Self {
76        assert!(is_aligned(header_layout.size(), header_layout.align()));
77
78        self.header_layout = header_layout;
79
80        self
81    }
82
83    /// Builds the `RawVec`.
84    ///
85    /// # Safety
86    ///
87    /// `T` must be zeroable.
88    ///
89    /// # Panics
90    ///
91    /// - Panics if the `max_capacity` would exceed `isize::MAX` bytes.
92    /// - Panics if the `capacity` is greater than the `max_capacity`.
93    /// - Panics if [reserving] the allocation returns an error.
94    ///
95    /// [reserving]: crate#reserving
96    #[must_use]
97    #[track_caller]
98    pub unsafe fn build<T>(&self) -> RawVec<T> {
99        match unsafe { self.try_build() } {
100            Ok(vec) => vec,
101            Err(err) => handle_error(err),
102        }
103    }
104
105    /// Tries to build the `RawVec`, returning an error when allocation fails.
106    ///
107    /// # Safety
108    ///
109    /// `T` must be zeroable.
110    ///
111    /// # Errors
112    ///
113    /// - Returns an error if the `max_capacity` would exceed `isize::MAX` bytes.
114    /// - Returns an error if the `capacity` is greater than the `max_capacity`.
115    /// - Returns an error if [reserving] the allocation returns an error.
116    ///
117    /// [reserving]: crate#reserving
118    pub unsafe fn try_build<T>(&self) -> Result<RawVec<T>, TryReserveError> {
119        Ok(RawVec {
120            inner: unsafe {
121                RawVecInner::new(
122                    self.max_capacity,
123                    self.capacity,
124                    self.growth_strategy,
125                    self.header_layout,
126                    size_of::<T>(),
127                    align_of::<T>(),
128                )
129            }?,
130            marker: PhantomData,
131        })
132    }
133}
134
/// An iterator that moves out of a vector.
///
/// This struct is created by the [`into_iter`] method on [`RawVec`].
///
/// [`into_iter`]: RawVec::into_iter
pub struct IntoIter<T> {
    // Next element to yield from the front; equal to `end` once the iterator is exhausted.
    start: *const T,
    // One past the last element to yield from the back. For ZSTs, the remaining length is
    // encoded as the address difference `end - start` instead of pointing at real memory.
    end: *const T,
    // Keeps the vector's allocation alive for as long as the iterator exists; only read via
    // `Drop` of the field itself, hence `dead_code`.
    #[allow(dead_code)]
    allocation: Allocation,
    // Marks this type as an owner of `T` for variance and drop-check purposes.
    marker: PhantomData<T>,
}
147
// SAFETY: We own the collection, and synchronization to it is ensured using mutable references.
// The `T: Send` bound mirrors that of other owning containers.
unsafe impl<T: Send> Send for IntoIter<T> {}
150
// SAFETY: We own the collection, and synchronization to it is ensured using mutable references.
// Shared access only hands out `&[T]`, so `T: Sync` suffices.
unsafe impl<T: Sync> Sync for IntoIter<T> {}
153
// We own the collection, so this should be no different than for `RawVec`. The raw pointers
// would otherwise suppress the auto-implied `UnwindSafe`.
impl<T: UnwindSafe> UnwindSafe for IntoIter<T> {}
156
impl<T> IntoIter<T> {
    /// Converts `vec` into an iterator, taking over its allocation without running the
    /// vector's own destructor.
    #[inline]
    pub(super) fn new(vec: RawVec<T>) -> Self {
        let mut vec = ManuallyDrop::new(vec);

        // SAFETY: `vec` is wrapped in a `ManuallyDrop` such that a double-free can't happen even
        // if a panic was possible below.
        let allocation = unsafe { ptr::read(&vec.inner.allocation) };

        let start = vec.as_mut_ptr();

        // NOTE(review): `len` is clamped to the capacity defensively; presumably the concurrent
        // length can transiently exceed the committed capacity — confirm against `RawVec`'s
        // invariants.
        let len = cmp::min(vec.len_mut(), vec.capacity_mut());

        let end = if T::IS_ZST {
            // For ZSTs, no memory is touched; the remaining length is encoded as the address
            // distance between `start` and `end`.
            start.cast::<u8>().wrapping_add(len).cast::<T>()
        } else {
            // SAFETY: The ownership synchronizes with setting the capacity, making sure that the
            // newly committed memory is visible here. The constructor of `RawVec` must ensure that
            // `T` is zeroable.
            unsafe { start.add(len) }
        };

        IntoIter {
            start,
            end,
            allocation,
            marker: PhantomData,
        }
    }

    /// Returns the remaining items of this iterator as a slice.
    #[inline]
    #[must_use]
    pub fn as_slice(&self) -> &[T] {
        // SAFETY: `start..end` always denotes the elements not yet yielded, and `len()` measures
        // exactly that range.
        unsafe { slice::from_raw_parts(self.start, self.len()) }
    }

    /// Returns the remaining items of this iterator as a mutable slice.
    #[inline]
    #[must_use]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        // SAFETY: As for `as_slice`; `&mut self` guarantees exclusive access to the elements.
        unsafe { slice::from_raw_parts_mut(self.start.cast_mut(), self.len()) }
    }
}
201
202impl<T> AsRef<[T]> for IntoIter<T> {
203    #[inline]
204    fn as_ref(&self) -> &[T] {
205        self.as_slice()
206    }
207}
208
209impl<T> AsMut<[T]> for IntoIter<T> {
210    #[inline]
211    fn as_mut(&mut self) -> &mut [T] {
212        self.as_mut_slice()
213    }
214}
215
216impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
217    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
218        f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
219    }
220}
221
222impl<T> Drop for IntoIter<T> {
223    fn drop(&mut self) {
224        let elements = ptr::slice_from_raw_parts_mut(self.start.cast_mut(), self.len());
225
226        // SAFETY: We own the collection, and it is being dropped, which ensures that the elements
227        // can't be accessed again.
228        unsafe { elements.drop_in_place() };
229    }
230}
231
232impl<T> Iterator for IntoIter<T> {
233    type Item = T;
234
235    #[inline]
236    fn next(&mut self) -> Option<Self::Item> {
237        if self.start == self.end {
238            return None;
239        }
240
241        let ptr = if T::IS_ZST {
242            self.end = self.end.cast::<u8>().wrapping_sub(1).cast::<T>();
243
244            self.start
245        } else {
246            let old = self.start;
247
248            // SAFETY: We checked that there are still elements remaining above.
249            self.start = unsafe { old.add(1) };
250
251            old
252        };
253
254        // SAFETY: We own the collection, and have just incremented the `start` pointer such that
255        // this element can't be accessed again.
256        Some(unsafe { ptr.read() })
257    }
258
259    #[inline]
260    fn size_hint(&self) -> (usize, Option<usize>) {
261        let len = self.len();
262
263        (len, Some(len))
264    }
265
266    #[inline]
267    fn count(self) -> usize {
268        self.len()
269    }
270}
271
272impl<T> DoubleEndedIterator for IntoIter<T> {
273    #[inline]
274    fn next_back(&mut self) -> Option<Self::Item> {
275        if self.start == self.end {
276            return None;
277        }
278
279        let ptr = if T::IS_ZST {
280            self.end = self.end.cast::<u8>().wrapping_sub(1).cast::<T>();
281
282            self.start
283        } else {
284            // SAFETY: We checked that there are still elements remaining above.
285            self.end = unsafe { self.end.sub(1) };
286
287            self.end
288        };
289
290        // SAFETY: We own the collection, and have just decremented the `end` pointer such that
291        // this element can't be accessed again.
292        Some(unsafe { ptr.read() })
293    }
294}
295
impl<T> ExactSizeIterator for IntoIter<T> {
    /// Returns the number of elements not yet yielded.
    #[inline]
    fn len(&self) -> usize {
        if T::IS_ZST {
            // For ZSTs the remaining count is stored directly as the address difference set up
            // by `IntoIter::new` and maintained by `next`/`next_back`.
            self.end.addr().wrapping_sub(self.start.addr())
        } else {
            // SAFETY:
            // - By our invariant, `self.end` is always greater than or equal to `self.start`.
            // - `start` and `end` were both created from the same object in `IntoIter::new`.
            // - `RawVec::new` ensures that the allocation size doesn't exceed `isize::MAX` bytes.
            // - We know that the allocation doesn't wrap around the address space.
            unsafe { self.end.offset_from_unsigned(self.start) }
        }
    }
}
311
312impl<T> FusedIterator for IntoIter<T> {}