flatty_base/vec.rs

1use crate::{
2    emplacer::Emplacer,
3    error::{Error, ErrorKind},
4    traits::{Flat, FlatBase, FlatDefault, FlatSized, FlatUnsized, FlatValidate},
5    utils::{floor_mul, max, mem::slice_ptr_len},
6};
7use core::{mem::MaybeUninit, ptr};
8use stavec::GenericVec;
9
10pub use stavec::traits::{Length, Slot};
11
12#[repr(transparent)]
13pub struct MaybeInvalid<T: FlatSized>(MaybeUninit<T>);
14impl<T: FlatSized> MaybeInvalid<T> {
15    fn as_ptr(&self) -> *const T {
16        self.0.as_ptr()
17    }
18}
// Implements stavec's `Slot` by delegating each operation to `MaybeUninit`.
unsafe impl<T: FlatSized> Slot for MaybeInvalid<T> {
    type Item = T;

    /// Wraps an initialized item into a slot.
    fn new(item: Self::Item) -> Self {
        Self(MaybeUninit::new(item))
    }
    /// # Safety
    ///
    /// The slot must contain an initialized, valid item.
    unsafe fn assume_init(self) -> Self::Item {
        self.0.assume_init()
    }
    /// # Safety
    ///
    /// The slot must contain an initialized, valid item. This performs a
    /// bitwise read, so the caller must avoid creating improper duplicates.
    unsafe fn assume_init_read(&self) -> Self::Item {
        self.0.assume_init_read()
    }
}
32
/// Growable flat vector of sized items.
///
/// It doesn't allocate memory on the heap but instead stores its contents in the same memory behind itself.
///
/// Obviously, this type is DST.
///
/// The length field of type `L` is stored inline at the start of the buffer
/// (see the `Empty` emplacer below, which writes `L::zero()` there), and item
/// storage uses `MaybeInvalid<T>` slots so trailing capacity may hold
/// arbitrary bytes.
pub type FlatVec<T, L = usize> = GenericVec<[MaybeInvalid<T>], L>;
39
/// Byte offset from the start of the vector to the start of its item data.
///
/// Item data is placed after the length field (`L::SIZE` bytes) and must be
/// aligned for `T`, hence the `max`.
/// NOTE(review): this is only a valid alignment when `L::SIZE` is a multiple
/// of `T::ALIGN` whenever it is the larger value — presumably guaranteed
/// because both are powers of two; confirm against the `Flat` contract.
trait DataOffset<T, L>
where
    T: Flat + Sized,
    L: Flat + Length,
{
    const DATA_OFFSET: usize = max(L::SIZE, T::ALIGN);
}

// Make the offset available as `FlatVec::<T, L>::DATA_OFFSET`.
impl<T, L> DataOffset<T, L> for FlatVec<T, L>
where
    T: Flat + Sized,
    L: Flat + Length,
{
}
54
/// Sized type that has same alignment as [`FlatVec<T, L>`](`FlatVec`).
///
/// As a `#[repr(C)]` struct containing one `T` and one `L`, its alignment is
/// the maximum of the two — the same value as `FlatVec::ALIGN` below.
#[repr(C)]
pub struct FlatVecAlignAs<T, L>(T, L)
where
    T: Flat + Sized,
    L: Flat + Length;
61
62unsafe impl<T, L> FlatBase for FlatVec<T, L>
63where
64    T: Flat + Sized,
65    L: Flat + Length,
66{
67    const ALIGN: usize = max(L::ALIGN, T::ALIGN);
68    const MIN_SIZE: usize = Self::DATA_OFFSET;
69
70    fn size(&self) -> usize {
71        Self::DATA_OFFSET + T::SIZE * self.len()
72    }
73}
74
unsafe impl<T, L> FlatUnsized for FlatVec<T, L>
where
    T: Flat + Sized,
    L: Flat + Length,
{
    type AlignAs = FlatVecAlignAs<T, L>;

    /// Builds a fat pointer to `Self` from a byte-slice pointer.
    unsafe fn ptr_from_bytes(bytes: *mut [u8]) -> *mut Self {
        // Capacity (the slice metadata) is derived from the byte length:
        // bytes remaining after the header, rounded down to `Self::ALIGN`,
        // divided by the item size.
        // NOTE(review): the subtraction underflows if the buffer is shorter
        // than `DATA_OFFSET` — presumably callers guarantee `MIN_SIZE`; confirm.
        let meta = floor_mul(slice_ptr_len(bytes) - Self::DATA_OFFSET, Self::ALIGN) / T::SIZE;
        ptr::slice_from_raw_parts_mut(bytes as *mut u8, meta) as *mut Self
    }
    /// Inverse of `ptr_from_bytes`: recovers the full byte span of the vector.
    unsafe fn ptr_to_bytes(this: *mut Self) -> *mut [u8] {
        // Total length = header + capacity (taken from fat-pointer metadata) * item size.
        let len = Self::DATA_OFFSET + slice_ptr_len(this as *mut [T]) * T::SIZE;
        ptr::slice_from_raw_parts_mut(this as *mut u8, len)
    }
}
91
/// Emplacer that initializes a [`FlatVec`] as empty.
pub struct Empty;
/// Emplacer that fills a [`FlatVec`] from a fixed-size array.
pub struct FromArray<T, const N: usize>(pub [T; N]);
/// Emplacer that fills a [`FlatVec`] from an iterator, up to capacity.
pub struct FromIterator<T, I: Iterator<Item = T>>(pub I);
95
unsafe impl<T, L> Emplacer<FlatVec<T, L>> for Empty
where
    T: Flat + Sized,
    L: Flat + Length,
{
    /// Initializes the buffer as an empty vector by zeroing the length field.
    unsafe fn emplace_unchecked(self, bytes: &mut [u8]) -> Result<(), Error> {
        // Write zero length into the `L` field at the start of the buffer.
        unsafe { (bytes.as_mut_ptr() as *mut L).write(L::zero()) };
        // Now it's safe to assume that `Self` is initialized, because vector data is `[MaybeInvalid<T>]`.
        Ok(())
    }
}
107
108unsafe impl<T, L, const N: usize> Emplacer<FlatVec<T, L>> for FromArray<T, N>
109where
110    T: Flat + Sized,
111    L: Flat + Length,
112{
113    unsafe fn emplace_unchecked(self, bytes: &mut [u8]) -> Result<(), Error> {
114        unsafe { <Empty as Emplacer<FlatVec<T, L>>>::emplace_unchecked(Empty, bytes) }?;
115        let vec = unsafe { FlatVec::<T, L>::from_mut_bytes_unchecked(bytes) };
116        if vec.capacity() < N {
117            return Err(Error {
118                kind: ErrorKind::InsufficientSize,
119                pos: 0,
120            });
121        }
122        assert_eq!(vec.extend_from_iter(self.0.into_iter()), N);
123        Ok(())
124    }
125}
126
127unsafe impl<T, L, I: Iterator<Item = T>> Emplacer<FlatVec<T, L>> for FromIterator<T, I>
128where
129    T: Flat + Sized,
130    L: Flat + Length,
131{
132    unsafe fn emplace_unchecked(self, bytes: &mut [u8]) -> Result<(), Error> {
133        unsafe { <Empty as Emplacer<FlatVec<T, L>>>::emplace_unchecked(Empty, bytes) }?;
134        let vec = unsafe { FlatVec::<T, L>::from_mut_bytes_unchecked(bytes) };
135        for x in self.0 {
136            if vec.push(x).is_err() {
137                return Err(Error {
138                    kind: ErrorKind::InsufficientSize,
139                    pos: 0,
140                });
141            }
142        }
143        Ok(())
144    }
145}
146
impl<T, L> FlatDefault for FlatVec<T, L>
where
    T: Flat + Sized,
    L: Flat + Length,
{
    /// The default state of a flat vector is empty.
    type DefaultEmplacer = Empty;

    fn default_emplacer() -> Empty {
        Empty
    }
}
158
unsafe impl<T, L> FlatValidate for FlatVec<T, L>
where
    T: Flat + Sized,
    L: Flat + Length,
{
    /// Validates the length field, then each occupied item slot in order.
    unsafe fn validate_unchecked(bytes: &[u8]) -> Result<(), Error> {
        // The length field lives at the start of the buffer; validate it first.
        unsafe { L::validate_unchecked(bytes) }?;
        // Now it's safe to assume that `Self` is initialized, because vector data is `[MaybeInvalid<T>]`.
        let this = unsafe { Self::from_bytes_unchecked(bytes) };
        // The stored length must not exceed the capacity implied by buffer size.
        if this.len() > this.capacity() {
            return Err(Error {
                kind: ErrorKind::InsufficientSize,
                pos: Self::DATA_OFFSET,
            });
        }
        // Only the first `len` slots must hold valid items; slots beyond that
        // may contain arbitrary bytes. The range is in-bounds per the check above.
        for x in unsafe { this.data().get_unchecked(..this.len()) } {
            unsafe { T::validate_ptr(x.as_ptr()) }?;
        }
        Ok(())
    }
}
180
// SAFETY(review): marker impl — soundness rests on the `FlatBase`,
// `FlatUnsized` and `FlatValidate` impls above upholding the `Flat`
// contract; confirm against the trait's documented requirements.
unsafe impl<T, L> Flat for FlatVec<T, L>
where
    T: Flat + Sized,
    L: Flat + Length,
{
}
187
/// Creates [`FlatVec`] emplacer from given array.
///
/// Mirrors the three forms of `vec!`: empty, `[elem; n]` repetition, and an
/// explicit element list (optional trailing comma).
#[macro_export]
macro_rules! flat_vec {
    // Empty vector emplacer.
    () => {
        $crate::vec::FromArray([])
    };
    // `n` copies of `elem` (subject to the usual array-repeat rules).
    ($elem:expr; $n:expr) => {
        $crate::vec::FromArray([$elem; $n])
    };
    // Explicit list of elements.
    ($($x:expr),+ $(,)?) => {
        $crate::vec::FromArray([$($x),+])
    };
}
pub use flat_vec;
202
#[cfg(all(test, feature = "std"))]
mod tests {
    use super::*;
    use crate::utils::alloc::AlignedBytes;
    use std::mem::{align_of_val, size_of_val};

    // NOTE(review): despite its name, this test only asserts alignment —
    // consider also asserting `DATA_OFFSET` directly.
    #[test]
    fn data_offset() {
        let mut bytes = AlignedBytes::new(4 + 3 * 4, 4);
        let flat_vec = FlatVec::<i32, u16>::default_in_place(&mut bytes).unwrap();

        assert_eq!(align_of_val(flat_vec), FlatVec::<i32, u16>::ALIGN);
    }

    // Alignment is max(L::ALIGN, T::ALIGN): here u32 length dominates i16 items.
    #[test]
    fn align() {
        let mut bytes = AlignedBytes::new(4 + 3 * 2, 4);
        let flat_vec = FlatVec::<i16, u32>::default_in_place(&mut bytes).unwrap();

        assert_eq!(align_of_val(flat_vec), 4);
        // 10 bytes minus the 4-byte header, floored to alignment => 2 items.
        assert_eq!(flat_vec.capacity(), 2);
        assert_eq!(size_of_val(flat_vec), 8);
    }

    // A freshly defaulted vector is empty with capacity from the buffer size.
    #[test]
    fn len_cap() {
        let mut bytes = AlignedBytes::new(4 + 3 * 4, 4);
        let flat_vec = FlatVec::<i32, u32>::default_in_place(&mut bytes).unwrap();
        assert_eq!(flat_vec.capacity(), 3);
        assert_eq!(flat_vec.len(), 0);
    }

    // `size()` tracks occupied bytes: header only when empty, header + items when full.
    #[test]
    fn size() {
        let mut bytes = AlignedBytes::new(4 + 3 * 4, 4);
        let flat_vec = FlatVec::<i32, u32>::default_in_place(&mut bytes).unwrap();
        assert_eq!(FlatVec::<i32, u32>::DATA_OFFSET, flat_vec.size());

        // Fill to capacity; push fails once the vector is full.
        for i in 0.. {
            if flat_vec.push(i).is_err() {
                break;
            }
        }
        assert_eq!(flat_vec.len(), 3);
        assert_eq!(size_of_val(flat_vec), flat_vec.size());
    }

    // `extend_from_slice` copies as many items as fit and reports the count.
    #[test]
    fn extend_from_slice() {
        let mut bytes = AlignedBytes::new(4 * 6, 4);
        let vec = FlatVec::<i32, u32>::default_in_place(&mut bytes).unwrap();
        assert_eq!(vec.capacity(), 5);
        assert_eq!(vec.len(), 0);
        assert_eq!(vec.remaining(), 5);

        assert_eq!(vec.extend_from_slice(&[1, 2, 3]), 3);
        assert_eq!(vec.len(), 3);
        assert_eq!(vec.remaining(), 2);
        assert_eq!(vec.as_slice(), &[1, 2, 3][..]);

        // Only 2 of 3 items fit; the partial count is returned, not an error.
        assert_eq!(vec.extend_from_slice(&[4, 5, 6]), 2);
        assert_eq!(vec.len(), 5);
        assert_eq!(vec.remaining(), 0);
        assert_eq!(vec.as_slice(), &[1, 2, 3, 4, 5][..]);
    }

    // Equality compares contents element-wise, independent of capacity.
    #[test]
    fn eq() {
        let mut mem_a = AlignedBytes::new(4 * 5, 4);
        let vec_a = FlatVec::<i32, u32>::new_in_place(&mut mem_a, flat_vec![1, 2, 3, 4]).unwrap();

        let mut mem_b = AlignedBytes::new(4 * 5, 4);
        let vec_b = FlatVec::<i32, u32>::new_in_place(&mut mem_b, flat_vec![1, 2, 3, 4]).unwrap();

        let mut mem_c = AlignedBytes::new(4 * 3, 4);
        let vec_c = FlatVec::<i32, u32>::new_in_place(&mut mem_c, flat_vec![1, 2]).unwrap();

        assert_eq!(vec_a, vec_b);
        assert_ne!(vec_a, vec_c);
        assert_ne!(vec_b, vec_c);

        // Mutating one element breaks equality.
        vec_b[3] = 5;
        assert_ne!(vec_a, vec_b);
    }
}
287}