// file: libdd_alloc/linear.rs

1// Copyright 2024-Present Datadog, Inc. https://www.datadoghq.com/
2// SPDX-License-Identifier: Apache-2.0
3
4use crate::{AllocError, Allocator};
5use core::alloc::Layout;
6use core::cell::Cell;
7use core::ptr::{slice_from_raw_parts_mut, NonNull};
8
/// [LinearAllocator] is an arena allocator, meaning that deallocating
/// individual allocations made by this allocator does nothing. Instead, the
/// whole backing memory is dropped at once. Destructors for these objects
/// are not called automatically and must be done by the caller if it's
/// necessary.
///
/// Once the slice of memory that underpins the LinearAllocator has been
/// allocated, allocations will begin to fail. It will not find new memory
/// to back allocations.
pub struct LinearAllocator<A: Allocator> {
    // Start of the backing memory region obtained from `allocator`.
    allocation_ptr: NonNull<u8>,
    // Size/alignment of the backing region as actually returned by the
    // allocator, which may be larger than what was requested.
    allocation_layout: Layout,
    // Bump cursor: number of bytes handed out so far. `Cell` gives the
    // interior mutability needed because `Allocator::allocate` takes `&self`.
    size: Cell<usize>,
    // The underlying allocator; the backing region is returned to it on drop.
    allocator: A,
}
24
25unsafe impl<A: Allocator> Send for LinearAllocator<A> {}
26
27impl<A: Allocator> LinearAllocator<A> {
28    /// Creates a new [LinearAllocator] by requesting the `layout` from the
29    /// provided `allocator`. Note that if the allocation is over-sized,
30    /// meaning it's larger than the requested `layout.size()`, then the
31    /// [LinearAllocator] will utilize this excess.
32    pub fn new_in(layout: Layout, allocator: A) -> Result<Self, AllocError> {
33        let allocation = allocator.allocate(layout)?;
34        // SAFETY: this is the size/align of the actual allocation, so it must
35        // be valid since the object exists.
36        let allocation_layout =
37            unsafe { Layout::from_size_align(allocation.len(), layout.align()).unwrap_unchecked() };
38        Ok(Self {
39            allocation_ptr: allocation.cast(),
40            allocation_layout,
41            size: Cell::new(0),
42            allocator,
43        })
44    }
45
46    /// Get the number of bytes allocated.
47    #[inline]
48    pub fn used_bytes(&self) -> usize {
49        self.size.get()
50    }
51
52    /// Get the number of bytes allocated by the underlying allocator.
53    /// This number is greater than or equal to [Self::used_bytes].
54    #[inline]
55    pub fn reserved_bytes(&self) -> usize {
56        self.allocation_layout.size()
57    }
58
59    /// Gets the number of bytes that can be allocated without requesting more
60    /// from the underlying allocator.
61    pub fn remaining_capacity(&self) -> usize {
62        self.reserved_bytes() - self.used_bytes()
63    }
64
65    fn base_ptr(&self) -> *mut u8 {
66        self.allocation_ptr.as_ptr()
67    }
68
69    /// Determine if the given layout will fit in the current allocator
70    pub fn has_capacity_for(&self, layout: Layout) -> bool {
71        // SAFETY: base_ptr + size will always be in the allocated range, or
72        // be the legally allowed one-past-the-end. If it doesn't fit, that's
73        // a serious bug elsewhere in our logic.
74        let align_offset =
75            unsafe { self.base_ptr().add(self.used_bytes()) }.align_offset(layout.align());
76        if let Some(needed_size) = align_offset.checked_add(layout.size()) {
77            self.remaining_capacity() >= needed_size
78        } else {
79            false
80        }
81    }
82}
83
84impl<A: Allocator> Drop for LinearAllocator<A> {
85    fn drop(&mut self) {
86        let ptr = self.allocation_ptr;
87        let layout = self.allocation_layout;
88        // SAFETY: passing the original ptr back in, with a compatible layout.
89        unsafe { self.allocator.deallocate(ptr, layout) };
90    }
91}
92
unsafe impl<A: Allocator> Allocator for LinearAllocator<A> {
    /// Bump-allocates `layout` out of the fixed backing region.
    ///
    /// Returns [AllocError] for zero-sized layouts (rejected by policy
    /// here), on arithmetic overflow of the padded size, or when the
    /// request no longer fits in the remaining capacity — this allocator
    /// never grows its backing memory.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        if layout.size() == 0 {
            return Err(AllocError);
        }

        // Find the needed allocation size including the necessary alignment.
        let size = self.used_bytes();
        // SAFETY: base_ptr + size will always be in the allocated range, or
        // be the legally allowed one-past-the-end. If it doesn't fit, that's
        // a serious bug elsewhere in our logic.
        let align_offset = unsafe { self.base_ptr().add(size) }.align_offset(layout.align());
        // checked_add guards against usize overflow of padding + size.
        let needed_size = align_offset.checked_add(layout.size()).ok_or(AllocError)?;
        let remaining_capacity = self.reserved_bytes() - size;

        // Fail if there isn't room.
        if needed_size > remaining_capacity {
            return Err(AllocError);
        }

        // Create a wide pointer to the correct place and len.
        let wide_ptr = {
            // SAFETY: just checked above that base_ptr + align_offset + size
            // of the requested layout fits within the underlying allocation.
            let thin_ptr = unsafe { self.base_ptr().add(size + align_offset) };

            // Do a debug check that the pointer is actually aligned.
            debug_assert_eq!(0, thin_ptr.align_offset(layout.align()));
            slice_from_raw_parts_mut(thin_ptr, layout.size())
        };

        // SAFETY: derived from the underlying allocation pointer, so it is
        // inherently not null.
        let non_null = unsafe { NonNull::new_unchecked(wide_ptr) };

        // Update the size before returning. The new cursor includes the
        // alignment padding, so the next allocation starts past this one.
        self.size.set(size + needed_size);
        Ok(non_null)
    }

    unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {
        // This is an arena. It does batch de-allocation when dropped.
    }
}
137
#[cfg(test)]
mod tests {
    use super::*;
    use crate::utils::*;
    use allocator_api2::alloc::Global;
    use bolero::generator::TypeGenerator;

    #[test]
    fn fuzz() {
        // avoid SUMMARY: libFuzzer: out-of-memory
        const MAX_SIZE: usize = 0x10000000;

        // Each op tuple is (size, align_bits, idx, val); the outer usize is
        // the arena's size hint.
        let ops = Vec::<(usize, u32, usize, u8)>::produce()
            .with()
            .values((0..=MAX_SIZE, 0..=32, 0..=MAX_SIZE, u8::produce()));
        bolero::check!()
            .with_generator((0..=MAX_SIZE, ops))
            .for_each(|(arena_size, op_vec)| {
                let arena =
                    LinearAllocator::new_in(Layout::from_size_align(*arena_size, 1).unwrap(), Global)
                        .unwrap();

                for (size, align_bits, idx, val) in op_vec {
                    fuzzer_inner_loop(&arena, *size, *align_bits, *idx, *val, MAX_SIZE)
                }
            })
    }

    #[test]
    fn test_basics() -> Result<(), AllocError> {
        const WIDTH: usize = 8;
        let arena = LinearAllocator::new_in(Layout::array::<u8>(24).unwrap(), Global)?;
        let layout = Layout::new::<[u8; WIDTH]>();

        assert!(arena.has_capacity_for(layout));
        let a = arena.allocate(layout)?;
        assert!(arena.has_capacity_for(layout));
        let b = arena.allocate(layout)?;
        assert!(arena.has_capacity_for(layout));
        let c = arena.allocate(layout)?;

        assert_ne!(a.as_ptr(), b.as_ptr());
        assert_ne!(a.as_ptr(), c.as_ptr());
        assert_ne!(b.as_ptr(), c.as_ptr());

        // LinearAllocator doesn't over-allocate, so we can test exact widths
        // and distances apart.
        assert_eq!(WIDTH, a.len());
        assert_eq!(WIDTH, b.len());
        assert_eq!(WIDTH, c.len());

        let a = a.cast::<u8>().as_ptr();
        let b = b.cast::<u8>().as_ptr();
        let c = c.cast::<u8>().as_ptr();

        unsafe {
            assert_eq!(WIDTH, b.offset_from(a) as usize);
            assert_eq!(WIDTH, c.offset_from(b) as usize);
        }

        // The arena is now exactly full.
        assert!(!arena.has_capacity_for(Layout::new::<bool>()));
        _ = arena.allocate(Layout::new::<bool>()).unwrap_err();

        Ok(())
    }
}
211
// Verifies that LinearAllocator returns correctly aligned pointers even when
// the backing allocation it received is deliberately mis-aligned.
#[cfg(test)]
mod alignment_tests {
    use super::*;
    use allocator_api2::alloc::Global;
    use core::mem::{align_of, size_of};
    use core::ops::RangeInclusive;

    // This is the order things will be allocated in.
    #[repr(C)]
    struct S {
        first: u8,
        second: u16,
        third: u32,
        fourth: u64,
    }

    /// An allocator that serves exactly one allocation, whose start address
    /// is bumped up to the next multiple of `align_to` — simulating backing
    /// memory that is not aligned for the caller's types.
    struct TestAllocator {
        // The full, unadjusted allocation obtained from Global.
        wide_ptr: NonNull<[u8]>,
        // Boundary the handed-out pointer is pushed up to. May be a
        // non-power-of-two (e.g. 3, 5, 6, 7) to force odd misalignment.
        align_to: usize,
        // Whether the single allocation is currently handed out.
        allocated: Cell<bool>,
    }

    /// Bytes needed to advance `ptr` to the next multiple of `align_to`.
    /// Unlike `pointer::align_offset`, this supports non-power-of-two
    /// alignments.
    fn align_offset(ptr: *const u8, align_to: usize) -> usize {
        let uintptr = ptr as usize;
        let rem = uintptr % align_to;
        if rem == 0 {
            0
        } else {
            align_to - rem
        }
    }

    impl Drop for TestAllocator {
        fn drop(&mut self) {
            // In debug builds, catch tests that leak the allocation.
            #[cfg(debug_assertions)]
            if self.allocated.get() {
                panic!("TestAllocator dropped while allocation was still held.");
            }

            // Return the original (unadjusted) allocation to Global with the
            // same size/align it was created with.
            let layout = unsafe { Layout::from_size_align_unchecked(self.wide_ptr.len(), 1) };
            unsafe { Global.deallocate(self.wide_ptr.cast(), layout) };
        }
    }

    impl TestAllocator {
        #[track_caller]
        fn new(align_to: usize) -> Self {
            // Only alignments up to align_of::<u64>() are exercised below.
            assert!(align_to <= 8);

            // Leave room for full S even when mis-aligned.
            let size = 2 * align_of::<S>() + size_of::<S>();
            let layout = unsafe { Layout::from_size_align_unchecked(size, 1) };
            let orig = Global.allocate(layout).unwrap();

            Self {
                wide_ptr: orig,
                align_to,
                allocated: Cell::new(false),
            }
        }
    }

    unsafe impl Allocator for TestAllocator {
        // Serves the single (deliberately offset) allocation, or fails if it
        // has already been handed out.
        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
            assert!(layout.size() <= self.wide_ptr.len());
            assert_eq!(1, layout.align());
            if self.allocated.get() {
                Err(AllocError)
            } else {
                self.allocated.set(true);
                // Offset the start so the returned region begins on an
                // `align_to` boundary; the reported length shrinks to match.
                let unaligned = self.wide_ptr.as_ptr() as *mut u8;
                let offset = align_offset(unaligned, self.align_to);
                let aligned = unsafe { unaligned.add(offset) };
                let wide = slice_from_raw_parts_mut(aligned, self.wide_ptr.len() - offset);
                Ok(unsafe { NonNull::new_unchecked(wide) })
            }
        }

        #[track_caller]
        unsafe fn deallocate(&self, _ptr: NonNull<u8>, layout: Layout) {
            // Check the caller hands back exactly the layout it was given.
            assert!(self.allocated.get());
            let unaligned = self.wide_ptr.as_ptr() as *mut u8;
            let offset = align_offset(unaligned, self.align_to);
            assert_eq!(self.wide_ptr.len() - offset, layout.size());
            assert_eq!(1, layout.align());
            self.allocated.set(false);
        }
    }

    /// Drives a LinearAllocator backed by a TestAllocator mis-aligned to
    /// `align_to`, and checks sizes, spacing, and alignment of each
    /// allocation.
    #[track_caller]
    fn test_alignment(align_to: usize) {
        let layout_u8 = Layout::new::<u8>();
        let layout_u16 = Layout::new::<u16>();
        let layout_u32 = Layout::new::<u32>();
        let layout_u64 = Layout::new::<u64>();

        let max_size = size_of::<S>() + align_of::<S>() - 1;
        let test_alloc = TestAllocator::new(align_to);

        let alloc = {
            let layout = Layout::array::<u8>(max_size).unwrap();
            LinearAllocator::new_in(layout, test_alloc).unwrap()
        };

        // To test alignment, allocate smallest to largest.
        assert!(alloc.has_capacity_for(layout_u8));
        let ptr_u8 = alloc.allocate(layout_u8).unwrap();
        assert!(alloc.has_capacity_for(layout_u16));
        let ptr_u16 = alloc.allocate(layout_u16).unwrap();
        assert!(alloc.has_capacity_for(layout_u32));
        let ptr_u32 = alloc.allocate(layout_u32).unwrap();
        assert!(alloc.has_capacity_for(layout_u64));
        let ptr_u64 = alloc.allocate(layout_u64).unwrap();

        // LinearAllocator doesn't over-allocate, so we can test exact widths.
        assert_eq!(layout_u8.size(), ptr_u8.len());
        assert_eq!(layout_u16.size(), ptr_u16.len());
        assert_eq!(layout_u32.size(), ptr_u32.len());
        assert_eq!(layout_u64.size(), ptr_u64.len());

        let thinptr_u8 = ptr_u8.as_ptr() as *mut u8;
        let thinptr_u16 = ptr_u16.as_ptr() as *mut u8;
        let thinptr_u32 = ptr_u32.as_ptr() as *mut u8;
        let thinptr_u64 = ptr_u64.as_ptr() as *mut u8;

        // Asserts that `second - first` (in bytes) falls within `range`.
        #[track_caller]
        fn assert_distance_in(second: *mut u8, first: *mut u8, range: RangeInclusive<usize>) {
            // SAFETY: pointers are part of the same underlying allocation.
            let distance = unsafe { second.offset_from(first) };
            let udistance = usize::try_from(distance).unwrap();
            assert!(range.contains(&udistance));
        }

        // These are a little permissive, but exact alignment is checked below.
        assert_distance_in(thinptr_u16, thinptr_u8, 1..=2);
        assert_distance_in(thinptr_u32, thinptr_u16, 2..=4);
        assert_distance_in(thinptr_u64, thinptr_u32, 4..=8);

        assert_eq!(0, thinptr_u8.align_offset(layout_u8.align()));
        assert_eq!(0, thinptr_u16.align_offset(layout_u16.align()));
        assert_eq!(0, thinptr_u32.align_offset(layout_u32.align()));
        assert_eq!(0, thinptr_u64.align_offset(layout_u64.align()));

        // There _may_ be a little bit of space left, depends on if the
        // underlying allocator over-allocates. But it should not panic.
        let has_capacity = alloc.has_capacity_for(layout_u64);
        assert_eq!(has_capacity, alloc.allocate(layout_u64).is_ok())
    }
    #[test]
    fn test_alignment_1() {
        test_alignment(1);
    }

    #[test]
    fn test_alignment_2() {
        test_alignment(2);
    }

    #[test]
    fn test_alignment_3() {
        test_alignment(3);
    }

    #[test]
    fn test_alignment_4() {
        test_alignment(4);
    }

    #[test]
    fn test_alignment_5() {
        test_alignment(5);
    }

    #[test]
    fn test_alignment_6() {
        test_alignment(6);
    }

    #[test]
    fn test_alignment_7() {
        test_alignment(7);
    }

    #[test]
    fn test_alignment_8() {
        test_alignment(8);
    }
}