// File: libdd_alloc/chain.rs

1// Copyright 2024-Present Datadog, Inc. https://www.datadoghq.com/
2// SPDX-License-Identifier: Apache-2.0
3
4use crate::LinearAllocator;
5use crate::{AllocError, Allocator};
6use core::alloc::Layout;
7use core::cell::UnsafeCell;
8use core::mem::size_of;
9use core::ptr::NonNull;
10
/// [ChainAllocator] is an arena allocator, meaning that deallocating
/// individual allocations made by this allocator does nothing. Instead, the
/// whole backing memory is dropped at once.  Destructors for these objects
/// are not called automatically and must be done by the caller if it's
/// necessary.
///
/// [ChainAllocator] creates a new [LinearAllocator] when the current one
/// doesn't have enough space for the requested allocation, and then links the
/// new [LinearAllocator] to the previous one, creating a chain. This is where
/// its name comes from.
pub struct ChainAllocator<A: Allocator + Clone> {
    /// Pointer to the newest node of the chain (null when no allocation has
    /// happened yet). Held in an [UnsafeCell] because `allocate(&self)` must
    /// replace it on growth; all references created through it are kept
    /// short-lived to avoid aliasing violations.
    top: UnsafeCell<ChainNodePtr<A>>,
    /// The size hint for the linear allocator's chunk.
    node_size: usize,
    /// The underlying allocator; cloned for each new [LinearAllocator] chunk.
    allocator: A,
}
27
/// A nullable pointer to a [ChainNode]. `None` represents the end of the
/// chain (or an empty chain).
#[derive(Clone, Copy)]
struct ChainNodePtr<A: Allocator> {
    ptr: Option<NonNull<ChainNode<A>>>,
}
32
33impl<A: Allocator> ChainNodePtr<A> {
34    #[inline]
35    fn as_mut_ptr(&self) -> *mut ChainNode<A> {
36        match self.ptr {
37            Some(non_null) => non_null.as_ptr(),
38            None => core::ptr::null_mut(),
39        }
40    }
41}
42
43impl<A: Allocator> ChainNodePtr<A> {
44    const fn new() -> Self {
45        Self { ptr: None }
46    }
47
48    fn as_ref(&self) -> Option<&ChainNode<A>> {
49        // SAFETY: active as long as not-null, never give out mut refs.
50        self.ptr.map(|p| unsafe { p.as_ref() })
51    }
52}
53
/// One link of the chain. The node itself lives at the beginning of the
/// allocation owned by `linear`, so dropping `linear` also releases the
/// memory holding this node.
struct ChainNode<A: Allocator> {
    /// Pointer to the previous (older, considered-full) node, or null at the
    /// end of the chain.
    prev: UnsafeCell<ChainNodePtr<A>>,
    /// Owns the chunk from which this node's header and all of its user
    /// allocations are carved.
    linear: LinearAllocator<A>,
}
59
60impl<A: Allocator> ChainNode<A> {
61    #[inline]
62    fn prev_ptr(&self) -> *mut ChainNode<A> {
63        // SAFETY: all references are temporary and do not escape local scope,
64        // preventing multiple references.
65        unsafe { (*self.prev.get()).as_mut_ptr() }
66    }
67}
68
69unsafe impl<A: Allocator + Clone> Send for ChainAllocator<A> {}
70
71impl<A: Allocator> ChainNode<A> {
72    fn remaining_capacity(&self) -> usize {
73        self.linear.remaining_capacity()
74    }
75
76    fn has_capacity_for(&self, layout: Layout) -> bool {
77        self.linear.has_capacity_for(layout)
78    }
79}
80
impl<A: Allocator + Clone> ChainAllocator<A> {
    /// The amount of bytes used by the [ChainAllocator] at the start of each
    /// chunk of the chain for bookkeeping.
    pub const CHAIN_NODE_OVERHEAD: usize = size_of::<ChainNode<A>>();

    /// The individual nodes need to be big enough that the overhead of a chain
    /// is worth it. This is somewhat arbitrarily chosen at the moment.
    const MIN_NODE_SIZE: usize = 4 * Self::CHAIN_NODE_OVERHEAD;

    /// Creates a new [ChainAllocator]. The `chunk_size_hint` is used as a
    /// size hint when creating new chunks of the chain. Note that the
    /// [ChainAllocator] will use some bytes at the beginning of each chunk of
    /// the chain. The number of bytes is [Self::CHAIN_NODE_OVERHEAD]. Keep
    /// this in mind when sizing your hint if you are trying to be precise,
    /// such as making sure a specific object fits.
    ///
    /// No memory is requested from `allocator` until the first allocation.
    pub const fn new_in(chunk_size_hint: usize, allocator: A) -> Self {
        Self {
            top: UnsafeCell::new(ChainNodePtr::new()),
            // max is not a const fn, do it manually.
            node_size: if chunk_size_hint < Self::MIN_NODE_SIZE {
                Self::MIN_NODE_SIZE
            } else {
                chunk_size_hint
            },
            allocator,
        }
    }

    /// Slow path of [Allocator::allocate]: requests a new chunk of at least
    /// `min_size` bytes (never smaller than the configured `node_size`),
    /// writes a [ChainNode] header at its start, links it to the current top
    /// node, and installs it as the new top of the chain.
    #[cold]
    #[inline(never)]
    fn grow(&self, min_size: usize) -> Result<(), AllocError> {
        let top = self.top.get();
        let chain_layout = Layout::new::<ChainNode<A>>();

        let node_size = min_size.max(self.node_size);
        let linear = {
            let layout = Layout::from_size_align(node_size, chain_layout.align())
                .map_err(|_| AllocError)?
                .pad_to_align();
            LinearAllocator::new_in(layout, self.allocator.clone())?
        };

        // This shouldn't fail: the fresh chunk is at least `min_size` bytes,
        // which the caller sized to include this header.
        let chain_node_addr = linear
            .allocate(chain_layout)?
            .as_ptr()
            .cast::<ChainNode<A>>();
        let chain_node = {
            // SAFETY: If non-null, this is a valid pointer, and the reference
            // is temporary, as all references for the chain nodes are.
            let ptr = unsafe { (*top).ptr };
            ChainNode {
                prev: UnsafeCell::new(ChainNodePtr { ptr }),
                linear,
            }
        };

        // SAFETY: this is a write operation to freshly allocated memory which
        // has the correct layout.
        unsafe { chain_node_addr.write(chain_node) };

        let chain_node_ptr = ChainNodePtr {
            // SAFETY: derived from allocation (not null).
            ptr: Some(unsafe { NonNull::new_unchecked(chain_node_addr) }),
        };

        // SAFETY: the value is just a pointer, no drops need to occur.
        // Additionally, references are always temporary for the top, so this
        // write will not violate aliasing rules.
        unsafe { self.top.get().write(chain_node_ptr) };

        Ok(())
    }

    /// Sums [LinearAllocator::reserved_bytes] over the node pointed to by
    /// `ptr` and all of its predecessors. A null `ptr` yields 0.
    fn capacity_helper(mut ptr: *mut ChainNode<A>) -> usize {
        let mut capacity = 0_usize;
        // SAFETY: if non-null, it's a valid pointer. The reference is
        // short-lived as usual to avoid aliasing issues.
        while let Some(chain_node) = unsafe { ptr.as_ref() } {
            capacity += chain_node.linear.reserved_bytes();
            ptr = chain_node.prev_ptr();
        }
        capacity
    }

    /// Returns the raw pointer to the top node of the chain (null if the
    /// chain is empty).
    fn top_chain_node_ptr(&self) -> *mut ChainNode<A> {
        // SAFETY: This is never exposed to users, and never used internally
        // in a way it will provide simultaneous mutable references.
        unsafe { (*self.top.get()).as_mut_ptr() }
    }

    /// Get the number of bytes allocated, including bytes for overhead.
    /// It does not count space it _could_ allocate still, such as unused space
    /// at the end of the top node in the chain. It does count unallocated
    /// space at the end of previous nodes in the chain.
    pub fn used_bytes(&self) -> usize {
        let mut chain_node_ptr = self.top_chain_node_ptr();
        // SAFETY: if non-null, the top pointer refers to a live node, and the
        // reference is short-lived as usual.
        let Some(chain_node) = (unsafe { chain_node_ptr.as_ref() }) else {
            return 0;
        };

        // The top node is the one that new allocations are made from, so it
        // is likely only partially full.
        let size = {
            let size = chain_node.linear.used_bytes();
            chain_node_ptr = chain_node.prev_ptr();
            size
        };

        // However, the previous nodes in the chain are all full, or at least
        // they should be considered full as any unused space at the end of
        // the allocation won't get used. So fetch `capacity` for previous
        // nodes in the chain.
        let prev_capacity = Self::capacity_helper(chain_node_ptr);
        size + prev_capacity
    }

    /// Get the number of bytes allocated by the underlying allocators for
    /// this chain. This number is greater than or equal to [Self::used_bytes].
    pub fn reserved_bytes(&self) -> usize {
        let ptr = self.top_chain_node_ptr();
        Self::capacity_helper(ptr)
    }

    /// Gets the number of bytes that can be allocated without requesting more
    /// from the underlying allocator.
    pub fn remaining_capacity(&self) -> usize {
        // Only need to look at the top node of the chain, all the previous
        // nodes are considered full.
        let chain_ptr = self.top.get();
        // SAFETY: If non-null, this is a valid pointer, and the reference is
        // temporary, as all references for the chain nodes are.
        let top = unsafe { (*chain_ptr).as_ref() };
        top.map(ChainNode::remaining_capacity).unwrap_or(0)
    }

    /// Can the requested `layout` be allocated without requesting more
    /// from the underlying allocator.
    pub fn has_capacity_for(&self, layout: Layout) -> bool {
        // Only need to look at the top node of the chain, all the previous
        // nodes are considered full.
        let chain_ptr = self.top.get();
        // SAFETY: If non-null, this is a valid pointer, and the reference is
        // temporary, as all references for the chain nodes are.
        if let Some(top) = unsafe { (*chain_ptr).as_ref() } {
            top.has_capacity_for(layout)
        } else {
            false
        }
    }
}
232
unsafe impl<A: Allocator + Clone> Allocator for ChainAllocator<A> {
    /// Allocates `layout` from the top chunk, growing the chain first when
    /// the top chunk lacks capacity.
    ///
    /// # Errors
    /// Fails for zero-sized layouts, on arithmetic overflow when sizing a new
    /// chunk, and when the underlying allocator cannot provide a new chunk.
    #[cfg_attr(debug_assertions, track_caller)]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // Zero-sized allocations are rejected outright rather than returning
        // a dangling pointer.
        if layout.size() == 0 {
            return Err(AllocError);
        }
        let layout = layout.pad_to_align();

        if !self.has_capacity_for(layout) {
            // The new chunk must hold the bookkeeping header in addition to
            // the requested allocation.
            let header_overhead = size_of::<ChainNode<A>>();
            let min_size = layout
                .size()
                .checked_add(header_overhead)
                .ok_or(AllocError)?;
            // The item may have an alignment requirement. `align-1` bytes are sufficient to give
            // space for padding.
            let min_size_with_alignment =
                min_size.checked_add(layout.align() - 1).ok_or(AllocError)?;

            self.grow(min_size_with_alignment)?;
        }
        debug_assert!(self.has_capacity_for(layout));

        // At this point:
        //  1. There's a top node.
        //  2. It has enough capacity for the allocation.

        let top = self.top.get();
        // SAFETY: grow()/has_capacity_for() above guarantee a non-null top;
        // the reference is temporary, as all chain node references are.
        let chain_node = unsafe { (*top).as_ref().unwrap_unchecked() };

        debug_assert!(chain_node.remaining_capacity() >= layout.size());

        let result = chain_node.linear.allocate(layout);
        // If this fails, there's a bug in the allocator.
        debug_assert!(result.is_ok());
        result
    }

    unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {
        // This is an arena. It does batch de-allocation when dropped.
    }
}
275
impl<A: Allocator + Clone> Drop for ChainAllocator<A> {
    /// Walks the chain from the newest node to the oldest, dropping each
    /// node's [LinearAllocator] — which frees the chunk the node itself
    /// lives in. The `prev` pointer is copied out before each drop.
    fn drop(&mut self) {
        // SAFETY: top node is alive, type is fine to `read` because it is
        // behind a cell type, so it will not get double-dropped.
        let mut chain_node_ptr = unsafe { self.top.get().read() };

        loop {
            match chain_node_ptr.ptr {
                None => break,
                Some(non_null) => {
                    // SAFETY: the chunk hasn't been dropped yet, so the ptr
                    // to the chunk is alive. The prev pointer of the chunk is
                    // moved to the stack before the chunk is dropped, so it's
                    // alive and valid after the chunk is dropped below.
                    chain_node_ptr = unsafe {
                        // Save to variable to avoid a dangling temporary.
                        let unsafe_cell = core::ptr::addr_of!((*non_null.as_ptr()).prev).read();
                        unsafe_cell.get().read()
                    };

                    // SAFETY: the chunk hasn't been dropped yet, and the
                    // linear allocator lives in the chunk. Moving it to the
                    // stack before dropping it avoids a fringe lifetime issue
                    // which could happen occur with drop_in_place instead.
                    let alloc =
                        unsafe { core::ptr::addr_of_mut!((*non_null.as_ptr()).linear).read() };

                    // The drop will happen anyway, but being explicit.
                    drop(alloc);
                }
            }
        }
    }
}
310
#[cfg(test)]
mod tests {
    use super::*;
    use crate::utils::*;
    use allocator_api2::alloc::Global;
    use bolero::TypeGenerator;

    #[test]
    fn fuzz() {
        // avoid SUMMARY: libFuzzer: out-of-memory
        const MAX_SIZE: usize = 0x10000000;

        let size_hint = 0..=MAX_SIZE;
        // Giving large values for align bits can lead to failed allocations
        // This would normally be OK, since the fuzz-test is resilient to this.
        // However, the chain allocator has a debug assert that allocs don't fail, which means that
        // running this in unit-test mode DOES spuriously fail.
        // Clamping the size in unit-test mode avoids the problem while not losing coverage in fuzz
        // test mode.
        // NOTE(review): the clamp described above is not visible in this
        // file; presumably it lives in `fuzzer_inner_loop` — confirm.
        let align_bits = 0..32;
        let size = 0..=MAX_SIZE;
        let idx = 0..=MAX_SIZE;
        let val = u8::produce();
        let allocs = Vec::<(usize, u32, usize, u8)>::produce()
            .with()
            .values((size, align_bits, idx, val));
        bolero::check!()
            .with_generator((size_hint, allocs))
            .for_each(|(size_hint, size_align_vec)| {
                let allocator = ChainAllocator::new_in(*size_hint, Global);

                for (size, align_bits, idx, val) in size_align_vec {
                    fuzzer_inner_loop(&allocator, *size, *align_bits, *idx, *val, MAX_SIZE)
                }
            })
    }

    #[test]
    fn test_basics() {
        let allocator = ChainAllocator::new_in(4096, Global);
        let layout = Layout::new::<[u8; 8]>();
        let ptr = allocator.allocate(layout).unwrap();

        // deallocate doesn't return memory to the allocator, but it shouldn't
        // panic, as that prevents its use in containers like Vec.
        unsafe { allocator.deallocate(ptr.cast(), layout) };
    }

    #[test]
    fn test_large_allocations() {
        let allocator = ChainAllocator::new_in(4096, Global);

        // Force an allocation, so it makes a chunk of the minimum size.
        {
            let ptr = allocator.allocate(Layout::new::<u8>()).unwrap();
            unsafe { allocator.deallocate(ptr.cast(), Layout::new::<u8>()) };
        }
        // Should be a bit less than 4096, but use this over hard-coding a
        // number, to make it more resilient to implementation changes.
        let remaining_capacity = allocator.remaining_capacity();

        // Now make something bigger than the chunk.
        let size = 4 * (remaining_capacity + 1);
        let layout = Layout::from_size_align(size, 1).unwrap();
        let ptr = allocator.allocate(layout).unwrap();
        let actual_size = ptr.len();
        assert!(
            actual_size >= size,
            "failed to allocate large allocation, expected at least {size} bytes, saw {actual_size}"
        );
        // Doesn't return memory, just ensuring we don't panic.
        unsafe { allocator.deallocate(ptr.cast(), layout) };
    }

    /// Allocates exactly the top chunk's remaining capacity so the chain is
    /// completely full afterwards.
    #[track_caller]
    fn fill_to_capacity<A: Allocator + Clone>(allocator: &ChainAllocator<A>) {
        let remaining_capacity = allocator.remaining_capacity();
        if remaining_capacity != 0 {
            let layout = Layout::array::<u8>(remaining_capacity).unwrap();
            let ptr = allocator.allocate(layout).unwrap();
            // Doesn't return memory, just ensuring we don't panic.
            unsafe { allocator.deallocate(ptr.cast(), layout) };
            assert_eq!(0, allocator.remaining_capacity());
        }
    }

    #[test]
    fn test_growth() {
        let page_size = crate::os::page_size().unwrap();
        let allocator = ChainAllocator::new_in(page_size, Global);

        let bool_layout = Layout::new::<bool>();

        // test that it fills to capacity a few times.
        for _ in 0..100 {
            fill_to_capacity(&allocator);

            // This check is theoretically redundant because fill_to_capacity
            // ensures this already, but this tests using the public API.
            let size = allocator.used_bytes();
            let capacity = allocator.reserved_bytes();
            assert_eq!(size, capacity);

            // Trigger it to grow.
            let ptr = allocator.allocate(bool_layout).unwrap();

            // Doesn't free, shouldn't panic though.
            unsafe { allocator.deallocate(ptr.cast(), bool_layout) };

            // The growth means there should be many used bytes.
            let size = allocator.used_bytes();
            let capacity = allocator.reserved_bytes();
            assert!(size < capacity, "failed: {size} < {capacity}");
        }

        let reserved_bytes = allocator.reserved_bytes();
        // The allocations can theoretically be over-allocated, so use >= to
        // do the comparison.
        assert!(reserved_bytes >= page_size * 100);

        // Everything is filled to capacity except the last iteration.
        let used_bytes = allocator.used_bytes();
        assert!(
            used_bytes < reserved_bytes,
            "failed: {used_bytes} < {reserved_bytes}"
        );
    }
}