// edit/arena/release.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(clippy::mut_from_ref)]

use std::alloc::{AllocError, Allocator, Layout};
use std::cell::Cell;
use std::hint::cold_path;
use std::mem::MaybeUninit;
use std::ptr::{self, NonNull};
use std::{mem, slice};

use crate::helpers::*;
use crate::{apperr, sys};

/// Granularity, in bytes, used both to round up the reserved capacity in
/// [`Arena::new`] and to commit memory on demand in `Arena::alloc_raw_bump`.
/// 64 KiB (`KIBI` comes from `crate::helpers`).
const ALLOC_CHUNK_SIZE: usize = 64 * KIBI;
17
/// An arena allocator.
///
/// If you have never used an arena allocator before, think of it as
/// allocating objects on the stack, but the stack is *really* big.
/// Each time you allocate, memory gets pushed at the end of the stack,
/// each time you deallocate, memory gets popped from the end of the stack.
///
/// One reason you'd want to use this is obviously performance: It's very simple
/// and so it's also very fast, >10x faster than your system allocator.
///
/// However, modern allocators such as `mimalloc` are just as fast, so why not use them?
/// Because their performance comes at the cost of binary size and we can't have that.
///
/// The biggest benefit though is that it sometimes massively simplifies lifetime
/// and memory management. This can best be seen by this project's UI code, which
/// uses an arena to allocate a tree of UI nodes. This is infamously difficult
/// to do in Rust, but not so when you got an arena allocator:
/// All nodes have the same lifetime, so you can just use references.
///
/// <div class="warning">
///
/// **Do not** push objects into the arena that require destructors.
/// Destructors are not executed. Use a pool allocator for that.
///
/// </div>
pub struct Arena {
    /// Start of the reserved virtual address range.
    /// Dangling for arenas created via [`Arena::empty`]; `Drop` checks for that.
    base: NonNull<u8>,
    /// Total reserved size in bytes (a multiple of `ALLOC_CHUNK_SIZE` when
    /// created via [`Arena::new`]). Not all of it is committed.
    capacity: usize,
    /// Number of bytes committed so far via `sys::virtual_commit`
    /// (always `<= capacity`, grown chunk-wise in `alloc_raw_bump`).
    commit: Cell<usize>,
    /// Current bump offset: everything in `[0, offset)` is allocated.
    /// `Cell` because allocation goes through `&self` (see the `Allocator` impl).
    offset: Cell<usize>,

    /// See [`super::debug`], which uses this for borrow tracking.
    #[cfg(debug_assertions)]
    pub(super) borrows: Cell<usize>,
}
53
impl Arena {
    /// Creates an arena with no backing memory.
    ///
    /// `base` is dangling and `capacity` is 0, so any allocation attempt
    /// falls into `alloc_raw_bump()` and fails with [`AllocError`].
    /// `Drop` recognizes the dangling base and skips releasing memory.
    pub const fn empty() -> Self {
        Self {
            base: NonNull::dangling(),
            capacity: 0,
            commit: Cell::new(0),
            offset: Cell::new(0),

            #[cfg(debug_assertions)]
            borrows: Cell::new(0),
        }
    }

    /// Reserves virtual address space for an arena of at least `capacity`
    /// bytes (rounded up to a multiple of [`ALLOC_CHUNK_SIZE`], minimum one
    /// chunk). Nothing is committed up front; pages are committed on demand
    /// in `alloc_raw_bump()`.
    pub fn new(capacity: usize) -> apperr::Result<Self> {
        // Round up to the chunk size. Relies on ALLOC_CHUNK_SIZE being a
        // power of two (64 * KIBI).
        let capacity = (capacity.max(1) + ALLOC_CHUNK_SIZE - 1) & !(ALLOC_CHUNK_SIZE - 1);
        // SAFETY: presumably `virtual_reserve` only reserves address space
        // without committing it — see the `sys` module.
        let base = unsafe { sys::virtual_reserve(capacity)? };

        Ok(Self {
            base,
            capacity,
            commit: Cell::new(0),
            offset: Cell::new(0),

            #[cfg(debug_assertions)]
            borrows: Cell::new(0),
        })
    }

    /// Returns the current bump offset, i.e. the number of bytes allocated.
    /// Pass this to [`Arena::reset`] to later roll back to this point.
    pub fn offset(&self) -> usize {
        self.offset.get()
    }

    /// "Deallocates" the memory in the arena down to the given offset.
    ///
    /// # Safety
    ///
    /// Obviously, this is GIGA UNSAFE. It runs no destructors and does not check
    /// whether the offset is valid. You better take care when using this function.
    pub unsafe fn reset(&self, to: usize) {
        // Fill the deallocated memory with 0xDD to aid debugging.
        if cfg!(debug_assertions) && self.offset.get() > to {
            let commit = self.commit.get();
            // Poison up to 128 bytes past the old offset (clamped to the
            // committed region) — this mirrors the 0xCD over-poisoning that
            // `alloc_raw` does past the end of each allocation.
            let len = (self.offset.get() + 128).min(commit) - to;
            // SAFETY: caller guarantees `to` is a previously observed offset,
            // and the range is clamped to committed memory.
            unsafe { slice::from_raw_parts_mut(self.base.add(to).as_ptr(), len).fill(0xDD) };
        }

        self.offset.replace(to);
    }

    /// Bump-allocates `bytes` bytes aligned to `alignment`.
    ///
    /// `alignment` must be a power of two (guaranteed when it comes from
    /// `Layout::align()` or `mem::align_of`). Fails with [`AllocError`] only
    /// when the arena would exceed its reserved capacity or the commit fails.
    #[inline]
    pub(super) fn alloc_raw(
        &self,
        bytes: usize,
        alignment: usize,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let commit = self.commit.get();
        let offset = self.offset.get();

        // Round the offset up to the requested alignment (power-of-two trick).
        let beg = (offset + alignment - 1) & !(alignment - 1);
        let end = beg + bytes;

        if end > commit {
            // Slow path: need to commit more memory first.
            return self.alloc_raw_bump(beg, end);
        }

        if cfg!(debug_assertions) {
            // Fill the new allocation (including alignment padding and up to
            // 128 bytes of slack, clamped to the committed region) with 0xCD
            // to make reads of uninitialized memory obvious.
            let ptr = unsafe { self.base.add(offset) };
            let len = (end + 128).min(self.commit.get()) - offset;
            unsafe { slice::from_raw_parts_mut(ptr.as_ptr(), len).fill(0xCD) };
        }

        self.offset.replace(end);
        // SAFETY: `[beg, end)` lies within committed memory (checked above).
        Ok(unsafe { NonNull::slice_from_raw_parts(self.base.add(beg), bytes) })
    }

    // With the code in `alloc_raw_bump()` out of the way, `alloc_raw()` compiles down to some super tight assembly.
    #[cold]
    fn alloc_raw_bump(&self, beg: usize, end: usize) -> Result<NonNull<[u8]>, AllocError> {
        let offset = self.offset.get();
        let commit_old = self.commit.get();
        // Commit in whole ALLOC_CHUNK_SIZE chunks to amortize syscalls.
        let commit_new = (end + ALLOC_CHUNK_SIZE - 1) & !(ALLOC_CHUNK_SIZE - 1);

        // Fail if we'd exceed the reserved range or the OS refuses the commit.
        // On failure nothing is modified, so the arena stays usable.
        if commit_new > self.capacity
            || unsafe {
                sys::virtual_commit(self.base.add(commit_old), commit_new - commit_old).is_err()
            }
        {
            return Err(AllocError);
        }

        if cfg!(debug_assertions) {
            // Same 0xCD poisoning as in `alloc_raw()`, now that the memory
            // up to `commit_new` is committed.
            let ptr = unsafe { self.base.add(offset) };
            let len = (end + 128).min(self.commit.get()) - offset;
            unsafe { slice::from_raw_parts_mut(ptr.as_ptr(), len).fill(0xCD) };
        }

        self.commit.replace(commit_new);
        self.offset.replace(end);
        // SAFETY: `[beg, end)` is now committed memory.
        Ok(unsafe { NonNull::slice_from_raw_parts(self.base.add(beg), end - beg) })
    }

    /// Allocates uninitialized storage for a single `T`.
    ///
    /// # Panics
    ///
    /// Panics if the arena is out of memory.
    #[allow(clippy::mut_from_ref)]
    pub fn alloc_uninit<T>(&self) -> &mut MaybeUninit<T> {
        let bytes = mem::size_of::<T>();
        let alignment = mem::align_of::<T>();
        let ptr = self.alloc_raw(bytes, alignment).unwrap();
        // SAFETY: freshly allocated, properly aligned, exclusive storage.
        unsafe { ptr.cast().as_mut() }
    }

    /// Allocates uninitialized storage for `count` consecutive `T`s.
    ///
    /// # Panics
    ///
    /// Panics if the arena is out of memory. NOTE(review): `size_of::<T>() *
    /// count` is an unchecked multiply — presumably callers keep counts small
    /// enough not to overflow; confirm.
    #[allow(clippy::mut_from_ref)]
    pub fn alloc_uninit_slice<T>(&self, count: usize) -> &mut [MaybeUninit<T>] {
        let bytes = mem::size_of::<T>() * count;
        let alignment = mem::align_of::<T>();
        let ptr = self.alloc_raw(bytes, alignment).unwrap();
        // SAFETY: freshly allocated, properly aligned, exclusive storage of
        // `bytes = count * size_of::<T>()` bytes.
        unsafe { slice::from_raw_parts_mut(ptr.cast().as_ptr(), count) }
    }
}
171
172impl Drop for Arena {
173    fn drop(&mut self) {
174        if self.base != NonNull::dangling() {
175            unsafe { sys::virtual_release(self.base, self.capacity) };
176        }
177    }
178}
179
180impl Default for Arena {
181    fn default() -> Self {
182        Self::empty()
183    }
184}
185
// SAFETY: blocks returned from `allocate` stay valid (and are never moved by
// the arena itself) until the arena is reset or dropped, which is the
// caller's responsibility to outlive — see the `reset()` safety docs.
unsafe impl Allocator for Arena {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.alloc_raw(layout.size(), layout.align())
    }

    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let p = self.alloc_raw(layout.size(), layout.align())?;
        // SAFETY: `alloc_raw` just returned `p` as a writable block of
        // exactly `p.len()` bytes (this also overwrites the debug 0xCD fill).
        unsafe { p.cast::<u8>().as_ptr().write_bytes(0, p.len()) }
        Ok(p)
    }

    // While it is possible to shrink the tail end of the arena, it is
    // not very useful given the existence of scoped scratch arenas.
    unsafe fn deallocate(&self, _: NonNull<u8>, _: Layout) {}

    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(new_layout.size() >= old_layout.size());
        // NOTE(review): the `Allocator::grow` contract permits
        // `new_layout.align() > old_layout.align()`; this impl assumes its
        // callers never request that (the in-place path below would not
        // re-align `ptr`) — confirm against call sites.
        debug_assert!(new_layout.align() <= old_layout.align());

        let new_ptr;

        // Growing the given area is possible if it is at the end of the arena.
        if unsafe { ptr.add(old_layout.size()) == self.base.add(self.offset.get()) } {
            new_ptr = ptr;
            let delta = new_layout.size() - old_layout.size();
            // Assuming that the given ptr/length area is at the end of the arena,
            // we can just push more memory to the end of the arena to grow it.
            // Alignment 1 keeps the new bytes contiguous with the old block;
            // on commit failure this returns Err without touching `ptr`.
            self.alloc_raw(delta, 1)?;
        } else {
            cold_path();

            new_ptr = self.allocate(new_layout)?.cast();

            // SAFETY: It's weird to me that this doesn't assert new_layout.size() >= old_layout.size(),
            // but neither does the stdlib code at the time of writing.
            // So, assuming that is not needed, this code is safe since it just copies the old data over.
            unsafe {
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_layout.size());
                // `deallocate` is a no-op; the old block simply leaks into the arena.
                self.deallocate(ptr, old_layout);
            }
        }

        Ok(NonNull::slice_from_raw_parts(new_ptr, new_layout.size()))
    }

    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        unsafe {
            // SAFETY: Same as grow().
            let ptr = self.grow(ptr, old_layout, new_layout)?;

            // SAFETY: At this point, `ptr` must be valid for `new_layout.size()` bytes,
            // allowing us to safely zero out the delta since `old_layout.size()`.
            ptr.cast::<u8>()
                .add(old_layout.size())
                .write_bytes(0, new_layout.size() - old_layout.size());

            Ok(ptr)
        }
    }

    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(new_layout.size() <= old_layout.size());
        debug_assert!(new_layout.align() <= old_layout.align());

        let mut len = old_layout.size();

        // Shrinking the given area is possible if it is at the end of the arena.
        if unsafe { ptr.add(len) == self.base.add(self.offset.get()) } {
            // Give the tail bytes back by moving the bump offset down.
            self.offset.set(self.offset.get() - len + new_layout.size());
            len = new_layout.size();
        } else {
            // In release builds this silently returns the original (larger)
            // block, which still satisfies the trait's minimum-size contract.
            debug_assert!(
                false,
                "Did you call shrink_to_fit()? Only the last allocation can be shrunk!"
            );
        }

        Ok(NonNull::slice_from_raw_parts(ptr, len))
    }
}
280}