libdd_alloc/
virtual_alloc.rs

1// Copyright 2024-Present Datadog, Inc. https://www.datadoghq.com/
2// SPDX-License-Identifier: Apache-2.0
3
4use crate::AllocError;
5use core::alloc::Layout;
6
/// Allocates entire pages of virtual memory for each allocation. This is
/// intended for large allocations only, such as working with other allocators
/// to provide a large chunk for them.
///
/// Zero-sized layouts and alignments larger than one OS page are rejected
/// with `AllocError` (see `layout_to_page_size`). Returned allocations are
/// always a whole number of pages, so the slice handed back may be larger
/// than the requested size.
#[derive(Clone, Copy, Debug)]
pub struct VirtualAllocator {}
12
13#[cfg_attr(debug_assertions, track_caller)]
14#[inline]
15fn layout_to_page_size(layout: Layout) -> Result<usize, AllocError> {
16    if layout.size() == 0 {
17        return Err(AllocError);
18    }
19
20    let page_size = os::page_size()?;
21    let alignment = layout.align();
22    if alignment > page_size {
23        return Err(AllocError);
24    }
25
26    pad_to_pow2(layout.size(), page_size).ok_or(AllocError)
27}
28
/// Rounds `num` up to the nearest multiple of `pow2`, which must be a power
/// of two. The result is never zero (it is at least `pow2`). Returns `None`
/// when the rounded value would overflow `usize`.
#[cfg_attr(debug_assertions, track_caller)]
#[inline]
fn pad_to_pow2(num: usize, pow2: usize) -> Option<usize> {
    debug_assert!(pow2.is_power_of_two());

    // A value already divisible by pow2 is kept as-is, except that zero is
    // bumped up to one full unit.
    let candidate = num.max(pow2);

    // For a power of two, `x & (pow2 - 1)` equals `x % pow2`: subtracting one
    // from a power of two turns its single set bit into a mask of all the
    // lower bits, and masking with it extracts the remainder directly.
    let rem = candidate & (pow2 - 1);
    if rem == 0 {
        Some(candidate)
    } else {
        // `rem < pow2` by construction, so `pow2 - rem` cannot underflow.
        // The addition can overflow near usize::MAX, hence the checked form.
        candidate.checked_add(pow2 - rem)
    }
}
59
/// Converts a platform-reported page size (whose type varies by platform and
/// may be signed, e.g. the value returned by `sysconf`) into a validated
/// `usize`. Yields `Err(AllocError)` when the value is negative or not a
/// power of two — callers such as `pad_to_pow2` rely on the power-of-two
/// property for their bit masking. `AllocError` must be in scope at the
/// expansion site (both `os` modules import it).
macro_rules! validate_page_size {
    ($x:expr) => {
        // On some platforms this may be unsigned or signed. We don't care if
        // this macro generates "dead code" for such things.
        #[allow(unused_comparisons)]
        if $x < 0 {
            Err(AllocError)
        } else {
            // Negative values were rejected above, so this cast is lossless.
            let size = $x as usize;
            if !size.is_power_of_two() {
                Err(AllocError)
            } else {
                Ok(size)
            }
        }
    };
}
77
#[cfg(unix)]
pub mod os {
    use super::VirtualAllocator;
    use allocator_api2::alloc::{AllocError, Allocator};
    use core::alloc::Layout;
    use core::ptr;

    /// Returns the OS page size in bytes, validated to be a non-negative
    /// power of two (see `validate_page_size!`).
    pub fn page_size() -> Result<usize, AllocError> {
        // SAFETY: calling sysconf with correct arguments.
        let result = unsafe { libc::sysconf(libc::_SC_PAGESIZE) };
        validate_page_size!(result)
    }

    unsafe impl Allocator for VirtualAllocator {
        /// Delegates to [`Allocator::allocate_zeroed`]: fresh anonymous
        /// private mappings are zero-filled per POSIX, so a separate
        /// non-zeroed path would gain nothing.
        fn allocate(&self, layout: Layout) -> Result<ptr::NonNull<[u8]>, AllocError> {
            self.allocate_zeroed(layout)
        }

        /// Maps whole pages with `mmap(MAP_PRIVATE | MAP_ANON)`. The returned
        /// slice covers the full page-rounded size, which may exceed
        /// `layout.size()`.
        fn allocate_zeroed(&self, layout: Layout) -> Result<ptr::NonNull<[u8]>, AllocError> {
            // NOTE(review): layout_to_page_size also rejects zero-sized
            // layouts, so this early check is redundant (but harmless). The
            // windows module omits it.
            if layout.size() == 0 {
                return Err(AllocError);
            }

            // Rejects over-aligned layouts and rounds up to whole pages.
            let size = super::layout_to_page_size(layout)?;

            let null = ptr::null_mut();
            let len = size as libc::size_t;
            let prot = libc::PROT_READ | libc::PROT_WRITE;
            let flags = libc::MAP_PRIVATE | libc::MAP_ANON;
            // SAFETY: these args create a new mapping (no weird behavior),
            // akin to malloc.
            let result = unsafe { libc::mmap(null, len, prot, flags, -1, 0) };

            if ptr::eq(result, libc::MAP_FAILED) {
                return Err(AllocError);
            }

            // SAFETY: from my understanding of the spec, it's not possible to get
            // a mapping which starts at 0 (aka null) when MAP_FIXED wasn't given
            // and the specified address is 0.
            let addr = unsafe { ptr::NonNull::new_unchecked(result.cast()) };
            Ok(ptr::NonNull::slice_from_raw_parts(addr, size))
        }

        /// Unmaps the pages backing `nonnull` via `munmap`. Per the
        /// `Allocator::deallocate` contract, `nonnull`/`layout` must describe
        /// an allocation previously returned by this allocator; only then
        /// does `layout_to_page_size` reproduce the originally mapped size.
        unsafe fn deallocate(&self, nonnull: ptr::NonNull<u8>, layout: Layout) {
            let ptr = nonnull.as_ptr();
            // SAFETY: this would have failed if it didn't fit, unless the
            // caller violated the preconditions.
            let size = super::layout_to_page_size(layout).unwrap_unchecked();

            // SAFETY: if the caller meets the safety conditions of this function,
            // then this is safe by extension. Failure is deliberately ignored;
            // there is no way to report it from deallocate.
            _ = libc::munmap(ptr.cast(), size);
        }
    }
}
134
#[cfg(windows)]
pub mod os {
    use super::VirtualAllocator;
    use allocator_api2::alloc::{AllocError, Allocator};
    use core::alloc::Layout;
    use core::{mem, ptr};
    use windows_sys::Win32::System::Memory;
    use windows_sys::Win32::System::SystemInformation::{GetSystemInfo, SYSTEM_INFO};

    /// Returns the system page size in bytes (`SYSTEM_INFO::dwPageSize`),
    /// validated to be a power of two (see `validate_page_size!`).
    pub fn page_size() -> Result<usize, AllocError> {
        let mut system_info = mem::MaybeUninit::<SYSTEM_INFO>::uninit();
        // SAFETY: calling C function with correct uninit repr.
        unsafe { GetSystemInfo(system_info.as_mut_ptr()) };

        // SAFETY: GetSystemInfo is not documented to fail in any way, so it
        // should be safe to assume system_info was initialized.
        let system_info = unsafe { system_info.assume_init() };

        validate_page_size!(system_info.dwPageSize)
    }

    unsafe impl Allocator for VirtualAllocator {
        /// Delegates to [`Allocator::allocate_zeroed`]: pages newly committed
        /// by `VirtualAlloc` are documented to be zero-initialized, so a
        /// separate non-zeroed path would gain nothing.
        fn allocate(&self, layout: Layout) -> Result<ptr::NonNull<[u8]>, AllocError> {
            self.allocate_zeroed(layout)
        }

        /// Reserves and commits whole pages with `VirtualAlloc`. The returned
        /// slice covers the full page-rounded size, which may exceed
        /// `layout.size()`.
        fn allocate_zeroed(&self, layout: Layout) -> Result<ptr::NonNull<[u8]>, AllocError> {
            // Rejects zero-sized and over-aligned layouts, and rounds up to
            // a whole number of pages.
            let size = super::layout_to_page_size(layout)?;

            let null = ptr::null_mut();
            let alloc_type = Memory::MEM_COMMIT | Memory::MEM_RESERVE;
            let protection = Memory::PAGE_READWRITE;
            // SAFETY: these args create a new allocation (no weird behavior),
            // akin to malloc.
            let result = unsafe { Memory::VirtualAlloc(null, size, alloc_type, protection) };

            // VirtualAlloc signals failure by returning null.
            match ptr::NonNull::new(result.cast::<u8>()) {
                Some(addr) => Ok(ptr::NonNull::slice_from_raw_parts(addr, size)),
                None => Err(AllocError),
            }
        }

        /// Releases the entire allocation. `VirtualFree` with `MEM_RELEASE`
        /// requires a size of 0 and the exact base address returned by
        /// `VirtualAlloc`, so the layout is unused. Failure is deliberately
        /// ignored; there is no way to report it from deallocate.
        unsafe fn deallocate(&self, ptr: ptr::NonNull<u8>, _layout: Layout) {
            _ = Memory::VirtualFree(ptr.as_ptr() as *mut _, 0, Memory::MEM_RELEASE);
        }
    }
}
182
183#[cfg(test)]
184mod tests {
185    use super::*;
186    use crate::utils::*;
187    use allocator_api2::alloc::Allocator;
188    use bolero::TypeGenerator;
189
    #[test]
    fn fuzz() {
        // Miri runs use a much smaller cap, presumably to keep interpreted
        // execution tractable — confirm if the bound is ever tuned.
        #[cfg(miri)]
        const MAX_SIZE: usize = 1_000_000;

        #[cfg(not(miri))]
        const MAX_SIZE: usize = isize::MAX as usize;

        // Bolero generators for each tuple element: a requested size, an
        // alignment bit count (presumably an alignment exponent consumed by
        // fuzzer_inner_loop — verify in crate::utils), plus an index and a
        // byte value, presumably used to poke at the allocation's contents.
        let align_bits = 0..=32;
        let size = usize::produce();
        let idx = usize::produce();
        let val = u8::produce();
        let allocs = Vec::<(usize, u32, usize, u8)>::produce()
            .with()
            .values((size, align_bits, idx, val));
        bolero::check!()
            .with_generator(allocs)
            .for_each(|size_align_vec| {
                let allocator = VirtualAllocator {};

                // Per-tuple allocate/use/deallocate checking is delegated to
                // crate::utils::fuzzer_inner_loop.
                for (size, align_bits, idx, val) in size_align_vec {
                    fuzzer_inner_loop(&allocator, *size, *align_bits, *idx, *val, MAX_SIZE)
                }
            })
    }
215
216    #[test]
217    fn test_zero_sized() {
218        let alloc = VirtualAllocator {};
219        assert_eq!(0, core::mem::size_of::<VirtualAllocator>());
220        let zero_sized_layout = Layout::new::<VirtualAllocator>();
221        _ = alloc.allocate(zero_sized_layout).unwrap_err();
222    }
223
224    #[test]
225    fn test_too_large_alignment() {
226        let page_size = os::page_size().unwrap();
227        let too_large = (page_size + 1).next_power_of_two();
228        let too_large_layout = Layout::from_size_align(1, too_large)
229            .unwrap()
230            .pad_to_align();
231        let alloc = VirtualAllocator {};
232        _ = alloc.allocate(too_large_layout).unwrap_err();
233    }
234
235    #[test]
236    fn test_small_cases() {
237        let page_size = os::page_size().unwrap();
238        let alloc = VirtualAllocator {};
239
240        // Allocations get rounded up to page size.
241        let small_cases = [1, page_size - 1];
242        for size in small_cases {
243            let layout = Layout::from_size_align(size, 1).unwrap();
244            let wide_ptr = alloc.allocate(layout).unwrap();
245            assert_eq!(page_size, wide_ptr.len());
246            unsafe { alloc.deallocate(wide_ptr.cast(), layout) };
247        }
248
249        // An even page size doesn't get rounded up.
250        {
251            let layout = Layout::from_size_align(page_size, page_size).unwrap();
252            let wide_ptr = alloc.allocate(layout).unwrap();
253            assert_eq!(page_size, wide_ptr.len());
254            unsafe { alloc.deallocate(wide_ptr.cast(), layout) };
255        }
256
257        // page_size + 1 gets rounded up to the next page.
258        {
259            let layout = Layout::from_size_align(page_size + 1, page_size).unwrap();
260            let wide_ptr = alloc.allocate(layout).unwrap();
261            assert_eq!(2 * page_size, wide_ptr.len());
262            unsafe { alloc.deallocate(wide_ptr.cast(), layout) };
263        }
264    }
265
266    #[track_caller]
267    fn realistic_size(size: usize) {
268        let page_size = os::page_size().unwrap();
269        let alloc = VirtualAllocator {};
270        let layout = Layout::from_size_align(size, page_size).unwrap();
271        let wide_ptr = alloc.allocate(layout).unwrap();
272        let actual_size = wide_ptr.len();
273
274        // Should be a multiple of page size.
275        assert_eq!(0, actual_size % page_size);
276
277        // Shouldn't ever be smaller than what was asked for.
278        assert!(actual_size >= size);
279
280        unsafe { alloc.deallocate(wide_ptr.cast(), layout) };
281    }
282
283    #[test]
284    fn realistic_size_1mib() {
285        realistic_size(1024 * 1024);
286    }
287
288    #[test]
289    fn realistic_size_2mib() {
290        realistic_size(2 * 1024 * 1024);
291    }
292
293    #[test]
294    fn realistic_size_4mib() {
295        realistic_size(4 * 1024 * 1024);
296    }
297}