near_vm_vm/mmap.rs

// This file contains code from external sources.
// Attributions: https://github.com/wasmerio/wasmer/blob/2.3.0/ATTRIBUTIONS.md

//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.

use more_asserts::assert_le;
use std::io;
use std::ptr;
use std::slice;

/// Round `size` up to the nearest multiple of `page_size`.
/// `page_size` must be a power of two.
fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
    (size + (page_size - 1)) & !(page_size - 1)
}

/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
#[derive(Debug)]
pub struct Mmap {
    // Note that this is stored as a `usize` instead of a `*const` or `*mut`
    // pointer to allow this structure to be natively `Send` and `Sync` without
    // `unsafe impl`. This type is sendable across threads and shareable since
    // the coordination all happens at the OS layer.
    ptr: usize,
    len: usize,
}

impl Mmap {
    /// Construct a new empty instance of `Mmap`.
    pub fn new() -> Self {
        // Rust's slices require non-null pointers, even when empty. `Vec`
        // contains code to create a non-null dangling pointer value when
        // constructed empty, so we reuse that here.
        let empty = Vec::<u8>::new();
        Self { ptr: empty.as_ptr() as usize, len: 0 }
    }

    /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
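    ///
    /// # Example
    ///
    /// A minimal usage sketch, marked `ignore` because the crate path
    /// (`near_vm_vm` here) is an assumption and may not match the build:
    ///
    /// ```ignore
    /// use near_vm_vm::Mmap;
    ///
    /// let mut map = Mmap::with_at_least(100).expect("mmap failed");
    /// assert!(map.len() >= 100); // rounded up to a page multiple
    /// assert!(map.as_slice().iter().all(|&b| b == 0)); // zero-filled
    /// map.as_mut_slice()[0] = 42;
    /// ```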
    pub fn with_at_least(size: usize) -> Result<Self, String> {
        let page_size = region::page::size();
        let rounded_size = round_up_to_page_size(size, page_size);
        Self::accessible_reserved(rounded_size, rounded_size)
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
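    ///
    /// # Example
    ///
    /// A sketch of the reserve-then-commit pattern, marked `ignore` because the
    /// crate path (`near_vm_vm` here) is an assumption:
    ///
    /// ```ignore
    /// use near_vm_vm::Mmap;
    ///
    /// let page = region::page::size();
    /// // Reserve 16 pages of address space, but commit only the first.
    /// let mut map = Mmap::accessible_reserved(page, 16 * page).expect("mmap failed");
    /// assert_eq!(map.len(), 16 * page);
    /// // Commit a second page on demand.
    /// map.make_accessible(page, page).expect("commit failed");
    /// ```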
    #[cfg(not(target_os = "windows"))]
    pub fn accessible_reserved(
        accessible_size: usize,
        mapping_size: usize,
    ) -> Result<Self, String> {
        let page_size = region::page::size();
        assert_le!(accessible_size, mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
        assert_eq!(accessible_size & (page_size - 1), 0);

        // Mmap may return EINVAL if the size is zero, so just
        // special-case that.
        if mapping_size == 0 {
            return Ok(Self::new());
        }

        Ok(if accessible_size == mapping_size {
            // Allocate a single read-write region at once.
            let ptr = unsafe {
                libc::mmap(
                    ptr::null_mut(),
                    mapping_size,
                    libc::PROT_READ | libc::PROT_WRITE,
                    libc::MAP_PRIVATE | libc::MAP_ANON,
                    -1,
                    0,
                )
            };
            if ptr == libc::MAP_FAILED {
                return Err(io::Error::last_os_error().to_string());
            }

            Self { ptr: ptr as usize, len: mapping_size }
        } else {
            // Reserve the mapping size.
            let ptr = unsafe {
                libc::mmap(
                    ptr::null_mut(),
                    mapping_size,
                    libc::PROT_NONE,
                    libc::MAP_PRIVATE | libc::MAP_ANON,
                    -1,
                    0,
                )
            };
            if ptr == libc::MAP_FAILED {
                return Err(io::Error::last_os_error().to_string());
            }

            let mut result = Self { ptr: ptr as usize, len: mapping_size };

            if accessible_size != 0 {
                // Commit the accessible size.
                result.make_accessible(0, accessible_size)?;
            }

            result
        })
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
    #[cfg(target_os = "windows")]
    pub fn accessible_reserved(
        accessible_size: usize,
        mapping_size: usize,
    ) -> Result<Self, String> {
        use winapi::um::memoryapi::VirtualAlloc;
        use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE};

        let page_size = region::page::size();
        assert_le!(accessible_size, mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
        assert_eq!(accessible_size & (page_size - 1), 0);

        // VirtualAlloc may return ERROR_INVALID_PARAMETER if the size is zero,
        // so just special-case that.
        if mapping_size == 0 {
            return Ok(Self::new());
        }

        Ok(if accessible_size == mapping_size {
            // Allocate a single read-write region at once.
            let ptr = unsafe {
                VirtualAlloc(
                    ptr::null_mut(),
                    mapping_size,
                    MEM_RESERVE | MEM_COMMIT,
                    PAGE_READWRITE,
                )
            };
            if ptr.is_null() {
                return Err(io::Error::last_os_error().to_string());
            }

            Self { ptr: ptr as usize, len: mapping_size }
        } else {
            // Reserve the mapping size.
            let ptr =
                unsafe { VirtualAlloc(ptr::null_mut(), mapping_size, MEM_RESERVE, PAGE_NOACCESS) };
            if ptr.is_null() {
                return Err(io::Error::last_os_error().to_string());
            }

            let mut result = Self { ptr: ptr as usize, len: mapping_size };

            if accessible_size != 0 {
                // Commit the accessible size.
                result.make_accessible(0, accessible_size)?;
            }

            result
        })
    }

    /// Make the memory starting at `start` and extending for `len` bytes accessible.
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
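    ///
    /// # Example
    ///
    /// A sketch of committing pages inside an existing reservation, marked
    /// `ignore` because the crate path (`near_vm_vm` here) is an assumption:
    ///
    /// ```ignore
    /// use near_vm_vm::Mmap;
    ///
    /// let page = region::page::size();
    /// let mut map = Mmap::accessible_reserved(0, 8 * page).expect("mmap failed");
    /// // Nothing is accessible yet; commit the first two pages, then use them.
    /// map.make_accessible(0, 2 * page).expect("commit failed");
    /// map.as_mut_slice()[..2 * page].fill(1);
    /// ```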
    #[cfg(not(target_os = "windows"))]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
        let page_size = region::page::size();
        assert_eq!(start & (page_size - 1), 0);
        assert_eq!(len & (page_size - 1), 0);
        assert_le!(len, self.len);
        assert_le!(start, self.len - len);

        // Commit the accessible size.
        unsafe { region::protect(self.as_ptr().add(start), len, region::Protection::READ_WRITE) }
            .map_err(|e| e.to_string())
    }

    /// Make the memory starting at `start` and extending for `len` bytes accessible.
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
    #[cfg(target_os = "windows")]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
        use winapi::ctypes::c_void;
        use winapi::um::memoryapi::VirtualAlloc;
        use winapi::um::winnt::{MEM_COMMIT, PAGE_READWRITE};
        let page_size = region::page::size();
        assert_eq!(start & (page_size - 1), 0);
        assert_eq!(len & (page_size - 1), 0);
        assert_le!(len, self.len);
        assert_le!(start, self.len - len);

        // Commit the accessible size.
        if unsafe {
            VirtualAlloc(self.as_ptr().add(start) as *mut c_void, len, MEM_COMMIT, PAGE_READWRITE)
        }
        .is_null()
        {
            return Err(io::Error::last_os_error().to_string());
        }

        Ok(())
    }

    /// Return the allocated memory as a slice of u8.
    pub fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
    }

    /// Return the allocated memory as a mutable slice of u8.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
    }

    /// Return the allocated memory as a pointer to u8.
    pub fn as_ptr(&self) -> *const u8 {
        self.ptr as *const u8
    }

    /// Return the allocated memory as a mutable pointer to u8.
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.ptr as *mut u8
    }

    /// Return the length of the allocated memory.
    pub fn len(&self) -> usize {
        self.len
    }

    /// Return whether any memory has been allocated.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

impl Drop for Mmap {
    #[cfg(not(target_os = "windows"))]
    fn drop(&mut self) {
        if self.len != 0 {
            let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.len) };
            assert_eq!(r, 0, "munmap failed: {}", io::Error::last_os_error());
        }
    }

    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        if self.len != 0 {
            use winapi::ctypes::c_void;
            use winapi::um::memoryapi::VirtualFree;
            use winapi::um::winnt::MEM_RELEASE;
            let r = unsafe { VirtualFree(self.ptr as *mut c_void, 0, MEM_RELEASE) };
            assert_ne!(r, 0, "VirtualFree failed: {}", io::Error::last_os_error());
        }
    }
}

fn _assert() {
    fn _assert_send_sync<T: Send + Sync>() {}
    _assert_send_sync::<Mmap>();
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_up_to_page_size() {
        assert_eq!(round_up_to_page_size(0, 4096), 0);
        assert_eq!(round_up_to_page_size(1, 4096), 4096);
        assert_eq!(round_up_to_page_size(4096, 4096), 4096);
        assert_eq!(round_up_to_page_size(4097, 4096), 8192);
    }
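
    // Hedged smoke tests sketching typical `Mmap` usage; they assume the host
    // allows anonymous mappings (true for the unix and windows targets this
    // module supports).
    #[test]
    fn test_new_is_empty() {
        let map = Mmap::new();
        assert!(map.is_empty());
        assert!(!map.as_ptr().is_null());
    }

    #[test]
    fn test_with_at_least_zero_filled() {
        let mut map = Mmap::with_at_least(100).expect("mmap failed");
        assert!(map.len() >= 100);
        assert!(map.as_slice().iter().all(|&b| b == 0));
        map.as_mut_slice()[0] = 42;
        assert_eq!(map.as_slice()[0], 42);
    }

    #[test]
    fn test_accessible_reserved_commit() {
        let page = region::page::size();
        // Reserve four pages, commit one up front, then one more on demand.
        let mut map = Mmap::accessible_reserved(page, 4 * page).expect("mmap failed");
        assert_eq!(map.len(), 4 * page);
        map.make_accessible(page, page).expect("commit failed");
        map.as_mut_slice()[..2 * page].fill(1);
    }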
}