unc_vm_engine/universal/code_memory.rs

use rustix::mm::{self, MapFlags, MprotectFlags, ProtFlags};
use std::sync::Arc;
use unc_vm_compiler::CompileError;

/// Alignment at which function code is laid out in code memory.
pub(crate) const ARCH_FUNCTION_ALIGNMENT: u16 = 16;

/// Alignment at which data sections are laid out in code memory.
pub(crate) const DATA_SECTION_ALIGNMENT: u16 = 64;

/// Round `size` up to the next multiple of `multiple`, which must be a power
/// of two. Adding `multiple - 1` overshoots into the next multiple and the
/// mask then clears the low bits, e.g. `round_up(13, 16) == 16`.
fn round_up(size: usize, multiple: usize) -> usize {
    debug_assert!(multiple.is_power_of_two());
    (size + (multiple - 1)) & !(multiple - 1)
}

/// Helper that writes into a [`CodeMemory`], tracking the current write
/// offset and maintaining the invariant that executable bytes precede any
/// data bytes in the mapping.
pub struct CodeMemoryWriter<'a> {
    memory: &'a mut CodeMemory,
    offset: usize,
}

impl<'a> CodeMemoryWriter<'a> {
    /// Write `input` at the current position, aligned to `alignment`.
    ///
    /// For the first data written after the executable portion, the alignment
    /// is raised to the page size, so that `publish` can change the
    /// protection of the code pages without affecting the data.
    ///
    /// Returns the offset at which `input` was written.
    pub fn write_data(&mut self, mut alignment: u16, input: &[u8]) -> Result<usize, CompileError> {
        if self.offset == self.memory.executable_end {
            alignment = u16::try_from(rustix::param::page_size()).expect("page size > u16::MAX");
        }
        self.write_inner(alignment, input)
    }

    /// Write executable code at the current position, aligned to `alignment`.
    ///
    /// All executable code must be written before any data; interleaving the
    /// two within one mapping is not supported.
    ///
    /// Returns the offset at which `input` was written.
    pub fn write_executable(
        &mut self,
        alignment: u16,
        input: &[u8],
    ) -> Result<usize, CompileError> {
        assert_eq!(
            self.memory.executable_end, self.offset,
            "may not interleave executable and data in the same map"
        );
        let result = self.write_inner(alignment, input);
        self.memory.executable_end = self.offset;
        result
    }

    fn write_inner(&mut self, alignment: u16, input: &[u8]) -> Result<usize, CompileError> {
        let entry_offset = self.offset;
        let aligned_offset = round_up(entry_offset, usize::from(alignment));
        let final_offset = aligned_offset + input.len();
        let out_buffer = self.memory.as_slice_mut();
        // Zero out the padding introduced by alignment, then copy the payload.
        out_buffer
            .get_mut(entry_offset..aligned_offset)
            .ok_or_else(|| CompileError::Resource("out of code memory space".into()))?
            .fill(0);
        out_buffer
            .get_mut(aligned_offset..final_offset)
            .ok_or_else(|| CompileError::Resource("out of code memory space".into()))?
            .copy_from_slice(input);
        self.offset = final_offset;
        Ok(aligned_offset)
    }

    /// The current write position within the mapping.
    pub fn position(&self) -> usize {
        self.offset
    }
}
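
// A minimal sketch (not part of the original file) of how the writer API is
// meant to be driven: executable bytes first, then data, then `publish`. The
// single `0xC3` byte (x86-64 `ret`) and the data payload are placeholder
// inputs chosen purely for illustration.
#[cfg(test)]
#[allow(dead_code)]
fn _writer_usage_sketch(mut memory: CodeMemory) -> Result<(), CompileError> {
    let mut writer = unsafe { memory.writer() };
    let _code_offset = writer.write_executable(ARCH_FUNCTION_ALIGNMENT, &[0xC3])?;
    // The first data write lands on a fresh page, so `publish` can flip the
    // protection of the code pages without touching the data.
    let _data_offset = writer.write_data(DATA_SECTION_ALIGNMENT, &[1, 2, 3])?;
    unsafe { memory.publish() }
}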

/// A block of memory into which code and data can be written and then made
/// executable.
pub struct CodeMemory {
    /// The pool this memory came from, if any; `Drop` returns the mapping
    /// there instead of unmapping it.
    source_pool: Option<Arc<crossbeam_queue::ArrayQueue<Self>>>,

    /// Base address of the `mmap`ed region.
    map: *mut u8,

    /// Size of the mapping in bytes (a multiple of the page size).
    size: usize,

    /// Offset of the first byte past the executable portion; everything
    /// before it is made read+execute by `publish`.
    executable_end: usize,
}

impl CodeMemory {
    /// Create a new, writable mapping of at least `size` bytes, rounded up to
    /// whole pages.
    fn create(size: usize) -> rustix::io::Result<Self> {
        assert!(size != 0);
        let size = round_up(size, rustix::param::page_size());
        let map = unsafe {
            mm::mmap_anonymous(
                std::ptr::null_mut(),
                size,
                ProtFlags::WRITE | ProtFlags::READ,
                MapFlags::SHARED,
            )?
        };
        Ok(Self { source_pool: None, map: map.cast(), executable_end: 0, size })
    }

    fn as_slice_mut(&mut self) -> &mut [u8] {
        unsafe {
            // SAFETY: `map` points to an owned mapping of `size` bytes that
            // lives for as long as `self`.
            std::slice::from_raw_parts_mut(self.map, self.size)
        }
    }

    /// Resize the mapping to hold at least `size` bytes, discarding its
    /// current contents.
    pub fn resize(mut self, size: usize) -> rustix::io::Result<Self> {
        if self.size < size {
            // Too small: unmap and allocate a fresh mapping, keeping the
            // handle to the source pool so the new mapping is returned there.
            let source_pool = unsafe {
                mm::munmap(self.map.cast(), self.size)?;
                let source_pool = self.source_pool.take();
                // `forget` prevents `Drop` from unmapping the freed region.
                std::mem::forget(self);
                source_pool
            };
            Self::create(size).map(|mut m| {
                m.source_pool = source_pool;
                m
            })
        } else {
            self.executable_end = 0;
            Ok(self)
        }
    }

    /// Obtain a writer for filling this memory with code and data, starting
    /// from offset 0.
    ///
    /// # Safety
    ///
    /// The caller must ensure no code in this mapping is still executing or
    /// referenced, since the writer overwrites the previous contents.
    pub unsafe fn writer(&mut self) -> CodeMemoryWriter<'_> {
        self.executable_end = 0;
        CodeMemoryWriter { memory: self, offset: 0 }
    }

    /// Make the executable portion of this memory read+execute.
    ///
    /// # Safety
    ///
    /// The bytes written as executable must form valid machine code, as they
    /// become callable once published.
    pub unsafe fn publish(&mut self) -> Result<(), CompileError> {
        mm::mprotect(
            self.map.cast(),
            self.executable_end,
            MprotectFlags::EXEC | MprotectFlags::READ,
        )
        .map_err(|e| {
            CompileError::Resource(format!("could not make code memory executable: {}", e))
        })
    }

    /// Address of the executable bytes at `offset`.
    ///
    /// # Safety
    ///
    /// `offset` must be within the bounds of the mapping.
    pub unsafe fn executable_address(&self, offset: usize) -> *const u8 {
        debug_assert!(offset <= isize::MAX as usize);
        self.map.offset(offset as isize)
    }

    /// Writable address of the bytes at `offset`.
    ///
    /// # Safety
    ///
    /// `offset` must be within the bounds of the mapping.
    pub unsafe fn writable_address(&self, offset: usize) -> *mut u8 {
        debug_assert!(offset <= isize::MAX as usize);
        self.map.offset(offset as isize)
    }
}
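
// A sketch (an assumption, not part of the original file) of how a published
// offset could be turned into a callable function pointer. It presumes
// `publish` has succeeded and the bytes at `offset` form a complete function
// using the C calling convention.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn _call_published(memory: &CodeMemory, offset: usize) {
    let f: extern "C" fn() = std::mem::transmute(memory.executable_address(offset));
    f();
}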

impl Drop for CodeMemory {
    fn drop(&mut self) {
        if let Some(source_pool) = self.source_pool.take() {
            // Restore write+read protection before returning the mapping to
            // its pool, so the next user can fill it again.
            unsafe {
                let result = mm::mprotect(
                    self.map.cast(),
                    self.size,
                    MprotectFlags::WRITE | MprotectFlags::READ,
                );
                if let Err(e) = result {
                    panic!(
                        "could not mprotect mapping before returning it to the memory pool: \
                         map={:?}, size={:?}, error={}",
                        self.map, self.size, e
                    );
                }
            }
            // If the pool is already full, the pushed value is dropped again,
            // this time without a `source_pool`, which unmaps it below.
            drop(source_pool.push(Self {
                source_pool: None,
                map: self.map,
                size: self.size,
                executable_end: 0,
            }));
        } else {
            unsafe {
                if let Err(e) = mm::munmap(self.map.cast(), self.size) {
                    tracing::error!(
                        target: "unc_vm",
                        message="could not unmap mapping",
                        map=?self.map, size=self.size, error=%e
                    );
                }
            }
        }
    }
}

// SAFETY: the raw `map` pointer refers to an owned, exclusively held mapping,
// so moving a `CodeMemory` across threads is sound.
unsafe impl Send for CodeMemory {}

/// A pool holding a fixed number of [`CodeMemory`] mappings; dropped memories
/// return to the pool rather than being unmapped.
#[derive(Clone)]
pub struct LimitedMemoryPool {
    pool: Arc<crossbeam_queue::ArrayQueue<CodeMemory>>,
}

impl LimitedMemoryPool {
    /// Create a pool of `count` mappings, each `default_memory_size` bytes.
    pub fn new(count: usize, default_memory_size: usize) -> rustix::io::Result<Self> {
        let pool = Arc::new(crossbeam_queue::ArrayQueue::new(count));
        let this = Self { pool };
        for _ in 0..count {
            this.pool
                .push(CodeMemory::create(default_memory_size)?)
                .unwrap_or_else(|_| panic!("ArrayQueue could not accommodate {count} memories!"));
        }
        Ok(this)
    }

    /// Take a memory of at least `size` bytes from the pool.
    ///
    /// Fails with `NOMEM` if the pool is exhausted.
    pub fn get(&self, size: usize) -> rustix::io::Result<CodeMemory> {
        let mut memory = self.pool.pop().ok_or(rustix::io::Errno::NOMEM)?;
        memory.source_pool = Some(Arc::clone(&self.pool));
        if memory.size < size {
            Ok(memory.resize(size)?)
        } else {
            Ok(memory)
        }
    }
}
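
// A minimal usage sketch (an assumption, not part of the original file): take
// a mapping from the pool, use it, and let `Drop` return it. The pool size
// and mapping size below are arbitrary illustrative values.
#[cfg(test)]
#[allow(dead_code)]
fn _pool_usage_sketch() -> rustix::io::Result<()> {
    let pool = LimitedMemoryPool::new(2, rustix::param::page_size())?;
    let memory = pool.get(4096)?;
    drop(memory); // returns the mapping to `pool`, not to the OS
    Ok(())
}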

#[cfg(test)]
mod tests {
    use super::CodeMemory;

    // Compile-time check that `CodeMemory` is `Send`.
    fn _assert() {
        fn _assert_send<T: Send>() {}
        _assert_send::<CodeMemory>();
    }
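
    // A unit test (not in the original file) pinning down `round_up`
    // behaviour at a few representative points.
    #[test]
    fn round_up_examples() {
        assert_eq!(super::round_up(0, 16), 0);
        assert_eq!(super::round_up(1, 16), 16);
        assert_eq!(super::round_up(16, 16), 16);
        assert_eq!(super::round_up(17, 16), 32);
    }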
}