wasmer_engine_jit/
code_memory.rs

// This file contains code from external sources.
// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md

//! Memory management for executable code.
use crate::unwind::UnwindRegistry;
use wasmer_compiler::{CompiledFunctionUnwindInfo, CustomSection, FunctionBody};
use wasmer_vm::{Mmap, VMFunctionBody};

/// The optimal alignment for functions.
///
/// On x86-64, this is 16 since it's what the optimizations assume.
/// When we add support for other architectures, we should also figure out their
/// optimal alignment values.
const ARCH_FUNCTION_ALIGNMENT: usize = 16;

/// The optimal alignment for data.
const DATA_SECTION_ALIGNMENT: usize = 64;

/// Memory manager for executable code.
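///
/// A typical lifecycle, as a minimal sketch (`bodies`, `exec` and `data`
/// stand in for hypothetical compiler outputs):
///
/// ```ignore
/// let mut code_memory = CodeMemory::new();
/// let (funcs, exec_sections, data_sections) =
///     code_memory.allocate(&bodies, &exec, &data)?;
/// code_memory.publish(); // code pages become read+execute
/// ```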
pub struct CodeMemory {
    unwind_registry: UnwindRegistry,
    mmap: Mmap,
    start_of_nonexecutable_pages: usize,
}

impl CodeMemory {
    /// Create a new `CodeMemory` instance.
    pub fn new() -> Self {
        Self {
            unwind_registry: UnwindRegistry::new(),
            mmap: Mmap::new(),
            start_of_nonexecutable_pages: 0,
        }
    }

    /// Get a mutable reference to the `UnwindRegistry`.
    pub fn unwind_registry_mut(&mut self) -> &mut UnwindRegistry {
        &mut self.unwind_registry
    }

    /// Allocate a single contiguous block of memory for the functions and
    /// custom sections, and copy the data in place.
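    ///
    /// Returns the function bodies, the executable sections and the data
    /// sections, in that order, as mutable slices into the new mapping.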
    pub fn allocate(
        &mut self,
        functions: &[&FunctionBody],
        executable_sections: &[&CustomSection],
        data_sections: &[&CustomSection],
    ) -> Result<(Vec<&mut [VMFunctionBody]>, Vec<&mut [u8]>, Vec<&mut [u8]>), String> {
        let mut function_result = vec![];
        let mut data_section_result = vec![];
        let mut executable_section_result = vec![];

        let page_size = region::page::size();

        // 1. Calculate the total size, that is:
        // - function body size, including all trampolines
        // -- windows unwind info
        // -- padding between functions
        // - executable section body
        // -- padding between executable sections
        // - padding until a new page to change page permissions
        // - data section body size
        // -- padding between data sections
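        //
        // The resulting layout, with a page-aligned boundary between the
        // executable and read-write regions, is roughly:
        //
        //   [ functions | executable sections | pad to page | data sections ]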

        let total_len = round_up(
            functions.iter().fold(0, |acc, func| {
                round_up(
                    acc + Self::function_allocation_size(func),
                    ARCH_FUNCTION_ALIGNMENT,
                )
            }) + executable_sections.iter().fold(0, |acc, exec| {
                round_up(acc + exec.bytes.len(), ARCH_FUNCTION_ALIGNMENT)
            }),
            page_size,
        ) + data_sections.iter().fold(0, |acc, data| {
            round_up(acc + data.bytes.len(), DATA_SECTION_ALIGNMENT)
        });

        // 2. Allocate the pages. Mark them all read-write.

        self.mmap = Mmap::with_at_least(total_len)?;

        // 3. Determine where the pointers to each function, executable section
        // or data section are. Copy the functions. Collect the addresses of each
        // and return them.

        let mut bytes = 0;
        let mut buf = self.mmap.as_mut_slice();
        for func in functions {
            let len = round_up(
                Self::function_allocation_size(func),
                ARCH_FUNCTION_ALIGNMENT,
            );
            let (func_buf, next_buf) = buf.split_at_mut(len);
            buf = next_buf;
            bytes += len;

            let vmfunc = Self::copy_function(&mut self.unwind_registry, func, func_buf);
            assert_eq!(vmfunc.as_ptr() as usize % ARCH_FUNCTION_ALIGNMENT, 0);
            function_result.push(vmfunc);
        }
        for section in executable_sections {
            let section = &section.bytes;
            assert_eq!(buf.as_mut_ptr() as usize % ARCH_FUNCTION_ALIGNMENT, 0);
            let len = round_up(section.len(), ARCH_FUNCTION_ALIGNMENT);
            let (s, next_buf) = buf.split_at_mut(len);
            buf = next_buf;
            bytes += len;
            s[..section.len()].copy_from_slice(section.as_slice());
            executable_section_result.push(s);
        }

        self.start_of_nonexecutable_pages = bytes;

        if !data_sections.is_empty() {
            // Data sections have different page permissions from the executable
            // code that comes before them, so they need to be on different pages.
            let padding = round_up(bytes, page_size) - bytes;
            buf = buf.split_at_mut(padding).1;

            for section in data_sections {
                let section = &section.bytes;
                assert_eq!(buf.as_mut_ptr() as usize % DATA_SECTION_ALIGNMENT, 0);
                let len = round_up(section.len(), DATA_SECTION_ALIGNMENT);
                let (s, next_buf) = buf.split_at_mut(len);
                buf = next_buf;
                s[..section.len()].copy_from_slice(section.as_slice());
                data_section_result.push(s);
            }
        }

        Ok((
            function_result,
            executable_section_result,
            data_section_result,
        ))
    }

    /// Apply the page permissions.
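    ///
    /// Makes the pages holding executable code read+execute; the data pages
    /// that follow `start_of_nonexecutable_pages` stay read-write.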
    pub fn publish(&mut self) {
        if self.mmap.is_empty() || self.start_of_nonexecutable_pages == 0 {
            return;
        }
        assert!(self.mmap.len() >= self.start_of_nonexecutable_pages);
        unsafe {
            region::protect(
                self.mmap.as_mut_ptr(),
                self.start_of_nonexecutable_pages,
                region::Protection::READ_EXECUTE,
            )
        }
        .expect("unable to make memory readonly and executable");
    }

    /// Calculates the allocation size of the given compiled function.
    fn function_allocation_size(func: &FunctionBody) -> usize {
        match &func.unwind_info {
            Some(CompiledFunctionUnwindInfo::WindowsX64(info)) => {
                // Windows unwind information is required to be emitted into code memory.
                // This is because it must be at a positive relative offset from the start of the memory.
                // Account for necessary unwind information alignment padding (32-bit alignment).
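                // e.g. a 10-byte body rounds up to 12 before the unwind info starts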
                ((func.body.len() + 3) & !3) + info.len()
            }
            _ => func.body.len(),
        }
    }

    /// Copies the data of the compiled function to the given buffer.
    ///
    /// This will also register the function's unwind information with the
    /// given `UnwindRegistry`.
    fn copy_function<'a>(
        registry: &mut UnwindRegistry,
        func: &FunctionBody,
        buf: &'a mut [u8],
    ) -> &'a mut [VMFunctionBody] {
        assert_eq!(buf.as_ptr() as usize % ARCH_FUNCTION_ALIGNMENT, 0);

        let func_len = func.body.len();

        let (body, remainder) = buf.split_at_mut(func_len);
        body.copy_from_slice(&func.body);
        let vmfunc = Self::view_as_mut_vmfunc_slice(body);

        if let Some(CompiledFunctionUnwindInfo::WindowsX64(info)) = &func.unwind_info {
            // Windows unwind information is written following the function body.
            // Keep unwind information 32-bit aligned (round up to the nearest 4-byte boundary).
            let unwind_start = (func_len + 3) & !3;
            let unwind_size = info.len();
            let padding = unwind_start - func_len;
            assert_eq!((func_len + padding) % 4, 0);
            let slice = remainder.split_at_mut(padding + unwind_size).0;
            slice[padding..].copy_from_slice(&info);
        }

        if let Some(info) = &func.unwind_info {
            registry
                .register(vmfunc.as_ptr() as usize, 0, func_len as u32, info)
                .expect("failed to register unwind information");
        }

        vmfunc
    }

    /// Convert a mutable slice from `u8` to `VMFunctionBody`.
    fn view_as_mut_vmfunc_slice(slice: &mut [u8]) -> &mut [VMFunctionBody] {
        let byte_ptr: *mut [u8] = slice;
        let body_ptr = byte_ptr as *mut [VMFunctionBody];
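        // Safety: this assumes `VMFunctionBody` is a byte-sized type, so the
        // cast preserves the slice's length and layout.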
        unsafe { &mut *body_ptr }
    }
}

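/// Round `size` up to the nearest multiple of `multiple`, which must be a
/// power of two.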
fn round_up(size: usize, multiple: usize) -> usize {
    debug_assert!(multiple.is_power_of_two());
    (size + (multiple - 1)) & !(multiple - 1)
}

#[cfg(test)]
mod tests {
    use super::CodeMemory;
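
    /// Sanity-check `round_up` against a few hand-computed values (an added
    /// test, not part of the original suite).
    #[test]
    fn round_up_rounds_to_power_of_two_multiples() {
        use super::round_up;
        assert_eq!(round_up(0, 16), 0);
        assert_eq!(round_up(1, 16), 16);
        assert_eq!(round_up(16, 16), 16);
        assert_eq!(round_up(17, 16), 32);
        assert_eq!(round_up(4095, 4096), 4096);
    }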

    fn _assert() {
        fn _assert_send_sync<T: Send + Sync>() {}
        _assert_send_sync::<CodeMemory>();
    }
}
223}