wasmer_engine_universal/code_memory.rs

// This file contains code from external sources.
// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md

//! Memory management for executable code.
use crate::unwind::UnwindRegistry;
use loupe::MemoryUsage;
use wasmer_compiler::{CompiledFunctionUnwindInfo, CustomSection, FunctionBody};
use wasmer_vm::{Mmap, VMFunctionBody};

/// The optimal alignment for functions.
///
/// On x86-64, this is 16 since it's what the optimizations assume.
/// When we add support for other architectures, we should also figure out their
/// optimal alignment values.
const ARCH_FUNCTION_ALIGNMENT: usize = 16;

/// The optimal alignment for data.
///
/// 64 matches the cache-line size of most modern x86-64 processors.
const DATA_SECTION_ALIGNMENT: usize = 64;

/// Memory manager for executable code.
#[derive(MemoryUsage)]
pub struct CodeMemory {
    /// Unwind information registered for the functions in this memory.
    unwind_registry: UnwindRegistry,
    /// The backing memory mapping that holds both code and data.
    mmap: Mmap,
    /// Offset into `mmap` at which the executable part ends.
    start_of_nonexecutable_pages: usize,
}

impl CodeMemory {
    /// Create a new `CodeMemory` instance.
    pub fn new() -> Self {
        Self {
            unwind_registry: UnwindRegistry::new(),
            mmap: Mmap::new(),
            start_of_nonexecutable_pages: 0,
        }
    }

    /// Get a mutable reference to the [`UnwindRegistry`].
    pub fn unwind_registry_mut(&mut self) -> &mut UnwindRegistry {
        &mut self.unwind_registry
    }

    /// Allocate a single contiguous block of memory for the functions and
    /// custom sections, and copy the data in place.
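    ///
    /// # Example
    ///
    /// A minimal usage sketch (`ignore`d: obtaining a real `FunctionBody`
    /// requires a compiler backend, so `function_body` here is hypothetical):
    ///
    /// ```ignore
    /// let mut code = CodeMemory::new();
    /// let (funcs, _exec, _data) = code.allocate(&[&function_body], &[], &[])?;
    /// code.publish(); // flip the code pages to read + execute
    /// ```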
    pub fn allocate(
        &mut self,
        functions: &[&FunctionBody],
        executable_sections: &[&CustomSection],
        data_sections: &[&CustomSection],
    ) -> Result<(Vec<&mut [VMFunctionBody]>, Vec<&mut [u8]>, Vec<&mut [u8]>), String> {
        let mut function_result = vec![];
        let mut data_section_result = vec![];
        let mut executable_section_result = vec![];

        let page_size = region::page::size();

        // 1. Calculate the total size, that is:
        // - function body size, including all trampolines
        // -- windows unwind info
        // -- padding between functions
        // - executable section body
        // -- padding between executable sections
        // - padding until a new page to change page permissions
        // - data section body size
        // -- padding between data sections

        let total_len = round_up(
            functions.iter().fold(0, |acc, func| {
                round_up(
                    acc + Self::function_allocation_size(func),
                    ARCH_FUNCTION_ALIGNMENT,
                )
            }) + executable_sections.iter().fold(0, |acc, exec| {
                round_up(acc + exec.bytes.len(), ARCH_FUNCTION_ALIGNMENT)
            }),
            page_size,
        ) + data_sections.iter().fold(0, |acc, data| {
            round_up(acc + data.bytes.len(), DATA_SECTION_ALIGNMENT)
        });
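        // Illustrative arithmetic (assuming a 4 KiB page size): two 100-byte
        // functions each round up to 112 bytes (16-byte alignment), those
        // 224 bytes of code round up to one 4096-byte page, and a 10-byte
        // data section rounds up to 64 bytes, for a total of 4160 bytes.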

        // 2. Allocate the pages. Mark them all read-write.

        self.mmap = Mmap::with_at_least(total_len)?;

        // 3. Determine where the pointers to each function, executable section
        // or data section are, copy the bodies in place, and collect the
        // resulting slices to return them.

        let mut bytes = 0;
        let mut buf = self.mmap.as_mut_slice();
        for func in functions {
            let len = round_up(
                Self::function_allocation_size(func),
                ARCH_FUNCTION_ALIGNMENT,
            );
            let (func_buf, next_buf) = buf.split_at_mut(len);
            buf = next_buf;
            bytes += len;

            let vmfunc = Self::copy_function(&mut self.unwind_registry, func, func_buf);
            assert_eq!(vmfunc.as_ptr() as usize % ARCH_FUNCTION_ALIGNMENT, 0);
            function_result.push(vmfunc);
        }
        for section in executable_sections {
            let section = &section.bytes;
            assert_eq!(buf.as_mut_ptr() as usize % ARCH_FUNCTION_ALIGNMENT, 0);
            let len = round_up(section.len(), ARCH_FUNCTION_ALIGNMENT);
            let (s, next_buf) = buf.split_at_mut(len);
            buf = next_buf;
            bytes += len;
            s[..section.len()].copy_from_slice(section.as_slice());
            executable_section_result.push(s);
        }

        self.start_of_nonexecutable_pages = bytes;

        if !data_sections.is_empty() {
            // Data sections have different page permissions from the executable
            // code that came before them, so they need to be on different pages.
            let padding = round_up(bytes, page_size) - bytes;
            buf = buf.split_at_mut(padding).1;

            for section in data_sections {
                let section = &section.bytes;
                assert_eq!(buf.as_mut_ptr() as usize % DATA_SECTION_ALIGNMENT, 0);
                let len = round_up(section.len(), DATA_SECTION_ALIGNMENT);
                let (s, next_buf) = buf.split_at_mut(len);
                buf = next_buf;
                s[..section.len()].copy_from_slice(section.as_slice());
                data_section_result.push(s);
            }
        }

        Ok((
            function_result,
            executable_section_result,
            data_section_result,
        ))
    }

    /// Apply the final page permissions, making the code pages read-only
    /// and executable.
    pub fn publish(&mut self) {
        if self.mmap.is_empty() || self.start_of_nonexecutable_pages == 0 {
            return;
        }
        assert!(self.mmap.len() >= self.start_of_nonexecutable_pages);
        unsafe {
            region::protect(
                self.mmap.as_mut_ptr(),
                self.start_of_nonexecutable_pages,
                region::Protection::READ_EXECUTE,
            )
        }
        .expect("unable to make memory readonly and executable");
    }

    /// Calculates the allocation size of the given compiled function.
    fn function_allocation_size(func: &FunctionBody) -> usize {
        match &func.unwind_info {
            Some(CompiledFunctionUnwindInfo::WindowsX64(info)) => {
                // Windows unwind information is required to be emitted into code memory.
                // This is because it must be a positive relative offset from the start of the memory.
                // Account for necessary unwind information alignment padding (32-bit alignment),
                // e.g. a 10-byte body is padded to 12 bytes before the unwind info begins.
                ((func.body.len() + 3) & !3) + info.len()
            }
            _ => func.body.len(),
        }
    }

    /// Copies the data of the compiled function to the given buffer.
    ///
    /// This will also add the function to the current function table.
    fn copy_function<'a>(
        registry: &mut UnwindRegistry,
        func: &FunctionBody,
        buf: &'a mut [u8],
    ) -> &'a mut [VMFunctionBody] {
        assert_eq!(buf.as_ptr() as usize % ARCH_FUNCTION_ALIGNMENT, 0);

        let func_len = func.body.len();

        let (body, remainder) = buf.split_at_mut(func_len);
        body.copy_from_slice(&func.body);
        let vmfunc = Self::view_as_mut_vmfunc_slice(body);

        if let Some(CompiledFunctionUnwindInfo::WindowsX64(info)) = &func.unwind_info {
            // Windows unwind information is written following the function body.
            // Keep unwind information 32-bit aligned (round up to the nearest 4-byte boundary).
            let unwind_start = (func_len + 3) & !3;
            let unwind_size = info.len();
            let padding = unwind_start - func_len;
            assert_eq!((func_len + padding) % 4, 0);
            let slice = remainder.split_at_mut(padding + unwind_size).0;
            slice[padding..].copy_from_slice(&info);
        }

        if let Some(info) = &func.unwind_info {
            registry
                .register(vmfunc.as_ptr() as usize, 0, func_len as u32, info)
                .expect("failed to register unwind information");
        }

        vmfunc
    }

    /// Converts a mutable `u8` slice into a mutable `VMFunctionBody` slice.
    fn view_as_mut_vmfunc_slice(slice: &mut [u8]) -> &mut [VMFunctionBody] {
        let byte_ptr: *mut [u8] = slice;
        let body_ptr = byte_ptr as *mut [VMFunctionBody];
        unsafe { &mut *body_ptr }
    }
}

/// Rounds `size` up to the nearest multiple of `multiple`, which must be a
/// power of two: e.g. `round_up(100, 16) == 112`.
fn round_up(size: usize, multiple: usize) -> usize {
    debug_assert!(multiple.is_power_of_two());
    (size + (multiple - 1)) & !(multiple - 1)
}

#[cfg(test)]
mod tests {
    use super::CodeMemory;
    fn _assert() {
        fn _assert_send_sync<T: Send + Sync>() {}
        _assert_send_sync::<CodeMemory>();
    }
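
    // Sanity tests added as a sketch: they cover only the internal `round_up`
    // helper and the empty-memory fast path of `publish`, since constructing
    // a real `FunctionBody` requires a compiler backend.
    #[test]
    fn round_up_to_power_of_two_multiples() {
        assert_eq!(super::round_up(0, 16), 0);
        assert_eq!(super::round_up(1, 16), 16);
        assert_eq!(super::round_up(16, 16), 16);
        assert_eq!(super::round_up(100, 16), 112);
        assert_eq!(super::round_up(10, 64), 64);
    }

    #[test]
    fn publish_empty_code_memory_is_a_noop() {
        // `publish` returns early when nothing has been allocated,
        // so this must not panic.
        let mut code = CodeMemory::new();
        code.publish();
    }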
}