// closure_ffi/jit_alloc.rs

1//! Abstractions around allocators that provide dual-mapped memory with XOR protection rules (one RW
2//! view and one RX view) suitable for emitting code at runtime.
3//!
4//! Meant to be an abstraction over the `jit-allocator` crate's API so that it can be swapped with
5//! user-provided allocators.
6//!
7//! See the [`JitAlloc`] trait for more information.
8
/// Anonymous error that may be returned by [`JitAlloc`] implementations when [`JitAlloc::alloc`] or
/// [`JitAlloc::release`] fail.
///
/// Carries no payload: callers only learn that the operation failed. Derives the same basic
/// traits as [`ProtectJitAccess`] so it is free to copy and trivial to compare in tests and
/// error-propagation code.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct JitAllocError;
13
/// Access modes accepted by [`JitAlloc::protect_jit_memory`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProtectJitAccess {
    /// Request Read+Write permissions on the JIT memory for the current thread.
    ReadWrite = 0,
    /// Request Read+Execute permissions on the JIT memory for the current thread.
    ReadExecute = 1,
}
22
/// Generic allocator providing virtual memory suitable for emitting code at runtime.
///
/// The API is meant to be a thin abstraction over the `jit-allocator` crate's API, to allow it
/// to be swapped with other allocators.
///
/// Implementations hand out *dual-mapped* memory (see the module docs): one Read-Write view for
/// emitting code and one Read-Execute view for running it.
pub trait JitAlloc {
    /// Allocates `size` bytes in the executable memory region.
    ///
    /// Returns two pointers: the first points to the Read-Execute mapping and the second to the
    /// Read-Write mapping. All code writes *must* go through the Read-Write mapping.
    fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError>;

    /// Releases the memory allocated by `alloc`.
    ///
    /// # Safety
    /// - `rx_ptr` must have been returned from `alloc`
    /// - `rx_ptr` must have been allocated from this allocator
    /// - `rx_ptr` must not have been passed to `release` before
    /// - `rx_ptr` must point to the Read-Execute part of memory returned from `alloc`.
    unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError>;

    /// On hardened architectures with `MAP_JIT`-like memory flags, set the access for the current
    /// thread.
    ///
    /// This is expected to be a no-op on most platforms, but should be called before writing
    /// or executing JIT memory.
    ///
    /// Note: this is an associated function (no `&self`) — the access change is per-thread, not
    /// per-allocator.
    ///
    /// # Safety
    ///
    /// - `ptr` must point to at least `size` bytes of readable memory.
    unsafe fn protect_jit_memory(ptr: *const u8, size: usize, access: ProtectJitAccess);

    /// Flushes the instruction cache for (at least) the given slice of executable memory. Should be
    /// called after the JIT memory is ready to be executed.
    ///
    /// On architectures with shared data/instruction caches, like x86_64, this is a no-op.
    ///
    /// # Safety
    /// - `rx_ptr` must point to at least `size` bytes of Read-Execute memory.
    unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize);
}
62
63impl<J: JitAlloc> JitAlloc for &J {
64    fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
65        (*self).alloc(size)
66    }
67
68    unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
69        (*self).release(rx_ptr)
70    }
71
72    #[inline(always)]
73    unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
74        J::flush_instruction_cache(rx_ptr, size);
75    }
76
77    #[inline(always)]
78    unsafe fn protect_jit_memory(ptr: *const u8, size: usize, access: ProtectJitAccess) {
79        J::protect_jit_memory(ptr, size, access);
80    }
81}
82
83#[cfg(not(feature = "no_std"))]
84impl<J: JitAlloc> JitAlloc for std::sync::LazyLock<J> {
85    fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
86        self.deref().alloc(size)
87    }
88
89    unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
90        self.deref().release(rx_ptr)
91    }
92
93    unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
94        J::flush_instruction_cache(rx_ptr, size);
95    }
96
97    unsafe fn protect_jit_memory(ptr: *const u8, size: usize, access: ProtectJitAccess) {
98        J::protect_jit_memory(ptr, size, access);
99    }
100}
101
/// no_std counterpart of the `LazyLock` impl, backed by `spin::Lazy`.
#[cfg(feature = "no_std")]
impl<J: JitAlloc> JitAlloc for spin::Lazy<J> {
    fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
        Deref::deref(self).alloc(size)
    }

    unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
        Deref::deref(self).release(rx_ptr)
    }

    unsafe fn protect_jit_memory(ptr: *const u8, size: usize, access: ProtectJitAccess) {
        J::protect_jit_memory(ptr, size, access);
    }

    unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
        J::flush_instruction_cache(rx_ptr, size);
    }
}
120
#[cfg(feature = "bundled_jit_alloc")]
mod bundled_jit_alloc {
    use jit_allocator::JitAllocator;

    use super::*;

    /// Maps the crate-local [`ProtectJitAccess`] onto the `jit-allocator` crate's equivalent enum.
    #[inline(always)]
    fn convert_access(access: ProtectJitAccess) -> jit_allocator::ProtectJitAccess {
        match access {
            ProtectJitAccess::ReadExecute => jit_allocator::ProtectJitAccess::ReadExecute,
            ProtectJitAccess::ReadWrite => jit_allocator::ProtectJitAccess::ReadWrite,
        }
    }

    /// Shared icache-flush helper used by every bundled allocator impl below.
    ///
    /// On 32-bit ARM Linux this issues the `cacheflush` syscall directly and returns early;
    /// everywhere else it defers to `jit_allocator::flush_instruction_cache`.
    fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
        #[cfg(all(target_arch = "arm", target_os = "linux"))]
        unsafe {
            // NOTE(review): presumably works around the `jit-allocator` crate's flush on
            // arm-linux — confirm against that crate before removing this special case.
            const __ARM_NR_CACHEFLUSH: i32 = 0x0f0002;
            // Arguments are (start, end, flags); `end` is exclusive.
            libc::syscall(
                __ARM_NR_CACHEFLUSH,
                rx_ptr as usize as u64,
                (rx_ptr as usize + size) as u64,
                0,
            );
            return;
        }
        // Unreachable only on arm-linux (the block above returns), hence the allow.
        #[allow(unreachable_code)]
        jit_allocator::flush_instruction_cache(rx_ptr, size);
    }

    /// Single-threaded interior-mutability wrapper. `borrow_mut` panics on reentrant use.
    impl JitAlloc for core::cell::RefCell<JitAllocator> {
        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
            // Collapse the jit-allocator error into the crate's anonymous error.
            self.borrow_mut().alloc(size).map_err(|_| JitAllocError)
        }

        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
            self.borrow_mut().release(rx_ptr).map_err(|_| JitAllocError)
        }

        #[inline(always)]
        unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
            flush_instruction_cache(rx_ptr, size);
        }

        #[inline(always)]
        unsafe fn protect_jit_memory(_ptr: *const u8, _size: usize, access: ProtectJitAccess) {
            // jit-allocator applies protection per-thread; ptr/size are not needed.
            jit_allocator::protect_jit_memory(convert_access(access));
        }
    }

    /// Thread-safe wrapper. Both alloc and release mutate the allocator, so a write lock is
    /// taken in each case (an `RwLock` offers no read-path benefit here over a `Mutex`).
    #[cfg(not(feature = "no_std"))]
    impl JitAlloc for std::sync::RwLock<JitAllocator> {
        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
            self.write().unwrap().alloc(size).map_err(|_| JitAllocError)
        }

        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
            self.write().unwrap().release(rx_ptr).map_err(|_| JitAllocError)
        }

        #[inline(always)]
        unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
            flush_instruction_cache(rx_ptr, size);
        }

        #[inline(always)]
        unsafe fn protect_jit_memory(_ptr: *const u8, _size: usize, access: ProtectJitAccess) {
            jit_allocator::protect_jit_memory(convert_access(access));
        }
    }

    /// Thread-safe wrapper over a mutex-guarded allocator.
    #[cfg(not(feature = "no_std"))]
    impl JitAlloc for std::sync::Mutex<JitAllocator> {
        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
            self.lock().unwrap().alloc(size).map_err(|_| JitAllocError)
        }

        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
            self.lock().unwrap().release(rx_ptr).map_err(|_| JitAllocError)
        }

        #[inline(always)]
        unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
            flush_instruction_cache(rx_ptr, size);
        }

        #[inline(always)]
        unsafe fn protect_jit_memory(_ptr: *const u8, _size: usize, access: ProtectJitAccess) {
            jit_allocator::protect_jit_memory(convert_access(access));
        }
    }

    // Backing storage for `GlobalJitAlloc`: lazily created on first use inside `use_alloc`.
    #[cfg(feature = "no_std")]
    static GLOBAL_JIT_ALLOC: spin::Mutex<Option<alloc::boxed::Box<JitAllocator>>> =
        spin::Mutex::new(None);
    #[cfg(not(feature = "no_std"))]
    static GLOBAL_JIT_ALLOC: std::sync::Mutex<Option<Box<JitAllocator>>> =
        std::sync::Mutex::new(None);

    /// The default, global JIT allocator.
    ///
    /// This is currently implemented as a ZST deferring to a static [`jit_allocator::JitAllocator`]
    /// behind a [`std::sync::Mutex`] (or a [`spin::Mutex`] under no_std).
    ///
    /// [`spin::Mutex`]: https://docs.rs/spin/0.9/spin/type.Mutex.html
    #[derive(Default, Clone, Copy)]
    pub struct GlobalJitAlloc;

    impl GlobalJitAlloc {
        /// Runs `action` with exclusive access to the global allocator, creating it on first use.
        /// The mutex stays held for the duration of `action`.
        fn use_alloc<T>(&self, action: impl FnOnce(&mut JitAllocator) -> T) -> T {
            #[cfg(feature = "no_std")]
            let mut maybe_alloc = GLOBAL_JIT_ALLOC.lock();
            #[cfg(not(feature = "no_std"))]
            let mut maybe_alloc = GLOBAL_JIT_ALLOC.lock().unwrap();

            let alloc = maybe_alloc.get_or_insert_with(|| JitAllocator::new(Default::default()));
            action(alloc)
        }
    }

    impl JitAlloc for GlobalJitAlloc {
        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
            self.use_alloc(|a| a.alloc(size)).map_err(|_| JitAllocError)
        }

        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
            self.use_alloc(|a| a.release(rx_ptr)).map_err(|_| JitAllocError)
        }

        #[inline(always)]
        unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
            flush_instruction_cache(rx_ptr, size);
        }

        #[inline(always)]
        unsafe fn protect_jit_memory(_ptr: *const u8, _size: usize, access: ProtectJitAccess) {
            jit_allocator::protect_jit_memory(convert_access(access));
        }
    }

    #[cfg(not(feature = "no_std"))]
    mod thread_jit_alloc {
        use core::{cell::UnsafeCell, marker::PhantomData};

        use jit_allocator::JitAllocator;

        #[allow(unused_imports)]
        use super::*;

        thread_local! {
            static THREAD_JIT_ALLOC: UnsafeCell<Box<JitAllocator>> =
                UnsafeCell::new(JitAllocator::new(Default::default()));
        }

        /// Marker type providing access to a thread-local JIT allocator.
        ///
        /// Unlike [`GlobalJitAlloc`], this allocator is neither [`Send`] nor [`Sync`]
        /// (enforced by the `PhantomData<*mut ()>` field).
        #[derive(Default, Clone)]
        pub struct ThreadJitAlloc(PhantomData<*mut ()>);

        impl JitAlloc for ThreadJitAlloc {
            fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
                // SAFETY-relevant: the &mut borrow of the UnsafeCell contents lives only for
                // the duration of the closure, and the type is not Send/Sync, so no aliasing
                // &mut can exist — assuming `alloc` does not reenter this thread-local.
                THREAD_JIT_ALLOC
                    .with(|a| unsafe { &mut *a.get() }.alloc(size))
                    .map_err(|_| JitAllocError)
            }

            unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
                THREAD_JIT_ALLOC
                    .with(|a| unsafe { &mut *a.get() }.release(rx_ptr))
                    .map_err(|_| JitAllocError)
            }

            #[inline(always)]
            unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
                flush_instruction_cache(rx_ptr, size);
            }

            #[inline(always)]
            unsafe fn protect_jit_memory(_ptr: *const u8, _size: usize, access: ProtectJitAccess) {
                jit_allocator::protect_jit_memory(convert_access(access));
            }
        }
    }
    #[cfg(not(feature = "no_std"))]
    pub use thread_jit_alloc::*;
}
308use core::ops::Deref;
309
310#[cfg(feature = "bundled_jit_alloc")]
311pub use bundled_jit_alloc::*;