closure_ffi/jit_alloc.rs

1//! Abstractions around allocators that provide dual-mapped memory with XOR protection rules (one RW
2//! view and one RX view) suitable for emitting code at runtime.
3//!
4//! Meant to be an abstraction over the `jit-allocator` crate's API so that it can be swapped with
5//! user-provided allocators.
6//!
7//! See the [`JitAlloc`] trait for more information.
8
9use core::ops::Deref;
10
/// Anonymous error that may be returned by [`JitAlloc`] implementations when [`JitAlloc::alloc`] or
/// [`JitAlloc::release`] fail.
///
/// Carries no payload on purpose: the trait abstracts over arbitrary user allocators whose
/// error details cannot be known here.
#[derive(Debug)]
pub struct JitAllocError;

impl core::fmt::Display for JitAllocError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.write_str("JIT allocation failed")
    }
}

// `std::error::Error` lets callers use `?` into `Box<dyn Error>`; gated off under `no_std`.
#[cfg(not(feature = "no_std"))]
impl std::error::Error for JitAllocError {}
15
/// Values to use with [`JitAlloc::protect_jit_memory`].
///
/// Selects which of the two access modes the current thread should have on JIT memory.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ProtectJitAccess {
    /// Protect JIT memory with Read+Write permissions.
    ReadWrite = 0,
    /// Protect JIT memory with Read+Execute permissions.
    ReadExecute = 1,
}
24
/// Generic allocator providing virtual memory suitable for emitting code at runtime.
///
/// The API is meant to be a thin abstraction over the `jit-allocator` crate's API, to allow it
/// to be swapped with other allocators.
pub trait JitAlloc {
    /// Allocates `size` bytes in the executable memory region.
    ///
    /// Returns two pointers: one points to the Read-Execute mapping and the other to the
    /// Read-Write mapping. All code writes *must* go to the Read-Write mapping.
    fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError>;

    /// Releases the memory allocated by `alloc`.
    ///
    /// # Safety
    /// - `rx_ptr` must have been returned from `alloc`
    /// - `rx_ptr` must have been allocated from this allocator
    /// - `rx_ptr` must not have been passed to `release` before
    /// - `rx_ptr` must point to the read-execute part of memory returned from `alloc`.
    unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError>;

    /// On hardened architectures with `MAP_JIT`-like memory flags, set the access for the current
    /// thread.
    ///
    /// This is expected to be a no-op on most platforms, but should be called before writing
    /// or executing JIT memory.
    ///
    /// # Safety
    ///
    /// - `ptr` must point to at least `size` bytes of readable memory.
    unsafe fn protect_jit_memory(&self, ptr: *const u8, size: usize, access: ProtectJitAccess);

    /// Flushes the instruction cache for (at least) the given slice of executable memory. Should be
    /// called after the JIT memory is ready to be executed.
    ///
    /// On architectures with shared data/instruction caches, like x86_64, this is a no-op.
    ///
    /// # Safety
    /// - `rx_ptr` must point to at least `size` bytes of Read-Execute memory.
    unsafe fn flush_instruction_cache(&self, rx_ptr: *const u8, size: usize);
}
64
65impl<J: JitAlloc> JitAlloc for &J {
66    fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
67        (*self).alloc(size)
68    }
69
70    unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
71        (*self).release(rx_ptr)
72    }
73
74    #[inline(always)]
75    unsafe fn flush_instruction_cache(&self, rx_ptr: *const u8, size: usize) {
76        (*self).flush_instruction_cache(rx_ptr, size);
77    }
78
79    #[inline(always)]
80    unsafe fn protect_jit_memory(&self, ptr: *const u8, size: usize, access: ProtectJitAccess) {
81        (*self).protect_jit_memory(ptr, size, access);
82    }
83}
84
85#[cfg(not(feature = "no_std"))]
86impl<J: JitAlloc> JitAlloc for std::sync::LazyLock<J> {
87    fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
88        self.deref().alloc(size)
89    }
90
91    unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
92        self.deref().release(rx_ptr)
93    }
94
95    unsafe fn flush_instruction_cache(&self, rx_ptr: *const u8, size: usize) {
96        self.deref().flush_instruction_cache(rx_ptr, size);
97    }
98
99    unsafe fn protect_jit_memory(&self, ptr: *const u8, size: usize, access: ProtectJitAccess) {
100        self.deref().protect_jit_memory(ptr, size, access);
101    }
102}
103
/// Forwarding implementation for `spin`'s lazily-initialized cell, used under `no_std`.
#[cfg(feature = "no_std")]
impl<J: JitAlloc, R: spin::RelaxStrategy> JitAlloc for spin::lazy::Lazy<J, fn() -> J, R> {
    fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
        // Dereferencing the lazy cell initializes it on first access.
        Deref::deref(self).alloc(size)
    }

    unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
        Deref::deref(self).release(rx_ptr)
    }

    unsafe fn flush_instruction_cache(&self, rx_ptr: *const u8, size: usize) {
        Deref::deref(self).flush_instruction_cache(rx_ptr, size);
    }

    unsafe fn protect_jit_memory(&self, ptr: *const u8, size: usize, access: ProtectJitAccess) {
        Deref::deref(self).protect_jit_memory(ptr, size, access);
    }
}
122
#[cfg(any(feature = "bundled_jit_alloc", feature = "custom_jit_alloc"))]
/// The default, global JIT allocator.
///
/// When the `bundled_jit_alloc` feature is enabled, this is currently implemented as a ZST
/// deferring to a static [`jit_allocator::JitAllocator`] behind a [`std::sync::Mutex`] (or a
/// [`spin::Mutex`] under `no_std`).
///
/// When the `custom_jit_alloc` feature is enabled, defers to a [`JitAlloc`] implementation
/// provided by a downstream crate using the [`global_jit_alloc`] macro.
///
/// [`spin::Mutex`]: https://docs.rs/spin/0.9/spin/type.Mutex.html
/// [`global_jit_alloc`]: crate::global_jit_alloc
#[derive(Default, Clone, Copy)]
pub struct GlobalJitAlloc;
137
138#[cfg(feature = "bundled_jit_alloc")]
139mod bundled_jit_alloc {
140    use jit_allocator::JitAllocator;
141
142    use super::*;
143
144    #[inline(always)]
145    fn convert_access(access: ProtectJitAccess) -> jit_allocator::ProtectJitAccess {
146        match access {
147            ProtectJitAccess::ReadExecute => jit_allocator::ProtectJitAccess::ReadExecute,
148            ProtectJitAccess::ReadWrite => jit_allocator::ProtectJitAccess::ReadWrite,
149        }
150    }
151
152    fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
153        #[cfg(all(target_arch = "arm", target_os = "linux"))]
154        unsafe {
155            const __ARM_NR_CACHEFLUSH: i32 = 0x0f0002;
156            libc::syscall(
157                __ARM_NR_CACHEFLUSH,
158                rx_ptr as usize as u64,
159                (rx_ptr as usize + size) as u64,
160                0,
161            );
162            return;
163        }
164        #[allow(unreachable_code)]
165        jit_allocator::flush_instruction_cache(rx_ptr, size);
166    }
167
168    impl JitAlloc for core::cell::RefCell<JitAllocator> {
169        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
170            self.borrow_mut().alloc(size).map_err(|_| JitAllocError)
171        }
172
173        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
174            self.borrow_mut().release(rx_ptr).map_err(|_| JitAllocError)
175        }
176
177        #[inline(always)]
178        unsafe fn flush_instruction_cache(&self, rx_ptr: *const u8, size: usize) {
179            flush_instruction_cache(rx_ptr, size);
180        }
181
182        #[inline(always)]
183        unsafe fn protect_jit_memory(
184            &self,
185            _ptr: *const u8,
186            _size: usize,
187            access: ProtectJitAccess,
188        ) {
189            jit_allocator::protect_jit_memory(convert_access(access));
190        }
191    }
192
193    #[cfg(not(feature = "no_std"))]
194    impl JitAlloc for std::sync::RwLock<JitAllocator> {
195        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
196            self.write().unwrap().alloc(size).map_err(|_| JitAllocError)
197        }
198
199        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
200            self.write().unwrap().release(rx_ptr).map_err(|_| JitAllocError)
201        }
202
203        #[inline(always)]
204        unsafe fn flush_instruction_cache(&self, rx_ptr: *const u8, size: usize) {
205            flush_instruction_cache(rx_ptr, size);
206        }
207
208        #[inline(always)]
209        unsafe fn protect_jit_memory(
210            &self,
211            _ptr: *const u8,
212            _size: usize,
213            access: ProtectJitAccess,
214        ) {
215            jit_allocator::protect_jit_memory(convert_access(access));
216        }
217    }
218
219    #[cfg(not(feature = "no_std"))]
220    impl JitAlloc for std::sync::Mutex<JitAllocator> {
221        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
222            self.lock().unwrap().alloc(size).map_err(|_| JitAllocError)
223        }
224
225        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
226            self.lock().unwrap().release(rx_ptr).map_err(|_| JitAllocError)
227        }
228
229        #[inline(always)]
230        unsafe fn flush_instruction_cache(&self, rx_ptr: *const u8, size: usize) {
231            flush_instruction_cache(rx_ptr, size);
232        }
233
234        #[inline(always)]
235        unsafe fn protect_jit_memory(
236            &self,
237            _ptr: *const u8,
238            _size: usize,
239            access: ProtectJitAccess,
240        ) {
241            jit_allocator::protect_jit_memory(convert_access(access));
242        }
243    }
244
    // Backing storage for `GlobalJitAlloc`. The allocator is created lazily on first use
    // (see `GlobalJitAlloc::use_alloc`), hence the `Option`. Under `no_std` the lock is a
    // spinlock; otherwise it is a standard mutex.
    #[cfg(feature = "no_std")]
    static GLOBAL_JIT_ALLOC: spin::Mutex<Option<alloc::boxed::Box<JitAllocator>>> =
        spin::Mutex::new(None);
    #[cfg(not(feature = "no_std"))]
    static GLOBAL_JIT_ALLOC: std::sync::Mutex<Option<Box<JitAllocator>>> =
        std::sync::Mutex::new(None);
251
252    impl super::GlobalJitAlloc {
253        fn use_alloc<T>(&self, action: impl FnOnce(&mut JitAllocator) -> T) -> T {
254            #[cfg(feature = "no_std")]
255            let mut maybe_alloc = GLOBAL_JIT_ALLOC.lock();
256            #[cfg(not(feature = "no_std"))]
257            let mut maybe_alloc = GLOBAL_JIT_ALLOC.lock().unwrap();
258
259            let alloc = maybe_alloc.get_or_insert_with(|| JitAllocator::new(Default::default()));
260            action(alloc)
261        }
262    }
263
264    impl JitAlloc for super::GlobalJitAlloc {
265        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
266            self.use_alloc(|a| a.alloc(size)).map_err(|_| JitAllocError)
267        }
268
269        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
270            self.use_alloc(|a| a.release(rx_ptr)).map_err(|_| JitAllocError)
271        }
272
273        #[inline(always)]
274        unsafe fn flush_instruction_cache(&self, rx_ptr: *const u8, size: usize) {
275            flush_instruction_cache(rx_ptr, size);
276        }
277
278        #[inline(always)]
279        unsafe fn protect_jit_memory(
280            &self,
281            _ptr: *const u8,
282            _size: usize,
283            access: ProtectJitAccess,
284        ) {
285            jit_allocator::protect_jit_memory(convert_access(access));
286        }
287    }
288
    #[cfg(not(feature = "no_std"))]
    mod thread_jit_alloc {
        use core::{cell::UnsafeCell, marker::PhantomData};

        use jit_allocator::JitAllocator;

        #[allow(unused_imports)]
        use super::*;

        thread_local! {
            // One allocator per thread. `UnsafeCell` (rather than `RefCell`) avoids the
            // runtime borrow-flag check; soundness relies on the access pattern in the
            // impl below.
            static THREAD_JIT_ALLOC: UnsafeCell<Box<JitAllocator>> =
                UnsafeCell::new(JitAllocator::new(Default::default()));
        }

        /// Marker type providing access to a thread-local JIT allocator.
        ///
        /// Unlike [`GlobalJitAlloc`], this allocator is neither [`Send`] nor [`Sync`].
        #[derive(Default, Clone)]
        pub struct ThreadJitAlloc(PhantomData<*mut ()>);

        impl JitAlloc for ThreadJitAlloc {
            fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
                THREAD_JIT_ALLOC
                    // SAFETY: the cell is thread-local and the exclusive borrow is confined
                    // to this `with` closure. Assumes `JitAllocator::alloc` never re-enters
                    // this thread-local (TODO confirm), so no aliasing `&mut` can exist.
                    .with(|a| unsafe { &mut *a.get() }.alloc(size))
                    .map_err(|_| JitAllocError)
            }

            unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
                THREAD_JIT_ALLOC
                    // SAFETY: same reasoning as in `alloc`.
                    .with(|a| unsafe { &mut *a.get() }.release(rx_ptr))
                    .map_err(|_| JitAllocError)
            }

            #[inline(always)]
            unsafe fn flush_instruction_cache(&self, rx_ptr: *const u8, size: usize) {
                flush_instruction_cache(rx_ptr, size);
            }

            #[inline(always)]
            unsafe fn protect_jit_memory(
                &self,
                _ptr: *const u8,
                _size: usize,
                access: ProtectJitAccess,
            ) {
                // Pointer and size are unused: protection toggling is per-thread.
                jit_allocator::protect_jit_memory(convert_access(access));
            }
        }
    }
338    #[cfg(not(feature = "no_std"))]
339    pub use thread_jit_alloc::*;
340}
341#[cfg(feature = "bundled_jit_alloc")]
342pub use bundled_jit_alloc::*;
343
/// Defines a global [`JitAlloc`] implementation which [`GlobalJitAlloc`] will defer to.
///
/// The macro can either take a path to a static variable or an expression resolving to a
/// `&'static JitAlloc`:
///
/// ```ignore
/// static GLOBAL_JIT: MyJitAlloc = MyJitAlloc::new();
/// global_jit_alloc!(GLOBAL_JIT);
/// ```
///
/// ```ignore
/// use std::sync::OnceLock;
///
/// global_jit_alloc!({
///     static WRAPPED_JIT: OnceLock<MyJitAlloc> = OnceLock::new();
///     WRAPPED_JIT.get_or_init(|| MyJitAlloc::new())
/// });
/// ```
///
/// Under the hood, the macro emits an unmangled `extern "Rust"` function with a well-known
/// name; the `custom_jit_alloc` implementation of [`GlobalJitAlloc`] declares and calls that
/// symbol, so exactly one invocation of this macro must exist in the final binary (zero or
/// more than one will fail at link time).
#[macro_export]
#[cfg(any(feature = "custom_jit_alloc", feature = "build-docs"))]
#[cfg_attr(feature = "build-docs", doc(cfg(feature = "custom_jit_alloc")))]
macro_rules! global_jit_alloc {
    ($static_var:path) => {
        #[no_mangle]
        extern "Rust" fn _closure_ffi__global_jit_alloc(
        ) -> &'static dyn $crate::jit_alloc::JitAlloc {
            &$static_var
        }
    };
    ($provider:expr) => {
        #[no_mangle]
        extern "Rust" fn _closure_ffi__global_jit_alloc(
        ) -> &'static dyn $crate::jit_alloc::JitAlloc {
            $provider
        }
    };
}
381#[cfg(feature = "custom_jit_alloc")]
382pub use global_jit_alloc;
383
#[cfg(feature = "custom_jit_alloc")]
mod custom_jit_alloc {
    use super::{GlobalJitAlloc, JitAlloc, JitAllocError, ProtectJitAccess};

    extern "Rust" {
        // Defined by a downstream crate via the `global_jit_alloc!` macro; resolved at
        // link time.
        fn _closure_ffi__global_jit_alloc() -> &'static dyn JitAlloc;
    }

    /// Resolves the downstream-provided global allocator.
    fn get_global_jit_alloc() -> &'static dyn JitAlloc {
        // SAFETY: the only definition of this symbol is generated by `global_jit_alloc!`
        // with exactly this signature; the call itself has no further preconditions.
        unsafe { _closure_ffi__global_jit_alloc() }
    }

    /// With `custom_jit_alloc`, the global allocator forwards every call to the
    /// implementation registered by the downstream crate.
    impl JitAlloc for GlobalJitAlloc {
        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
            get_global_jit_alloc().alloc(size)
        }

        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
            get_global_jit_alloc().release(rx_ptr)
        }

        unsafe fn flush_instruction_cache(&self, rx_ptr: *const u8, size: usize) {
            get_global_jit_alloc().flush_instruction_cache(rx_ptr, size);
        }

        unsafe fn protect_jit_memory(&self, ptr: *const u8, size: usize, access: ProtectJitAccess) {
            get_global_jit_alloc().protect_jit_memory(ptr, size, access);
        }
    }
}
413}