/// Error returned when a JIT allocation or release fails.
#[derive(Debug)]
pub struct JitAllocError;
/// Requested protection for a region of JIT memory.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ProtectJitAccess {
    /// Readable and writable, for emitting or patching code.
    ReadWrite = 0,
    /// Readable and executable, for running the emitted code.
    ReadExecute = 1,
}
/// Interface to a JIT memory allocator that hands out paired read-execute / read-write mappings.
pub trait JitAlloc {
    /// Allocates `size` bytes of JIT memory, returning a read-execute pointer and a read-write pointer to the same region.
    fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError>;
    /// Releases an allocation previously returned by `alloc`, identified by its read-execute pointer.
    unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError>;
    /// Switches the protection of the given region between read-write and read-execute.
    unsafe fn protect_jit_memory(ptr: *const u8, size: usize, access: ProtectJitAccess);
    /// Flushes the instruction cache for freshly written code at `rx_ptr`.
    unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize);
}
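// Forwarding impl: a shared reference to any `JitAlloc` is itself a `JitAlloc`,
// so APIs can accept allocators either by value or by reference.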
impl<J: JitAlloc> JitAlloc for &J {
    fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
        (*self).alloc(size)
    }
    unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
        (*self).release(rx_ptr)
    }
    #[inline(always)]
    unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
        J::flush_instruction_cache(rx_ptr, size);
    }
    #[inline(always)]
    unsafe fn protect_jit_memory(ptr: *const u8, size: usize, access: ProtectJitAccess) {
        J::protect_jit_memory(ptr, size, access);
    }
}
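// Implementations backed by the `jit_allocator` crate, compiled in for tests or when
// the `bundled_jit_alloc` feature is enabled.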
#[cfg(any(test, feature = "bundled_jit_alloc"))]
mod bundled_jit_alloc {
    use jit_allocator::JitAllocator;
    use super::*;
    /// Converts this module's [`ProtectJitAccess`] into the `jit_allocator` equivalent.
    #[inline(always)]
    fn convert_access(access: ProtectJitAccess) -> jit_allocator::ProtectJitAccess {
        match access {
            ProtectJitAccess::ReadExecute => jit_allocator::ProtectJitAccess::ReadExecute,
            ProtectJitAccess::ReadWrite => jit_allocator::ProtectJitAccess::ReadWrite,
        }
    }
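    // Single-threaded use: a `RefCell`-wrapped allocator is enough when it never crosses threads.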
    impl JitAlloc for core::cell::RefCell<JitAllocator> {
        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
            self.borrow_mut().alloc(size).map_err(|_| JitAllocError)
        }
        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
            self.borrow_mut().release(rx_ptr).map_err(|_| JitAllocError)
        }
        #[inline(always)]
        unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
            jit_allocator::flush_instruction_cache(rx_ptr, size);
        }
        #[inline(always)]
        unsafe fn protect_jit_memory(_ptr: *const u8, _size: usize, access: ProtectJitAccess) {
            jit_allocator::protect_jit_memory(convert_access(access));
        }
    }
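    // Lock-based wrappers for `std` builds, so a single allocator can be shared across threads.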
    #[cfg(not(feature = "no_std"))]
    impl JitAlloc for std::sync::RwLock<JitAllocator> {
        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
            self.write().unwrap().alloc(size).map_err(|_| JitAllocError)
        }
        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
            self.write().unwrap().release(rx_ptr).map_err(|_| JitAllocError)
        }
        #[inline(always)]
        unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
            jit_allocator::flush_instruction_cache(rx_ptr, size);
        }
        #[inline(always)]
        unsafe fn protect_jit_memory(_ptr: *const u8, _size: usize, access: ProtectJitAccess) {
            jit_allocator::protect_jit_memory(convert_access(access));
        }
    }
    #[cfg(not(feature = "no_std"))]
    impl JitAlloc for std::sync::Mutex<JitAllocator> {
        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
            self.lock().unwrap().alloc(size).map_err(|_| JitAllocError)
        }
        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
            self.lock().unwrap().release(rx_ptr).map_err(|_| JitAllocError)
        }
        #[inline(always)]
        unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
            jit_allocator::flush_instruction_cache(rx_ptr, size);
        }
        #[inline(always)]
        unsafe fn protect_jit_memory(_ptr: *const u8, _size: usize, access: ProtectJitAccess) {
            jit_allocator::protect_jit_memory(convert_access(access));
        }
    }
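    // Lazily-initialized, process-wide allocator backing `GlobalJitAlloc`; guarded by a
    // spin lock on `no_std` builds and by a `std` mutex otherwise.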
    #[cfg(feature = "no_std")]
    static GLOBAL_JIT_ALLOC: spin::Mutex<Option<alloc::boxed::Box<JitAllocator>>> =
        spin::Mutex::new(None);
    #[cfg(not(feature = "no_std"))]
    static GLOBAL_JIT_ALLOC: std::sync::Mutex<Option<Box<JitAllocator>>> =
        std::sync::Mutex::new(None);
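    /// Zero-sized handle to the lazily-created, process-wide `JitAllocator`.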
    #[derive(Default, Clone, Copy)]
    pub struct GlobalJitAlloc;
    impl GlobalJitAlloc {
        /// Runs `action` against the global allocator, creating it on first use.
        fn use_alloc<T>(&self, action: impl FnOnce(&mut JitAllocator) -> T) -> T {
            #[cfg(feature = "no_std")]
            let mut maybe_alloc = GLOBAL_JIT_ALLOC.lock();
            #[cfg(not(feature = "no_std"))]
            let mut maybe_alloc = GLOBAL_JIT_ALLOC.lock().unwrap();
            let alloc = maybe_alloc.get_or_insert_with(|| JitAllocator::new(Default::default()));
            action(alloc)
        }
    }
    impl JitAlloc for GlobalJitAlloc {
        fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
            self.use_alloc(|a| a.alloc(size)).map_err(|_| JitAllocError)
        }
        unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
            self.use_alloc(|a| a.release(rx_ptr)).map_err(|_| JitAllocError)
        }
        #[inline(always)]
        unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
            jit_allocator::flush_instruction_cache(rx_ptr, size);
        }
        #[inline(always)]
        unsafe fn protect_jit_memory(_ptr: *const u8, _size: usize, access: ProtectJitAccess) {
            jit_allocator::protect_jit_memory(convert_access(access));
        }
    }
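    // Per-thread allocator: avoids any locking by keeping one `JitAllocator` per thread,
    // at the cost of the handle being neither `Send` nor `Sync`.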
    #[cfg(not(feature = "no_std"))]
    mod thread_jit_alloc {
        use core::{cell::UnsafeCell, marker::PhantomData};
        use jit_allocator::JitAllocator;
        #[allow(unused_imports)]
        use super::*;
        thread_local! {
            static THREAD_JIT_ALLOC: UnsafeCell<Box<JitAllocator>> =
                UnsafeCell::new(JitAllocator::new(Default::default()));
        }
        /// Handle to the current thread's `JitAllocator`. The `PhantomData<*mut ()>`
        /// marker keeps it `!Send` and `!Sync`, so the allocator is never shared.
        #[derive(Default, Clone)]
        pub struct ThreadJitAlloc(PhantomData<*mut ()>);
        impl JitAlloc for ThreadJitAlloc {
            fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> {
                THREAD_JIT_ALLOC
                    .with(|a| unsafe { &mut *a.get() }.alloc(size))
                    .map_err(|_| JitAllocError)
            }
            unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> {
                THREAD_JIT_ALLOC
                    .with(|a| unsafe { &mut *a.get() }.release(rx_ptr))
                    .map_err(|_| JitAllocError)
            }
            #[inline(always)]
            unsafe fn flush_instruction_cache(rx_ptr: *const u8, size: usize) {
                jit_allocator::flush_instruction_cache(rx_ptr, size);
            }
            #[inline(always)]
            unsafe fn protect_jit_memory(_ptr: *const u8, _size: usize, access: ProtectJitAccess) {
                jit_allocator::protect_jit_memory(convert_access(access));
            }
        }
    }
    #[cfg(not(feature = "no_std"))]
    pub use thread_jit_alloc::*;
}
#[cfg(any(test, feature = "bundled_jit_alloc"))]
pub use bundled_jit_alloc::*;
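// A minimal usage sketch: it walks the call sequence suggested by the trait (alloc,
// switch the mapping to read-write, copy bytes through the RW alias, switch back to
// read-execute, flush the instruction cache, release). It never executes the memory,
// the zeroed bytes are placeholders rather than real machine code, and the module and
// test names are illustrative.
#[cfg(test)]
mod jit_alloc_usage {
    use super::*;

    #[test]
    fn alloc_write_release_roundtrip() {
        let alloc = GlobalJitAlloc;
        let code = [0u8; 16];
        let (rx, rw) = alloc.alloc(code.len()).expect("JIT allocation failed");
        unsafe {
            // Make the region writable (a no-op on platforms without per-thread W^X toggles).
            <GlobalJitAlloc as JitAlloc>::protect_jit_memory(rx, code.len(), ProtectJitAccess::ReadWrite);
            // Write through the read-write alias.
            core::ptr::copy_nonoverlapping(code.as_ptr(), rw, code.len());
            // Flip back to executable and make the new bytes visible to the CPU.
            <GlobalJitAlloc as JitAlloc>::protect_jit_memory(rx, code.len(), ProtectJitAccess::ReadExecute);
            <GlobalJitAlloc as JitAlloc>::flush_instruction_cache(rx, code.len());
            // Return the region to the allocator via its read-execute pointer.
            alloc.release(rx).expect("JIT release failed");
        }
    }
}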