pub mod asm;
pub mod codegen;
pub mod gas_sim;
pub mod predecode;
#[cfg(feature = "signals")]
pub mod signal;
use crate::memory::Memory;
use crate::vm::ExitReason;
use codegen::{Compiler, HelperFns};
use crate::{Gas, PVM_REGISTER_COUNT};
/// Shared state block handed to JIT-compiled code (in `rdi`, sysv64 ABI).
///
/// `#[repr(C)]` because the generated machine code addresses fields by fixed
/// byte offsets (see the `CTX_*` constants in `codegen` and the layout test
/// at the bottom of this file). Do not reorder or resize fields without
/// updating codegen.
#[repr(C)]
pub struct JitContext {
    /// Guest general-purpose registers.
    pub regs: [u64; 13],
    /// Remaining gas; signed so native code can drive it below zero.
    pub gas: i64,
    /// Paged fallback memory, leaked via `Box::into_raw` in `new` and
    /// reclaimed in `RecompiledPvm::drop`.
    pub memory: *mut Memory,
    /// Why native code returned: 0=halt, 1=panic, 2=out-of-gas,
    /// 3=page fault, 4=host call, 5=dynamic jump (matched in `run`).
    pub exit_reason: u32,
    /// Exit payload: faulting address, host-call id, or djump index.
    pub exit_arg: u32,
    pub heap_base: u32,
    /// Current guest heap top; advanced by `sbrk_helper`.
    pub heap_top: u32,
    /// Jump-table entries (borrows `RecompiledPvm::jump_table`'s buffer).
    pub jt_ptr: *const u32,
    pub jt_len: u32,
    _pad0: u32,
    /// Basic-block start bitmask (borrows `RecompiledPvm::bitmask`'s buffer).
    pub bb_starts: *const u8,
    pub bb_len: u32,
    _pad1: u32,
    /// PC at which the next native entry should begin executing.
    pub entry_pc: u32,
    /// PC recorded at the most recent exit from native code.
    pub pc: u32,
    /// Per-PC offsets into native code (borrows `dispatch_table`'s buffer).
    pub dispatch_table: *const i32,
    /// Base address of the executable native-code mapping.
    pub code_base: u64,
    /// Flat 4 GiB guest memory image, or null when flat memory is disabled.
    pub flat_buf: *mut u8,
    /// Per-page permission table: 0=none, 1=read, 2=read/write.
    pub flat_perms: *const u8,
    pub fast_reentry: u32,
    _pad2: u32,
}
/// An mmap'd, executable buffer holding the compiled machine code.
struct NativeCode {
    /// Base of the mapping (PROT_READ|PROT_EXEC after `new` succeeds).
    ptr: *mut u8,
    /// Mapping length in bytes.
    len: usize,
}
impl NativeCode {
    /// Copy `code` into a fresh anonymous mapping and make it executable.
    ///
    /// W^X discipline: the pages are mapped read/write for the copy, then
    /// flipped to read/execute with `mprotect`; they are never writable and
    /// executable at the same time.
    fn new(code: &[u8]) -> Result<Self, String> {
        if code.is_empty() {
            return Err("empty code buffer".into());
        }
        let len = code.len();
        let ptr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                len,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
                -1,
                0,
            )
        };
        if ptr == libc::MAP_FAILED {
            return Err("mmap failed".into());
        }
        let ptr = ptr as *mut u8;
        // SAFETY: `ptr` is a fresh, writable mapping of at least `len`
        // bytes, disjoint from `code`.
        unsafe {
            std::ptr::copy_nonoverlapping(code.as_ptr(), ptr, len);
            if libc::mprotect(ptr as *mut libc::c_void, len, libc::PROT_READ | libc::PROT_EXEC) != 0 {
                // Don't leak the mapping on failure.
                libc::munmap(ptr as *mut libc::c_void, len);
                return Err("mprotect failed".into());
            }
        }
        Ok(Self { ptr, len })
    }
    /// Entry point of the compiled code (context pointer in `rdi`).
    ///
    /// Calling the returned function is only sound if the buffer contains
    /// valid sysv64 code whose prologue starts at offset 0.
    fn entry(&self) -> unsafe extern "sysv64" fn(*mut JitContext) {
        // SAFETY: `ptr` is non-null and PROT_EXEC after `new`.
        unsafe { std::mem::transmute(self.ptr) }
    }
}
impl Drop for NativeCode {
    fn drop(&mut self) {
        // SAFETY: `ptr`/`len` describe exactly the mapping created in `new`
        // and nothing uses it after drop.
        unsafe {
            libc::munmap(self.ptr as *mut libc::c_void, self.len);
        }
    }
}
/// One mmap'd region backing fast guest-memory access, laid out as
/// `[perm table: NUM_PAGES bytes][ctx page: CTX_PAGE bytes][flat buf: 4 GiB]`.
///
/// Guest address `a` lives at `buf + a`; the `JitContext` sits in the page
/// immediately below `buf` (see `ctx_ptr`).
struct FlatMemory {
    /// Base of the whole mapping; also the start of the permission table.
    region: *mut u8,
    /// Total mapped size (`HEADER_SIZE + FLAT_BUF_SIZE`).
    region_size: usize,
    /// Start of the flat guest buffer (`region + HEADER_SIZE`).
    buf: *mut u8,
    /// Per-page permissions: 0=inaccessible, 1=read-only, 2=read/write.
    perms: *mut u8,
}
/// Size of the flat guest address space: the full 32-bit range (4 GiB).
const FLAT_BUF_SIZE: usize = 1 << 32;
/// Number of 4 KiB pages covering the flat buffer (2^32 / 2^12).
const NUM_PAGES: usize = 1 << 20;
/// One page reserved for the `JitContext`, placed just below the flat buffer.
const CTX_PAGE: usize = 4096;
/// Header prefix of the mapping: per-page permission table + context page.
const HEADER_SIZE: usize = NUM_PAGES + CTX_PAGE;
impl FlatMemory {
    /// Map the flat region and populate it from `memory` and, when present,
    /// the program's static `DataLayout`. Returns `None` if `mmap` fails.
    ///
    /// `MAP_NORESERVE` keeps the ~4 GiB reservation cheap: physical pages
    /// are only committed when first touched.
    fn new(memory: &Memory, layout: Option<&crate::program::DataLayout>) -> Option<Self> {
        let region_size = HEADER_SIZE + FLAT_BUF_SIZE;
        let region = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                region_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | libc::MAP_NORESERVE,
                -1,
                0,
            )
        };
        if region == libc::MAP_FAILED {
            return None;
        }
        let region = region as *mut u8;
        // Region layout: [perm table][ctx page][flat buffer].
        let perms = region;
        let buf = unsafe { region.add(HEADER_SIZE) };
        // Seed permissions and page contents from the paged Memory image.
        for (page_idx, access, data) in memory.pages_iter() {
            let perm = match access {
                crate::memory::PageAccess::Inaccessible => 0,
                crate::memory::PageAccess::ReadOnly => 1,
                crate::memory::PageAccess::ReadWrite => 2,
            };
            if (page_idx as usize) < NUM_PAGES {
                unsafe { *perms.add(page_idx as usize) = perm; }
            }
            if !data.is_empty() {
                let offset = (page_idx as usize) * 4096;
                if offset + data.len() <= FLAT_BUF_SIZE {
                    unsafe {
                        std::ptr::copy_nonoverlapping(data.as_ptr(), buf.add(offset), data.len());
                    }
                }
            }
        }
        if let Some(dl) = layout {
            // Mark the statically laid-out range read/write wholesale...
            let num_pages = (dl.mem_size as usize + 4095) / 4096;
            unsafe {
                std::ptr::write_bytes(perms, 2u8, num_pages.min(NUM_PAGES));
            }
            // ...then copy the argument / read-only / read-write segments to
            // their configured guest addresses.
            // NOTE(review): these copies trust `dl.*_start` + data length to
            // stay within the 4 GiB buffer — confirm the layout parser
            // enforces that bound.
            unsafe {
                if !dl.arg_data.is_empty() {
                    std::ptr::copy_nonoverlapping(dl.arg_data.as_ptr(), buf.add(dl.arg_start as usize), dl.arg_data.len());
                }
                if !dl.ro_data.is_empty() {
                    std::ptr::copy_nonoverlapping(dl.ro_data.as_ptr(), buf.add(dl.ro_start as usize), dl.ro_data.len());
                }
                if !dl.rw_data.is_empty() {
                    std::ptr::copy_nonoverlapping(dl.rw_data.as_ptr(), buf.add(dl.rw_start as usize), dl.rw_data.len());
                }
            }
        }
        Some(Self { region, region_size, buf, perms })
    }
    /// Address of the `JitContext` slot, directly below the flat buffer.
    fn ctx_ptr(&self) -> *mut u8 {
        unsafe { self.buf.sub(CTX_PAGE) }
    }
    /// Copy every page the paged `Memory` tracks back out of the flat
    /// buffer so `Memory`-based readers observe the JIT's writes.
    fn write_back(&self, memory: &mut Memory) {
        // Collect indices first: `page_data_mut` needs `memory` mutably.
        let page_indices: Vec<u32> = memory.pages_iter().map(|(idx, _, _)| idx).collect();
        for page_idx in page_indices {
            let offset = (page_idx as usize) * 4096;
            if offset + 4096 <= FLAT_BUF_SIZE {
                if let Some(page_data) = memory.page_data_mut(page_idx) {
                    // NOTE(review): copies a full 4096 bytes — assumes
                    // `page_data` is always a whole page; confirm.
                    unsafe {
                        std::ptr::copy_nonoverlapping(
                            self.buf.add(offset),
                            page_data.as_mut_ptr(),
                            4096,
                        );
                    }
                }
            }
        }
    }
    /// Strip all access above the heap top so out-of-range guest accesses
    /// trap (used with the signal-based fault path).
    #[cfg(feature = "signals")]
    fn install_guard_pages(&self, heap_top: u32) {
        let heap_top_page = (heap_top as usize + 4095) / 4096;
        let guard_start = unsafe { self.buf.add(heap_top_page * 4096) };
        let guard_len = FLAT_BUF_SIZE - heap_top_page * 4096;
        if guard_len > 0 {
            unsafe {
                libc::mprotect(
                    guard_start as *mut libc::c_void,
                    guard_len,
                    libc::PROT_NONE,
                );
            }
        }
    }
    /// Re-enable read/write on pages newly covered by a raised heap top.
    #[cfg(feature = "signals")]
    fn update_guard_pages(&self, old_top: u32, new_top: u32) {
        let old_page = (old_top as usize + 4095) / 4096;
        let new_page = (new_top as usize + 4095) / 4096;
        if new_page > old_page {
            let start = unsafe { self.buf.add(old_page * 4096) };
            let len = (new_page - old_page) * 4096;
            unsafe {
                libc::mprotect(
                    start as *mut libc::c_void,
                    len,
                    libc::PROT_READ | libc::PROT_WRITE,
                );
            }
        }
    }
}
impl Drop for FlatMemory {
    fn drop(&mut self) {
        // SAFETY: `region`/`region_size` describe the single mapping made in
        // `new`. Note this also unmaps the embedded JitContext page, so any
        // `*mut JitContext` derived from `ctx_ptr` dies with us.
        unsafe {
            libc::munmap(self.region as *mut libc::c_void, self.region_size);
        }
    }
}
/// Check that every page overlapped by the `len`-byte access at `addr` has
/// at least permission `min_perm` (0 = none, 1 = read, 2 = read/write).
///
/// Returns `false` when flat memory is not active, when any touched page is
/// outside the 2^20-page guest space, or when any touched page's recorded
/// permission is below `min_perm`.
fn flat_check_perm(ctx: &JitContext, addr: u32, len: u32, min_perm: u8) -> bool {
    if ctx.flat_perms.is_null() {
        return false;
    }
    // A zero-length access touches no pages. This also guards the `len - 1`
    // below against underflow (panic in debug builds, wrap in release); the
    // JIT helpers only pass 1/2/4/8, but be defensive.
    if len == 0 {
        return true;
    }
    let start_page = addr as usize / 4096;
    let end_page = (addr as usize + len as usize - 1) / 4096;
    for p in start_page..=end_page {
        // Pages at or beyond NUM_PAGES are outside the 4 GiB guest space.
        if p >= NUM_PAGES {
            return false;
        }
        // SAFETY: p < NUM_PAGES and flat_perms points at a NUM_PAGES-byte table.
        let perm = unsafe { *ctx.flat_perms.add(p) };
        if perm < min_perm {
            return false;
        }
    }
    true
}
/// Read `len` (1/2/4/8) bytes little-endian from the flat buffer at `addr`.
/// Unsupported lengths yield 0.
///
/// # Safety
/// The caller must have validated `[addr, addr + len)` with
/// `flat_check_perm` so the read stays inside the mapped flat region.
unsafe fn flat_read(ctx: &JitContext, addr: u32, len: usize) -> u64 {
    unsafe {
        let ptr = ctx.flat_buf.add(addr as usize);
        // Unaligned byte-array reads + from_le_bytes keep this endian-correct
        // regardless of host byte order.
        match len {
            1 => u64::from(ptr.read()),
            2 => u64::from(u16::from_le_bytes(std::ptr::read_unaligned(ptr as *const [u8; 2]))),
            4 => u64::from(u32::from_le_bytes(std::ptr::read_unaligned(ptr as *const [u8; 4]))),
            8 => u64::from_le_bytes(std::ptr::read_unaligned(ptr as *const [u8; 8])),
            _ => 0,
        }
    }
}
/// Write `bytes` into the flat buffer at guest address `addr`.
///
/// # Safety
/// The caller must have validated `[addr, addr + bytes.len())` with
/// `flat_check_perm` so the write stays inside the mapped flat region.
unsafe fn flat_write(ctx: &JitContext, addr: u32, bytes: &[u8]) {
    unsafe {
        std::ptr::copy_nonoverlapping(bytes.as_ptr(), ctx.flat_buf.add(addr as usize), bytes.len());
    }
}
/// JIT helper: load one guest byte. On a fault, records exit reason 3 with
/// the faulting address in `exit_arg` and returns 0.
extern "sysv64" fn mem_read_u8(ctx: *mut JitContext, addr: u32) -> u64 {
    let ctx = unsafe { &mut *ctx };
    if ctx.flat_buf.is_null() {
        // Slow path: paged Memory object.
        let mem = unsafe { &*ctx.memory };
        return match mem.read_u8(addr) {
            Some(v) => v as u64,
            None => {
                ctx.exit_reason = 3;
                ctx.exit_arg = addr;
                0
            }
        };
    }
    if !flat_check_perm(ctx, addr, 1, 1) {
        ctx.exit_reason = 3;
        ctx.exit_arg = addr;
        return 0;
    }
    unsafe { flat_read(ctx, addr, 1) }
}
/// JIT helper: load a little-endian u16. Faults as in `mem_read_u8`.
extern "sysv64" fn mem_read_u16(ctx: *mut JitContext, addr: u32) -> u64 {
    let ctx = unsafe { &mut *ctx };
    if ctx.flat_buf.is_null() {
        let mem = unsafe { &*ctx.memory };
        return match mem.read_u16_le(addr) {
            Some(v) => v as u64,
            None => {
                ctx.exit_reason = 3;
                ctx.exit_arg = addr;
                0
            }
        };
    }
    if !flat_check_perm(ctx, addr, 2, 1) {
        ctx.exit_reason = 3;
        ctx.exit_arg = addr;
        return 0;
    }
    unsafe { flat_read(ctx, addr, 2) }
}
/// JIT helper: load a little-endian u32. Faults as in `mem_read_u8`.
extern "sysv64" fn mem_read_u32(ctx: *mut JitContext, addr: u32) -> u64 {
    let ctx = unsafe { &mut *ctx };
    if ctx.flat_buf.is_null() {
        let mem = unsafe { &*ctx.memory };
        return match mem.read_u32_le(addr) {
            Some(v) => v as u64,
            None => {
                ctx.exit_reason = 3;
                ctx.exit_arg = addr;
                0
            }
        };
    }
    if !flat_check_perm(ctx, addr, 4, 1) {
        ctx.exit_reason = 3;
        ctx.exit_arg = addr;
        return 0;
    }
    unsafe { flat_read(ctx, addr, 4) }
}
/// JIT helper: load a little-endian u64. Faults as in `mem_read_u8`.
extern "sysv64" fn mem_read_u64_fn(ctx: *mut JitContext, addr: u32) -> u64 {
    let ctx = unsafe { &mut *ctx };
    if ctx.flat_buf.is_null() {
        let mem = unsafe { &*ctx.memory };
        return match mem.read_u64_le(addr) {
            Some(v) => v,
            None => {
                ctx.exit_reason = 3;
                ctx.exit_arg = addr;
                0
            }
        };
    }
    if !flat_check_perm(ctx, addr, 8, 1) {
        ctx.exit_reason = 3;
        ctx.exit_arg = addr;
        return 0;
    }
    unsafe { flat_read(ctx, addr, 8) }
}
/// JIT helper: store one guest byte. Returns 0 on success; on a fault,
/// records exit reason 3 with the faulting address and returns 1.
extern "sysv64" fn mem_write_u8(ctx: *mut JitContext, addr: u32, value: u64) -> u64 {
    let ctx = unsafe { &mut *ctx };
    if ctx.flat_buf.is_null() {
        // Slow path: paged Memory object.
        let mem = unsafe { &mut *ctx.memory };
        return match mem.write_u8(addr, value as u8) {
            crate::memory::MemoryAccess::Ok => 0,
            crate::memory::MemoryAccess::PageFault(a) => {
                ctx.exit_reason = 3;
                ctx.exit_arg = a;
                1
            }
        };
    }
    if !flat_check_perm(ctx, addr, 1, 2) {
        ctx.exit_reason = 3;
        ctx.exit_arg = addr;
        return 1;
    }
    unsafe { flat_write(ctx, addr, &[value as u8]) };
    0
}
/// JIT helper: store a little-endian u16. Faults as in `mem_write_u8`.
extern "sysv64" fn mem_write_u16(ctx: *mut JitContext, addr: u32, value: u64) -> u64 {
    let ctx = unsafe { &mut *ctx };
    if ctx.flat_buf.is_null() {
        let mem = unsafe { &mut *ctx.memory };
        return match mem.write_u16_le(addr, value as u16) {
            crate::memory::MemoryAccess::Ok => 0,
            crate::memory::MemoryAccess::PageFault(a) => {
                ctx.exit_reason = 3;
                ctx.exit_arg = a;
                1
            }
        };
    }
    if !flat_check_perm(ctx, addr, 2, 2) {
        ctx.exit_reason = 3;
        ctx.exit_arg = addr;
        return 1;
    }
    unsafe { flat_write(ctx, addr, &(value as u16).to_le_bytes()) };
    0
}
/// JIT helper: store a little-endian u32. Faults as in `mem_write_u8`.
extern "sysv64" fn mem_write_u32(ctx: *mut JitContext, addr: u32, value: u64) -> u64 {
    let ctx = unsafe { &mut *ctx };
    if ctx.flat_buf.is_null() {
        let mem = unsafe { &mut *ctx.memory };
        return match mem.write_u32_le(addr, value as u32) {
            crate::memory::MemoryAccess::Ok => 0,
            crate::memory::MemoryAccess::PageFault(a) => {
                ctx.exit_reason = 3;
                ctx.exit_arg = a;
                1
            }
        };
    }
    if !flat_check_perm(ctx, addr, 4, 2) {
        ctx.exit_reason = 3;
        ctx.exit_arg = addr;
        return 1;
    }
    unsafe { flat_write(ctx, addr, &(value as u32).to_le_bytes()) };
    0
}
/// JIT helper: store a little-endian u64. Faults as in `mem_write_u8`.
extern "sysv64" fn mem_write_u64_fn(ctx: *mut JitContext, addr: u32, value: u64) -> u64 {
    let ctx = unsafe { &mut *ctx };
    if ctx.flat_buf.is_null() {
        let mem = unsafe { &mut *ctx.memory };
        return match mem.write_u64_le(addr, value) {
            crate::memory::MemoryAccess::Ok => 0,
            crate::memory::MemoryAccess::PageFault(a) => {
                ctx.exit_reason = 3;
                ctx.exit_arg = a;
                1
            }
        };
    }
    if !flat_check_perm(ctx, addr, 8, 2) {
        ctx.exit_reason = 3;
        ctx.exit_arg = addr;
        return 1;
    }
    unsafe { flat_write(ctx, addr, &value.to_le_bytes()) };
    0
}
/// JIT helper implementing the guest `sbrk`: grow the heap by `size` bytes.
///
/// Returns the old heap top on success, or 0 when `size` is out of range.
/// Pages newly covered by the heap become read/write in whichever backing
/// store is active (flat permission table or paged Memory).
extern "sysv64" fn sbrk_helper(ctx: *mut JitContext, size: u64) -> u64 {
    let ctx = unsafe { &mut *ctx };
    let mem = unsafe { &mut *ctx.memory };
    let ps = crate::PVM_PAGE_SIZE;
    if size > u32::MAX as u64 {
        return 0;
    }
    if size == 0 {
        // sbrk(0): query the current break without moving it.
        return ctx.heap_top as u64;
    }
    let size_u32 = size as u32;
    let old_top = ctx.heap_top;
    let new_top = (old_top as u64) + (size_u32 as u64);
    // The heap may grow exactly to the 4 GiB boundary (new_top == 2^32
    // wraps to heap_top = 0 below) but never past it.
    if new_top > (u32::MAX as u64) + 1 {
        return 0;
    }
    let new_top_u32 = new_top as u32;
    let start_page = old_top / ps;
    // new_top_u32 == 0 encodes the 2^32 boundary: cover up to the last page.
    let end_page = if new_top_u32 == 0 { u32::MAX / ps } else { (new_top_u32 - 1) / ps };
    if !ctx.flat_perms.is_null() {
        // Flat mode: flip previously inaccessible pages to read/write.
        // The const cast is fine: the table is FlatMemory::perms (*mut u8).
        let perms = ctx.flat_perms as *mut u8;
        for p in start_page..=end_page {
            unsafe {
                if *perms.add(p as usize) == 0 {
                    *perms.add(p as usize) = 2;
                }
            }
        }
    } else {
        for p in start_page..=end_page {
            if !mem.is_page_mapped(p) {
                mem.map_page(p, crate::memory::PageAccess::ReadWrite);
            }
        }
    }
    // With signal-based faulting the grown range was protected; restore
    // read/write so direct loads/stores no longer trap.
    #[cfg(feature = "signals")]
    if !ctx.flat_buf.is_null() {
        let old_page = (old_top as usize + 4095) / 4096;
        let new_page = (new_top_u32 as usize + 4095) / 4096;
        if new_page > old_page {
            unsafe {
                let start = ctx.flat_buf.add(old_page * 4096);
                let len = (new_page - old_page) * 4096;
                libc::mprotect(start as *mut libc::c_void, len, libc::PROT_READ | libc::PROT_WRITE);
            }
        }
    }
    ctx.heap_top = new_top_u32;
    old_top as u64
}
/// A PVM instance executed via ahead-of-time native recompilation.
///
/// Owns the compiled code, the flat memory image, and — through a raw
/// pointer into the flat region's context page — the `JitContext` the
/// native code runs against. `code`/`bitmask`/`jump_table`/`dispatch_table`
/// must stay alive as long as the context, which holds raw pointers into
/// their heap buffers.
pub struct RecompiledPvm {
    native_code: NativeCode,
    /// Points into `flat_memory`'s ctx page; valid while `flat_memory` lives.
    ctx: *mut JitContext,
    code: Vec<u8>,
    bitmask: Vec<u8>,
    jump_table: Vec<u32>,
    initial_gas: Gas,
    dispatch_table: Vec<i32>,
    /// True when GREY_PVM_DEBUG is set; enables tracing + native-code dump.
    debug: bool,
    flat_memory: Option<FlatMemory>,
    #[cfg(feature = "signals")]
    signal_state: Option<Box<signal::SignalState>>,
}
impl RecompiledPvm {
/// Compile `code` to native machine code and assemble a runnable instance.
///
/// The `JitContext` is written into the page directly below the flat
/// buffer (`FlatMemory::ctx_ptr`) and holds raw pointers into
/// `jump_table`/`bitmask`/`dispatch_table`; moving those `Vec`s into the
/// returned struct is sound because a move never relocates a Vec's heap
/// allocation.
pub fn new(
    code: Vec<u8>,
    bitmask: Vec<u8>,
    jump_table: Vec<u32>,
    registers: [u64; PVM_REGISTER_COUNT],
    memory: Memory,
    gas: Gas,
    data_layout: Option<crate::program::DataLayout>,
) -> Result<Self, String> {
    let debug = std::env::var("GREY_PVM_DEBUG").is_ok();
    // Leak the Memory; ownership is reclaimed in Drop via Box::from_raw.
    let memory = Box::new(memory);
    let memory_ptr = Box::into_raw(memory);
    let _t1 = std::time::Instant::now();
    let flat_memory = FlatMemory::new(unsafe { &*memory_ptr }, data_layout.as_ref())
        .ok_or("failed to mmap flat memory region")?;
    let _t_flat = _t1.elapsed();
    let ctx_raw = flat_memory.ctx_ptr() as *mut JitContext;
    // SAFETY: ctx_ptr() yields a page-aligned CTX_PAGE-byte slot large
    // enough for JitContext; `write` initializes it without reading the
    // uninitialized memory.
    unsafe {
        ctx_raw.write(JitContext {
            regs: registers,
            gas: gas as i64,
            memory: memory_ptr,
            exit_reason: 0,
            exit_arg: 0,
            heap_base: 0,
            heap_top: 0,
            jt_ptr: std::ptr::null(),
            jt_len: jump_table.len() as u32,
            _pad0: 0,
            bb_starts: std::ptr::null(),
            bb_len: bitmask.len() as u32,
            _pad1: 0,
            entry_pc: 0,
            pc: 0,
            dispatch_table: std::ptr::null(),
            code_base: 0,
            flat_buf: flat_memory.buf,
            flat_perms: flat_memory.perms,
            fast_reentry: 0,
            _pad2: 0,
        });
    }
    let ctx = unsafe { &mut *ctx_raw };
    // These point into Vecs that are moved into `Self` below; the heap
    // buffers stay put across the move, so the pointers remain valid.
    ctx.jt_ptr = jump_table.as_ptr();
    ctx.bb_starts = bitmask.as_ptr() as *const u8;
    if debug {
        tracing::debug!(
            write_u8 = format_args!("0x{:x}", mem_write_u8 as *const () as usize),
            write_u32 = format_args!("0x{:x}", mem_write_u32 as *const () as usize),
            read_u8 = format_args!("0x{:x}", mem_read_u8 as *const () as usize),
            "recompiler helper function pointers"
        );
    }
    // Absolute addresses of the Rust helpers the generated code calls.
    let helpers = HelperFns {
        mem_read_u8: mem_read_u8 as *const () as u64,
        mem_read_u16: mem_read_u16 as *const () as u64,
        mem_read_u32: mem_read_u32 as *const () as u64,
        mem_read_u64: mem_read_u64_fn as *const () as u64,
        mem_write_u8: mem_write_u8 as *const () as u64,
        mem_write_u16: mem_write_u16 as *const () as u64,
        mem_write_u32: mem_write_u32 as *const () as u64,
        mem_write_u64: mem_write_u64_fn as *const () as u64,
        sbrk_helper: sbrk_helper as *const () as u64,
    };
    let _t2 = std::time::Instant::now();
    let compiler = Compiler::new(
        &bitmask,
        jump_table.clone(),
        helpers,
        code.len(),
    );
    let compile_result = compiler.compile(&code, &bitmask);
    let _t_compile = _t2.elapsed();
    let native = compile_result.native_code;
    let dispatch_table = compile_result.dispatch_table;
    if debug {
        // Best-effort dump for offline disassembly; failure is ignored.
        let _ = std::fs::write("/tmp/pvm_native.bin", &native);
        tracing::debug!(
            native_bytes = native.len(),
            basic_blocks = bitmask.iter().filter(|&&b| b == 1).count(),
            "wrote native code to /tmp/pvm_native.bin"
        );
    }
    let _t3 = std::time::Instant::now();
    let native_code = NativeCode::new(&native)?;
    let _t_native = _t3.elapsed();
    // Describe this instance's code range to the SIGSEGV recovery path.
    #[cfg(feature = "signals")]
    let signal_state = {
        signal::ensure_installed();
        let ss = Box::new(signal::SignalState {
            code_start: native_code.ptr as usize,
            code_end: native_code.ptr as usize + native_code.len,
            exit_label_addr: native_code.ptr as usize + compile_result.exit_label_offset as usize,
            ctx_ptr: ctx_raw,
            trap_table: compile_result.trap_table,
        });
        Some(ss)
    };
    tracing::debug!(
        flat_mem_us = _t_flat.as_micros() as u64,
        compile_us = _t_compile.as_micros() as u64,
        native_us = _t_native.as_micros() as u64,
        code_len = code.len(),
        native_len = native.len(),
        "recompiler::new() timing"
    );
    ctx.code_base = native_code.ptr as u64;
    let mut result = Self {
        native_code,
        ctx: ctx_raw,
        code,
        bitmask,
        jump_table,
        initial_gas: gas,
        dispatch_table,
        debug,
        flat_memory: Some(flat_memory),
        #[cfg(feature = "signals")]
        signal_state,
    };
    // Set last: `dispatch_table` has now reached its final owner.
    result.ctx_mut().dispatch_table = result.dispatch_table.as_ptr();
    Ok(result)
}
#[inline(always)]
fn ctx(&self) -> &JitContext {
    // SAFETY: `self.ctx` points into the live flat-memory ctx page, which
    // outlives `self` (flat_memory is a field and drops after us).
    unsafe { &*self.ctx }
}
#[inline(always)]
fn ctx_mut(&mut self) -> &mut JitContext {
    // SAFETY: as above; `&mut self` guarantees exclusive access here.
    unsafe { &mut *self.ctx }
}
/// Execute native code until it produces a terminal exit or a host call.
///
/// Dynamic jumps (exit reason 5) are resolved here and execution resumes
/// in-loop; every other reason is mapped to an `ExitReason` for the caller.
pub fn run(&mut self) -> ExitReason {
    loop {
        if self.debug {
            tracing::debug!(
                entry_pc = self.ctx().entry_pc,
                gas = self.ctx().gas,
                heap_base = format_args!("0x{:08x}", self.ctx().heap_base),
                heap_top = format_args!("0x{:08x}", self.ctx().heap_top),
                regs = ?&self.ctx().regs,
                "recompiler::run() entry"
            );
            // Debug sentinel: if this survives to the match below, native
            // code exited without ever setting an exit reason.
            self.ctx_mut().exit_reason = 0xDEAD;
        }
        // Publish the signal state so the fault handler can recover into
        // this context while native code runs.
        #[cfg(feature = "signals")]
        if let Some(ref mut ss) = self.signal_state {
            signal::SIGNAL_STATE.with(|cell| cell.set(&mut **ss as *mut _));
        }
        let entry = self.native_code.entry();
        // SAFETY: `entry` is code compiled for exactly this context layout,
        // and `self.ctx` stays valid for the duration of the call.
        unsafe { entry(self.ctx); }
        #[cfg(feature = "signals")]
        signal::SIGNAL_STATE.with(|cell| cell.set(std::ptr::null_mut()));
        if self.debug {
            tracing::debug!(
                exit_reason = self.ctx().exit_reason,
                exit_arg = self.ctx().exit_arg,
                gas = self.ctx().gas,
                pc = self.ctx().pc,
                regs = ?&self.ctx().regs,
                "recompiler::run() exit"
            );
        }
        match self.ctx().exit_reason {
            // Host call: remember the resume point, surface the call id.
            4 => {
                self.ctx_mut().entry_pc = self.ctx().pc;
                return ExitReason::HostCall(self.ctx().exit_arg);
            }
            0 => return self.handle_halt_exit(),
            1 => return self.handle_panic_exit(),
            2 => return self.handle_oog_exit(),
            3 => return self.handle_page_fault_exit(),
            // Dynamic jump: translate the table index and keep running.
            5 => {
                let idx = self.ctx().exit_arg;
                if let Some(target) = self.resolve_djump(idx) {
                    self.ctx_mut().entry_pc = target;
                    continue;
                } else {
                    return ExitReason::Panic;
                }
            }
            // Unknown reason (including the 0xDEAD sentinel): panic.
            _ => return ExitReason::Panic,
        }
    }
}
/// Map a dynamic-jump table index to a PVM program counter.
///
/// Returns `None` (treated as a panic by `run`) when the index is out of
/// bounds or the target is not marked as a basic-block start.
fn resolve_djump(&self, idx: u32) -> Option<u32> {
    let target = *self.jump_table.get(idx as usize)?;
    let is_block_start = self.bitmask.get(target as usize) == Some(&1);
    if is_block_start { Some(target) } else { None }
}
/// Exit reason 0: clean halt.
#[cold]
fn handle_halt_exit(&mut self) -> ExitReason {
    self.correct_gas_for_mid_block_exit(self.ctx().pc as usize);
    ExitReason::Halt
}
/// Exit reason 1: guest panic/trap.
#[cold]
fn handle_panic_exit(&mut self) -> ExitReason {
    self.correct_gas_for_mid_block_exit(self.ctx().pc as usize);
    ExitReason::Panic
}
/// Exit reason 3: memory fault; `exit_arg` carries the faulting address.
#[cold]
fn handle_page_fault_exit(&mut self) -> ExitReason {
    self.correct_gas_for_mid_block_exit(self.ctx().pc as usize);
    ExitReason::PageFault(self.ctx().exit_arg)
}
/// Exit reason 2: the native gas check fired.
///
/// When gas is exactly zero this is a genuine out-of-gas. Otherwise the
/// native charge was evidently coarser than the real cost at this exit, so
/// replay from the exit PC on the interpreter and adopt its final state.
#[cold]
fn handle_oog_exit(&mut self) -> ExitReason {
    let pc = self.ctx().pc;
    // NOTE(review): assumes ctx.gas >= 0 on this path; a negative value
    // would cast to a huge u64 interpreter budget — confirm codegen
    // guarantees non-negative gas when exiting with reason 2.
    let remaining_gas = self.ctx().gas as u64;
    if remaining_gas == 0 {
        self.ctx_mut().entry_pc = pc;
        return ExitReason::OutOfGas;
    }
    // `self.memory()` performs the flat->paged write-back, so the
    // interpreter starts from the JIT's current view of guest memory.
    let mut interp = crate::vm::Pvm::new(
        self.code.clone(),
        self.bitmask.clone(),
        self.jump_table.clone(),
        *self.registers(),
        self.memory().clone(),
        remaining_gas,
    );
    interp.pc = pc;
    interp.heap_base = self.ctx().heap_base;
    interp.heap_top = self.ctx().heap_top;
    let (exit, _) = interp.run();
    // Copy the interpreter's final state back into the JIT context.
    for i in 0..13 {
        self.ctx_mut().regs[i] = interp.registers[i];
    }
    self.ctx_mut().gas = interp.gas as i64;
    self.ctx_mut().pc = interp.pc;
    self.ctx_mut().entry_pc = interp.pc;
    self.ctx_mut().heap_base = interp.heap_base;
    self.ctx_mut().heap_top = interp.heap_top;
    self.sync_memory_from_interp(&interp.memory);
    exit
}
/// Guest registers, borrowed from the JIT context.
pub fn registers(&self) -> &[u64; 13] {
    &self.ctx().regs
}
/// Mutable access to the guest registers (e.g. for host-call results).
pub fn registers_mut(&mut self) -> &mut [u64; 13] {
    &mut self.ctx_mut().regs
}
/// Remaining gas, clamped at zero (`ctx.gas` is signed and can be driven
/// below zero by native code).
pub fn gas(&self) -> u64 {
    self.ctx().gas.max(0) as u64
}
/// Borrow the paged `Memory`, first syncing JIT writes out of the flat
/// buffer so the returned view is current.
///
/// NOTE(review): mutates the Memory through a raw pointer despite taking
/// `&self` — sound only while no other alias is live; confirm call sites.
pub fn memory(&self) -> &Memory {
    if let Some(ref fm) = self.flat_memory {
        fm.write_back(unsafe { &mut *self.ctx().memory });
    }
    unsafe { &*self.ctx().memory }
}
/// Mutable variant of `memory`; performs the same write-back first.
pub fn memory_mut(&mut self) -> &mut Memory {
    if let Some(ref fm) = self.flat_memory {
        fm.write_back(unsafe { &mut *self.ctx().memory });
    }
    unsafe { &mut *self.ctx().memory }
}
/// Read one guest byte, honoring page permissions (needs perm >= 1).
/// Returns `None` for out-of-range or unreadable pages.
pub fn read_byte(&self, addr: u32) -> Option<u8> {
    let fm = match self.flat_memory {
        Some(ref fm) => fm,
        None => return unsafe { &*self.ctx().memory }.read_u8(addr),
    };
    let page = addr as usize / 4096;
    if page >= NUM_PAGES {
        return None;
    }
    let readable = unsafe { *fm.perms.add(page) } >= 1;
    if readable {
        Some(unsafe { *fm.buf.add(addr as usize) })
    } else {
        None
    }
}
/// Write one guest byte, honoring page permissions (needs perm >= 2).
/// Returns `false` for out-of-range or non-writable pages.
pub fn write_byte(&mut self, addr: u32, value: u8) -> bool {
    let fm = match self.flat_memory {
        Some(ref fm) => fm,
        None => {
            let mem = unsafe { &mut *self.ctx().memory };
            return matches!(mem.write_u8(addr, value), crate::memory::MemoryAccess::Ok);
        }
    };
    let page = addr as usize / 4096;
    if page >= NUM_PAGES || unsafe { *fm.perms.add(page) } < 2 {
        return false;
    }
    unsafe { *fm.buf.add(addr as usize) = value; }
    true
}
/// Read `len` guest bytes starting at `addr` (address wraps at 2^32),
/// checking read permission per byte. `None` if any byte is unreadable.
pub fn read_bytes(&self, addr: u32, len: u32) -> Option<Vec<u8>> {
    let fm = match self.flat_memory {
        Some(ref fm) => fm,
        None => return unsafe { &*self.ctx().memory }.read_bytes(addr, len),
    };
    // Collecting into Option<Vec<_>> short-circuits on the first failure.
    (0..len)
        .map(|i| {
            let a = addr.wrapping_add(i) as usize;
            let page = a / 4096;
            if page < NUM_PAGES && unsafe { *fm.perms.add(page) } >= 1 {
                Some(unsafe { *fm.buf.add(a) })
            } else {
                None
            }
        })
        .collect()
}
/// Write `data` to guest memory starting at `addr` (address wraps at 2^32),
/// checking write permission per byte.
///
/// On failure returns `false`; bytes written before the failing byte are
/// left in place (matching the per-byte fallback path).
pub fn write_bytes(&mut self, addr: u32, data: &[u8]) -> bool {
    if let Some(ref fm) = self.flat_memory {
        return data.iter().enumerate().all(|(i, &byte)| {
            let a = addr.wrapping_add(i as u32) as usize;
            let page = a / 4096;
            if page >= NUM_PAGES || unsafe { *fm.perms.add(page) } < 2 {
                return false;
            }
            unsafe { *fm.buf.add(a) = byte; }
            true
        });
    }
    let mem = unsafe { &mut *self.ctx().memory };
    data.iter().enumerate().all(|(i, &byte)| {
        matches!(
            mem.write_u8(addr.wrapping_add(i as u32), byte),
            crate::memory::MemoryAccess::Ok
        )
    })
}
/// Placeholder for mid-block gas correction; currently a no-op.
/// NOTE(review): the exit handlers still call this — confirm whether any
/// gas adjustment is actually needed here or the call sites can go.
fn correct_gas_for_mid_block_exit(&mut self, _exit_pc: usize) {
}
/// Push interpreter memory state (permissions and page contents) back into
/// the flat image after the out-of-gas interpreter fallback has run.
fn sync_memory_from_interp(&mut self, memory: &Memory) {
    if let Some(ref fm) = self.flat_memory {
        for (page_idx, access, data) in memory.pages_iter() {
            let perm: u8 = match access {
                crate::memory::PageAccess::Inaccessible => 0,
                crate::memory::PageAccess::ReadOnly => 1,
                crate::memory::PageAccess::ReadWrite => 2,
            };
            if (page_idx as usize) < NUM_PAGES {
                unsafe { *fm.perms.add(page_idx as usize) = perm; }
            }
            let offset = (page_idx as usize) * 4096;
            if offset + data.len() <= FLAT_BUF_SIZE {
                unsafe {
                    std::ptr::copy_nonoverlapping(data.as_ptr(), fm.buf.add(offset), data.len());
                }
            }
        }
    }
}
/// PC recorded at the last exit from native code.
pub fn pc(&self) -> u32 {
    self.ctx().pc
}
/// Set both the resume PC (`entry_pc`) and the reported PC.
pub fn set_pc(&mut self, pc: u32) {
    self.ctx_mut().entry_pc = pc;
    self.ctx_mut().pc = pc;
}
pub fn set_gas(&mut self, gas: Gas) {
    self.ctx_mut().gas = gas as i64;
}
/// Overwrite a single guest register. Panics if `idx >= 13`.
pub fn set_register(&mut self, idx: usize, val: u64) {
    self.ctx_mut().regs[idx] = val;
}
pub fn heap_top(&self) -> u32 {
    self.ctx().heap_top
}
/// Move the heap top; with signal-based faulting this also re-enables
/// access to pages newly covered by a raised top.
pub fn set_heap_top(&mut self, top: u32) {
    #[cfg(feature = "signals")]
    if let Some(ref fm) = self.flat_memory {
        let old = self.ctx().heap_top;
        fm.update_guard_pages(old, top);
    }
    self.ctx_mut().heap_top = top;
}
/// The compiled machine code bytes (e.g. for disassembly in tests).
pub fn native_code_bytes(&self) -> &[u8] {
    // SAFETY: ptr/len describe the live executable mapping owned by self.
    unsafe { std::slice::from_raw_parts(self.native_code.ptr, self.native_code.len) }
}
}
impl Drop for RecompiledPvm {
    fn drop(&mut self) {
        // Reclaim the Memory leaked via Box::into_raw in `new`. This runs
        // before the fields drop, so `self.ctx` (which lives inside
        // flat_memory's mapping) is still valid to read here.
        unsafe {
            let _ = Box::from_raw(self.ctx().memory);
        }
    }
}
/// Parse a program blob and construct a ready-to-run `RecompiledPvm`.
///
/// Returns `None` if the blob fails to parse or compilation/mapping fails.
pub fn initialize_program_recompiled(
    blob: &[u8],
    arguments: &[u8],
    gas: Gas,
) -> Option<RecompiledPvm> {
    let parsed = crate::program::parse_program_blob(blob, arguments, gas, true)?;
    let mut rpvm = RecompiledPvm::new(
        parsed.code,
        parsed.bitmask,
        parsed.jump_table,
        parsed.registers,
        parsed.memory,
        gas,
        parsed.layout,
    ).ok()?;
    // Heap bounds come from the parsed layout; `new` leaves them at zero.
    rpvm.ctx_mut().heap_base = parsed.heap_base;
    rpvm.ctx_mut().heap_top = parsed.heap_top;
    // With signal-based faulting, protect everything above the heap top.
    #[cfg(feature = "signals")]
    if let Some(ref fm) = rpvm.flat_memory {
        fm.install_guard_pages(parsed.heap_top);
    }
    Some(rpvm)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::memory::PageAccess;
use codegen::{CTX_REGS, CTX_GAS, CTX_EXIT_REASON, CTX_EXIT_ARG, CTX_ENTRY_PC, CTX_PC,
CTX_DISPATCH_TABLE, CTX_CODE_BASE, CTX_OFFSET};
/// Verify the Rust struct layout matches the byte offsets hard-coded in
/// codegen (generated code addresses fields via `CTX_* + CTX_OFFSET`).
#[test]
fn test_jit_context_layout() {
    let ctx = JitContext {
        regs: [0; 13],
        gas: 0,
        memory: std::ptr::null_mut(),
        exit_reason: 0,
        exit_arg: 0,
        heap_base: 0,
        heap_top: 0,
        jt_ptr: std::ptr::null(),
        jt_len: 0,
        _pad0: 0,
        bb_starts: std::ptr::null(),
        bb_len: 0,
        _pad1: 0,
        entry_pc: 0,
        pc: 0,
        dispatch_table: std::ptr::null(),
        code_base: 0,
        flat_buf: std::ptr::null_mut(),
        flat_perms: std::ptr::null(),
        fast_reentry: 0,
        _pad2: 0,
    };
    let base = &ctx as *const JitContext as usize;
    // Codegen offsets are relative to CTX_OFFSET; convert to struct offsets.
    let so = |codegen_off: i32| -> usize { (codegen_off + CTX_OFFSET) as usize };
    assert_eq!(&ctx.regs as *const _ as usize - base, so(CTX_REGS));
    assert_eq!(&ctx.gas as *const _ as usize - base, so(CTX_GAS));
    assert_eq!(&ctx.exit_reason as *const _ as usize - base, so(CTX_EXIT_REASON));
    assert_eq!(&ctx.exit_arg as *const _ as usize - base, so(CTX_EXIT_ARG));
    assert_eq!(&ctx.entry_pc as *const _ as usize - base, so(CTX_ENTRY_PC));
    assert_eq!(&ctx.pc as *const _ as usize - base, so(CTX_PC));
    assert_eq!(&ctx.dispatch_table as *const _ as usize - base, so(CTX_DISPATCH_TABLE));
    assert_eq!(&ctx.code_base as *const _ as usize - base, so(CTX_CODE_BASE));
}
/// Opcode 0 (trap) in a single basic block must compile and panic.
#[test]
fn test_recompile_trap() {
    let code = vec![0u8];
    let bitmask = vec![1u8];
    let registers = [0u64; 13];
    let memory = Memory::new();
    let mut pvm = RecompiledPvm::new(code, bitmask, vec![], registers, memory, 1000, None)
        .expect("compilation should succeed");
    let exit = pvm.run();
    assert_eq!(exit, ExitReason::Panic);
}
/// Opcode 10 (ecalli) must surface the immediate as a host-call id.
#[test]
fn test_recompile_ecalli() {
    let code = vec![10, 42];
    let bitmask = vec![1, 0];
    let registers = [0u64; 13];
    let memory = Memory::new();
    let mut pvm = RecompiledPvm::new(code, bitmask, vec![], registers, memory, 1000, None)
        .expect("compilation should succeed");
    let exit = pvm.run();
    assert_eq!(exit, ExitReason::HostCall(42));
}
/// Opcode 51 (load imm) sets a register; running off the end panics.
#[test]
fn test_recompile_load_imm() {
    let code = vec![51, 0, 123, 0];
    let bitmask = vec![1, 0, 0, 1];
    let registers = [0u64; 13];
    let memory = Memory::new();
    let mut pvm = RecompiledPvm::new(code, bitmask, vec![], registers, memory, 1000, None)
        .expect("compilation should succeed");
    let exit = pvm.run();
    assert_eq!(pvm.registers()[0], 123);
    assert_eq!(exit, ExitReason::Panic);
}
/// Two load-imms plus opcode 200 (add64) followed by an ecalli.
#[test]
fn test_recompile_add64() {
    let code = vec![
        51, 0, 10, 51, 1, 20, 200, 0x10, 2, 10, 0,
    ];
    let bitmask = vec![1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0];
    let registers = [0u64; 13];
    let memory = Memory::new();
    let mut pvm = RecompiledPvm::new(code, bitmask, vec![], registers, memory, 1000, None)
        .expect("compilation should succeed");
    let exit = pvm.run();
    assert_eq!(pvm.registers()[2], 30);
    assert_eq!(exit, ExitReason::HostCall(0));
}
/// With a zero gas budget the very first block must exit out-of-gas.
#[test]
fn test_recompile_out_of_gas() {
    let code = vec![51, 0, 42];
    let bitmask = vec![1, 0, 0];
    let registers = [0u64; 13];
    let memory = Memory::new();
    let mut pvm = RecompiledPvm::new(code, bitmask, vec![], registers, memory, 0, None)
        .expect("compilation should succeed");
    let exit = pvm.run();
    assert_eq!(exit, ExitReason::OutOfGas);
}
/// Differential test: lock-step the interpreter and the recompiler over a
/// blob from disk, comparing gas, exit reason, and registers after every
/// host call. Ignored by default because it needs /tmp/test_code_blob.bin.
#[test]
#[ignore]
fn test_compare_interpreter_recompiler() {
    let blob = match std::fs::read("/tmp/test_code_blob.bin") {
        Ok(b) => b,
        Err(_) => {
            eprintln!("Skipping comparison test: /tmp/test_code_blob.bin not found");
            return;
        }
    };
    let args = &[0u8, 0, 0, 0];
    let gas = 900_000u64;
    let mut interp = crate::program::initialize_program(&blob, args, gas)
        .expect("interpreter init failed");
    // Start both engines from the same (non-default) PC.
    interp.pc = 5;
    let mut recomp = initialize_program_recompiled(&blob, args, gas)
        .expect("recompiler init failed");
    recomp.set_pc(5);
    let mut step = 0;
    loop {
        step += 1;
        let interp_exit = interp.run();
        let recomp_exit = recomp.run();
        let interp_exit_clone = interp_exit.0.clone();
        let recomp_gas = recomp.gas();
        let interp_gas = interp.gas;
        eprintln!("Step {}: interp_exit={:?} recomp_exit={:?}", step, interp_exit_clone, recomp_exit);
        eprintln!(" interp: gas={} pc={} regs={:?}", interp_gas, interp.pc, &interp.registers);
        eprintln!(" recomp: gas={} pc={} regs={:?}", recomp_gas, recomp.pc(), recomp.registers());
        let gas_match = interp_gas == recomp_gas;
        let exit_match = interp_exit_clone == recomp_exit;
        let reg_match = (0..13).all(|i| interp.registers[i] == recomp.registers()[i]);
        if !gas_match || !exit_match || !reg_match {
            // Dump the interpreter's PC trace to localize the divergence.
            // NOTE(review): the message says 100 PCs but 165 are printed.
            let trace = &interp.pc_trace;
            eprintln!("Interpreter trace (first 100 PCs from tracing start):");
            for (i, &(pc, op)) in trace.iter().take(165).enumerate() {
                let opname = crate::instruction::Opcode::from_byte(op)
                    .map(|o| format!("{:?}", o))
                    .unwrap_or_else(|| format!("?{}", op));
                eprintln!(" [{:3}] pc={:5} op={}", i, pc, opname);
            }
            if !gas_match {
                panic!("Gas mismatch at step {}: interp={} recomp={}", step, interp_gas, recomp_gas);
            }
            if !exit_match {
                panic!("Exit mismatch at step {}: interp={:?} recomp={:?}", step, interp_exit_clone, recomp_exit);
            }
            for i in 0..13 {
                if interp.registers[i] != recomp.registers()[i] {
                    panic!("Register φ[{}] mismatch at step {}: interp=0x{:x} recomp=0x{:x}",
                        i, step, interp.registers[i], recomp.registers()[i]);
                }
            }
        }
        if step == 3 {
            let trace = &interp.pc_trace;
            eprintln!("Interpreter trace (first 50 PCs after step 2):");
            for (i, &(pc, op)) in trace.iter().take(50).enumerate() {
                let opname = crate::instruction::Opcode::from_byte(op)
                    .map(|o| format!("{:?}", o))
                    .unwrap_or_else(|| format!("?{}", op));
                eprintln!(" [{:3}] pc={:5} op={}", i, pc, opname);
            }
        }
        match interp_exit_clone {
            ExitReason::Halt | ExitReason::Panic | ExitReason::OutOfGas | ExitReason::PageFault(_) => {
                eprintln!("Both exited with {:?} after {} steps", interp_exit_clone, step);
                break;
            }
            ExitReason::HostCall(id) => {
                // Feed both engines the same synthetic host-call result so
                // they stay in lock-step; host call 0 returns the gas value.
                let what = u64::MAX - 2;
                interp.registers[7] = what;
                recomp.registers_mut()[7] = what;
                if id == 0 {
                    interp.registers[7] = interp.gas;
                    recomp.registers_mut()[7] = recomp.gas();
                }
                interp.tracing_enabled = true;
            }
        }
        if step > 100 {
            eprintln!("Reached 100 steps, stopping comparison");
            break;
        }
    }
}
}