#![allow(clippy::deprecated_cfg_attr)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unreachable_code)]
extern crate libc;
use std::fmt::Debug;
use std::mem;
use std::collections::HashMap;
use std::fmt::Formatter;
use std::fmt::Error as FormatterError;
use std::ops::{Index, IndexMut};
use rand::{rngs::ThreadRng, Rng};
use crate::{
vm::{Config, Executable, ProgramResult, InstructionMeter, Tracer, DynTraitFatPointer, SYSCALL_CONTEXT_OBJECTS_OFFSET, REPORT_UNRESOLVED_SYMBOL_INDEX},
ebpf::{self, INSN_SIZE, FIRST_SCRATCH_REG, SCRATCH_REGS, STACK_REG, MM_STACK_START},
error::{UserDefinedError, EbpfError},
memory_region::{AccessType, MemoryMapping, MemoryRegion},
user_error::UserError,
x86::*,
};
/// Runtime argument handed to the generated machine code's entrypoint.
///
/// Has a fixed layout so the JITed code can address the fields by constant
/// offsets relative to the struct's base address.
pub struct JitProgramArgument<'a> {
/// The memory mapping the eBPF program operates on
pub memory_mapping: MemoryMapping<'a>,
/// Zero-length marker: the syscall context object pointers are expected to
/// follow immediately after this struct in memory (unsized tail).
pub syscall_context_objects: [*const u8; 0],
}
/// The memory backing a compiled program: one contiguous allocation split
/// into a pc→host-offset lookup table and the text (machine code) section.
struct JitProgramSections {
/// One entry per BPF instruction: the offset of its generated code
pc_section: &'static mut [u64],
/// The generated x86 machine code
text_section: &'static mut [u8],
/// Total mmap'ed size in bytes (zero means "nothing allocated", e.g. on Windows)
total_allocation_size: usize,
}
#[cfg(not(target_os = "windows"))]
/// Invokes a libc function, retrying up to three times on failure; if every
/// attempt fails, returns `EbpfError::LibcInvocationFailed` (with the
/// stringified arguments and the current errno) from the ENCLOSING function.
///
/// NOTE(review): expands to a `return Err(...)`, so it may only be used inside
/// functions returning `Result<_, EbpfError<E>>`; errno is only read on
/// Linux/macOS/iOS/FreeBSD — other unix targets would fail to compile.
macro_rules! libc_error_guard {
// mmap signals failure via MAP_FAILED instead of a non-zero return value,
// and the chosen address is written back through `$addr`.
(succeeded?, mmap, $addr:expr, $($arg:expr),*) => {{
*$addr = libc::mmap(*$addr, $($arg),*);
*$addr != libc::MAP_FAILED
}};
// Generic case: libc convention, zero return value means success.
(succeeded?, $function:ident, $($arg:expr),*) => {
libc::$function($($arg),*) == 0
};
($function:ident, $($arg:expr),*) => {{
const RETRY_COUNT: usize = 3;
for i in 0..RETRY_COUNT {
if libc_error_guard!(succeeded?, $function, $($arg),*) {
break;
} else if i + 1 == RETRY_COUNT {
// Last attempt failed: capture the arguments and errno for the error.
let args = vec![$(format!("{:?}", $arg)),*];
#[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
let errno = *libc::__error();
#[cfg(target_os = "linux")]
let errno = *libc::__errno_location();
return Err(EbpfError::LibcInvocationFailed(stringify!($function), args, errno));
}
}
}};
}
impl JitProgramSections {
    /// Allocates one contiguous, page-aligned region holding the pc→offset
    /// lookup table followed by the text section.
    ///
    /// `_pc` is the number of lookup-table entries, `_code_size` the requested
    /// text size in bytes; both are rounded up to whole pages. On Windows no
    /// memory is allocated and empty sections are returned.
    fn new<E: UserDefinedError>(_pc: usize, _code_size: usize) -> Result<Self, EbpfError<E>> {
        #[cfg(target_os = "windows")]
        {
            Ok(Self {
                pc_section: &mut [],
                text_section: &mut [],
                total_allocation_size: 0,
            })
        }
        #[cfg(not(target_os = "windows"))]
        unsafe {
            // Rounds `value` up to the next multiple of `page_size`.
            fn round_to_page_size(value: usize, page_size: usize) -> usize {
                (value + page_size - 1) / page_size * page_size
            }
            let page_size = libc::sysconf(libc::_SC_PAGESIZE) as usize;
            let pc_loc_table_size = round_to_page_size(_pc * 8, page_size);
            let code_size = round_to_page_size(_code_size, page_size);
            let mut raw: *mut libc::c_void = std::ptr::null_mut();
            // NOTE(review): passes fd = 0 with MAP_ANONYMOUS; POSIX suggests -1 —
            // works on the supported targets, but confirm before porting.
            libc_error_guard!(mmap, &mut raw, pc_loc_table_size + code_size, libc::PROT_READ | libc::PROT_WRITE, libc::MAP_ANONYMOUS | libc::MAP_PRIVATE, 0, 0);
            // Zero the lookup table; fill the text section with 0xCC (x86 INT3)
            // so jumping into unemitted code traps instead of executing garbage.
            std::ptr::write_bytes(raw, 0x00, pc_loc_table_size);
            std::ptr::write_bytes(raw.add(pc_loc_table_size), 0xcc, code_size);
            Ok(Self {
                pc_section: std::slice::from_raw_parts_mut(raw as *mut u64, _pc),
                text_section: std::slice::from_raw_parts_mut(raw.add(pc_loc_table_size) as *mut u8, code_size),
                total_allocation_size: pc_loc_table_size + code_size,
            })
        }
    }
    /// After code generation: makes the lookup table read-only and the text
    /// section executable and non-writable (W^X).
    fn seal<E: UserDefinedError>(&mut self) -> Result<(), EbpfError<E>> {
        if self.total_allocation_size > 0 {
            #[cfg(not(target_os = "windows"))]
            unsafe {
                // Bug fix: `pc_section.len()` is the number of u64 ENTRIES, not
                // bytes; passing it to mprotect left the tail of a multi-page
                // table writable. Protect the full byte size instead.
                libc_error_guard!(mprotect, self.pc_section.as_mut_ptr() as *mut _, self.pc_section.len() * std::mem::size_of::<u64>(), libc::PROT_READ);
                libc_error_guard!(mprotect, self.text_section.as_mut_ptr() as *mut _, self.text_section.len(), libc::PROT_EXEC | libc::PROT_READ);
            }
        }
        Ok(())
    }
}
impl Drop for JitProgramSections {
/// Releases the mmap'ed allocation (lookup table + text section), if any.
fn drop(&mut self) {
if self.total_allocation_size > 0 {
#[cfg(not(target_os = "windows"))]
unsafe {
// pc_section starts at the base of the whole allocation (see `new`).
libc::munmap(self.pc_section.as_ptr() as *mut _, self.total_allocation_size);
}
}
}
}
/// A compiled eBPF program, ready to be executed.
pub struct JitProgram<E: UserDefinedError, I: InstructionMeter> {
/// Owns the executable memory; kept alive for as long as `main` may be called.
_sections: JitProgramSections,
/// Entrypoint of the generated code
/// (result slot, initial value, runtime argument, instruction meter) -> status.
pub main: unsafe fn(&ProgramResult<E>, u64, &JitProgramArgument, &mut I) -> i64,
}
impl<E: UserDefinedError, I: InstructionMeter> Debug for JitProgram<E, I> {
    /// Renders the program as the address of its entrypoint.
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(formatter, "JitProgram {:?}", &self.main as *const _)
    }
}
impl<E: UserDefinedError, I: InstructionMeter> PartialEq for JitProgram<E, I> {
    /// Two programs are considered equal iff they share the same entrypoint address.
    fn eq(&self, other: &Self) -> bool {
        self.main as *const u8 == other.main as *const u8
    }
}
impl<E: UserDefinedError, I: InstructionMeter> JitProgram<E, I> {
/// Compiles `executable` to native code and wraps the result.
pub fn new(executable: &dyn Executable<E, I>) -> Result<Self, EbpfError<E>> {
let program = executable.get_text_bytes().1;
let mut jit = JitCompiler::new::<E>(program, executable.get_config())?;
jit.compile::<E, I>(executable)?;
// SAFETY: `compile` emits the prologue first, so offset 0 of the text
// section is a function with the ABI of the `main` field.
let main = unsafe { mem::transmute(jit.result.text_section.as_ptr()) };
Ok(Self {
_sections: jit.result,
main,
})
}
}
// Pseudo program counters identifying the compiler's internal handler
// routines (see `handler_anchors` / `set_anchor`). They sit just below
// usize::MAX so they can never collide with a real instruction pc. The gaps
// in the numbering reserve consecutive slots for per-size / per-access-type
// variants: `emit_address_translation` computes
// TARGET_PC_TRANSLATE_MEMORY_ADDRESS + log2(len) + 4 * access_type.
const TARGET_PC_TRACE: usize = std::usize::MAX - 29;
const TARGET_PC_TRANSLATE_PC: usize = std::usize::MAX - 28;
const TARGET_PC_TRANSLATE_PC_LOOP: usize = std::usize::MAX - 27;
const TARGET_PC_TRANSLATE_MEMORY_ADDRESS: usize = std::usize::MAX - 26;
const TARGET_PC_MEMORY_ACCESS_VIOLATION: usize = std::usize::MAX - 18;
const TARGET_PC_CALL_EXCEEDED_MAX_INSTRUCTIONS: usize = std::usize::MAX - 10;
const TARGET_PC_CALL_DEPTH_EXCEEDED: usize = std::usize::MAX - 9;
const TARGET_PC_CALL_OUTSIDE_TEXT_SEGMENT: usize = std::usize::MAX - 8;
const TARGET_PC_CALLX_UNSUPPORTED_INSTRUCTION: usize = std::usize::MAX - 7;
const TARGET_PC_CALL_UNSUPPORTED_INSTRUCTION: usize = std::usize::MAX - 6;
const TARGET_PC_DIV_BY_ZERO: usize = std::usize::MAX - 5;
const TARGET_PC_EXCEPTION_AT: usize = std::usize::MAX - 4;
const TARGET_PC_RUST_EXCEPTION: usize = std::usize::MAX - 3;
const TARGET_PC_EXIT: usize = std::usize::MAX - 2;
const TARGET_PC_EPILOGUE: usize = std::usize::MAX - 1;
// Maps eBPF registers r0..r10 to host x86 registers: r0 to a caller-saved
// register (the return value), r1..r5 to argument registers, and r6..r10 to
// callee-saved registers. Index STACK_REG (r10) is the BPF frame pointer.
// NOTE(review): the concrete x86 registers are defined by the `x86` module's
// tables, which are not visible here — confirm against that module.
const REGISTER_MAP: [u8; 11] = [
CALLER_SAVED_REGISTERS[0],
ARGUMENT_REGISTERS[1],
ARGUMENT_REGISTERS[2],
ARGUMENT_REGISTERS[3],
ARGUMENT_REGISTERS[4],
ARGUMENT_REGISTERS[5],
CALLEE_SAVED_REGISTERS[2],
CALLEE_SAVED_REGISTERS[3],
CALLEE_SAVED_REGISTERS[4],
CALLEE_SAVED_REGISTERS[5],
CALLEE_SAVED_REGISTERS[1],
];
/// Appends the raw bytes of `data` at the current text-section cursor and
/// advances the cursor; returns `ExhausedTextSegment` if the section is full.
#[inline]
pub fn emit<T, E: UserDefinedError>(jit: &mut JitCompiler, data: T) -> Result<(), EbpfError<E>> {
    let size = mem::size_of::<T>();
    if jit.offset_in_text_section + size > jit.result.text_section.len() {
        return Err(EbpfError::ExhausedTextSegment(jit.pc));
    }
    unsafe {
        #[allow(clippy::cast_ptr_alignment)]
        let ptr = jit.result.text_section.as_ptr().add(jit.offset_in_text_section) as *mut T;
        // The cursor is byte-granular, so `ptr` is generally NOT aligned for T.
        // A plain `*ptr = data` store would be UB on unaligned addresses (and
        // would also drop the previous value); write_unaligned does neither.
        std::ptr::write_unaligned(ptr, data);
    }
    jit.offset_in_text_section += size;
    Ok(())
}
/// Appends the lowest `size` bits of `data` to the text section.
/// `S0` emits nothing; all other widths truncate `data` accordingly.
pub fn emit_variable_length<E: UserDefinedError>(jit: &mut JitCompiler, size: OperandSize, data: u64) -> Result<(), EbpfError<E>> {
    match size {
        OperandSize::S64 => emit::<u64, E>(jit, data),
        OperandSize::S32 => emit::<u32, E>(jit, data as u32),
        OperandSize::S16 => emit::<u16, E>(jit, data as u16),
        OperandSize::S8 => emit::<u8, E>(jit, data as u8),
        OperandSize::S0 => Ok(()),
    }
}
/// Width of an operand or immediate, in bits (`S0` meaning "none emitted").
#[derive(PartialEq, Eq, Copy, Clone)]
pub enum OperandSize {
S0 = 0,
S8 = 8,
S16 = 16,
S32 = 32,
S64 = 64,
}
/// Loads `value` into `destination` without materializing the full constant in
/// the instruction stream: a random key is subtracted at compile time and
/// re-added by emitted arithmetic at run time, so user-controlled immediates
/// cannot smuggle chosen byte sequences into the executable section.
#[inline]
fn emit_sanitized_load_immediate<E: UserDefinedError>(jit: &mut JitCompiler, size: OperandSize, destination: u8, value: i64) -> Result<(), EbpfError<E>> {
match size {
OperandSize::S32 => {
// load (value - key); add key
let key: i32 = jit.rng.gen();
X86Instruction::load_immediate(size, destination, (value as i32).wrapping_sub(key) as i64).emit(jit)?;
emit_alu(jit, size, 0x81, 0, destination, key as i64, None)
},
OperandSize::S64 if destination == R11 => {
// R11 (the usual scratch register) is the destination itself, so undo
// the obfuscation in place: add upper key, rotate right by 32 (the two
// rotations cancel out), add lower key.
let key: i64 = jit.rng.gen();
let lower_key = key as i32 as i64;
let upper_key = (key >> 32) as i32 as i64;
X86Instruction::load_immediate(size, destination, value.wrapping_sub(lower_key).rotate_right(32).wrapping_sub(upper_key)).emit(jit)?;
emit_alu(jit, size, 0x81, 0, destination, upper_key, None)?; emit_alu(jit, size, 0xc1, 1, destination, 32, None)?; emit_alu(jit, size, 0x81, 0, destination, lower_key, None) },
OperandSize::S64 if value >= std::i32::MIN as i64 && value <= std::i32::MAX as i64 => {
// Value fits in 32 bits: a single add-immediate undoes the subtraction.
let key = jit.rng.gen::<i32>() as i64;
X86Instruction::load_immediate(size, destination, value.wrapping_sub(key)).emit(jit)?;
emit_alu(jit, size, 0x81, 0, destination, key, None)
},
OperandSize::S64 => {
// Full 64-bit value: stage the key in R11, add register-to-register.
let key: i64 = jit.rng.gen();
X86Instruction::load_immediate(size, destination, value.wrapping_sub(key)).emit(jit)?;
X86Instruction::load_immediate(size, R11, key).emit(jit)?;
emit_alu(jit, size, 0x01, R11, destination, 0, None)
},
_ => {
// S0/S8/S16 immediates are never requested here.
#[cfg(debug_assertions)]
unreachable!();
Ok(())
}
}
}
/// Memory load whose OFFSET is user-controlled: the obfuscated offset
/// (offset - key) is loaded into `destination`, and the key is re-added via
/// the addressing mode's displacement, so the raw offset never appears in the
/// instruction stream. Effective address = source + destination + key.
fn emit_sanitized_load<E: UserDefinedError>(jit: &mut JitCompiler, size: OperandSize, source: u8, destination: u8, offset: i32) -> Result<(), EbpfError<E>> {
let key: i32 = jit.rng.gen();
X86Instruction::load_immediate(OperandSize::S64, destination, offset.wrapping_sub(key) as i64).emit(jit)?;
X86Instruction::load(size, source, destination, X86IndirectAccess::OffsetIndexShift(key, destination, 0)).emit(jit)
}
/// Emits an x86 ALU instruction. The immediate width is derived from the
/// opcode: 0xc1 (shift group) takes 8 bits, 0x81 (ALU group) and 0xf7 with
/// extension 0 (TEST) take 32 bits; everything else is a register-register
/// form with no immediate. `source` doubles as the opcode extension for the
/// immediate forms; `indirect` selects a memory destination.
#[inline]
fn emit_alu<E: UserDefinedError>(jit: &mut JitCompiler, size: OperandSize, opcode: u8, source: u8, destination: u8, immediate: i64, indirect: Option<X86IndirectAccess>) -> Result<(), EbpfError<E>> {
X86Instruction {
size,
opcode,
first_operand: source,
second_operand: destination,
immediate_size: match opcode {
0xc1 => OperandSize::S8,
0x81 => OperandSize::S32,
0xf7 if source == 0 => OperandSize::S32,
_ => OperandSize::S0,
},
immediate,
indirect,
..X86Instruction::default()
}.emit(jit)
}
/// ALU operation with a possibly user-controlled immediate: when value
/// sanitization is enabled the immediate is staged (obfuscated) in R11 and the
/// register-operand form of the instruction is used; otherwise the immediate
/// is encoded directly (opcode group 0x81 with `opcode_extension`).
#[inline]
fn emit_sanitized_alu<E: UserDefinedError>(jit: &mut JitCompiler, size: OperandSize, opcode: u8, opcode_extension: u8, destination: u8, immediate: i64) -> Result<(), EbpfError<E>> {
    if !jit.config.sanitize_user_provided_values {
        // Fast path: plain immediate encoding.
        return emit_alu(jit, size, 0x81, opcode_extension, destination, immediate, None);
    }
    emit_sanitized_load_immediate(jit, size, R11, immediate)?;
    emit_alu(jit, size, opcode, R11, destination, immediate, None)
}
/// Emits a 32-bit placeholder displacement and records its location in the
/// fixup list, to be patched once the target's final offset is known.
#[inline]
fn emit_jump_offset<E: UserDefinedError>(jit: &mut JitCompiler, target_pc: usize) -> Result<(), EbpfError<E>> {
jit.text_section_jumps.push(Jump { location: jit.offset_in_text_section, target_pc });
emit::<u32, E>(jit, 0)
}
/// Emits a conditional near jump (0x0F `code`) to `target_pc` (patched later).
#[inline]
fn emit_jcc<E: UserDefinedError>(jit: &mut JitCompiler, code: u8, target_pc: usize) -> Result<(), EbpfError<E>> {
emit::<u8, E>(jit, 0x0f)?;
emit::<u8, E>(jit, code)?;
emit_jump_offset(jit, target_pc)
}
/// Emits an unconditional near jump (0xE9) to `target_pc` (patched later).
#[inline]
fn emit_jmp<E: UserDefinedError>(jit: &mut JitCompiler, target_pc: usize) -> Result<(), EbpfError<E>> {
emit::<u8, E>(jit, 0xe9)?;
emit_jump_offset(jit, target_pc)
}
/// Emits a near call (0xE8) to `target_pc` (patched later).
#[inline]
fn emit_call<E: UserDefinedError>(jit: &mut JitCompiler, target_pc: usize) -> Result<(), EbpfError<E>> {
emit::<u8, E>(jit, 0xe8)?;
emit_jump_offset(jit, target_pc)
}
/// Records the current text-section offset as the anchor for `target` (one of
/// the TARGET_PC_* pseudo pcs), so jumps to it can later be resolved.
#[inline]
fn set_anchor(jit: &mut JitCompiler, target: usize) {
jit.handler_anchors.insert(target, jit.offset_in_text_section);
}
/// Slot indices within the environment frame, addressed relative to RBP (see
/// `slot_on_environment_stack`).
/// NOTE(review): the discriminants start at 5, presumably because slots 0..=4
/// hold registers saved by the prologue (hence `LastSavedRegister = 5`) —
/// confirm against the prologue generator, which is not visible here.
#[repr(C)]
enum EnvironmentStackSlot {
LastSavedRegister = 5,
// Current BPF stack pointer
BpfStackPtr = 6,
// Pointer to the ProgramResult slot
OptRetValPtr = 7,
// Instruction-meter bookkeeping
PrevInsnMeter = 8,
InsnMeterPtr = 9,
// Cycle-count stopwatch accumulator and measurement count
StopwatchNumerator = 10,
StopwatchDenominator = 11,
SlotCount = 12,
}
/// Byte offset of `slot` relative to RBP. Slots grow downwards in 8-byte
/// steps; `environment_stack_key` shifts the whole frame when environment
/// register encryption is enabled.
fn slot_on_environment_stack(jit: &JitCompiler, slot: EnvironmentStackSlot) -> i32 {
    let keyed_index = slot as i32 + jit.environment_stack_key;
    keyed_index * -8
}
/// Emits a cycle-count measurement: reads the CPU cycle counter (fenced) and
/// either starts the stopwatch (`begin`: subtract from the numerator slot) or
/// stops it (add to the numerator, increment the denominator = number of
/// measurements). RAX/RDX are preserved around the sequence.
#[allow(dead_code)]
#[inline]
fn emit_stopwatch<E: UserDefinedError>(jit: &mut JitCompiler, begin: bool) -> Result<(), EbpfError<E>> {
jit.stopwatch_is_active = true;
X86Instruction::push(RDX).emit(jit)?;
X86Instruction::push(RAX).emit(jit)?;
// fence; cycle_count (presumably RDTSC — confirm in the x86 module); fence;
// then fold the two 32-bit halves into RAX: RDX <<= 32; RAX |= RDX.
X86Instruction::fence(FenceType::Load).emit(jit)?; X86Instruction::cycle_count().emit(jit)?; X86Instruction::fence(FenceType::Load).emit(jit)?; emit_alu(jit, OperandSize::S64, 0xc1, 4, RDX, 32, None)?; emit_alu(jit, OperandSize::S64, 0x09, RDX, RAX, 0, None)?; if begin {
emit_alu(jit, OperandSize::S64, 0x29, RAX, RBP, 0, Some(X86IndirectAccess::Offset(slot_on_environment_stack(jit, EnvironmentStackSlot::StopwatchNumerator))))?; } else {
emit_alu(jit, OperandSize::S64, 0x01, RAX, RBP, 0, Some(X86IndirectAccess::Offset(slot_on_environment_stack(jit, EnvironmentStackSlot::StopwatchNumerator))))?; emit_alu(jit, OperandSize::S64, 0x81, 0, RBP, 1, Some(X86IndirectAccess::Offset(slot_on_environment_stack(jit, EnvironmentStackSlot::StopwatchDenominator))))?; }
X86Instruction::pop(RAX).emit(jit)?;
X86Instruction::pop(RDX).emit(jit)
}
/// Emits a check of the instruction meter (kept in ARGUMENT_REGISTERS[0])
/// against the current pc — or against R11 when `pc` is None — and jumps to
/// the "max instructions exceeded" handler if the budget is used up.
/// `exclusive` selects JB (0x82, strict) vs JBE (0x86, non-strict).
#[inline]
fn emit_validate_instruction_count<E: UserDefinedError>(jit: &mut JitCompiler, exclusive: bool, pc: Option<usize>) -> Result<(), EbpfError<E>> {
if let Some(pc) = pc {
jit.last_instruction_meter_validation_pc = pc;
X86Instruction::cmp_immediate(OperandSize::S64, ARGUMENT_REGISTERS[0], pc as i64 + 1, None).emit(jit)?;
} else {
X86Instruction::cmp(OperandSize::S64, R11, ARGUMENT_REGISTERS[0], None).emit(jit)?;
}
emit_jcc(jit, if exclusive { 0x82 } else { 0x86 }, TARGET_PC_CALL_EXCEEDED_MAX_INSTRUCTIONS)
}
/// Adjusts the instruction meter (ARGUMENT_REGISTERS[0]) when leaving a linear
/// run of instructions: meter += target_pc - (current pc + 1). With a known
/// `target_pc` the delta is a compile-time constant; with None, the run-time
/// target is popped into R11 and the delta is computed dynamically.
#[inline]
fn emit_profile_instruction_count<E: UserDefinedError>(jit: &mut JitCompiler, target_pc: Option<usize>) -> Result<(), EbpfError<E>> {
match target_pc {
Some(target_pc) => {
// meter += target_pc - (pc + 1), as a single add-immediate
emit_alu(jit, OperandSize::S64, 0x81, 0, ARGUMENT_REGISTERS[0], target_pc as i64 - jit.pc as i64 - 1, None)?; },
None => { X86Instruction::pop(R11).emit(jit)?;
// meter -= pc + 1; meter += R11 (the immediate of the 0x01 add is unused)
emit_alu(jit, OperandSize::S64, 0x81, 5, ARGUMENT_REGISTERS[0], jit.pc as i64 + 1, None)?; emit_alu(jit, OperandSize::S64, 0x01, R11, ARGUMENT_REGISTERS[0], jit.pc as i64, None)?; },
}
Ok(())
}
/// Convenience wrapper: validate the meter at the current pc, then apply the
/// branch adjustment — both only when instruction metering is enabled.
#[inline]
fn emit_validate_and_profile_instruction_count<E: UserDefinedError>(jit: &mut JitCompiler, exclusive: bool, target_pc: Option<usize>) -> Result<(), EbpfError<E>> {
if jit.config.enable_instruction_meter {
emit_validate_instruction_count(jit, exclusive, Some(jit.pc))?;
emit_profile_instruction_count(jit, target_pc)?;
}
Ok(())
}
/// Reverts `emit_profile_instruction_count`'s adjustment on the fall-through
/// path of a conditional branch (i.e. when the branch was NOT taken):
/// meter += (pc + 1) - target_pc.
#[inline]
fn emit_undo_profile_instruction_count<E: UserDefinedError>(jit: &mut JitCompiler, target_pc: usize) -> Result<(), EbpfError<E>> {
if jit.config.enable_instruction_meter {
emit_alu(jit, OperandSize::S64, 0x81, 0, ARGUMENT_REGISTERS[0], jit.pc as i64 + 1 - target_pc as i64, None)?; }
Ok(())
}
/// Meter and pc bookkeeping for the exception path. R11 holds the faulting
/// pc: it is incremented, deducted from the meter, and — when
/// `store_pc_in_exception` — written into the error value behind OptRetValPtr
/// together with an "is error" tag.
#[inline]
fn emit_profile_instruction_count_of_exception<E: UserDefinedError>(jit: &mut JitCompiler, store_pc_in_exception: bool) -> Result<(), EbpfError<E>> {
emit_alu(jit, OperandSize::S64, 0x81, 0, R11, 1, None)?;
if jit.config.enable_instruction_meter {
// meter -= R11
emit_alu(jit, OperandSize::S64, 0x29, R11, ARGUMENT_REGISTERS[0], 0, None)?; }
if store_pc_in_exception {
X86Instruction::load(OperandSize::S64, RBP, R10, X86IndirectAccess::Offset(slot_on_environment_stack(jit, EnvironmentStackSlot::OptRetValPtr))).emit(jit)?;
// Offset 0 = 1: NOTE(review) presumably the Result's Err discriminant;
// offset 16: the reported pc, biased by ELF_INSN_DUMP_OFFSET — confirm
// against the EbpfError layout used by the epilogue.
X86Instruction::store_immediate(OperandSize::S64, R10, X86IndirectAccess::Offset(0), 1).emit(jit)?; emit_alu(jit, OperandSize::S64, 0x81, 0, R11, ebpf::ELF_INSN_DUMP_OFFSET as i64 - 1, None)?;
X86Instruction::store(OperandSize::S64, R11, R10, X86IndirectAccess::Offset(16)).emit(jit)?; }
Ok(())
}
/// Emits a BPF conditional branch with a register second operand. `bitwise`
/// selects TEST (for JSET-style ops) instead of CMP. R11 is loaded with the
/// target pc so handlers reached from the jump can report it; on fall-through
/// the meter adjustment is undone.
#[inline]
fn emit_conditional_branch_reg<E: UserDefinedError>(jit: &mut JitCompiler, op: u8, bitwise: bool, first_operand: u8, second_operand: u8, target_pc: usize) -> Result<(), EbpfError<E>> {
emit_validate_and_profile_instruction_count(jit, false, Some(target_pc))?;
if bitwise { X86Instruction::test(OperandSize::S64, first_operand, second_operand, None).emit(jit)?;
} else { X86Instruction::cmp(OperandSize::S64, first_operand, second_operand, None).emit(jit)?;
}
X86Instruction::load_immediate(OperandSize::S64, R11, target_pc as i64).emit(jit)?;
emit_jcc(jit, op, target_pc)?;
emit_undo_profile_instruction_count(jit, target_pc)
}
/// Emits a BPF conditional branch with an immediate second operand. When
/// value sanitization is enabled the (user-provided) immediate is staged in
/// R11 first; otherwise the immediate form of TEST/CMP is used directly.
/// Mirrors `emit_conditional_branch_reg` otherwise.
#[inline]
fn emit_conditional_branch_imm<E: UserDefinedError>(jit: &mut JitCompiler, op: u8, bitwise: bool, immediate: i64, second_operand: u8, target_pc: usize) -> Result<(), EbpfError<E>> {
emit_validate_and_profile_instruction_count(jit, false, Some(target_pc))?;
if jit.config.sanitize_user_provided_values {
emit_sanitized_load_immediate(jit, OperandSize::S64, R11, immediate)?;
if bitwise { X86Instruction::test(OperandSize::S64, R11, second_operand, None).emit(jit)?;
} else { X86Instruction::cmp(OperandSize::S64, R11, second_operand, None).emit(jit)?;
}
} else if bitwise { X86Instruction::test_immediate(OperandSize::S64, second_operand, immediate, None).emit(jit)?;
} else { X86Instruction::cmp_immediate(OperandSize::S64, second_operand, immediate, None).emit(jit)?;
}
X86Instruction::load_immediate(OperandSize::S64, R11, target_pc as i64).emit(jit)?;
emit_jcc(jit, op, target_pc)?;
emit_undo_profile_instruction_count(jit, target_pc)
}
/// Operand descriptions consumed by the emit helpers. The trailing `bool`
/// marks values that originate from user-provided (untrusted) input and must
/// therefore be sanitized before they reach the instruction stream.
enum Value {
Register(u8),
/// Dereference of register + 32-bit offset
RegisterIndirect(u8, i32, bool),
/// register + 32-bit constant (no dereference)
RegisterPlusConstant32(u8, i32, bool),
/// register + 64-bit constant (no dereference)
RegisterPlusConstant64(u8, i64, bool),
Constant64(i64, bool),
}
/// Emits a BPF-to-BPF call, either to a static target pc (`Value::Constant64`)
/// or to a run-time register value (`Value::Register`, i.e. CALLX). Saves the
/// scratch registers and the BPF frame pointer, validates a dynamic target
/// against the text segment and translates it to a host address via
/// pc_section, advances the BPF stack by one frame (checking call depth),
/// performs the call, and restores state afterwards.
#[inline]
fn emit_bpf_call<E: UserDefinedError>(jit: &mut JitCompiler, dst: Value, number_of_instructions: usize) -> Result<(), EbpfError<E>> {
for reg in REGISTER_MAP.iter().skip(FIRST_SCRATCH_REG).take(SCRATCH_REGS) {
X86Instruction::push(*reg).emit(jit)?;
}
X86Instruction::push(REGISTER_MAP[STACK_REG]).emit(jit)?;
match dst {
Value::Register(reg) => {
// Save the callers's r0; the register target is computed in REGISTER_MAP[0].
X86Instruction::push(REGISTER_MAP[0]).emit(jit)?;
if reg != REGISTER_MAP[0] {
X86Instruction::mov(OperandSize::S64, reg, REGISTER_MAP[0]).emit(jit)?;
}
// Round the target vm address down to an instruction boundary,
// then bounds-check it against [program_vm_addr, end of text).
emit_alu(jit, OperandSize::S64, 0x81, 4, REGISTER_MAP[0], !(INSN_SIZE as i64 - 1), None)?; X86Instruction::load_immediate(OperandSize::S64, R11, jit.pc as i64).emit(jit)?;
X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[STACK_REG], jit.program_vm_addr as i64 + (number_of_instructions * INSN_SIZE) as i64).emit(jit)?;
X86Instruction::cmp(OperandSize::S64, REGISTER_MAP[STACK_REG], REGISTER_MAP[0], None).emit(jit)?;
emit_jcc(jit, 0x83, TARGET_PC_CALL_OUTSIDE_TEXT_SEGMENT)?;
X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[STACK_REG], jit.program_vm_addr as i64).emit(jit)?;
X86Instruction::cmp(OperandSize::S64, REGISTER_MAP[STACK_REG], REGISTER_MAP[0], None).emit(jit)?;
emit_jcc(jit, 0x82, TARGET_PC_CALL_OUTSIDE_TEXT_SEGMENT)?;
// REGISTER_MAP[0] = byte offset within the text segment.
emit_alu(jit, OperandSize::S64, 0x29, REGISTER_MAP[STACK_REG], REGISTER_MAP[0], 0, None)?; if jit.config.enable_instruction_meter {
// Push the target's instruction index (offset >> log2(INSN_SIZE))
// for the dynamic meter adjustment after the call.
let shift_amount = INSN_SIZE.trailing_zeros();
debug_assert_eq!(INSN_SIZE, 1<<shift_amount);
X86Instruction::mov(OperandSize::S64, REGISTER_MAP[0], REGISTER_MAP[STACK_REG]).emit(jit)?;
emit_alu(jit, OperandSize::S64, 0xc1, 5, REGISTER_MAP[STACK_REG], shift_amount as i64, None)?;
X86Instruction::push(REGISTER_MAP[STACK_REG]).emit(jit)?;
}
// Since INSN_SIZE == 8 == size_of::<u64>(), the byte offset into the
// text segment is also the byte offset into pc_section; load the host
// code offset from the table.
debug_assert_eq!(INSN_SIZE, 8); X86Instruction::mov(OperandSize::S64, REGISTER_MAP[0], REGISTER_MAP[STACK_REG]).emit(jit)?;
X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[STACK_REG], jit.result.pc_section.as_ptr() as i64).emit(jit)?;
emit_alu(jit, OperandSize::S64, 0x01, REGISTER_MAP[STACK_REG], REGISTER_MAP[0], 0, None)?; X86Instruction::load(OperandSize::S64, REGISTER_MAP[0], REGISTER_MAP[0], X86IndirectAccess::Offset(0)).emit(jit)?; },
Value::Constant64(_target_pc, user_provided) => debug_assert!(!user_provided),
_ => {
#[cfg(debug_assertions)]
unreachable!();
}
}
// Advance the BPF stack pointer by one frame (double-sized when stack frame
// gaps are enabled) and check the call-depth limit.
let stack_frame_size = jit.config.stack_frame_size as i64 * if jit.config.enable_stack_frame_gaps { 2 } else { 1 };
X86Instruction::load(OperandSize::S64, RBP, REGISTER_MAP[STACK_REG], X86IndirectAccess::Offset(slot_on_environment_stack(jit, EnvironmentStackSlot::BpfStackPtr))).emit(jit)?;
emit_alu(jit, OperandSize::S64, 0x81, 0, REGISTER_MAP[STACK_REG], stack_frame_size, None)?; X86Instruction::store(OperandSize::S64, REGISTER_MAP[STACK_REG], RBP, X86IndirectAccess::Offset(slot_on_environment_stack(jit, EnvironmentStackSlot::BpfStackPtr))).emit(jit)?;
X86Instruction::load_immediate(OperandSize::S64, R11, MM_STACK_START as i64 + jit.config.stack_frame_size as i64 + (jit.config.max_call_depth as i64 * stack_frame_size)).emit(jit)?;
X86Instruction::cmp(OperandSize::S64, R11, REGISTER_MAP[STACK_REG], None).emit(jit)?;
X86Instruction::load_immediate(OperandSize::S64, R11, jit.pc as i64).emit(jit)?;
emit_jcc(jit, 0x83, TARGET_PC_CALL_DEPTH_EXCEEDED)?;
match dst {
Value::Register(_reg) => {
emit_validate_and_profile_instruction_count(jit, false, None)?;
// Host target address into R11, restore the saved r0, call indirectly.
X86Instruction::mov(OperandSize::S64, REGISTER_MAP[0], R11).emit(jit)?;
X86Instruction::pop(REGISTER_MAP[0]).emit(jit)?;
X86Instruction::call_reg(OperandSize::S64, R11, None).emit(jit)?; },
Value::Constant64(target_pc, _user_provided) => {
emit_validate_and_profile_instruction_count(jit, false, Some(target_pc as usize))?;
X86Instruction::load_immediate(OperandSize::S64, R11, target_pc as i64).emit(jit)?;
emit_call(jit, target_pc as usize)?;
},
_ => {
#[cfg(debug_assertions)]
unreachable!();
}
}
// Re-credit the meter for the code following the call site, then restore
// the frame pointer and the scratch registers.
emit_undo_profile_instruction_count(jit, 0)?;
X86Instruction::pop(REGISTER_MAP[STACK_REG]).emit(jit)?;
for reg in REGISTER_MAP.iter().skip(FIRST_SCRATCH_REG).take(SCRATCH_REGS).rev() {
X86Instruction::pop(*reg).emit(jit)?;
}
Ok(())
}
/// One argument of a call from JITed code into native Rust code:
/// its position in the calling convention and the value to pass.
struct Argument {
index: usize,
value: Value,
}
impl Argument {
/// True if this argument does not fit the ABI's argument registers and
/// must be passed on the machine stack instead.
fn is_stack_argument(&self) -> bool {
self.index >= ARGUMENT_REGISTERS.len()
}
fn get_argument_register(&self) -> u8 {
ARGUMENT_REGISTERS[self.index]
}
/// Emits the code that places this argument where the callee expects it:
/// either directly in its argument register, or staged in R11 and pushed
/// onto the stack. User-provided constituents are sanitized when configured.
fn emit_pass<E: UserDefinedError>(&self, jit: &mut JitCompiler) -> Result<(), EbpfError<E>> {
let is_stack_argument = self.is_stack_argument();
let dst = if is_stack_argument {
R11
} else {
self.get_argument_register()
};
match self.value {
Value::Register(reg) => {
if is_stack_argument {
// Registers can be pushed directly, no staging needed.
return X86Instruction::push(reg).emit(jit);
} else if reg != dst {
X86Instruction::mov(OperandSize::S64, reg, dst).emit(jit)?;
}
},
Value::RegisterIndirect(reg, offset, user_provided) => {
if user_provided && jit.config.sanitize_user_provided_values {
emit_sanitized_load(jit, OperandSize::S64, reg, dst, offset)?;
} else {
X86Instruction::load(OperandSize::S64, reg, dst, X86IndirectAccess::Offset(offset)).emit(jit)?;
}
},
Value::RegisterPlusConstant32(reg, offset, user_provided) => {
if user_provided && jit.config.sanitize_user_provided_values {
// Obfuscated constant first, then add the register.
emit_sanitized_load_immediate(jit, OperandSize::S64, dst, offset as i64)?;
emit_alu(jit, OperandSize::S64, 0x01, reg, dst, 0, None)?;
} else {
// LEA computes reg + offset without clobbering flags or reg.
X86Instruction::lea(OperandSize::S64, reg, dst, Some(X86IndirectAccess::Offset(offset))).emit(jit)?;
}
},
Value::RegisterPlusConstant64(reg, offset, user_provided) => {
if user_provided && jit.config.sanitize_user_provided_values {
emit_sanitized_load_immediate(jit, OperandSize::S64, R11, offset)?;
} else {
X86Instruction::load_immediate(OperandSize::S64, R11, offset).emit(jit)?;
}
emit_alu(jit, OperandSize::S64, 0x01, reg, R11, 0, None)?;
X86Instruction::mov(OperandSize::S64, R11, dst).emit(jit)?;
},
Value::Constant64(value, user_provided) => {
if user_provided && jit.config.sanitize_user_provided_values {
emit_sanitized_load_immediate(jit, OperandSize::S64, dst, value)?;
} else {
X86Instruction::load_immediate(OperandSize::S64, dst, value).emit(jit)?;
}
},
}
if is_stack_argument {
X86Instruction::push(dst).emit(jit)
} else {
Ok(())
}
}
}
/// Emits a call from JITed code into the native Rust function `function`:
/// saves the caller-saved registers (except the result register, if any),
/// marshals `arguments` into registers/stack via `Argument::emit_pass`, calls
/// through RAX, moves the return value into `result_reg`, pops the stack
/// arguments, and restores the saved registers. When `check_exception` is
/// set, it additionally leaves the flags set by comparing the value behind
/// OptRetValPtr against 0, so the caller can branch on "an error was stored".
#[inline]
fn emit_rust_call<E: UserDefinedError>(jit: &mut JitCompiler, function: *const u8, arguments: &[Argument], result_reg: Option<u8>, check_exception: bool) -> Result<(), EbpfError<E>> {
let mut saved_registers = CALLER_SAVED_REGISTERS.to_vec();
if let Some(reg) = result_reg {
// The result register must survive the restore loop below.
let dst = saved_registers.iter().position(|x| *x == reg);
debug_assert!(dst.is_some());
if let Some(dst) = dst {
saved_registers.remove(dst);
}
}
for reg in saved_registers.iter() {
X86Instruction::push(*reg).emit(jit)?;
}
let mut stack_arguments = 0;
for argument in arguments {
if argument.is_stack_argument() {
stack_arguments += 1;
}
argument.emit_pass(jit)?;
}
X86Instruction::load_immediate(OperandSize::S64, RAX, function as i64).emit(jit)?;
X86Instruction::call_reg(OperandSize::S64, RAX, None).emit(jit)?;
if let Some(reg) = result_reg {
X86Instruction::mov(OperandSize::S64, RAX, reg).emit(jit)?;
}
// Discard the pushed stack arguments: RSP += stack_arguments * 8.
emit_alu(jit, OperandSize::S64, 0x81, 0, RSP, stack_arguments * 8, None)?;
for reg in saved_registers.iter().rev() {
X86Instruction::pop(*reg).emit(jit)?;
}
if check_exception {
// Compare the first word behind OptRetValPtr with 0.
// NOTE(review): presumably the ProgramResult discriminant — confirm layout.
X86Instruction::load(OperandSize::S64, RBP, R11, X86IndirectAccess::Offset(slot_on_environment_stack(jit, EnvironmentStackSlot::OptRetValPtr))).emit(jit)?;
X86Instruction::cmp_immediate(OperandSize::S64, R11, 0, Some(X86IndirectAccess::Offset(0))).emit(jit)?;
}
Ok(())
}
/// Emits the translation of a BPF vm address into a host address, leaving the
/// result in `host_addr`. The vm address is staged in R11 (sanitized when
/// user-provided), then one of the TARGET_PC_TRANSLATE_MEMORY_ADDRESS handler
/// routines is called, selected by access width (log2 of `len`) and access
/// type (load/store).
#[inline]
fn emit_address_translation<E: UserDefinedError>(jit: &mut JitCompiler, host_addr: u8, vm_addr: Value, len: u64, access_type: AccessType) -> Result<(), EbpfError<E>> {
match vm_addr {
Value::RegisterPlusConstant64(reg, constant, user_provided) => {
if user_provided && jit.config.sanitize_user_provided_values {
emit_sanitized_load_immediate(jit, OperandSize::S64, R11, constant)?;
} else {
X86Instruction::load_immediate(OperandSize::S64, R11, constant).emit(jit)?;
}
emit_alu(jit, OperandSize::S64, 0x01, reg, R11, 0, None)?;
},
Value::Constant64(constant, user_provided) => {
if user_provided && jit.config.sanitize_user_provided_values {
emit_sanitized_load_immediate(jit, OperandSize::S64, R11, constant)?;
} else {
X86Instruction::load_immediate(OperandSize::S64, R11, constant).emit(jit)?;
}
},
_ => {
// Other Value variants are never used for address operands.
#[cfg(debug_assertions)]
unreachable!();
},
}
// Handler index: base + log2(len) + 4 * access_type (4 widths per type).
emit_call(jit, TARGET_PC_TRANSLATE_MEMORY_ADDRESS + len.trailing_zeros() as usize + 4 * (access_type as usize))?;
X86Instruction::mov(OperandSize::S64, R11, host_addr).emit(jit)
}
/// Emits a shift/rotate (the operation is chosen by `opcode_extension` within
/// x86 group 0xC1/0xD3). Immediate shifts are either encoded directly or,
/// when sanitization is enabled, loaded into `source` first and handled as a
/// variable shift. Variable shift counts must live in CL on x86, so the value
/// is shuffled into RCX with save/restore depending on which of
/// source/destination already is RCX.
fn emit_shift<E: UserDefinedError>(jit: &mut JitCompiler, size: OperandSize, opcode_extension: u8, source: u8, destination: u8, immediate: Option<i64>) -> Result<(), EbpfError<E>> {
if let Some(immediate) = immediate {
if jit.config.sanitize_user_provided_values {
// Stage the (obfuscated) count in `source`, fall through to the
// variable-shift path below.
emit_sanitized_load_immediate(jit, OperandSize::S32, source, immediate)?;
} else {
return emit_alu(jit, size, 0xc1, opcode_extension, destination, immediate, None);
}
}
if size == OperandSize::S32 {
// 32-bit AND with -1 leaves the low half unchanged but zeroes the upper
// 32 bits of the destination register.
emit_alu(jit, OperandSize::S32, 0x81, 4, destination, -1, None)?; }
if source == RCX {
if destination == RCX {
emit_alu(jit, size, 0xd3, opcode_extension, destination, 0, None)
} else {
X86Instruction::push(RCX).emit(jit)?;
emit_alu(jit, size, 0xd3, opcode_extension, destination, 0, None)?;
X86Instruction::pop(RCX).emit(jit)
}
} else if destination == RCX {
// Swap the roles of `source` and RCX, shift, then swap back.
if source != R11 {
X86Instruction::push(source).emit(jit)?;
}
X86Instruction::xchg(OperandSize::S64, source, RCX).emit(jit)?;
emit_alu(jit, size, 0xd3, opcode_extension, source, 0, None)?;
X86Instruction::mov(OperandSize::S64, source, RCX).emit(jit)?;
if source != R11 {
X86Instruction::pop(source).emit(jit)?;
}
Ok(())
} else {
// Neither operand is RCX: borrow it for the count.
X86Instruction::push(RCX).emit(jit)?;
X86Instruction::mov(OperandSize::S64, source, RCX).emit(jit)?;
emit_alu(jit, size, 0xd3, opcode_extension, destination, 0, None)?;
X86Instruction::pop(RCX).emit(jit)
}
}
/// Emits BPF MUL/DIV/MOD (32- or 64-bit, immediate or register operand).
/// x86 MUL/DIV implicitly use RAX (low result / quotient) and RDX (high
/// result / remainder), so those are saved and restored when they are not the
/// destination. Register DIV/MOD operands get a runtime zero check.
fn emit_muldivmod<E: UserDefinedError>(jit: &mut JitCompiler, opc: u8, src: u8, dst: u8, imm: Option<i64>) -> Result<(), EbpfError<E>> {
let mul = (opc & ebpf::BPF_ALU_OP_MASK) == (ebpf::MUL32_IMM & ebpf::BPF_ALU_OP_MASK);
let div = (opc & ebpf::BPF_ALU_OP_MASK) == (ebpf::DIV32_IMM & ebpf::BPF_ALU_OP_MASK);
let modrm = (opc & ebpf::BPF_ALU_OP_MASK) == (ebpf::MOD32_IMM & ebpf::BPF_ALU_OP_MASK);
let size = if (opc & ebpf::BPF_CLS_MASK) == ebpf::BPF_ALU64 { OperandSize::S64 } else { OperandSize::S32 };
if (div || modrm) && imm.is_none() {
// Register divisor: jump to the div-by-zero handler when src == 0
// (R11 carries the pc for the error report).
X86Instruction::load_immediate(OperandSize::S64, R11, jit.pc as i64).emit(jit)?;
X86Instruction::test(size, src, src, None).emit(jit)?; emit_jcc(jit, 0x84, TARGET_PC_DIV_BY_ZERO)?;
}
if dst != RAX {
X86Instruction::push(RAX).emit(jit)?;
}
if dst != RDX {
X86Instruction::push(RDX).emit(jit)?;
}
// The second operand ends up in R11 (sanitized when it is an immediate).
if let Some(imm) = imm {
if jit.config.sanitize_user_provided_values {
emit_sanitized_load_immediate(jit, OperandSize::S64, R11, imm)?;
} else {
X86Instruction::load_immediate(OperandSize::S64, R11, imm).emit(jit)?;
}
} else {
X86Instruction::mov(OperandSize::S64, src, R11).emit(jit)?;
}
if dst != RAX {
X86Instruction::mov(OperandSize::S64, dst, RAX).emit(jit)?;
}
if div || modrm {
// Zero RDX (xor rdx, rdx) before an unsigned divide.
emit_alu(jit, size, 0x31, RDX, RDX, 0, None)?;
}
// 0xf7 /4 = mul, /6 = div of RDX:RAX by R11.
emit_alu(jit, size, 0xf7, if mul { 4 } else { 6 }, R11, 0, None)?;
if dst != RDX {
if modrm {
// Remainder lives in RDX.
X86Instruction::mov(OperandSize::S64, RDX, dst).emit(jit)?;
}
X86Instruction::pop(RDX).emit(jit)?;
}
if dst != RAX {
if div || mul {
// Quotient / low product lives in RAX.
X86Instruction::mov(OperandSize::S64, RAX, dst).emit(jit)?;
}
X86Instruction::pop(RAX).emit(jit)?;
}
if size == OperandSize::S32 && opc & ebpf::BPF_ALU_OP_MASK == ebpf::BPF_MUL {
// 32-bit MUL: sign-extend the result into the upper half.
X86Instruction::sign_extend_i32_to_i64(dst, dst).emit(jit)?;
}
Ok(())
}
/// Emits a store of `err`'s variant discriminant into the error value behind
/// OptRetValPtr (at offset 8, i.e. past the Result discriminant). The
/// discriminant is extracted by inspecting the in-memory representation of a
/// freshly constructed Err value.
/// NOTE(review): this relies on the UNSPECIFIED layout of
/// Result/EbpfError — fragile across compiler versions; confirm the crate has
/// layout tests covering it.
#[inline]
fn emit_set_exception_kind<E: UserDefinedError>(jit: &mut JitCompiler, err: EbpfError<E>) -> Result<(), EbpfError<E>> {
let err = Result::<u64, EbpfError<E>>::Err(err);
let err_kind = unsafe { *(&err as *const _ as *const u64).offset(1) };
X86Instruction::load(OperandSize::S64, RBP, R10, X86IndirectAccess::Offset(slot_on_environment_stack(jit, EnvironmentStackSlot::OptRetValPtr))).emit(jit)?;
X86Instruction::store_immediate(OperandSize::S64, R10, X86IndirectAccess::Offset(8), err_kind as i64).emit(jit)
}
/// A recorded jump fixup: a placeholder displacement at `location` in the
/// text section that must be patched to reach `target_pc` (an instruction pc
/// or a TARGET_PC_* pseudo pc).
#[derive(Debug)]
struct Jump {
location: usize,
target_pc: usize,
}
impl Jump {
    /// Resolves the destination to a text-section offset: handler anchors
    /// (TARGET_PC_* pseudo pcs) take precedence; real instruction pcs fall
    /// back to the pc→offset lookup table.
    fn get_target_offset(&self, jit: &JitCompiler) -> u64 {
        if let Some(anchor) = jit.handler_anchors.get(&self.target_pc) {
            *anchor as u64
        } else {
            jit.result.pc_section[self.target_pc]
        }
    }
}
/// State of one compilation run.
pub struct JitCompiler {
// Output sections (pc table + text) being filled in
result: JitProgramSections,
// Pending jump fixups into the pc table and the text section
pc_section_jumps: Vec<Jump>,
text_section_jumps: Vec<Jump>,
// Write cursor within the text section, in bytes
offset_in_text_section: usize,
// BPF pc of the instruction currently being compiled
pc: usize,
// Last pc at which an instruction-meter checkpoint was emitted
last_instruction_meter_validation_pc: usize,
// VM address at which the program's text segment starts
program_vm_addr: u64,
// Resolved offsets of the TARGET_PC_* handler routines
handler_anchors: HashMap<usize, usize>,
config: Config,
// Randomness source for value sanitization / environment encryption keys
rng: ThreadRng,
stopwatch_is_active: bool,
// Random keys applied when environment register encryption is enabled
environment_stack_key: i32,
program_argument_key: i32,
}
/// Byte-level read access into the emitted text section.
impl Index<usize> for JitCompiler {
type Output = u8;
fn index(&self, _index: usize) -> &u8 {
&self.result.text_section[_index]
}
}
/// Byte-level write access into the emitted text section (e.g. for patching).
impl IndexMut<usize> for JitCompiler {
fn index_mut(&mut self, _index: usize) -> &mut u8 {
&mut self.result.text_section[_index]
}
}
impl std::fmt::Debug for JitCompiler {
    /// Dumps the raw text-section bytes followed by the compiler state.
    fn fmt(&self, fmt: &mut Formatter) -> Result<(), FormatterError> {
        fmt.write_str("JIT text_section: [")?;
        for byte in self.result.text_section.iter() {
            write!(fmt, " {:#04x},", byte)?;
        }
        fmt.write_str(" ] | ")?;
        fmt.debug_struct("JIT state")
            .field("memory", &self.result.pc_section.as_ptr())
            .field("pc", &self.pc)
            .field("offset_in_text_section", &self.offset_in_text_section)
            .field("pc_section", &self.result.pc_section)
            .field("handler_anchors", &self.handler_anchors)
            .field("pc_section_jumps", &self.pc_section_jumps)
            .field("text_section_jumps", &self.text_section_jumps)
            .finish()
    }
}
impl JitCompiler {
/// Creates a compiler instance: counts the program's instruction slots
/// (LD_DW_IMM occupies two), sizes the output sections from a heuristic, and
/// draws the random keys used for environment register encryption.
fn new<E: UserDefinedError>(_program: &[u8], _config: &Config) -> Result<Self, EbpfError<E>> {
// The JIT only supports x86_64 and is unavailable on Windows.
#[cfg(target_os = "windows")]
{
panic!("JIT not supported on windows");
}
#[cfg(not(target_arch = "x86_64"))]
{
panic!("JIT is only supported on x86_64");
}
let mut pc = 0;
while pc * ebpf::INSN_SIZE < _program.len() {
let insn = ebpf::get_insn(_program, pc);
pc += match insn.opc {
ebpf::LD_DW_IMM => 2, // 16-byte instruction, takes two pc slots
_ => 1,
};
}
// Heuristic: 256 bytes of machine code per instruction plus slack,
// inflated by the configured noop-padding ratio.
let mut code_length_estimate = pc * 256 + 4096;
code_length_estimate += (code_length_estimate as f64 * _config.noop_instruction_ratio) as usize;
let mut rng = rand::thread_rng();
let (environment_stack_key, program_argument_key) =
if _config.encrypt_environment_registers {
// NOTE(review): the division by 8 presumably keeps key * 8 (the
// slot offsets in slot_on_environment_stack) within i32 — confirm.
(rng.gen::<i32>() / 8, rng.gen())
} else { (0, 0) };
Ok(Self {
// One extra pc_section entry past the last instruction.
result: JitProgramSections::new(pc + 1, code_length_estimate)?,
pc_section_jumps: vec![],
text_section_jumps: vec![],
offset_in_text_section: 0,
pc: 0,
last_instruction_meter_validation_pc: 0,
program_vm_addr: 0,
handler_anchors: HashMap::new(),
config: *_config,
rng,
stopwatch_is_active: false,
environment_stack_key,
program_argument_key,
})
}
/// Translates the eBPF program of `executable` into x86_64 machine code.
///
/// Emission order: prologue, jump to the entry point, helper routines,
/// exception handlers, one x86 sequence per eBPF instruction, the
/// execution-overrun trap, and finally the epilogue. Jump targets are
/// recorded during emission and patched afterwards by `resolve_jumps()`.
/// On success the text section is sealed and the randomization keys are
/// cleared from the compiler state.
fn compile<E: UserDefinedError, I: InstructionMeter>(&mut self,
executable: &dyn Executable<E, I>) -> Result<(), EbpfError<E>> {
let (program_vm_addr, program) = executable.get_text_bytes();
self.program_vm_addr = program_vm_addr;
self.generate_prologue::<E, I>()?;
// Jump over the helper routines and exception handlers to the entry instruction.
let entry = executable.get_entrypoint_instruction_offset().unwrap_or(0);
if self.config.enable_instruction_meter {
emit_profile_instruction_count(self, Some(entry + 1))?;
}
// R11 carries the current eBPF pc by convention throughout the emitted code.
X86Instruction::load_immediate(OperandSize::S64, R11, entry as i64).emit(self)?;
emit_jmp(self, entry)?;
self.generate_helper_routines::<E>()?;
self.generate_exception_handlers::<E>()?;
// Main translation loop: one iteration per eBPF instruction slot.
while self.pc * ebpf::INSN_SIZE < program.len() {
let mut insn = ebpf::get_insn(program, self.pc);
// Record the text-section offset where this eBPF pc begins.
self.result.pc_section[self.pc] = self.offset_in_text_section as u64;
// Periodically re-validate the instruction meter (checkpoint distance is configurable).
if self.last_instruction_meter_validation_pc + self.config.instruction_meter_checkpoint_distance <= self.pc {
emit_validate_instruction_count(self, true, Some(self.pc))?;
}
if self.config.enable_instruction_tracing {
X86Instruction::load_immediate(OperandSize::S64, R11, self.pc as i64).emit(self)?;
emit_call(self, TARGET_PC_TRACE)?;
}
let dst = REGISTER_MAP[insn.dst as usize];
let src = REGISTER_MAP[insn.src as usize];
// Branch targets are pc-relative: offset is from the *following* instruction.
let target_pc = (self.pc as isize + insn.off as isize + 1) as usize;
match insn.opc {
// BPF_LD | BPF_ABS: load from MM_INPUT_START + imm (absolute input offset).
ebpf::LD_ABS_B => {
emit_address_translation(self, R11, Value::Constant64(ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64, true), 1, AccessType::Load)?;
X86Instruction::load(OperandSize::S8, R11, RAX, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::LD_ABS_H => {
emit_address_translation(self, R11, Value::Constant64(ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64, true), 2, AccessType::Load)?;
X86Instruction::load(OperandSize::S16, R11, RAX, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::LD_ABS_W => {
emit_address_translation(self, R11, Value::Constant64(ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64, true), 4, AccessType::Load)?;
X86Instruction::load(OperandSize::S32, R11, RAX, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::LD_ABS_DW => {
emit_address_translation(self, R11, Value::Constant64(ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64, true), 8, AccessType::Load)?;
X86Instruction::load(OperandSize::S64, R11, RAX, X86IndirectAccess::Offset(0)).emit(self)?;
},
// BPF_LD | BPF_IND: load from src + MM_INPUT_START + imm (indexed input offset).
ebpf::LD_IND_B => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64, true), 1, AccessType::Load)?;
X86Instruction::load(OperandSize::S8, R11, RAX, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::LD_IND_H => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64, true), 2, AccessType::Load)?;
X86Instruction::load(OperandSize::S16, R11, RAX, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::LD_IND_W => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64, true), 4, AccessType::Load)?;
X86Instruction::load(OperandSize::S32, R11, RAX, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::LD_IND_DW => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64, true), 8, AccessType::Load)?;
X86Instruction::load(OperandSize::S64, R11, RAX, X86IndirectAccess::Offset(0)).emit(self)?;
},
// LDDW occupies two instruction slots; the second slot is marked as an
// unsupported-instruction target so a jump into it traps.
ebpf::LD_DW_IMM => {
emit_validate_and_profile_instruction_count(self, true, Some(self.pc + 2))?;
self.pc += 1;
self.pc_section_jumps.push(Jump { location: self.pc, target_pc: TARGET_PC_CALL_UNSUPPORTED_INSTRUCTION });
ebpf::augment_lddw_unchecked(program, &mut insn);
if self.config.sanitize_user_provided_values {
emit_sanitized_load_immediate(self, OperandSize::S64, dst, insn.imm)?;
} else {
X86Instruction::load_immediate(OperandSize::S64, dst, insn.imm).emit(self)?;
}
},
// BPF_LDX: register-relative loads (dst = *(src + off)).
ebpf::LD_B_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, insn.off as i64, true), 1, AccessType::Load)?;
X86Instruction::load(OperandSize::S8, R11, dst, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::LD_H_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, insn.off as i64, true), 2, AccessType::Load)?;
X86Instruction::load(OperandSize::S16, R11, dst, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::LD_W_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, insn.off as i64, true), 4, AccessType::Load)?;
X86Instruction::load(OperandSize::S32, R11, dst, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::LD_DW_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, insn.off as i64, true), 8, AccessType::Load)?;
X86Instruction::load(OperandSize::S64, R11, dst, X86IndirectAccess::Offset(0)).emit(self)?;
},
// BPF_ST: immediate stores (*(dst + off) = imm).
ebpf::ST_B_IMM => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 1, AccessType::Store)?;
X86Instruction::store_immediate(OperandSize::S8, R11, X86IndirectAccess::Offset(0), insn.imm as i64).emit(self)?;
},
ebpf::ST_H_IMM => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 2, AccessType::Store)?;
X86Instruction::store_immediate(OperandSize::S16, R11, X86IndirectAccess::Offset(0), insn.imm as i64).emit(self)?;
},
ebpf::ST_W_IMM => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 4, AccessType::Store)?;
X86Instruction::store_immediate(OperandSize::S32, R11, X86IndirectAccess::Offset(0), insn.imm as i64).emit(self)?;
},
ebpf::ST_DW_IMM => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 8, AccessType::Store)?;
X86Instruction::store_immediate(OperandSize::S64, R11, X86IndirectAccess::Offset(0), insn.imm as i64).emit(self)?;
},
// BPF_STX: register stores (*(dst + off) = src).
ebpf::ST_B_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 1, AccessType::Store)?;
X86Instruction::store(OperandSize::S8, src, R11, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::ST_H_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 2, AccessType::Store)?;
X86Instruction::store(OperandSize::S16, src, R11, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::ST_W_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 4, AccessType::Store)?;
X86Instruction::store(OperandSize::S32, src, R11, X86IndirectAccess::Offset(0)).emit(self)?;
},
ebpf::ST_DW_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 8, AccessType::Store)?;
X86Instruction::store(OperandSize::S64, src, R11, X86IndirectAccess::Offset(0)).emit(self)?;
},
// 32-bit ALU. The hex constants are x86 opcodes (0x01 add, 0x29 sub, 0x09 or,
// 0x21 and, 0x31 xor, 0xf7 unary group); eBPF sign-extends 32-bit add/sub results.
ebpf::ADD32_IMM => {
emit_sanitized_alu(self, OperandSize::S32, 0x01, 0, dst, insn.imm)?;
X86Instruction::sign_extend_i32_to_i64(dst, dst).emit(self)?;
},
ebpf::ADD32_REG => {
emit_alu(self, OperandSize::S32, 0x01, src, dst, 0, None)?;
X86Instruction::sign_extend_i32_to_i64(dst, dst).emit(self)?;
},
ebpf::SUB32_IMM => {
emit_sanitized_alu(self, OperandSize::S32, 0x29, 5, dst, insn.imm)?;
X86Instruction::sign_extend_i32_to_i64(dst, dst).emit(self)?;
},
ebpf::SUB32_REG => {
emit_alu(self, OperandSize::S32, 0x29, src, dst, 0, None)?;
X86Instruction::sign_extend_i32_to_i64(dst, dst).emit(self)?;
},
ebpf::MUL32_IMM | ebpf::DIV32_IMM | ebpf::MOD32_IMM =>
emit_muldivmod(self, insn.opc, dst, dst, Some(insn.imm))?,
ebpf::MUL32_REG | ebpf::DIV32_REG | ebpf::MOD32_REG =>
emit_muldivmod(self, insn.opc, src, dst, None)?,
ebpf::OR32_IMM => emit_sanitized_alu(self, OperandSize::S32, 0x09, 1, dst, insn.imm)?,
ebpf::OR32_REG => emit_alu(self, OperandSize::S32, 0x09, src, dst, 0, None)?,
ebpf::AND32_IMM => emit_sanitized_alu(self, OperandSize::S32, 0x21, 4, dst, insn.imm)?,
ebpf::AND32_REG => emit_alu(self, OperandSize::S32, 0x21, src, dst, 0, None)?,
ebpf::LSH32_IMM => emit_shift(self, OperandSize::S32, 4, R11, dst, Some(insn.imm))?,
ebpf::LSH32_REG => emit_shift(self, OperandSize::S32, 4, src, dst, None)?,
ebpf::RSH32_IMM => emit_shift(self, OperandSize::S32, 5, R11, dst, Some(insn.imm))?,
ebpf::RSH32_REG => emit_shift(self, OperandSize::S32, 5, src, dst, None)?,
ebpf::NEG32 => emit_alu(self, OperandSize::S32, 0xf7, 3, dst, 0, None)?,
ebpf::XOR32_IMM => emit_sanitized_alu(self, OperandSize::S32, 0x31, 6, dst, insn.imm)?,
ebpf::XOR32_REG => emit_alu(self, OperandSize::S32, 0x31, src, dst, 0, None)?,
ebpf::MOV32_IMM => {
if self.config.sanitize_user_provided_values {
emit_sanitized_load_immediate(self, OperandSize::S32, dst, insn.imm)?;
} else {
X86Instruction::load_immediate(OperandSize::S32, dst, insn.imm).emit(self)?;
}
}
ebpf::MOV32_REG => X86Instruction::mov(OperandSize::S32, src, dst).emit(self)?,
ebpf::ARSH32_IMM => emit_shift(self, OperandSize::S32, 7, R11, dst, Some(insn.imm))?,
ebpf::ARSH32_REG => emit_shift(self, OperandSize::S32, 7, src, dst, None)?,
// LE: truncate to the requested width (16-bit: mask 0xffff; 32-bit: 32-bit
// AND with -1 clears the upper half; 64-bit: nothing to emit).
ebpf::LE => {
match insn.imm {
16 => {
emit_alu(self, OperandSize::S32, 0x81, 4, dst, 0xffff, None)?; }
32 => {
emit_alu(self, OperandSize::S32, 0x81, 4, dst, -1, None)?; }
64 => {}
_ => {
return Err(EbpfError::InvalidInstruction(self.pc + ebpf::ELF_INSN_DUMP_OFFSET));
}
}
},
// BE: byte-swap to the requested width (16-bit additionally masked).
ebpf::BE => {
match insn.imm {
16 => {
X86Instruction::bswap(OperandSize::S16, dst).emit(self)?;
emit_alu(self, OperandSize::S32, 0x81, 4, dst, 0xffff, None)?; }
32 => X86Instruction::bswap(OperandSize::S32, dst).emit(self)?,
64 => X86Instruction::bswap(OperandSize::S64, dst).emit(self)?,
_ => {
return Err(EbpfError::InvalidInstruction(self.pc + ebpf::ELF_INSN_DUMP_OFFSET));
}
}
},
// 64-bit ALU: same opcode scheme as the 32-bit group, without sign extension.
ebpf::ADD64_IMM => emit_sanitized_alu(self, OperandSize::S64, 0x01, 0, dst, insn.imm)?,
ebpf::ADD64_REG => emit_alu(self, OperandSize::S64, 0x01, src, dst, 0, None)?,
ebpf::SUB64_IMM => emit_sanitized_alu(self, OperandSize::S64, 0x29, 5, dst, insn.imm)?,
ebpf::SUB64_REG => emit_alu(self, OperandSize::S64, 0x29, src, dst, 0, None)?,
ebpf::MUL64_IMM | ebpf::DIV64_IMM | ebpf::MOD64_IMM =>
emit_muldivmod(self, insn.opc, dst, dst, Some(insn.imm))?,
ebpf::MUL64_REG | ebpf::DIV64_REG | ebpf::MOD64_REG =>
emit_muldivmod(self, insn.opc, src, dst, None)?,
ebpf::OR64_IMM => emit_sanitized_alu(self, OperandSize::S64, 0x09, 1, dst, insn.imm)?,
ebpf::OR64_REG => emit_alu(self, OperandSize::S64, 0x09, src, dst, 0, None)?,
ebpf::AND64_IMM => emit_sanitized_alu(self, OperandSize::S64, 0x21, 4, dst, insn.imm)?,
ebpf::AND64_REG => emit_alu(self, OperandSize::S64, 0x21, src, dst, 0, None)?,
ebpf::LSH64_IMM => emit_shift(self, OperandSize::S64, 4, R11, dst, Some(insn.imm))?,
ebpf::LSH64_REG => emit_shift(self, OperandSize::S64, 4, src, dst, None)?,
ebpf::RSH64_IMM => emit_shift(self, OperandSize::S64, 5, R11, dst, Some(insn.imm))?,
ebpf::RSH64_REG => emit_shift(self, OperandSize::S64, 5, src, dst, None)?,
ebpf::NEG64 => emit_alu(self, OperandSize::S64, 0xf7, 3, dst, 0, None)?,
ebpf::XOR64_IMM => emit_sanitized_alu(self, OperandSize::S64, 0x31, 6, dst, insn.imm)?,
ebpf::XOR64_REG => emit_alu(self, OperandSize::S64, 0x31, src, dst, 0, None)?,
ebpf::MOV64_IMM => {
if self.config.sanitize_user_provided_values {
emit_sanitized_load_immediate(self, OperandSize::S64, dst, insn.imm)?;
} else {
X86Instruction::load_immediate(OperandSize::S64, dst, insn.imm).emit(self)?;
}
}
ebpf::MOV64_REG => X86Instruction::mov(OperandSize::S64, src, dst).emit(self)?,
ebpf::ARSH64_IMM => emit_shift(self, OperandSize::S64, 7, R11, dst, Some(insn.imm))?,
ebpf::ARSH64_REG => emit_shift(self, OperandSize::S64, 7, src, dst, None)?,
// Unconditional jump; R11 carries target_pc for exception reporting.
ebpf::JA => {
emit_validate_and_profile_instruction_count(self, false, Some(target_pc))?;
X86Instruction::load_immediate(OperandSize::S64, R11, target_pc as i64).emit(self)?;
emit_jmp(self, target_pc)?;
},
// Conditional branches. The hex constants are x86 Jcc condition bytes
// (0x84 je, 0x87 ja, 0x83 jae, 0x82 jb, 0x86 jbe, 0x85 jne,
// 0x8f jg, 0x8d jge, 0x8c jl, 0x8e jle); `true` selects a bit-test (JSET).
ebpf::JEQ_IMM => emit_conditional_branch_imm(self, 0x84, false, insn.imm, dst, target_pc)?,
ebpf::JEQ_REG => emit_conditional_branch_reg(self, 0x84, false, src, dst, target_pc)?,
ebpf::JGT_IMM => emit_conditional_branch_imm(self, 0x87, false, insn.imm, dst, target_pc)?,
ebpf::JGT_REG => emit_conditional_branch_reg(self, 0x87, false, src, dst, target_pc)?,
ebpf::JGE_IMM => emit_conditional_branch_imm(self, 0x83, false, insn.imm, dst, target_pc)?,
ebpf::JGE_REG => emit_conditional_branch_reg(self, 0x83, false, src, dst, target_pc)?,
ebpf::JLT_IMM => emit_conditional_branch_imm(self, 0x82, false, insn.imm, dst, target_pc)?,
ebpf::JLT_REG => emit_conditional_branch_reg(self, 0x82, false, src, dst, target_pc)?,
ebpf::JLE_IMM => emit_conditional_branch_imm(self, 0x86, false, insn.imm, dst, target_pc)?,
ebpf::JLE_REG => emit_conditional_branch_reg(self, 0x86, false, src, dst, target_pc)?,
ebpf::JSET_IMM => emit_conditional_branch_imm(self, 0x85, true, insn.imm, dst, target_pc)?,
ebpf::JSET_REG => emit_conditional_branch_reg(self, 0x85, true, src, dst, target_pc)?,
ebpf::JNE_IMM => emit_conditional_branch_imm(self, 0x85, false, insn.imm, dst, target_pc)?,
ebpf::JNE_REG => emit_conditional_branch_reg(self, 0x85, false, src, dst, target_pc)?,
ebpf::JSGT_IMM => emit_conditional_branch_imm(self, 0x8f, false, insn.imm, dst, target_pc)?,
ebpf::JSGT_REG => emit_conditional_branch_reg(self, 0x8f, false, src, dst, target_pc)?,
ebpf::JSGE_IMM => emit_conditional_branch_imm(self, 0x8d, false, insn.imm, dst, target_pc)?,
ebpf::JSGE_REG => emit_conditional_branch_reg(self, 0x8d, false, src, dst, target_pc)?,
ebpf::JSLT_IMM => emit_conditional_branch_imm(self, 0x8c, false, insn.imm, dst, target_pc)?,
ebpf::JSLT_REG => emit_conditional_branch_reg(self, 0x8c, false, src, dst, target_pc)?,
ebpf::JSLE_IMM => emit_conditional_branch_imm(self, 0x8e, false, insn.imm, dst, target_pc)?,
ebpf::JSLE_REG => emit_conditional_branch_reg(self, 0x8e, false, src, dst, target_pc)?,
// CALL: three cases — registered syscall, resolved BPF-to-BPF call,
// or unresolved symbol (reported through the executable's vtable).
ebpf::CALL_IMM => {
if let Some(syscall) = executable.get_syscall_registry().lookup_syscall(insn.imm as u32) {
if self.config.enable_instruction_meter {
// Flush the consumed-instruction delta into the meter before the syscall.
emit_validate_and_profile_instruction_count(self, true, Some(0))?;
X86Instruction::load(OperandSize::S64, RBP, R11, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::PrevInsnMeter))).emit(self)?;
emit_alu(self, OperandSize::S64, 0x29, ARGUMENT_REGISTERS[0], R11, 0, None)?;
X86Instruction::mov(OperandSize::S64, R11, ARGUMENT_REGISTERS[0]).emit(self)?;
X86Instruction::load(OperandSize::S64, RBP, R11, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::InsnMeterPtr))).emit(self)?;
emit_rust_call(self, I::consume as *const u8, &[
Argument { index: 1, value: Value::Register(ARGUMENT_REGISTERS[0]) },
Argument { index: 0, value: Value::Register(R11) },
], None, false)?;
}
// Fetch the syscall's context object pointer, then invoke the syscall
// with BPF r1..r5 as arguments plus memory mapping and result slot.
X86Instruction::load(OperandSize::S64, R10, RAX, X86IndirectAccess::Offset((SYSCALL_CONTEXT_OBJECTS_OFFSET + syscall.context_object_slot) as i32 * 8 + self.program_argument_key)).emit(self)?;
emit_rust_call(self, syscall.function as *const u8, &[
Argument { index: 7, value: Value::RegisterIndirect(RBP, slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr), false) },
Argument { index: 6, value: Value::RegisterPlusConstant32(R10, self.program_argument_key, false) }, Argument { index: 5, value: Value::Register(ARGUMENT_REGISTERS[5]) },
Argument { index: 4, value: Value::Register(ARGUMENT_REGISTERS[4]) },
Argument { index: 3, value: Value::Register(ARGUMENT_REGISTERS[3]) },
Argument { index: 2, value: Value::Register(ARGUMENT_REGISTERS[2]) },
Argument { index: 1, value: Value::Register(ARGUMENT_REGISTERS[1]) },
Argument { index: 0, value: Value::Register(RAX) }, ], None, false)?;
if self.config.enable_instruction_meter {
// Re-read the remaining budget after the syscall (it may have consumed).
X86Instruction::load(OperandSize::S64, RBP, R11, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::InsnMeterPtr))).emit(self)?;
emit_rust_call(self, I::get_remaining as *const u8, &[
Argument { index: 0, value: Value::Register(R11) },
], Some(ARGUMENT_REGISTERS[0]), false)?;
X86Instruction::store(OperandSize::S64, ARGUMENT_REGISTERS[0], RBP, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::PrevInsnMeter))).emit(self)?;
emit_undo_profile_instruction_count(self, 0)?;
}
// Copy the syscall result into r0 and trap if it returned an error.
X86Instruction::load(OperandSize::S64, RBP, R11, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr))).emit(self)?;
X86Instruction::load(OperandSize::S64, R11, REGISTER_MAP[0], X86IndirectAccess::Offset(8)).emit(self)?;
X86Instruction::cmp_immediate(OperandSize::S64, R11, 0, Some(X86IndirectAccess::Offset(0))).emit(self)?;
X86Instruction::load_immediate(OperandSize::S64, R11, self.pc as i64).emit(self)?;
emit_jcc(self, 0x85, TARGET_PC_RUST_EXCEPTION)?;
} else if let Some(target_pc) = executable.lookup_bpf_function(insn.imm as u32) {
emit_bpf_call(self, Value::Constant64(target_pc as i64, false), self.result.pc_section.len() - 1)?;
} else {
// Unknown symbol: call back into the executable's report method via
// its trait-object vtable, then raise the recorded exception.
let fat_ptr: DynTraitFatPointer = unsafe { std::mem::transmute(executable) };
emit_rust_call(self, fat_ptr.vtable.methods[REPORT_UNRESOLVED_SYMBOL_INDEX], &[
Argument { index: 2, value: Value::Constant64(self.pc as i64, false) },
Argument { index: 1, value: Value::Constant64(fat_ptr.data as i64, false) },
Argument { index: 0, value: Value::RegisterIndirect(RBP, slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr), false) },
], None, true)?;
X86Instruction::load_immediate(OperandSize::S64, R11, self.pc as i64).emit(self)?;
emit_validate_instruction_count(self, false, None)?;
emit_jmp(self, TARGET_PC_RUST_EXCEPTION)?;
}
},
ebpf::CALL_REG => {
emit_bpf_call(self, Value::Register(REGISTER_MAP[insn.imm as usize]), self.result.pc_section.len() - 1)?;
},
// EXIT: pop one BPF stack frame; if the stack pointer drops back to the
// initial frame, leave the program entirely via TARGET_PC_EXIT.
ebpf::EXIT => {
emit_validate_and_profile_instruction_count(self, true, Some(0))?;
let stack_frame_size = self.config.stack_frame_size as i64 * if self.config.enable_stack_frame_gaps { 2 } else { 1 };
X86Instruction::load(OperandSize::S64, RBP, REGISTER_MAP[STACK_REG], X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::BpfStackPtr))).emit(self)?;
emit_alu(self, OperandSize::S64, 0x81, 5, REGISTER_MAP[STACK_REG], stack_frame_size, None)?; X86Instruction::store(OperandSize::S64, REGISTER_MAP[STACK_REG], RBP, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::BpfStackPtr))).emit(self)?;
X86Instruction::load_immediate(OperandSize::S64, R11, MM_STACK_START as i64 + self.config.stack_frame_size as i64).emit(self)?;
X86Instruction::cmp(OperandSize::S64, R11, REGISTER_MAP[STACK_REG], None).emit(self)?;
emit_jcc(self, 0x82, TARGET_PC_EXIT)?;
X86Instruction::return_near().emit(self)?;
},
_ => return Err(EbpfError::UnsupportedInstruction(self.pc + ebpf::ELF_INSN_DUMP_OFFSET)),
}
self.pc += 1;
}
// Falling off the end of the program: record the final offset and emit
// the ExecutionOverrun trap.
self.result.pc_section[self.pc] = self.offset_in_text_section as u64;
emit_validate_and_profile_instruction_count(self, true, Some(self.pc + 2))?;
X86Instruction::load_immediate(OperandSize::S64, R11, self.pc as i64).emit(self)?;
emit_set_exception_kind::<E>(self, EbpfError::ExecutionOverrun(0))?;
emit_jmp(self, TARGET_PC_EXCEPTION_AT)?;
self.generate_epilogue::<E>()?;
// Patch all recorded jump placeholders, then make the text section executable.
self.resolve_jumps();
self.result.seal()?;
// Clear the randomization keys so they do not linger in compiler state.
self.environment_stack_key = 0;
self.program_argument_key = 0;
Ok(())
}
/// Emits the shared helper routines that translated code calls into:
/// the tracing hook, the x86-offset-to-eBPF-pc translator, and one
/// address-translation routine per (access type, access width) pair.
fn generate_helper_routines<E: UserDefinedError>(&mut self) -> Result<(), EbpfError<E>> {
if self.config.enable_instruction_tracing {
// TARGET_PC_TRACE: snapshot all mapped registers on the stack and hand
// the snapshot to Tracer::trace, then restore and return.
set_anchor(self, TARGET_PC_TRACE);
X86Instruction::push(R11).emit(self)?;
for reg in REGISTER_MAP.iter().rev() {
X86Instruction::push(*reg).emit(self)?;
}
X86Instruction::mov(OperandSize::S64, RSP, REGISTER_MAP[0]).emit(self)?;
// Reserve red-zone space for the call, invoke the tracer, then drop it again.
emit_alu(self, OperandSize::S64, 0x81, 0, RSP, - 8 * 3, None)?; emit_rust_call(self, Tracer::trace as *const u8, &[
Argument { index: 1, value: Value::Register(REGISTER_MAP[0]) }, Argument { index: 0, value: Value::RegisterIndirect(R10, std::mem::size_of::<MemoryMapping>() as i32 + self.program_argument_key, false) }, ], None, false)?;
emit_alu(self, OperandSize::S64, 0x81, 0, RSP, 8 * 3, None)?; X86Instruction::pop(REGISTER_MAP[0]).emit(self)?;
// Discard the remaining saved registers without restoring them individually.
emit_alu(self, OperandSize::S64, 0x81, 0, RSP, 8 * (REGISTER_MAP.len() - 1) as i64, None)?; X86Instruction::pop(R11).emit(self)?;
X86Instruction::return_near().emit(self)?;
}
// TARGET_PC_TRANSLATE_PC: linear scan over pc_section to map the text-section
// offset in R11 back to an eBPF pc (index = (entry_ptr - base_ptr) >> 3).
set_anchor(self, TARGET_PC_TRANSLATE_PC);
X86Instruction::push(REGISTER_MAP[0]).emit(self)?; X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[0], self.result.pc_section.as_ptr() as i64 - 8).emit(self)?; set_anchor(self, TARGET_PC_TRANSLATE_PC_LOOP); emit_alu(self, OperandSize::S64, 0x81, 0, REGISTER_MAP[0], 8, None)?; X86Instruction::cmp(OperandSize::S64, R11, REGISTER_MAP[0], Some(X86IndirectAccess::Offset(8))).emit(self)?; emit_jcc(self, 0x86, TARGET_PC_TRANSLATE_PC_LOOP)?; X86Instruction::mov(OperandSize::S64, REGISTER_MAP[0], R11).emit(self)?; X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[0], self.result.pc_section.as_ptr() as i64).emit(self)?; emit_alu(self, OperandSize::S64, 0x29, REGISTER_MAP[0], R11, 0, None)?; emit_alu(self, OperandSize::S64, 0xc1, 5, R11, 3, None)?; X86Instruction::pop(REGISTER_MAP[0]).emit(self)?; X86Instruction::return_near().emit(self)?;
// One translation routine per access kind and width (1/2/4/8 bytes).
for (access_type, len) in &[
(AccessType::Load, 1i32),
(AccessType::Load, 2i32),
(AccessType::Load, 4i32),
(AccessType::Load, 8i32),
(AccessType::Store, 1i32),
(AccessType::Store, 2i32),
(AccessType::Store, 4i32),
(AccessType::Store, 8i32),
] {
// Anchor index: log2(len) within the Load block, +4 for the Store block.
let target_offset = len.trailing_zeros() as usize + 4 * (*access_type as usize);
set_anchor(self, TARGET_PC_TRANSLATE_MEMORY_ADDRESS + target_offset);
X86Instruction::push(R11).emit(self)?;
X86Instruction::push(RAX).emit(self)?;
X86Instruction::push(RCX).emit(self)?;
let stack_offset = if self.config.enable_stack_frame_gaps {
X86Instruction::push(RDX).emit(self)?;
24
} else {
16
};
// Extract the region index from the upper address bits and bounds-check it
// against the number of memory regions.
X86Instruction::mov(OperandSize::S64, R11, RAX).emit(self)?; emit_alu(self, OperandSize::S64, 0xc1, 5, RAX, ebpf::VIRTUAL_ADDRESS_BITS as i64, None)?; X86Instruction::cmp(OperandSize::S64, RAX, R10, Some(X86IndirectAccess::Offset(self.program_argument_key + 8))).emit(self)?; emit_jcc(self, 0x86, TARGET_PC_MEMORY_ACCESS_VIOLATION + target_offset)?;
debug_assert_eq!(1 << 5, std::mem::size_of::<MemoryRegion>());
// Locate the MemoryRegion entry (regions are 32 bytes each); stores
// additionally require the region's writability flag to be set.
emit_alu(self, OperandSize::S64, 0xc1, 4, RAX, 5, None)?; emit_alu(self, OperandSize::S64, 0x03, RAX, R10, 0, Some(X86IndirectAccess::Offset(self.program_argument_key)))?; if *access_type == AccessType::Store {
X86Instruction::cmp_immediate(OperandSize::S8, RAX, 0, Some(X86IndirectAccess::Offset(25))).emit(self)?; emit_jcc(self, 0x84, TARGET_PC_MEMORY_ACCESS_VIOLATION + target_offset)?;
}
// Mask off the region index bits, then (optionally) validate the
// stack-frame-gap addressing scheme.
X86Instruction::load_immediate(OperandSize::S64, RCX, (1i64 << ebpf::VIRTUAL_ADDRESS_BITS) - 1).emit(self)?; emit_alu(self, OperandSize::S64, 0x21, RCX, R11, 0, None)?; if self.config.enable_stack_frame_gaps {
X86Instruction::load(OperandSize::S8, RAX, RCX, X86IndirectAccess::Offset(24)).emit(self)?; X86Instruction::mov(OperandSize::S64, R11, RDX).emit(self)?; emit_alu(self, OperandSize::S64, 0xd3, 5, RDX, 0, None)?; X86Instruction::test_immediate(OperandSize::S64, RDX, 1, None).emit(self)?; emit_jcc(self, 0x85, TARGET_PC_MEMORY_ACCESS_VIOLATION + target_offset)?;
X86Instruction::load_immediate(OperandSize::S64, RDX, -1).emit(self)?; emit_alu(self, OperandSize::S64, 0xd3, 4, RDX, 0, None)?; X86Instruction::mov(OperandSize::S64, RDX, RCX).emit(self)?; emit_alu(self, OperandSize::S64, 0xf7, 2, RCX, 0, None)?; emit_alu(self, OperandSize::S64, 0x21, R11, RCX, 0, None)?; emit_alu(self, OperandSize::S64, 0x21, RDX, R11, 0, None)?; emit_alu(self, OperandSize::S64, 0xc1, 5, R11, 1, None)?; emit_alu(self, OperandSize::S64, 0x09, RCX, R11, 0, None)?; }
// Check that offset + len stays within the region, then add the region's
// host base address to produce the translated pointer in R11.
X86Instruction::lea(OperandSize::S64, R11, RCX, Some(X86IndirectAccess::Offset(*len))).emit(self)?; X86Instruction::cmp(OperandSize::S8, RCX, RAX, Some(X86IndirectAccess::Offset(16))).emit(self)?; emit_jcc(self, 0x82, TARGET_PC_MEMORY_ACCESS_VIOLATION + target_offset)?;
emit_alu(self, OperandSize::S64, 0x03, R11, RAX, 0, Some(X86IndirectAccess::Offset(0)))?; if self.config.enable_stack_frame_gaps {
X86Instruction::pop(RDX).emit(self)?;
}
X86Instruction::pop(RCX).emit(self)?;
X86Instruction::pop(RAX).emit(self)?;
// Skip the saved R11 slot: the translated address stays in R11.
emit_alu(self, OperandSize::S64, 0x81, 0, RSP, 8, None)?;
X86Instruction::return_near().emit(self)?;
// Violation path: reconstruct the faulting VM address from the stack,
// build the AccessViolation error, and jump to the exception handler.
set_anchor(self, TARGET_PC_MEMORY_ACCESS_VIOLATION + target_offset);
emit_alu(self, OperandSize::S64, 0x31, R11, R11, 0, None)?; X86Instruction::load(OperandSize::S64, RSP, R11, X86IndirectAccess::OffsetIndexShift(stack_offset, R11, 0)).emit(self)?;
emit_rust_call(self, MemoryMapping::generate_access_violation::<UserError> as *const u8, &[
Argument { index: 3, value: Value::Register(R11) }, Argument { index: 4, value: Value::Constant64(*len as i64, false) },
Argument { index: 2, value: Value::Constant64(*access_type as i64, false) },
Argument { index: 1, value: Value::RegisterPlusConstant32(R10, self.program_argument_key, false) }, Argument { index: 0, value: Value::RegisterIndirect(RBP, slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr), false) }, ], None, true)?;
emit_alu(self, OperandSize::S64, 0x81, 0, RSP, stack_offset as i64 + 8, None)?; X86Instruction::pop(R11).emit(self)?; emit_call(self, TARGET_PC_TRANSLATE_PC)?;
emit_jmp(self, TARGET_PC_EXCEPTION_AT)?;
}
Ok(())
}
/// Emits the exception-handler stubs. Each anchor sets up the error payload
/// and then funnels into TARGET_PC_EXCEPTION_AT / TARGET_PC_EPILOGUE.
/// NOTE(review): anchor order matters — CALLX_UNSUPPORTED falls through into
/// CALL_UNSUPPORTED, and CALL_UNSUPPORTED falls through into EXCEPTION_AT.
fn generate_exception_handlers<E: UserDefinedError>(&mut self) -> Result<(), EbpfError<E>> {
set_anchor(self, TARGET_PC_CALL_EXCEEDED_MAX_INSTRUCTIONS);
X86Instruction::mov(OperandSize::S64, ARGUMENT_REGISTERS[0], R11).emit(self)?;
emit_set_exception_kind::<E>(self, EbpfError::ExceededMaxInstructions(0, 0))?;
emit_profile_instruction_count_of_exception(self, true)?;
emit_jmp(self, TARGET_PC_EPILOGUE)?;
set_anchor(self, TARGET_PC_CALL_DEPTH_EXCEEDED);
emit_set_exception_kind::<E>(self, EbpfError::CallDepthExceeded(0, 0))?;
// Fill in the max_call_depth payload field before raising.
X86Instruction::store_immediate(OperandSize::S64, R10, X86IndirectAccess::Offset(24), self.config.max_call_depth as i64).emit(self)?; emit_jmp(self, TARGET_PC_EXCEPTION_AT)?;
set_anchor(self, TARGET_PC_CALL_OUTSIDE_TEXT_SEGMENT);
emit_set_exception_kind::<E>(self, EbpfError::CallOutsideTextSegment(0, 0))?;
// Store the offending target address (in r0's host register) into the payload.
X86Instruction::store(OperandSize::S64, REGISTER_MAP[0], R10, X86IndirectAccess::Offset(24)).emit(self)?; emit_jmp(self, TARGET_PC_EXCEPTION_AT)?;
set_anchor(self, TARGET_PC_DIV_BY_ZERO);
emit_set_exception_kind::<E>(self, EbpfError::DivideByZero(0))?;
emit_jmp(self, TARGET_PC_EXCEPTION_AT)?;
// CALLX variant: recover the eBPF pc from the return address on the stack
// first, then fall through to the common unsupported-instruction handler.
set_anchor(self, TARGET_PC_CALLX_UNSUPPORTED_INSTRUCTION);
emit_alu(self, OperandSize::S64, 0x31, R11, R11, 0, None)?; X86Instruction::load(OperandSize::S64, RSP, R11, X86IndirectAccess::OffsetIndexShift(0, R11, 0)).emit(self)?;
emit_call(self, TARGET_PC_TRANSLATE_PC)?;
emit_alu(self, OperandSize::S64, 0x81, 0, R11, 2, None)?;
set_anchor(self, TARGET_PC_CALL_UNSUPPORTED_INSTRUCTION);
if self.config.enable_instruction_tracing {
emit_call(self, TARGET_PC_TRACE)?;
}
emit_set_exception_kind::<E>(self, EbpfError::UnsupportedInstruction(0))?;
// Common exception exit: settle the instruction meter, then epilogue.
set_anchor(self, TARGET_PC_EXCEPTION_AT);
if self.config.enable_instruction_meter {
emit_validate_instruction_count(self, false, None)?;
}
emit_profile_instruction_count_of_exception(self, true)?;
emit_jmp(self, TARGET_PC_EPILOGUE)?;
// Exceptions raised by Rust callees (syscalls etc.): the payload is already
// written, so only the profiling adjustment differs.
set_anchor(self, TARGET_PC_RUST_EXCEPTION);
emit_profile_instruction_count_of_exception(self, false)?;
emit_jmp(self, TARGET_PC_EPILOGUE)
}
/// Emits the function prologue: saves callee-saved host registers, pushes the
/// values that make up the environment stack frame (BPF stack pointer,
/// instruction-meter state, scratch slots), points RBP at that frame
/// (displaced by `environment_stack_key` for randomization), and zeroes
/// the mapped BPF registers.
/// NOTE(review): the push order here must mirror the EnvironmentStackSlot
/// enum consumed by `slot_on_environment_stack` — confirm against that enum.
fn generate_prologue<E: UserDefinedError, I: InstructionMeter>(&mut self) -> Result<(), EbpfError<E>> {
for reg in CALLEE_SAVED_REGISTERS.iter() {
X86Instruction::push(*reg).emit(self)?;
}
// Initial BPF stack pointer: one frame above MM_STACK_START.
X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[STACK_REG], MM_STACK_START as i64 + self.config.stack_frame_size as i64).emit(self)?;
X86Instruction::push(REGISTER_MAP[STACK_REG]).emit(self)?;
X86Instruction::push(ARGUMENT_REGISTERS[0]).emit(self)?;
// Seed the instruction meter with the caller-provided remaining budget.
emit_rust_call(self, I::get_remaining as *const u8, &[
Argument { index: 0, value: Value::Register(ARGUMENT_REGISTERS[3]) },
], Some(ARGUMENT_REGISTERS[0]), false)?;
X86Instruction::push(ARGUMENT_REGISTERS[0]).emit(self)?;
X86Instruction::push(ARGUMENT_REGISTERS[3]).emit(self)?;
// Two zeroed slots (R11 is xor-cleared first).
emit_alu(self, OperandSize::S64, 0x31, R11, R11, 0, None)?; X86Instruction::push(R11).emit(self)?;
X86Instruction::push(R11).emit(self)?;
// RBP = frame base, offset by the randomized environment stack key.
X86Instruction::mov(OperandSize::S64, RSP, RBP).emit(self)?;
emit_alu(self, OperandSize::S64, 0x81, 0, RBP, 8 * (EnvironmentStackSlot::SlotCount as i64 - 1 + self.environment_stack_key as i64), None)?;
// R10 = program argument pointer, pre-displaced by the randomized key so
// later accesses add the key back.
X86Instruction::lea(OperandSize::S64, ARGUMENT_REGISTERS[2], R10, Some(X86IndirectAccess::Offset(-self.program_argument_key))).emit(self)?;
// Zero all mapped BPF registers except r1 (input) and the stack register.
for reg in REGISTER_MAP.iter() {
if *reg != REGISTER_MAP[1] && *reg != REGISTER_MAP[STACK_REG] {
X86Instruction::load_immediate(OperandSize::S64, *reg, 0).emit(self)?;
}
}
Ok(())
}
/// Emits the exit paths: TARGET_PC_EXIT stores the successful return value
/// into the result slot, then falls through to TARGET_PC_EPILOGUE which
/// unwinds the host stack frame and returns to the caller of the JIT code.
fn generate_epilogue<E: UserDefinedError>(&mut self) -> Result<(), EbpfError<E>> {
set_anchor(self, TARGET_PC_EXIT);
// Write r0 into the result slot: value at +8, tag 0 at +0
// (presumably the Ok discriminant — confirm against ProgramResult layout).
X86Instruction::load(OperandSize::S64, RBP, R10, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr))).emit(self)?;
X86Instruction::store(OperandSize::S64, REGISTER_MAP[0], R10, X86IndirectAccess::Offset(8)).emit(self)?; X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[0], 0).emit(self)?;
X86Instruction::store(OperandSize::S64, REGISTER_MAP[0], R10, X86IndirectAccess::Offset(0)).emit(self)?;
set_anchor(self, TARGET_PC_EPILOGUE);
// Host-side callback invoked from the generated code to print stopwatch totals.
fn stopwatch_result(numerator: u64, denominator: u64) {
println!("Stop watch: {} / {} = {}", numerator, denominator, if denominator == 0 { 0.0 } else { numerator as f64 / denominator as f64 });
}
if self.stopwatch_is_active {
emit_rust_call(self, stopwatch_result as *const u8, &[
Argument { index: 1, value: Value::RegisterIndirect(RBP, slot_on_environment_stack(self, EnvironmentStackSlot::StopwatchDenominator), false) },
Argument { index: 0, value: Value::RegisterIndirect(RBP, slot_on_environment_stack(self, EnvironmentStackSlot::StopwatchNumerator), false) },
], None, false)?;
}
// Return the instruction count (held in ARGUMENT_REGISTERS[0]) in RAX.
X86Instruction::mov(OperandSize::S64, ARGUMENT_REGISTERS[0], RAX).emit(self)?;
// Rewind RSP past the environment frame to the saved callee registers.
X86Instruction::lea(OperandSize::S64, RBP, RSP, Some(X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::LastSavedRegister)))).emit(self)?;
for reg in CALLEE_SAVED_REGISTERS.iter().rev() {
X86Instruction::pop(*reg).emit(self)?;
}
X86Instruction::return_near().emit(self)
}
/// Emits a single-byte x86 NOP (0x90) with probability
/// `config.noop_instruction_ratio`, for machine-code diversification.
/// A ratio of 0.0 disables the feature without sampling the RNG.
pub fn emit_random_noop<E: UserDefinedError>(&mut self) -> Result<(), EbpfError<E>> {
let ratio = self.config.noop_instruction_ratio;
// Guard clause: when disabled, leave the RNG state untouched.
if ratio == 0.0 {
return Ok(());
}
if self.rng.gen_bool(ratio) {
// 0x90 is the one-byte x86 NOP opcode.
emit::<u8, E>(self, 0x90)
} else {
Ok(())
}
}
/// Back-patches all jump placeholders recorded during emission, then converts
/// pc_section entries from text-section offsets into absolute host addresses.
fn resolve_jumps(&mut self) {
// pc_section placeholders: store the resolved target offset directly.
for jump in &self.pc_section_jumps {
self.result.pc_section[jump.location] = jump.get_target_offset(self);
}
// Text-section placeholders: patch the 4-byte rel32 operand in place
// (relative to the end of the 4-byte displacement field).
for jump in &self.text_section_jumps {
let offset_value = jump.get_target_offset(self) as i32
- jump.location as i32 - std::mem::size_of::<i32>() as i32; unsafe {
// SAFETY: jump.location indexes into the owned text section; exactly
// 4 bytes (the displacement slot) are overwritten.
libc::memcpy(
self.result.text_section.as_ptr().add(jump.location) as *mut libc::c_void,
&offset_value as *const i32 as *const libc::c_void,
std::mem::size_of::<i32>(),
);
}
}
// Entries that point at the generic unsupported-instruction handler are
// redirected to the CALLX variant (entered via indirect calls, which need
// the extra pc-recovery preamble).
let call_unsupported_instruction = self.handler_anchors.get(&TARGET_PC_CALL_UNSUPPORTED_INSTRUCTION).unwrap();
let callx_unsupported_instruction = self.handler_anchors.get(&TARGET_PC_CALLX_UNSUPPORTED_INSTRUCTION).unwrap();
for offset in self.result.pc_section.iter_mut() {
if *offset == *call_unsupported_instruction as u64 {
*offset = *callx_unsupported_instruction as u64;
}
// Rebase from section-relative offset to an absolute host address.
*offset = unsafe { (self.result.text_section.as_ptr() as *const u8).add(*offset as usize) } as u64;
}
}
}