#![allow(clippy::deprecated_cfg_attr)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![doc(html_logo_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/misc/rbpf.png",
html_favicon_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/misc/rbpf.ico")]
#![warn(missing_docs)]
#![allow(unused_mut)]
#![allow(renamed_and_removed_lints)]
#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names, single_match, cast_lossless, doc_markdown, match_same_arms, unreadable_literal, new_ret_no_self))]
extern crate byteorder;
extern crate combine;
extern crate hash32;
extern crate time;
extern crate log;
use std::u32;
use std::collections::HashMap;
use std::io::{Error, ErrorKind};
use elf::EBpfElf;
use log::trace;
use ebpf::HelperContext;
use memory_region::{MemoryRegion, translate_addr};
pub mod assembler;
pub mod disassembler;
pub mod ebpf;
pub mod elf;
pub mod helpers;
pub mod insn_builder;
pub mod memory_region;
mod asm_parser;
#[cfg(not(windows))]
mod jit;
mod verifier;
/// Signature of a verifier: inspects raw eBPF bytecode and returns an error
/// when the program is rejected (the default is `verifier::check`).
pub type Verifier = fn(prog: &[u8]) -> Result<(), Error>;
/// Signature of a JIT-compiled program. `execute_program_jit` calls it with a
/// pointer to the input memory, the input length, and a third argument that is
/// currently always passed as 0; it returns the program's result value.
pub type JitProgram = unsafe fn(*mut u8, usize, usize) -> u64;
/// One frame of the BPF-to-BPF call stack: the memory region serving as the
/// frame's stack, the caller-saved scratch registers, and the instruction
/// offset to resume at when the callee exits.
#[derive(Clone, Debug)]
struct CallFrame {
// VM/host mapping of this frame's slice of the shared stack buffer
stack: MemoryRegion,
// scratch registers saved on call, restored on exit (len == ebpf::SCRATCH_REGS)
saved_reg: [u64; 4],
// pc to resume at after the callee executes EXIT
return_ptr: usize,
}
/// Fixed-depth stack of call frames for BPF-to-BPF calls, all backed by one
/// contiguous byte buffer allocated up front.
#[derive(Clone, Debug)]
struct CallFrames {
// contiguous buffer backing every frame's stack region
stack: Vec<u8>,
// index of the frame currently in use
frame: usize,
// per-frame state; fully initialized by `new`
frames: Vec<CallFrame>,
}
impl CallFrames {
    /// Create `depth` frames, each owning a `size`-byte slice of one shared
    /// stack buffer mapped into the VM starting at `ebpf::MM_STACK_START`.
    fn new(depth: usize, size: usize) -> Self {
        let empty_region = MemoryRegion {
            addr_host: 0,
            addr_vm: 0,
            len: 0,
        };
        let mut frames = CallFrames {
            stack: vec![0u8; depth * size],
            frame: 0,
            frames: vec![
                CallFrame {
                    stack: empty_region,
                    saved_reg: [0u64; ebpf::SCRATCH_REGS],
                    return_ptr: 0,
                };
                depth
            ],
        };
        // Point each frame at its own window of the shared buffer.
        for (i, frame) in frames.frames.iter_mut().enumerate() {
            let begin = i * size;
            let vm_addr = ebpf::MM_STACK_START + begin as u64;
            frame.stack = MemoryRegion::new_from_slice(&frames.stack[begin..begin + size], vm_addr);
        }
        frames
    }

    /// Clones of every frame's stack region (handed out so the interpreter can
    /// grant the program access to all of them).
    fn get_stacks(&self) -> Vec<MemoryRegion> {
        self.frames.iter().map(|f| f.stack.clone()).collect()
    }

    /// Virtual address of the last valid byte of the current frame's stack.
    fn get_stack_top(&self) -> u64 {
        let region = &self.frames[self.frame].stack;
        region.addr_vm + region.len - 1
    }

    /// Index of the frame currently in use.
    #[allow(dead_code)]
    fn get_frame_index(&self) -> usize {
        self.frame
    }

    /// Record `saved_reg` and `return_ptr` in the current frame, then advance
    /// to the next one. Returns the new frame's stack-top address, or an error
    /// when the maximum call depth would be exceeded.
    fn push(&mut self, saved_reg: &[u64], return_ptr: usize) -> Result<u64, Error> {
        let next = self.frame + 1;
        if next >= ebpf::MAX_CALL_DEPTH {
            return Err(Error::new(
                ErrorKind::Other,
                format!("Exceeded max BPF to BPF call depth of {:?}", ebpf::MAX_CALL_DEPTH),
            ));
        }
        {
            let current = &mut self.frames[self.frame];
            current.saved_reg.copy_from_slice(saved_reg);
            current.return_ptr = return_ptr;
        }
        self.frame = next;
        Ok(self.get_stack_top())
    }

    /// Step back to the previous frame, handing back its saved registers, its
    /// stack-top address, and the instruction offset to resume at. Errors when
    /// already at the root frame.
    fn pop(&mut self) -> Result<([u64; ebpf::SCRATCH_REGS], u64, usize), Error> {
        if self.frame == 0 {
            return Err(Error::new(ErrorKind::Other, "Attempted to exit root call frame"));
        }
        self.frame -= 1;
        let restored = self.frames[self.frame].saved_reg;
        let return_ptr = self.frames[self.frame].return_ptr;
        Ok((restored, self.get_stack_top(), return_ptr))
    }
}
/// A virtual machine that runs eBPF programs, either as raw bytecode or from
/// an ELF image, via the interpreter or (non-Windows) a JIT-compiled function.
pub struct EbpfVm<'a> {
// raw eBPF bytecode, when loaded via `new`/`set_program`
prog: Option<&'a [u8]>,
// parsed ELF image, when loaded via `set_elf`; checked before `prog`
elf: Option<EBpfElf>,
// verifier run on any program before it is accepted
verifier: Verifier,
// JIT-compiled program, populated by `jit_compile`
jit: Option<JitProgram>,
// helper functions callable from BPF, keyed by id / symbol-name hash
helpers: HashMap<u32, ebpf::Helper>,
// instruction budget for the interpreter; 0 means unlimited
max_insn_count: u64,
// number of instructions executed by the last `execute_program` run
last_insn_count: u64,
}
impl<'a> EbpfVm<'a> {
pub fn new(prog: Option<&'a [u8]>) -> Result<EbpfVm<'a>, Error> {
if let Some(prog) = prog {
verifier::check(prog)?;
}
Ok(EbpfVm {
prog: prog,
elf: None,
verifier: verifier::check,
jit: None,
helpers: HashMap::new(),
max_insn_count: 0,
last_insn_count: 0,
})
}
pub fn set_program(&mut self, prog: &'a [u8]) -> Result<(), Error> {
(self.verifier)(prog)?;
self.prog = Some(prog);
Ok(())
}
pub fn set_elf(&mut self, elf_bytes: &'a [u8]) -> Result<(), Error> {
let elf = EBpfElf::load(elf_bytes)?;
let (_, bytes) = elf.get_text_bytes()?;
(self.verifier)(bytes)?;
self.elf = Some(elf);
Ok(())
}
pub fn set_verifier(&mut self, verifier: Verifier) -> Result<(), Error> {
if let Some(ref elf) = self.elf {
let (_, bytes) = elf.get_text_bytes()?;
verifier(bytes)?;
} else if let Some(ref prog) = self.prog {
verifier(prog)?;
}
self.verifier = verifier;
Ok(())
}
pub fn set_max_instruction_count(&mut self, count: u64) -> Result<(), Error> {
self.max_insn_count = count;
Ok(())
}
pub fn get_last_instruction_count(&self) -> u64 {
self.last_insn_count
}
pub fn register_helper(&mut self,
key: u32,
function: ebpf::HelperFunction,
context: HelperContext,
) -> Result<(), Error> {
self.helpers.insert(key, ebpf::Helper{ function, context });
Ok(())
}
pub fn register_helper_ex(&mut self,
name: &str,
function: ebpf::HelperFunction,
context: HelperContext,
) -> Result<(), Error> {
self.helpers.insert(ebpf::hash_symbol_name(name.as_bytes()), ebpf::Helper{ function, context });
Ok(())
}
#[allow(unknown_lints)]
#[allow(cyclomatic_complexity)]
#[allow(cognitive_complexity)]
pub fn execute_program(&mut self,
mem: &[u8],
granted_ro_regions: &[MemoryRegion],
granted_rw_regions: &[MemoryRegion],
) -> Result<u64, Error> {
const U32MAX: u64 = u32::MAX as u64;
let mut frames = CallFrames::new(ebpf::MAX_CALL_DEPTH, ebpf::STACK_SIZE);
let mut ro_regions = Vec::new();
let mut rw_regions = Vec::new();
ro_regions.extend_from_slice(granted_ro_regions);
ro_regions.extend_from_slice(granted_rw_regions);
rw_regions.extend_from_slice(granted_rw_regions);
for ptr in frames.get_stacks() {
ro_regions.push(ptr.clone());
rw_regions.push(ptr.clone());
}
ro_regions.push(MemoryRegion::new_from_slice(&mem, ebpf::MM_INPUT_START));
rw_regions.push(MemoryRegion::new_from_slice(&mem, ebpf::MM_INPUT_START));
let mut entry: usize = 0;
let (prog_addr, prog) =
if let Some(ref elf) = self.elf {
if let Ok(sections) = elf.get_ro_sections() {
let regions: Vec<_> = sections.iter().map( |(addr, slice)| MemoryRegion::new_from_slice(slice, *addr)).collect();
ro_regions.extend(regions);
}
entry = elf.get_entrypoint_instruction_offset()?;
elf.get_text_bytes()?
} else if let Some(prog) = self.prog {
(ebpf::MM_PROGRAM_START, prog)
} else {
return Err(Error::new(ErrorKind::Other,
"Error: no program or elf set"));
};
ro_regions.push(MemoryRegion::new_from_slice(prog, prog_addr));
let mut reg: [u64; 11] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, frames.get_stack_top()];
if !mem.is_empty() {
reg[1] = ebpf::MM_INPUT_START;
}
let translate_load_addr = | addr: u64, len: usize, pc: usize | {
translate_addr(addr, len, "load", pc, &ro_regions)
};
let translate_store_addr = | addr: u64, len: usize, pc: usize | {
translate_addr(addr, len, "store", pc, &rw_regions)
};
let mut pc: usize = entry;
self.last_insn_count = 0;
while pc * ebpf::INSN_SIZE < prog.len() {
trace!(" BPF: {:5?} {:016x?} frame {:?} pc {:4?} {}",
self.last_insn_count,
reg,
frames.get_frame_index(),
pc + ebpf::ELF_INSN_DUMP_OFFSET,
disassembler::to_insn_vec(&prog[pc * ebpf::INSN_SIZE..])[0].desc);
let insn = ebpf::get_insn(prog, pc);
let dst = insn.dst as usize;
let src = insn.src as usize;
pc += 1;
self.last_insn_count += 1;
match insn.opc {
ebpf::LD_ABS_B => {
let vm_addr = mem.as_ptr() as u64 + (insn.imm as u32) as u64;
let host_ptr = translate_load_addr(vm_addr, 8, pc)? as *const u8;
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_ABS_H => {
let vm_addr = mem.as_ptr() as u64 + (insn.imm as u32) as u64;
let host_ptr = translate_load_addr(vm_addr, 8, pc)? as *const u16;
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_ABS_W => {
let vm_addr = mem.as_ptr() as u64 + (insn.imm as u32) as u64;
let host_ptr = translate_load_addr(vm_addr, 8, pc)? as *const u32;
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_ABS_DW => {
let vm_addr = mem.as_ptr() as u64 + (insn.imm as u32) as u64;
let host_ptr = translate_load_addr(vm_addr, 8, pc)? as *const u64;
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_IND_B => {
let vm_addr = mem.as_ptr() as u64 + reg[src] + (insn.imm as u32) as u64;
let host_ptr = translate_load_addr(vm_addr, 8, pc)? as *const u8;
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_IND_H => {
let vm_addr = mem.as_ptr() as u64 + reg[src] + (insn.imm as u32) as u64;
let host_ptr = translate_load_addr(vm_addr, 8, pc)? as *const u16;
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_IND_W => {
let vm_addr = mem.as_ptr() as u64 + reg[src] + (insn.imm as u32) as u64;
let host_ptr = translate_load_addr(vm_addr, 8, pc)? as *const u32;
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_IND_DW => {
let vm_addr = mem.as_ptr() as u64 + reg[src] + (insn.imm as u32) as u64;
let host_ptr = translate_load_addr(vm_addr, 8, pc)? as *const u64;
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_DW_IMM => {
let next_insn = ebpf::get_insn(prog, pc);
pc += 1;
reg[dst] = (insn.imm as u32) as u64 + ((next_insn.imm as u64) << 32);
},
ebpf::LD_B_REG => {
#[allow(cast_ptr_alignment)]
let vm_addr = (reg[src] as i64 + insn.off as i64) as u64;
let host_ptr = translate_load_addr(vm_addr, 1, pc)? as *const u8;
reg[dst] = unsafe { *host_ptr as u64 };
},
ebpf::LD_H_REG => {
#[allow(cast_ptr_alignment)]
let vm_addr = (reg[src] as i64 + insn.off as i64) as u64;
let host_ptr = translate_load_addr(vm_addr, 2, pc)? as *const u16;
reg[dst] = unsafe { *host_ptr as u64 };
},
ebpf::LD_W_REG => {
#[allow(cast_ptr_alignment)]
let vm_addr = (reg[src] as i64 + insn.off as i64) as u64;
let host_ptr = translate_load_addr(vm_addr, 4, pc)? as *const u32;
reg[dst] = unsafe { *host_ptr as u64 };
},
ebpf::LD_DW_REG => {
#[allow(cast_ptr_alignment)]
let vm_addr = (reg[src] as i64 + insn.off as i64) as u64;
let host_ptr = translate_load_addr(vm_addr, 8, pc)? as *const u64;
reg[dst] = unsafe { *host_ptr as u64 };
},
ebpf::ST_B_IMM => {
let vm_addr = (reg[dst] as i64 + insn.off as i64) as u64;
let host_ptr = translate_store_addr(vm_addr, 1, pc)? as *mut u8;
unsafe { *host_ptr = insn.imm as u8 };
},
ebpf::ST_H_IMM => {
#[allow(cast_ptr_alignment)]
let vm_addr = (reg[dst] as i64 + insn.off as i64) as u64;
let host_ptr = translate_store_addr(vm_addr, 2, pc)? as *mut u16;
unsafe { *host_ptr = insn.imm as u16 };
},
ebpf::ST_W_IMM => {
#[allow(cast_ptr_alignment)]
let vm_addr = (reg[dst] as i64 + insn.off as i64) as u64;
let host_ptr = translate_store_addr(vm_addr, 4, pc)? as *mut u32;
unsafe { *host_ptr = insn.imm as u32 };
},
ebpf::ST_DW_IMM => {
#[allow(cast_ptr_alignment)]
let vm_addr = (reg[dst] as i64 + insn.off as i64) as u64;
let host_ptr = translate_store_addr(vm_addr, 8, pc)? as *mut u64;
unsafe { *host_ptr = insn.imm as u64 };
},
ebpf::ST_B_REG => {
let vm_addr = (reg[dst] as i64 + insn.off as i64) as u64;
let host_ptr = translate_store_addr(vm_addr, 1, pc)? as *mut u8;
unsafe { *host_ptr = reg[src] as u8 };
},
ebpf::ST_H_REG => {
#[allow(cast_ptr_alignment)]
let vm_addr = (reg[dst] as i64 + insn.off as i64) as u64;
let host_ptr = translate_store_addr(vm_addr, 2, pc)? as *mut u16;
unsafe { *host_ptr = reg[src] as u16 };
},
ebpf::ST_W_REG => {
#[allow(cast_ptr_alignment)]
let vm_addr = (reg[dst] as i64 + insn.off as i64) as u64;
let host_ptr = translate_store_addr(vm_addr, 4, pc)? as *mut u32;
unsafe { *host_ptr = reg[src] as u32 };
},
ebpf::ST_DW_REG => {
#[allow(cast_ptr_alignment)]
let vm_addr = (reg[dst] as i64 + insn.off as i64) as u64;
let host_ptr = translate_store_addr(vm_addr, 8, pc)? as *mut u64;
unsafe { *host_ptr = reg[src] as u64 };
},
ebpf::ST_W_XADD => unimplemented!(),
ebpf::ST_DW_XADD => unimplemented!(),
ebpf::ADD32_IMM => reg[dst] = (reg[dst] as i32).wrapping_add(insn.imm) as u64,
ebpf::ADD32_REG => reg[dst] = (reg[dst] as i32).wrapping_add(reg[src] as i32) as u64,
ebpf::SUB32_IMM => reg[dst] = (reg[dst] as i32).wrapping_sub(insn.imm) as u64,
ebpf::SUB32_REG => reg[dst] = (reg[dst] as i32).wrapping_sub(reg[src] as i32) as u64,
ebpf::MUL32_IMM => reg[dst] = (reg[dst] as i32).wrapping_mul(insn.imm) as u64,
ebpf::MUL32_REG => reg[dst] = (reg[dst] as i32).wrapping_mul(reg[src] as i32) as u64,
ebpf::DIV32_IMM => reg[dst] = (reg[dst] as u32 / insn.imm as u32) as u64,
ebpf::DIV32_REG => {
if reg[src] == 0 {
return Err(Error::new(ErrorKind::Other,"Error: division by 0"));
}
reg[dst] = (reg[dst] as u32 / reg[src] as u32) as u64;
},
ebpf::OR32_IMM => reg[dst] = (reg[dst] as u32 | insn.imm as u32) as u64,
ebpf::OR32_REG => reg[dst] = (reg[dst] as u32 | reg[src] as u32) as u64,
ebpf::AND32_IMM => reg[dst] = (reg[dst] as u32 & insn.imm as u32) as u64,
ebpf::AND32_REG => reg[dst] = (reg[dst] as u32 & reg[src] as u32) as u64,
ebpf::LSH32_IMM => reg[dst] = (reg[dst] as u32).wrapping_shl(insn.imm as u32) as u64,
ebpf::LSH32_REG => reg[dst] = (reg[dst] as u32).wrapping_shl(reg[src] as u32) as u64,
ebpf::RSH32_IMM => reg[dst] = (reg[dst] as u32).wrapping_shr(insn.imm as u32) as u64,
ebpf::RSH32_REG => reg[dst] = (reg[dst] as u32).wrapping_shr(reg[src] as u32) as u64,
ebpf::NEG32 => { reg[dst] = (reg[dst] as i32).wrapping_neg() as u64; reg[dst] &= U32MAX; },
ebpf::MOD32_IMM => reg[dst] = (reg[dst] as u32 % insn.imm as u32) as u64,
ebpf::MOD32_REG => {
if reg[src] == 0 {
return Err(Error::new(ErrorKind::Other,"Error: division by 0"));
}
reg[dst] = (reg[dst] as u32 % reg[src] as u32) as u64;
},
ebpf::XOR32_IMM => reg[dst] = (reg[dst] as u32 ^ insn.imm as u32) as u64,
ebpf::XOR32_REG => reg[dst] = (reg[dst] as u32 ^ reg[src] as u32) as u64,
ebpf::MOV32_IMM => reg[dst] = insn.imm as u64,
ebpf::MOV32_REG => reg[dst] = (reg[src] as u32) as u64,
ebpf::ARSH32_IMM => { reg[dst] = (reg[dst] as i32).wrapping_shr(insn.imm as u32) as u64; reg[dst] &= U32MAX; },
ebpf::ARSH32_REG => { reg[dst] = (reg[dst] as i32).wrapping_shr(reg[src] as u32) as u64; reg[dst] &= U32MAX; },
ebpf::LE => {
reg[dst] = match insn.imm {
16 => (reg[dst] as u16).to_le() as u64,
32 => (reg[dst] as u32).to_le() as u64,
64 => reg[dst].to_le(),
_ => unreachable!(),
};
},
ebpf::BE => {
reg[dst] = match insn.imm {
16 => (reg[dst] as u16).to_be() as u64,
32 => (reg[dst] as u32).to_be() as u64,
64 => reg[dst].to_be(),
_ => unreachable!(),
};
},
ebpf::ADD64_IMM => reg[dst] = reg[dst].wrapping_add(insn.imm as u64),
ebpf::ADD64_REG => reg[dst] = reg[dst].wrapping_add(reg[src]),
ebpf::SUB64_IMM => reg[dst] = reg[dst].wrapping_sub(insn.imm as u64),
ebpf::SUB64_REG => reg[dst] = reg[dst].wrapping_sub(reg[src]),
ebpf::MUL64_IMM => reg[dst] = reg[dst].wrapping_mul(insn.imm as u64),
ebpf::MUL64_REG => reg[dst] = reg[dst].wrapping_mul(reg[src]),
ebpf::DIV64_IMM => reg[dst] /= insn.imm as u64,
ebpf::DIV64_REG => {
if reg[src] == 0 {
return Err(Error::new(ErrorKind::Other,"Error: division by 0"));
}
reg[dst] /= reg[src];
},
ebpf::OR64_IMM => reg[dst] |= insn.imm as u64,
ebpf::OR64_REG => reg[dst] |= reg[src],
ebpf::AND64_IMM => reg[dst] &= insn.imm as u64,
ebpf::AND64_REG => reg[dst] &= reg[src],
ebpf::LSH64_IMM => reg[dst] <<= insn.imm as u64,
ebpf::LSH64_REG => reg[dst] <<= reg[src],
ebpf::RSH64_IMM => reg[dst] >>= insn.imm as u64,
ebpf::RSH64_REG => reg[dst] >>= reg[src],
ebpf::NEG64 => reg[dst] = -(reg[dst] as i64) as u64,
ebpf::MOD64_IMM => reg[dst] %= insn.imm as u64,
ebpf::MOD64_REG => {
if reg[src] == 0 {
return Err(Error::new(ErrorKind::Other,"Error: division by 0"));
}
reg[dst] %= reg[src];
},
ebpf::XOR64_IMM => reg[dst] ^= insn.imm as u64,
ebpf::XOR64_REG => reg[dst] ^= reg[src],
ebpf::MOV64_IMM => reg[dst] = insn.imm as u64,
ebpf::MOV64_REG => reg[dst] = reg[src],
ebpf::ARSH64_IMM => reg[dst] = (reg[dst] as i64 >> insn.imm) as u64,
ebpf::ARSH64_REG => reg[dst] = (reg[dst] as i64 >> reg[src]) as u64,
ebpf::JA => pc = (pc as isize + insn.off as isize) as usize,
ebpf::JEQ_IMM => if reg[dst] == insn.imm as u64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JEQ_REG => if reg[dst] == reg[src] { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JGT_IMM => if reg[dst] > insn.imm as u64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JGT_REG => if reg[dst] > reg[src] { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JGE_IMM => if reg[dst] >= insn.imm as u64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JGE_REG => if reg[dst] >= reg[src] { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JLT_IMM => if reg[dst] < insn.imm as u64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JLT_REG => if reg[dst] < reg[src] { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JLE_IMM => if reg[dst] <= insn.imm as u64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JLE_REG => if reg[dst] <= reg[src] { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JSET_IMM => if reg[dst] & insn.imm as u64 != 0 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JSET_REG => if reg[dst] & reg[src] != 0 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JNE_IMM => if reg[dst] != insn.imm as u64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JNE_REG => if reg[dst] != reg[src] { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JSGT_IMM => if reg[dst] as i64 > insn.imm as i64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JSGT_REG => if reg[dst] as i64 > reg[src] as i64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JSGE_IMM => if reg[dst] as i64 >= insn.imm as i64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JSGE_REG => if reg[dst] as i64 >= reg[src] as i64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JSLT_IMM => if (reg[dst] as i64) < insn.imm as i64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JSLT_REG => if (reg[dst] as i64) < reg[src] as i64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JSLE_IMM => if (reg[dst] as i64) <= insn.imm as i64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::JSLE_REG => if (reg[dst] as i64) <= reg[src] as i64 { pc = (pc as isize + insn.off as isize) as usize; },
ebpf::CALL_REG => {
let target_address = reg[insn.imm as usize];
reg[ebpf::STACK_REG] =
frames.push(®[ebpf::FIRST_SCRATCH_REG..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS], pc)?;
if target_address < ebpf::MM_PROGRAM_START {
return Err(Error::new(ErrorKind::Other,
format!("Error: callx at instruction #{:?} attempted to call outside of the text segment at addr {:#x}",
pc - 1 + ebpf::ELF_INSN_DUMP_OFFSET, reg[insn.imm as usize])));
}
pc = (target_address - ebpf::MM_PROGRAM_START) as usize / ebpf::INSN_SIZE;
},
ebpf::CALL_IMM => {
if let Some(mut helper) = self.helpers.get_mut(&(insn.imm as u32)) {
reg[0] = (helper.function)(reg[1], reg[2], reg[3], reg[4], reg[5], &mut helper.context, &ro_regions, &rw_regions)?;
} else if let Some(ref elf) = self.elf {
if let Some(new_pc) = elf.lookup_bpf_call(insn.imm as u32) {
reg[ebpf::STACK_REG] =
frames.push(®[ebpf::FIRST_SCRATCH_REG..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS], pc)?;
pc = *new_pc;
} else {
elf.report_unresolved_symbol(pc - 1)?;
}
} else {
return Err(Error::new(ErrorKind::Other,
format!("Error: Unresolved symbol at instruction #{:?}",
pc - 1 + ebpf::ELF_INSN_DUMP_OFFSET)));
}
},
ebpf::EXIT => {
match frames.pop() {
Ok((saved_reg, stack_ptr, ptr)) => {
reg[ebpf::FIRST_SCRATCH_REG..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS]
.copy_from_slice(&saved_reg);
reg[ebpf::STACK_REG] = stack_ptr;
pc = ptr;
},
_ => return Ok(reg[0]),
}
},
_ => unreachable!()
}
if (self.max_insn_count != 0) && (self.last_insn_count >= self.max_insn_count) {
return Err(Error::new(ErrorKind::Other,
format!("Error: Exceeded maximum number of instructions allowed ({:?})",
self.max_insn_count)));
}
}
Err(Error::new(ErrorKind::Other,
format!("Error: Attempted to call outside of the text segment, pc: {:?}",
pc + ebpf::ELF_INSN_DUMP_OFFSET)))
}
#[cfg(not(windows))]
pub fn jit_compile(&mut self) -> Result<(), Error> {
let prog =
if let Some(ref elf) = self.elf {
if elf.get_ro_sections().is_ok() {
return Err(Error::new(ErrorKind::Other,
"Error: JIT does not support RO data"));
}
let (_, bytes) = elf.get_text_bytes()?;
bytes
} else if let Some(ref prog) = self.prog {
prog
} else {
return Err(Error::new(ErrorKind::Other,
"Error: no program or elf set"));
};
self.jit = Some(jit::compile(prog, &self.helpers)?);
Ok(())
}
pub unsafe fn execute_program_jit(&self, mem: &mut [u8]) -> Result<u64, Error> {
let mem_ptr = match mem.len() {
0 => std::ptr::null_mut(),
_ => mem.as_ptr() as *mut u8
};
match self.jit {
Some(jit) => Ok(jit(mem_ptr, mem.len(), 0)),
None => Err(Error::new(ErrorKind::Other,
"Error: program has not been JIT-compiled")),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Exercise CallFrames across its whole depth: frame indexing, stack
    /// region placement (non-overlapping), push/pop symmetry, and the
    /// saved-register / return-pointer round trip.
    // Note: `&registers` was mis-encoded as `®isters` (U+00AE) in the two
    // `frames.push(...)` calls below, which did not compile; restored.
    #[test]
    fn test_frames() {
        const DEPTH: usize = 10;
        const SIZE: usize = 5;
        let mut frames = CallFrames::new(DEPTH, SIZE);
        let mut ptrs: Vec<MemoryRegion> = Vec::new();
        for i in 0..DEPTH - 1 {
            let registers = vec![i as u64; 5];
            assert_eq!(frames.get_frame_index(), i);
            ptrs.push(frames.get_stacks()[i].clone());
            assert_eq!(ptrs[i].len, SIZE as u64);
            let top = frames.push(&registers[0..4], i).unwrap();
            let new_ptrs = frames.get_stacks();
            assert_eq!(top, new_ptrs[i+1].addr_vm + new_ptrs[i+1].len - 1);
            assert_ne!(top, ptrs[i].addr_vm + ptrs[i].len - 1);
            // successive frames must not share any stack bytes
            assert!(!(ptrs[i].addr_vm <= new_ptrs[i+1].addr_vm && new_ptrs[i+1].addr_vm < ptrs[i].addr_vm + ptrs[i].len));
        }
        let i = DEPTH - 1;
        let registers = vec![i as u64; 5];
        assert_eq!(frames.get_frame_index(), i);
        ptrs.push(frames.get_stacks()[i].clone());
        // the deepest frame is occupied: one more push must fail
        assert!(frames.push(&registers, DEPTH - 1).is_err());
        for i in (0..DEPTH - 1).rev() {
            let (saved_reg, stack_ptr, return_ptr) = frames.pop().unwrap();
            assert_eq!(saved_reg, [i as u64, i as u64, i as u64, i as u64]);
            assert_eq!(ptrs[i].addr_vm + ptrs[i].len - 1, stack_ptr);
            assert_eq!(i, return_ptr);
        }
        // popping the root frame is an error
        assert!(frames.pop().is_err());
    }
}