use crate::{
aligned_memory::{is_memory_aligned, AlignedMemory},
ebpf::{self, EF_SBF_V2, HOST_ALIGN, INSN_SIZE},
elf_parser::{
consts::{
ELFCLASS64, ELFDATA2LSB, ELFOSABI_NONE, EM_BPF, EM_SBF, ET_DYN, R_X86_64_32,
R_X86_64_64, R_X86_64_NONE, R_X86_64_RELATIVE,
},
types::Elf64Word,
},
elf_parser_glue::{
ElfParser, ElfProgramHeader, ElfRelocation, ElfSectionHeader, ElfSymbol, GoblinParser,
NewParser,
},
memory_region::MemoryRegion,
vm::{BuiltInProgram, Config, ContextObject, FunctionRegistry},
};
#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
use crate::jit::{JitCompiler, JitProgram};
use byteorder::{ByteOrder, LittleEndian};
use std::{
collections::{btree_map::Entry, BTreeMap},
fmt::Debug,
mem,
ops::Range,
str,
sync::Arc,
};
#[derive(Debug, thiserror::Error, PartialEq, Eq)]
pub enum ElfError {
#[error("Failed to parse ELF file: {0}")]
FailedToParse(String),
#[error("Entrypoint out of bounds")]
EntrypointOutOfBounds,
#[error("Invaid entrypoint")]
InvalidEntrypoint,
#[error("Failed to get section {0}")]
FailedToGetSection(String),
#[error("Unresolved symbol ({0}) at instruction #{1:?} (ELF file offset {2:#x})")]
UnresolvedSymbol(String, usize, usize),
#[error("Section not found: {0}")]
SectionNotFound(String),
#[error("Relative jump out of bounds at instruction #{0}")]
RelativeJumpOutOfBounds(usize),
#[error("Symbol hash collision {0:#x}")]
SymbolHashCollision(u32),
#[error("Incompatible ELF: wrong endianess")]
WrongEndianess,
#[error("Incompatible ELF: wrong ABI")]
WrongAbi,
#[error("Incompatible ELF: wrong machine")]
WrongMachine,
#[error("Incompatible ELF: wrong class")]
WrongClass,
#[error("Multiple or no text sections, consider removing llc option: -function-sections")]
NotOneTextSection,
#[error("Found .bss section in ELF, read-write data not supported")]
BssNotSupported,
#[error("Found writable section ({0}) in ELF, read-write data not supported")]
WritableSectionNotSupported(String),
#[error("Relocation failed, no loadable section contains virtual address {0:#x}")]
AddressOutsideLoadableSection(u64),
#[error("Relocation failed, invalid referenced virtual address {0:#x}")]
InvalidVirtualAddress(u64),
#[error("Relocation failed, unknown type {0:?}")]
UnknownRelocation(u32),
#[error("Failed to read relocation info")]
FailedToReadRelocationInfo,
#[error("Incompatible ELF: wrong type")]
WrongType,
#[error("Unknown symbol with index {0}")]
UnknownSymbol(usize),
#[error("Offset or value is out of bounds")]
ValueOutOfBounds,
#[error("Detected capabilities required by the executable which are not enabled")]
UnsupportedExecutableCapabilities,
#[error("Invalid ELF program header")]
InvalidProgramHeader,
}
/// Hash of an internal function for the function registry: the entrypoint
/// hashes its symbol name, every other function hashes its pc encoded as
/// little-endian bytes.
pub fn hash_internal_function(pc: usize, name: &str) -> u32 {
    if name == "entrypoint" {
        ebpf::hash_symbol_name(b"entrypoint")
    } else {
        ebpf::hash_symbol_name(&(pc as u64).to_le_bytes())
    }
}
/// Registers an internal function at `pc` under `name` and returns the key
/// it was registered under. Fails on a hash collision with a syscall or
/// with a different already-registered function.
pub fn register_internal_function<
    C: ContextObject,
    T: AsRef<str> + ToString + std::cmp::PartialEq<&'static str>,
>(
    function_registry: &mut FunctionRegistry,
    loader: &BuiltInProgram<C>,
    pc: usize,
    name: T,
) -> Result<u32, ElfError> {
    let config = loader.get_config();
    // Key selection: with static syscalls the pc itself is the key,
    // otherwise a hash derived from the symbol name / pc.
    let key = if config.static_syscalls {
        pc as u32
    } else {
        let hash = hash_internal_function(pc, name.as_ref());
        let collides_with_syscall = config.external_internal_function_hash_collision
            && loader.lookup_function(hash).is_some();
        if collides_with_syscall {
            return Err(ElfError::SymbolHashCollision(hash));
        }
        hash
    };
    match function_registry.entry(key) {
        Entry::Occupied(entry) => {
            // Re-registering the same pc under the same key is a no-op;
            // anything else is a genuine collision.
            if entry.get().0 != pc {
                return Err(ElfError::SymbolHashCollision(key));
            }
        }
        Entry::Vacant(entry) => {
            // Labels are only kept when requested, except for the
            // entrypoint which is always labeled.
            let label = if config.enable_symbol_and_section_labels || name == "entrypoint" {
                name.to_string()
            } else {
                String::default()
            };
            entry.insert((pc, label));
        }
    }
    Ok(key)
}
/// Byte offset of the immediate field within an eBPF instruction.
const BYTE_OFFSET_IMMEDIATE: usize = 4;
/// Byte length of the immediate field of an eBPF instruction.
const BYTE_LENGTH_IMMEDIATE: usize = 4;
/// BPF relocation types, numerically aliased onto x86_64 relocation values
/// (see `from_x86_relocation_type` below).
#[allow(non_camel_case_types)]
#[derive(Debug, PartialEq, Copy, Clone)]
enum BpfRelocationType {
    /// No relocation.
    R_Bpf_None = 0,
    /// 64 bit relocation: symbol value plus addend written as two 32 bit
    /// halves into consecutive instruction immediates (see `relocate`).
    R_Bpf_64_64 = 1,
    /// Relocation relative to the load address (see `relocate`).
    R_Bpf_64_Relative = 8,
    /// 32 bit relocation of a call immediate, resolved to a function or
    /// syscall hash (see `relocate`).
    R_Bpf_64_32 = 10,
}
impl BpfRelocationType {
    /// Maps an x86_64 relocation type emitted by the toolchain onto the
    /// corresponding BPF relocation type, or `None` if unsupported.
    fn from_x86_relocation_type(from: u32) -> Option<BpfRelocationType> {
        let converted = match from {
            R_X86_64_NONE => BpfRelocationType::R_Bpf_None,
            R_X86_64_64 => BpfRelocationType::R_Bpf_64_64,
            R_X86_64_RELATIVE => BpfRelocationType::R_Bpf_64_Relative,
            R_X86_64_32 => BpfRelocationType::R_Bpf_64_32,
            _ => return None,
        };
        Some(converted)
    }
}
/// Metadata about a single ELF section.
#[derive(Debug, PartialEq)]
struct SectionInfo {
    /// Section name (left empty unless symbol/section labels are enabled).
    name: String,
    /// Virtual address the section is mapped at.
    vaddr: u64,
    /// Byte range of the section within the ELF file.
    offset_range: Range<usize>,
}
impl SectionInfo {
    /// Memory footprint of this struct, including the name's heap
    /// allocation.
    fn mem_size(&self) -> usize {
        let fixed = mem::size_of::<Self>();
        fixed.saturating_add(self.name.capacity())
    }
}
/// The consolidated read-only memory of an executable. The first field of
/// each variant is the offset of the data relative to `MM_PROGRAM_START`
/// (see `get_ro_region`).
#[derive(Debug, PartialEq)]
pub(crate) enum Section {
    /// Owned, compacted copy of the read-only data.
    Owned(usize, Vec<u8>),
    /// Byte range borrowed directly from the ELF file bytes.
    Borrowed(usize, Range<usize>),
}
/// An ELF executable prepared for execution: the (relocated) ELF bytes, the
/// resolved read-only and text sections, the entrypoint and the registry of
/// internal functions.
#[derive(Debug, PartialEq)]
pub struct Executable<C: ContextObject> {
    /// Backing storage for the (possibly relocated) ELF bytes.
    elf_bytes: AlignedMemory<{ HOST_ALIGN }>,
    /// Consolidated read-only data (owned, or a window into `elf_bytes`).
    ro_section: Section,
    /// Location of the text section within the executable.
    text_section_info: SectionInfo,
    /// Program counter of the entrypoint instruction.
    entry_pc: usize,
    /// Internal functions reachable via call instructions.
    function_registry: FunctionRegistry,
    /// Loader providing the config and the registered syscalls.
    loader: Arc<BuiltInProgram<C>>,
    /// Machine code produced by `jit_compile`, if any.
    #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
    compiled_program: Option<JitProgram>,
}
impl<C: ContextObject> Executable<C> {
/// Returns the configuration of the loader.
pub fn get_config(&self) -> &Config {
    self.loader.get_config()
}
/// Returns the text section's virtual address and its bytes, sliced out of
/// the consolidated read-only section.
pub fn get_text_bytes(&self) -> (u64, &[u8]) {
    // Resolve the backing bytes of the read-only section first.
    let (base, bytes) = match &self.ro_section {
        Section::Borrowed(base, range) => (*base, &self.elf_bytes.as_slice()[range.clone()]),
        Section::Owned(base, data) => (*base, data.as_slice()),
    };
    let vaddr = self.text_section_info.vaddr;
    // Position of the text section within the ro bytes.
    let start = vaddr
        .saturating_sub(ebpf::MM_PROGRAM_START)
        .saturating_sub(base as u64) as usize;
    let end = start.saturating_add(self.text_section_info.offset_range.len());
    (vaddr, &bytes[start..end])
}
/// Returns the bytes of the consolidated read-only section. Owned data
/// lives in its own vector; borrowed data is a window into the ELF bytes.
pub fn get_ro_section(&self) -> &[u8] {
    match &self.ro_section {
        Section::Borrowed(_base, range) => &self.elf_bytes.as_slice()[range.clone()],
        Section::Owned(_base, data) => data.as_slice(),
    }
}
/// Builds a read-only `MemoryRegion` covering the consolidated ro section.
pub fn get_ro_region(&self) -> MemoryRegion {
    get_ro_region(&self.ro_section, self.elf_bytes.as_slice())
}
/// Program counter (instruction index) of the entrypoint.
pub fn get_entrypoint_instruction_offset(&self) -> usize {
    self.entry_pc
}
/// File offset of the start of the text section (debugger builds only).
#[cfg(feature = "debugger")]
pub fn get_text_section_offset(&self) -> u64 {
    self.text_section_info.offset_range.start as u64
}
/// Resolves a function registry key to its program counter, discarding the
/// label.
pub fn lookup_internal_function(&self, hash: u32) -> Option<usize> {
    let entry = self.function_registry.get(&hash)?;
    Some(entry.0)
}
/// Returns the loader (built-in program) backing this executable.
pub fn get_loader(&self) -> &BuiltInProgram<C> {
    &self.loader
}
/// Returns the JIT-compiled program, if `jit_compile` has been run.
#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
pub fn get_compiled_program(&self) -> Option<&JitProgram> {
    self.compiled_program.as_ref()
}
/// JIT-compiles the executable in place and stores the resulting machine
/// code in `compiled_program`.
#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
pub fn jit_compile(executable: &mut Self) -> Result<(), crate::error::EbpfError> {
    let jit = JitCompiler::<C>::new(executable)?;
    executable.compiled_program = Some(jit.compile()?);
    Ok(())
}
/// Returns the registry of internal functions.
pub fn get_function_registry(&self) -> &FunctionRegistry {
    &self.function_registry
}
/// Creates an executable directly from raw text-section bytes, without any
/// ELF parsing, validation or relocation.
pub fn new_from_text_bytes(
    text_bytes: &[u8],
    loader: Arc<BuiltInProgram<C>>,
    mut function_registry: FunctionRegistry,
) -> Result<Self, ElfError> {
    let elf_bytes = AlignedMemory::from_slice(text_bytes);
    let config = loader.get_config();
    let enable_symbol_and_section_labels = config.enable_symbol_and_section_labels;
    // Reuse a pre-registered entrypoint if one exists, otherwise register
    // pc 0 as the entrypoint.
    let entry_pc = match function_registry
        .values()
        .find(|(_pc, name)| name == "entrypoint")
    {
        Some((pc, _name)) => *pc,
        None => {
            register_internal_function(&mut function_registry, &loader, 0, "entrypoint")?;
            0
        }
    };
    let text_section_name = if enable_symbol_and_section_labels {
        ".text".to_string()
    } else {
        String::default()
    };
    Ok(Self {
        elf_bytes,
        // The whole byte slice doubles as the read-only section.
        ro_section: Section::Borrowed(0, 0..text_bytes.len()),
        text_section_info: SectionInfo {
            name: text_section_name,
            vaddr: ebpf::MM_PROGRAM_START,
            offset_range: 0..text_bytes.len(),
        },
        entry_pc,
        function_registry,
        loader,
        #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
        compiled_program: None,
    })
}
/// Fully loads an ELF: parses it with the configured parser, validates,
/// relocates and collects the sections.
pub fn load(bytes: &[u8], loader: Arc<BuiltInProgram<C>>) -> Result<Self, ElfError> {
    if !loader.get_config().new_elf_parser {
        return Self::load_with_parser(&GoblinParser::parse(bytes)?, bytes, loader);
    }
    // The new parser expects HOST_ALIGN aligned bytes; copy them into an
    // aligned buffer when the caller's slice is not aligned.
    let aligned;
    let bytes = if is_memory_aligned(bytes.as_ptr() as usize, HOST_ALIGN) {
        bytes
    } else {
        aligned = AlignedMemory::<{ HOST_ALIGN }>::from_slice(bytes);
        aligned.as_slice()
    };
    Self::load_with_parser(&NewParser::parse(bytes)?, bytes, loader)
}
/// Core loading path shared by both parsers: validates the ELF, gathers
/// text section info, applies relocations, resolves the entrypoint and
/// collects the read-only sections.
fn load_with_parser<'a, P: ElfParser<'a>>(
    elf: &'a P,
    bytes: &[u8],
    loader: Arc<BuiltInProgram<C>>,
) -> Result<Self, ElfError> {
    let mut elf_bytes = AlignedMemory::from_slice(bytes);
    let config = loader.get_config();
    Self::validate(config, elf, elf_bytes.as_slice())?;
    // Gather text section metadata before the bytes get mutated below.
    let text_section = elf.section(".text")?;
    let text_section_info = SectionInfo {
        name: if config.enable_symbol_and_section_labels {
            elf.section_name(text_section.sh_name())
                .unwrap_or(".text")
                .to_string()
        } else {
            String::default()
        },
        // With enable_elf_vaddr a section may already carry a full virtual
        // address; otherwise sh_addr is rebased onto MM_PROGRAM_START.
        vaddr: if config.enable_elf_vaddr && text_section.sh_addr() >= ebpf::MM_PROGRAM_START {
            text_section.sh_addr()
        } else {
            text_section
                .sh_addr()
                .saturating_add(ebpf::MM_PROGRAM_START)
        },
        offset_range: text_section.file_range().unwrap_or_default(),
    };
    // Optionally include the section size when checking for overlap with
    // the stack region.
    let vaddr_end = if config.reject_rodata_stack_overlap {
        text_section_info
            .vaddr
            .saturating_add(text_section.sh_size())
    } else {
        text_section_info.vaddr
    };
    if (config.reject_broken_elfs
        && !config.enable_elf_vaddr
        && text_section.sh_addr() != text_section.sh_offset())
        || vaddr_end > ebpf::MM_STACK_START
    {
        return Err(ElfError::ValueOutOfBounds);
    }
    let mut function_registry = FunctionRegistry::default();
    Self::relocate(
        &mut function_registry,
        &loader,
        elf,
        elf_bytes.as_slice_mut(),
    )?;
    // e_entry must point at an instruction boundary within .text.
    let offset = elf.header().e_entry.saturating_sub(text_section.sh_addr());
    if offset.checked_rem(ebpf::INSN_SIZE as u64) != Some(0) {
        return Err(ElfError::InvalidEntrypoint);
    }
    let entry_pc = if let Some(entry_pc) = (offset as usize).checked_div(ebpf::INSN_SIZE) {
        // Without static syscalls the entrypoint name hash must be freed
        // up before re-registering it at the real pc.
        if !config.static_syscalls {
            function_registry.remove(&ebpf::hash_symbol_name(b"entrypoint"));
        }
        register_internal_function(&mut function_registry, &loader, entry_pc, "entrypoint")?;
        entry_pc
    } else {
        return Err(ElfError::InvalidEntrypoint);
    };
    let ro_section = Self::parse_ro_sections(
        config,
        elf.section_headers()
            .map(|s| (elf.section_name(s.sh_name()), s)),
        elf_bytes.as_slice(),
    )?;
    Ok(Self {
        elf_bytes,
        ro_section,
        text_section_info,
        entry_pc,
        function_registry,
        loader,
        #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
        compiled_program: None,
    })
}
/// Approximate memory footprint of this executable in bytes, including the
/// owned buffers, the function registry's labels and the loader.
#[rustfmt::skip]
pub fn mem_size(&self) -> usize {
    let mut total = mem::size_of::<Self>();
    total = total
        // Backing ELF bytes.
        .saturating_add(self.elf_bytes.mem_size())
        // Owned ro data; borrowed ro data aliases elf_bytes and is free.
        .saturating_add(match &self.ro_section {
            Section::Owned(_, data) => data.capacity(),
            Section::Borrowed(_, _) => 0,
        })
        .saturating_add(self.text_section_info.mem_size())
        // Registry container plus each entry's pc, label and label heap.
        .saturating_add(mem::size_of_val(&self.function_registry))
        .saturating_add(self.function_registry
            .iter()
            .fold(0, |state: usize, (_, (val, name))| state
                .saturating_add(mem::size_of_val(&val)
                    .saturating_add(mem::size_of_val(&name)
                        .saturating_add(name.capacity())))))
        .saturating_add(self.loader.mem_size());
    #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
    {
        total = total.saturating_add(self.compiled_program.as_ref().map_or(0, |program| program.mem_size()));
    }
    total
}
/// Scans `elf_bytes` (the text section) for pc-relative `call` instructions,
/// registers their targets as internal functions and rewrites each call
/// immediate with the registry key.
pub fn fixup_relative_calls(
    function_registry: &mut FunctionRegistry,
    loader: &BuiltInProgram<C>,
    elf_bytes: &mut [u8],
) -> Result<(), ElfError> {
    let config = loader.get_config();
    let instruction_count = elf_bytes
        .len()
        .checked_div(ebpf::INSN_SIZE)
        .ok_or(ElfError::ValueOutOfBounds)?;
    for i in 0..instruction_count {
        let mut insn = ebpf::get_insn(elf_bytes, i);
        // Skip calls with imm == -1 (presumably unresolved symbols filled
        // in by relocation — confirm) and, with static syscalls, calls
        // with src == 0 which encode syscalls.
        if insn.opc == ebpf::CALL_IMM
            && insn.imm != -1
            && !(config.static_syscalls && insn.src == 0)
        {
            // Relative target: the next instruction plus the immediate.
            let target_pc = (i as isize)
                .saturating_add(1)
                .saturating_add(insn.imm as isize);
            if target_pc < 0 || target_pc >= instruction_count as isize {
                return Err(ElfError::RelativeJumpOutOfBounds(
                    i.saturating_add(ebpf::ELF_INSN_DUMP_OFFSET),
                ));
            }
            let name = if config.enable_symbol_and_section_labels {
                format!("function_{}", target_pc)
            } else {
                String::default()
            };
            let key = register_internal_function(
                function_registry,
                loader,
                target_pc as usize,
                name,
            )?;
            // Replace the relative offset with the registry key.
            insn.imm = key as i64;
            let offset = i.saturating_mul(ebpf::INSN_SIZE);
            let checked_slice = elf_bytes
                .get_mut(offset..offset.saturating_add(ebpf::INSN_SIZE))
                .ok_or(ElfError::ValueOutOfBounds)?;
            checked_slice.copy_from_slice(&insn.to_array());
        }
    }
    Ok(())
}
/// Validates an ELF against the constraints of this VM: identity fields,
/// capability flags, program/section header sanity and the entrypoint.
pub fn validate<'a, P: ElfParser<'a>>(
    config: &Config,
    elf: &'a P,
    elf_bytes: &[u8],
) -> Result<(), ElfError> {
    let header = elf.header();
    if header.e_ident.ei_class != ELFCLASS64 {
        return Err(ElfError::WrongClass);
    }
    if header.e_ident.ei_data != ELFDATA2LSB {
        return Err(ElfError::WrongEndianess);
    }
    if header.e_ident.ei_osabi != ELFOSABI_NONE {
        return Err(ElfError::WrongAbi);
    }
    if header.e_machine != EM_BPF && header.e_machine != EM_SBF {
        return Err(ElfError::WrongMachine);
    }
    if header.e_type != ET_DYN {
        return Err(ElfError::WrongType);
    }
    // SBFv2 executables require dynamic stack frames; conversely, when the
    // whole SBFv2 feature set is enabled, non-SBFv2 executables are rejected.
    if header.e_flags == EF_SBF_V2 {
        if !config.dynamic_stack_frames {
            return Err(ElfError::UnsupportedExecutableCapabilities);
        }
    } else if config.dynamic_stack_frames
        && config.enable_elf_vaddr
        && config.reject_rodata_stack_overlap
        && config.static_syscalls
    {
        return Err(ElfError::UnsupportedExecutableCapabilities);
    }
    if config.enable_elf_vaddr {
        // Reject program headers whose vm range would overflow.
        if elf
            .program_headers()
            .any(|header| header.p_vaddr().checked_add(header.p_memsz()).is_none())
        {
            return Err(ElfError::InvalidProgramHeader);
        }
    }
    // Exactly one section may be named ".text".
    let num_text_sections = elf
        .section_headers()
        .fold(0, |count: usize, section_header| {
            if let Some(this_name) = elf.section_name(section_header.sh_name()) {
                if this_name == ".text" {
                    return count.saturating_add(1);
                }
            }
            count
        });
    if 1 != num_text_sections {
        return Err(ElfError::NotOneTextSection);
    }
    // Read-write data is not supported.
    for section_header in elf.section_headers() {
        if let Some(name) = elf.section_name(section_header.sh_name()) {
            if name.starts_with(".bss")
                || (section_header.is_writable()
                    && (name.starts_with(".data") && !name.starts_with(".data.rel")))
            {
                return Err(ElfError::WritableSectionNotSupported(name.to_owned()));
            } else if name == ".bss" {
                // NOTE(review): unreachable as written — a name equal to
                // ".bss" already matches starts_with(".bss") above; confirm
                // whether BssNotSupported was meant to take precedence.
                return Err(ElfError::BssNotSupported);
            }
        }
    }
    // Every section's file range must lie within the ELF bytes.
    for section_header in elf.section_headers() {
        let start = section_header.sh_offset() as usize;
        let end = section_header
            .sh_offset()
            .checked_add(section_header.sh_size())
            .ok_or(ElfError::ValueOutOfBounds)? as usize;
        let _ = elf_bytes
            .get(start..end)
            .ok_or(ElfError::ValueOutOfBounds)?;
    }
    // The entrypoint must lie inside the text section's vm range.
    let text_section = elf.section(".text")?;
    if !text_section.vm_range().contains(&header.e_entry) {
        return Err(ElfError::EntrypointOutOfBounds);
    }
    Ok(())
}
/// Collects the read-only sections (.text, .rodata, .data.rel.ro,
/// .eh_frame) into a single `Section`: borrowed in place when they are
/// consecutive and consistently laid out, otherwise copied into an owned,
/// compacted buffer.
pub(crate) fn parse_ro_sections<
    'a,
    T: ElfSectionHeader + 'a,
    S: IntoIterator<Item = (Option<&'a str>, &'a T)>,
>(
    config: &Config,
    sections: S,
    elf_bytes: &[u8],
) -> Result<Section, ElfError> {
    // Bounds of the union of all ro sections (by section address).
    let mut lowest_addr = usize::MAX;
    let mut highest_addr = 0;
    // Total number of bytes the ro sections actually occupy.
    let mut ro_fill_length = 0usize;
    // Set when section addresses and file offsets disagree, which forbids
    // borrowing the bytes in place.
    let mut invalid_offsets = false;
    // Common address-to-file-offset delta (enable_elf_vaddr only).
    let mut addr_file_offset = None;
    // Indices of the first/last ro section, used for the contiguity check.
    let mut first_ro_section = 0;
    let mut last_ro_section = 0;
    let mut n_ro_sections = 0usize;
    let mut ro_slices = vec![];
    for (i, (name, section_header)) in sections.into_iter().enumerate() {
        match name {
            Some(name)
                if name == ".text"
                    || name == ".rodata"
                    || name == ".data.rel.ro"
                    || name == ".eh_frame" => {}
            _ => continue,
        }
        if n_ro_sections == 0 {
            first_ro_section = i;
        }
        last_ro_section = i;
        n_ro_sections = n_ro_sections.saturating_add(1);
        let section_addr = section_header.sh_addr();
        if !invalid_offsets {
            if config.enable_elf_vaddr {
                // All sections must share one addr >= offset delta.
                if section_addr < section_header.sh_offset() {
                    invalid_offsets = true;
                } else {
                    let offset = section_addr.saturating_sub(section_header.sh_offset());
                    if *addr_file_offset.get_or_insert(offset) != offset {
                        invalid_offsets = true;
                    }
                }
            } else if section_addr != section_header.sh_offset() {
                invalid_offsets = true;
            }
        }
        let mut vaddr_end = if config.enable_elf_vaddr && section_addr >= ebpf::MM_PROGRAM_START
        {
            section_addr
        } else {
            section_addr.saturating_add(ebpf::MM_PROGRAM_START)
        };
        if config.reject_rodata_stack_overlap {
            vaddr_end = vaddr_end.saturating_add(section_header.sh_size());
        }
        // The ro data must not reach into the stack region.
        if (config.reject_broken_elfs && invalid_offsets) || vaddr_end > ebpf::MM_STACK_START {
            return Err(ElfError::ValueOutOfBounds);
        }
        let section_data = elf_bytes
            .get(section_header.file_range().unwrap_or_default())
            .ok_or(ElfError::ValueOutOfBounds)?;
        let section_addr = section_addr as usize;
        lowest_addr = lowest_addr.min(section_addr);
        highest_addr = highest_addr.max(section_addr.saturating_add(section_data.len()));
        ro_fill_length = ro_fill_length.saturating_add(section_data.len());
        ro_slices.push((section_addr, section_data));
    }
    // Overlapping sections would make the fill length exceed the span.
    if config.reject_broken_elfs && lowest_addr.saturating_add(ro_fill_length) > highest_addr {
        return Err(ElfError::ValueOutOfBounds);
    }
    // Borrowing requires valid offsets and consecutive section indices.
    let can_borrow = !invalid_offsets
        && last_ro_section
            .saturating_add(1)
            .saturating_sub(first_ro_section)
            == n_ro_sections;
    let ro_section = if config.optimize_rodata && can_borrow {
        // Borrow the bytes directly out of the ELF file.
        let buf_offset_start =
            lowest_addr.saturating_sub(addr_file_offset.unwrap_or(0) as usize);
        let buf_offset_end =
            highest_addr.saturating_sub(addr_file_offset.unwrap_or(0) as usize);
        let addr_offset = if lowest_addr >= ebpf::MM_PROGRAM_START as usize {
            lowest_addr.saturating_sub(ebpf::MM_PROGRAM_START as usize)
        } else {
            lowest_addr
        };
        Section::Borrowed(addr_offset, buf_offset_start..buf_offset_end)
    } else {
        // Copy the sections into an owned buffer, compacted to start at
        // lowest_addr when optimize_rodata is on.
        if config.optimize_rodata {
            highest_addr = highest_addr.saturating_sub(lowest_addr);
        } else {
            lowest_addr = 0;
        };
        let buf_len = highest_addr;
        if buf_len > elf_bytes.len() {
            return Err(ElfError::ValueOutOfBounds);
        }
        let mut ro_section = vec![0; buf_len];
        for (section_addr, slice) in ro_slices.iter() {
            let buf_offset_start = section_addr.saturating_sub(lowest_addr);
            ro_section[buf_offset_start..buf_offset_start.saturating_add(slice.len())]
                .copy_from_slice(slice);
        }
        let addr_offset = if lowest_addr >= ebpf::MM_PROGRAM_START as usize {
            lowest_addr.saturating_sub(ebpf::MM_PROGRAM_START as usize)
        } else {
            lowest_addr
        };
        Section::Owned(addr_offset, ro_section)
    };
    Ok(ro_section)
}
/// Applies the dynamic relocations to `elf_bytes`, fixes up relative calls
/// in the text section and registers every function the relocations (and,
/// optionally, the symbol table) reveal.
fn relocate<'a, P: ElfParser<'a>>(
    function_registry: &mut FunctionRegistry,
    loader: &BuiltInProgram<C>,
    elf: &'a P,
    elf_bytes: &mut [u8],
) -> Result<(), ElfError> {
    // Caches syscall name hashes so each name is hashed only once.
    let mut syscall_cache = BTreeMap::new();
    let text_section = elf.section(".text")?;
    // Rewrite pc-relative call immediates into registry keys first.
    Self::fixup_relative_calls(
        function_registry,
        loader,
        elf_bytes
            .get_mut(text_section.file_range().unwrap_or_default())
            .ok_or(ElfError::ValueOutOfBounds)?,
    )?;
    let config = loader.get_config();
    // Cache of the program header containing the current r_offset.
    let mut program_header: Option<&<P as ElfParser<'a>>::ProgramHeader> = None;
    for relocation in elf.dynamic_relocations() {
        let mut r_offset = relocation.r_offset() as usize;
        // With enable_elf_vaddr, r_offset is a virtual address and must be
        // translated into a file offset via its containing program header.
        if config.enable_elf_vaddr {
            match program_header {
                Some(header) if header.vm_range().contains(&(r_offset as u64)) => {}
                _ => {
                    program_header = elf
                        .program_headers()
                        .find(|header| header.vm_range().contains(&(r_offset as u64)))
                }
            }
            let header = program_header.as_ref().ok_or(ElfError::ValueOutOfBounds)?;
            r_offset = r_offset
                .saturating_sub(header.p_vaddr() as usize)
                .saturating_add(header.p_offset() as usize);
        }
        // Offset of the relocated instruction's immediate field.
        let imm_offset = r_offset.saturating_add(BYTE_OFFSET_IMMEDIATE);
        match BpfRelocationType::from_x86_relocation_type(relocation.r_type()) {
            Some(BpfRelocationType::R_Bpf_64_64) => {
                // Symbol value plus addend, written as two 32 bit halves
                // into the immediates of two consecutive instruction slots.
                let imm_low_offset = imm_offset;
                let imm_high_offset = imm_low_offset.saturating_add(INSN_SIZE);
                let checked_slice = elf_bytes
                    .get(imm_offset..imm_offset.saturating_add(BYTE_LENGTH_IMMEDIATE))
                    .ok_or(ElfError::ValueOutOfBounds)?;
                let refd_addr = LittleEndian::read_u32(checked_slice) as u64;
                let symbol = elf
                    .dynamic_symbol(relocation.r_sym())
                    .ok_or_else(|| ElfError::UnknownSymbol(relocation.r_sym() as usize))?;
                let mut addr = symbol.st_value().saturating_add(refd_addr);
                // Rebase file-relative addresses onto the program region.
                if addr < ebpf::MM_PROGRAM_START {
                    addr = ebpf::MM_PROGRAM_START.saturating_add(addr);
                }
                let imm_slice = elf_bytes
                    .get_mut(imm_offset..imm_offset.saturating_add(BYTE_LENGTH_IMMEDIATE))
                    .ok_or(ElfError::ValueOutOfBounds)?;
                LittleEndian::write_u32(imm_slice, (addr & 0xFFFFFFFF) as u32);
                let imm_slice = elf_bytes
                    .get_mut(
                        imm_high_offset..imm_high_offset.saturating_add(BYTE_LENGTH_IMMEDIATE),
                    )
                    .ok_or(ElfError::ValueOutOfBounds)?;
                LittleEndian::write_u32(
                    imm_slice,
                    addr.checked_shr(32).unwrap_or_default() as u32,
                );
            }
            Some(BpfRelocationType::R_Bpf_64_Relative) => {
                // Address relative to the load address.
                if text_section
                    .file_range()
                    .unwrap_or_default()
                    .contains(&r_offset)
                {
                    // Inside .text: the 64 bit address is split across the
                    // immediates of two consecutive instruction slots.
                    let imm_low_offset = imm_offset;
                    let imm_high_offset = r_offset
                        .saturating_add(INSN_SIZE)
                        .saturating_add(BYTE_OFFSET_IMMEDIATE);
                    let imm_slice = elf_bytes
                        .get(
                            imm_low_offset
                                ..imm_low_offset.saturating_add(BYTE_LENGTH_IMMEDIATE),
                        )
                        .ok_or(ElfError::ValueOutOfBounds)?;
                    let va_low = LittleEndian::read_u32(imm_slice) as u64;
                    let imm_slice = elf_bytes
                        .get(
                            imm_high_offset
                                ..imm_high_offset.saturating_add(BYTE_LENGTH_IMMEDIATE),
                        )
                        .ok_or(ElfError::ValueOutOfBounds)?;
                    let va_high = LittleEndian::read_u32(imm_slice) as u64;
                    let mut refd_addr = va_high.checked_shl(32).unwrap_or_default() | va_low;
                    if refd_addr == 0 {
                        return Err(ElfError::InvalidVirtualAddress(refd_addr));
                    }
                    if refd_addr < ebpf::MM_PROGRAM_START {
                        refd_addr = ebpf::MM_PROGRAM_START.saturating_add(refd_addr);
                    }
                    // Write back the rebased address, low half then high.
                    let imm_slice = elf_bytes
                        .get_mut(
                            imm_low_offset
                                ..imm_low_offset.saturating_add(BYTE_LENGTH_IMMEDIATE),
                        )
                        .ok_or(ElfError::ValueOutOfBounds)?;
                    LittleEndian::write_u32(imm_slice, (refd_addr & 0xFFFFFFFF) as u32);
                    let imm_slice = elf_bytes
                        .get_mut(
                            imm_high_offset
                                ..imm_high_offset.saturating_add(BYTE_LENGTH_IMMEDIATE),
                        )
                        .ok_or(ElfError::ValueOutOfBounds)?;
                    LittleEndian::write_u32(
                        imm_slice,
                        refd_addr.checked_shr(32).unwrap_or_default() as u32,
                    );
                } else {
                    // Outside .text: rewrite a full 64 bit address in place.
                    let refd_addr = if elf.header().e_flags == EF_SBF_V2 {
                        // SBFv2 stores a 64 bit address at r_offset.
                        let addr_slice = elf_bytes
                            .get(r_offset..r_offset.saturating_add(mem::size_of::<u64>()))
                            .ok_or(ElfError::ValueOutOfBounds)?;
                        let mut refd_addr = LittleEndian::read_u64(addr_slice);
                        if refd_addr < ebpf::MM_PROGRAM_START {
                            refd_addr = ebpf::MM_PROGRAM_START.saturating_add(refd_addr);
                        }
                        refd_addr
                    } else {
                        // Legacy: a 32 bit value at the immediate offset.
                        let addr_slice = elf_bytes
                            .get(imm_offset..imm_offset.saturating_add(BYTE_LENGTH_IMMEDIATE))
                            .ok_or(ElfError::ValueOutOfBounds)?;
                        let refd_addr = LittleEndian::read_u32(addr_slice) as u64;
                        ebpf::MM_PROGRAM_START.saturating_add(refd_addr)
                    };
                    let addr_slice = elf_bytes
                        .get_mut(r_offset..r_offset.saturating_add(mem::size_of::<u64>()))
                        .ok_or(ElfError::ValueOutOfBounds)?;
                    LittleEndian::write_u64(addr_slice, refd_addr);
                }
            }
            Some(BpfRelocationType::R_Bpf_64_32) => {
                // Call immediate: either an internal function (registered
                // by pc) or a syscall (hashed by name).
                let symbol = elf
                    .dynamic_symbol(relocation.r_sym())
                    .ok_or_else(|| ElfError::UnknownSymbol(relocation.r_sym() as usize))?;
                let name = elf
                    .dynamic_symbol_name(symbol.st_name() as Elf64Word)
                    .ok_or_else(|| ElfError::UnknownSymbol(symbol.st_name() as usize))?;
                let key = if symbol.is_function() && symbol.st_value() != 0 {
                    // Internal function: must lie within the text section.
                    if !text_section.vm_range().contains(&symbol.st_value()) {
                        return Err(ElfError::ValueOutOfBounds);
                    }
                    let target_pc = (symbol.st_value().saturating_sub(text_section.sh_addr())
                        as usize)
                        .checked_div(ebpf::INSN_SIZE)
                        .unwrap_or_default();
                    register_internal_function(function_registry, loader, target_pc, name)?
                } else {
                    // Syscall: hash the name, cached per st_name index.
                    let hash = *syscall_cache
                        .entry(symbol.st_name())
                        .or_insert_with(|| ebpf::hash_symbol_name(name.as_bytes()));
                    if config.reject_broken_elfs && loader.lookup_function(hash).is_none() {
                        return Err(ElfError::UnresolvedSymbol(
                            name.to_string(),
                            r_offset
                                .checked_div(ebpf::INSN_SIZE)
                                .and_then(|offset| {
                                    offset.checked_add(ebpf::ELF_INSN_DUMP_OFFSET)
                                })
                                .unwrap_or(ebpf::ELF_INSN_DUMP_OFFSET),
                            r_offset,
                        ));
                    }
                    hash
                };
                let checked_slice = elf_bytes
                    .get_mut(imm_offset..imm_offset.saturating_add(BYTE_LENGTH_IMMEDIATE))
                    .ok_or(ElfError::ValueOutOfBounds)?;
                LittleEndian::write_u32(checked_slice, key);
            }
            _ => return Err(ElfError::UnknownRelocation(relocation.r_type())),
        }
    }
    if config.enable_symbol_and_section_labels {
        // Register function symbols from the symbol table so labels are
        // available. 0x02 in the masked st_info is presumably STT_FUNC —
        // confirm against the ELF spec constants used elsewhere.
        for symbol in elf.symbols() {
            if symbol.st_info() & 0xEF != 0x02 {
                continue;
            }
            if !text_section.vm_range().contains(&symbol.st_value()) {
                return Err(ElfError::ValueOutOfBounds);
            }
            let target_pc = (symbol.st_value().saturating_sub(text_section.sh_addr()) as usize)
                .checked_div(ebpf::INSN_SIZE)
                .unwrap_or_default();
            let name = elf
                .symbol_name(symbol.st_name() as Elf64Word)
                .ok_or_else(|| ElfError::UnknownSymbol(symbol.st_name() as usize))?;
            register_internal_function(function_registry, loader, target_pc, name)?;
        }
    }
    Ok(())
}
/// Debug helper: prints `name` followed by `prog` as rows of up to eight
/// hex bytes.
#[allow(dead_code)]
fn dump_data(name: &str, prog: &[u8]) {
    println!("{}", name);
    // The previous implementation cleared the row buffer instead of
    // pushing the current byte (silently dropping every eighth byte) and
    // never printed a trailing partial row; chunking fixes both.
    for row in prog.chunks(8) {
        println!("{:02X?}", row);
    }
}
}
/// Builds a read-only `MemoryRegion` for the given consolidated section,
/// mapped at `MM_PROGRAM_START` plus the section's offset.
pub(crate) fn get_ro_region(ro_section: &Section, elf: &[u8]) -> MemoryRegion {
    // Resolve the backing slice and its offset within the program region.
    let (offset, ro_data) = match ro_section {
        Section::Borrowed(offset, byte_range) => (*offset, &elf[byte_range.clone()]),
        Section::Owned(offset, data) => (*offset, data.as_slice()),
    };
    let vm_addr = ebpf::MM_PROGRAM_START.saturating_add(offset as u64);
    MemoryRegion::new_readonly(ro_data, vm_addr)
}
#[cfg(test)]
mod test {
use super::*;
use crate::{
ebpf,
elf_parser::{
consts::{ELFCLASS32, ELFDATA2MSB, ET_REL},
types::{Elf64Ehdr, Elf64Shdr},
},
error::EbpfError,
fuzz::fuzz,
syscalls,
vm::{ProgramResult, TestContextObject},
};
use rand::{distributions::Uniform, Rng};
use std::{fs::File, io::Read};
/// Shorthand for the executable type used throughout the tests.
type ElfExecutable = Executable<TestContextObject>;
/// Builds a loader with the default config and the two syscalls the test
/// ELFs reference.
fn loader() -> Arc<BuiltInProgram<TestContextObject>> {
    let mut program = BuiltInProgram::new_loader(Config::default());
    program
        .register_function_by_name("log", syscalls::bpf_syscall_string)
        .unwrap();
    program
        .register_function_by_name("log_64", syscalls::bpf_syscall_u64)
        .unwrap();
    Arc::new(program)
}
#[test]
fn test_validate() {
    // Starts from a known-good fixture and corrupts one header field at a
    // time, checking both parsers reject (or accept) it.
    let elf_bytes = std::fs::read("tests/elfs/noop.so").unwrap();
    let elf = NewParser::parse(&elf_bytes).unwrap();
    let mut header = elf.header().clone();
    let config = Config::default();
    // Overwrites the ELF header at the start of a copy of the file bytes.
    // SAFETY: assumes the file is at least size_of::<Elf64Ehdr>() bytes and
    // that the unaligned-capable write is acceptable here — TODO confirm
    // alignment requirements of Elf64Ehdr.
    let write_header = |header: Elf64Ehdr| unsafe {
        let mut bytes = elf_bytes.clone();
        std::ptr::write(bytes.as_mut_ptr() as *mut Elf64Ehdr, header);
        bytes
    };
    ElfExecutable::validate(&config, &elf, &elf_bytes).expect("validation failed");
    // Wrong class: the new parser rejects at parse time already.
    header.e_ident.ei_class = ELFCLASS32;
    let bytes = write_header(header.clone());
    NewParser::parse(&bytes).expect_err("allowed bad class");
    ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect_err("allowed bad class");
    header.e_ident.ei_class = ELFCLASS64;
    let bytes = write_header(header.clone());
    ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect("validation failed");
    ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect("validation failed");
    // Wrong endianness.
    header.e_ident.ei_data = ELFDATA2MSB;
    let bytes = write_header(header.clone());
    NewParser::parse(&bytes).expect_err("allowed big endian");
    header.e_ident.ei_data = ELFDATA2LSB;
    let bytes = write_header(header.clone());
    ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect("validation failed");
    ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect("validation failed");
    // Wrong OS ABI.
    header.e_ident.ei_osabi = 1;
    let bytes = write_header(header.clone());
    ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect_err("allowed wrong abi");
    ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect_err("allowed wrong abi");
    header.e_ident.ei_osabi = ELFOSABI_NONE;
    let bytes = write_header(header.clone());
    ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect("validation failed");
    ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect("validation failed");
    // Wrong machine.
    header.e_machine = 42;
    let bytes = write_header(header.clone());
    ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect_err("allowed wrong machine");
    ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect_err("allowed wrong machine");
    header.e_machine = EM_BPF;
    let bytes = write_header(header.clone());
    ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect("validation failed");
    ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect("validation failed");
    // Wrong object type (relocatable instead of dynamic).
    header.e_type = ET_REL;
    let bytes = write_header(header);
    ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect_err("allowed wrong type");
    ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes)
        .expect_err("allowed wrong type");
}
#[test]
fn test_load() {
    // Read the fixture ELF and check that it loads end-to-end.
    let elf_bytes = std::fs::read("tests/elfs/noop.so").expect("failed to read elf file");
    ElfExecutable::load(&elf_bytes, loader()).expect("validation failed");
}
#[test]
fn test_load_unaligned() {
    // Prepend one byte and slice it off again so the ELF contents start at
    // an unaligned address; loading must still succeed.
    let mut bytes = std::fs::read("tests/elfs/noop.so").expect("failed to read elf file");
    bytes.insert(0, 0);
    ElfExecutable::load(&bytes[1..], loader()).expect("validation failed");
}
#[test]
fn test_entrypoint() {
    let loader = loader();
    let mut file = File::open("tests/elfs/noop.so").expect("file open failed");
    let mut elf_bytes = Vec::new();
    file.read_to_end(&mut elf_bytes)
        .expect("failed to read elf file");
    let elf = ElfExecutable::load(&elf_bytes, loader.clone()).expect("validation failed");
    let parsed_elf = NewParser::parse(&elf_bytes).unwrap();
    let executable: &Executable<TestContextObject> = &elf;
    // The fixture's entrypoint is its first instruction.
    assert_eq!(0, executable.get_entrypoint_instruction_offset());
    // Overwrites the ELF header at the start of a copy of the file bytes.
    // SAFETY: assumes the buffer is at least size_of::<Elf64Ehdr>() bytes.
    let write_header = |header: Elf64Ehdr| unsafe {
        let mut bytes = elf_bytes.clone();
        std::ptr::write(bytes.as_mut_ptr() as *mut Elf64Ehdr, header);
        bytes
    };
    let mut header = parsed_elf.header().clone();
    let initial_e_entry = header.e_entry;
    // Advancing e_entry by one instruction (8 bytes) moves the entry pc to 1.
    header.e_entry += 8;
    let elf_bytes = write_header(header.clone());
    let elf = ElfExecutable::load(&elf_bytes, loader.clone()).expect("validation failed");
    let executable: &Executable<TestContextObject> = &elf;
    assert_eq!(1, executable.get_entrypoint_instruction_offset());
    // An entrypoint outside the text section's vm range is rejected.
    header.e_entry = 1;
    let elf_bytes = write_header(header.clone());
    assert_eq!(
        Err(ElfError::EntrypointOutOfBounds),
        ElfExecutable::load(&elf_bytes, loader.clone())
    );
    header.e_entry = u64::MAX;
    let elf_bytes = write_header(header.clone());
    assert_eq!(
        Err(ElfError::EntrypointOutOfBounds),
        ElfExecutable::load(&elf_bytes, loader.clone())
    );
    // An entrypoint that is not instruction aligned is rejected.
    header.e_entry = initial_e_entry + ebpf::INSN_SIZE as u64 + 1;
    let elf_bytes = write_header(header.clone());
    assert_eq!(
        Err(ElfError::InvalidEntrypoint),
        ElfExecutable::load(&elf_bytes, loader.clone())
    );
    // Restoring the original e_entry restores entry pc 0.
    header.e_entry = initial_e_entry;
    let elf_bytes = write_header(header);
    let elf = ElfExecutable::load(&elf_bytes, loader).expect("validation failed");
    let executable: &Executable<TestContextObject> = &elf;
    assert_eq!(0, executable.get_entrypoint_instruction_offset());
}
#[test]
fn test_fixup_relative_calls_back() {
    let mut function_registry = FunctionRegistry::default();
    let loader = BuiltInProgram::new_loader(Config {
        static_syscalls: false,
        enable_symbol_and_section_labels: true,
        ..Config::default()
    });
    // Five mov instructions followed by a call at pc 5 with imm = -2,
    // i.e. target pc = 5 + 1 - 2 = 4.
    #[rustfmt::skip]
    let mut prog = vec![
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x85, 0x10, 0x00, 0x00, 0xfe, 0xff, 0xff, 0xff];
    ElfExecutable::fixup_relative_calls(&mut function_registry, &loader, &mut prog).unwrap();
    // The call immediate must now hold the registry hash of function_4.
    let name = "function_4".to_string();
    let hash = hash_internal_function(4, &name);
    let insn = ebpf::Insn {
        opc: 0x85,
        dst: 0,
        src: 1,
        imm: hash as i64,
        ..ebpf::Insn::default()
    };
    assert_eq!(insn.to_array(), prog[40..]);
    assert_eq!(*function_registry.get(&hash).unwrap(), (4, name));
    // Same with imm = -6: target pc = 5 + 1 - 6 = 0.
    let mut function_registry = FunctionRegistry::default();
    prog.splice(44.., vec![0xfa, 0xff, 0xff, 0xff]);
    ElfExecutable::fixup_relative_calls(&mut function_registry, &loader, &mut prog).unwrap();
    let name = "function_0".to_string();
    let hash = hash_internal_function(0, &name);
    let insn = ebpf::Insn {
        opc: 0x85,
        dst: 0,
        src: 1,
        imm: hash as i64,
        ..ebpf::Insn::default()
    };
    assert_eq!(insn.to_array(), prog[40..]);
    assert_eq!(*function_registry.get(&hash).unwrap(), (0, name));
}
#[test]
fn test_fixup_relative_calls_forward() {
    // Forward-pointing counterpart of test_fixup_relative_calls_back.
    let mut function_registry = FunctionRegistry::default();
    let loader = BuiltInProgram::new_loader(Config {
        static_syscalls: false,
        enable_symbol_and_section_labels: true,
        ..Config::default()
    });

    // The first instruction (pc = 0) is a call (opcode 0x85) with
    // imm = 0, i.e. a call to the immediately following instruction.
    #[rustfmt::skip]
    let mut prog = vec![
        0x85, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
    ElfExecutable::fixup_relative_calls(&mut function_registry, &loader, &mut prog).unwrap();

    // Target pc = 0 + 1 + 0 = 1, labeled "function_1"; the call's imm now
    // carries the hash and the registry maps hash -> (1, name).
    let name = "function_1".to_string();
    let hash = hash_internal_function(1, &name);
    let insn = ebpf::Insn {
        opc: 0x85,
        dst: 0,
        src: 1,
        imm: hash as i64,
        ..ebpf::Insn::default()
    };
    assert_eq!(insn.to_array(), prog[..8]);
    assert_eq!(*function_registry.get(&hash).unwrap(), (1, name));

    // Patch the imm bytes (offsets 4..8) to 4: target pc = 0 + 1 + 4 = 5,
    // the last instruction of the program.
    let mut function_registry = FunctionRegistry::default();
    prog.splice(4..8, vec![0x04, 0x00, 0x00, 0x00]);
    ElfExecutable::fixup_relative_calls(&mut function_registry, &loader, &mut prog).unwrap();
    let name = "function_5".to_string();
    let hash = hash_internal_function(5, &name);
    let insn = ebpf::Insn {
        opc: 0x85,
        dst: 0,
        src: 1,
        imm: hash as i64,
        ..ebpf::Insn::default()
    };
    assert_eq!(insn.to_array(), prog[..8]);
    assert_eq!(*function_registry.get(&hash).unwrap(), (5, name));
}
#[test]
#[should_panic(
    expected = "called `Result::unwrap()` on an `Err` value: RelativeJumpOutOfBounds(29)"
)]
fn test_fixup_relative_calls_out_of_bounds_forward() {
    let mut function_registry = FunctionRegistry::default();
    let loader = loader();

    // The call at pc = 0 has imm = 5, targeting pc 0 + 1 + 5 = 6, one
    // past the last instruction (pc 5), so fixup_relative_calls() fails.
    // NOTE(review): the reported index 29 appears to be the faulting
    // pc (0) plus a fixed disassembly-dump offset -- confirm against
    // where RelativeJumpOutOfBounds is constructed.
    #[rustfmt::skip]
    let mut prog = vec![
        0x85, 0x10, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
    // Panics here (matching the should_panic expectation above); the
    // assertions below are never reached and only document the shape a
    // successful fixup would have produced.
    ElfExecutable::fixup_relative_calls(&mut function_registry, &loader, &mut prog).unwrap();
    let name = "function_1".to_string();
    let hash = hash_internal_function(1, &name);
    let insn = ebpf::Insn {
        opc: 0x85,
        dst: 0,
        src: 1,
        imm: hash as i64,
        ..ebpf::Insn::default()
    };
    assert_eq!(insn.to_array(), prog[..8]);
    assert_eq!(*function_registry.get(&hash).unwrap(), (1, name));
}
#[test]
#[should_panic(
    expected = "called `Result::unwrap()` on an `Err` value: RelativeJumpOutOfBounds(34)"
)]
fn test_fixup_relative_calls_out_of_bounds_back() {
    let mut function_registry = FunctionRegistry::default();
    let loader = loader();

    // The call at pc = 5 has imm = -7 (0xfffffff9 LE), targeting
    // pc 5 + 1 + (-7) = -1, before the start of the program, so
    // fixup_relative_calls() fails.
    // NOTE(review): the reported index 34 appears to be the faulting
    // pc (5) plus the same fixed dump offset (29) as in the forward
    // test -- confirm against the error construction site.
    #[rustfmt::skip]
    let mut prog = vec![
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x85, 0x10, 0x00, 0x00, 0xf9, 0xff, 0xff, 0xff];
    // Panics here (see should_panic above); everything below is dead
    // code that documents the would-be success shape.
    ElfExecutable::fixup_relative_calls(&mut function_registry, &loader, &mut prog).unwrap();
    let name = "function_4".to_string();
    let hash = hash_internal_function(4, &name);
    let insn = ebpf::Insn {
        opc: 0x85,
        dst: 0,
        src: 1,
        imm: hash as i64,
        ..ebpf::Insn::default()
    };
    assert_eq!(insn.to_array(), prog[40..]);
    assert_eq!(*function_registry.get(&hash).unwrap(), (4, name));
}
#[test]
#[ignore]
fn test_fuzz_load() {
    // Fuzzing harness (ignored by default: it performs millions of load
    // attempts). The only requirement is that ElfExecutable::load() never
    // panics on malformed input -- every Result is deliberately discarded.
    let loader = loader();

    // Phase 1: completely random 100-byte inputs.
    let mut rng = rand::thread_rng();
    let range = Uniform::new(0, 255);
    println!("random bytes");
    for _ in 0..1_000 {
        let elf_bytes: Vec<u8> = (0..100).map(|_| rng.sample(range)).collect();
        let _ = ElfExecutable::load(&elf_bytes, loader.clone());
    }

    // Phases 2-4: take a known-good ELF and mutate bytes in targeted
    // regions via the `fuzz` helper.
    let mut file = File::open("tests/elfs/noop.so").expect("file open failed");
    let mut elf_bytes = Vec::new();
    file.read_to_end(&mut elf_bytes)
        .expect("failed to read elf file");
    let parsed_elf = NewParser::parse(&elf_bytes).unwrap();

    // Mutations restricted to the ELF file header (first e_ehsize bytes).
    println!("mangle elf header");
    fuzz(
        &elf_bytes,
        1_000_000,
        100,
        0..parsed_elf.header().e_ehsize as usize,
        0..255,
        |bytes: &mut [u8]| {
            let _ = ElfExecutable::load(bytes, loader.clone());
        },
    );
    // Mutations restricted to the section header table (e_shoff..EOF).
    println!("mangle section headers");
    fuzz(
        &elf_bytes,
        1_000_000,
        100,
        parsed_elf.header().e_shoff as usize..elf_bytes.len(),
        0..255,
        |bytes: &mut [u8]| {
            let _ = ElfExecutable::load(bytes, loader.clone());
        },
    );
    // Mutations anywhere in the file.
    println!("mangle whole elf");
    fuzz(
        &elf_bytes,
        1_000_000,
        100,
        0..elf_bytes.len(),
        0..255,
        |bytes: &mut [u8]| {
            let _ = ElfExecutable::load(bytes, loader.clone());
        },
    );
}
/// Builds a minimal `Elf64Shdr` for the `parse_ro_sections` tests: the
/// file offset mirrors the virtual address (`sh_offset == sh_addr`) and
/// every other field is zeroed.
fn new_section(sh_addr: u64, sh_size: u64) -> Elf64Shdr {
    Elf64Shdr {
        sh_name: 0,
        sh_type: 0,
        sh_flags: 0,
        sh_addr,
        sh_offset: sh_addr,
        sh_size,
        sh_link: 0,
        sh_info: 0,
        sh_addralign: 0,
        sh_entsize: 0,
    }
}
#[test]
fn test_owned_ro_sections_not_contiguous() {
    let config = Config::default();
    let elf_bytes = [0u8; 512];
    // .dynamic sits between .text and .rodata but is not read-only data,
    // so the ro sections are non-contiguous and must be copied (owned).
    let text = new_section(10, 10);
    let dynamic = new_section(20, 10);
    let rodata = new_section(30, 10);
    let parsed = ElfExecutable::parse_ro_sections(
        &config,
        [
            (Some(".text"), &text),
            (Some(".dynamic"), &dynamic),
            (Some(".rodata"), &rodata),
        ],
        &elf_bytes,
    );
    // Owned copy starts at the first ro byte (10) and spans 30 bytes.
    assert!(matches!(
        parsed,
        Ok(Section::Owned(offset, data)) if offset == 10 && data.len() == 30
    ));
}
#[test]
fn test_owned_ro_sections_with_sh_offset() {
    // .rodata's file offset diverges from its vaddr; with
    // reject_broken_elfs disabled this forces an owned copy instead of
    // an error.
    let config = Config {
        reject_broken_elfs: false,
        ..Config::default()
    };
    let elf_bytes = [0u8; 512];
    let text = new_section(10, 10);
    let mut rodata = new_section(20, 10);
    rodata.sh_offset = 30;
    let parsed = ElfExecutable::parse_ro_sections(
        &config,
        [(Some(".text"), &text), (Some(".rodata"), &rodata)],
        &elf_bytes,
    );
    assert!(matches!(
        parsed,
        Ok(Section::Owned(offset, data)) if offset == 10 && data.len() == 20
    ));
}
#[test]
fn test_sh_offset_not_same_as_vaddr() {
    let config = Config {
        reject_broken_elfs: true,
        enable_elf_vaddr: false,
        ..Config::default()
    };
    let elf_bytes = [0u8; 512];
    let mut section = new_section(10, 10);
    // sh_offset == sh_addr: accepted.
    assert!(
        ElfExecutable::parse_ro_sections(&config, [(Some(".text"), &section)], &elf_bytes)
            .is_ok()
    );
    // Diverging sh_offset under reject_broken_elfs: rejected.
    section.sh_offset = 0;
    assert_eq!(
        Err(ElfError::ValueOutOfBounds),
        ElfExecutable::parse_ro_sections(&config, [(Some(".text"), &section)], &elf_bytes)
    );
}
#[test]
fn test_invalid_sh_offset_larger_than_vaddr() {
    // With reject_broken_elfs set, a section whose file offset diverges
    // from its virtual address is an error rather than an owned copy.
    let config = Config {
        reject_broken_elfs: true,
        ..Config::default()
    };
    let elf_bytes = [0u8; 512];
    let text = new_section(10, 10);
    let mut rodata = new_section(20, 10);
    rodata.sh_offset = 30;
    assert_eq!(
        Err(ElfError::ValueOutOfBounds),
        ElfExecutable::parse_ro_sections(
            &config,
            [(Some(".text"), &text), (Some(".rodata"), &rodata)],
            &elf_bytes,
        )
    );
}
#[test]
fn test_reject_non_constant_sh_offset() {
    // The two sections' file offsets differ from their vaddrs by
    // different amounts; the vaddr/offset delta must be constant across
    // sections, so this is rejected.
    let config = Config {
        reject_broken_elfs: true,
        ..Config::default()
    };
    let elf_bytes = [0u8; 512];
    let mut text = new_section(ebpf::MM_PROGRAM_START + 10, 10);
    text.sh_offset = 100;
    let mut rodata = new_section(ebpf::MM_PROGRAM_START + 20, 10);
    rodata.sh_offset = 120;
    assert_eq!(
        Err(ElfError::ValueOutOfBounds),
        ElfExecutable::parse_ro_sections(
            &config,
            [(Some(".text"), &text), (Some(".rodata"), &rodata)],
            &elf_bytes,
        )
    );
}
#[test]
fn test_borrowed_ro_sections_with_constant_sh_offset() {
    // Both sections' file offsets differ from their vaddrs by the same
    // amount, which is acceptable even with reject_broken_elfs: the data
    // can be borrowed straight from file offsets 100..120.
    let config = Config {
        reject_broken_elfs: true,
        ..Config::default()
    };
    let elf_bytes = [0u8; 512];
    let mut text = new_section(ebpf::MM_PROGRAM_START + 10, 10);
    text.sh_offset = 100;
    let mut rodata = new_section(ebpf::MM_PROGRAM_START + 20, 10);
    rodata.sh_offset = 110;
    assert_eq!(
        Ok(Section::Borrowed(10, 100..120)),
        ElfExecutable::parse_ro_sections(
            &config,
            [(Some(".text"), &text), (Some(".rodata"), &rodata)],
            &elf_bytes,
        )
    );
}
#[test]
fn test_owned_ro_region_no_initial_gap() {
    let config = Config::default();
    let elf_bytes = [0u8; 512];
    let text = new_section(0, 10);
    let dynamic = new_section(10, 10);
    let rodata = new_section(20, 10);
    let ro_section = ElfExecutable::parse_ro_sections(
        &config,
        [
            (Some(".text"), &text),
            (Some(".dynamic"), &dynamic),
            (Some(".rodata"), &rodata),
        ],
        &elf_bytes,
    )
    .unwrap();
    let ro_region = get_ro_region(&ro_section, &elf_bytes);
    let owned_bytes = match &ro_section {
        Section::Owned(_offset, data) => data.as_slice(),
        _ => panic!(),
    };
    let end = rodata.sh_addr + rodata.sh_size;
    // The whole [0, end) range maps to the start of the owned copy.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START, end),
        ProgramResult::Ok(ptr) if ptr == owned_bytes.as_ptr() as u64,
    ));
    // One byte past the end is unmapped.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START + end, 1),
        ProgramResult::Err(EbpfError::InvalidVirtualAddress(address))
            if address == ebpf::MM_PROGRAM_START + end,
    ));
}
#[test]
fn test_owned_ro_region_initial_gap_mappable() {
    // With optimize_rodata disabled, mapping starts at vaddr 0, so even
    // the 10-byte gap before .text is readable.
    let config = Config {
        optimize_rodata: false,
        ..Config::default()
    };
    let elf_bytes = [0u8; 512];
    let text = new_section(10, 10);
    let dynamic = new_section(20, 10);
    let rodata = new_section(30, 10);
    let ro_section = ElfExecutable::parse_ro_sections(
        &config,
        [
            (Some(".text"), &text),
            (Some(".dynamic"), &dynamic),
            (Some(".rodata"), &rodata),
        ],
        &elf_bytes,
    )
    .unwrap();
    let ro_region = get_ro_region(&ro_section, &elf_bytes);
    let owned_bytes = match &ro_section {
        Section::Owned(_offset, data) => data.as_slice(),
        _ => panic!(),
    };
    let end = rodata.sh_addr + rodata.sh_size;
    // Everything in [0, end) is mappable and points at the owned copy.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START, end),
        ProgramResult::Ok(ptr) if ptr == owned_bytes.as_ptr() as u64,
    ));
    // The first byte past the end is not mappable.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START + end, 1),
        ProgramResult::Err(EbpfError::InvalidVirtualAddress(address))
            if address == ebpf::MM_PROGRAM_START + end,
    ));
}
#[test]
fn test_owned_ro_region_initial_gap_map_error() {
    // With the default config the 10-byte gap before .text is NOT
    // mappable; only [text start, rodata end) maps to the owned copy.
    let config = Config::default();
    let elf_bytes = [0u8; 512];
    let text = new_section(10, 10);
    let dynamic = new_section(20, 10);
    let rodata = new_section(30, 10);
    let ro_section = ElfExecutable::parse_ro_sections(
        &config,
        [
            (Some(".text"), &text),
            (Some(".dynamic"), &dynamic),
            (Some(".rodata"), &rodata),
        ],
        &elf_bytes,
    )
    .unwrap();
    let owned_bytes = match &ro_section {
        Section::Owned(_offset, data) => data.as_slice(),
        _ => panic!(),
    };
    let ro_region = get_ro_region(&ro_section, &elf_bytes);
    let start = text.sh_addr;
    let end = rodata.sh_addr + rodata.sh_size;
    // Byte 0 lies in the initial gap: unmapped.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START, 1),
        ProgramResult::Err(EbpfError::InvalidVirtualAddress(address))
            if address == ebpf::MM_PROGRAM_START,
    ));
    // Last byte before .text: still unmapped.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START + start - 1, 1),
        ProgramResult::Err(EbpfError::InvalidVirtualAddress(address))
            if address == ebpf::MM_PROGRAM_START + 9,
    ));
    // The full [start, end) span maps to the owned copy.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START + start, end - start),
        ProgramResult::Ok(ptr) if ptr == owned_bytes.as_ptr() as u64,
    ));
    // One byte past the end: unmapped.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START + end, 1),
        ProgramResult::Err(EbpfError::InvalidVirtualAddress(address))
            if address == ebpf::MM_PROGRAM_START + end,
    ));
}
#[test]
fn test_borrowed_ro_sections_disabled() {
    // optimize_rodata = false always copies, even when the ro sections
    // are contiguous and could have been borrowed.
    let config = Config {
        optimize_rodata: false,
        ..Config::default()
    };
    let elf_bytes = [0u8; 512];
    let text = new_section(0, 10);
    let rodata = new_section(10, 10);
    let parsed = ElfExecutable::parse_ro_sections(
        &config,
        [(Some(".text"), &text), (Some(".rodata"), &rodata)],
        &elf_bytes,
    );
    assert!(matches!(
        parsed,
        Ok(Section::Owned(offset, data)) if offset == 0 && data.len() == 20
    ));
}
#[test]
fn test_borrowed_ro_sections() {
    let config = Config::default();
    let elf_bytes = [0u8; 512];
    let dynsym = new_section(0, 10);
    let text = new_section(20, 10);
    let rodata = new_section(40, 10);
    let dynamic = new_section(50, 10);
    // The borrowed range spans from the start of .text to the end of
    // .rodata; .dynsym and .dynamic fall outside it.
    assert_eq!(
        Ok(Section::Borrowed(20, 20..50)),
        ElfExecutable::parse_ro_sections(
            &config,
            [
                (Some(".dynsym"), &dynsym),
                (Some(".text"), &text),
                (Some(".rodata"), &rodata),
                (Some(".dynamic"), &dynamic)
            ],
            &elf_bytes,
        )
    );
}
#[test]
fn test_borrowed_ro_region_no_initial_gap() {
    let config = Config::default();
    let elf_bytes = [0u8; 512];
    let text = new_section(0, 10);
    let rodata = new_section(10, 10);
    // .dynamic is not part of the borrowed ro range (see
    // test_borrowed_ro_sections), so its placement does not matter here.
    let dynamic = new_section(10, 10);
    let ro_section = ElfExecutable::parse_ro_sections(
        &config,
        [
            (Some(".text"), &text),
            (Some(".rodata"), &rodata),
            (Some(".dynamic"), &dynamic),
        ],
        &elf_bytes,
    )
    .unwrap();
    let ro_region = get_ro_region(&ro_section, &elf_bytes);
    let end = rodata.sh_addr + rodata.sh_size;
    // [0, end) maps straight into the ELF bytes (no copy).
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START, end),
        ProgramResult::Ok(ptr) if ptr == elf_bytes.as_ptr() as u64,
    ));
    // One byte past the end is unmapped.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START + end, 1),
        ProgramResult::Err(EbpfError::InvalidVirtualAddress(address))
            if address == ebpf::MM_PROGRAM_START + end,
    ));
}
#[test]
fn test_borrowed_ro_region_initial_gap() {
    let config = Config::default();
    let elf_bytes = [0u8; 512];
    let dynamic = new_section(0, 10);
    let text = new_section(10, 10);
    let rodata = new_section(20, 10);
    let ro_section = ElfExecutable::parse_ro_sections(
        &config,
        [
            (Some(".dynamic"), &dynamic),
            (Some(".text"), &text),
            (Some(".rodata"), &rodata),
        ],
        &elf_bytes,
    )
    .unwrap();
    let ro_region = get_ro_region(&ro_section, &elf_bytes);
    let start = text.sh_addr;
    let end = rodata.sh_addr + rodata.sh_size;
    // The leading .dynamic bytes are not mappable.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START, 1),
        ProgramResult::Err(EbpfError::InvalidVirtualAddress(address))
            if address == ebpf::MM_PROGRAM_START,
    ));
    // Last byte before .text: still unmapped.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START + start - 1, 1),
        ProgramResult::Err(EbpfError::InvalidVirtualAddress(address))
            if address == ebpf::MM_PROGRAM_START + 9,
    ));
    // [start, end) maps straight into the ELF bytes at `start`.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START + start, end - start),
        ProgramResult::Ok(ptr) if ptr == elf_bytes[start as usize..].as_ptr() as u64,
    ));
    // One byte past the end: unmapped.
    assert!(matches!(
        ro_region.vm_to_host(ebpf::MM_PROGRAM_START + end, 1),
        ProgramResult::Err(EbpfError::InvalidVirtualAddress(address))
            if address == ebpf::MM_PROGRAM_START + end,
    ));
}
#[test]
fn test_reject_rodata_stack_overlap() {
    let config = Config {
        enable_elf_vaddr: true,
        reject_rodata_stack_overlap: true,
        ..Config::default()
    };
    let elf_bytes = [0u8; 512];
    // Builds a .text section at the given vaddr/size (sh_offset pinned
    // to 0) and runs it through parse_ro_sections.
    let parse = |sh_addr: u64, sh_size: u64| {
        let mut section = new_section(sh_addr, sh_size);
        section.sh_offset = 0;
        ElfExecutable::parse_ro_sections(&config, [(Some(".text"), &section)], &elf_bytes)
    };
    // Ends exactly at the stack boundary: accepted.
    assert!(parse(ebpf::MM_STACK_START - 10, 10).is_ok());
    // Zero-length section at the stack start: accepted.
    assert!(parse(ebpf::MM_STACK_START, 0).is_ok());
    // First byte inside the stack region: rejected.
    assert_eq!(
        Err(ElfError::ValueOutOfBounds),
        parse(ebpf::MM_STACK_START, 1)
    );
    // Last byte spills into the stack region: rejected.
    assert_eq!(
        Err(ElfError::ValueOutOfBounds),
        parse(ebpf::MM_STACK_START - 10, 11)
    );
}
#[test]
#[should_panic(expected = r#"validation failed: WritableSectionNotSupported(".data")"#)]
fn test_writable_data_section() {
    // Read-write data is unsupported; a writable .data section must be
    // rejected during load-time validation.
    let bytes = std::fs::read("tests/elfs/writable_data_section.so")
        .expect("failed to read elf file");
    ElfExecutable::load(&bytes, loader()).expect("validation failed");
}
#[test]
#[should_panic(expected = r#"validation failed: WritableSectionNotSupported(".bss")"#)]
fn test_bss_section() {
    // A .bss section implies writable memory, which the loader rejects.
    let bytes = std::fs::read("tests/elfs/bss_section.so").expect("failed to read elf file");
    ElfExecutable::load(&bytes, loader()).expect("validation failed");
}
#[test]
#[should_panic(expected = r#"validation failed: RelativeJumpOutOfBounds(29)"#)]
fn test_static_syscall_disabled() {
    // With static syscalls off, the static syscall instruction in this
    // fixture evidently gets treated as a relative call and fails the
    // bounds check during load.
    let loader = Arc::new(BuiltInProgram::new_loader(Config {
        static_syscalls: false,
        ..Config::default()
    }));
    let bytes =
        std::fs::read("tests/elfs/syscall_static_unknown.so").expect("failed to read elf file");
    ElfExecutable::load(&bytes, loader).expect("validation failed");
}
#[test]
#[should_panic(expected = "validation failed: InvalidProgramHeader")]
fn test_program_headers_overflow() {
    // Corrupt program headers must be rejected during validation.
    let bytes = std::fs::read("tests/elfs/program_headers_overflow.so")
        .expect("failed to read elf file");
    ElfExecutable::load(&bytes, loader()).expect("validation failed");
}
#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
#[test]
fn test_size() {
    // Guards against size regressions: the fully loaded and
    // JIT-compiled noop program must occupy exactly this many bytes.
    let mut file = File::open("tests/elfs/noop.so").expect("file open failed");
    let mut bytes = Vec::new();
    file.read_to_end(&mut bytes).expect("failed to read elf file");
    let mut executable = ElfExecutable::from_elf(&bytes, loader()).expect("validation failed");
    Executable::jit_compile(&mut executable).unwrap();
    assert_eq!(10538, executable.mem_size());
}
}