#[cfg(feature = "mem_profile")]
use std::sync::Arc;
#[cfg(target_arch = "aarch64")]
use goblin::elf::reloc::{R_AARCH64_NONE, R_AARCH64_RELATIVE};
#[cfg(target_arch = "x86_64")]
use goblin::elf::reloc::{R_X86_64_NONE, R_X86_64_RELATIVE};
use goblin::elf::{Elf, ProgramHeaders, Reloc};
#[cfg(feature = "nanvix-unstable")]
use goblin::elf32::program_header::PT_LOAD;
#[cfg(not(feature = "nanvix-unstable"))]
use goblin::elf64::program_header::PT_LOAD;
use super::exe::LoadInfo;
use crate::{Result, log_then_return, new_error};
#[cfg(feature = "mem_profile")]
/// An ELF section header whose name has been resolved from the
/// section-header string table, keeping only the fields needed for
/// stack unwinding / profiling.
struct ResolvedSectionHeader {
    name: String, // resolved section name, e.g. ".text" or ".eh_frame"
    addr: u64,    // sh_addr: virtual address of the section
    offset: u64,  // sh_offset: byte offset of the section within the ELF file
    size: u64,    // sh_size: section size in bytes
}
/// Everything extracted from a guest ELF binary that loading requires:
/// the raw image, its segment table, entry point, dynamic relocations,
/// and (under `mem_profile`) section headers for unwinding.
pub(crate) struct ElfInfo {
    payload: Vec<u8>,      // the raw ELF image bytes
    phdrs: ProgramHeaders, // program headers (segment table)
    #[cfg(feature = "mem_profile")]
    shdrs: Vec<ResolvedSectionHeader>, // section headers with resolved names
    entry: u64,            // entry-point virtual address from the ELF header
    relocs: Vec<Reloc>,    // dynamic relocations (REL and RELA tables combined)
    guest_bin_version: Option<String>, // payload of the Hyperlight version note, if present
}
#[cfg(feature = "mem_profile")]
/// Data retained after loading so the memory profiler can unwind guest
/// stacks via framehop.
struct UnwindInfo {
    payload: Vec<u8>, // the raw ELF image bytes
    load_addr: u64,   // address the guest image was actually loaded at
    va_size: u64,     // size of the guest's mapped virtual-address span
    base_svma: u64,   // stated (link-time) base virtual address of the image
    shdrs: Vec<ResolvedSectionHeader>, // section headers with resolved names
}
#[cfg(feature = "mem_profile")]
impl super::exe::UnwindInfo for UnwindInfo {
    /// Builds a framehop module named "guest" spanning the address range the
    /// image actually occupies at runtime.
    fn as_module(&self) -> framehop::Module<Vec<u8>> {
        let range_start = self.load_addr;
        let range_end = range_start + self.va_size;
        framehop::Module::new("guest".to_string(), range_start..range_end, range_start, self)
    }

    /// BLAKE3 hash of the raw guest binary.
    fn hash(&self) -> blake3::Hash {
        blake3::hash(self.payload.as_slice())
    }
}
#[cfg(feature = "mem_profile")]
impl UnwindInfo {
    /// Looks up a section header by exact name.
    ///
    /// Uses exact byte equality. A prefix comparison (matching only the
    /// first `name.len()` bytes) would let a query for `".eh_frame"`
    /// resolve to `".eh_frame_hdr"` — which conventionally precedes
    /// `.eh_frame` in the section table — and hand back the wrong
    /// addr/offset/size.
    fn resolved_section_header(&self, name: &[u8]) -> Option<&ResolvedSectionHeader> {
        self.shdrs.iter().find(|sh| sh.name.as_bytes() == name)
    }
}
#[cfg(feature = "mem_profile")]
impl framehop::ModuleSectionInfo<Vec<u8>> for &UnwindInfo {
    /// Stated (link-time) base virtual address of the image.
    fn base_svma(&self) -> u64 {
        self.base_svma
    }

    /// Virtual-address range occupied by the named section, if present.
    fn section_svma_range(&mut self, name: &[u8]) -> Option<std::ops::Range<u64>> {
        self.resolved_section_header(name)
            .map(|sh| sh.addr..sh.addr + sh.size)
    }

    /// Raw bytes of the named section, copied out of the ELF image.
    ///
    /// `.eh_frame` is deliberately withheld whenever a `.debug_frame`
    /// section exists — presumably so framehop uses `.debug_frame`
    /// instead.
    fn section_data(&mut self, name: &[u8]) -> Option<Vec<u8>> {
        let have_debug_frame = self.resolved_section_header(b".debug_frame").is_some();
        if have_debug_frame && name == b".eh_frame" {
            return None;
        }
        let sh = self.resolved_section_header(name)?;
        let start = sh.offset as usize;
        let end = (sh.offset + sh.size) as usize;
        Some(self.payload[start..end].to_vec())
    }
}
impl ElfInfo {
    /// Parses `bytes` as an ELF image and captures everything later loading
    /// needs: program headers, entry point, dynamic relocations, the optional
    /// Hyperlight version note, and (under `mem_profile`) section headers
    /// with resolved names.
    ///
    /// # Errors
    /// Returns an error if the bytes fail to parse as ELF, or if the image
    /// has no PT_LOAD program header.
    pub(crate) fn new(bytes: &[u8]) -> Result<Self> {
        let elf = Elf::parse(bytes)?;
        // Collect dynamic relocations from both the REL and RELA tables.
        let relocs = elf.dynrels.iter().chain(elf.dynrelas.iter()).collect();
        // A loadable image must have at least one PT_LOAD segment; the
        // unwraps in get_base_va()/get_va_size() rely on this check.
        if !elf
            .program_headers
            .iter()
            .any(|phdr| phdr.p_type == PT_LOAD)
        {
            log_then_return!("ELF must have at least one PT_LOAD header");
        }
        let guest_bin_version = Self::read_version_note(&elf, bytes);
        Ok(ElfInfo {
            payload: bytes.to_vec(),
            phdrs: elf.program_headers,
            // Sections whose names cannot be resolved from the string table
            // are silently dropped by the filter_map below.
            #[cfg(feature = "mem_profile")]
            shdrs: elf
                .section_headers
                .iter()
                .filter_map(|sh| {
                    Some(ResolvedSectionHeader {
                        name: elf.shdr_strtab.get_at(sh.sh_name)?.to_string(),
                        addr: sh.sh_addr,
                        offset: sh.sh_offset,
                        size: sh.sh_size,
                    })
                })
                .collect(),
            entry: elf.entry,
            relocs,
            guest_bin_version,
        })
    }

    /// Scans the image's note sections for the Hyperlight version note and
    /// returns its descriptor as a string, or `None` if the note is absent
    /// or malformed.
    fn read_version_note<'a>(elf: &Elf<'a>, bytes: &'a [u8]) -> Option<String> {
        use hyperlight_common::version_note::{
            HYPERLIGHT_NOTE_NAME, HYPERLIGHT_NOTE_TYPE, HYPERLIGHT_VERSION_SECTION,
        };
        let notes = elf.iter_note_sections(bytes, Some(HYPERLIGHT_VERSION_SECTION))?;
        for note in notes {
            // Notes that fail to parse are skipped rather than aborting the scan.
            let Ok(note) = note else { continue };
            if note.name == HYPERLIGHT_NOTE_NAME && note.n_type == HYPERLIGHT_NOTE_TYPE {
                // A non-UTF-8 descriptor aborts the lookup entirely (`?`).
                let desc = core::str::from_utf8(note.desc).ok()?;
                // The descriptor may be NUL-padded; strip trailing NULs.
                return Some(desc.trim_end_matches('\0').to_string());
            }
        }
        None
    }

    /// Virtual address of the program entry point, from the ELF header.
    pub(crate) fn entrypoint_va(&self) -> u64 {
        self.entry
    }

    /// Version string from the Hyperlight version note, if one was present.
    pub(crate) fn guest_bin_version(&self) -> Option<&str> {
        self.guest_bin_version.as_deref()
    }

    /// Lowest virtual address the image expects to be loaded at.
    ///
    /// Takes the *first* PT_LOAD header; the ELF gABI requires PT_LOAD
    /// entries to appear in ascending p_vaddr order, so first == lowest.
    /// The unwrap is safe: new() rejected images without a PT_LOAD header.
    pub(crate) fn get_base_va(&self) -> u64 {
        #[allow(clippy::unwrap_used)] let min_phdr = self
            .phdrs
            .iter()
            .find(|phdr| phdr.p_type == PT_LOAD)
            .unwrap();
        min_phdr.p_vaddr
    }

    /// Size in bytes of the image's full virtual-address span: from the base
    /// VA to the end (p_vaddr + p_memsz) of the *last* PT_LOAD segment.
    ///
    /// Like get_base_va(), this relies on PT_LOAD headers being sorted by
    /// p_vaddr; the unwrap is safe because new() guaranteed a PT_LOAD exists.
    pub(crate) fn get_va_size(&self) -> usize {
        #[allow(clippy::unwrap_used)] let max_phdr = self
            .phdrs
            .iter()
            .rev()
            .find(|phdr| phdr.p_type == PT_LOAD)
            .unwrap();
        (max_phdr.p_vaddr + max_phdr.p_memsz - self.get_base_va()) as usize
    }

    /// Copies every PT_LOAD segment into `target` and applies RELATIVE
    /// relocations for a final load address of `load_addr`.
    ///
    /// `target` must be at least get_va_size() bytes long; the slicing below
    /// panics on a malformed or undersized image rather than returning an
    /// error.
    ///
    /// # Errors
    /// Returns an error for an unsupported relocation type or a RELATIVE
    /// relocation missing its addend.
    pub(crate) fn load_at(self, load_addr: usize, target: &mut [u8]) -> Result<LoadInfo> {
        let base_va = self.get_base_va();
        for phdr in self.phdrs.iter().filter(|phdr| phdr.p_type == PT_LOAD) {
            // Offset of this segment within the loaded image.
            let start_va = (phdr.p_vaddr - base_va) as usize;
            let payload_offset = phdr.p_offset as usize;
            let payload_len = phdr.p_filesz as usize;
            // Copy the file-backed part of the segment ...
            target[start_va..start_va + payload_len]
                .copy_from_slice(&self.payload[payload_offset..payload_offset + payload_len]);
            // ... then zero-fill the remainder (e.g. .bss), since p_memsz
            // may exceed p_filesz.
            target[start_va + payload_len..start_va + phdr.p_memsz as usize].fill(0);
        }
        let get_addend = |name, r: &Reloc| {
            r.r_addend
                .ok_or_else(|| new_error!("{} missing addend", name))
        };
        // Apply dynamic relocations; only the NONE and RELATIVE types of the
        // target architecture are supported.
        // NOTE(review): r.r_offset is used directly as an offset into
        // `target`, which assumes the image's base VA is 0 — confirm for
        // non-zero-base images.
        for r in self.relocs.iter() {
            #[cfg(target_arch = "aarch64")]
            match r.r_type {
                R_AARCH64_RELATIVE => {
                    let addend = get_addend("R_AARCH64_RELATIVE", r)?;
                    // Write the relocated pointer (load_addr + addend) as
                    // 8 little-endian bytes.
                    target[r.r_offset as usize..r.r_offset as usize + 8]
                        .copy_from_slice(&(load_addr as i64 + addend).to_le_bytes());
                }
                R_AARCH64_NONE => {}
                _ => {
                    log_then_return!("unsupported aarch64 relocation {}", r.r_type);
                }
            }
            #[cfg(target_arch = "x86_64")]
            match r.r_type {
                R_X86_64_RELATIVE => {
                    let addend = get_addend("R_X86_64_RELATIVE", r)?;
                    // Write the relocated pointer (load_addr + addend) as
                    // 8 little-endian bytes.
                    target[r.r_offset as usize..r.r_offset as usize + 8]
                        .copy_from_slice(&(load_addr as i64 + addend).to_le_bytes());
                }
                R_X86_64_NONE => {}
                _ => {
                    log_then_return!("unsupported x86_64 relocation {}", r.r_type);
                }
            }
        }
        // With mem_profile enabled, hand back unwind metadata for profiling;
        // otherwise LoadInfo is an empty marker.
        cfg_if::cfg_if! {
            if #[cfg(feature = "mem_profile")] {
                let va_size = self.get_va_size() as u64;
                let base_svma = self.get_base_va();
                Ok(LoadInfo {
                    info: Arc::new(UnwindInfo {
                        payload: self.payload,
                        load_addr: load_addr as u64,
                        va_size,
                        base_svma,
                        shdrs: self.shdrs,
                    })
                })
            } else {
                Ok(LoadInfo {})
            }
        }
    }
}