use crate::{Error, Result};
use std::collections::HashMap;
use std::fmt;
use std::io::Write;
/// COFF machine-type field (`IMAGE_FILE_MACHINE_*`), kept as a raw `u16` newtype.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Machine(pub u16);

impl fmt::Display for Machine {
    /// Renders well-known machine types by name; anything else as 4-digit hex.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self.0 {
            0x014C => "i386",
            0x8664 => "AMD64",
            0x01C0 => "ARM",
            0xAA64 => "ARM64",
            0x01C4 => "ARMv7",
            other => return write!(f, "0x{other:04X}"),
        };
        f.write_str(name)
    }
}

impl From<u16> for Machine {
    fn from(v: u16) -> Self {
        Machine(v)
    }
}
/// Optional-header `Subsystem` field (`IMAGE_SUBSYSTEM_*`), kept as a raw `u16` newtype.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Subsystem(pub u16);

impl fmt::Display for Subsystem {
    /// Renders all documented `IMAGE_SUBSYSTEM_*` values by name (the
    /// original covered only 1/2/3/9); unknown values fall back to hex.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.0 {
            0 => write!(f, "Unknown"),
            1 => write!(f, "Native"),
            2 => write!(f, "Windows GUI"),
            3 => write!(f, "Windows CUI"),
            5 => write!(f, "OS/2 CUI"),
            7 => write!(f, "POSIX CUI"),
            8 => write!(f, "Native Win9x"),
            9 => write!(f, "Windows CE GUI"),
            10 => write!(f, "EFI Application"),
            11 => write!(f, "EFI Boot Service Driver"),
            12 => write!(f, "EFI Runtime Driver"),
            13 => write!(f, "EFI ROM"),
            14 => write!(f, "Xbox"),
            16 => write!(f, "Windows Boot Application"),
            other => write!(f, "0x{other:04X}"),
        }
    }
}

impl From<u16> for Subsystem {
    fn from(v: u16) -> Self {
        Self(v)
    }
}
/// COFF `Characteristics` flags word (`IMAGE_FILE_*`), kept as a raw `u16` newtype.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PeCharacteristics(pub u16);

impl fmt::Display for PeCharacteristics {
    /// Lists the recognized flags by name, comma separated, in a fixed order;
    /// falls back to 4-digit hex when no recognized flag is set.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        const FLAGS: [(u16, &str); 4] = [
            (0x0002, "Executable"),
            (0x0020, "LargeAddressAware"),
            (0x0100, "32Bit"),
            (0x2000, "DLL"),
        ];
        let names: Vec<&str> = FLAGS
            .iter()
            .filter(|&&(mask, _)| self.0 & mask != 0)
            .map(|&(_, name)| name)
            .collect();
        if names.is_empty() {
            write!(f, "0x{:04X}", self.0)
        } else {
            f.write_str(&names.join(", "))
        }
    }
}

impl From<u16> for PeCharacteristics {
    fn from(v: u16) -> Self {
        Self(v)
    }
}
// Generate shared newtype helpers for each `u16` wrapper via the project-local
// `newtype_ops!` macro — presumably including the `to_le_bytes` forwarding the
// header writers below rely on; confirm against the macro definition.
newtype_ops!(Machine, u16);
newtype_ops!(Subsystem, u16);
newtype_ops!(PeCharacteristics, u16);
/// PE/COFF layout constants used throughout this module.
pub mod constants {
    /// Size in bytes of the IMAGE_COR20_HEADER (CLR runtime header).
    pub const COR20_HEADER_SIZE: u32 = 72;
    /// Section characteristic flag: section is executable (IMAGE_SCN_MEM_EXECUTE).
    pub const IMAGE_SCN_MEM_EXECUTE: u32 = 0x2000_0000;
    /// NOTE(review): project-specific flag combination for metadata sections,
    /// not a standard Windows constant — confirm the intended bits.
    pub const IMAGE_SCN_METADATA: u32 = 0x4000_0040;
    /// Presumably a sanity cap for RVA validation — confirm at use sites.
    pub const MAX_REASONABLE_RVA: u32 = 0x1000_0000;
    /// Size in bytes of IMAGE_RESOURCE_DIRECTORY.
    pub const IMAGE_RESOURCE_DIRECTORY_SIZE: usize = 16;
    /// Size in bytes of IMAGE_RESOURCE_DIRECTORY_ENTRY.
    pub const RESOURCE_ENTRY_SIZE: usize = 8;
    /// Size in bytes of IMAGE_RESOURCE_DATA_ENTRY.
    pub const RESOURCE_DATA_ENTRY_SIZE: usize = 16;
    /// High bit of an entry's offset field: the target is a nested directory.
    pub const IMAGE_RESOURCE_DATA_IS_DIRECTORY: u32 = 0x8000_0000;
    /// High bit of an entry's name field: the name is a string, not an ID.
    pub const IMAGE_RESOURCE_NAME_IS_STRING: u32 = 0x8000_0000;
    /// Mask clearing the high flag bit, leaving the 31-bit offset/ID.
    pub const IMAGE_RESOURCE_MASK: u32 = 0x7FFF_FFFF;
}
use constants::{IMAGE_RESOURCE_DIRECTORY_SIZE, RESOURCE_DATA_ENTRY_SIZE, RESOURCE_ENTRY_SIZE};
/// Owned, mutable representation of a PE image's headers and metadata, built
/// from a goblin parse (`Pe::from_goblin_pe`) and serializable back out via
/// the `write_*` methods.
#[derive(Debug, Clone)]
pub struct Pe {
    pub dos_header: DosHeader,
    pub coff_header: CoffHeader,
    /// Always `Some` for values built by `from_goblin_pe`, which rejects
    /// images without an optional header.
    pub optional_header: Option<OptionalHeader>,
    pub sections: Vec<SectionTable>,
    /// Preferred load address; validated non-zero during construction.
    pub image_base: u64,
    pub is_64bit: bool,
    pub imports: Vec<Import>,
    pub exports: Vec<Export>,
    /// Names of the DLLs referenced by the import table.
    pub libraries: Vec<String>,
    /// Flattened copy of the optional header's data directories, for direct
    /// lookup by type.
    pub data_directories: HashMap<DataDirectoryType, DataDirectory>,
}
/// Legacy MS-DOS header (IMAGE_DOS_HEADER) that precedes the PE headers.
/// Fields mirror the on-disk layout; `pe_header_offset` is `e_lfanew`.
#[derive(Debug, Clone)]
pub struct DosHeader {
    /// "MZ" magic (0x5A4D as little-endian u16).
    pub signature: u16,
    pub bytes_on_last_page: u16,
    pub pages_in_file: u16,
    pub relocations: u16,
    pub size_of_header_paragraphs: u16,
    pub minimum_extra_paragraphs: u16,
    pub maximum_extra_paragraphs: u16,
    pub initial_relative_ss: u16,
    pub initial_sp: u16,
    pub checksum: u16,
    pub initial_ip: u16,
    pub initial_relative_cs: u16,
    pub address_of_relocation_table: u16,
    pub overlay_number: u16,
    /// File offset of the "PE\0\0" signature (`e_lfanew`).
    pub pe_header_offset: u32,
}
/// COFF file header (IMAGE_FILE_HEADER) that follows the PE signature.
#[derive(Debug, Clone)]
pub struct CoffHeader {
    pub machine: Machine,
    pub number_of_sections: u16,
    /// Linker timestamp field, copied verbatim from the image.
    pub time_date_stamp: u32,
    pub pointer_to_symbol_table: u32,
    pub number_of_symbols: u32,
    /// Size in bytes of the optional header that follows this one.
    pub size_of_optional_header: u16,
    pub characteristics: PeCharacteristics,
}
/// PE optional header: standard fields, windows-specific fields, and the
/// 16-entry data-directory table.
#[derive(Debug, Clone)]
pub struct OptionalHeader {
    pub standard_fields: StandardFields,
    pub windows_fields: WindowsFields,
    pub data_directories: DataDirectories,
}
/// Format-independent prefix of the optional header.
#[derive(Debug, Clone)]
pub struct StandardFields {
    /// 0x10b for PE32, 0x20b for PE32+ (see `OptionalHeader::write_to`).
    pub magic: u16,
    pub major_linker_version: u8,
    pub minor_linker_version: u8,
    pub size_of_code: u32,
    pub size_of_initialized_data: u32,
    pub size_of_uninitialized_data: u32,
    pub address_of_entry_point: u32,
    pub base_of_code: u32,
    /// Present only in the PE32 (0x10b) layout; `None` for PE32+.
    pub base_of_data: Option<u32>,
}
/// Windows-specific optional-header fields. `image_base` and the stack/heap
/// sizes are held as `u64` and narrowed to `u32` when serializing a PE32
/// image (see `WindowsFields::write_to`).
#[derive(Debug, Clone)]
pub struct WindowsFields {
    pub image_base: u64,
    pub section_alignment: u32,
    pub file_alignment: u32,
    pub major_operating_system_version: u16,
    pub minor_operating_system_version: u16,
    pub major_image_version: u16,
    pub minor_image_version: u16,
    pub major_subsystem_version: u16,
    pub minor_subsystem_version: u16,
    pub win32_version_value: u32,
    pub size_of_image: u32,
    pub size_of_headers: u32,
    pub checksum: u32,
    pub subsystem: Subsystem,
    pub dll_characteristics: u16,
    pub size_of_stack_reserve: u64,
    pub size_of_stack_commit: u64,
    pub size_of_heap_reserve: u64,
    pub size_of_heap_commit: u64,
    pub loader_flags: u32,
    pub number_of_rva_and_sizes: u32,
}
/// Sparse map of the optional header's 16 data directories; entries that are
/// absent or all-zero on disk are omitted (see `DataDirectories::from_goblin`).
#[derive(Debug, Clone)]
pub struct DataDirectories {
    directories: HashMap<DataDirectoryType, DataDirectory>,
}
/// One data-directory entry: an RVA plus a size in bytes.
#[derive(Debug, Clone, Copy)]
pub struct DataDirectory {
    pub virtual_address: u32,
    pub size: u32,
}
/// The 16 well-known data-directory slots; each discriminant matches the
/// entry's index in the optional header's on-disk table.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum DataDirectoryType {
    ExportTable = 0,
    ImportTable = 1,
    ResourceTable = 2,
    ExceptionTable = 3,
    CertificateTable = 4,
    BaseRelocationTable = 5,
    Debug = 6,
    Architecture = 7,
    GlobalPtr = 8,
    TlsTable = 9,
    LoadConfigTable = 10,
    BoundImport = 11,
    ImportAddressTable = 12,
    DelayImportDescriptor = 13,
    ClrRuntimeHeader = 14,
    Reserved = 15,
}
/// One section header (IMAGE_SECTION_HEADER), with the fixed 8-byte name
/// decoded to a UTF-8 string (NUL padding stripped).
#[derive(Debug, Clone)]
pub struct SectionTable {
    pub name: String,
    pub virtual_size: u32,
    pub virtual_address: u32,
    pub size_of_raw_data: u32,
    pub pointer_to_raw_data: u32,
    pub pointer_to_relocations: u32,
    pub pointer_to_line_numbers: u32,
    pub number_of_relocations: u16,
    pub number_of_line_numbers: u16,
    pub characteristics: u32,
}
/// A single imported symbol, flattened from the image's import table.
#[derive(Debug, Clone)]
pub struct Import {
    /// Name of the DLL the symbol is imported from.
    pub dll: String,
    /// Import-by-name symbol name; `None` for ordinal-only imports.
    pub name: Option<String>,
    /// Import ordinal; `None` when goblin reports 0 (import by name).
    pub ordinal: Option<u16>,
    pub rva: u32,
    /// Currently always 0 — `Import::from_goblin` does not recover the hint.
    pub hint: u16,
    /// Raw import lookup table value (taken from goblin's `offset`).
    pub ilt_value: u64,
}
/// A single exported symbol.
#[derive(Debug, Clone)]
pub struct Export {
    /// Export name; `None` for ordinal-only exports.
    pub name: Option<String>,
    pub rva: u32,
    /// File offset of the exported data, when goblin resolved one.
    pub offset: Option<u32>,
}
impl Pe {
    /// Builds an owned [`Pe`] from a goblin parse, copying headers, sections,
    /// imports, exports, libraries, and data directories.
    ///
    /// # Errors
    /// Fails when the image has no optional header, has a zero image base, or
    /// any sub-structure conversion fails.
    pub fn from_goblin_pe(goblin_pe: &goblin::pe::PE) -> Result<Self> {
        let dos_header = DosHeader::from_goblin(&goblin_pe.header.dos_header);
        let coff_header = CoffHeader::from_goblin(&goblin_pe.header.coff_header);
        let optional_header = goblin_pe
            .header
            .optional_header
            .as_ref()
            .map(OptionalHeader::from_goblin)
            .transpose()?;
        if optional_header.is_none() {
            return Err(malformed_error!("File does not have an OptionalHeader"));
        }
        if goblin_pe.image_base == 0 {
            return Err(malformed_error!("PE has invalid zero image base"));
        }
        let sections = goblin_pe
            .sections
            .iter()
            .map(SectionTable::from_goblin)
            .collect::<Result<Vec<_>>>()?;
        let imports = goblin_pe
            .imports
            .iter()
            .map(Import::from_goblin)
            .collect::<Result<Vec<_>>>()?;
        let exports = goblin_pe
            .exports
            .iter()
            .map(Export::from_goblin)
            .collect::<Result<Vec<_>>>()?;
        let libraries = goblin_pe.libraries.iter().map(|&s| s.to_string()).collect();
        // `optional_header` is guaranteed `Some` by the check above; the
        // `map_or_else` merely keeps this expression total if that check is
        // ever relaxed.
        let data_directories = optional_header
            .as_ref()
            .map_or_else(DataDirectories::new, |oh| oh.data_directories.clone());
        Ok(Pe {
            dos_header,
            coff_header,
            optional_header,
            sections,
            image_base: goblin_pe.image_base,
            is_64bit: goblin_pe.is_64,
            imports,
            exports,
            libraries,
            data_directories: data_directories.directories,
        })
    }

    /// Writes the 64-byte DOS header.
    pub fn write_dos_header<W: Write>(&self, writer: &mut W) -> Result<()> {
        self.dos_header.write_to(writer)
    }

    /// Writes the PE signature, COFF header, and optional header (if any).
    /// Alias of [`Self::write_headers`]; the two previously carried duplicate
    /// bodies and both names are kept for API compatibility.
    pub fn write_pe_headers<W: Write>(&self, writer: &mut W) -> Result<()> {
        self.write_headers(writer)
    }

    /// Writes all section headers in order. Alias of
    /// [`Self::write_section_headers`], kept for API compatibility.
    pub fn write_section_table<W: Write>(&self, writer: &mut W) -> Result<()> {
        self.write_section_headers(writer)
    }

    /// Looks up a data-directory entry by type.
    #[must_use]
    pub fn get_data_directory(&self, dir_type: DataDirectoryType) -> Option<DataDirectory> {
        self.data_directories.get(&dir_type).copied()
    }

    /// Returns the CLR runtime header entry (data directory 14), if present.
    #[must_use]
    pub fn get_clr_runtime_header(&self) -> Option<DataDirectory> {
        self.get_data_directory(DataDirectoryType::ClrRuntimeHeader)
    }

    /// Size of the PE signature (4 bytes) + COFF header + optional header,
    /// using the optional-header size recorded in the COFF header.
    #[must_use]
    pub fn calculate_headers_size(&self) -> u64 {
        let optional_header_size = self
            .optional_header
            .as_ref()
            .map_or(0, |_| u64::from(self.coff_header.size_of_optional_header));
        4 + CoffHeader::SIZE as u64 + optional_header_size
    }

    /// [`Self::calculate_headers_size`] plus the 64-byte DOS header.
    #[must_use]
    pub fn calculate_total_file_headers_size(&self) -> u64 {
        DosHeader::SIZE as u64 + self.calculate_headers_size()
    }

    /// Sum of every section's `size_of_raw_data`.
    #[must_use]
    pub fn get_sections_total_raw_data_size(&self) -> u64 {
        self.sections
            .iter()
            .map(|section| u64::from(section.size_of_raw_data))
            .sum()
    }

    /// File offset of the PE signature (`e_lfanew`).
    #[must_use]
    pub fn get_pe_headers_offset(&self) -> u64 {
        u64::from(self.dos_header.pe_header_offset)
    }

    /// FileAlignment from the optional header; defaults to 0x200 without one.
    #[must_use]
    pub fn get_file_alignment(&self) -> u64 {
        self.optional_header
            .as_ref()
            .map_or(0x200, |oh| u64::from(oh.windows_fields.file_alignment))
    }

    /// SectionAlignment from the optional header; defaults to 0x1000 without one.
    #[must_use]
    pub fn get_section_alignment(&self) -> u64 {
        self.optional_header
            .as_ref()
            .map_or(0x1000, |oh| u64::from(oh.windows_fields.section_alignment))
    }

    /// Appends a section and refreshes the COFF section count. The count is
    /// left unchanged if it would overflow `u16`.
    pub fn add_section(&mut self, section: SectionTable) {
        self.sections.push(section);
        if let Ok(section_count) = u16::try_from(self.sections.len()) {
            self.coff_header.update_section_count(section_count);
        }
    }

    /// Removes the first section named `name` and refreshes the COFF section
    /// count. Returns `true` when a section was removed.
    pub fn remove_section(&mut self, name: &str) -> bool {
        if let Some(index) = self.sections.iter().position(|s| s.name == name) {
            self.sections.remove(index);
            if let Ok(section_count) = u16::try_from(self.sections.len()) {
                self.coff_header.update_section_count(section_count);
            }
            true
        } else {
            false
        }
    }

    /// Mutable lookup of a section by name.
    pub fn get_section_mut(&mut self, name: &str) -> Option<&mut SectionTable> {
        self.sections.iter_mut().find(|s| s.name == name)
    }

    /// Immutable lookup of a section by name.
    #[must_use]
    pub fn get_section(&self, name: &str) -> Option<&SectionTable> {
        self.sections.iter().find(|s| s.name == name)
    }

    /// Updates the CLR runtime header entry in both the optional header and
    /// the flattened `data_directories` map.
    ///
    /// # Errors
    /// Fails when the PE has no optional header.
    pub fn update_clr_data_directory(&mut self, rva: u32, size: u32) -> Result<()> {
        // Check first so the CLR-specific error message is preserved, then
        // reuse the generic update path instead of duplicating it.
        if self.optional_header.is_none() {
            return Err(malformed_error!(
                "Cannot update CLR data directory: PE has no optional header"
            ));
        }
        self.update_data_directory(DataDirectoryType::ClrRuntimeHeader, rva, size)
    }

    /// Updates a data-directory entry in both the optional header and the
    /// flattened `data_directories` map.
    ///
    /// # Errors
    /// Fails when the PE has no optional header.
    pub fn update_data_directory(
        &mut self,
        dir_type: DataDirectoryType,
        rva: u32,
        size: u32,
    ) -> Result<()> {
        if let Some(ref mut optional_header) = self.optional_header {
            optional_header
                .data_directories
                .update_entry(dir_type, rva, size);
            self.data_directories.insert(
                dir_type,
                DataDirectory {
                    virtual_address: rva,
                    size,
                },
            );
            Ok(())
        } else {
            Err(malformed_error!(
                "Cannot update data directory: PE has no optional header"
            ))
        }
    }

    /// Sets SizeOfImage in the optional header.
    ///
    /// # Errors
    /// Fails when the PE has no optional header.
    pub fn update_size_of_image(&mut self, new_size: u32) -> Result<()> {
        if let Some(ref mut optional_header) = self.optional_header {
            optional_header.windows_fields.size_of_image = new_size;
            Ok(())
        } else {
            Err(malformed_error!(
                "Cannot update SizeOfImage: PE has no optional header"
            ))
        }
    }

    /// Sets SizeOfHeaders in the optional header.
    ///
    /// # Errors
    /// Fails when the PE has no optional header.
    pub fn update_size_of_headers(&mut self, new_size: u32) -> Result<()> {
        if let Some(ref mut optional_header) = self.optional_header {
            optional_header.windows_fields.size_of_headers = new_size;
            Ok(())
        } else {
            Err(malformed_error!(
                "Cannot update SizeOfHeaders: PE has no optional header"
            ))
        }
    }

    /// Writes the "PE\0\0" signature, the COFF header, and the optional
    /// header when one is present.
    pub fn write_headers<W: Write>(&self, writer: &mut W) -> Result<()> {
        writer.write_all(b"PE\x00\x00")?;
        self.coff_header.write_to(writer)?;
        if let Some(ref optional_header) = self.optional_header {
            optional_header.write_to(writer)?;
        }
        Ok(())
    }

    /// Writes every section header in order.
    pub fn write_section_headers<W: Write>(&self, writer: &mut W) -> Result<()> {
        for section in &self.sections {
            section.write_to(writer)?;
        }
        Ok(())
    }
}
impl DosHeader {
    /// Serialized size of the bare IMAGE_DOS_HEADER (no stub).
    pub const SIZE: usize = 64;
    /// Size of the canonical header + DOS stub blob below.
    pub const STANDARD_SIZE: usize = 128;
    /// Canonical 128-byte DOS header + stub: 64-byte header ("MZ"…, e_lfanew
    /// = 0x80) followed by the classic "This program cannot be run in DOS
    /// mode." stub.
    #[rustfmt::skip]
    pub const STANDARD_DOS_HEADER: [u8; 128] = [
        0x4D, 0x5A, 0x90, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x0E, 0x1F, 0xBA, 0x0E, 0x00, 0xB4, 0x09, 0xCD, 0x21, 0xB8, 0x01, 0x4C, 0xCD, 0x21, 0x54, 0x68, 0x69, 0x73, 0x20, 0x70, 0x72, 0x6F, 0x67, 0x72, 0x61, 0x6D, 0x20, 0x63, 0x61, 0x6E, 0x6E, 0x6F, 0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x75, 0x6E, 0x20, 0x69, 0x6E, 0x20, 0x44, 0x4F, 0x53, 0x20, 0x6D, 0x6F, 0x64, 0x65, 0x2E, 0x0D, 0x0D, 0x0A, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ];

    /// Copies goblin's DOS header fields into the owned representation.
    fn from_goblin(goblin_dos: &goblin::pe::header::DosHeader) -> Self {
        Self {
            signature: goblin_dos.signature,
            bytes_on_last_page: goblin_dos.bytes_on_last_page,
            pages_in_file: goblin_dos.pages_in_file,
            relocations: goblin_dos.relocations,
            size_of_header_paragraphs: goblin_dos.size_of_header_in_paragraphs,
            minimum_extra_paragraphs: goblin_dos.minimum_extra_paragraphs_needed,
            maximum_extra_paragraphs: goblin_dos.maximum_extra_paragraphs_needed,
            initial_relative_ss: goblin_dos.initial_relative_ss,
            initial_sp: goblin_dos.initial_sp,
            checksum: goblin_dos.checksum,
            initial_ip: goblin_dos.initial_ip,
            initial_relative_cs: goblin_dos.initial_relative_cs,
            address_of_relocation_table: goblin_dos.file_address_of_relocation_table,
            overlay_number: goblin_dos.overlay_number,
            pe_header_offset: goblin_dos.pe_pointer,
        }
    }

    /// Serializes the 64-byte header in on-disk field order. Reserved fields
    /// are always written as zero.
    pub fn write_to<W: Write>(&self, writer: &mut W) -> Result<()> {
        writer.write_all(&self.signature.to_le_bytes())?;
        writer.write_all(&self.bytes_on_last_page.to_le_bytes())?;
        writer.write_all(&self.pages_in_file.to_le_bytes())?;
        writer.write_all(&self.relocations.to_le_bytes())?;
        writer.write_all(&self.size_of_header_paragraphs.to_le_bytes())?;
        writer.write_all(&self.minimum_extra_paragraphs.to_le_bytes())?;
        writer.write_all(&self.maximum_extra_paragraphs.to_le_bytes())?;
        writer.write_all(&self.initial_relative_ss.to_le_bytes())?;
        writer.write_all(&self.initial_sp.to_le_bytes())?;
        writer.write_all(&self.checksum.to_le_bytes())?;
        writer.write_all(&self.initial_ip.to_le_bytes())?;
        writer.write_all(&self.initial_relative_cs.to_le_bytes())?;
        writer.write_all(&self.address_of_relocation_table.to_le_bytes())?;
        writer.write_all(&self.overlay_number.to_le_bytes())?;
        // e_res[4]: four reserved words.
        for _ in 0..4 {
            writer.write_all(&0u16.to_le_bytes())?;
        }
        // e_oemid and e_oeminfo (zeroed), then e_res2[10] reserved words.
        writer.write_all(&0u16.to_le_bytes())?;
        writer.write_all(&0u16.to_le_bytes())?;
        for _ in 0..10 {
            writer.write_all(&0u16.to_le_bytes())?;
        }
        // e_lfanew: offset of the "PE\0\0" signature.
        writer.write_all(&self.pe_header_offset.to_le_bytes())?;
        Ok(())
    }

    /// Writes the full 128-byte canonical header + stub.
    pub fn write_standard<W: Write>(writer: &mut W) -> Result<()> {
        writer.write_all(&Self::STANDARD_DOS_HEADER)?;
        Ok(())
    }

    /// Writes this header's 64 bytes followed by the canonical DOS stub
    /// (bytes 64..128 of `STANDARD_DOS_HEADER`).
    pub fn write_with_stub<W: Write>(&self, writer: &mut W) -> Result<()> {
        self.write_to(writer)?;
        writer.write_all(&Self::STANDARD_DOS_HEADER[64..128])?;
        Ok(())
    }
}
impl CoffHeader {
    /// On-disk size of the COFF file header in bytes.
    pub const SIZE: usize = 20;

    /// Copies the goblin COFF header into the owned representation.
    fn from_goblin(src: &goblin::pe::header::CoffHeader) -> Self {
        Self {
            machine: Machine(src.machine),
            number_of_sections: src.number_of_sections,
            time_date_stamp: src.time_date_stamp,
            pointer_to_symbol_table: src.pointer_to_symbol_table,
            number_of_symbols: src.number_of_symbol_table,
            size_of_optional_header: src.size_of_optional_header,
            characteristics: PeCharacteristics(src.characteristics),
        }
    }

    /// Records a new section count; callers keep `Pe::sections` in sync.
    pub fn update_section_count(&mut self, new_count: u16) {
        self.number_of_sections = new_count;
    }

    /// Records a new optional-header size.
    pub fn update_optional_header_size(&mut self, new_size: u16) {
        self.size_of_optional_header = new_size;
    }

    /// Serializes the 20-byte header, little-endian, in on-disk field order.
    pub fn write_to<W: Write>(&self, writer: &mut W) -> Result<()> {
        writer.write_all(&self.machine.to_le_bytes())?;
        writer.write_all(&self.number_of_sections.to_le_bytes())?;
        writer.write_all(&self.time_date_stamp.to_le_bytes())?;
        writer.write_all(&self.pointer_to_symbol_table.to_le_bytes())?;
        writer.write_all(&self.number_of_symbols.to_le_bytes())?;
        writer.write_all(&self.size_of_optional_header.to_le_bytes())?;
        writer.write_all(&self.characteristics.to_le_bytes())?;
        Ok(())
    }
}
impl OptionalHeader {
    /// Builds the owned optional header from goblin's parse.
    fn from_goblin(goblin_oh: &goblin::pe::optional_header::OptionalHeader) -> Result<Self> {
        let standard_fields = StandardFields::from_goblin(&goblin_oh.standard_fields)?;
        let windows_fields = WindowsFields::from_goblin(&goblin_oh.windows_fields);
        let data_directories = DataDirectories::from_goblin(&goblin_oh.data_directories);
        Ok(Self {
            standard_fields,
            windows_fields,
            data_directories,
        })
    }

    /// Serializes standard fields, windows fields, then data directories,
    /// selecting the PE32 / PE32+ layout from the magic value.
    ///
    /// # Errors
    /// Rejects any magic other than 0x10b (PE32) or 0x20b (PE32+).
    pub fn write_to<W: Write>(&self, writer: &mut W) -> Result<()> {
        let is_pe32_plus = match self.standard_fields.magic {
            0x10b => false,
            0x20b => true,
            magic => {
                return Err(malformed_error!(
                    "Invalid PE optional header magic: 0x{:x} (expected 0x10b or 0x20b)",
                    magic
                ))
            }
        };
        self.standard_fields.write_to(writer)?;
        self.windows_fields.write_to(writer, is_pe32_plus)?;
        self.data_directories.write_to(writer)?;
        Ok(())
    }

    /// Serialized size of this header (224 bytes for PE32, 240 for PE32+).
    #[must_use]
    pub fn size(&self) -> usize {
        Self::size_for_format(self.standard_fields.magic == 0x20b)
    }

    /// Serialized optional-header size for the given format; index 0 of the
    /// per-part tables is PE32, index 1 is PE32+.
    #[must_use]
    pub const fn size_for_format(is_pe32_plus: bool) -> usize {
        StandardFields::SIZE_FOR_FORMAT[is_pe32_plus as usize]
            + WindowsFields::SIZE_FOR_FORMAT[is_pe32_plus as usize]
            + DataDirectories::SIZE
    }
}
impl StandardFields {
    /// Serialized size by format: index 0 = PE32 (28 bytes, includes
    /// `base_of_data`), index 1 = PE32+ (24 bytes).
    pub const SIZE_FOR_FORMAT: [usize; 2] = [28, 24];

    /// Copies goblin's standard fields, narrowing goblin's widened integers
    /// back to their on-disk `u32` width.
    ///
    /// # Errors
    /// Fails when any widened value does not fit in `u32`.
    fn from_goblin(goblin_sf: &goblin::pe::optional_header::StandardFields) -> Result<Self> {
        Ok(Self {
            magic: goblin_sf.magic,
            major_linker_version: goblin_sf.major_linker_version,
            minor_linker_version: goblin_sf.minor_linker_version,
            size_of_code: u32::try_from(goblin_sf.size_of_code)
                .map_err(|_| malformed_error!("PE size_of_code value too large"))?,
            size_of_initialized_data: u32::try_from(goblin_sf.size_of_initialized_data)
                .map_err(|_| malformed_error!("PE size_of_initialized_data value too large"))?,
            size_of_uninitialized_data: u32::try_from(goblin_sf.size_of_uninitialized_data)
                .map_err(|_| malformed_error!("PE size_of_uninitialized_data value too large"))?,
            address_of_entry_point: goblin_sf.address_of_entry_point,
            base_of_code: u32::try_from(goblin_sf.base_of_code)
                .map_err(|_| malformed_error!("PE base_of_code value too large"))?,
            // `base_of_data` only exists in the PE32 (0x10b) layout.
            base_of_data: if goblin_sf.magic == 0x10b {
                Some(goblin_sf.base_of_data)
            } else {
                None
            },
        })
    }

    /// Serializes the standard fields; PE32 images additionally emit
    /// `base_of_data`.
    ///
    /// # Errors
    /// Fails when a PE32 image is missing its `base_of_data` field.
    pub fn write_to<W: Write>(&self, writer: &mut W) -> Result<()> {
        writer.write_all(&self.magic.to_le_bytes())?;
        writer.write_all(&self.major_linker_version.to_le_bytes())?;
        writer.write_all(&self.minor_linker_version.to_le_bytes())?;
        writer.write_all(&self.size_of_code.to_le_bytes())?;
        writer.write_all(&self.size_of_initialized_data.to_le_bytes())?;
        writer.write_all(&self.size_of_uninitialized_data.to_le_bytes())?;
        writer.write_all(&self.address_of_entry_point.to_le_bytes())?;
        writer.write_all(&self.base_of_code.to_le_bytes())?;
        if self.magic == 0x10b {
            // Consistency: use the module's `malformed_error!` constructor
            // instead of hand-building `Error::Malformed` as before.
            let base_of_data = self
                .base_of_data
                .ok_or_else(|| malformed_error!("PE32 file missing base_of_data field"))?;
            writer.write_all(&base_of_data.to_le_bytes())?;
        }
        Ok(())
    }
}
impl WindowsFields {
    /// Serialized size by format: index 0 = PE32 (68 bytes), index 1 =
    /// PE32+ (88 bytes).
    pub const SIZE_FOR_FORMAT: [usize; 2] = [68, 88];

    /// Copies goblin's windows-specific fields verbatim.
    fn from_goblin(goblin_wf: &goblin::pe::optional_header::WindowsFields) -> Self {
        Self {
            image_base: goblin_wf.image_base,
            section_alignment: goblin_wf.section_alignment,
            file_alignment: goblin_wf.file_alignment,
            major_operating_system_version: goblin_wf.major_operating_system_version,
            minor_operating_system_version: goblin_wf.minor_operating_system_version,
            major_image_version: goblin_wf.major_image_version,
            minor_image_version: goblin_wf.minor_image_version,
            major_subsystem_version: goblin_wf.major_subsystem_version,
            minor_subsystem_version: goblin_wf.minor_subsystem_version,
            win32_version_value: goblin_wf.win32_version_value,
            size_of_image: goblin_wf.size_of_image,
            size_of_headers: goblin_wf.size_of_headers,
            checksum: goblin_wf.check_sum,
            subsystem: Subsystem(goblin_wf.subsystem),
            dll_characteristics: goblin_wf.dll_characteristics,
            size_of_stack_reserve: goblin_wf.size_of_stack_reserve,
            size_of_stack_commit: goblin_wf.size_of_stack_commit,
            size_of_heap_reserve: goblin_wf.size_of_heap_reserve,
            size_of_heap_commit: goblin_wf.size_of_heap_commit,
            loader_flags: goblin_wf.loader_flags,
            number_of_rva_and_sizes: goblin_wf.number_of_rva_and_sizes,
        }
    }

    /// Serializes the fields in on-disk order. `is_pe32_plus` selects the
    /// layout: PE32+ stores `image_base` and the stack/heap sizes as u64,
    /// PE32 stores them as u32.
    ///
    /// # Errors
    /// Fails when a u64-valued field does not fit in u32 for a PE32 image.
    pub fn write_to<W: Write>(&self, writer: &mut W, is_pe32_plus: bool) -> Result<()> {
        if is_pe32_plus {
            writer.write_all(&self.image_base.to_le_bytes())?;
        } else {
            // PE32: image_base is a 32-bit field.
            writer.write_all(
                &u32::try_from(self.image_base)
                    .map_err(|_| malformed_error!("Image base exceeds u32 range"))?
                    .to_le_bytes(),
            )?;
        }
        writer.write_all(&self.section_alignment.to_le_bytes())?;
        writer.write_all(&self.file_alignment.to_le_bytes())?;
        writer.write_all(&self.major_operating_system_version.to_le_bytes())?;
        writer.write_all(&self.minor_operating_system_version.to_le_bytes())?;
        writer.write_all(&self.major_image_version.to_le_bytes())?;
        writer.write_all(&self.minor_image_version.to_le_bytes())?;
        writer.write_all(&self.major_subsystem_version.to_le_bytes())?;
        writer.write_all(&self.minor_subsystem_version.to_le_bytes())?;
        writer.write_all(&self.win32_version_value.to_le_bytes())?;
        writer.write_all(&self.size_of_image.to_le_bytes())?;
        writer.write_all(&self.size_of_headers.to_le_bytes())?;
        writer.write_all(&self.checksum.to_le_bytes())?;
        writer.write_all(&self.subsystem.to_le_bytes())?;
        writer.write_all(&self.dll_characteristics.to_le_bytes())?;
        // Stack/heap reserve+commit: u64 in PE32+, u32 in PE32.
        if is_pe32_plus {
            writer.write_all(&self.size_of_stack_reserve.to_le_bytes())?;
            writer.write_all(&self.size_of_stack_commit.to_le_bytes())?;
            writer.write_all(&self.size_of_heap_reserve.to_le_bytes())?;
            writer.write_all(&self.size_of_heap_commit.to_le_bytes())?;
        } else {
            writer.write_all(
                &u32::try_from(self.size_of_stack_reserve)
                    .map_err(|_| malformed_error!("Stack reserve size exceeds u32 range"))?
                    .to_le_bytes(),
            )?;
            writer.write_all(
                &u32::try_from(self.size_of_stack_commit)
                    .map_err(|_| malformed_error!("Stack commit size exceeds u32 range"))?
                    .to_le_bytes(),
            )?;
            writer.write_all(
                &u32::try_from(self.size_of_heap_reserve)
                    .map_err(|_| malformed_error!("Heap reserve size exceeds u32 range"))?
                    .to_le_bytes(),
            )?;
            writer.write_all(
                &u32::try_from(self.size_of_heap_commit)
                    .map_err(|_| malformed_error!("Heap commit size exceeds u32 range"))?
                    .to_le_bytes(),
            )?;
        }
        writer.write_all(&self.loader_flags.to_le_bytes())?;
        writer.write_all(&self.number_of_rva_and_sizes.to_le_bytes())?;
        Ok(())
    }
}
impl Default for DataDirectories {
fn default() -> Self {
Self::new()
}
}
impl DataDirectories {
pub const COUNT: usize = 16;
pub const SIZE: usize = Self::COUNT * 8;
#[must_use]
pub fn new() -> Self {
Self {
directories: HashMap::new(),
}
}
#[must_use]
pub fn get_clr_runtime_header(&self) -> Option<&DataDirectory> {
self.directories.get(&DataDirectoryType::ClrRuntimeHeader)
}
pub fn update_entry(&mut self, dir_type: DataDirectoryType, rva: u32, size: u32) {
self.directories.insert(
dir_type,
DataDirectory {
virtual_address: rva,
size,
},
);
}
pub fn update_clr_entry(&mut self, rva: u32, size: u32) {
self.update_entry(DataDirectoryType::ClrRuntimeHeader, rva, size);
}
fn from_goblin(goblin_dd: &goblin::pe::data_directories::DataDirectories) -> Self {
let mut directories = HashMap::new();
for (i, opt_entry) in goblin_dd.data_directories.iter().enumerate() {
if let Some((_, dir_entry)) = opt_entry {
let dir_type = match i {
0 => DataDirectoryType::ExportTable,
1 => DataDirectoryType::ImportTable,
2 => DataDirectoryType::ResourceTable,
3 => DataDirectoryType::ExceptionTable,
4 => DataDirectoryType::CertificateTable,
5 => DataDirectoryType::BaseRelocationTable,
6 => DataDirectoryType::Debug,
7 => DataDirectoryType::Architecture,
8 => DataDirectoryType::GlobalPtr,
9 => DataDirectoryType::TlsTable,
10 => DataDirectoryType::LoadConfigTable,
11 => DataDirectoryType::BoundImport,
12 => DataDirectoryType::ImportAddressTable,
13 => DataDirectoryType::DelayImportDescriptor,
14 => DataDirectoryType::ClrRuntimeHeader,
15 => DataDirectoryType::Reserved,
_ => {
continue;
}
};
if dir_entry.virtual_address != 0 || dir_entry.size != 0 {
directories.insert(
dir_type,
DataDirectory {
virtual_address: dir_entry.virtual_address,
size: dir_entry.size,
},
);
}
}
}
Self { directories }
}
fn write_to<W: Write>(&self, writer: &mut W) -> Result<()> {
for i in 0..16 {
let dir_type = match i {
0 => DataDirectoryType::ExportTable,
1 => DataDirectoryType::ImportTable,
2 => DataDirectoryType::ResourceTable,
3 => DataDirectoryType::ExceptionTable,
4 => DataDirectoryType::CertificateTable,
5 => DataDirectoryType::BaseRelocationTable,
6 => DataDirectoryType::Debug,
7 => DataDirectoryType::Architecture,
8 => DataDirectoryType::GlobalPtr,
9 => DataDirectoryType::TlsTable,
10 => DataDirectoryType::LoadConfigTable,
11 => DataDirectoryType::BoundImport,
12 => DataDirectoryType::ImportAddressTable,
13 => DataDirectoryType::DelayImportDescriptor,
14 => DataDirectoryType::ClrRuntimeHeader,
15 => DataDirectoryType::Reserved,
_ => unreachable!(),
};
if let Some(entry) = self.directories.get(&dir_type) {
writer.write_all(&entry.virtual_address.to_le_bytes())?;
writer.write_all(&entry.size.to_le_bytes())?;
} else {
writer.write_all(&0u32.to_le_bytes())?; writer.write_all(&0u32.to_le_bytes())?; }
}
Ok(())
}
}
impl SectionTable {
    /// On-disk size of one section header (IMAGE_SECTION_HEADER).
    pub const SIZE: usize = 40;

    /// Converts a goblin section header; the fixed 8-byte name is decoded as
    /// UTF-8 with trailing NUL padding stripped.
    ///
    /// # Errors
    /// Fails when the raw name is not valid UTF-8.
    fn from_goblin(goblin_section: &goblin::pe::section_table::SectionTable) -> Result<Self> {
        // Consistency: use the module's `malformed_error!` constructor rather
        // than hand-building `Error::Malformed` as before.
        let name = std::str::from_utf8(&goblin_section.name)
            .map_err(|_| malformed_error!("Invalid section name"))?
            .trim_end_matches('\0')
            .to_string();
        Ok(Self {
            name,
            virtual_size: goblin_section.virtual_size,
            virtual_address: goblin_section.virtual_address,
            size_of_raw_data: goblin_section.size_of_raw_data,
            pointer_to_raw_data: goblin_section.pointer_to_raw_data,
            pointer_to_relocations: goblin_section.pointer_to_relocations,
            pointer_to_line_numbers: goblin_section.pointer_to_linenumbers,
            number_of_relocations: goblin_section.number_of_relocations,
            number_of_line_numbers: goblin_section.number_of_linenumbers,
            characteristics: goblin_section.characteristics,
        })
    }

    /// Total serialized size of a section table with `section_count` entries.
    #[must_use]
    pub fn calculate_table_size(section_count: usize) -> u64 {
        // Multiply in u64 (widening casts are lossless) so a huge count
        // cannot overflow `usize` on 32-bit targets before the conversion.
        section_count as u64 * Self::SIZE as u64
    }

    /// Builds a fresh section header from layout-computed values, with all
    /// relocation/line-number fields zeroed.
    ///
    /// # Errors
    /// Fails when `file_offset` or `file_size` exceeds `u32`.
    pub fn from_layout_info(
        name: String,
        virtual_address: u32,
        virtual_size: u32,
        file_offset: u64,
        file_size: u64,
        characteristics: u32,
    ) -> Result<Self> {
        let size_of_raw_data = u32::try_from(file_size)
            .map_err(|_| malformed_error!("File size exceeds u32 range: {}", file_size))?;
        let pointer_to_raw_data = u32::try_from(file_offset)
            .map_err(|_| malformed_error!("File offset exceeds u32 range: {}", file_offset))?;
        Ok(Self {
            name,
            virtual_size,
            virtual_address,
            size_of_raw_data,
            pointer_to_raw_data,
            pointer_to_relocations: 0,
            pointer_to_line_numbers: 0,
            number_of_relocations: 0,
            number_of_line_numbers: 0,
            characteristics,
        })
    }

    /// Sets the section's virtual address and size.
    pub fn update_virtual_location(&mut self, virtual_address: u32, virtual_size: u32) {
        self.virtual_address = virtual_address;
        self.virtual_size = virtual_size;
    }

    /// Sets the section's raw-data offset and size.
    ///
    /// # Errors
    /// Fails when either value exceeds `u32`.
    pub fn update_file_location(&mut self, file_offset: u64, file_size: u64) -> Result<()> {
        self.pointer_to_raw_data = u32::try_from(file_offset)
            .map_err(|_| malformed_error!("File offset exceeds u32 range: {}", file_offset))?;
        self.size_of_raw_data = u32::try_from(file_size)
            .map_err(|_| malformed_error!("File size exceeds u32 range: {}", file_size))?;
        Ok(())
    }

    /// Replaces the section characteristics flags.
    pub fn update_characteristics(&mut self, characteristics: u32) {
        self.characteristics = characteristics;
    }

    /// Renames the section, enforcing the PE 8-byte name limit.
    ///
    /// # Errors
    /// Fails when `name` is longer than 8 bytes.
    pub fn set_name(&mut self, name: String) -> Result<()> {
        if name.len() > 8 {
            return Err(malformed_error!(
                "Section name '{}' exceeds 8-byte PE limit ({} bytes)",
                name,
                name.len()
            ));
        }
        self.name = name;
        Ok(())
    }

    /// Serializes the 40-byte header. The name is NUL-padded to 8 bytes and
    /// silently truncated if longer (`set_name` prevents that for names set
    /// through the API).
    pub fn write_to<W: Write>(&self, writer: &mut W) -> Result<()> {
        let mut name_bytes = [0u8; 8];
        let name_str = self.name.as_bytes();
        let copy_len = std::cmp::min(name_str.len(), 8);
        name_bytes[..copy_len].copy_from_slice(&name_str[..copy_len]);
        writer.write_all(&name_bytes)?;
        writer.write_all(&self.virtual_size.to_le_bytes())?;
        writer.write_all(&self.virtual_address.to_le_bytes())?;
        writer.write_all(&self.size_of_raw_data.to_le_bytes())?;
        writer.write_all(&self.pointer_to_raw_data.to_le_bytes())?;
        writer.write_all(&self.pointer_to_relocations.to_le_bytes())?;
        writer.write_all(&self.pointer_to_line_numbers.to_le_bytes())?;
        writer.write_all(&self.number_of_relocations.to_le_bytes())?;
        writer.write_all(&self.number_of_line_numbers.to_le_bytes())?;
        writer.write_all(&self.characteristics.to_le_bytes())?;
        Ok(())
    }
}
impl Import {
    /// Converts a goblin import record; empty names and zero ordinals are
    /// normalized to `None`.
    ///
    /// # Errors
    /// Fails when the RVA or offset does not fit its on-disk width.
    fn from_goblin(src: &goblin::pe::import::Import) -> Result<Self> {
        let name = if src.name.is_empty() {
            None
        } else {
            Some(src.name.to_string())
        };
        let ordinal = if src.ordinal == 0 {
            None
        } else {
            Some(src.ordinal)
        };
        let rva = u32::try_from(src.rva)
            .map_err(|_| malformed_error!("PE import RVA value too large"))?;
        let ilt_value = u64::try_from(src.offset)
            .map_err(|_| malformed_error!("PE import offset value too large"))?;
        Ok(Self {
            dll: src.dll.to_string(),
            name,
            ordinal,
            rva,
            // goblin does not surface the hint; preserved as 0.
            hint: 0,
            ilt_value,
        })
    }

    /// Human-readable identifier: the name if any, else `#ordinal`, else
    /// "unknown".
    #[must_use]
    pub fn function_identifier(&self) -> String {
        match (&self.name, self.ordinal) {
            (Some(name), _) => name.clone(),
            (None, Some(ordinal)) => format!("#{ordinal}"),
            (None, None) => "unknown".to_string(),
        }
    }
}
impl Export {
    /// Converts a goblin export record, narrowing the RVA and optional file
    /// offset to their on-disk `u32` width.
    ///
    /// # Errors
    /// Fails when either value exceeds `u32`.
    fn from_goblin(src: &goblin::pe::export::Export) -> Result<Self> {
        let rva = u32::try_from(src.rva)
            .map_err(|_| malformed_error!("PE export RVA value too large"))?;
        let offset = match src.offset {
            Some(o) => Some(
                u32::try_from(o)
                    .map_err(|_| malformed_error!("PE export offset value too large"))?,
            ),
            None => None,
        };
        Ok(Self {
            name: src.name.map(ToString::to_string),
            rva,
            offset,
        })
    }
}
/// Fixed 16-byte IMAGE_RESOURCE_DIRECTORY header; the entry table follows it
/// immediately in the section (see `entry_count`).
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct ImageResourceDirectory {
    pub characteristics: u32,
    pub time_date_stamp: u32,
    pub major_version: u16,
    pub minor_version: u16,
    pub number_of_named_entries: u16,
    pub number_of_id_entries: u16,
}
impl ImageResourceDirectory {
    /// Parses a 16-byte IMAGE_RESOURCE_DIRECTORY from `data` at `offset`.
    ///
    /// # Errors
    /// Fails when the 16 bytes do not fit inside `data`.
    pub fn read_from(data: &[u8], offset: usize) -> Result<Self> {
        if offset + IMAGE_RESOURCE_DIRECTORY_SIZE > data.len() {
            return Err(malformed_error!(
                "Resource directory at offset {:#x} exceeds bounds",
                offset
            ));
        }
        // Little-endian field readers; all accesses are bounds-checked above.
        let u16_at = |o: usize| u16::from_le_bytes([data[o], data[o + 1]]);
        let u32_at =
            |o: usize| u32::from_le_bytes([data[o], data[o + 1], data[o + 2], data[o + 3]]);
        Ok(Self {
            characteristics: u32_at(offset),
            time_date_stamp: u32_at(offset + 4),
            major_version: u16_at(offset + 8),
            minor_version: u16_at(offset + 10),
            number_of_named_entries: u16_at(offset + 12),
            number_of_id_entries: u16_at(offset + 14),
        })
    }

    /// Total number of entries (named + ID) in the table that follows.
    #[inline]
    pub fn entry_count(&self) -> usize {
        usize::from(self.number_of_named_entries) + usize::from(self.number_of_id_entries)
    }

    /// Serializes this header back into `data` at `offset`.
    ///
    /// # Errors
    /// Fails when the 16 bytes do not fit inside `data`.
    pub fn write_to(&self, data: &mut [u8], offset: usize) -> Result<()> {
        if offset + IMAGE_RESOURCE_DIRECTORY_SIZE > data.len() {
            return Err(malformed_error!(
                "Resource directory at offset {:#x} exceeds bounds for write",
                offset
            ));
        }
        data[offset..offset + 4].copy_from_slice(&self.characteristics.to_le_bytes());
        data[offset + 4..offset + 8].copy_from_slice(&self.time_date_stamp.to_le_bytes());
        data[offset + 8..offset + 10].copy_from_slice(&self.major_version.to_le_bytes());
        data[offset + 10..offset + 12].copy_from_slice(&self.minor_version.to_le_bytes());
        data[offset + 12..offset + 14]
            .copy_from_slice(&self.number_of_named_entries.to_le_bytes());
        data[offset + 14..offset + 16].copy_from_slice(&self.number_of_id_entries.to_le_bytes());
        Ok(())
    }
}
/// One 8-byte IMAGE_RESOURCE_DIRECTORY_ENTRY. The high bit of
/// `offset_to_data_or_directory` marks a nested directory (see
/// `is_directory`); the high bit of `name_or_id` corresponds to
/// `IMAGE_RESOURCE_NAME_IS_STRING`.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct ResourceEntry {
    pub name_or_id: u32,
    pub offset_to_data_or_directory: u32,
}
impl ResourceEntry {
    /// Parses an 8-byte IMAGE_RESOURCE_DIRECTORY_ENTRY from `data` at `offset`.
    ///
    /// # Errors
    /// Fails when the 8 bytes do not fit inside `data`.
    pub fn read_from(data: &[u8], offset: usize) -> Result<Self> {
        if offset + RESOURCE_ENTRY_SIZE > data.len() {
            return Err(malformed_error!(
                "Resource entry at offset {:#x} exceeds bounds",
                offset
            ));
        }
        // Little-endian dword reader; bounds were checked above.
        let u32_at =
            |o: usize| u32::from_le_bytes([data[o], data[o + 1], data[o + 2], data[o + 3]]);
        Ok(Self {
            name_or_id: u32_at(offset),
            offset_to_data_or_directory: u32_at(offset + 4),
        })
    }

    /// True when the entry's target is a nested directory (high bit set).
    #[inline]
    #[must_use]
    pub fn is_directory(self) -> bool {
        (self.offset_to_data_or_directory & constants::IMAGE_RESOURCE_DATA_IS_DIRECTORY) != 0
    }

    /// Section-relative offset of the target, with the flag bit masked off.
    #[inline]
    #[must_use]
    pub fn target_offset(self) -> usize {
        (self.offset_to_data_or_directory & constants::IMAGE_RESOURCE_MASK) as usize
    }

    /// Serializes the entry back into `data` at `offset`.
    ///
    /// # Errors
    /// Fails when the 8 bytes do not fit inside `data`.
    pub fn write_to(self, data: &mut [u8], offset: usize) -> Result<()> {
        if offset + RESOURCE_ENTRY_SIZE > data.len() {
            return Err(malformed_error!(
                "Resource entry at offset {:#x} exceeds bounds for write",
                offset
            ));
        }
        data[offset..offset + 4].copy_from_slice(&self.name_or_id.to_le_bytes());
        data[offset + 4..offset + 8]
            .copy_from_slice(&self.offset_to_data_or_directory.to_le_bytes());
        Ok(())
    }
}
/// 16-byte IMAGE_RESOURCE_DATA_ENTRY: the RVA, size, and code page of one
/// resource blob.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct ResourceDataEntry {
    /// RVA of the resource data (image-relative, not section-relative).
    pub offset_to_data: u32,
    pub size: u32,
    pub code_page: u32,
    pub reserved: u32,
}
impl ResourceDataEntry {
    /// Parses a 16-byte IMAGE_RESOURCE_DATA_ENTRY from `data` at `offset`.
    ///
    /// # Errors
    /// Fails when the 16 bytes do not fit inside `data`.
    pub fn read_from(data: &[u8], offset: usize) -> Result<Self> {
        if offset + RESOURCE_DATA_ENTRY_SIZE > data.len() {
            return Err(malformed_error!(
                "Resource data entry at offset {:#x} exceeds bounds",
                offset
            ));
        }
        // Little-endian dword reader; bounds were checked above.
        let u32_at =
            |o: usize| u32::from_le_bytes([data[o], data[o + 1], data[o + 2], data[o + 3]]);
        Ok(Self {
            offset_to_data: u32_at(offset),
            size: u32_at(offset + 4),
            code_page: u32_at(offset + 8),
            reserved: u32_at(offset + 12),
        })
    }

    /// Serializes the entry back into `data` at `offset`.
    ///
    /// # Errors
    /// Fails when the 16 bytes do not fit inside `data`.
    pub fn write_to(&self, data: &mut [u8], offset: usize) -> Result<()> {
        if offset + RESOURCE_DATA_ENTRY_SIZE > data.len() {
            return Err(malformed_error!(
                "Resource data entry at offset {:#x} exceeds bounds",
                offset
            ));
        }
        data[offset..offset + 4].copy_from_slice(&self.offset_to_data.to_le_bytes());
        data[offset + 4..offset + 8].copy_from_slice(&self.size.to_le_bytes());
        data[offset + 8..offset + 12].copy_from_slice(&self.code_page.to_le_bytes());
        data[offset + 12..offset + 16].copy_from_slice(&self.reserved.to_le_bytes());
        Ok(())
    }
}
/// Walks a raw `.rsrc` section image from its root directory (offset 0) and
/// shifts every leaf data entry's RVA by the distance the section moved from
/// `old_rva` to `new_rva`. No-op when nothing moved or the section is empty.
///
/// # Errors
/// Fails when the directory tree reaches past the end of `data` or an
/// adjusted RVA would not fit in `u32`.
pub fn relocate_resource_section(data: &mut [u8], old_rva: u32, new_rva: u32) -> Result<()> {
    if old_rva == new_rva || data.is_empty() {
        return Ok(()); }
    // Signed delta so shrinking moves (new < old) are handled too.
    let delta = i64::from(new_rva) - i64::from(old_rva);
    relocate_resource_directory(data, 0, delta)
}
/// Upper bound on resource-directory nesting accepted during relocation.
/// Well-formed PE resources use three levels (type / name / language); the
/// cap stops unbounded recursion (stack overflow) on crafted inputs whose
/// directory entries point back at themselves or each other.
const MAX_RESOURCE_DIR_DEPTH: usize = 32;

/// Shifts every leaf data RVA under the directory at `offset` by `delta`.
fn relocate_resource_directory(data: &mut [u8], offset: usize, delta: i64) -> Result<()> {
    relocate_resource_directory_bounded(data, offset, delta, 0)
}

/// Recursive worker for [`relocate_resource_directory`], carrying the current
/// nesting depth so hostile self-referential trees are rejected instead of
/// recursing forever.
fn relocate_resource_directory_bounded(
    data: &mut [u8],
    offset: usize,
    delta: i64,
    depth: usize,
) -> Result<()> {
    if depth >= MAX_RESOURCE_DIR_DEPTH {
        return Err(malformed_error!(
            "Resource directory nesting exceeds {} levels at offset {:#x}",
            MAX_RESOURCE_DIR_DEPTH,
            offset
        ));
    }
    let dir = ImageResourceDirectory::read_from(data, offset)?;
    let entries_offset = offset + IMAGE_RESOURCE_DIRECTORY_SIZE;
    for i in 0..dir.entry_count() {
        let entry_offset = entries_offset + i * RESOURCE_ENTRY_SIZE;
        let entry = ResourceEntry::read_from(data, entry_offset)?;
        if entry.is_directory() {
            relocate_resource_directory_bounded(data, entry.target_offset(), delta, depth + 1)?;
        } else {
            // Leaf: patch the first dword of the IMAGE_RESOURCE_DATA_ENTRY
            // (the data RVA); the remaining fields are untouched.
            let data_entry_offset = entry.target_offset();
            if data_entry_offset + 4 > data.len() {
                return Err(malformed_error!(
                    "Resource data entry at offset {:#x} exceeds bounds",
                    data_entry_offset
                ));
            }
            let old_data_rva = u32::from_le_bytes([
                data[data_entry_offset],
                data[data_entry_offset + 1],
                data[data_entry_offset + 2],
                data[data_entry_offset + 3],
            ]);
            let new_data_rva = u32::try_from(i64::from(old_data_rva) + delta).map_err(|_| {
                malformed_error!(
                    "Resource RVA relocation overflow: old_rva={:#x}, delta={}",
                    old_data_rva,
                    delta
                )
            })?;
            data[data_entry_offset..data_entry_offset + 4]
                .copy_from_slice(&new_data_rva.to_le_bytes());
        }
    }
    Ok(())
}