use crate::macho::MachError;
use goblin::error::Error;
use goblin::mach::segment::SectionData;
use scroll::{Endian, Pread};
use std::mem;
/// Shorthand for results that fail with a [`MachError`].
type Result<T> = std::result::Result<T, MachError>;
/// The coarse set of architectures this decoder distinguishes.
///
/// Anything else collapses to [`Arch::Other`], whose opcodes decode to
/// `CompactUnwindOp::None`.
#[derive(Debug, Clone)]
enum Arch {
    /// 32-bit x86.
    X86,
    /// 64-bit x86 (amd64).
    X64,
    /// 64-bit ARM.
    Arm64,
    /// Any other architecture; no opcode decoding is attempted.
    Other,
}
/// Root header of the `__unwind_info` section, read right after the leading
/// version word. All offsets are relative to the start of the section.
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct FirstLevelPage {
    /// Section offset of the global (common) opcode table (one u32 each).
    global_opcodes_offset: u32,
    global_opcodes_len: u32,
    /// Section offset of the personality-function address table (u32 each).
    personalities_offset: u32,
    personalities_len: u32,
    /// Section offset of the array of [`FirstLevelPageEntry`] records.
    pages_offset: u32,
    pages_len: u32,
}
/// One record of the first-level index: maps a range of instruction
/// addresses to a second-level page.
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct FirstLevelPageEntry {
    /// First instruction address covered by this page; also the base that
    /// compressed second-level entry addresses are relative to.
    first_address: u32,
    /// Section offset of the second-level page; 0 marks the sentinel entry
    /// that only terminates the previous page's address range.
    second_level_page_offset: u32,
    /// Section offset of this page's LSDA index (only shown by `dump`).
    lsda_index_offset: u32,
}
/// Header of a "regular" second-level page: a plain array of
/// (instruction_address, opcode) pairs (see [`RegularEntry`]).
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct RegularSecondLevelPage {
    /// Offset of the entry array, relative to the start of this page.
    entries_offset: u16,
    entries_len: u16,
}
/// Header of a "compressed" second-level page: packed u32 entries plus a
/// page-local opcode table. Offsets are relative to the start of the page.
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct CompressedSecondLevelPage {
    /// Offset of the packed entry array (each u32: 24-bit address delta in
    /// the low bits, 8-bit opcode index in the high bits).
    entries_offset: u16,
    entries_len: u16,
    /// Offset of the page-local opcode table; indexed after the global one.
    local_opcodes_offset: u16,
    local_opcodes_len: u16,
}
/// A single entry of a regular second-level page.
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct RegularEntry {
    instruction_address: u32,
    /// The full 32-bit compact unwinding opcode.
    opcode: u32,
}
/// One record of an LSDA index: an instruction address paired with the
/// address of its language-specific data area. Not consumed by the iterator
/// itself in this file.
#[derive(Debug, Clone)]
#[repr(C)]
struct LsdaEntry {
    instruction_address: u32,
    lsda_address: u32,
}
/// An opcode as stored in a second-level entry: either the full 32-bit
/// value (regular pages and the sentinel) or an index into the combined
/// global + page-local opcode tables (compressed pages).
#[derive(Debug, Clone)]
enum OpcodeOrIndex {
    Opcode(u32),
    Index(u32),
}
/// An entry as read from a second-level page, before its length is known
/// and before a compressed opcode index has been resolved.
#[derive(Debug, Clone)]
struct RawCompactUnwindInfoEntry {
    instruction_address: u32,
    opcode_or_index: OpcodeOrIndex,
}
/// An iterator over the entries of a Mach-O `__unwind_info` section.
///
/// Advance it with [`CompactUnwindInfoIter::next`]; it is not a std
/// `Iterator` because every step can fail with a [`MachError`].
#[derive(Debug, Clone)]
pub struct CompactUnwindInfoIter<'a> {
    /// Target architecture, used to pick the opcode decoder.
    arch: Arch,
    /// Byte order for all reads from `section`.
    endian: Endian,
    /// The raw section bytes.
    section: SectionData<'a>,
    /// The parsed section header ("root" page).
    root: FirstLevelPage,
    /// Index of the current first-level entry.
    first_idx: u32,
    /// Index of the next entry within the current second-level page.
    second_idx: u32,
    /// The first/second-level pages that `next_entry` was read from.
    page_of_next_entry: Option<(FirstLevelPageEntry, SecondLevelPage)>,
    /// One raw entry of lookahead; an entry's length is derived from the
    /// start address of the entry that follows it.
    next_entry: Option<RawCompactUnwindInfoEntry>,
    /// Set when the current second-level page is exhausted and the next
    /// `next_raw` call must advance to the next first-level entry.
    done_page: bool,
}
impl<'a> CompactUnwindInfoIter<'a> {
    /// Creates an iterator over the entries of a `__unwind_info` section.
    ///
    /// `section` is the raw section data, `little_endian` selects the byte
    /// order for all reads, and `arch` is collapsed into the coarse
    /// [`Arch`] used to pick an opcode decoder later.
    ///
    /// Fails if the section's version word is not 1.
    pub fn new(
        section: SectionData<'a>,
        little_endian: bool,
        arch: symbolic_common::Arch,
    ) -> Result<Self> {
        const UNWIND_SECTION_VERSION: u32 = 1;
        use symbolic_common::CpuFamily;
        // Only x86, x64 and arm64 have dedicated opcode decoders; anything
        // else maps to `Arch::Other` and decodes to `CompactUnwindOp::None`.
        let arch = match arch.cpu_family() {
            CpuFamily::Intel32 => Arch::X86,
            CpuFamily::Amd64 => Arch::X64,
            CpuFamily::Arm64 => Arch::Arm64,
            _ => Arch::Other,
        };
        let endian = if little_endian {
            Endian::Little
        } else {
            Endian::Big
        };
        let offset = &mut 0;
        // The section starts with a version word followed by the root page.
        let version: u32 = section.gread_with(offset, endian)?;
        if version != UNWIND_SECTION_VERSION {
            return Err(MachError::from(Error::Malformed(format!(
                "Unknown Compact Unwinding Info version {version}"
            ))));
        }
        let root = section.gread_with(offset, endian)?;
        let iter = CompactUnwindInfoIter {
            arch,
            endian,
            section,
            root,
            first_idx: 0,
            second_idx: 0,
            page_of_next_entry: None,
            next_entry: None,
            // Start "done" so the first `next_raw` call loads the first page.
            done_page: true,
        };
        Ok(iter)
    }

    /// Yields the next completed unwind entry, or `None` when exhausted.
    ///
    /// Entries are completed lazily: an entry's `len` is the distance to
    /// the *following* entry's start address, so the iterator always keeps
    /// one raw entry of lookahead. The final raw entry acts as a sentinel
    /// that only terminates the previous one and is never yielded itself.
    #[allow(clippy::should_implement_trait)]
    pub fn next(&mut self) -> Result<Option<CompactUnwindInfoEntry>> {
        if self.next_entry.is_none() {
            self.next_entry = self.next_raw()?;
        }
        if let Some(cur_entry) = self.next_entry.take() {
            // NOTE(review): if the very first raw entry is a sentinel
            // (second_level_page_offset == 0), `page_of_next_entry` is still
            // `None` here and this unwrap panics on such malformed input —
            // verify whether that can occur in practice.
            let (first_page, second_page) = self.page_of_next_entry.clone().unwrap();
            self.next_entry = self.next_raw()?;
            if let Some(next_entry) = self.next_entry.as_ref() {
                let result = self.complete_entry(
                    &cur_entry,
                    next_entry.instruction_address,
                    &first_page,
                    &second_page,
                )?;
                Ok(Some(result))
            } else {
                // `cur_entry` was the sentinel; nothing more to yield.
                Ok(None)
            }
        } else {
            Ok(None)
        }
    }

    /// Advances to the next raw (uncompleted) entry, loading first- and
    /// second-level pages as needed. Returns `None` once all first-level
    /// entries have been consumed.
    fn next_raw(&mut self) -> Result<Option<RawCompactUnwindInfoEntry>> {
        if self.done_page {
            // Advance to the next first-level page, except on the very
            // first call (no page loaded yet) or once past the end.
            if self.page_of_next_entry.is_some() && self.first_idx != self.root.pages_len {
                self.first_idx += 1;
                self.second_idx = 0;
            }
            if let Some(entry) = self.first_level_entry(self.first_idx)? {
                if entry.second_level_page_offset == 0 {
                    // A zero page offset marks the sentinel entry that
                    // closes the address range of the final real entry.
                    return Ok(Some(RawCompactUnwindInfoEntry {
                        instruction_address: entry.first_address,
                        opcode_or_index: OpcodeOrIndex::Opcode(0),
                    }));
                }
                let second_level_page = self.second_level_page(entry.second_level_page_offset)?;
                self.page_of_next_entry = Some((entry, second_level_page));
                self.done_page = false;
            } else {
                return Ok(None);
            }
        }
        let (first_level_entry, second_level_page) = self.page_of_next_entry.as_ref().unwrap();
        let entry =
            self.second_level_entry(first_level_entry, second_level_page, self.second_idx)?;
        self.second_idx += 1;
        if self.second_idx == second_level_page.len() {
            self.done_page = true;
        }
        Ok(Some(entry))
    }

    /// Reads the first-level page entry at `idx`, or `None` past the end.
    fn first_level_entry(&self, idx: u32) -> Result<Option<FirstLevelPageEntry>> {
        if idx < self.root.pages_len {
            let idx_offset = mem::size_of::<FirstLevelPageEntry>() * idx as usize;
            let offset = self.root.pages_offset as usize + idx_offset;
            Ok(Some(self.section.pread_with(offset, self.endian)?))
        } else {
            Ok(None)
        }
    }

    /// Reads the second-level page header at the given section offset.
    ///
    /// The page's leading u32 selects the regular or compressed layout;
    /// any other kind value is rejected as malformed.
    fn second_level_page(&self, offset: u32) -> Result<SecondLevelPage> {
        const SECOND_LEVEL_REGULAR: u32 = 2;
        const SECOND_LEVEL_COMPRESSED: u32 = 3;
        let mut offset = offset as usize;
        let kind: u32 = self.section.gread_with(&mut offset, self.endian)?;
        if kind == SECOND_LEVEL_REGULAR {
            Ok(SecondLevelPage::Regular(
                self.section.gread_with(&mut offset, self.endian)?,
            ))
        } else if kind == SECOND_LEVEL_COMPRESSED {
            Ok(SecondLevelPage::Compressed(
                self.section.gread_with(&mut offset, self.endian)?,
            ))
        } else {
            Err(MachError::from(Error::Malformed(format!(
                "Unknown second-level page kind: {kind}"
            ))))
        }
    }

    /// Reads the `second_level_idx`-th entry of the given second-level page.
    fn second_level_entry(
        &self,
        first_level_entry: &FirstLevelPageEntry,
        second_level_page: &SecondLevelPage,
        second_level_idx: u32,
    ) -> Result<RawCompactUnwindInfoEntry> {
        match *second_level_page {
            SecondLevelPage::Compressed(ref page) => {
                // Compressed entries are packed u32s: the low 24 bits hold
                // the address relative to the page's first_address, the
                // high 8 bits an index into the global+local opcode tables.
                let offset = first_level_entry.second_level_page_offset as usize
                    + page.entries_offset as usize
                    + second_level_idx as usize * 4;
                let compressed_entry: u32 = self.section.pread_with(offset, self.endian)?;
                let instruction_address =
                    (compressed_entry & 0x00FFFFFF) + first_level_entry.first_address;
                let opcode_idx = (compressed_entry >> 24) & 0xFF;
                Ok(RawCompactUnwindInfoEntry {
                    instruction_address,
                    opcode_or_index: OpcodeOrIndex::Index(opcode_idx),
                })
            }
            SecondLevelPage::Regular(ref page) => {
                // Regular entries are plain (address, opcode) u32 pairs.
                let offset = first_level_entry.second_level_page_offset as usize
                    + page.entries_offset as usize
                    + second_level_idx as usize * 8;
                let entry: RegularEntry = self.section.pread_with(offset, self.endian)?;
                Ok(RawCompactUnwindInfoEntry {
                    instruction_address: entry.instruction_address,
                    opcode_or_index: OpcodeOrIndex::Opcode(entry.opcode),
                })
            }
        }
    }

    /// Finishes a raw entry: computes its length from the next entry's
    /// address and resolves a compressed opcode index through the global
    /// and page-local opcode tables.
    fn complete_entry(
        &self,
        entry: &RawCompactUnwindInfoEntry,
        next_entry_instruction_address: u32,
        first_level_entry: &FirstLevelPageEntry,
        second_level_page: &SecondLevelPage,
    ) -> Result<CompactUnwindInfoEntry> {
        // Equal addresses are tolerated (zero-length entry); only a
        // decrease is malformed.
        if entry.instruction_address > next_entry_instruction_address {
            return Err(MachError::from(Error::Malformed(format!(
                "Entry addresses are not monotonic! ({} > {})",
                entry.instruction_address, next_entry_instruction_address
            ))));
        }
        let opcode = match entry.opcode_or_index {
            OpcodeOrIndex::Opcode(opcode) => opcode,
            OpcodeOrIndex::Index(opcode_idx) => {
                if let SecondLevelPage::Compressed(ref page) = second_level_page {
                    // Indices below the global table length refer to the
                    // global table; the remainder index the local table.
                    if opcode_idx < self.root.global_opcodes_len {
                        self.global_opcode(opcode_idx)?
                    } else {
                        let opcode_idx = opcode_idx - self.root.global_opcodes_len;
                        if opcode_idx >= page.local_opcodes_len as u32 {
                            return Err(MachError::from(Error::Malformed(format!(
                                "Local opcode index too large ({} >= {})",
                                opcode_idx, page.local_opcodes_len
                            ))));
                        }
                        let offset = first_level_entry.second_level_page_offset as usize
                            + page.local_opcodes_offset as usize
                            + opcode_idx as usize * 4;
                        let opcode: u32 = self.section.pread_with(offset, self.endian)?;
                        opcode
                    }
                } else {
                    // Indices are only ever produced from compressed pages.
                    unreachable!()
                }
            }
        };
        let opcode = Opcode(opcode);
        Ok(CompactUnwindInfoEntry {
            instruction_address: entry.instruction_address,
            len: next_entry_instruction_address - entry.instruction_address,
            opcode,
        })
    }

    /// Reads the `opcode_idx`-th entry of the global opcode table.
    fn global_opcode(&self, opcode_idx: u32) -> Result<u32> {
        if opcode_idx >= self.root.global_opcodes_len {
            return Err(MachError::from(Error::Malformed(format!(
                "Global opcode index too large ({} >= {})",
                opcode_idx, self.root.global_opcodes_len
            ))));
        }
        let offset = self.root.global_opcodes_offset as usize + opcode_idx as usize * 4;
        let opcode: u32 = self.section.pread_with(offset, self.endian)?;
        Ok(opcode)
    }

    /// Reads the `personality_idx`-th personality function address.
    fn personality(&self, personality_idx: u32) -> Result<u32> {
        if personality_idx >= self.root.personalities_len {
            return Err(MachError::from(Error::Malformed(format!(
                "Personality index too large ({} >= {})",
                personality_idx, self.root.personalities_len
            ))));
        }
        let offset = self.root.personalities_offset as usize + personality_idx as usize * 4;
        let personality: u32 = self.section.pread_with(offset, self.endian)?;
        Ok(personality)
    }

    /// Prints a human-readable summary of the whole section to stdout,
    /// walking a cloned iterator so `self`'s position is untouched.
    pub fn dump(&self) -> Result<()> {
        println!("Contents of __unwind_info section:");
        println!("  Version:                                   0x1");
        println!(
            "  Common encodings array section offset:     0x{:x}",
            self.root.global_opcodes_offset
        );
        println!(
            "  Number of common encodings in array:       0x{:x}",
            self.root.global_opcodes_len
        );
        println!(
            "  Personality function array section offset: 0x{:x}",
            self.root.personalities_offset
        );
        println!(
            "  Number of personality functions in array:  0x{:x}",
            self.root.personalities_len
        );
        println!(
            "  Index array section offset:                0x{:x}",
            self.root.pages_offset
        );
        println!(
            "  Number of indices in array:                0x{:x}",
            self.root.pages_len
        );
        println!(
            "  Common encodings: (count = {})",
            self.root.global_opcodes_len
        );
        for i in 0..self.root.global_opcodes_len {
            let opcode = self.global_opcode(i)?;
            println!("    encoding[{i}]: 0x{opcode:08x}");
        }
        println!(
            "  Personality functions: (count = {})",
            self.root.personalities_len
        );
        for i in 0..self.root.personalities_len {
            let personality = self.personality(i)?;
            println!("    personality[{i}]: 0x{personality:08x}");
        }
        println!("  Top level indices: (count = {})", self.root.pages_len);
        for i in 0..self.root.pages_len {
            let entry = self.first_level_entry(i)?.unwrap();
            println!("    [{}]: function offset=0x{:08x}, 2nd level page offset=0x{:08x}, LSDA offset=0x{:08x}",
                i,
                entry.first_address,
                entry.second_level_page_offset,
                entry.lsda_index_offset);
        }
        println!("  LSDA descriptors:");
        println!("  Second level indices:");
        // Walk a clone so that dumping does not consume this iterator.
        let mut iter = (*self).clone();
        while let Some(raw_entry) = iter.next_raw()? {
            let (first, second) = iter.page_of_next_entry.clone().unwrap();
            // `next_raw` already advanced past the entry it returned.
            let second_idx = iter.second_idx - 1;
            if second_idx == 0 {
                println!("    Second level index[{}]: offset in section=0x{:08x}, base function=0x{:08x}",
                    iter.first_idx,
                    first.second_level_page_offset,
                    first.first_address);
            }
            // Passing the entry's own address as "next" yields len == 0,
            // which is fine since only the opcode is printed here.
            let entry =
                iter.complete_entry(&raw_entry, raw_entry.instruction_address, &first, &second)?;
            if let OpcodeOrIndex::Index(opcode_idx) = raw_entry.opcode_or_index {
                println!(
                    "      [{}]: function offset=0x{:08x}, encoding[{}]=0x{:08x}",
                    second_idx, entry.instruction_address, opcode_idx, entry.opcode.0
                );
            } else {
                println!(
                    "      [{}]: function offset=0x{:08x}, encoding=0x{:08x}",
                    second_idx, entry.instruction_address, entry.opcode.0
                );
            }
        }
        Ok(())
    }
}
/// A parsed second-level page header, in either of its two layouts.
#[derive(Debug, Clone)]
enum SecondLevelPage {
    Compressed(CompressedSecondLevelPage),
    Regular(RegularSecondLevelPage),
}
impl SecondLevelPage {
    /// Number of unwind entries stored in this page.
    fn len(&self) -> u32 {
        let count = match self {
            SecondLevelPage::Regular(page) => page.entries_len,
            SecondLevelPage::Compressed(page) => page.entries_len,
        };
        u32::from(count)
    }
}
/// A completed unwind entry: an instruction range plus its opcode.
#[derive(Debug, Clone)]
pub struct CompactUnwindInfoEntry {
    /// First instruction address covered by this entry.
    pub instruction_address: u32,
    /// Length of the covered range in bytes (distance to the next entry).
    pub len: u32,
    opcode: Opcode,
}
impl CompactUnwindInfoEntry {
    /// Decodes this entry's opcode into unwinding instructions.
    ///
    /// The iterator is only consulted for its target architecture.
    pub fn instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
        let Self { ref opcode, .. } = *self;
        opcode.instructions(iter)
    }
}
/// The unwinding actions a compact opcode decodes to.
#[derive(Debug)]
pub enum CompactUnwindOp {
    /// A sequence of CFI-style register rules.
    CfiOps(CompactCfiOpIter),
    /// Unwinding info lives in the `__eh_frame` section at this offset.
    UseDwarfFde {
        offset_in_eh_frame: u32,
    },
    /// Nothing is known (unsupported architecture or unknown opcode).
    None,
}
/// A single CFI-style unwinding rule.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CompactCfiOp {
    /// The caller's `dest_reg` is stored at `src_reg + offset_from_src`.
    RegisterAt {
        dest_reg: CompactCfiRegister,
        src_reg: CompactCfiRegister,
        offset_from_src: i32,
    },
    /// The caller's `dest_reg` is the value `src_reg + offset_from_src`.
    RegisterIs {
        dest_reg: CompactCfiRegister,
        src_reg: CompactCfiRegister,
        offset_from_src: i32,
    },
}
/// The four x86/x64 unwinding modes, from bits 24..28 of an opcode.
#[derive(Debug, Clone)]
enum X86UnwindingMode {
    /// rbp/ebp-based frame.
    RbpFrame,
    /// Frameless, stack size encoded in the opcode.
    StackImmediate,
    /// Frameless, stack size stored in the instruction stream.
    StackIndirect,
    /// Defer to the DWARF FDE in `__eh_frame`.
    Dwarf,
}
/// The three ARM64 unwinding modes, from bits 24..28 of an opcode.
#[derive(Debug, Clone)]
enum Arm64UnwindingMode {
    /// Frameless, stack size encoded in the opcode.
    Frameless,
    /// Defer to the DWARF FDE in `__eh_frame`.
    Dwarf,
    /// x29-based frame.
    Frame,
}
/// A raw 32-bit compact unwinding opcode; interpretation is per-arch.
#[derive(Debug, Clone)]
struct Opcode(u32);
impl Opcode {
    /// Dispatches decoding of this opcode to the architecture-specific
    /// decoder selected by the iterator's arch.
    fn instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
        match iter.arch {
            Arch::X86 | Arch::X64 => self.x86_instructions(iter),
            Arch::Arm64 => self.arm64_instructions(iter),
            Arch::Other => CompactUnwindOp::None,
        }
    }

    /// Pointer width in bytes for the iterator's architecture.
    fn pointer_size(&self, iter: &CompactUnwindInfoIter) -> u32 {
        match iter.arch {
            Arch::X86 => 4,
            Arch::X64 | Arch::Arm64 => 8,
            Arch::Other => unimplemented!(),
        }
    }
}
impl Opcode {
    /// Decodes this opcode for x86/x64 into unwinding instructions.
    fn x86_instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
        let pointer_size = self.pointer_size(iter) as i32;
        match self.x86_mode() {
            Some(X86UnwindingMode::RbpFrame) => {
                let mut ops = CompactCfiOpIter::new();
                // The CFA sits two words above the frame pointer (saved
                // rbp/ebp plus return address); both are restored from it.
                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::frame_pointer(),
                    offset_from_src: 2 * pointer_size,
                });
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::frame_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                });
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                });
                // Callee-saved registers were pushed starting `offset`
                // words below the CFA, in the slot order of the opcode.
                let offset = self.x86_rbp_stack_offset() as i32 + 2;
                for (i, reg) in self.x86_rbp_registers().iter().enumerate() {
                    if let Some(reg) = *reg {
                        ops.push(CompactCfiOp::RegisterAt {
                            dest_reg: reg,
                            src_reg: CompactCfiRegister::cfa(),
                            offset_from_src: -(offset - i as i32) * pointer_size,
                        });
                    }
                }
                CompactUnwindOp::CfiOps(ops.into_iter())
            }
            Some(X86UnwindingMode::StackImmediate) => {
                // Frameless function whose fixed stack size (in words) is
                // encoded directly in the opcode.
                let mut ops = CompactCfiOpIter::new();
                let stack_size = self.x86_frameless_stack_size();
                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::stack_pointer(),
                    offset_from_src: stack_size as i32 * pointer_size,
                });
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                });
                // Saved registers sit directly below the return address;
                // walk the slots in reverse push order.
                let mut offset = 2;
                for reg in self.x86_frameless_registers().iter().rev() {
                    if let Some(reg) = *reg {
                        ops.push(CompactCfiOp::RegisterAt {
                            dest_reg: reg,
                            src_reg: CompactCfiRegister::cfa(),
                            offset_from_src: -offset * pointer_size,
                        });
                        offset += 1;
                    }
                }
                CompactUnwindOp::CfiOps(ops.into_iter())
            }
            Some(X86UnwindingMode::StackIndirect) => {
                // Stack size lives in the instruction stream, which is not
                // available here; treated as unknown.
                CompactUnwindOp::None
            }
            Some(X86UnwindingMode::Dwarf) => {
                // Defer to the DWARF FDE at this __eh_frame offset.
                let offset_in_eh_frame = self.x86_dwarf_fde();
                CompactUnwindOp::UseDwarfFde { offset_in_eh_frame }
            }
            None => CompactUnwindOp::None,
        }
    }

    /// Extracts the x86 unwinding mode from bits 24..28 of the opcode.
    fn x86_mode(&self) -> Option<X86UnwindingMode> {
        const X86_MODE_MASK: u32 = 0x0F00_0000;
        const X86_MODE_RBP_FRAME: u32 = 0x0100_0000;
        const X86_MODE_STACK_IMMD: u32 = 0x0200_0000;
        const X86_MODE_STACK_IND: u32 = 0x0300_0000;
        const X86_MODE_DWARF: u32 = 0x0400_0000;
        let masked = self.0 & X86_MODE_MASK;
        match masked {
            X86_MODE_RBP_FRAME => Some(X86UnwindingMode::RbpFrame),
            X86_MODE_STACK_IMMD => Some(X86UnwindingMode::StackImmediate),
            X86_MODE_STACK_IND => Some(X86UnwindingMode::StackIndirect),
            X86_MODE_DWARF => Some(X86UnwindingMode::Dwarf),
            _ => None,
        }
    }

    /// The five 3-bit register slots of an RBP-frame opcode (bits 0..15);
    /// encoding 0 means "no register" and yields `None`.
    fn x86_rbp_registers(&self) -> [Option<CompactCfiRegister>; 5] {
        let mask = 0b111;
        [
            CompactCfiRegister::from_x86_encoded(self.0 & mask),
            CompactCfiRegister::from_x86_encoded((self.0 >> 3) & mask),
            CompactCfiRegister::from_x86_encoded((self.0 >> 6) & mask),
            CompactCfiRegister::from_x86_encoded((self.0 >> 9) & mask),
            CompactCfiRegister::from_x86_encoded((self.0 >> 12) & mask),
        ]
    }

    /// Stack offset (in words) of an RBP-frame opcode, bits 16..24.
    fn x86_rbp_stack_offset(&self) -> u32 {
        let offset = 32 - 8 - 8;
        (self.0 >> offset) & 0b1111_1111
    }

    /// Stack size (in words) of a frameless opcode, bits 16..24.
    fn x86_frameless_stack_size(&self) -> u32 {
        let offset = 32 - 8 - 8;
        (self.0 >> offset) & 0b1111_1111
    }

    /// Number of saved registers in a frameless opcode (bits 10..13),
    /// clamped to the maximum of 6.
    fn x86_frameless_register_count(&self) -> u32 {
        let offset = 32 - 8 - 8 - 3 - 3;
        let register_count = (self.0 >> offset) & 0b111;
        if register_count > 6 {
            6
        } else {
            register_count
        }
    }

    /// Decodes the saved-register list of a frameless opcode from its
    /// permutation encoding (low 10 bits): the permutation index is first
    /// split into factorial-base digits (`permunreg`), then each digit
    /// selects the n-th still-unused register encoding in 1..=6.
    fn x86_frameless_registers(&self) -> [Option<CompactCfiRegister>; 6] {
        let mut permutation = self.0 & 0b11_1111_1111;
        let mut permunreg = [0; 6];
        let register_count = self.x86_frameless_register_count();
        // Divisors depend on how many registers are encoded; unused
        // trailing slots stay 0.
        match register_count {
            6 => {
                permunreg[0] = permutation / 120;
                permutation -= permunreg[0] * 120;
                permunreg[1] = permutation / 24;
                permutation -= permunreg[1] * 24;
                permunreg[2] = permutation / 6;
                permutation -= permunreg[2] * 6;
                permunreg[3] = permutation / 2;
                permutation -= permunreg[3] * 2;
                permunreg[4] = permutation;
                permunreg[5] = 0;
            }
            5 => {
                permunreg[0] = permutation / 120;
                permutation -= permunreg[0] * 120;
                permunreg[1] = permutation / 24;
                permutation -= permunreg[1] * 24;
                permunreg[2] = permutation / 6;
                permutation -= permunreg[2] * 6;
                permunreg[3] = permutation / 2;
                permutation -= permunreg[3] * 2;
                permunreg[4] = permutation;
            }
            4 => {
                permunreg[0] = permutation / 60;
                permutation -= permunreg[0] * 60;
                permunreg[1] = permutation / 12;
                permutation -= permunreg[1] * 12;
                permunreg[2] = permutation / 3;
                permutation -= permunreg[2] * 3;
                permunreg[3] = permutation;
            }
            3 => {
                permunreg[0] = permutation / 20;
                permutation -= permunreg[0] * 20;
                permunreg[1] = permutation / 4;
                permutation -= permunreg[1] * 4;
                permunreg[2] = permutation;
            }
            2 => {
                permunreg[0] = permutation / 5;
                permutation -= permunreg[0] * 5;
                permunreg[1] = permutation;
            }
            1 => {
                permunreg[0] = permutation;
            }
            _ => {
            }
        }
        // Materialize the permutation: digit i picks the permunreg[i]-th
        // register encoding (1..=6) that has not been used yet.
        let mut registers = [0u32; 6];
        let mut used = [false; 7];
        for i in 0..register_count {
            let mut renum = 0;
            for j in 1u32..7 {
                if !used[j as usize] {
                    if renum == permunreg[i as usize] {
                        registers[i as usize] = j;
                        used[j as usize] = true;
                        break;
                    }
                    renum += 1;
                }
            }
        }
        // Unfilled slots hold 0, which from_x86_encoded maps to `None`.
        [
            CompactCfiRegister::from_x86_encoded(registers[0]),
            CompactCfiRegister::from_x86_encoded(registers[1]),
            CompactCfiRegister::from_x86_encoded(registers[2]),
            CompactCfiRegister::from_x86_encoded(registers[3]),
            CompactCfiRegister::from_x86_encoded(registers[4]),
            CompactCfiRegister::from_x86_encoded(registers[5]),
        ]
    }

    /// Offset of the DWARF FDE in `__eh_frame` (low 24 bits).
    fn x86_dwarf_fde(&self) -> u32 {
        self.0 & 0x00FF_FFFF
    }
}
impl Opcode {
    /// Extracts the ARM64 unwinding mode from bits 24..28 of the opcode.
    fn arm64_mode(&self) -> Option<Arm64UnwindingMode> {
        const ARM64_MODE_MASK: u32 = 0x0F000000;
        const ARM64_MODE_FRAMELESS: u32 = 0x02000000;
        const ARM64_MODE_DWARF: u32 = 0x03000000;
        const ARM64_MODE_FRAME: u32 = 0x04000000;
        let masked = self.0 & ARM64_MODE_MASK;
        match masked {
            ARM64_MODE_FRAMELESS => Some(Arm64UnwindingMode::Frameless),
            ARM64_MODE_DWARF => Some(Arm64UnwindingMode::Dwarf),
            ARM64_MODE_FRAME => Some(Arm64UnwindingMode::Frame),
            _ => None,
        }
    }

    /// Decodes this opcode for ARM64 into unwinding instructions.
    fn arm64_instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
        let pointer_size = self.pointer_size(iter) as i32;
        match self.arm64_mode() {
            Some(Arm64UnwindingMode::Frameless) => {
                // Frameless: sp was moved down by a fixed amount (encoded
                // in 16-byte units) and the return address stays in lr.
                let stack_size = self.arm64_frameless_stack_size() * 16;
                let mut ops = CompactCfiOpIter::new();
                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::stack_pointer(),
                    offset_from_src: stack_size as i32,
                });
                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::link_register(),
                    offset_from_src: 0,
                });
                CompactUnwindOp::CfiOps(ops.into_iter())
            }
            Some(Arm64UnwindingMode::Dwarf) => {
                // Defer to the DWARF FDE at this __eh_frame offset.
                let offset_in_eh_frame = self.arm64_dwarf_fde();
                CompactUnwindOp::UseDwarfFde { offset_in_eh_frame }
            }
            Some(Arm64UnwindingMode::Frame) => {
                // Standard frame: fp/lr pair at the top, followed by up to
                // nine callee-saved register pairs flagged in the low 9
                // bits of the opcode.
                let mut ops = CompactCfiOpIter::new();
                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::frame_pointer(),
                    offset_from_src: 2 * pointer_size,
                });
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::frame_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                });
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                });
                let num_reg_pairs = 9;
                let mut pairs_saved = 0;
                for pair_num in 0..num_reg_pairs {
                    let has_pair = (self.0 & (1 << pair_num)) != 0;
                    if has_pair {
                        // Pair n covers register numbers base+2n, base+2n+1,
                        // stored below the fp/lr/return-address words.
                        let first_reg = ARM64_REG_BASE + pair_num * 2;
                        let second_reg = ARM64_REG_BASE + pair_num * 2 + 1;
                        ops.push(CompactCfiOp::RegisterAt {
                            dest_reg: CompactCfiRegister::from_arm64_encoded(first_reg),
                            src_reg: CompactCfiRegister::cfa(),
                            offset_from_src: (-2 * pairs_saved - 3) * pointer_size,
                        });
                        ops.push(CompactCfiOp::RegisterAt {
                            dest_reg: CompactCfiRegister::from_arm64_encoded(second_reg),
                            src_reg: CompactCfiRegister::cfa(),
                            offset_from_src: (-2 * pairs_saved - 4) * pointer_size,
                        });
                        pairs_saved += 1;
                    }
                }
                CompactUnwindOp::CfiOps(ops.into_iter())
            }
            None => CompactUnwindOp::None,
        }
    }

    /// Stack size (in 16-byte units) of a frameless opcode, bits 12..24.
    fn arm64_frameless_stack_size(&self) -> u32 {
        let offset = 32 - 8 - 12;
        (self.0 >> offset) & 0xFFF
    }

    /// Offset of the DWARF FDE in `__eh_frame` (low 24 bits).
    fn arm64_dwarf_fde(&self) -> u32 {
        self.0 & 0x00FF_FFFF
    }
}
/// Frame pointer encoding: 6 is x86 `ebp` / x64 `rbp`, and also ARM64 `x29`
/// (see `name_of_other_reg`).
const REG_FRAME: u8 = 6;
/// First ARM64 register number handed to `from_arm64_encoded` (x19).
const ARM64_REG_BASE: u32 = REG_FRAME as u32 + 1;
// Synthetic register numbers for registers that have no compact encoding;
// they live at the top of the u8 range to avoid real encodings.
const REG_LINK: u8 = 252;
const REG_INSTRUCTION: u8 = 253;
const REG_STACK: u8 = 254;
const REG_CFA: u8 = 255;
/// A register referenced by a [`CompactCfiOp`], stored as a compact u8
/// encoding (see the `REG_*` constants for the synthetic values).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct CompactCfiRegister(u8);
impl CompactCfiRegister {
    /// Decodes the 3-bit register encoding used by x86/x64 opcodes.
    ///
    /// Encodings 1..=6 denote callee-saved registers; everything else
    /// (including 0, "no register") yields `None`.
    fn from_x86_encoded(val: u32) -> Option<Self> {
        match val {
            1..=6 => Some(CompactCfiRegister(val as u8)),
            _ => None,
        }
    }

    /// Wraps an ARM64 register number (the x19..x28 / d8..d15 range).
    fn from_arm64_encoded(val: u32) -> Self {
        debug_assert!(val >= ARM64_REG_BASE && val < ARM64_REG_BASE + 18);
        CompactCfiRegister(val as u8)
    }

    /// Whether this is the synthetic CFA pseudo-register.
    pub fn is_cfa(&self) -> bool {
        matches!(self.0, REG_CFA)
    }

    /// Architecture-dependent register name, if one is known.
    pub fn name(&self, iter: &CompactUnwindInfoIter) -> Option<&'static str> {
        if self.is_cfa() {
            Some("cfa")
        } else {
            name_of_other_reg(self.0, iter)
        }
    }

    /// The canonical frame address pseudo-register.
    pub fn cfa() -> Self {
        CompactCfiRegister(REG_CFA)
    }

    /// The frame pointer (ebp/rbp/x29).
    pub fn frame_pointer() -> Self {
        Self(REG_FRAME)
    }

    /// The instruction pointer (eip/rip/pc).
    pub fn instruction_pointer() -> Self {
        Self(REG_INSTRUCTION)
    }

    /// The stack pointer (esp/rsp/sp).
    pub fn stack_pointer() -> Self {
        Self(REG_STACK)
    }

    /// The ARM64 link register (x30).
    pub fn link_register() -> Self {
        Self(REG_LINK)
    }
}
/// Maps a non-CFA register encoding to its name for the given architecture.
fn name_of_other_reg(reg: u8, iter: &CompactUnwindInfoIter) -> Option<&'static str> {
    // Callee-saved register names for x86/x64 encodings 1..=6.
    const X86_SAVED: [&str; 6] = ["ebx", "ecx", "edx", "edi", "esi", "ebp"];
    const X64_SAVED: [&str; 6] = ["rbx", "r12", "r13", "r14", "r15", "rbp"];
    // ARM64 register numbers 7..=24: x19..x28 followed by d8..d15.
    const ARM64_SAVED: [&str; 18] = [
        "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "d8", "d9",
        "d10", "d11", "d12", "d13", "d14", "d15",
    ];
    match iter.arch {
        Arch::X86 => match reg {
            1..=6 => Some(X86_SAVED[reg as usize - 1]),
            REG_INSTRUCTION => Some("eip"),
            REG_STACK => Some("esp"),
            _ => None,
        },
        Arch::X64 => match reg {
            1..=6 => Some(X64_SAVED[reg as usize - 1]),
            REG_INSTRUCTION => Some("rip"),
            REG_STACK => Some("rsp"),
            _ => None,
        },
        Arch::Arm64 => match reg {
            7..=24 => Some(ARM64_SAVED[reg as usize - 7]),
            REG_FRAME => Some("x29"),
            REG_LINK => Some("x30"),
            REG_INSTRUCTION => Some("pc"),
            REG_STACK => Some("sp"),
            _ => None,
        },
        Arch::Other => None,
    }
}
/// A fixed-capacity buffer of [`CompactCfiOp`]s that doubles as their
/// iterator. Capacity 21 covers the largest sequence produced here
/// (ARM64 frame mode: 3 fixed ops + 9 register pairs * 2 ops).
#[derive(Debug, Clone)]
pub struct CompactCfiOpIter {
    items: [Option<CompactCfiOp>; 21],
    // Write cursor while building; read cursor after `into_iter`.
    cur_idx: usize,
}
impl CompactCfiOpIter {
    /// Creates an empty buffer with all 21 slots vacant.
    fn new() -> Self {
        Self {
            // [Option<CompactCfiOp>; 21] defaults to all-None.
            items: Default::default(),
            cur_idx: 0,
        }
    }

    /// Appends an op; panics if the fixed capacity is exceeded.
    fn push(&mut self, item: CompactCfiOp) {
        self.items[self.cur_idx] = Some(item);
        self.cur_idx += 1;
    }

    /// Converts the builder into its iterator by rewinding the cursor.
    fn into_iter(mut self) -> Self {
        self.cur_idx = 0;
        self
    }
}
impl Iterator for CompactCfiOpIter {
    type Item = CompactCfiOp;

    /// Takes the op at the cursor (leaving `None` behind) and advances;
    /// yields `None` at empty slots and past the end of the buffer.
    fn next(&mut self) -> Option<Self::Item> {
        let slot = self.items.get_mut(self.cur_idx)?;
        self.cur_idx += 1;
        slot.take()
    }
}
#[cfg(test)]
mod test {
use super::{
CompactCfiOp, CompactCfiRegister, CompactUnwindInfoIter, CompactUnwindOp, Opcode,
ARM64_REG_BASE,
};
use crate::macho::MachError;
use scroll::Pwrite;
use symbolic_common::Arch;
// Geometry of the synthetic __unwind_info sections built by these tests,
// mirroring the layouts parsed by CompactUnwindInfoIter.
const PAGE_SIZE: usize = 4096;
const REGULAR_PAGE_HEADER_LEN: usize = 8;
const COMPRESSED_PAGE_HEADER_LEN: usize = 12;
// How many second-level entries fit into one page of each flavor.
const MAX_REGULAR_SECOND_LEVEL_ENTRIES: usize = (PAGE_SIZE - REGULAR_PAGE_HEADER_LEN) / 8;
const MAX_COMPRESSED_SECOND_LEVEL_ENTRIES: usize = (PAGE_SIZE - COMPRESSED_PAGE_HEADER_LEN) / 4;
const MAX_COMPRESSED_SECOND_LEVEL_ENTRIES_WITH_MAX_LOCALS: usize =
    (PAGE_SIZE - COMPRESSED_PAGE_HEADER_LEN - MAX_LOCAL_OPCODES_LEN as usize * 4) / 4;
// Opcode-table size limits (compressed entry indices are 8 bits wide).
const MAX_GLOBAL_OPCODES_LEN: u32 = 127;
const MAX_LOCAL_OPCODES_LEN: u32 = 128;
const MAX_PERSONALITIES_LEN: u32 = 4;
// Opcode mode bits, mirroring the values in the decoder under test.
const X86_MODE_RBP_FRAME: u32 = 0x0100_0000;
const X86_MODE_STACK_IMMD: u32 = 0x0200_0000;
const X86_MODE_STACK_IND: u32 = 0x0300_0000;
const X86_MODE_DWARF: u32 = 0x0400_0000;
const ARM64_MODE_FRAMELESS: u32 = 0x02000000;
const ARM64_MODE_DWARF: u32 = 0x03000000;
const ARM64_MODE_FRAME: u32 = 0x04000000;
// Second-level page kind tags.
const REGULAR_PAGE_KIND: u32 = 2;
const COMPRESSED_PAGE_KIND: u32 = 3;
/// Rounds `offset` up to the next multiple of `align`.
fn align(offset: u32, align: u32) -> u32 {
    let blocks = (offset + align - 1) / align;
    blocks * align
}
/// Packs five x86 register encodings (each 0..=6, 0 = empty slot) into the
/// low 15 bits of an RBP-frame opcode, 3 bits per slot.
///
/// Fixes mojibake corruption: `&reg` had been mangled into `®`, which is
/// not valid Rust.
fn pack_x86_rbp_registers(regs: [u8; 5]) -> u32 {
    let mut result: u32 = 0;
    let base_offset = 0;
    for (idx, &reg) in regs.iter().enumerate() {
        assert!(reg <= 6);
        result |= (reg as u32 & 0b111) << (base_offset + idx * 3);
    }
    result
}
/// Encodes the saved-register list of a frameless opcode as a permutation
/// index (the inverse of `Opcode::x86_frameless_registers`).
///
/// `registers` is right-aligned: the `num_regs` meaningful encodings occupy
/// the trailing slots. Each register is first renumbered by how many smaller
/// encodings precede it, then the digits are combined in factorial base.
///
/// Fixes mojibake corruption: `for &reg in &registers` had been mangled
/// into `for ® in ®isters`, which is not valid Rust.
fn pack_x86_stackless_registers(num_regs: u32, registers: [u8; 6]) -> u32 {
    for &reg in &registers {
        assert!(reg <= 6);
    }
    // Renumber each register relative to the not-yet-seen encodings.
    let mut renumregs = [0u32; 6];
    for i in 6 - num_regs..6 {
        let mut countless = 0;
        for j in 6 - num_regs..i {
            if registers[j as usize] < registers[i as usize] {
                countless += 1;
            }
        }
        renumregs[i as usize] = registers[i as usize] as u32 - countless - 1;
    }
    // Combine the renumbered digits in factorial base.
    let mut permutation_encoding: u32 = 0;
    match num_regs {
        6 => {
            permutation_encoding |= 120 * renumregs[0]
                + 24 * renumregs[1]
                + 6 * renumregs[2]
                + 2 * renumregs[3]
                + renumregs[4];
        }
        5 => {
            permutation_encoding |= 120 * renumregs[1]
                + 24 * renumregs[2]
                + 6 * renumregs[3]
                + 2 * renumregs[4]
                + renumregs[5];
        }
        4 => {
            permutation_encoding |=
                60 * renumregs[2] + 12 * renumregs[3] + 3 * renumregs[4] + renumregs[5];
        }
        3 => {
            permutation_encoding |= 20 * renumregs[3] + 4 * renumregs[4] + renumregs[5];
        }
        2 => {
            permutation_encoding |= 5 * renumregs[4] + renumregs[5];
        }
        1 => {
            permutation_encoding |= renumregs[5];
        }
        0 => {
            // No registers saved: the encoding is simply zero.
        }
        _ => unreachable!(),
    }
    permutation_encoding
}
/// Asserts that two op iterators yield equal items and equal lengths.
///
/// The previous version consumed one item from each iterator inside a
/// `while let` tuple: if `a` had exactly one element more than `b`, that
/// extra element was silently dropped and the trailing `is_none` checks
/// still passed. Pairing the items in a single `match` makes any length
/// mismatch fail loudly.
fn assert_opcodes_match<A, B>(mut a: A, mut b: B)
where
    A: Iterator<Item = CompactCfiOp>,
    B: Iterator<Item = CompactCfiOp>,
{
    loop {
        match (a.next(), b.next()) {
            (Some(a_op), Some(b_op)) => assert_eq!(a_op, b_op),
            (None, None) => break,
            (a_op, b_op) => panic!(
                "iterators have different lengths (next items: {a_op:?} vs {b_op:?})"
            ),
        }
    }
}
/// Sections whose leading version word is not 1 must be rejected.
///
/// Fixes mojibake corruption: `&section` had been mangled into `§ion`
/// (twice), which is not valid Rust.
#[test]
fn test_compact_unknown_version() -> Result<(), MachError> {
    {
        // Version 0 is unknown.
        let offset = &mut 0;
        let mut section = vec![0u8; 1024];
        section.gwrite(0u32, offset)?;
        assert!(CompactUnwindInfoIter::new(&section, true, Arch::Amd64).is_err());
    }
    {
        // Version 2 is unknown as well.
        let offset = &mut 0;
        let mut section = vec![0; 1024];
        section.gwrite(2u32, offset)?;
        assert!(CompactUnwindInfoIter::new(&section, true, Arch::X86).is_err());
    }
    Ok(())
}
/// A version-1 section with no pages yields no entries, repeatedly.
///
/// Fixes mojibake corruption: `&section` had been mangled into `§ion`.
#[test]
fn test_compact_empty() -> Result<(), MachError> {
    let offset = &mut 0;
    let mut section = vec![0u8; 1024];
    section.gwrite(1u32, offset)?;
    let mut iter = CompactUnwindInfoIter::new(&section, true, Arch::Amd64)?;
    assert!(iter.next()?.is_none());
    // The iterator stays exhausted on repeated calls.
    assert!(iter.next()?.is_none());
    Ok(())
}
/// Synthesizes a complete `__unwind_info` section (root header, opcode
/// tables, first-level index, LSDA index, regular and compressed
/// second-level pages) and checks that the iterator yields exactly the
/// entries that were written.
///
/// Fixes mojibake corruption: `&regular_entries` had been mangled into
/// `®ular_entries` and `&section` into `§ion` (neither is valid Rust);
/// also fixes the `insruction_address` typo.
#[test]
fn test_compact_structure() -> Result<(), MachError> {
    let global_opcodes: Vec<u32> = vec![0, 2, 4, 7];
    assert!(global_opcodes.len() <= MAX_GLOBAL_OPCODES_LEN as usize);
    let personalities: Vec<u32> = vec![7, 12, 3];
    assert!(personalities.len() <= MAX_PERSONALITIES_LEN as usize);
    let lsdas: Vec<(u32, u32)> = vec![(0, 1), (7, 3), (18, 5)];
    let mut first_entries: Vec<(u32, u32, u32)> = vec![];
    // (instruction_address, opcode) pairs, one Vec per second-level page.
    let mut regular_entries: Vec<Vec<(u32, u32)>> = vec![
        vec![(1, 7), (3, 8), (6, 10), (10, 4)],
        vec![(20, 5), (21, 2), (24, 7), (25, 0)],
        vec![(29, 8)],
    ];
    let mut compressed_entries: Vec<Vec<(u32, u32)>> = vec![
        vec![(10001, 7), (10003, 8), (10006, 10), (10010, 4)],
        vec![(10020, 5), (10021, 2), (10024, 7), (10025, 0)],
        vec![(10029, 8)],
    ];
    // Also exercise completely full pages of every flavor.
    let mut temp = vec![];
    let base_instruction = 100;
    for i in 0..MAX_REGULAR_SECOND_LEVEL_ENTRIES {
        temp.push((base_instruction + i as u32, i as u32))
    }
    regular_entries.push(temp);
    let mut temp = vec![];
    let base_instruction = 10100;
    for i in 0..MAX_COMPRESSED_SECOND_LEVEL_ENTRIES {
        temp.push((base_instruction + i as u32, 2))
    }
    compressed_entries.push(temp);
    let mut temp = vec![];
    let base_instruction = 14100;
    for i in 0..MAX_COMPRESSED_SECOND_LEVEL_ENTRIES_WITH_MAX_LOCALS {
        temp.push((
            base_instruction + i as u32,
            100 + (i as u32 % MAX_LOCAL_OPCODES_LEN),
        ))
    }
    compressed_entries.push(temp);
    // Serialize the regular second-level pages.
    let mut second_level_pages: Vec<[u8; PAGE_SIZE]> = vec![];
    for page in &regular_entries {
        second_level_pages.push([0; PAGE_SIZE]);
        let buf = second_level_pages.last_mut().unwrap();
        let buf_offset = &mut 0;
        buf.gwrite(REGULAR_PAGE_KIND, buf_offset)?;
        buf.gwrite(REGULAR_PAGE_HEADER_LEN as u16, buf_offset)?;
        buf.gwrite(page.len() as u16, buf_offset)?;
        for &(instruction_address, opcode) in page {
            buf.gwrite(instruction_address, buf_offset)?;
            buf.gwrite(opcode, buf_offset)?;
        }
    }
    // Serialize the compressed second-level pages.
    for page in &compressed_entries {
        second_level_pages.push([0; PAGE_SIZE]);
        let buf = second_level_pages.last_mut().unwrap();
        let buf_offset = &mut 0;
        // Build this page's local opcode table, reusing global (and then
        // already-local) opcodes where possible.
        let mut local_opcodes = vec![];
        let mut indices = vec![];
        for &(_, opcode) in page {
            if let Some((idx, _)) = global_opcodes
                .iter()
                .enumerate()
                .find(|&(_, &global_opcode)| global_opcode == opcode)
            {
                indices.push(idx);
            } else if let Some((idx, _)) = local_opcodes
                .iter()
                .enumerate()
                .find(|&(_, &global_opcode)| global_opcode == opcode)
            {
                indices.push(global_opcodes.len() + idx);
            } else {
                local_opcodes.push(opcode);
                indices.push(global_opcodes.len() + local_opcodes.len() - 1);
            }
        }
        assert!(local_opcodes.len() <= MAX_LOCAL_OPCODES_LEN as usize);
        let entries_offset = COMPRESSED_PAGE_HEADER_LEN + local_opcodes.len() * 4;
        let first_address = page.first().unwrap().0;
        buf.gwrite(COMPRESSED_PAGE_KIND, buf_offset)?;
        buf.gwrite(entries_offset as u16, buf_offset)?;
        buf.gwrite(page.len() as u16, buf_offset)?;
        buf.gwrite(COMPRESSED_PAGE_HEADER_LEN as u16, buf_offset)?;
        buf.gwrite(local_opcodes.len() as u16, buf_offset)?;
        for opcode in local_opcodes {
            buf.gwrite(opcode, buf_offset)?;
        }
        // Pack each entry: 24-bit address delta + 8-bit opcode index.
        for (&(instruction_address, _opcode), idx) in page.iter().zip(indices) {
            let compressed_address = (instruction_address - first_address) & 0x00FF_FFFF;
            let compressed_idx = (idx as u32) << 24;
            assert_eq!(compressed_address + first_address, instruction_address);
            assert_eq!(idx & 0xFFFF_FF00, 0);
            let compressed_opcode: u32 = compressed_address | compressed_idx;
            buf.gwrite(compressed_opcode, buf_offset)?;
        }
    }
    // Compute the section layout (second-level pages are page-aligned).
    let header_size: u32 = 4 * 7;
    let global_opcodes_offset: u32 = header_size;
    let personalities_offset: u32 = global_opcodes_offset + global_opcodes.len() as u32 * 4;
    let first_entries_offset: u32 = personalities_offset + personalities.len() as u32 * 4;
    let lsdas_offset: u32 = first_entries_offset + (second_level_pages.len() + 1) as u32 * 12;
    let second_level_pages_offset: u32 =
        align(lsdas_offset + lsdas.len() as u32 * 8, PAGE_SIZE as u32);
    let final_size: u32 =
        second_level_pages_offset + second_level_pages.len() as u32 * PAGE_SIZE as u32;
    // Build the first-level index, checking address monotonicity.
    let mut cur_address = 0;
    for (idx, page) in regular_entries
        .iter()
        .chain(compressed_entries.iter())
        .enumerate()
    {
        let first_address = page.first().unwrap().0;
        let page_offset = second_level_pages_offset + PAGE_SIZE as u32 * idx as u32;
        first_entries.push((first_address, page_offset, lsdas_offset));
        for &(address, _) in page {
            assert!(address > cur_address);
            cur_address = address;
        }
    }
    assert_eq!(second_level_pages.len(), first_entries.len());
    // Trailing sentinel (page offset 0) terminates the final range.
    first_entries.push((cur_address + 1, 0, 0));
    // Serialize the whole section.
    let offset = &mut 0;
    let mut section = vec![0u8; final_size as usize];
    section.gwrite(1u32, offset)?;
    section.gwrite(global_opcodes_offset, offset)?;
    section.gwrite(global_opcodes.len() as u32, offset)?;
    section.gwrite(personalities_offset, offset)?;
    section.gwrite(personalities.len() as u32, offset)?;
    section.gwrite(first_entries_offset, offset)?;
    section.gwrite(first_entries.len() as u32, offset)?;
    assert_eq!(*offset as u32, global_opcodes_offset);
    for &opcode in &global_opcodes {
        section.gwrite(opcode, offset)?;
    }
    assert_eq!(*offset as u32, personalities_offset);
    for &personality in &personalities {
        section.gwrite(personality, offset)?;
    }
    assert_eq!(*offset as u32, first_entries_offset);
    for &entry in &first_entries {
        section.gwrite(entry.0, offset)?;
        section.gwrite(entry.1, offset)?;
        section.gwrite(entry.2, offset)?;
    }
    assert_eq!(*offset as u32, lsdas_offset);
    for &lsda in &lsdas {
        section.gwrite(lsda.0, offset)?;
        section.gwrite(lsda.1, offset)?;
    }
    *offset = second_level_pages_offset as usize;
    for second_level_page in &second_level_pages {
        for byte in second_level_page {
            section.gwrite(byte, offset)?;
        }
    }
    // Parse it back and compare against the original entry list.
    let mut iter = CompactUnwindInfoIter::new(&section, true, Arch::Amd64)?;
    let mut orig_entries = regular_entries
        .iter()
        .chain(compressed_entries.iter())
        .flatten();
    while let (Some(entry), Some((orig_address, orig_opcode))) =
        (iter.next()?, orig_entries.next())
    {
        assert_eq!(entry.instruction_address, *orig_address);
        assert_eq!(entry.opcode.0, *orig_opcode);
    }
    assert!(iter.next()?.is_none());
    assert_eq!(orig_entries.next(), None);
    Ok(())
}
#[test]
fn test_compact_opcodes_x86() -> Result<(), MachError> {
    // 32-bit x86: pointers (and therefore saved-register slots) are 4 bytes.
    let pointer_size = 4;
    // Bit positions of the packed payload fields inside a 32-bit compact
    // opcode (the mode occupies the top byte). These match the frameless
    // register-count and the stack-size fields used by the packers below.
    let frameless_reg_count_offset = 32 - 8 - 8 - 3 - 3;
    let stack_size_offset = 32 - 8 - 8;

    // Minimal unwind section: only the leading version word is written.
    // The iterator is used solely to carry the architecture into opcode
    // decoding, so the rest of the section contents never get read.
    let offset = &mut 0;
    let mut section = vec![0u8; 1024];
    section.gwrite(1u32, offset)?;
    // Fixed: the original read `§ion` (mojibake for `&section`).
    let iter = CompactUnwindInfoIter::new(&section, true, Arch::X86)?;

    // A zero opcode means "no unwind info for this range".
    {
        let opcode = Opcode(0);
        assert!(matches!(opcode.instructions(&iter), CompactUnwindOp::None));
    }

    // DWARF mode: the low 24 bits are an offset into __eh_frame.
    {
        let opcode = Opcode(X86_MODE_DWARF | 0x00123456);
        assert!(matches!(
            opcode.instructions(&iter),
            CompactUnwindOp::UseDwarfFde {
                offset_in_eh_frame: 0x00123456
            }
        ));
    }

    // EBP-frame mode, no callee-saved registers: CFA is ebp + 2 words,
    // with ebp and eip restored from the frame.
    {
        let stack_size: i32 = 0xa1;
        let registers = [0, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // EBP-frame mode, one saved register in the first slot.
    {
        let stack_size: i32 = 0x13;
        let registers = [1, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // EBP-frame mode, all five register slots populated: each successive
    // slot sits one word closer to the CFA.
    {
        let stack_size: i32 = 0xc2;
        let registers = [2, 3, 4, 5, 6];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 1) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 3) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 4) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // EBP-frame mode with gaps (zero entries) between saved registers:
    // empty slots still consume their stack position.
    {
        let stack_size: i32 = 0xa7;
        let registers = [2, 0, 4, 0, 6];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 4) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frameless (immediate stack size) mode, no saved registers:
    // CFA is esp + stack_size words; eip is on top of the stack.
    {
        let stack_size: i32 = 0xa1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 0;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frameless mode, one saved register (permutation-encoded).
    {
        let stack_size: i32 = 0x13;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 1;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 0, 0, 1];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frameless mode, all six registers saved: restored in reverse order
    // from just below the return address.
    {
        let stack_size: i32 = 0xc1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 6;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [1, 2, 3, 4, 5, 6];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -3 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -4 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -5 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -6 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -7 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frameless mode, three registers saved (sparse register values).
    {
        let stack_size: i32 = 0xf1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 3;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 2, 4, 6];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -3 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -4 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Indirect-stack-size mode is only constructed here; its decoding is
    // not asserted in this test.
    {
        let _opcode = Opcode(X86_MODE_STACK_IND);
    }
    Ok(())
}
#[test]
fn test_compact_opcodes_x64() -> Result<(), MachError> {
    // x86-64: pointers (and therefore saved-register slots) are 8 bytes.
    // Scenarios mirror test_compact_opcodes_x86 with 64-bit offsets.
    let pointer_size = 8;
    // Bit positions of the packed payload fields inside a 32-bit compact
    // opcode (the mode occupies the top byte).
    let frameless_reg_count_offset = 32 - 8 - 8 - 3 - 3;
    let stack_size_offset = 32 - 8 - 8;

    // Minimal unwind section: only the leading version word is written;
    // the iterator merely carries the architecture into opcode decoding.
    let offset = &mut 0;
    let mut section = vec![0u8; 1024];
    section.gwrite(1u32, offset)?;
    // Fixed: the original read `§ion` (mojibake for `&section`).
    let iter = CompactUnwindInfoIter::new(&section, true, Arch::Amd64)?;

    // A zero opcode means "no unwind info for this range".
    {
        let opcode = Opcode(0);
        assert!(matches!(opcode.instructions(&iter), CompactUnwindOp::None));
    }

    // DWARF mode: the low 24 bits are an offset into __eh_frame.
    {
        let opcode = Opcode(X86_MODE_DWARF | 0x00123456);
        assert!(matches!(
            opcode.instructions(&iter),
            CompactUnwindOp::UseDwarfFde {
                offset_in_eh_frame: 0x00123456
            }
        ));
    }

    // RBP-frame mode, no callee-saved registers.
    {
        let stack_size: i32 = 0xa1;
        let registers = [0, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // RBP-frame mode, one saved register in the first slot.
    {
        let stack_size: i32 = 0x13;
        let registers = [1, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // RBP-frame mode, all five register slots populated.
    {
        let stack_size: i32 = 0xc2;
        let registers = [2, 3, 4, 5, 6];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 1) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 3) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 4) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // RBP-frame mode with gaps (zero entries) between saved registers:
    // empty slots still consume their stack position.
    {
        let stack_size: i32 = 0xa7;
        let registers = [2, 0, 4, 0, 6];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 4) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frameless (immediate stack size) mode, no saved registers.
    {
        let stack_size: i32 = 0xa1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 0;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frameless mode, one saved register (permutation-encoded).
    {
        let stack_size: i32 = 0x13;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 1;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 0, 0, 1];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frameless mode, all six registers saved: restored in reverse order
    // from just below the return address.
    {
        let stack_size: i32 = 0xc1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 6;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [1, 2, 3, 4, 5, 6];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -3 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -4 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -5 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -6 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -7 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frameless mode, three registers saved (sparse register values).
    {
        let stack_size: i32 = 0xf1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 3;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 2, 4, 6];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -3 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -4 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Indirect-stack-size mode is only constructed here; its decoding is
    // not asserted in this test.
    {
        let _opcode = Opcode(X86_MODE_STACK_IND);
    }
    Ok(())
}
#[test]
fn test_compact_opcodes_arm64() -> Result<(), MachError> {
    // arm64: pointers are 8 bytes.
    let pointer_size = 8;
    // Bit position of the frameless stack-size field inside a 32-bit
    // compact opcode (the mode occupies the top byte).
    let frameless_stack_size_offset = 32 - 8 - 12;

    // Minimal unwind section: only the leading version word is written;
    // the iterator merely carries the architecture into opcode decoding.
    let offset = &mut 0;
    let mut section = vec![0u8; 1024];
    section.gwrite(1u32, offset)?;
    // Fixed: the original read `§ion` (mojibake for `&section`).
    let iter = CompactUnwindInfoIter::new(&section, true, Arch::Arm64)?;

    // A zero opcode means "no unwind info for this range".
    {
        let opcode = Opcode(0);
        assert!(matches!(opcode.instructions(&iter), CompactUnwindOp::None));
    }

    // DWARF mode: the low 24 bits are an offset into __eh_frame.
    {
        let opcode = Opcode(ARM64_MODE_DWARF | 0x00123456);
        assert!(matches!(
            opcode.instructions(&iter),
            CompactUnwindOp::UseDwarfFde {
                offset_in_eh_frame: 0x00123456
            }
        ));
    }

    // Frame mode, no extra saved register pairs: CFA is x29 + 2 words,
    // with x29 and pc restored from the frame record.
    {
        let registers = 0b0_0000_0000;
        let opcode = Opcode(ARM64_MODE_FRAME | registers);
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frame mode with one register-pair bit set: each set bit adds a pair
    // of consecutive registers below the frame record.
    {
        let registers = 0b0_0100_0000;
        let opcode = Opcode(ARM64_MODE_FRAME | registers);
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 12),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -3 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 13),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -4 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frame mode with every register-pair bit set: all nine pairs
    // (18 registers) are restored at consecutive slots below the frame.
    {
        let registers = 0b1_1111_1111;
        let opcode = Opcode(ARM64_MODE_FRAME | registers);
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -3 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 1),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -4 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 2),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -5 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 3),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -6 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 4),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -7 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 5),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -8 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 6),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -9 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 7),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -10 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 8),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -11 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 9),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -12 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 10),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -13 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 11),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -14 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 12),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -15 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 13),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -16 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 14),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -17 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 15),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -18 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 16),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -19 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 17),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -20 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frame mode with alternating pair bits: unset pairs are skipped but
    // the slot offsets stay contiguous for the pairs that are saved.
    {
        let registers = 0b1_0101_0101;
        let opcode = Opcode(ARM64_MODE_FRAME | registers);
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -3 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 1),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -4 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 4),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -5 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 5),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -6 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 8),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -7 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 9),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -8 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 12),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -9 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 13),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -10 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 16),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -11 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 17),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -12 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Frameless mode: the packed stack size is scaled by 16, and the
    // return address comes from the link register rather than the stack.
    {
        let stack_size = 0xae1;
        let packed_stack_size = stack_size << frameless_stack_size_offset;
        let opcode = Opcode(ARM64_MODE_FRAMELESS | packed_stack_size);
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size as i32 * 16,
            },
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::link_register(),
                offset_from_src: 0,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    Ok(())
}
#[test]
fn test_compact_register_naming() -> Result<(), MachError> {
    // Minimal unwind section: only the version word is written. Register
    // naming depends solely on the iterator's architecture, not on the
    // section contents.
    let offset = &mut 0;
    let mut section = vec![0u8; 1024];
    section.gwrite(1u32, offset)?;

    // arm64 register names.
    // Fixed in all three branches: the original read `§ion`
    // (mojibake for `&section`).
    {
        let iter = CompactUnwindInfoIter::new(&section, true, Arch::Arm64)?;
        assert_eq!(CompactCfiRegister::cfa().name(&iter), Some("cfa"));
        assert_eq!(CompactCfiRegister::stack_pointer().name(&iter), Some("sp"));
        assert_eq!(
            CompactCfiRegister::instruction_pointer().name(&iter),
            Some("pc")
        );
        assert_eq!(CompactCfiRegister::frame_pointer().name(&iter), Some("x29"));
        assert_eq!(CompactCfiRegister::link_register().name(&iter), Some("x30"));
    }

    // 32-bit x86 register names (no link register on this architecture).
    {
        let iter = CompactUnwindInfoIter::new(&section, true, Arch::X86)?;
        assert_eq!(CompactCfiRegister::cfa().name(&iter), Some("cfa"));
        assert_eq!(CompactCfiRegister::stack_pointer().name(&iter), Some("esp"));
        assert_eq!(
            CompactCfiRegister::instruction_pointer().name(&iter),
            Some("eip")
        );
        assert_eq!(CompactCfiRegister::frame_pointer().name(&iter), Some("ebp"));
    }

    // x86-64 register names.
    {
        let iter = CompactUnwindInfoIter::new(&section, true, Arch::Amd64)?;
        assert_eq!(CompactCfiRegister::cfa().name(&iter), Some("cfa"));
        assert_eq!(CompactCfiRegister::stack_pointer().name(&iter), Some("rsp"));
        assert_eq!(
            CompactCfiRegister::instruction_pointer().name(&iter),
            Some("rip")
        );
        assert_eq!(CompactCfiRegister::frame_pointer().name(&iter), Some("rbp"));
    }
    Ok(())
}
}