use std::sync::Arc;
use rayon::prelude::*;
use tracing::{debug, trace};
use crate::dyld::*;
use crate::error::{Error, Result};
use crate::macho::MachOContext;
use super::ExtractionContext;
/// A pending 8-byte pointer rewrite: `value` is stored little-endian at
/// `offset` within the extracted Mach-O's data buffer.
///
/// Collected by the per-page chain walkers on rayon worker threads and
/// applied serially afterwards under a single `&mut` borrow of the image.
#[derive(Clone, Copy, Debug)]
struct WriteOp {
    /// Byte offset into `MachOContext::data` where the pointer slot begins.
    offset: usize,
    /// Unslid pointer value to write (little-endian).
    value: u64,
}
/// Owned snapshot of one cache mapping's slide-info metadata, copied out of
/// the shared `DyldContext` so the mappings can be iterated while
/// `ctx.macho` is mutably borrowed during processing.
#[derive(Clone, Debug)]
struct SlideMapping {
    /// Base VM address of the mapping.
    address: u64,
    /// Size of the mapping in bytes.
    #[allow(dead_code)]
    size: u64,
    /// Offset of the slide-info blob within its subcache's data.
    slide_info_offset: u64,
    /// Size of the slide-info blob in bytes.
    #[allow(dead_code)]
    slide_info_size: u64,
    /// Index of the subcache file this mapping (and its slide info) lives in.
    subcache_index: usize,
}
/// Rebases slid pointers in the extracted Mach-O back to unslid values.
///
/// Walks every cache mapping that carries slide info, keeps only the ones
/// whose VM range overlaps a segment of the Mach-O being extracted, and
/// dispatches each to the version-specific handler (v2, v3, or v5).
///
/// # Errors
///
/// Returns [`Error::UnsupportedSlideVersion`] for an unknown slide-info
/// version, or propagates any error from the version-specific handlers.
/// An out-of-bounds slide-info offset is warned about and skipped rather
/// than treated as fatal.
pub fn process_slide_info(ctx: &mut ExtractionContext) -> Result<()> {
    ctx.info("Processing slide info...");
    let cache = Arc::clone(&ctx.cache);
    // Snapshot the relevant mappings into owned structs so we can iterate
    // them below while mutably borrowing `ctx.macho`.
    let mappings: Vec<SlideMapping> = cache
        .mappings
        .iter()
        .filter(|m| m.has_slide_info())
        .filter(|mapping| {
            // Keep only mappings overlapping at least one segment of this
            // image. Saturating adds guard against overflow from malformed
            // vmaddr/vmsize or mapping metadata in untrusted input.
            ctx.macho.segments().any(|seg| {
                let seg_start = seg.command.vmaddr;
                let seg_end = seg_start.saturating_add(seg.command.vmsize);
                let map_start = mapping.address;
                let map_end = map_start.saturating_add(mapping.size);
                seg_start < map_end && seg_end > map_start
            })
        })
        .map(|m| SlideMapping {
            address: m.address,
            size: m.size,
            slide_info_offset: m.slide_info_offset,
            slide_info_size: m.slide_info_size,
            subcache_index: m.subcache_index,
        })
        .collect();
    for mapping in mappings {
        let cache_data = cache.data_for_subcache(mapping.subcache_index);
        let slide_offset = mapping.slide_info_offset as usize;
        // Need at least 4 bytes for the version field; checked_add guards
        // against wraparound on a hostile slide_info_offset.
        let in_bounds = slide_offset
            .checked_add(4)
            .map_or(false, |end| end <= cache_data.len());
        if !in_bounds {
            ctx.warn(&format!(
                "Slide info at offset {:#x} is out of bounds",
                slide_offset
            ));
            continue;
        }
        let version = crate::util::read_u32_le(&cache_data[slide_offset..]);
        debug!(
            "Processing slide info v{} for mapping at {:#x}",
            version, mapping.address
        );
        match version {
            2 => process_slide_info_v2(&mut ctx.macho, cache_data, slide_offset, &mapping)?,
            3 => process_slide_info_v3(&mut ctx.macho, cache_data, slide_offset, &mapping)?,
            5 => process_slide_info_v5(&mut ctx.macho, &cache, cache_data, slide_offset, &mapping)?,
            _ => {
                return Err(Error::UnsupportedSlideVersion(version));
            }
        }
    }
    Ok(())
}
/// Applies dyld slide-info v2 rebasing to the extracted Mach-O.
///
/// In the v2 format each rebased page holds a chain of slid pointers: the
/// bits selected by `delta_mask` encode the distance to the next chain entry
/// (in 4-byte units; 0 terminates), and the `value_mask` bits hold the
/// unslid target, to which `value_add` is added for non-NULL values.
///
/// Chains are decoded in parallel with rayon against an immutable view of
/// the image, then the writes are applied serially under the single `&mut`
/// borrow of `macho.data`.
///
/// NOTE(review): page_start entries with attribute bits (e.g. the v2
/// "extra" multi-chain pages) get no special handling here beyond the
/// NO_REBASE check — confirm the targeted caches never emit them.
fn process_slide_info_v2(
    macho: &mut MachOContext,
    cache_data: &[u8],
    offset: usize,
    mapping: &SlideMapping,
) -> Result<()> {
    use zerocopy::FromBytes;
    // x86_64 caches keep their pointers in an offset format that needs no
    // chain-based rebasing, so there is nothing to do.
    if macho.header.is_x86_64() {
        debug!("Slide v2: skipping rebasing for x86_64 (pointers already in offset format)");
        return Ok(());
    }
    // Parse the fixed-size v2 header at `offset`.
    let slide_info = DyldCacheSlideInfo2::read_from_prefix(&cache_data[offset..])
        .map_err(|_| Error::InvalidSlideInfo {
            offset: offset as u64,
            reason: "failed to parse slide info v2".into(),
        })?
        .0;
    let page_size = slide_info.page_size as u64;
    // Unlike v3/v5, the page-starts table sits at a header-relative offset
    // rather than immediately after the header struct.
    let page_starts_offset = offset + slide_info.page_starts_offset as usize;
    let page_count = slide_info.page_starts_count as usize;
    let delta_mask = slide_info.delta_mask;
    let value_mask = slide_info.value_mask();
    let value_add = slide_info.value_add;
    let delta_shift = slide_info.delta_shift();
    debug!(
        "Slide v2: delta_mask={:#018x}, value_mask={:#018x}, value_add={:#018x}, delta_shift={}, pages={}",
        delta_mask, value_mask, value_add, delta_shift, page_count
    );
    // For every page that rebases, compute the VM address of the first chain
    // entry (page_start counts 4-byte units from the page base).
    let page_infos: Vec<_> = (0..page_count)
        .filter_map(|page_idx| {
            let page_start_offset = page_starts_offset + page_idx * 2;
            if page_start_offset + 2 > cache_data.len() {
                return None;
            }
            let page_start = crate::util::read_u16_le(&cache_data[page_start_offset..]);
            // NOTE(review): the constant is truncated to 16 bits before the
            // compare — presumably NO_REBASE is declared wider than u16;
            // verify against the constant's definition.
            if page_start == (DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE & 0xFFFF) as u16 {
                return None;
            }
            let page_addr = mapping.address + (page_idx as u64 * page_size);
            let start_offset = (page_start as u64) * 4; Some((page_addr + start_offset, page_idx))
        })
        .collect();
    // Immutable view of the image bytes shared by the rayon workers; writes
    // are deferred and applied below.
    let macho_data: &[u8] = &macho.data;
    let all_writes: Vec<Vec<WriteOp>> = page_infos
        .par_iter()
        .map(|&(start_addr, _page_idx)| {
            collect_v2_page_writes(
                macho_data,
                macho,
                start_addr,
                delta_mask,
                value_mask,
                value_add,
                delta_shift,
            )
        })
        .collect();
    // Apply all pending pointer rewrites under the single mutable borrow.
    for writes in all_writes {
        for op in writes {
            macho.data[op.offset..op.offset + 8].copy_from_slice(&op.value.to_le_bytes());
        }
    }
    Ok(())
}
/// Walks one v2 rebase chain starting at VM address `addr` and returns the
/// pending pointer rewrites without touching the image bytes.
#[inline]
fn collect_v2_page_writes(
    data: &[u8],
    macho: &MachOContext,
    mut addr: u64,
    delta_mask: u64,
    value_mask: u64,
    value_add: u64,
    delta_shift: u32,
) -> Vec<WriteOp> {
    let mut pending = Vec::with_capacity(64);
    loop {
        // A chain entry outside the extracted image ends the walk: we have
        // no bytes from which to read the next delta.
        let Some(file_off) = macho.addr_to_offset(addr) else {
            trace!("Address {:#x} not in Mach-O, skipping", addr);
            break;
        };
        if data.len() < file_off + 8 {
            break;
        }
        let raw = crate::util::read_u64_le(&data[file_off..]);
        // Distance to the next chain entry, in 4-byte units (0 terminates).
        let step = (raw & delta_mask) >> delta_shift;
        // Unslid target: masked value plus value_add, except for NULL.
        let mut unslid = raw & value_mask;
        if unslid != 0 {
            unslid += value_add;
        }
        pending.push(WriteOp {
            offset: file_off,
            value: unslid,
        });
        if step == 0 {
            break;
        }
        addr += step * 4;
    }
    pending
}
/// Applies dyld slide-info v3 rebasing to the extracted Mach-O.
///
/// Per-page chains are decoded in parallel with rayon against an immutable
/// view of the image; the resulting writes are then applied serially under
/// the single `&mut` borrow of `macho.data`.
fn process_slide_info_v3(
    macho: &mut MachOContext,
    cache_data: &[u8],
    offset: usize,
    mapping: &SlideMapping,
) -> Result<()> {
    use zerocopy::FromBytes;
    // Parse the fixed v3 header; the page-starts table follows it directly.
    let (slide_info, _) = DyldCacheSlideInfo3::read_from_prefix(&cache_data[offset..])
        .map_err(|_| Error::InvalidSlideInfo {
            offset: offset as u64,
            reason: "failed to parse slide info v3".into(),
        })?;
    let page_size = slide_info.page_size as u64;
    let auth_value_add = slide_info.auth_value_add;
    let page_count = slide_info.page_starts_count as usize;
    let starts_base = offset + std::mem::size_of::<DyldCacheSlideInfo3>();
    debug!(
        "Slide v3: auth_value_add={:#018x}, pages={}",
        auth_value_add, page_count
    );
    // VM address of the first chain entry for every page that rebases
    // (page_start counts 8-byte units from the page base).
    let mut chain_starts = Vec::with_capacity(page_count);
    for idx in 0..page_count {
        let entry_off = starts_base + idx * 2;
        if entry_off + 2 > cache_data.len() {
            continue;
        }
        let first = crate::util::read_u16_le(&cache_data[entry_off..]);
        if first != DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE {
            chain_starts.push(mapping.address + idx as u64 * page_size + first as u64 * 8);
        }
    }
    // Immutable view shared by the rayon workers; writes are applied below.
    let image: &[u8] = &macho.data;
    let batches: Vec<Vec<WriteOp>> = chain_starts
        .par_iter()
        .map(|&start| collect_v3_page_writes(image, macho, start, auth_value_add))
        .collect();
    for batch in batches {
        for op in batch {
            macho.data[op.offset..op.offset + 8].copy_from_slice(&op.value.to_le_bytes());
        }
    }
    Ok(())
}
/// Walks one v3 rebase chain starting at VM address `addr` and returns the
/// pending pointer rewrites without touching the image bytes.
#[inline]
fn collect_v3_page_writes(
    data: &[u8],
    macho: &MachOContext,
    mut addr: u64,
    auth_value_add: u64,
) -> Vec<WriteOp> {
    let mut pending = Vec::with_capacity(64);
    loop {
        // Entries outside the extracted image end the walk — there are no
        // bytes from which to read the next link.
        let Some(file_off) = macho.addr_to_offset(addr) else {
            trace!("Address {:#x} not in Mach-O, skipping", addr);
            break;
        };
        if data.len() < file_off + 8 {
            break;
        }
        let ptr = SlidePointer3(crate::util::read_u64_le(&data[file_off..]));
        // Authenticated pointers carry an offset rebased via auth_value_add;
        // plain pointers carry their target value directly.
        let unslid = match ptr.is_auth() {
            true => ptr.auth_offset() as u64 + auth_value_add,
            false => ptr.plain_value(),
        };
        pending.push(WriteOp {
            offset: file_off,
            value: unslid,
        });
        // offset_to_next is in 8-byte strides; 0 terminates the chain.
        let step = ptr.offset_to_next() * 8;
        if step == 0 {
            break;
        }
        addr += step;
    }
    pending
}
/// Applies dyld slide-info v5 rebasing to the extracted Mach-O.
///
/// Unlike v2/v3, the chain walker reads raw values from the cache itself
/// (via `cache`), so it needs the full `DyldContext` rather than just the
/// image bytes. Chains are decoded in parallel; writes are applied serially
/// afterwards under the single `&mut` borrow of `macho.data`.
fn process_slide_info_v5(
    macho: &mut MachOContext,
    cache: &Arc<DyldContext>,
    cache_data: &[u8],
    offset: usize,
    mapping: &SlideMapping,
) -> Result<()> {
    use zerocopy::FromBytes;
    // Parse the fixed v5 header; the page-starts table follows it directly.
    let (slide_info, _) = DyldCacheSlideInfo5::read_from_prefix(&cache_data[offset..])
        .map_err(|_| Error::InvalidSlideInfo {
            offset: offset as u64,
            reason: "failed to parse slide info v5".into(),
        })?;
    let page_size = slide_info.page_size as u64;
    let value_add = slide_info.value_add;
    let page_count = slide_info.page_starts_count as usize;
    let starts_base = offset + std::mem::size_of::<DyldCacheSlideInfo5>();
    debug!(
        "Slide v5: value_add={:#018x}, pages={}",
        value_add, page_count
    );
    // VM address of the first chain entry for every page that rebases
    // (page_start counts 8-byte units from the page base).
    let mut chain_starts = Vec::with_capacity(page_count);
    for idx in 0..page_count {
        let entry_off = starts_base + idx * 2;
        if entry_off + 2 > cache_data.len() {
            continue;
        }
        let first = crate::util::read_u16_le(&cache_data[entry_off..]);
        if first != DYLD_CACHE_SLIDE_V5_PAGE_ATTR_NO_REBASE {
            chain_starts.push(mapping.address + idx as u64 * page_size + first as u64 * 8);
        }
    }
    // Decode in parallel, then apply under the single mutable borrow.
    let batches: Vec<Vec<WriteOp>> = chain_starts
        .par_iter()
        .map(|&start_addr| collect_v5_page_writes(cache, macho, start_addr, value_add))
        .collect();
    for batch in batches {
        for op in batch {
            macho.data[op.offset..op.offset + 8].copy_from_slice(&op.value.to_le_bytes());
        }
    }
    Ok(())
}
/// Walks one v5 rebase chain starting at VM address `addr`.
///
/// The raw chain entries are read from the *cache* rather than from the
/// extracted image, so the walk can continue across regions that are not
/// part of this Mach-O; writes are only recorded for addresses that map
/// into the image.
#[inline]
fn collect_v5_page_writes(
    cache: &Arc<DyldContext>,
    macho: &MachOContext,
    mut addr: u64,
    value_add: u64,
) -> Vec<WriteOp> {
    let mut writes = Vec::with_capacity(64);
    loop {
        let macho_offset = macho.addr_to_offset(addr);
        // Raw value comes from the cache; if the cache can't serve it, the
        // chain cannot be followed further.
        let raw_value = match cache.data_at_addr(addr, 8) {
            Ok(data) => u64::from_le_bytes(data.try_into().unwrap()),
            Err(_) => {
                break;
            }
        };
        let ptr = SlidePointer5(raw_value);
        // next() is in 8-byte strides; 0 terminates the chain.
        let delta = ptr.next() * 8;
        // Record a write only when the full 8-byte slot lies inside the
        // extracted image. The v2/v3 walkers perform the same bounds check;
        // without it the caller's slice write would panic on a pointer
        // mapped within 8 bytes of the end of the image.
        if let Some(macho_off) = macho_offset {
            if macho_off + 8 <= macho.data.len() {
                let new_value = if ptr.is_auth() {
                    // Authenticated: runtime offset rebased by value_add.
                    ptr.runtime_offset() + value_add
                } else {
                    // Plain: rebase and re-attach the stored top byte.
                    let runtime_offset = ptr.runtime_offset();
                    let high8 = (ptr.high8() as u64) << 56;
                    runtime_offset + value_add + high8
                };
                writes.push(WriteOp {
                    offset: macho_off,
                    value: new_value,
                });
            }
        }
        if delta == 0 {
            break;
        }
        addr += delta;
    }
    writes
}