// specter/memory/manipulation/patch.rs
1//! Stealth memory patching
2//!
3//! Uses mach_vm_remap to create writable aliases of code pages,
4//! avoiding detectable vm_protect calls on the original code segment.
5
6use crate::memory::ffi::mach_exc::mach_vm_remap;
7use crate::memory::platform::thread;
8#[cfg(feature = "dev_release")]
9use crate::utils::logger;
10use jit_assembler::aarch64::Aarch64InstructionBuilder;
11use jit_assembler::common::InstructionBuilder;
12use mach2::kern_return::KERN_SUCCESS;
13use mach2::traps::mach_task_self;
14use mach2::vm::{mach_vm_deallocate, mach_vm_protect};
15use mach2::vm_prot::{VM_PROT_COPY, VM_PROT_EXECUTE, VM_PROT_READ, VM_PROT_WRITE};
16use std::arch::asm;
17use std::ffi::c_void;
18use std::ptr;
19use thiserror::Error;
20
// Cache-line granularity (bytes) used by the cache-maintenance loops in
// `invalidate_icache`.
const CACHE_LINE_SIZE: usize = 64;

// B instruction range: +/- 128MB (signed 26-bit word offset)
const B_RANGE: isize = 128 * 1024 * 1024;

// Mach VM constants not exposed by mach2
// VM_FLAGS_ANYWHERE: let the kernel choose the destination address for the remap
const VM_FLAGS_ANYWHERE: i32 = 0x1;
// VM_INHERIT_NONE: the remapped alias is not inherited by child tasks
const VM_INHERIT_NONE: u32 = 2;
29
#[derive(Error, Debug)]
/// Errors that can occur during patching operations
pub enum PatchError {
    /// The provided hex string is invalid
    #[error("Invalid hex: {0}")]
    InvalidHex(#[from] hex::FromHexError),
    /// The target image's base address could not be resolved
    #[error("Image not found: {0}")]
    ImageBaseNotFound(#[from] crate::memory::info::image::ImageError),
    /// Failed to change memory protection (carries the raw protection error code)
    #[error("Protection failed: {0}")]
    ProtectionFailed(i32),
    /// Thread manipulation error
    #[error("Thread error: {0}")]
    ThreadError(#[from] crate::memory::platform::thread::ThreadError),
    /// The provided instruction list is empty
    #[error("Empty instructions")]
    EmptyInstructions,
    /// Code cave error
    #[error("Code cave error: {0}")]
    CaveError(#[from] crate::memory::info::code_cave::CodeCaveError),
    /// Branch out of range (a B instruction only reaches +/- 128MB)
    #[error("Branch target out of range")]
    BranchOutOfRange,
    /// Write verification failed (readback did not match the written bytes)
    #[error("Write verification failed")]
    VerificationFailed,
}
57
/// Represents an applied memory patch
///
/// Holds everything needed to undo the patch via [`Patch::revert`].
pub struct Patch {
    /// The absolute address (not RVA) where the patch was applied
    address: usize,
    /// The original bytes that were overwritten (restored on revert)
    original_bytes: Vec<u8>,
    /// Optional code cave used by this patch; freed when the patch is reverted
    cave: Option<crate::memory::info::code_cave::CodeCave>,
}
67
68impl Patch {
69    /// Reverts the patch, restoring the original bytes
70    pub fn revert(&self) {
71        unsafe {
72            let suspended = match thread::suspend_other_threads() {
73                Ok(s) => s,
74                Err(_e) => {
75                    #[cfg(feature = "dev_release")]
76                    logger::error(&format!("Revert suspend failed: {}", _e));
77                    return;
78                }
79            };
80
81            if let Err(_e) = stealth_write(self.address, &self.original_bytes) {
82                #[cfg(feature = "dev_release")]
83                logger::error(&format!("Revert write failed: {}", _e));
84            }
85
86            if let Some(cave) = &self.cave
87                && let Err(_e) = crate::memory::info::code_cave::free_cave(cave.address)
88            {
89                #[cfg(feature = "dev_release")]
90                logger::error(&format!("Cave free failed: {}", _e));
91            }
92
93            thread::resume_threads(&suspended);
94            #[cfg(feature = "dev_release")]
95            logger::debug("Patch reverted");
96        }
97    }
98
99    /// Returns the address of the patch
100    pub fn address(&self) -> usize {
101        self.address
102    }
103
104    /// Returns the original bytes
105    pub fn original_bytes(&self) -> &[u8] {
106        &self.original_bytes
107    }
108}
109
110/// Applies a hex string patch at a relative virtual address (RVA)
111///
112/// # Arguments
113/// * `rva` - The relative virtual address to patch
114/// * `hex_str` - The hex string representing the bytes to write
115///
116/// # Returns
117/// * `Result<Patch, PatchError>` - The applied patch or an error
118pub fn apply(rva: usize, hex_str: &str) -> Result<Patch, PatchError> {
119    let clean: String = hex_str.chars().filter(|c| !c.is_whitespace()).collect();
120    let bytes = hex::decode(&clean)?;
121    let image_name = crate::config::get_target_image_name().ok_or_else(|| {
122        crate::memory::info::image::ImageError::NotFound("call mem_init first".to_string())
123    })?;
124    let base = crate::memory::info::image::get_image_base(&image_name)?;
125    let address = base + rva;
126
127    unsafe {
128        let suspended = thread::suspend_other_threads()?;
129
130        let original_bytes = read_bytes(address, bytes.len());
131
132        let result = stealth_write(address, &bytes);
133        if let Err(e) = result {
134            thread::resume_threads(&suspended);
135            return Err(e);
136        }
137
138        if !verify_write(address, &bytes) {
139            thread::resume_threads(&suspended);
140            return Err(PatchError::VerificationFailed);
141        }
142
143        thread::resume_threads(&suspended);
144        #[cfg(feature = "dev_release")]
145        logger::debug("Patch applied");
146
147        Ok(Patch {
148            address,
149            original_bytes,
150            cave: None,
151        })
152    }
153}
154
155/// Applies an assembly patch at a relative virtual address (RVA)
156///
157/// # Type Parameters
158/// * `F` - The closure that builds the assembly instructions
159///
160/// # Arguments
161/// * `rva` - The relative virtual address to patch
162/// * `build` - A closure that takes an `Aarch64InstructionBuilder` and appends instructions
163///
164/// # Returns
165/// * `Result<Patch, PatchError>` - The applied patch or an error
166pub fn apply_asm<F>(rva: usize, build: F) -> Result<Patch, PatchError>
167where
168    F: FnOnce(&mut Aarch64InstructionBuilder) -> &mut Aarch64InstructionBuilder,
169{
170    let mut builder = Aarch64InstructionBuilder::new();
171    build(&mut builder);
172    let instructions = builder.instructions();
173    if instructions.is_empty() {
174        return Err(PatchError::EmptyInstructions);
175    }
176    let bytes: Vec<u8> = instructions
177        .iter()
178        .flat_map(|instr| instr.0.to_le_bytes())
179        .collect();
180    let image_name = crate::config::get_target_image_name().ok_or_else(|| {
181        crate::memory::info::image::ImageError::NotFound("call mem_init first".to_string())
182    })?;
183    let base = crate::memory::info::image::get_image_base(&image_name)?;
184    let address = base + rva;
185
186    unsafe {
187        let suspended = thread::suspend_other_threads()?;
188
189        let original_bytes = read_bytes(address, bytes.len());
190
191        let result = stealth_write(address, &bytes);
192        if let Err(e) = result {
193            thread::resume_threads(&suspended);
194            return Err(e);
195        }
196
197        if !verify_write(address, &bytes) {
198            thread::resume_threads(&suspended);
199            return Err(PatchError::VerificationFailed);
200        }
201
202        thread::resume_threads(&suspended);
203        #[cfg(feature = "dev_release")]
204        logger::debug("ASM patch applied");
205
206        Ok(Patch {
207            address,
208            original_bytes,
209            cave: None,
210        })
211    }
212}
213
/// Applies an assembly patch using a code cave
///
/// This writes the assembly instructions to a nearby code cave and patches the
/// target address with a branch to the cave.
///
/// NOTE(review): the generated code is not given an automatic branch back to
/// the original code — the builder closure appears responsible for any return
/// branch; confirm with callers.
///
/// # Type Parameters
/// * `F` - The closure that builds the assembly instructions
///
/// # Arguments
/// * `rva` - The relative virtual address to patch
/// * `build` - A closure that takes an `Aarch64InstructionBuilder` and appends instructions
///
/// # Returns
/// * `Result<Patch, PatchError>` - The applied patch or an error
pub fn apply_asm_in_cave<F>(rva: usize, build: F) -> Result<Patch, PatchError>
where
    F: FnOnce(&mut Aarch64InstructionBuilder) -> &mut Aarch64InstructionBuilder,
{
    let mut builder = Aarch64InstructionBuilder::new();
    build(&mut builder);
    let instructions = builder.instructions();
    if instructions.is_empty() {
        return Err(PatchError::EmptyInstructions);
    }

    // Encode each 32-bit instruction in little-endian byte order.
    let bytes: Vec<u8> = instructions
        .iter()
        .flat_map(|instr| instr.0.to_le_bytes())
        .collect();

    let image_name = crate::config::get_target_image_name().ok_or_else(|| {
        crate::memory::info::image::ImageError::NotFound("call mem_init first".to_string())
    })?;
    let base = crate::memory::info::image::get_image_base(&image_name)?;
    let address = base + rva;

    let cave = crate::memory::info::code_cave::allocate_cave_near(address, bytes.len())?;

    // Round the cave start up to 4-byte alignment (AArch64 branch targets
    // must be word-aligned).
    let aligned_address = (cave.address + 3) & !3;

    // Alignment may consume up to 3 bytes of the cave; re-check capacity.
    if aligned_address + bytes.len() > cave.address + cave.size {
        crate::memory::info::code_cave::free_cave(cave.address).ok();
        return Err(PatchError::CaveError(
            crate::memory::info::code_cave::CodeCaveError::Custom(
                "Cave too small for alignment".to_string(),
            ),
        ));
    }

    // B encodes a signed 26-bit word offset, i.e. +/- 128MB from the patch site.
    let offset = (aligned_address as isize) - (address as isize);
    if !(-B_RANGE..B_RANGE).contains(&offset) {
        crate::memory::info::code_cave::free_cave(cave.address).ok();
        return Err(PatchError::BranchOutOfRange);
    }

    // B instruction: 0x14000000 | imm26 (offset expressed in 4-byte words)
    let b_instr = 0x14000000 | (((offset >> 2) as u32) & 0x03FFFFFF);
    let b_bytes = b_instr.to_le_bytes();

    unsafe {
        let suspended = thread::suspend_other_threads()?;

        // Write instructions to the cave BEFORE the branch goes live, so no
        // resumed thread can ever branch into a half-written cave.
        if let Err(e) = stealth_write(aligned_address, &bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(e);
        }

        if !verify_write(aligned_address, &bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(PatchError::VerificationFailed);
        }

        // Save original bytes and write the branch; only 4 bytes (the single
        // B instruction) are replaced at the patch site.
        let original_bytes = read_bytes(address, 4);

        if let Err(e) = stealth_write(address, &b_bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(e);
        }

        if !verify_write(address, &b_bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(PatchError::VerificationFailed);
        }

        thread::resume_threads(&suspended);
        #[cfg(feature = "dev_release")]
        logger::debug("Cave ASM patch applied");

        Ok(Patch {
            address,
            original_bytes,
            cave: Some(cave),
        })
    }
}
315
316/// Applies a hex string patch using a code cave
317///
318/// This writes the hex bytes to a nearby code cave and patches the
319/// target address with a branch to the cave.
320///
321/// # Arguments
322/// * `rva` - The relative virtual address to patch
323/// * `hex_str` - The hex string representing the bytes to write
324///
325/// # Returns
326/// * `Result<Patch, PatchError>` - The applied patch or an error
327pub fn apply_in_cave(rva: usize, hex_str: &str) -> Result<Patch, PatchError> {
328    let clean: String = hex_str.chars().filter(|c| !c.is_whitespace()).collect();
329    let bytes = hex::decode(&clean)?;
330
331    if bytes.is_empty() {
332        return Err(PatchError::EmptyInstructions);
333    }
334
335    let image_name = crate::config::get_target_image_name().ok_or_else(|| {
336        crate::memory::info::image::ImageError::NotFound("call mem_init first".to_string())
337    })?;
338    let base = crate::memory::info::image::get_image_base(&image_name)?;
339    let address = base + rva;
340
341    let cave = crate::memory::info::code_cave::allocate_cave_near(address, bytes.len())?;
342
343    let aligned_address = (cave.address + 3) & !3;
344
345    if aligned_address + bytes.len() > cave.address + cave.size {
346        crate::memory::info::code_cave::free_cave(cave.address).ok();
347        return Err(PatchError::CaveError(
348            crate::memory::info::code_cave::CodeCaveError::Custom(
349                "Cave too small for alignment".to_string(),
350            ),
351        ));
352    }
353
354    let offset = (aligned_address as isize) - (address as isize);
355    if !(-B_RANGE..B_RANGE).contains(&offset) {
356        crate::memory::info::code_cave::free_cave(cave.address).ok();
357        return Err(PatchError::BranchOutOfRange);
358    }
359
360    // B instruction: 0x14000000 | imm26
361    let b_instr = 0x14000000 | (((offset >> 2) as u32) & 0x03FFFFFF);
362    let b_bytes = b_instr.to_le_bytes();
363
364    unsafe {
365        let suspended = thread::suspend_other_threads()?;
366
367        // Write payload to the cave
368        if let Err(e) = stealth_write(aligned_address, &bytes) {
369            thread::resume_threads(&suspended);
370            crate::memory::info::code_cave::free_cave(cave.address).ok();
371            return Err(e);
372        }
373
374        if !verify_write(aligned_address, &bytes) {
375            thread::resume_threads(&suspended);
376            crate::memory::info::code_cave::free_cave(cave.address).ok();
377            return Err(PatchError::VerificationFailed);
378        }
379
380        // Save original bytes and write the branch
381        let original_bytes = read_bytes(address, 4);
382
383        if let Err(e) = stealth_write(address, &b_bytes) {
384            thread::resume_threads(&suspended);
385            crate::memory::info::code_cave::free_cave(cave.address).ok();
386            return Err(e);
387        }
388
389        if !verify_write(address, &b_bytes) {
390            thread::resume_threads(&suspended);
391            crate::memory::info::code_cave::free_cave(cave.address).ok();
392            return Err(PatchError::VerificationFailed);
393        }
394
395        thread::resume_threads(&suspended);
396        #[cfg(feature = "dev_release")]
397        logger::debug("Cave patch applied");
398
399        Ok(Patch {
400            address,
401            original_bytes,
402            cave: Some(cave),
403        })
404    }
405}
406
407/// Applies a patch at an absolute address
408///
409/// # Arguments
410/// * `address` - The absolute address to patch
411/// * `bytes` - The bytes to write
412///
413/// # Returns
414/// * `Result<Patch, PatchError>` - The applied patch or an error
415pub fn apply_at_address(address: usize, bytes: &[u8]) -> Result<Patch, PatchError> {
416    unsafe {
417        let suspended = thread::suspend_other_threads()?;
418
419        let original_bytes = read_bytes(address, bytes.len());
420
421        let result = stealth_write(address, bytes);
422        if let Err(e) = result {
423            thread::resume_threads(&suspended);
424            return Err(e);
425        }
426
427        if !verify_write(address, bytes) {
428            thread::resume_threads(&suspended);
429            return Err(PatchError::VerificationFailed);
430        }
431
432        thread::resume_threads(&suspended);
433        #[cfg(feature = "dev_release")]
434        logger::debug("Address patch applied");
435
436        Ok(Patch {
437            address,
438            original_bytes,
439            cave: None,
440        })
441    }
442}
443
444unsafe fn read_bytes(address: usize, len: usize) -> Vec<u8> {
445    unsafe {
446        (0..len)
447            .map(|i| super::rw::read::<u8>(address + i).unwrap_or(0))
448            .collect()
449    }
450}
451
/// Writes to code memory using mach_vm_remap to create a writable alias,
/// avoiding detectable vm_protect calls on the original code pages.
/// Falls back to traditional vm_protect if remap is unavailable.
///
/// # Safety
/// `address..address + data.len()` must lie within a mapped region of this
/// task, and callers must ensure no other thread executes the target range
/// during the write (the `apply*` entry points suspend other threads first).
pub(crate) unsafe fn stealth_write(address: usize, data: &[u8]) -> Result<(), PatchError> {
    unsafe {
        // Expand the target range to whole pages; remap operates on pages.
        let page_size = libc::sysconf(libc::_SC_PAGESIZE) as usize;
        let page_mask = !(page_size - 1);
        let page_start = address & page_mask;
        let page_len = ((address + data.len() + page_size - 1) & page_mask) - page_start;
        let offset_in_page = address - page_start;

        let task = mach_task_self();
        let mut remap_addr: u64 = 0;
        let mut cur_prot: i32 = 0;
        let mut max_prot: i32 = 0;

        // Create a second mapping (alias) of the code pages at a
        // kernel-chosen address; with copy = 0 the alias shares the pages,
        // so writes through it land in the original mapping.
        let kr = mach_vm_remap(
            task,
            &mut remap_addr,
            page_len as u64,
            0,
            VM_FLAGS_ANYWHERE,
            task,
            page_start as u64,
            0, // share, not copy
            &mut cur_prot,
            &mut max_prot,
            VM_INHERIT_NONE,
        );

        if kr != KERN_SUCCESS {
            #[cfg(feature = "dev_release")]
            logger::debug("Remap unavailable, fallback");
            return fallback_write(address, data);
        }

        // Check if the remap's max protection allows writing; if not, the
        // later vm_protect on the alias could never succeed.
        if (max_prot & VM_PROT_WRITE) == 0 {
            mach_vm_deallocate(task, remap_addr, page_len as u64);
            #[cfg(feature = "dev_release")]
            logger::debug("Remap max prot insufficient, fallback");
            return fallback_write(address, data);
        }

        // Make the REMAP writable (not the original code pages)
        let kr = mach_vm_protect(
            task,
            remap_addr,
            page_len as u64,
            0,
            VM_PROT_READ | VM_PROT_WRITE,
        );

        if kr != KERN_SUCCESS {
            mach_vm_deallocate(task, remap_addr, page_len as u64);
            #[cfg(feature = "dev_release")]
            logger::debug("Remap protect failed, fallback");
            return fallback_write(address, data);
        }

        // Write through the writable alias
        let write_addr = remap_addr as usize + offset_in_page;
        ptr::copy_nonoverlapping(data.as_ptr(), write_addr as *mut u8, data.len());

        // Tear down the alias so no writable mapping of the code remains
        mach_vm_deallocate(task, remap_addr, page_len as u64);

        // Flush caches on the ORIGINAL address so stale instructions are not
        // executed from the i-cache
        invalidate_icache(address as *mut c_void, data.len());

        Ok(())
    }
}
525
/// Fallback write using traditional vm_protect (if remap is unavailable)
///
/// Temporarily makes the containing pages writable (VM_PROT_COPY requests a
/// copy-on-write break for code pages), performs the write, then restores the
/// previous protection on a best-effort basis.
unsafe fn fallback_write(address: usize, data: &[u8]) -> Result<(), PatchError> {
    unsafe {
        use crate::memory::info::protection;

        // Round the target range out to whole pages for vm_protect.
        let page_size = libc::sysconf(libc::_SC_PAGESIZE) as usize;
        let page_mask = !(page_size - 1);
        let page_start = address & page_mask;
        let page_len = ((address + data.len() + page_size - 1) & page_mask) - page_start;

        // Remember the current protection so it can be restored afterwards;
        // default to R-X if the query fails (typical for code pages).
        let original_prot = protection::get_protection(address)
            .map(|p| p.raw())
            .unwrap_or(VM_PROT_READ | VM_PROT_EXECUTE);

        protection::protect(
            page_start,
            page_len,
            protection::PageProtection::from_raw(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COPY),
        )
        .map_err(|e| match e {
            protection::ProtectionError::ProtectionFailed(k) => PatchError::ProtectionFailed(k),
            _ => PatchError::ProtectionFailed(0),
        })?;

        ptr::copy_nonoverlapping(data.as_ptr(), address as *mut u8, data.len());

        // Best-effort restore: a failure here leaves the pages writable, but
        // the write itself has already succeeded.
        let _ = protection::protect(
            page_start,
            page_len,
            protection::PageProtection::from_raw(original_prot),
        );

        invalidate_icache(address as *mut c_void, data.len());

        Ok(())
    }
}
563
564/// Verifies that written bytes match expected data
565unsafe fn verify_write(address: usize, expected: &[u8]) -> bool {
566    unsafe {
567        for (i, &byte) in expected.iter().enumerate() {
568            if super::rw::read::<u8>(address + i).unwrap_or(!byte) != byte {
569                return false;
570            }
571        }
572        true
573    }
574}
575
576/// Invalidates the instruction cache for a memory range
577///
578/// Uses the full ARM64 cache maintenance sequence:
579/// dc cvau -> dsb ish -> ic ivau -> dsb ish -> isb
580#[inline]
581pub unsafe fn invalidate_icache(start: *mut c_void, len: usize) {
582    unsafe {
583        let start_addr = start as usize;
584        let end_addr = start_addr + len;
585        let mut addr = start_addr & !(CACHE_LINE_SIZE - 1);
586
587        // Clean data cache to Point of Unification
588        while addr < end_addr {
589            asm!("dc cvau, {x}", x = in(reg) addr, options(nostack, preserves_flags));
590            addr += CACHE_LINE_SIZE;
591        }
592        asm!("dsb ish", options(nostack, preserves_flags));
593
594        // Invalidate instruction cache
595        addr = start_addr & !(CACHE_LINE_SIZE - 1);
596        while addr < end_addr {
597            asm!("ic ivau, {x}", x = in(reg) addr, options(nostack, preserves_flags));
598            addr += CACHE_LINE_SIZE;
599        }
600        asm!("dsb ish", options(nostack, preserves_flags));
601        asm!("isb", options(nostack, preserves_flags));
602    }
603}