// specter/memory/manipulation/patch.rs

use crate::memory::ffi::mach_exc::mach_vm_remap;
use crate::memory::platform::thread;
#[cfg(feature = "dev_release")]
use crate::utils::logger;
use jit_assembler::aarch64::Aarch64InstructionBuilder;
use jit_assembler::common::InstructionBuilder;
use mach2::kern_return::KERN_SUCCESS;
use mach2::traps::mach_task_self;
use mach2::vm::{mach_vm_deallocate, mach_vm_protect};
use mach2::vm_prot::{VM_PROT_COPY, VM_PROT_EXECUTE, VM_PROT_READ, VM_PROT_WRITE};
use std::arch::asm;
use std::ffi::c_void;
use std::ptr;
use thiserror::Error;

/// Cache-line stride for the `dc`/`ic` maintenance loops in `invalidate_icache`.
const CACHE_LINE_SIZE: usize = 64;

/// Reach of an AArch64 unconditional `B` (signed 26-bit immediate, scaled by 4): ±128 MiB.
const B_RANGE: isize = 128 * 1024 * 1024;

/// Mach VM constants not re-exported by `mach2`.
const VM_FLAGS_ANYWHERE: i32 = 0x1;
const VM_INHERIT_NONE: u32 = 2;

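/// Errors that can arise while applying or reverting a patch.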
#[derive(Error, Debug)]
pub enum PatchError {
    #[error("Invalid hex: {0}")]
    InvalidHex(#[from] hex::FromHexError),
    #[error("Image not found: {0}")]
    ImageBaseNotFound(#[from] crate::memory::info::image::ImageError),
    #[error("Protection failed: {0}")]
    ProtectionFailed(i32),
    #[error("Thread error: {0}")]
    ThreadError(#[from] crate::memory::platform::thread::ThreadError),
    #[error("Empty instructions")]
    EmptyInstructions,
    #[error("Code cave error: {0}")]
    CaveError(#[from] crate::memory::info::code_cave::CodeCaveError),
    #[error("Branch target out of range")]
    BranchOutOfRange,
    #[error("Write verification failed")]
    VerificationFailed,
}

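/// A live patch: the absolute address that was modified, the bytes it
/// replaced, and the code cave backing it (for detour patches).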
pub struct Patch {
    /// Absolute address the patch was written to.
    address: usize,
    /// Bytes that lived at `address` before the patch.
    original_bytes: Vec<u8>,
    /// Code cave holding the detour body, if this patch installed one.
    cave: Option<crate::memory::info::code_cave::CodeCave>,
}

impl Patch {
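    /// Restores the original bytes and frees the code cave, if any. Runs with
    /// all other threads suspended; failures are logged (in `dev_release`
    /// builds) rather than returned, so revert is safe to call on teardown.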
    pub fn revert(&self) {
        unsafe {
            let suspended = match thread::suspend_other_threads() {
                Ok(s) => s,
                Err(_e) => {
                    #[cfg(feature = "dev_release")]
                    logger::error(&format!("Revert suspend failed: {}", _e));
                    return;
                }
            };

            if let Err(_e) = stealth_write(self.address, &self.original_bytes) {
                #[cfg(feature = "dev_release")]
                logger::error(&format!("Revert write failed: {}", _e));
            }

            if let Some(cave) = &self.cave
                && let Err(_e) = crate::memory::info::code_cave::free_cave(cave.address)
            {
                #[cfg(feature = "dev_release")]
                logger::error(&format!("Cave free failed: {}", _e));
            }

            thread::resume_threads(&suspended);
            #[cfg(feature = "dev_release")]
            logger::debug("Patch reverted");
        }
    }

    /// Absolute address this patch was applied at.
    pub fn address(&self) -> usize {
        self.address
    }

    /// The bytes that were replaced.
    pub fn original_bytes(&self) -> &[u8] {
        &self.original_bytes
    }
}

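/// Decodes `hex_str` (whitespace ignored) and writes it at `image_base + rva`,
/// suspending other threads around the write and verifying the result before
/// resuming them. Requires the target image to have been registered first
/// (`mem_init`).
///
/// A minimal usage sketch; the RVA is hypothetical, and `"1F 20 03 D5"` is the
/// little-endian encoding of an AArch64 `NOP` (0xD503201F):
///
/// ```ignore
/// let patch = apply(0x1234, "1F 20 03 D5")?;
/// // ... later, undo it:
/// patch.revert();
/// ```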
pub fn apply(rva: usize, hex_str: &str) -> Result<Patch, PatchError> {
    let clean: String = hex_str.chars().filter(|c| !c.is_whitespace()).collect();
    let bytes = hex::decode(&clean)?;
    let image_name = crate::config::get_target_image_name().ok_or_else(|| {
        crate::memory::info::image::ImageError::NotFound("call mem_init first".to_string())
    })?;
    let base = crate::memory::info::image::get_image_base(&image_name)?;
    let address = base + rva;

    unsafe {
        let suspended = thread::suspend_other_threads()?;

        let original_bytes = read_bytes(address, bytes.len());

        let result = stealth_write(address, &bytes);
        if let Err(e) = result {
            thread::resume_threads(&suspended);
            return Err(e);
        }

        if !verify_write(address, &bytes) {
            thread::resume_threads(&suspended);
            return Err(PatchError::VerificationFailed);
        }

        thread::resume_threads(&suspended);
        #[cfg(feature = "dev_release")]
        logger::debug("Patch applied");

        Ok(Patch {
            address,
            original_bytes,
            cave: None,
        })
    }
}

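/// Like [`apply`], but the payload is assembled with `jit_assembler` instead
/// of being given as hex. The closure receives a fresh
/// `Aarch64InstructionBuilder` and must emit at least one instruction.
///
/// A usage sketch; the RVA and the builder method are illustrative, not a
/// specific `jit_assembler` API:
///
/// ```ignore
/// let patch = apply_asm(0x1234, |b| b.nop())?;
/// ```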
pub fn apply_asm<F>(rva: usize, build: F) -> Result<Patch, PatchError>
where
    F: FnOnce(&mut Aarch64InstructionBuilder) -> &mut Aarch64InstructionBuilder,
{
    let mut builder = Aarch64InstructionBuilder::new();
    build(&mut builder);
    let instructions = builder.instructions();
    if instructions.is_empty() {
        return Err(PatchError::EmptyInstructions);
    }
    let bytes: Vec<u8> = instructions
        .iter()
        .flat_map(|instr| instr.0.to_le_bytes())
        .collect();
    let image_name = crate::config::get_target_image_name().ok_or_else(|| {
        crate::memory::info::image::ImageError::NotFound("call mem_init first".to_string())
    })?;
    let base = crate::memory::info::image::get_image_base(&image_name)?;
    let address = base + rva;

    unsafe {
        let suspended = thread::suspend_other_threads()?;

        let original_bytes = read_bytes(address, bytes.len());

        let result = stealth_write(address, &bytes);
        if let Err(e) = result {
            thread::resume_threads(&suspended);
            return Err(e);
        }

        if !verify_write(address, &bytes) {
            thread::resume_threads(&suspended);
            return Err(PatchError::VerificationFailed);
        }

        thread::resume_threads(&suspended);
        #[cfg(feature = "dev_release")]
        logger::debug("ASM patch applied");

        Ok(Patch {
            address,
            original_bytes,
            cave: None,
        })
    }
}

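/// Assembles the closure's instructions into a code cave allocated within
/// `B` range of `image_base + rva`, then overwrites the single 4-byte
/// instruction at the target with an unconditional branch into the cave.
/// Nothing is appended after the payload, so the closure must end with its
/// own branch back (or an equivalent exit) if execution should continue past
/// the patch site.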
pub fn apply_asm_in_cave<F>(rva: usize, build: F) -> Result<Patch, PatchError>
where
    F: FnOnce(&mut Aarch64InstructionBuilder) -> &mut Aarch64InstructionBuilder,
{
    let mut builder = Aarch64InstructionBuilder::new();
    build(&mut builder);
    let instructions = builder.instructions();
    if instructions.is_empty() {
        return Err(PatchError::EmptyInstructions);
    }

    let bytes: Vec<u8> = instructions
        .iter()
        .flat_map(|instr| instr.0.to_le_bytes())
        .collect();

    let image_name = crate::config::get_target_image_name().ok_or_else(|| {
        crate::memory::info::image::ImageError::NotFound("call mem_init first".to_string())
    })?;
    let base = crate::memory::info::image::get_image_base(&image_name)?;
    let address = base + rva;

    let cave = crate::memory::info::code_cave::allocate_cave_near(address, bytes.len())?;

    // Round the cave start up to the 4-byte alignment AArch64 requires.
    let aligned_address = (cave.address + 3) & !3;

    if aligned_address + bytes.len() > cave.address + cave.size {
        crate::memory::info::code_cave::free_cave(cave.address).ok();
        return Err(PatchError::CaveError(
            crate::memory::info::code_cave::CodeCaveError::Custom(
                "Cave too small for alignment".to_string(),
            ),
        ));
    }

    let offset = (aligned_address as isize) - (address as isize);
    if !(-B_RANGE..B_RANGE).contains(&offset) {
        crate::memory::info::code_cave::free_cave(cave.address).ok();
        return Err(PatchError::BranchOutOfRange);
    }

    // Encode `B <cave>`: opcode 0b000101 in bits 31..26, imm26 = offset / 4.
    let b_instr = 0x14000000 | (((offset >> 2) as u32) & 0x03FFFFFF);
    let b_bytes = b_instr.to_le_bytes();

    unsafe {
        let suspended = thread::suspend_other_threads()?;

        // Write the payload into the cave first so the branch never points
        // at uninitialized memory.
        if let Err(e) = stealth_write(aligned_address, &bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(e);
        }

        if !verify_write(aligned_address, &bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(PatchError::VerificationFailed);
        }

        let original_bytes = read_bytes(address, 4);

        if let Err(e) = stealth_write(address, &b_bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(e);
        }

        if !verify_write(address, &b_bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(PatchError::VerificationFailed);
        }

        thread::resume_threads(&suspended);
        #[cfg(feature = "dev_release")]
        logger::debug("Cave ASM patch applied");

        Ok(Patch {
            address,
            original_bytes,
            cave: Some(cave),
        })
    }
}

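/// Hex-string variant of [`apply_asm_in_cave`]: decodes the payload, places
/// it in a nearby code cave, and branches to it from `image_base + rva`. The
/// same caveat applies: the payload must handle its own branch back.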
pub fn apply_in_cave(rva: usize, hex_str: &str) -> Result<Patch, PatchError> {
    let clean: String = hex_str.chars().filter(|c| !c.is_whitespace()).collect();
    let bytes = hex::decode(&clean)?;

    if bytes.is_empty() {
        return Err(PatchError::EmptyInstructions);
    }

    let image_name = crate::config::get_target_image_name().ok_or_else(|| {
        crate::memory::info::image::ImageError::NotFound("call mem_init first".to_string())
    })?;
    let base = crate::memory::info::image::get_image_base(&image_name)?;
    let address = base + rva;

    let cave = crate::memory::info::code_cave::allocate_cave_near(address, bytes.len())?;

    // Round the cave start up to the 4-byte alignment AArch64 requires.
    let aligned_address = (cave.address + 3) & !3;

    if aligned_address + bytes.len() > cave.address + cave.size {
        crate::memory::info::code_cave::free_cave(cave.address).ok();
        return Err(PatchError::CaveError(
            crate::memory::info::code_cave::CodeCaveError::Custom(
                "Cave too small for alignment".to_string(),
            ),
        ));
    }

    let offset = (aligned_address as isize) - (address as isize);
    if !(-B_RANGE..B_RANGE).contains(&offset) {
        crate::memory::info::code_cave::free_cave(cave.address).ok();
        return Err(PatchError::BranchOutOfRange);
    }

    // Encode `B <cave>`: opcode 0b000101 in bits 31..26, imm26 = offset / 4.
    let b_instr = 0x14000000 | (((offset >> 2) as u32) & 0x03FFFFFF);
    let b_bytes = b_instr.to_le_bytes();

    unsafe {
        let suspended = thread::suspend_other_threads()?;

        // Write the payload into the cave before installing the branch.
        if let Err(e) = stealth_write(aligned_address, &bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(e);
        }

        if !verify_write(aligned_address, &bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(PatchError::VerificationFailed);
        }

        let original_bytes = read_bytes(address, 4);

        if let Err(e) = stealth_write(address, &b_bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(e);
        }

        if !verify_write(address, &b_bytes) {
            thread::resume_threads(&suspended);
            crate::memory::info::code_cave::free_cave(cave.address).ok();
            return Err(PatchError::VerificationFailed);
        }

        thread::resume_threads(&suspended);
        #[cfg(feature = "dev_release")]
        logger::debug("Cave patch applied");

        Ok(Patch {
            address,
            original_bytes,
            cave: Some(cave),
        })
    }
}

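/// Like [`apply`], but takes an absolute address and raw bytes directly, with
/// no image lookup or hex decoding.
///
/// A sketch with a hypothetical address; the payload is an AArch64 `NOP`:
///
/// ```ignore
/// let nop: [u8; 4] = 0xD503201F_u32.to_le_bytes();
/// let patch = apply_at_address(0x1_0000_1234, &nop)?;
/// ```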
pub fn apply_at_address(address: usize, bytes: &[u8]) -> Result<Patch, PatchError> {
    unsafe {
        let suspended = thread::suspend_other_threads()?;

        let original_bytes = read_bytes(address, bytes.len());

        let result = stealth_write(address, bytes);
        if let Err(e) = result {
            thread::resume_threads(&suspended);
            return Err(e);
        }

        if !verify_write(address, bytes) {
            thread::resume_threads(&suspended);
            return Err(PatchError::VerificationFailed);
        }

        thread::resume_threads(&suspended);
        #[cfg(feature = "dev_release")]
        logger::debug("Address patch applied");

        Ok(Patch {
            address,
            original_bytes,
            cave: None,
        })
    }
}

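/// Reads `len` bytes through the module's checked reader; bytes that cannot
/// be read come back as zero.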
unsafe fn read_bytes(address: usize, len: usize) -> Vec<u8> {
    unsafe {
        (0..len)
            .map(|i| super::rw::read::<u8>(address + i).unwrap_or(0))
            .collect()
    }
}

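/// Writes `data` without touching the protection of the target pages
/// themselves: the page range is remapped to a scratch address in the same
/// task (`mach_vm_remap` with `copy == 0`, so both mappings share the same
/// physical pages), made writable there, and written through the alias, which
/// is then deallocated. If any step is unavailable, falls back to the classic
/// `vm_protect` path in `fallback_write`. Finishes by flushing the
/// instruction cache for the written range.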
pub(crate) unsafe fn stealth_write(address: usize, data: &[u8]) -> Result<(), PatchError> {
    unsafe {
        let page_size = libc::sysconf(libc::_SC_PAGESIZE) as usize;
        let page_mask = !(page_size - 1);
        let page_start = address & page_mask;
        let page_len = ((address + data.len() + page_size - 1) & page_mask) - page_start;
        let offset_in_page = address - page_start;

        let task = mach_task_self();
        let mut remap_addr: u64 = 0;
        let mut cur_prot: i32 = 0;
        let mut max_prot: i32 = 0;

        let kr = mach_vm_remap(
            task,
            &mut remap_addr,
            page_len as u64,
            0,
            VM_FLAGS_ANYWHERE,
            task,
            page_start as u64,
            0, // copy = FALSE: alias the same physical pages
            &mut cur_prot,
            &mut max_prot,
            VM_INHERIT_NONE,
        );

        if kr != KERN_SUCCESS {
            #[cfg(feature = "dev_release")]
            logger::debug("Remap unavailable, fallback");
            return fallback_write(address, data);
        }

        if (max_prot & VM_PROT_WRITE) == 0 {
            mach_vm_deallocate(task, remap_addr, page_len as u64);
            #[cfg(feature = "dev_release")]
            logger::debug("Remap max prot insufficient, fallback");
            return fallback_write(address, data);
        }

        let kr = mach_vm_protect(
            task,
            remap_addr,
            page_len as u64,
            0,
            VM_PROT_READ | VM_PROT_WRITE,
        );

        if kr != KERN_SUCCESS {
            mach_vm_deallocate(task, remap_addr, page_len as u64);
            #[cfg(feature = "dev_release")]
            logger::debug("Remap protect failed, fallback");
            return fallback_write(address, data);
        }

        // Write through the RW alias; the shared mapping makes the bytes
        // visible at the original address.
        let write_addr = remap_addr as usize + offset_in_page;
        ptr::copy_nonoverlapping(data.as_ptr(), write_addr as *mut u8, data.len());

        mach_vm_deallocate(task, remap_addr, page_len as u64);

        invalidate_icache(address as *mut c_void, data.len());

        Ok(())
    }
}

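/// Classic write path: re-protect the pages read/write (`VM_PROT_COPY` forces
/// a copy-on-write mapping where the maximum protection forbids writing),
/// copy the bytes, restore the previous protection, and flush the
/// instruction cache.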
unsafe fn fallback_write(address: usize, data: &[u8]) -> Result<(), PatchError> {
    unsafe {
        use crate::memory::info::protection;

        let page_size = libc::sysconf(libc::_SC_PAGESIZE) as usize;
        let page_mask = !(page_size - 1);
        let page_start = address & page_mask;
        let page_len = ((address + data.len() + page_size - 1) & page_mask) - page_start;

        let original_prot = protection::get_protection(address)
            .map(|p| p.raw())
            .unwrap_or(VM_PROT_READ | VM_PROT_EXECUTE);

        protection::protect(
            page_start,
            page_len,
            protection::PageProtection::from_raw(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COPY),
        )
        .map_err(|e| match e {
            protection::ProtectionError::ProtectionFailed(k) => PatchError::ProtectionFailed(k),
            _ => PatchError::ProtectionFailed(0),
        })?;

        ptr::copy_nonoverlapping(data.as_ptr(), address as *mut u8, data.len());

        let _ = protection::protect(
            page_start,
            page_len,
            protection::PageProtection::from_raw(original_prot),
        );

        invalidate_icache(address as *mut c_void, data.len());

        Ok(())
    }
}

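/// Re-reads the patched range and compares it byte-for-byte. A failed read
/// maps to `!byte`, which can never equal the expected byte, so unreadable
/// memory counts as a verification failure.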
unsafe fn verify_write(address: usize, expected: &[u8]) -> bool {
    unsafe {
        for (i, &byte) in expected.iter().enumerate() {
            if super::rw::read::<u8>(address + i).unwrap_or(!byte) != byte {
                return false;
            }
        }
        true
    }
}

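/// Standard AArch64 self-modifying-code sequence: clean the D-cache lines to
/// the point of unification (`dc cvau`), barrier, invalidate the
/// corresponding I-cache lines (`ic ivau`), barrier again, then `isb` so the
/// core refetches the rewritten instructions.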
#[inline]
pub unsafe fn invalidate_icache(start: *mut c_void, len: usize) {
    unsafe {
        let start_addr = start as usize;
        let end_addr = start_addr + len;
        let mut addr = start_addr & !(CACHE_LINE_SIZE - 1);

        while addr < end_addr {
            asm!("dc cvau, {x}", x = in(reg) addr, options(nostack, preserves_flags));
            addr += CACHE_LINE_SIZE;
        }
        asm!("dsb ish", options(nostack, preserves_flags));

        addr = start_addr & !(CACHE_LINE_SIZE - 1);
        while addr < end_addr {
            asm!("ic ivau, {x}", x = in(reg) addr, options(nostack, preserves_flags));
            addr += CACHE_LINE_SIZE;
        }
        asm!("dsb ish", options(nostack, preserves_flags));
        asm!("isb", options(nostack, preserves_flags));
    }
}