wraith/manipulation/inline_hook/trampoline/allocator.rs

use crate::error::{Result, WraithError};

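// Win32 memory-management constants, mirrored from winnt.h.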
const MEM_COMMIT: u32 = 0x1000;
const MEM_RESERVE: u32 = 0x2000;
const MEM_RELEASE: u32 = 0x8000;
const PAGE_EXECUTE_READWRITE: u32 = 0x40;

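/// An owned region of RWX memory used as a bump-allocated trampoline arena:
/// code is appended with `write`, and the whole region is freed on drop
/// unless `leak` is called.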
pub struct ExecutableMemory {
    base: *mut u8,
    size: usize,
    used: usize,
}

impl ExecutableMemory {
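    /// Tries to place the region within rel32 (±2 GiB) range of `target` so
    /// a 5-byte `jmp rel32` hook can reach it. Falls back to an
    /// unconstrained allocation, so callers that require proximity should
    /// verify the result with `is_near`.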
    pub fn allocate_near(target: usize, size: usize) -> Result<Self> {
        // Round the request up to a whole 4 KiB page.
        let size = (size + 0xFFF) & !0xFFF;

        #[cfg(target_arch = "x86_64")]
        {
            if let Some(mem) = try_allocate_near_x64(target, size) {
                return Ok(mem);
            }
        }

        Self::allocate(size)
    }

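    /// Allocates an RWX region anywhere in the address space, pre-filled
    /// with `0xCC` (INT3) so stray execution traps immediately.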
    pub fn allocate(size: usize) -> Result<Self> {
        let size = (size + 0xFFF) & !0xFFF;

        let base = unsafe {
            VirtualAlloc(
                core::ptr::null_mut(),
                size,
                MEM_COMMIT | MEM_RESERVE,
                PAGE_EXECUTE_READWRITE,
            )
        };

        if base.is_null() {
            return Err(WraithError::AllocationFailed {
                size,
                protection: PAGE_EXECUTE_READWRITE,
            });
        }

        // Poison the fresh region with INT3 breakpoints.
        unsafe {
            core::ptr::write_bytes(base as *mut u8, 0xCC, size);
        }

        Ok(Self {
            base: base as *mut u8,
            size,
            used: 0,
        })
    }

    pub fn base(&self) -> usize {
        self.base as usize
    }

    pub fn size(&self) -> usize {
        self.size
    }

    pub fn used(&self) -> usize {
        self.used
    }

    pub fn available(&self) -> usize {
        self.size - self.used
    }

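    /// Appends `code` at the current bump cursor and returns the absolute
    /// address it was written to.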
    pub fn write(&mut self, code: &[u8]) -> Result<usize> {
        let write_addr = self.base as usize + self.used;

        if code.len() > self.available() {
            return Err(WraithError::WriteFailed {
                address: write_addr as u64,
                size: code.len(),
            });
        }

        unsafe {
            core::ptr::copy_nonoverlapping(code.as_ptr(), write_addr as *mut u8, code.len());
        }

        self.used += code.len();

        Ok(write_addr)
    }

    pub fn ptr_at(&self, offset: usize) -> *mut u8 {
        unsafe { self.base.add(offset) }
    }

    pub fn read_at(&self, offset: usize, len: usize) -> Result<&[u8]> {
        // checked_add guards the bounds test against wrap-around.
        if offset.checked_add(len).map_or(true, |end| end > self.size) {
            return Err(WraithError::ReadFailed {
                address: (self.base as usize + offset) as u64,
                size: len,
            });
        }

        Ok(unsafe { core::slice::from_raw_parts(self.base.add(offset), len) })
    }

    pub fn contains(&self, addr: usize) -> bool {
        addr >= self.base as usize && addr < (self.base as usize + self.size)
    }

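    /// Returns `true` when `target` is within ±2 GiB of the base, i.e.
    /// approximately reachable by a rel32 jump from this region.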
    pub fn is_near(&self, target: usize) -> bool {
        let base = self.base as usize;
        let distance = if base > target {
            base - target
        } else {
            target - base
        };
        distance <= i32::MAX as usize
    }

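    /// Flushes the CPU instruction cache over the whole region. Hardware
    /// keeps instruction caches coherent on x86/x64, so this is largely a
    /// formality there, but it is the documented convention after writing
    /// code at runtime.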
    pub fn flush_icache(&self) -> Result<()> {
        let result = unsafe {
            FlushInstructionCache(
                GetCurrentProcess(),
                self.base as *const _,
                self.size,
            )
        };

        if result == 0 {
            Err(WraithError::from_last_error("FlushInstructionCache"))
        } else {
            Ok(())
        }
    }

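    /// Releases ownership without freeing the mapping, for trampolines that
    /// must outlive the allocator once a hook is installed.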
    pub fn leak(self) -> *mut u8 {
        let ptr = self.base;
        core::mem::forget(self);
        ptr
    }
}

impl Drop for ExecutableMemory {
    fn drop(&mut self) {
        unsafe {
            // dwSize must be 0 when freeing with MEM_RELEASE.
            VirtualFree(self.base as *mut _, 0, MEM_RELEASE);
        }
    }
}

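// SAFETY: the allocator exclusively owns its mapping, and all mutation goes
// through `&mut self`, so moving the handle between threads or sharing
// `&self` does not by itself introduce data races.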
unsafe impl Send for ExecutableMemory {}
unsafe impl Sync for ExecutableMemory {}

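/// Probes the ±2 GiB window around `target` in 64 KiB allocation-granularity
/// steps, below the target first and then above, returning the first mapping
/// that lands within rel32 range.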
#[cfg(target_arch = "x86_64")]
fn try_allocate_near_x64(target: usize, size: usize) -> Option<ExecutableMemory> {
    // Windows reserves memory on 64 KiB allocation-granularity boundaries.
    const GRANULARITY: usize = 0x10000;
    // Stay just inside the ±2 GiB reach of a rel32 displacement.
    const SEARCH_RANGE: i64 = 0x7FFF0000;

    let target_i64 = target as i64;

    // First pass: scan the window below the target.
    let mut addr = (target_i64 - SEARCH_RANGE).max(0x10000) as usize;
    addr &= !(GRANULARITY - 1);

    while (addr as i64) < target_i64 {
        let ptr = unsafe {
            VirtualAlloc(
                addr as *mut _,
                size,
                MEM_COMMIT | MEM_RESERVE,
                PAGE_EXECUTE_READWRITE,
            )
        };

        if !ptr.is_null() {
            let distance = (target as i64 - ptr as i64).abs();
            if distance <= i32::MAX as i64 {
                unsafe {
                    core::ptr::write_bytes(ptr as *mut u8, 0xCC, size);
                }
                return Some(ExecutableMemory {
                    base: ptr as *mut u8,
                    size,
                    used: 0,
                });
            }
            // Mapped, but outside rel32 range: release and keep scanning.
            unsafe {
                VirtualFree(ptr, 0, MEM_RELEASE);
            }
        }
        addr += GRANULARITY;
    }

    // Second pass: scan the window above the target.
    addr = ((target_i64 + 0x10000) & !(GRANULARITY as i64 - 1)) as usize;
    let max_addr = (target_i64 + SEARCH_RANGE) as usize;

    while addr < max_addr {
        let ptr = unsafe {
            VirtualAlloc(
                addr as *mut _,
                size,
                MEM_COMMIT | MEM_RESERVE,
                PAGE_EXECUTE_READWRITE,
            )
        };

        if !ptr.is_null() {
            let distance = (target as i64 - ptr as i64).abs();
            if distance <= i32::MAX as i64 {
                unsafe {
                    core::ptr::write_bytes(ptr as *mut u8, 0xCC, size);
                }
                return Some(ExecutableMemory {
                    base: ptr as *mut u8,
                    size,
                    used: 0,
                });
            }
            unsafe {
                VirtualFree(ptr, 0, MEM_RELEASE);
            }
        }
        addr += GRANULARITY;
    }

    None
}

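// Hand-rolled kernel32 bindings, keeping the module free of external
// Windows API crates.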
#[link(name = "kernel32")]
extern "system" {
    fn VirtualAlloc(
        lpAddress: *mut core::ffi::c_void,
        dwSize: usize,
        flAllocationType: u32,
        flProtect: u32,
    ) -> *mut core::ffi::c_void;

    fn VirtualFree(lpAddress: *mut core::ffi::c_void, dwSize: usize, dwFreeType: u32) -> i32;

    fn FlushInstructionCache(
        hProcess: *mut core::ffi::c_void,
        lpBaseAddress: *const core::ffi::c_void,
        dwSize: usize,
    ) -> i32;

    fn GetCurrentProcess() -> *mut core::ffi::c_void;
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_allocate() {
        let mem = ExecutableMemory::allocate(0x1000).unwrap();
        assert!(mem.base() != 0);
        assert!(mem.size() >= 0x1000);
        assert_eq!(mem.used(), 0);
    }

    #[test]
    fn test_write() {
        let mut mem = ExecutableMemory::allocate(0x1000).unwrap();
        let code = [0x90, 0x90, 0x90, 0xC3]; // nop; nop; nop; ret
        let addr = mem.write(&code).unwrap();
        assert_eq!(addr, mem.base());
        assert_eq!(mem.used(), 4);

        let read = mem.read_at(0, 4).unwrap();
        assert_eq!(read, &code);
    }

    #[test]
    fn test_contains() {
        let mem = ExecutableMemory::allocate(0x1000).unwrap();
        assert!(mem.contains(mem.base()));
        assert!(mem.contains(mem.base() + 0x500));
        assert!(!mem.contains(mem.base() + 0x2000));
    }
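
    // A minimal sketch of the near-allocation path, using this test
    // function's own address as the target. Because `allocate_near` may
    // fall back to a far region, the test only checks that `is_near`
    // agrees with the actual measured distance.
    #[test]
    fn test_allocate_near() {
        let target = test_allocate_near as usize;
        let mem = ExecutableMemory::allocate_near(target, 0x1000).unwrap();
        let distance = (mem.base() as i64 - target as i64).abs() as usize;
        assert_eq!(mem.is_near(target), distance <= i32::MAX as usize);
    }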
}