wraith/manipulation/inline_hook/trampoline/allocator.rs

//! Executable memory allocation for trampolines
//!
//! Provides RAII-managed executable memory regions for storing
//! hook trampolines and stub code.
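//!
//! A minimal usage sketch (the target address and stub bytes below are
//! placeholders, not values produced by this crate):
//!
//! ```ignore
//! let target: usize = 0x7FF6_1234_0000; // hypothetical hook target
//! let mut mem = ExecutableMemory::allocate_near(target, 0x1000)?;
//! let trampoline = mem.write(&[0x90, 0xC3])?; // nop; ret
//! mem.flush_icache()?;
//! assert!(mem.contains(trampoline));
//! ```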

use crate::error::{Result, WraithError};

// memory allocation constants
const MEM_COMMIT: u32 = 0x1000;
const MEM_RESERVE: u32 = 0x2000;
const MEM_RELEASE: u32 = 0x8000;
const PAGE_EXECUTE_READWRITE: u32 = 0x40;

/// executable memory region for trampolines
///
/// automatically freed when dropped
pub struct ExecutableMemory {
    base: *mut u8,
    size: usize,
    used: usize,
}

impl ExecutableMemory {
    /// allocate executable memory near a target address
    ///
    /// tries to allocate within ±2GB of target for relative jumps.
    /// falls back to any available address if near allocation fails.
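    ///
    /// as a point of reference (not enforced here): a 5-byte `jmp rel32`
    /// placed at `src` can only reach `dst` when `dst - (src + 5)` fits
    /// in an `i32`, which is why near allocation matters for relative jumps.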
    pub fn allocate_near(target: usize, size: usize) -> Result<Self> {
        // round size up to page boundary
        let size = (size + 0xFFF) & !0xFFF;

        // on x64, try to allocate within ±2GB for rel32 jumps
        #[cfg(target_arch = "x86_64")]
        {
            if let Some(mem) = try_allocate_near_x64(target, size) {
                return Ok(mem);
            }
        }

        // fall back to allocation anywhere
        Self::allocate(size)
    }

    /// allocate executable memory at any available address
    pub fn allocate(size: usize) -> Result<Self> {
        let size = (size + 0xFFF) & !0xFFF;

        let base = unsafe {
            VirtualAlloc(
                core::ptr::null_mut(),
                size,
                MEM_COMMIT | MEM_RESERVE,
                PAGE_EXECUTE_READWRITE,
            )
        };

        if base.is_null() {
            return Err(WraithError::AllocationFailed {
                size,
                protection: PAGE_EXECUTE_READWRITE,
            });
        }

        // fill with INT3 (0xCC) so any stray execution of unused space traps
        // SAFETY: base is valid for size bytes
        unsafe {
            core::ptr::write_bytes(base, 0xCC, size);
        }

        Ok(Self {
            base: base as *mut u8,
            size,
            used: 0,
        })
    }

    /// get base address
    pub fn base(&self) -> usize {
        self.base as usize
    }

    /// get total allocated size
    pub fn size(&self) -> usize {
        self.size
    }

    /// get used bytes
    pub fn used(&self) -> usize {
        self.used
    }

    /// get available bytes
    pub fn available(&self) -> usize {
        self.size - self.used
    }

    /// write code to the memory region
    ///
    /// returns the address where the code was written
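    ///
    /// a quick sketch (hypothetical stub bytes, x86-64):
    ///
    /// ```ignore
    /// let stub = [0x48, 0x31, 0xC0, 0xC3]; // xor rax, rax; ret
    /// let stub_addr = mem.write(&stub)?;
    /// ```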
    pub fn write(&mut self, code: &[u8]) -> Result<usize> {
        if code.len() > self.available() {
            return Err(WraithError::WriteFailed {
                address: self.base as u64,
                size: code.len(),
            });
        }

        let write_addr = self.base as usize + self.used;

        // SAFETY: bounds checked, we own the memory
        unsafe {
            core::ptr::copy_nonoverlapping(code.as_ptr(), write_addr as *mut u8, code.len());
        }

        self.used += code.len();

        Ok(write_addr)
    }

    /// get pointer at offset
    ///
    /// the caller must keep `offset` inside the region before dereferencing
    pub fn ptr_at(&self, offset: usize) -> *mut u8 {
        // wrapping_add keeps the offset arithmetic free of UB for an
        // out-of-range offset; dereferencing remains the caller's concern
        self.base.wrapping_add(offset)
    }

    /// read bytes from offset
    pub fn read_at(&self, offset: usize, len: usize) -> Result<&[u8]> {
        // checked_add so the bounds test cannot be defeated by overflow
        if offset.checked_add(len).map_or(true, |end| end > self.size) {
            return Err(WraithError::ReadFailed {
                address: (self.base as usize + offset) as u64,
                size: len,
            });
        }

        // SAFETY: bounds checked
        Ok(unsafe { core::slice::from_raw_parts(self.base.add(offset), len) })
    }

    /// check if an address is within this memory region
    pub fn contains(&self, addr: usize) -> bool {
        addr >= self.base as usize && addr < (self.base as usize + self.size)
    }

    /// check if this memory is within rel32 range of target
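    ///
    /// note: the distance is measured from the region base, so a stub
    /// written deep into a large region sits slightly farther from
    /// `target` than this check accounts for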
    pub fn is_near(&self, target: usize) -> bool {
        let base = self.base as usize;
        let distance = if base > target {
            base - target
        } else {
            target - base
        };
        distance <= i32::MAX as usize
    }

    /// flush instruction cache for this region
    pub fn flush_icache(&self) -> Result<()> {
        let result = unsafe {
            FlushInstructionCache(
                GetCurrentProcess(),
                self.base as *const _,
                self.size,
            )
        };

        if result == 0 {
            Err(WraithError::from_last_error("FlushInstructionCache"))
        } else {
            Ok(())
        }
    }

    /// take ownership without freeing
    pub fn leak(self) -> *mut u8 {
        let ptr = self.base;
        core::mem::forget(self);
        ptr
    }
}

impl Drop for ExecutableMemory {
    fn drop(&mut self) {
        // SAFETY: self.base was allocated with VirtualAlloc
        unsafe {
            VirtualFree(self.base as *mut _, 0, MEM_RELEASE);
        }
    }
}

// SAFETY: the region is exclusively owned and only mutated through &mut self,
// so it is sound to send between threads and to share by reference
unsafe impl Send for ExecutableMemory {}
unsafe impl Sync for ExecutableMemory {}

/// try to allocate near target on x64
#[cfg(target_arch = "x86_64")]
fn try_allocate_near_x64(target: usize, size: usize) -> Option<ExecutableMemory> {
    // try addresses within ±2GB of target
    // search in 64KB increments (allocation granularity)
    const GRANULARITY: usize = 0x10000;
    const SEARCH_RANGE: i64 = 0x7FFF0000; // slightly less than 2GB

    let target_i64 = target as i64;

    // try below target first (often has more free space)
    let mut addr = (target_i64 - SEARCH_RANGE).max(0x10000) as usize;
    addr &= !(GRANULARITY - 1); // align down

    while (addr as i64) < target_i64 {
        let ptr = unsafe {
            VirtualAlloc(
                addr as *mut _,
                size,
                MEM_COMMIT | MEM_RESERVE,
                PAGE_EXECUTE_READWRITE,
            )
        };

        if !ptr.is_null() {
            // verify it's actually within range
            let distance = (target as i64 - ptr as i64).abs();
            if distance <= i32::MAX as i64 {
                unsafe {
                    core::ptr::write_bytes(ptr, 0xCC, size);
                }
                return Some(ExecutableMemory {
                    base: ptr as *mut u8,
                    size,
                    used: 0,
                });
            }
            // wrong location, free and try next
            unsafe {
                VirtualFree(ptr, 0, MEM_RELEASE);
            }
        }
        addr += GRANULARITY;
    }

    // try above target
    addr = ((target_i64 + 0x10000) & !(GRANULARITY as i64 - 1)) as usize;
    let max_addr = (target_i64 + SEARCH_RANGE) as usize;

    while addr < max_addr {
        let ptr = unsafe {
            VirtualAlloc(
                addr as *mut _,
                size,
                MEM_COMMIT | MEM_RESERVE,
                PAGE_EXECUTE_READWRITE,
            )
        };

        if !ptr.is_null() {
            let distance = (target as i64 - ptr as i64).abs();
            if distance <= i32::MAX as i64 {
                unsafe {
                    core::ptr::write_bytes(ptr, 0xCC, size);
                }
                return Some(ExecutableMemory {
                    base: ptr as *mut u8,
                    size,
                    used: 0,
                });
            }
            unsafe {
                VirtualFree(ptr, 0, MEM_RELEASE);
            }
        }
        addr += GRANULARITY;
    }

    None
}

#[link(name = "kernel32")]
extern "system" {
    fn VirtualAlloc(
        lpAddress: *mut core::ffi::c_void,
        dwSize: usize,
        flAllocationType: u32,
        flProtect: u32,
    ) -> *mut core::ffi::c_void;

    fn VirtualFree(lpAddress: *mut core::ffi::c_void, dwSize: usize, dwFreeType: u32) -> i32;

    fn FlushInstructionCache(
        hProcess: *mut core::ffi::c_void,
        lpBaseAddress: *const core::ffi::c_void,
        dwSize: usize,
    ) -> i32;

    fn GetCurrentProcess() -> *mut core::ffi::c_void;
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_allocate() {
        let mem = ExecutableMemory::allocate(0x1000).unwrap();
        assert!(mem.base() != 0);
        assert!(mem.size() >= 0x1000);
        assert_eq!(mem.used(), 0);
    }

    #[test]
    fn test_write() {
        let mut mem = ExecutableMemory::allocate(0x1000).unwrap();
        let code = [0x90, 0x90, 0x90, 0xC3]; // nop; nop; nop; ret

        let addr = mem.write(&code).unwrap();
        assert_eq!(addr, mem.base());
        assert_eq!(mem.used(), 4);

        let read = mem.read_at(0, 4).unwrap();
        assert_eq!(read, &code);
    }

    #[test]
    fn test_contains() {
        let mem = ExecutableMemory::allocate(0x1000).unwrap();
        assert!(mem.contains(mem.base()));
        assert!(mem.contains(mem.base() + 0x500));
        assert!(!mem.contains(mem.base() + 0x2000));
    }
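
    // a hedged sketch of near allocation: allocate_near may fall back to a
    // far region when nothing near is free, so the rel32 distance is only
    // checked when the region reports itself as near
    #[test]
    fn test_allocate_near() {
        let local = 0u8;
        let target = &local as *const u8 as usize;
        let mem = ExecutableMemory::allocate_near(target, 0x1000).unwrap();
        assert!(mem.base() != 0);
        if mem.is_near(target) {
            let distance = (mem.base() as i64 - target as i64).unsigned_abs();
            assert!(distance <= i32::MAX as u64);
        }
    }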
}