use super::driver::DriverHandle;
use super::process::ProcessOps;
use super::{ClientError, ClientResult};
6
/// Memory protection flags for remote allocations.
///
/// The values mirror the Windows `PAGE_*` protection constants
/// (e.g. `READONLY` == `PAGE_READONLY` == 0x02), so they can be passed
/// straight through to the kernel driver.
///
/// The module name is deliberately `UpperCamelCase` so call sites read like
/// an enum (`MemoryProtection::READWRITE`); the `allow` silences the
/// `non_snake_case` lint without renaming, which would break existing callers.
#[allow(non_snake_case)]
pub mod MemoryProtection {
    pub const NOACCESS: u32 = 0x01;
    pub const READONLY: u32 = 0x02;
    pub const READWRITE: u32 = 0x04;
    pub const WRITECOPY: u32 = 0x08;
    pub const EXECUTE: u32 = 0x10;
    pub const EXECUTE_READ: u32 = 0x20;
    pub const EXECUTE_READWRITE: u32 = 0x40;
    pub const EXECUTE_WRITECOPY: u32 = 0x80;
    pub const GUARD: u32 = 0x100;
    pub const NOCACHE: u32 = 0x200;
}
20
/// RAII wrapper around a block of memory allocated in a remote process.
///
/// The region is released via `ProcessOps::free` when this value is dropped,
/// unless ownership is given up with [`RemoteMemory::leak`].
pub struct RemoteMemory<'a> {
    // Handle used for every read/write/protect/free on the region.
    process: &'a ProcessOps<'a>,
    // Base address of the allocation inside the remote process.
    address: u64,
    // Size of the allocation in bytes; all bounds checks use this.
    size: u64,
}
27
28impl<'a> RemoteMemory<'a> {
29 pub fn allocate(
31 process: &'a ProcessOps<'a>,
32 size: u64,
33 protection: u32,
34 ) -> ClientResult<Self> {
35 let address = process.allocate(size, protection)?;
36 Ok(Self {
37 process,
38 address,
39 size,
40 })
41 }
42
43 pub fn allocate_at(
45 process: &'a ProcessOps<'a>,
46 address: u64,
47 size: u64,
48 protection: u32,
49 ) -> ClientResult<Self> {
50 let address = process.allocate_at(address, size, protection)?;
51 Ok(Self {
52 process,
53 address,
54 size,
55 })
56 }
57
58 pub fn address(&self) -> u64 {
60 self.address
61 }
62
63 pub fn size(&self) -> u64 {
65 self.size
66 }
67
68 pub fn write(&self, data: &[u8]) -> ClientResult<()> {
70 if data.len() as u64 > self.size {
71 return Err(ClientError::BufferTooSmall {
72 required: data.len(),
73 provided: self.size as usize,
74 });
75 }
76 self.process.write_bytes(self.address, data)
77 }
78
79 pub fn write_value<T: Copy>(&self, value: &T) -> ClientResult<()> {
81 if std::mem::size_of::<T>() as u64 > self.size {
82 return Err(ClientError::BufferTooSmall {
83 required: std::mem::size_of::<T>(),
84 provided: self.size as usize,
85 });
86 }
87 self.process.write(self.address, value)
88 }
89
90 pub fn write_at(&self, offset: u64, data: &[u8]) -> ClientResult<()> {
92 if offset + data.len() as u64 > self.size {
93 return Err(ClientError::BufferTooSmall {
94 required: (offset + data.len() as u64) as usize,
95 provided: self.size as usize,
96 });
97 }
98 self.process.write_bytes(self.address + offset, data)
99 }
100
101 pub fn read(&self, size: usize) -> ClientResult<Vec<u8>> {
103 if size as u64 > self.size {
104 return Err(ClientError::BufferTooSmall {
105 required: size,
106 provided: self.size as usize,
107 });
108 }
109 self.process.read_bytes(self.address, size)
110 }
111
112 pub fn read_value<T: Copy>(&self) -> ClientResult<T> {
114 if std::mem::size_of::<T>() as u64 > self.size {
115 return Err(ClientError::BufferTooSmall {
116 required: std::mem::size_of::<T>(),
117 provided: self.size as usize,
118 });
119 }
120 self.process.read(self.address)
121 }
122
123 pub fn read_at(&self, offset: u64, size: usize) -> ClientResult<Vec<u8>> {
125 if offset + size as u64 > self.size {
126 return Err(ClientError::BufferTooSmall {
127 required: (offset + size as u64) as usize,
128 provided: self.size as usize,
129 });
130 }
131 self.process.read_bytes(self.address + offset, size)
132 }
133
134 pub fn protect(&self, protection: u32) -> ClientResult<u32> {
136 self.process.protect(self.address, self.size, protection)
137 }
138
139 pub fn leak(self) -> u64 {
141 let addr = self.address;
142 std::mem::forget(self);
143 addr
144 }
145}
146
147impl<'a> Drop for RemoteMemory<'a> {
148 fn drop(&mut self) {
149 let _ = self.process.free(self.address);
150 }
151}
152
/// Pattern scanner over a remote process's memory, reading it in chunks
/// through `ProcessOps::read_bytes`.
pub struct MemoryScanner<'a> {
    // Handle used to read the target process's memory.
    process: &'a ProcessOps<'a>,
}
157
158impl<'a> MemoryScanner<'a> {
159 pub fn new(process: &'a ProcessOps<'a>) -> Self {
161 Self { process }
162 }
163
164 pub fn scan_range(
166 &self,
167 start: u64,
168 size: usize,
169 pattern: &[u8],
170 mask: &[u8],
171 ) -> ClientResult<Vec<u64>> {
172 let mut results = Vec::new();
173
174 const CHUNK_SIZE: usize = 0x10000;
176 let mut offset = 0;
177
178 while offset < size {
179 let chunk_size = std::cmp::min(CHUNK_SIZE, size - offset);
180 let chunk = self.process.read_bytes(start + offset as u64, chunk_size)?;
181
182 for i in 0..chunk.len().saturating_sub(pattern.len()) {
184 let mut matched = true;
185 for (j, (&p, &m)) in pattern.iter().zip(mask.iter()).enumerate() {
186 if m != 0 && chunk[i + j] != p {
187 matched = false;
188 break;
189 }
190 }
191 if matched {
192 results.push(start + offset as u64 + i as u64);
193 }
194 }
195
196 offset += chunk_size - pattern.len(); }
198
199 Ok(results)
200 }
201
202 pub fn scan_bytes(&self, start: u64, size: usize, bytes: &[u8]) -> ClientResult<Vec<u64>> {
204 let mask = vec![0xFF; bytes.len()];
205 self.scan_range(start, size, bytes, &mask)
206 }
207
208 pub fn scan_ida_pattern(&self, start: u64, size: usize, pattern: &str) -> ClientResult<Vec<u64>> {
210 let (bytes, mask) = parse_ida_pattern(pattern)?;
211 self.scan_range(start, size, &bytes, &mask)
212 }
213}
214
215fn parse_ida_pattern(pattern: &str) -> ClientResult<(Vec<u8>, Vec<u8>)> {
217 let parts: Vec<&str> = pattern.split_whitespace().collect();
218 let mut bytes = Vec::with_capacity(parts.len());
219 let mut mask = Vec::with_capacity(parts.len());
220
221 for part in parts {
222 if part == "?" || part == "??" {
223 bytes.push(0);
224 mask.push(0);
225 } else {
226 let byte = u8::from_str_radix(part, 16)
227 .map_err(|_| ClientError::MemoryError {
228 address: 0,
229 reason: format!("invalid pattern byte: {}", part),
230 })?;
231 bytes.push(byte);
232 mask.push(0xFF);
233 }
234 }
235
236 Ok((bytes, mask))
237}
238
/// RAII guard that temporarily changes the protection of a remote memory
/// range and restores the previous protection on drop.
pub struct ProtectionGuard<'a> {
    // Handle used to apply/restore the protection.
    process: &'a ProcessOps<'a>,
    // Start of the protected range in the remote process.
    address: u64,
    // Length of the protected range in bytes.
    size: u64,
    // Protection flags in effect before the guard was created; restored on drop.
    old_protection: u32,
}
246
247impl<'a> ProtectionGuard<'a> {
248 pub fn new(
250 process: &'a ProcessOps<'a>,
251 address: u64,
252 size: u64,
253 new_protection: u32,
254 ) -> ClientResult<Self> {
255 let old_protection = process.protect(address, size, new_protection)?;
256 Ok(Self {
257 process,
258 address,
259 size,
260 old_protection,
261 })
262 }
263}
264
265impl<'a> Drop for ProtectionGuard<'a> {
266 fn drop(&mut self) {
267 let _ = self.process.protect(self.address, self.size, self.old_protection);
268 }
269}