// wraith/manipulation/inline_hook/asm/encoder.rs

//! Instruction encoding utilities
//!
//! Provides a builder-style API for constructing x86/x64 instruction sequences.

/// Instruction encoder with a builder-style API.
///
/// Bytes are appended to an internal growable buffer; every emit method
/// returns `&mut Self` so calls can be chained.
#[derive(Debug, Clone)]
pub struct Encoder {
    // Encoded machine-code bytes, in emission order.
    buffer: Vec<u8>,
}

impl Encoder {
    /// Create a new empty encoder.
    ///
    /// Pre-allocates 64 bytes, enough for typical hook stubs without a
    /// reallocation.
    pub fn new() -> Self {
        Self {
            buffer: Vec::with_capacity(64),
        }
    }

    /// Create an encoder with a caller-chosen pre-allocated capacity.
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            buffer: Vec::with_capacity(capacity),
        }
    }

    /// Borrow the encoded bytes.
    #[must_use]
    pub fn bytes(&self) -> &[u8] {
        &self.buffer
    }

    /// Consume the encoder and return the encoded bytes.
    #[must_use]
    pub fn into_bytes(self) -> Vec<u8> {
        self.buffer
    }

    /// Current encoded length in bytes.
    #[must_use]
    pub fn len(&self) -> usize {
        self.buffer.len()
    }

    /// True if nothing has been encoded yet.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }

    /// Discard everything encoded so far (capacity is retained).
    pub fn clear(&mut self) {
        self.buffer.clear();
    }

    /// Append raw bytes verbatim.
    pub fn raw(&mut self, bytes: &[u8]) -> &mut Self {
        self.buffer.extend_from_slice(bytes);
        self
    }

    /// Append a single byte verbatim.
    pub fn byte(&mut self, b: u8) -> &mut Self {
        self.buffer.push(b);
        self
    }

    // === x86/x64 common instructions ===

    /// NOP (single byte, `90`).
    pub fn nop(&mut self) -> &mut Self {
        self.buffer.push(0x90);
        self
    }

    /// Emit exactly `count` bytes of padding using multi-byte NOP
    /// encodings (the recommended 1- to 8-byte forms from the Intel SDM),
    /// which decode more efficiently than a run of single-byte `90`s.
    pub fn nop_sled(&mut self, count: usize) -> &mut Self {
        // NOPS[n] is the canonical n-byte NOP encoding, for n in 1..=8.
        const NOPS: [&[u8]; 9] = [
            &[],
            &[0x90],
            &[0x66, 0x90],
            &[0x0F, 0x1F, 0x00],
            &[0x0F, 0x1F, 0x40, 0x00],
            &[0x0F, 0x1F, 0x44, 0x00, 0x00],
            &[0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00],
            &[0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00],
            &[0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00],
        ];
        let mut remaining = count;
        while remaining > 0 {
            let chunk = remaining.min(8);
            self.buffer.extend_from_slice(NOPS[chunk]);
            remaining -= chunk;
        }
        self
    }

    /// INT3 breakpoint (`CC`).
    pub fn int3(&mut self) -> &mut Self {
        self.buffer.push(0xCC);
        self
    }

    /// RET, near return (`C3`).
    pub fn ret(&mut self) -> &mut Self {
        self.buffer.push(0xC3);
        self
    }

    /// PUSH imm32 (`68 id`, 5 bytes).
    pub fn push_imm32(&mut self, value: u32) -> &mut Self {
        self.buffer.push(0x68);
        self.buffer.extend_from_slice(&value.to_le_bytes());
        self
    }

    /// PUSH imm8, sign-extended by the CPU (`6A ib`, 2 bytes).
    pub fn push_imm8(&mut self, value: i8) -> &mut Self {
        self.buffer.push(0x6A);
        self.buffer.push(value as u8);
        self
    }

    /// JMP rel32 (`E9 cd`, 5 bytes).
    ///
    /// The displacement is relative to the end of the instruction
    /// (`from + 5`). Debug builds assert that it fits in a signed 32-bit
    /// offset; in release an out-of-range target would silently truncate,
    /// so callers branching across more than ±2 GiB must use an
    /// absolute-jump form instead (e.g. [`Self::jmp_abs64`] on x64).
    pub fn jmp_rel32(&mut self, from: usize, to: usize) -> &mut Self {
        let disp = to as i64 - from as i64 - 5;
        debug_assert!(
            i32::try_from(disp).is_ok(),
            "jmp_rel32 displacement out of i32 range: {disp:#x}"
        );
        self.buffer.push(0xE9);
        self.buffer.extend_from_slice(&(disp as i32).to_le_bytes());
        self
    }

    /// JMP rel8 (`EB cb`, 2 bytes); the caller supplies the displacement
    /// relative to the end of the instruction.
    pub fn jmp_rel8(&mut self, offset: i8) -> &mut Self {
        self.buffer.push(0xEB);
        self.buffer.push(offset as u8);
        self
    }

    /// CALL rel32 (`E8 cd`, 5 bytes). Same displacement rules as
    /// [`Self::jmp_rel32`].
    pub fn call_rel32(&mut self, from: usize, to: usize) -> &mut Self {
        let disp = to as i64 - from as i64 - 5;
        debug_assert!(
            i32::try_from(disp).is_ok(),
            "call_rel32 displacement out of i32 range: {disp:#x}"
        );
        self.buffer.push(0xE8);
        self.buffer.extend_from_slice(&(disp as i32).to_le_bytes());
        self
    }

    // === x64-specific instructions ===

    /// JMP [RIP+0] followed by the absolute 64-bit target (14 bytes, x64).
    /// Reaches any address; use when the target may be >±2 GiB away.
    #[cfg(target_arch = "x86_64")]
    pub fn jmp_abs64(&mut self, target: u64) -> &mut Self {
        // FF 25 00000000 = jmp qword ptr [rip+0]; the target qword is
        // stored inline, immediately after the instruction.
        self.buffer
            .extend_from_slice(&[0xFF, 0x25, 0x00, 0x00, 0x00, 0x00]);
        self.buffer.extend_from_slice(&target.to_le_bytes());
        self
    }

    /// MOV RAX, imm64 (10 bytes, x64). Clobbers RAX.
    #[cfg(target_arch = "x86_64")]
    pub fn mov_rax_imm64(&mut self, value: u64) -> &mut Self {
        // 48 B8 = REX.W MOV RAX, imm64
        self.buffer.extend_from_slice(&[0x48, 0xB8]);
        self.buffer.extend_from_slice(&value.to_le_bytes());
        self
    }

    /// JMP RAX (2 bytes, x64).
    #[cfg(target_arch = "x86_64")]
    pub fn jmp_rax(&mut self) -> &mut Self {
        // FF E0 = jmp rax
        self.buffer.extend_from_slice(&[0xFF, 0xE0]);
        self
    }

    /// CALL RAX (2 bytes, x64).
    #[cfg(target_arch = "x86_64")]
    pub fn call_rax(&mut self) -> &mut Self {
        // FF D0 = call rax
        self.buffer.extend_from_slice(&[0xFF, 0xD0]);
        self
    }

    /// CALL [RIP+0] followed by the absolute 64-bit target (14 bytes, x64).
    #[cfg(target_arch = "x86_64")]
    pub fn call_abs64(&mut self, target: u64) -> &mut Self {
        // FF 15 00000000 = call qword ptr [rip+0]; target stored inline.
        self.buffer
            .extend_from_slice(&[0xFF, 0x15, 0x00, 0x00, 0x00, 0x00]);
        self.buffer.extend_from_slice(&target.to_le_bytes());
        self
    }

    /// PUSH RAX (`50`, x64).
    #[cfg(target_arch = "x86_64")]
    pub fn push_rax(&mut self) -> &mut Self {
        self.buffer.push(0x50);
        self
    }

    /// POP RAX (`58`, x64).
    #[cfg(target_arch = "x86_64")]
    pub fn pop_rax(&mut self) -> &mut Self {
        self.buffer.push(0x58);
        self
    }

    /// SUB RSP, imm8 (x64); imm8 is sign-extended by the CPU.
    #[cfg(target_arch = "x86_64")]
    pub fn sub_rsp_imm8(&mut self, value: i8) -> &mut Self {
        // 48 83 EC ib
        self.buffer.extend_from_slice(&[0x48, 0x83, 0xEC]);
        self.buffer.push(value as u8);
        self
    }

    /// ADD RSP, imm8 (x64); imm8 is sign-extended by the CPU.
    #[cfg(target_arch = "x86_64")]
    pub fn add_rsp_imm8(&mut self, value: i8) -> &mut Self {
        // 48 83 C4 ib
        self.buffer.extend_from_slice(&[0x48, 0x83, 0xC4]);
        self.buffer.push(value as u8);
        self
    }

    // === x86-specific instructions ===

    /// PUSH EAX (`50`, x86).
    #[cfg(target_arch = "x86")]
    pub fn push_eax(&mut self) -> &mut Self {
        self.buffer.push(0x50);
        self
    }

    /// POP EAX (`58`, x86).
    #[cfg(target_arch = "x86")]
    pub fn pop_eax(&mut self) -> &mut Self {
        self.buffer.push(0x58);
        self
    }

    /// PUSH EBP (`55`, x86).
    #[cfg(target_arch = "x86")]
    pub fn push_ebp(&mut self) -> &mut Self {
        self.buffer.push(0x55);
        self
    }

    /// MOV EBP, ESP (`8B EC`, x86).
    #[cfg(target_arch = "x86")]
    pub fn mov_ebp_esp(&mut self) -> &mut Self {
        self.buffer.extend_from_slice(&[0x8B, 0xEC]);
        self
    }

    /// Absolute jump via `push imm32; ret` (6 bytes, x86).
    ///
    /// Note: this is NOT a `jmp [mem32]` encoding — it pushes the target
    /// and returns into it, which reaches any 32-bit address without a
    /// memory operand but may perturb return-branch predictors.
    #[cfg(target_arch = "x86")]
    pub fn jmp_abs32(&mut self, target: u32) -> &mut Self {
        // push addr; ret
        self.push_imm32(target);
        self.ret();
        self
    }
}

impl Default for Encoder {
    /// Same as [`Encoder::new`]. Kept as a manual impl (rather than
    /// `#[derive(Default)]`) so the default buffer keeps the 64-byte
    /// pre-allocation instead of `Vec::default()`'s zero capacity.
    fn default() -> Self {
        Self::new()
    }
}

impl AsRef<[u8]> for Encoder {
    fn as_ref(&self) -> &[u8] {
        &self.buffer
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_nop() {
        let mut enc = Encoder::new();
        enc.nop();
        assert_eq!(enc.bytes(), &[0x90]);
    }

    #[test]
    fn test_nop_sled() {
        // Every requested sled length must be produced exactly.
        for size in 1..=32 {
            let mut enc = Encoder::new();
            enc.nop_sled(size);
            assert_eq!(enc.len(), size, "sled of {size} bytes");
        }
        // Spot-check one canonical multi-byte encoding.
        let mut enc = Encoder::new();
        enc.nop_sled(3);
        assert_eq!(enc.bytes(), &[0x0F, 0x1F, 0x00]);
    }

    #[test]
    fn test_push_ret() {
        let mut enc = Encoder::new();
        enc.push_imm32(0xDEADBEEF).ret();
        assert_eq!(enc.bytes(), &[0x68, 0xEF, 0xBE, 0xAD, 0xDE, 0xC3]);
    }

    #[test]
    fn test_jmp_rel32() {
        let mut enc = Encoder::new();
        enc.jmp_rel32(0x1000, 0x1100);
        // disp = 0x1100 - 0x1000 - 5 = 0xFB, little-endian.
        assert_eq!(enc.bytes(), &[0xE9, 0xFB, 0x00, 0x00, 0x00]);
    }

    #[test]
    fn test_call_rel32_backward() {
        let mut enc = Encoder::new();
        enc.call_rel32(0x2000, 0x1000);
        // disp = -0x1005 = 0xFFFFEFFB, little-endian.
        assert_eq!(enc.bytes(), &[0xE8, 0xFB, 0xEF, 0xFF, 0xFF]);
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_jmp_abs64() {
        let mut enc = Encoder::new();
        enc.jmp_abs64(0xDEADBEEF12345678);
        assert_eq!(enc.len(), 14);
        assert_eq!(&enc.bytes()[0..6], &[0xFF, 0x25, 0x00, 0x00, 0x00, 0x00]);
        // Inline target qword follows the indirect jump.
        assert_eq!(&enc.bytes()[6..], &0xDEADBEEF12345678u64.to_le_bytes()[..]);
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_mov_jmp_rax() {
        let mut enc = Encoder::new();
        enc.mov_rax_imm64(0xDEADBEEF12345678).jmp_rax();
        assert_eq!(enc.len(), 12);
        assert_eq!(&enc.bytes()[..2], &[0x48, 0xB8]); // REX.W MOV RAX, imm64
        assert_eq!(&enc.bytes()[10..], &[0xFF, 0xE0]); // jmp rax
    }
}