// wraith/manipulation/inline_hook/asm/encoder.rs

1//! Instruction encoding utilities
2//!
3//! Provides a builder-style API for constructing x86/x64 instruction sequences.
4
5#[cfg(all(not(feature = "std"), feature = "alloc"))]
6use alloc::vec::Vec;
7
8#[cfg(feature = "std")]
9use std::vec::Vec;
10
11/// instruction encoder with builder API
12pub struct Encoder {
13    buffer: Vec<u8>,
14}
15
16impl Encoder {
17    /// create new empty encoder
18    pub fn new() -> Self {
19        Self {
20            buffer: Vec::with_capacity(64),
21        }
22    }
23
24    /// create encoder with pre-allocated capacity
25    pub fn with_capacity(capacity: usize) -> Self {
26        Self {
27            buffer: Vec::with_capacity(capacity),
28        }
29    }
30
31    /// get the encoded bytes
32    pub fn bytes(&self) -> &[u8] {
33        &self.buffer
34    }
35
36    /// consume encoder and return bytes
37    pub fn into_bytes(self) -> Vec<u8> {
38        self.buffer
39    }
40
41    /// get current length
42    pub fn len(&self) -> usize {
43        self.buffer.len()
44    }
45
46    /// check if empty
47    pub fn is_empty(&self) -> bool {
48        self.buffer.is_empty()
49    }
50
51    /// clear the buffer
52    pub fn clear(&mut self) {
53        self.buffer.clear();
54    }
55
56    /// append raw bytes
57    pub fn raw(&mut self, bytes: &[u8]) -> &mut Self {
58        self.buffer.extend_from_slice(bytes);
59        self
60    }
61
62    /// append single byte
63    pub fn byte(&mut self, b: u8) -> &mut Self {
64        self.buffer.push(b);
65        self
66    }
67
68    // === x86/x64 common instructions ===
69
70    /// NOP (single byte)
71    pub fn nop(&mut self) -> &mut Self {
72        self.buffer.push(0x90);
73        self
74    }
75
76    /// multi-byte NOP sled
77    pub fn nop_sled(&mut self, count: usize) -> &mut Self {
78        let mut remaining = count;
79        while remaining > 0 {
80            match remaining {
81                1 => {
82                    self.buffer.push(0x90);
83                    remaining -= 1;
84                }
85                2 => {
86                    self.buffer.extend_from_slice(&[0x66, 0x90]);
87                    remaining -= 2;
88                }
89                3 => {
90                    self.buffer.extend_from_slice(&[0x0F, 0x1F, 0x00]);
91                    remaining -= 3;
92                }
93                4 => {
94                    self.buffer.extend_from_slice(&[0x0F, 0x1F, 0x40, 0x00]);
95                    remaining -= 4;
96                }
97                5 => {
98                    self.buffer
99                        .extend_from_slice(&[0x0F, 0x1F, 0x44, 0x00, 0x00]);
100                    remaining -= 5;
101                }
102                6 => {
103                    self.buffer
104                        .extend_from_slice(&[0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00]);
105                    remaining -= 6;
106                }
107                7 => {
108                    self.buffer
109                        .extend_from_slice(&[0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00]);
110                    remaining -= 7;
111                }
112                _ => {
113                    self.buffer
114                        .extend_from_slice(&[0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00]);
115                    remaining -= 8;
116                }
117            }
118        }
119        self
120    }
121
122    /// INT3 breakpoint
123    pub fn int3(&mut self) -> &mut Self {
124        self.buffer.push(0xCC);
125        self
126    }
127
128    /// RET (near return)
129    pub fn ret(&mut self) -> &mut Self {
130        self.buffer.push(0xC3);
131        self
132    }
133
134    /// PUSH imm32
135    pub fn push_imm32(&mut self, value: u32) -> &mut Self {
136        self.buffer.push(0x68);
137        self.buffer.extend_from_slice(&value.to_le_bytes());
138        self
139    }
140
141    /// PUSH imm8 (sign-extended)
142    pub fn push_imm8(&mut self, value: i8) -> &mut Self {
143        self.buffer.push(0x6A);
144        self.buffer.push(value as u8);
145        self
146    }
147
148    /// JMP rel32 (5 bytes)
149    pub fn jmp_rel32(&mut self, from: usize, to: usize) -> &mut Self {
150        let offset = (to as i64 - from as i64 - 5) as i32;
151        self.buffer.push(0xE9);
152        self.buffer.extend_from_slice(&offset.to_le_bytes());
153        self
154    }
155
156    /// JMP rel8 (2 bytes)
157    pub fn jmp_rel8(&mut self, offset: i8) -> &mut Self {
158        self.buffer.push(0xEB);
159        self.buffer.push(offset as u8);
160        self
161    }
162
163    /// CALL rel32 (5 bytes)
164    pub fn call_rel32(&mut self, from: usize, to: usize) -> &mut Self {
165        let offset = (to as i64 - from as i64 - 5) as i32;
166        self.buffer.push(0xE8);
167        self.buffer.extend_from_slice(&offset.to_le_bytes());
168        self
169    }
170
171    // === x64-specific instructions ===
172
173    /// JMP [RIP+0] with absolute address (14 bytes, x64)
174    #[cfg(target_arch = "x86_64")]
175    pub fn jmp_abs64(&mut self, target: u64) -> &mut Self {
176        // FF 25 00 00 00 00 = jmp qword ptr [rip+0]
177        self.buffer
178            .extend_from_slice(&[0xFF, 0x25, 0x00, 0x00, 0x00, 0x00]);
179        self.buffer.extend_from_slice(&target.to_le_bytes());
180        self
181    }
182
183    /// MOV RAX, imm64 (10 bytes, x64)
184    #[cfg(target_arch = "x86_64")]
185    pub fn mov_rax_imm64(&mut self, value: u64) -> &mut Self {
186        // 48 B8 = REX.W MOV RAX, imm64
187        self.buffer.extend_from_slice(&[0x48, 0xB8]);
188        self.buffer.extend_from_slice(&value.to_le_bytes());
189        self
190    }
191
192    /// JMP RAX (2 bytes, x64)
193    #[cfg(target_arch = "x86_64")]
194    pub fn jmp_rax(&mut self) -> &mut Self {
195        // FF E0 = jmp rax
196        self.buffer.extend_from_slice(&[0xFF, 0xE0]);
197        self
198    }
199
200    /// CALL RAX (2 bytes, x64)
201    #[cfg(target_arch = "x86_64")]
202    pub fn call_rax(&mut self) -> &mut Self {
203        // FF D0 = call rax
204        self.buffer.extend_from_slice(&[0xFF, 0xD0]);
205        self
206    }
207
208    /// CALL [RIP+0] with absolute address (14 bytes, x64)
209    #[cfg(target_arch = "x86_64")]
210    pub fn call_abs64(&mut self, target: u64) -> &mut Self {
211        // FF 15 00 00 00 00 = call qword ptr [rip+0]
212        self.buffer
213            .extend_from_slice(&[0xFF, 0x15, 0x00, 0x00, 0x00, 0x00]);
214        self.buffer.extend_from_slice(&target.to_le_bytes());
215        self
216    }
217
218    /// PUSH RAX (x64)
219    #[cfg(target_arch = "x86_64")]
220    pub fn push_rax(&mut self) -> &mut Self {
221        self.buffer.push(0x50);
222        self
223    }
224
225    /// POP RAX (x64)
226    #[cfg(target_arch = "x86_64")]
227    pub fn pop_rax(&mut self) -> &mut Self {
228        self.buffer.push(0x58);
229        self
230    }
231
232    /// SUB RSP, imm8 (x64)
233    #[cfg(target_arch = "x86_64")]
234    pub fn sub_rsp_imm8(&mut self, value: i8) -> &mut Self {
235        // 48 83 EC XX
236        self.buffer.extend_from_slice(&[0x48, 0x83, 0xEC]);
237        self.buffer.push(value as u8);
238        self
239    }
240
241    /// ADD RSP, imm8 (x64)
242    #[cfg(target_arch = "x86_64")]
243    pub fn add_rsp_imm8(&mut self, value: i8) -> &mut Self {
244        // 48 83 C4 XX
245        self.buffer.extend_from_slice(&[0x48, 0x83, 0xC4]);
246        self.buffer.push(value as u8);
247        self
248    }
249
250    // === x86-specific instructions ===
251
252    /// PUSH EAX (x86)
253    #[cfg(target_arch = "x86")]
254    pub fn push_eax(&mut self) -> &mut Self {
255        self.buffer.push(0x50);
256        self
257    }
258
259    /// POP EAX (x86)
260    #[cfg(target_arch = "x86")]
261    pub fn pop_eax(&mut self) -> &mut Self {
262        self.buffer.push(0x58);
263        self
264    }
265
266    /// PUSH EBP (x86)
267    #[cfg(target_arch = "x86")]
268    pub fn push_ebp(&mut self) -> &mut Self {
269        self.buffer.push(0x55);
270        self
271    }
272
273    /// MOV EBP, ESP (x86)
274    #[cfg(target_arch = "x86")]
275    pub fn mov_ebp_esp(&mut self) -> &mut Self {
276        self.buffer.extend_from_slice(&[0x8B, 0xEC]);
277        self
278    }
279
280    /// JMP [mem32] absolute (6 bytes, x86)
281    #[cfg(target_arch = "x86")]
282    pub fn jmp_abs32(&mut self, target: u32) -> &mut Self {
283        // push addr; ret
284        self.push_imm32(target);
285        self.ret();
286        self
287    }
288}
289
290impl Default for Encoder {
291    fn default() -> Self {
292        Self::new()
293    }
294}
295
296impl AsRef<[u8]> for Encoder {
297    fn as_ref(&self) -> &[u8] {
298        &self.buffer
299    }
300}
301
302#[cfg(test)]
303mod tests {
304    use super::*;
305
306    #[test]
307    fn test_nop() {
308        let mut enc = Encoder::new();
309        enc.nop();
310        assert_eq!(enc.bytes(), &[0x90]);
311    }
312
313    #[test]
314    fn test_nop_sled() {
315        for size in 1..=16 {
316            let mut enc = Encoder::new();
317            enc.nop_sled(size);
318            assert_eq!(enc.len(), size);
319        }
320    }
321
322    #[test]
323    fn test_push_ret() {
324        let mut enc = Encoder::new();
325        enc.push_imm32(0xDEADBEEF).ret();
326        assert_eq!(enc.bytes(), &[0x68, 0xEF, 0xBE, 0xAD, 0xDE, 0xC3]);
327    }
328
329    #[test]
330    fn test_jmp_rel32() {
331        let mut enc = Encoder::new();
332        enc.jmp_rel32(0x1000, 0x1100);
333        assert_eq!(enc.len(), 5);
334        assert_eq!(enc.bytes()[0], 0xE9);
335    }
336
337    #[cfg(target_arch = "x86_64")]
338    #[test]
339    fn test_jmp_abs64() {
340        let mut enc = Encoder::new();
341        enc.jmp_abs64(0xDEADBEEF12345678);
342        assert_eq!(enc.len(), 14);
343        assert_eq!(&enc.bytes()[0..6], &[0xFF, 0x25, 0x00, 0x00, 0x00, 0x00]);
344    }
345
346    #[cfg(target_arch = "x86_64")]
347    #[test]
348    fn test_mov_jmp_rax() {
349        let mut enc = Encoder::new();
350        enc.mov_rax_imm64(0xDEADBEEF12345678).jmp_rax();
351        assert_eq!(enc.len(), 12);
352    }
353}