// vtil_parser/pod.rs

1// BSD 3-Clause License
2//
3// Copyright © 2021 Keegan Saunders
4// Copyright © 2021 VTIL Project
5// All rights reserved.
6//
7// Redistribution and use in source and binary forms, with or without
8// modification, are permitted provided that the following conditions are met:
9//
10// 1. Redistributions of source code must retain the above copyright notice, this
11//    list of conditions and the following disclaimer.
12//
13// 2. Redistributions in binary form must reproduce the above copyright notice,
14//    this list of conditions and the following disclaimer in the documentation
15//    and/or other materials provided with the distribution.
16//
17// 3. Neither the name of the copyright holder nor the names of its
18//    contributors may be used to endorse or promote products derived from
19//    this software without specific prior written permission.
20//
21// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31//
32
33use crate::{
34    arch_info::{self, amd64, arm64},
35    Error, Result,
36};
37use indexmap::map::IndexMap;
38#[cfg(feature = "serde")]
39use serde::{Deserialize, Deserializer, Serialize, Serializer};
40use std::{convert::TryInto, fmt};
41
/// Architecture for IL inside of VTIL routines
///
/// NOTE: variant order is load-bearing — the `dr!` macro stores
/// `ArchitectureIdentifier as u64` in the top byte of
/// `RegisterDesc::combined_id`, and `RegisterDesc::arch_id` decodes
/// 0 => `Amd64`, 1 => `Arm64`, anything else => `Virtual`.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum ArchitectureIdentifier {
    /// AMD64 (otherwise known as x86_64) architecture
    Amd64,
    /// AArch64 architecture
    Arm64,
    /// Virtual architecture (contains no physical register access)
    Virtual,
}
53
/// Header containing metadata regarding the VTIL container
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
pub struct Header {
    /// The architecture used by the VTIL routine (see [`ArchitectureIdentifier`])
    pub arch_id: ArchitectureIdentifier,
}
61
/// VTIL instruction pointer
///
/// Newtype over the raw 64-bit address; the all-ones value returned by
/// [`Vip::invalid`] marks a pointer with no associated basic block.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(transparent)]
pub struct Vip(pub u64);
67
68impl Vip {
69    /// Invalid instruction pointer, unassociated with [`BasicBlock`]
70    pub fn invalid() -> Vip {
71        Vip(!0)
72    }
73}
74
bitflags! {
    /// Flags describing register properties
    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
    pub struct RegisterFlags: u64 {
        /// Default value if no flags set. Read/write pure virtual register that
        /// is not a stack pointer or flags
        const VIRTUAL = 0;
        /// Indicates that the register is a physical register
        const PHYSICAL = 1 << 0;
        /// Indicates that the register is a local temporary register of the current basic block
        const LOCAL = 1 << 1;
        /// Indicates that the register is used to hold CPU flags
        const FLAGS = 1 << 2;
        /// Indicates that the register is used as the stack pointer
        const STACK_POINTER = 1 << 3;
        /// Indicates that the register is an alias to the image base
        const IMAGE_BASE = 1 << 4;
        /// Indicates that the register can change spontaneously (e.g.: IA32_TIME_STAMP_COUNTER)
        const VOLATILE = 1 << 5;
        /// Indicates that the register is read-only
        const READONLY = 1 << 6;
        /// Indicates that it is the special "undefined" register
        const UNDEFINED = 1 << 7;
        /// Indicates that it is a internal-use register that should be treated
        /// like any other virtual register
        const INTERNAL = 1 << 8;
        /// Combined mask of all special registers
        const SPECIAL = Self::FLAGS.bits | Self::STACK_POINTER.bits | Self::IMAGE_BASE.bits | Self::UNDEFINED.bits;
    }
}
105
/// Describes a VTIL register in an operand
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone, Copy)]
pub struct RegisterDesc {
    /// Flags describing the register
    pub flags: RegisterFlags,
    /// Identifier for this register, use [`RegisterDesc::local_id`]
    /// (the low 56 bits hold the local id; the top byte holds the
    /// architecture id — see [`RegisterDesc::arch_id`])
    pub combined_id: u64,
    /// The bit count of this register (e.g.: 32)
    pub bit_count: i32,
    /// The bit offset of register access
    pub bit_offset: i32,
}
119
// Mask for the local ID in `combined_id` (low 56 bits); invert to isolate the
// architecture ID, which the `dr!` macro writes into the top byte.
pub(crate) const LOCAL_ID_MASK: u64 = 0x00ffffffffffffff;
122
// Define a physical register constant, including bit count and bit offset.
// `$arch_id` is encoded into the top byte of `combined_id` and `$id` into the
// low 56 bits (see `LOCAL_ID_MASK`); `$offset` and `$count` are given in
// bytes and scaled to bits here.
//
// The previous 4-argument arm recursed into `dr!` with five arguments, which
// matched no rule and therefore failed to expand on any use; it was dead code
// (all call sites go through `dr_amd64!`/`dr_arm64!`) and has been removed.
macro_rules! dr {
    ($arch_id:expr, $name:ident, $id:expr, $offset:expr, $count:expr, $doc:expr) => {
        #[doc = $doc]
        #[doc = " register"]
        pub const $name: RegisterDesc = RegisterDesc {
            flags: RegisterFlags::PHYSICAL,
            combined_id: (($arch_id as u64) << 56) | $id,
            bit_count: $count * 8,
            bit_offset: $offset * 8,
        };
    };
}
140
// Define an AMD64 physical register: forwards to `dr!` with the AMD64
// architecture id and the register's own name as its documentation.
macro_rules! dr_amd64 {
    ($name:ident, $id:expr, $offset:expr, $count:expr) => {
        dr!(
            ArchitectureIdentifier::Amd64,
            $name,
            $id,
            $offset,
            $count,
            stringify!($name)
        );
    };
}
153
// Define an AArch64 physical register: forwards to `dr!` with the ARM64
// architecture id and the register's own name as its documentation.
macro_rules! dr_arm64 {
    ($name:ident, $id:expr, $offset:expr, $count:expr) => {
        dr!(
            ArchitectureIdentifier::Arm64,
            $name,
            $id,
            $offset,
            $count,
            stringify!($name)
        );
    };
}
166
impl RegisterDesc {
    /// Undefined register
    pub const UNDEFINED: RegisterDesc = RegisterDesc {
        flags: RegisterFlags::from_bits_truncate(
            RegisterFlags::VOLATILE.bits() | RegisterFlags::UNDEFINED.bits(),
        ),
        combined_id: 0,
        bit_count: 64,
        bit_offset: 0,
    };

    /// Image base register
    pub const IMGBASE: RegisterDesc = RegisterDesc {
        flags: RegisterFlags::from_bits_truncate(
            RegisterFlags::READONLY.bits() | RegisterFlags::IMAGE_BASE.bits(),
        ),
        combined_id: 0,
        bit_count: 64,
        bit_offset: 0,
    };

    /// Flags register
    pub const FLAGS: RegisterDesc = RegisterDesc {
        flags: RegisterFlags::from_bits_truncate(
            RegisterFlags::PHYSICAL.bits() | RegisterFlags::FLAGS.bits(),
        ),
        combined_id: 0,
        bit_count: 64,
        bit_offset: 0,
    };

    /// Stack pointer register
    pub const SP: RegisterDesc = RegisterDesc {
        flags: RegisterFlags::from_bits_truncate(
            RegisterFlags::PHYSICAL.bits() | RegisterFlags::STACK_POINTER.bits(),
        ),
        combined_id: 0,
        bit_count: 64,
        bit_offset: 0,
    };

    // AMD64 physical registers: each full-width register plus its
    // architectural sub-register views. Offsets/sizes are in bytes; the
    // `dr!` macro scales them to bits (e.g. AH is byte 1 of RAX).
    dr_amd64!(X86_REG_RAX, amd64::X86_REG_RAX, 0, 8);
    dr_amd64!(X86_REG_EAX, amd64::X86_REG_RAX, 0, 4);
    dr_amd64!(X86_REG_AX, amd64::X86_REG_RAX, 0, 2);
    dr_amd64!(X86_REG_AH, amd64::X86_REG_RAX, 1, 1);
    dr_amd64!(X86_REG_AL, amd64::X86_REG_RAX, 0, 1);

    dr_amd64!(X86_REG_RBX, amd64::X86_REG_RBX, 0, 8);
    dr_amd64!(X86_REG_EBX, amd64::X86_REG_RBX, 0, 4);
    dr_amd64!(X86_REG_BX, amd64::X86_REG_RBX, 0, 2);
    dr_amd64!(X86_REG_BH, amd64::X86_REG_RBX, 1, 1);
    dr_amd64!(X86_REG_BL, amd64::X86_REG_RBX, 0, 1);

    dr_amd64!(X86_REG_RCX, amd64::X86_REG_RCX, 0, 8);
    dr_amd64!(X86_REG_ECX, amd64::X86_REG_RCX, 0, 4);
    dr_amd64!(X86_REG_CX, amd64::X86_REG_RCX, 0, 2);
    dr_amd64!(X86_REG_CH, amd64::X86_REG_RCX, 1, 1);
    dr_amd64!(X86_REG_CL, amd64::X86_REG_RCX, 0, 1);

    dr_amd64!(X86_REG_RDX, amd64::X86_REG_RDX, 0, 8);
    dr_amd64!(X86_REG_EDX, amd64::X86_REG_RDX, 0, 4);
    dr_amd64!(X86_REG_DX, amd64::X86_REG_RDX, 0, 2);
    dr_amd64!(X86_REG_DH, amd64::X86_REG_RDX, 1, 1);
    dr_amd64!(X86_REG_DL, amd64::X86_REG_RDX, 0, 1);

    dr_amd64!(X86_REG_RDI, amd64::X86_REG_RDI, 0, 8);
    dr_amd64!(X86_REG_EDI, amd64::X86_REG_RDI, 0, 4);
    dr_amd64!(X86_REG_DI, amd64::X86_REG_RDI, 0, 2);
    dr_amd64!(X86_REG_DIL, amd64::X86_REG_RDI, 0, 1);

    dr_amd64!(X86_REG_RSI, amd64::X86_REG_RSI, 0, 8);
    dr_amd64!(X86_REG_ESI, amd64::X86_REG_RSI, 0, 4);
    dr_amd64!(X86_REG_SI, amd64::X86_REG_RSI, 0, 2);
    dr_amd64!(X86_REG_SIL, amd64::X86_REG_RSI, 0, 1);

    dr_amd64!(X86_REG_RBP, amd64::X86_REG_RBP, 0, 8);
    dr_amd64!(X86_REG_EBP, amd64::X86_REG_RBP, 0, 4);
    dr_amd64!(X86_REG_BP, amd64::X86_REG_RBP, 0, 2);
    dr_amd64!(X86_REG_BPL, amd64::X86_REG_RBP, 0, 1);

    dr_amd64!(X86_REG_RSP, amd64::X86_REG_RSP, 0, 8);
    dr_amd64!(X86_REG_ESP, amd64::X86_REG_RSP, 0, 4);
    dr_amd64!(X86_REG_SP, amd64::X86_REG_RSP, 0, 2);
    dr_amd64!(X86_REG_SPL, amd64::X86_REG_RSP, 0, 1);

    dr_amd64!(X86_REG_R8, amd64::X86_REG_R8, 0, 8);
    dr_amd64!(X86_REG_R8D, amd64::X86_REG_R8, 0, 4);
    dr_amd64!(X86_REG_R8W, amd64::X86_REG_R8, 0, 2);
    dr_amd64!(X86_REG_R8B, amd64::X86_REG_R8, 0, 1);

    dr_amd64!(X86_REG_R9, amd64::X86_REG_R9, 0, 8);
    dr_amd64!(X86_REG_R9D, amd64::X86_REG_R9, 0, 4);
    dr_amd64!(X86_REG_R9W, amd64::X86_REG_R9, 0, 2);
    dr_amd64!(X86_REG_R9B, amd64::X86_REG_R9, 0, 1);

    dr_amd64!(X86_REG_R10, amd64::X86_REG_R10, 0, 8);
    dr_amd64!(X86_REG_R10D, amd64::X86_REG_R10, 0, 4);
    dr_amd64!(X86_REG_R10W, amd64::X86_REG_R10, 0, 2);
    dr_amd64!(X86_REG_R10B, amd64::X86_REG_R10, 0, 1);

    dr_amd64!(X86_REG_R11, amd64::X86_REG_R11, 0, 8);
    dr_amd64!(X86_REG_R11D, amd64::X86_REG_R11, 0, 4);
    dr_amd64!(X86_REG_R11W, amd64::X86_REG_R11, 0, 2);
    dr_amd64!(X86_REG_R11B, amd64::X86_REG_R11, 0, 1);

    dr_amd64!(X86_REG_R12, amd64::X86_REG_R12, 0, 8);
    dr_amd64!(X86_REG_R12D, amd64::X86_REG_R12, 0, 4);
    dr_amd64!(X86_REG_R12W, amd64::X86_REG_R12, 0, 2);
    dr_amd64!(X86_REG_R12B, amd64::X86_REG_R12, 0, 1);

    dr_amd64!(X86_REG_R13, amd64::X86_REG_R13, 0, 8);
    dr_amd64!(X86_REG_R13D, amd64::X86_REG_R13, 0, 4);
    dr_amd64!(X86_REG_R13W, amd64::X86_REG_R13, 0, 2);
    dr_amd64!(X86_REG_R13B, amd64::X86_REG_R13, 0, 1);

    dr_amd64!(X86_REG_R14, amd64::X86_REG_R14, 0, 8);
    dr_amd64!(X86_REG_R14D, amd64::X86_REG_R14, 0, 4);
    dr_amd64!(X86_REG_R14W, amd64::X86_REG_R14, 0, 2);
    dr_amd64!(X86_REG_R14B, amd64::X86_REG_R14, 0, 1);

    dr_amd64!(X86_REG_R15, amd64::X86_REG_R15, 0, 8);
    dr_amd64!(X86_REG_R15D, amd64::X86_REG_R15, 0, 4);
    dr_amd64!(X86_REG_R15W, amd64::X86_REG_R15, 0, 2);
    dr_amd64!(X86_REG_R15B, amd64::X86_REG_R15, 0, 1);

    dr_amd64!(X86_REG_EFLAGS, amd64::X86_REG_EFLAGS, 0, 8);

    // AArch64 physical registers: 64-bit Xn plus the 32-bit Wn view, the
    // FP/LR aliases for X29/X30, the zero register, SP, and NZCV flags.
    dr_arm64!(ARM64_REG_X0, arm64::ARM64_REG_X0, 0, 8);
    dr_arm64!(ARM64_REG_W0, arm64::ARM64_REG_X0, 0, 4);

    dr_arm64!(ARM64_REG_X1, arm64::ARM64_REG_X1, 0, 8);
    dr_arm64!(ARM64_REG_W1, arm64::ARM64_REG_X1, 0, 4);

    dr_arm64!(ARM64_REG_X2, arm64::ARM64_REG_X2, 0, 8);
    dr_arm64!(ARM64_REG_W2, arm64::ARM64_REG_X2, 0, 4);

    dr_arm64!(ARM64_REG_X3, arm64::ARM64_REG_X3, 0, 8);
    dr_arm64!(ARM64_REG_W3, arm64::ARM64_REG_X3, 0, 4);

    dr_arm64!(ARM64_REG_X4, arm64::ARM64_REG_X4, 0, 8);
    dr_arm64!(ARM64_REG_W4, arm64::ARM64_REG_X4, 0, 4);

    dr_arm64!(ARM64_REG_X5, arm64::ARM64_REG_X5, 0, 8);
    dr_arm64!(ARM64_REG_W5, arm64::ARM64_REG_X5, 0, 4);

    dr_arm64!(ARM64_REG_X6, arm64::ARM64_REG_X6, 0, 8);
    dr_arm64!(ARM64_REG_W6, arm64::ARM64_REG_X6, 0, 4);

    dr_arm64!(ARM64_REG_X7, arm64::ARM64_REG_X7, 0, 8);
    dr_arm64!(ARM64_REG_W7, arm64::ARM64_REG_X7, 0, 4);

    dr_arm64!(ARM64_REG_X8, arm64::ARM64_REG_X8, 0, 8);
    dr_arm64!(ARM64_REG_W8, arm64::ARM64_REG_X8, 0, 4);

    dr_arm64!(ARM64_REG_X9, arm64::ARM64_REG_X9, 0, 8);
    dr_arm64!(ARM64_REG_W9, arm64::ARM64_REG_X9, 0, 4);

    dr_arm64!(ARM64_REG_X10, arm64::ARM64_REG_X10, 0, 8);
    dr_arm64!(ARM64_REG_W10, arm64::ARM64_REG_X10, 0, 4);

    dr_arm64!(ARM64_REG_X11, arm64::ARM64_REG_X11, 0, 8);
    dr_arm64!(ARM64_REG_W11, arm64::ARM64_REG_X11, 0, 4);

    dr_arm64!(ARM64_REG_X12, arm64::ARM64_REG_X12, 0, 8);
    dr_arm64!(ARM64_REG_W12, arm64::ARM64_REG_X12, 0, 4);

    dr_arm64!(ARM64_REG_X13, arm64::ARM64_REG_X13, 0, 8);
    dr_arm64!(ARM64_REG_W13, arm64::ARM64_REG_X13, 0, 4);

    dr_arm64!(ARM64_REG_X14, arm64::ARM64_REG_X14, 0, 8);
    dr_arm64!(ARM64_REG_W14, arm64::ARM64_REG_X14, 0, 4);

    dr_arm64!(ARM64_REG_X15, arm64::ARM64_REG_X15, 0, 8);
    dr_arm64!(ARM64_REG_W15, arm64::ARM64_REG_X15, 0, 4);

    dr_arm64!(ARM64_REG_X16, arm64::ARM64_REG_X16, 0, 8);
    dr_arm64!(ARM64_REG_W16, arm64::ARM64_REG_X16, 0, 4);

    dr_arm64!(ARM64_REG_X17, arm64::ARM64_REG_X17, 0, 8);
    dr_arm64!(ARM64_REG_W17, arm64::ARM64_REG_X17, 0, 4);

    dr_arm64!(ARM64_REG_X18, arm64::ARM64_REG_X18, 0, 8);
    dr_arm64!(ARM64_REG_W18, arm64::ARM64_REG_X18, 0, 4);

    dr_arm64!(ARM64_REG_X19, arm64::ARM64_REG_X19, 0, 8);
    dr_arm64!(ARM64_REG_W19, arm64::ARM64_REG_X19, 0, 4);

    dr_arm64!(ARM64_REG_X20, arm64::ARM64_REG_X20, 0, 8);
    dr_arm64!(ARM64_REG_W20, arm64::ARM64_REG_X20, 0, 4);

    dr_arm64!(ARM64_REG_X21, arm64::ARM64_REG_X21, 0, 8);
    dr_arm64!(ARM64_REG_W21, arm64::ARM64_REG_X21, 0, 4);

    dr_arm64!(ARM64_REG_X22, arm64::ARM64_REG_X22, 0, 8);
    dr_arm64!(ARM64_REG_W22, arm64::ARM64_REG_X22, 0, 4);

    dr_arm64!(ARM64_REG_X23, arm64::ARM64_REG_X23, 0, 8);
    dr_arm64!(ARM64_REG_W23, arm64::ARM64_REG_X23, 0, 4);

    dr_arm64!(ARM64_REG_X24, arm64::ARM64_REG_X24, 0, 8);
    dr_arm64!(ARM64_REG_W24, arm64::ARM64_REG_X24, 0, 4);

    dr_arm64!(ARM64_REG_X25, arm64::ARM64_REG_X25, 0, 8);
    dr_arm64!(ARM64_REG_W25, arm64::ARM64_REG_X25, 0, 4);

    dr_arm64!(ARM64_REG_X26, arm64::ARM64_REG_X26, 0, 8);
    dr_arm64!(ARM64_REG_W26, arm64::ARM64_REG_X26, 0, 4);

    dr_arm64!(ARM64_REG_X27, arm64::ARM64_REG_X27, 0, 8);
    dr_arm64!(ARM64_REG_W27, arm64::ARM64_REG_X27, 0, 4);

    dr_arm64!(ARM64_REG_X28, arm64::ARM64_REG_X28, 0, 8);
    dr_arm64!(ARM64_REG_W28, arm64::ARM64_REG_X28, 0, 4);

    dr_arm64!(ARM64_REG_X29, arm64::ARM64_REG_X29, 0, 8);
    dr_arm64!(ARM64_REG_FP, arm64::ARM64_REG_X29, 0, 8);
    dr_arm64!(ARM64_REG_W29, arm64::ARM64_REG_X29, 0, 4);

    dr_arm64!(ARM64_REG_X30, arm64::ARM64_REG_X30, 0, 8);
    dr_arm64!(ARM64_REG_LR, arm64::ARM64_REG_X30, 0, 8);
    dr_arm64!(ARM64_REG_W30, arm64::ARM64_REG_X30, 0, 4);

    dr_arm64!(ARM64_REG_XZR, arm64::ARM64_REG_XZR, 0, 8);
    dr_arm64!(ARM64_REG_WZR, arm64::ARM64_REG_XZR, 0, 4);

    dr_arm64!(ARM64_REG_SP, arm64::ARM64_REG_SP, 0, 8);
    dr_arm64!(ARM64_REG_WSP, arm64::ARM64_REG_SP, 0, 4);

    dr_arm64!(ARM64_REG_NZCV, arm64::ARM64_REG_NZCV, 0, 8);

    /// Local identifier that is intentionally unique to this register
    /// (the low 56 bits of `combined_id`)
    pub fn local_id(&self) -> u64 {
        self.combined_id & LOCAL_ID_MASK
    }

    /// The underlying architecture of this register
    ///
    /// Decodes the top byte of `combined_id` (written as `arch_id << 56` by
    /// the `dr!` macro); anything other than 0 (AMD64) or 1 (AArch64) is
    /// reported as [`ArchitectureIdentifier::Virtual`].
    pub fn arch_id(&self) -> ArchitectureIdentifier {
        match (self.combined_id & !LOCAL_ID_MASK) >> 56 {
            0 => ArchitectureIdentifier::Amd64,
            1 => ArchitectureIdentifier::Arm64,
            _ => ArchitectureIdentifier::Virtual,
        }
    }

    /// Operand size in bytes, rounding up (`bit_count` is in bits)
    pub fn size(&self) -> usize {
        (self.bit_count as usize + 7) / 8
    }
}
416
417impl fmt::Display for RegisterDesc {
418    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
419        let mut prefix = String::new();
420
421        if self.flags.contains(RegisterFlags::VOLATILE) {
422            prefix = "?".to_string();
423        }
424
425        if self.flags.contains(RegisterFlags::READONLY) {
426            prefix += "&&";
427        }
428
429        let mut suffix = String::new();
430
431        if self.bit_offset != 0 {
432            suffix = format!("@{}", self.bit_offset);
433        }
434
435        if self.bit_count != 64 {
436            suffix.push_str(&format!(":{}", self.bit_count));
437        }
438
439        if self.flags.contains(RegisterFlags::INTERNAL) {
440            write!(f, "{}sr{}{}", prefix, self.local_id(), suffix)?;
441            return Ok(());
442        } else if self.flags.contains(RegisterFlags::UNDEFINED) {
443            write!(f, "{}UD{}", prefix, suffix)?;
444            return Ok(());
445        } else if self.flags.contains(RegisterFlags::FLAGS) {
446            write!(f, "{}$flags{}", prefix, suffix)?;
447            return Ok(());
448        } else if self.flags.contains(RegisterFlags::STACK_POINTER) {
449            write!(f, "{}$sp{}", prefix, suffix)?;
450            return Ok(());
451        } else if self.flags.contains(RegisterFlags::IMAGE_BASE) {
452            write!(f, "{}base{}", prefix, suffix)?;
453            return Ok(());
454        } else if self.flags.contains(RegisterFlags::LOCAL) {
455            write!(f, "{}t{}{}", prefix, self.local_id(), suffix)?;
456            return Ok(());
457        }
458
459        if self.flags.contains(RegisterFlags::PHYSICAL) {
460            match self.arch_id() {
461                ArchitectureIdentifier::Amd64 => {
462                    write!(
463                        f,
464                        "{}{}{}",
465                        prefix,
466                        arch_info::amd64::REGISTER_NAME_MAPPING[self.local_id() as usize],
467                        suffix
468                    )?;
469                    return Ok(());
470                }
471                ArchitectureIdentifier::Arm64 => {
472                    write!(
473                        f,
474                        "{}{}{}",
475                        prefix,
476                        arch_info::arm64::REGISTER_NAME_MAPPING[self.local_id() as usize],
477                        suffix
478                    )?;
479                    return Ok(());
480                }
481                _ => {}
482            }
483        }
484
485        write!(f, "{}vr{}{}", prefix, self.local_id(), suffix)?;
486        Ok(())
487    }
488}
489
/// Routine calling convention information and associated metadata
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
pub struct RoutineConvention {
    /// List of registers that may change as a result of the routine execution but
    /// will be considered trashed
    pub volatile_registers: Vec<RegisterDesc>,
    /// List of registers that this routine will read from as a way of taking arguments
    /// * Additional arguments will be passed at `[$sp + shadow_space + n * 8]`
    pub param_registers: Vec<RegisterDesc>,
    /// List of registers that are used to store the return value of the routine and
    /// thus will change during routine execution but must be considered "used" by return
    pub retval_registers: Vec<RegisterDesc>,
    /// Register that is generally used to store the stack frame if relevant
    pub frame_register: RegisterDesc,
    /// Size of the shadow space
    pub shadow_space: u64,
    /// Purges any writes to the stack that end up below the final stack pointer
    pub purge_stack: bool,
}
510
// Raw 64-bit immediate storage: the same bits can be viewed as either
// unsigned (`u64`) or signed (`i64`).
#[derive(Clone, Copy)]
pub(crate) union Immediate {
    pub(crate) u64: u64,
    pub(crate) i64: i64,
}
516
impl Immediate {
    // Read the value as unsigned.
    pub(crate) fn u64(&self) -> u64 {
        // SAFETY: both union fields are plain 64-bit integers sharing the same
        // storage, so reinterpreting the bits as either view is always valid.
        unsafe { self.u64 }
    }

    // Overwrite the value through the unsigned view.
    pub(crate) fn set_u64(&mut self, imm: u64) {
        self.u64 = imm;
    }

    // Read the value as signed.
    pub(crate) fn i64(&self) -> i64 {
        // SAFETY: same reasoning as `u64()` — any bit pattern is a valid i64.
        unsafe { self.i64 }
    }

    // Overwrite the value through the signed view.
    pub(crate) fn set_i64(&mut self, imm: i64) {
        self.i64 = imm;
    }
}
534
#[cfg(feature = "serde")]
impl Serialize for Immediate {
    // Serializes the raw 64 bits through the signed view; the `Deserialize`
    // impl mirrors this by reading an `i64`.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_i64(self.i64())
    }
}
544
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for Immediate {
    // Reads the value as an `i64`, matching the `Serialize` impl; the bit
    // pattern is stored and can be reinterpreted as `u64` later.
    fn deserialize<D>(deserializer: D) -> std::result::Result<Immediate, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Immediate {
            i64: i64::deserialize(deserializer)?,
        })
    }
}
556
impl fmt::Debug for Immediate {
    // Manual impl: `Debug` cannot be derived for unions, so both
    // interpretations of the bits are shown explicitly.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Immediate")
            .field("u64", &self.u64())
            .field("i64", &self.i64())
            .finish()
    }
}
565
/// Describes a VTIL immediate value in an operand
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone, Copy)]
pub struct ImmediateDesc {
    // Raw 64-bit value, viewable as signed or unsigned (see `Immediate`).
    pub(crate) value: Immediate,
    /// The bit count of this immediate (e.g.: 32)
    pub bit_count: u32,
}
574
575impl From<i64> for ImmediateDesc {
576    fn from(imm: i64) -> ImmediateDesc {
577        ImmediateDesc::new_signed(imm, 64)
578    }
579}
580
581impl From<u64> for ImmediateDesc {
582    fn from(imm: u64) -> ImmediateDesc {
583        ImmediateDesc::new(imm, 64)
584    }
585}
586
587impl From<i32> for ImmediateDesc {
588    fn from(imm: i32) -> ImmediateDesc {
589        ImmediateDesc::new_signed(imm, 32)
590    }
591}
592
593impl From<u32> for ImmediateDesc {
594    fn from(imm: u32) -> ImmediateDesc {
595        ImmediateDesc::new(imm, 32)
596    }
597}
598
599impl From<i16> for ImmediateDesc {
600    fn from(imm: i16) -> ImmediateDesc {
601        ImmediateDesc::new_signed(imm, 16)
602    }
603}
604
605impl From<u16> for ImmediateDesc {
606    fn from(imm: u16) -> ImmediateDesc {
607        ImmediateDesc::new(imm, 16)
608    }
609}
610
611impl From<i8> for ImmediateDesc {
612    fn from(imm: i8) -> ImmediateDesc {
613        ImmediateDesc::new_signed(imm, 8)
614    }
615}
616
617impl From<u8> for ImmediateDesc {
618    fn from(imm: u8) -> ImmediateDesc {
619        ImmediateDesc::new(imm, 8)
620    }
621}
622
impl ImmediateDesc {
    /// Immediate from a `u64`
    pub fn new<T: Into<u64>>(value: T, bit_count: u32) -> ImmediateDesc {
        ImmediateDesc {
            value: Immediate { u64: value.into() },
            bit_count,
        }
    }

    /// Immediate from an `i64`
    pub fn new_signed<T: Into<i64>>(value: T, bit_count: u32) -> ImmediateDesc {
        ImmediateDesc {
            value: Immediate { i64: value.into() },
            bit_count,
        }
    }

    /// Access the underlying immediate as a `u64`
    pub fn u64(&self) -> u64 {
        self.value.u64()
    }

    /// Set the value of the underlying immediate as a `u64`
    pub fn set_u64(&mut self, imm: u64) {
        self.value.set_u64(imm);
    }

    /// Access the underlying immediate as an `i64`
    pub fn i64(&self) -> i64 {
        self.value.i64()
    }

    /// Set the value of the underlying immediate as an `i64`
    pub fn set_i64(&mut self, imm: i64) {
        self.value.set_i64(imm);
    }

    /// Operand size in bytes, rounding up (`bit_count` is in bits)
    pub fn size(&self) -> usize {
        (self.bit_count as usize + 7) / 8
    }
}
665
/// VTIL instruction operand
///
/// Either a sized immediate or a register access; both variants report their
/// width via `size()`.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone, Copy)]
pub enum Operand {
    /// Immediate operand containing a sized immediate value
    ImmediateDesc(ImmediateDesc),
    /// Register operand containing a register description
    RegisterDesc(RegisterDesc),
}
675
676impl From<i64> for Operand {
677    fn from(imm: i64) -> Operand {
678        Operand::ImmediateDesc(imm.into())
679    }
680}
681
682impl From<u64> for Operand {
683    fn from(imm: u64) -> Operand {
684        Operand::ImmediateDesc(imm.into())
685    }
686}
687
688impl From<i32> for Operand {
689    fn from(imm: i32) -> Operand {
690        Operand::ImmediateDesc(imm.into())
691    }
692}
693
694impl From<u32> for Operand {
695    fn from(imm: u32) -> Operand {
696        Operand::ImmediateDesc(imm.into())
697    }
698}
699
700impl From<i16> for Operand {
701    fn from(imm: i16) -> Operand {
702        Operand::ImmediateDesc(imm.into())
703    }
704}
705
706impl From<u16> for Operand {
707    fn from(imm: u16) -> Operand {
708        Operand::ImmediateDesc(imm.into())
709    }
710}
711
712impl From<i8> for Operand {
713    fn from(imm: i8) -> Operand {
714        Operand::ImmediateDesc(imm.into())
715    }
716}
717
718impl From<u8> for Operand {
719    fn from(imm: u8) -> Operand {
720        Operand::ImmediateDesc(imm.into())
721    }
722}
723
724impl Operand {
725    /// Operand size in bits, rounding up
726    pub fn size(&self) -> usize {
727        match self {
728            Operand::ImmediateDesc(i) => i.size(),
729            Operand::RegisterDesc(r) => r.size(),
730        }
731    }
732}
733
734impl From<RegisterDesc> for Operand {
735    fn from(register_desc: RegisterDesc) -> Self {
736        Operand::RegisterDesc(register_desc)
737    }
738}
739
740impl From<ImmediateDesc> for Operand {
741    fn from(immediate_desc: ImmediateDesc) -> Self {
742        Operand::ImmediateDesc(immediate_desc)
743    }
744}
745
746impl<'a, 'b> TryInto<&'b ImmediateDesc> for &'a Operand
747where
748    'a: 'b,
749{
750    type Error = Error;
751
752    fn try_into(self) -> Result<&'a ImmediateDesc> {
753        match self {
754            Operand::ImmediateDesc(ref i) => Ok(i),
755            _ => Err(Error::OperandTypeMismatch),
756        }
757    }
758}
759
760impl<'a, 'b> TryInto<&'b RegisterDesc> for &'a Operand
761where
762    'a: 'b,
763{
764    type Error = Error;
765
766    fn try_into(self) -> Result<&'a RegisterDesc> {
767        match self {
768            Operand::RegisterDesc(r) => Ok(r),
769            _ => Err(Error::OperandTypeMismatch),
770        }
771    }
772}
773
/// VTIL instruction and associated metadata
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
pub struct Instruction {
    /// Instruction operation and operators (see [`Op`])
    pub op: Op,
    /// The virtual instruction pointer of this instruction
    pub vip: Vip,
    /// Stack pointer offset at this instruction
    pub sp_offset: i64,
    /// Stack instance index
    pub sp_index: u32,
    /// If the stack pointer is reset at this instruction
    pub sp_reset: bool,
}
789
/// VTIL operator and operands
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
pub enum Op {
    // Data/Memory instructions
    /// OP1 = ZX(OP2)
    Mov(Operand, Operand),
    /// OP1 = SX(OP2)
    Movsx(Operand, Operand),
    /// \[OP1+OP2\] <= OP3
    Str(Operand, Operand, Operand),
    /// OP1 <= \[OP2+OP3\]
    Ldd(Operand, Operand, Operand),

    // Arithmetic instructions
    /// OP1 = -OP1
    Neg(Operand),
    /// OP1 = OP1 + OP2
    Add(Operand, Operand),
    /// OP1 = OP1 - OP2
    Sub(Operand, Operand),
    /// OP1 = OP1 * OP2
    Mul(Operand, Operand),
    /// OP1 = \[OP1 * OP2\]>>N
    Mulhi(Operand, Operand),
    /// OP1 = OP1 * OP2 (Signed)
    Imul(Operand, Operand),
    /// OP1 = \[OP1 * OP2\]>>N (Signed)
    Imulhi(Operand, Operand),
    /// OP1 = \[OP2:OP1\] / OP3
    Div(Operand, Operand, Operand),
    /// OP1 = \[OP2:OP1\] % OP3
    Rem(Operand, Operand, Operand),
    /// OP1 = \[OP2:OP1\] / OP3 (Signed)
    Idiv(Operand, Operand, Operand),
    /// OP1 = \[OP2:OP1\] % OP3 (Signed)
    Irem(Operand, Operand, Operand),

    // Bitwise instructions
    /// OP1 = popcnt OP1
    Popcnt(Operand),
    /// OP1 = OP1 ? BitScanForward OP1 + 1 : 0
    Bsf(Operand),
    /// OP1 = OP1 ? BitScanReverse OP1 + 1 : 0
    Bsr(Operand),
    /// OP1 = ~OP1
    Not(Operand),
    /// OP1 >>= OP2
    Shr(Operand, Operand),
    /// OP1 <<= OP2
    Shl(Operand, Operand),
    /// OP1 ^= OP2
    Xor(Operand, Operand),
    /// OP1 |= OP2
    Or(Operand, Operand),
    /// OP1 &= OP2
    And(Operand, Operand),
    /// OP1 = (OP1>>OP2) | (OP1<<(N-OP2))
    Ror(Operand, Operand),
    /// OP1 = (OP1<<OP2) | (OP1>>(N-OP2))
    Rol(Operand, Operand),

    // Conditional instructions
    /// OP1 = OP2 > OP3
    Tg(Operand, Operand, Operand),
    /// OP1 = OP2 >= OP3
    Tge(Operand, Operand, Operand),
    /// OP1 = OP2 == OP3
    Te(Operand, Operand, Operand),
    /// OP1 = OP2 != OP3
    Tne(Operand, Operand, Operand),
    /// OP1 = OP2 < OP3
    Tl(Operand, Operand, Operand),
    /// OP1 = OP2 <= OP3
    Tle(Operand, Operand, Operand),
    /// OP1 = OP2   u>   OP3
    Tug(Operand, Operand, Operand),
    /// OP1 = OP2   u>=  OP3
    Tuge(Operand, Operand, Operand),
    /// OP1 = OP2   u<   OP3
    Tul(Operand, Operand, Operand),
    /// OP1 = OP2   u<=  OP3
    Tule(Operand, Operand, Operand),
    /// OP1 = OP2 ? OP3 : 0
    Ifs(Operand, Operand, Operand),

    // Control flow instructions
    /// Jumps to OP1 ? OP2 : OP3, continues virtual execution
    Js(Operand, Operand, Operand),
    /// Jumps to OP1, continues virtual execution
    Jmp(Operand),
    /// Jumps to OP1, continues real execution
    Vexit(Operand),
    /// Calls into OP1, pauses virtual execution until the call returns
    Vxcall(Operand),

    // Special instructions
    /// Placeholder
    Nop,
    /// Assumes all memory is read from
    Sfence,
    /// Assumes all memory is written to
    Lfence,
    /// Emits the opcode as is to the final instruction stream
    Vemit(Operand),
    /// Pins the register for read
    Vpinr(Operand),
    /// Pins the register for write
    Vpinw(Operand),
    /// Pins the memory location for read, with size = OP3
    Vpinrm(Operand, Operand, Operand),
    /// Pins the memory location for write, with size = OP3
    Vpinwm(Operand, Operand, Operand),
}
904
905impl Op {
906    /// Name of the operand
907    pub fn name(&self) -> &'static str {
908        match self {
909            Op::Mov(_, _) => "mov",
910            Op::Movsx(_, _) => "movsx",
911            Op::Str(_, _, _) => "str",
912            Op::Ldd(_, _, _) => "ldd",
913            Op::Neg(_) => "neg",
914            Op::Add(_, _) => "add",
915            Op::Sub(_, _) => "sub",
916            Op::Mul(_, _) => "mul",
917            Op::Mulhi(_, _) => "mulhi",
918            Op::Imul(_, _) => "imul",
919            Op::Imulhi(_, _) => "imulhi",
920            Op::Div(_, _, _) => "div",
921            Op::Rem(_, _, _) => "rem",
922            Op::Idiv(_, _, _) => "idiv",
923            Op::Irem(_, _, _) => "irem",
924            Op::Popcnt(_) => "popcnt",
925            Op::Bsf(_) => "bsf",
926            Op::Bsr(_) => "bsr",
927            Op::Not(_) => "not",
928            Op::Shr(_, _) => "shr",
929            Op::Shl(_, _) => "shl",
930            Op::Xor(_, _) => "xor",
931            Op::Or(_, _) => "or",
932            Op::And(_, _) => "and",
933            Op::Ror(_, _) => "ror",
934            Op::Rol(_, _) => "rol",
935            Op::Tg(_, _, _) => "tg",
936            Op::Tge(_, _, _) => "tge",
937            Op::Te(_, _, _) => "te",
938            Op::Tne(_, _, _) => "tne",
939            Op::Tl(_, _, _) => "tl",
940            Op::Tle(_, _, _) => "tle",
941            Op::Tug(_, _, _) => "tug",
942            Op::Tuge(_, _, _) => "tuge",
943            Op::Tul(_, _, _) => "tul",
944            Op::Tule(_, _, _) => "tule",
945            Op::Ifs(_, _, _) => "ifs",
946            Op::Js(_, _, _) => "js",
947            Op::Jmp(_) => "jmp",
948            Op::Vexit(_) => "vexit",
949            Op::Vxcall(_) => "vxcall",
950            Op::Nop => "nop",
951            Op::Sfence => "sfence",
952            Op::Lfence => "lfence",
953            Op::Vemit(_) => "vemit",
954            Op::Vpinr(_) => "vpinr",
955            Op::Vpinw(_) => "vpinw",
956            Op::Vpinrm(_, _, _) => "vpinrm",
957            Op::Vpinwm(_, _, _) => "vpinwm",
958        }
959    }
960
961    /// Operands for operator
962    pub fn operands(&self) -> Vec<&Operand> {
963        match *self {
964            Op::Nop | Op::Sfence | Op::Lfence => vec![],
965            Op::Neg(ref op1)
966            | Op::Popcnt(ref op1)
967            | Op::Bsf(ref op1)
968            | Op::Bsr(ref op1)
969            | Op::Not(ref op1)
970            | Op::Jmp(ref op1)
971            | Op::Vexit(ref op1)
972            | Op::Vxcall(ref op1)
973            | Op::Vemit(ref op1)
974            | Op::Vpinr(ref op1)
975            | Op::Vpinw(ref op1) => vec![op1],
976            Op::Mov(ref op1, ref op2)
977            | Op::Movsx(ref op1, ref op2)
978            | Op::Add(ref op1, ref op2)
979            | Op::Sub(ref op1, ref op2)
980            | Op::Mul(ref op1, ref op2)
981            | Op::Mulhi(ref op1, ref op2)
982            | Op::Imul(ref op1, ref op2)
983            | Op::Imulhi(ref op1, ref op2)
984            | Op::Shr(ref op1, ref op2)
985            | Op::Shl(ref op1, ref op2)
986            | Op::Xor(ref op1, ref op2)
987            | Op::Or(ref op1, ref op2)
988            | Op::And(ref op1, ref op2)
989            | Op::Ror(ref op1, ref op2)
990            | Op::Rol(ref op1, ref op2) => vec![op1, op2],
991            Op::Str(ref op1, ref op2, ref op3)
992            | Op::Ldd(ref op1, ref op2, ref op3)
993            | Op::Div(ref op1, ref op2, ref op3)
994            | Op::Rem(ref op1, ref op2, ref op3)
995            | Op::Idiv(ref op1, ref op2, ref op3)
996            | Op::Irem(ref op1, ref op2, ref op3)
997            | Op::Tg(ref op1, ref op2, ref op3)
998            | Op::Tge(ref op1, ref op2, ref op3)
999            | Op::Te(ref op1, ref op2, ref op3)
1000            | Op::Tne(ref op1, ref op2, ref op3)
1001            | Op::Tl(ref op1, ref op2, ref op3)
1002            | Op::Tle(ref op1, ref op2, ref op3)
1003            | Op::Tug(ref op1, ref op2, ref op3)
1004            | Op::Tuge(ref op1, ref op2, ref op3)
1005            | Op::Tul(ref op1, ref op2, ref op3)
1006            | Op::Tule(ref op1, ref op2, ref op3)
1007            | Op::Ifs(ref op1, ref op2, ref op3)
1008            | Op::Js(ref op1, ref op2, ref op3)
1009            | Op::Vpinrm(ref op1, ref op2, ref op3)
1010            | Op::Vpinwm(ref op1, ref op2, ref op3) => vec![op1, op2, op3],
1011        }
1012    }
1013
1014    /// Mutable operands for operator
1015    pub fn operands_mut(&mut self) -> Vec<&mut Operand> {
1016        match *self {
1017            Op::Nop | Op::Sfence | Op::Lfence => vec![],
1018            Op::Neg(ref mut op1)
1019            | Op::Popcnt(ref mut op1)
1020            | Op::Bsf(ref mut op1)
1021            | Op::Bsr(ref mut op1)
1022            | Op::Not(ref mut op1)
1023            | Op::Jmp(ref mut op1)
1024            | Op::Vexit(ref mut op1)
1025            | Op::Vxcall(ref mut op1)
1026            | Op::Vemit(ref mut op1)
1027            | Op::Vpinr(ref mut op1)
1028            | Op::Vpinw(ref mut op1) => vec![op1],
1029            Op::Mov(ref mut op1, ref mut op2)
1030            | Op::Movsx(ref mut op1, ref mut op2)
1031            | Op::Add(ref mut op1, ref mut op2)
1032            | Op::Sub(ref mut op1, ref mut op2)
1033            | Op::Mul(ref mut op1, ref mut op2)
1034            | Op::Mulhi(ref mut op1, ref mut op2)
1035            | Op::Imul(ref mut op1, ref mut op2)
1036            | Op::Imulhi(ref mut op1, ref mut op2)
1037            | Op::Shr(ref mut op1, ref mut op2)
1038            | Op::Shl(ref mut op1, ref mut op2)
1039            | Op::Xor(ref mut op1, ref mut op2)
1040            | Op::Or(ref mut op1, ref mut op2)
1041            | Op::And(ref mut op1, ref mut op2)
1042            | Op::Ror(ref mut op1, ref mut op2)
1043            | Op::Rol(ref mut op1, ref mut op2) => vec![op1, op2],
1044            Op::Str(ref mut op1, ref mut op2, ref mut op3)
1045            | Op::Ldd(ref mut op1, ref mut op2, ref mut op3)
1046            | Op::Div(ref mut op1, ref mut op2, ref mut op3)
1047            | Op::Rem(ref mut op1, ref mut op2, ref mut op3)
1048            | Op::Idiv(ref mut op1, ref mut op2, ref mut op3)
1049            | Op::Irem(ref mut op1, ref mut op2, ref mut op3)
1050            | Op::Tg(ref mut op1, ref mut op2, ref mut op3)
1051            | Op::Tge(ref mut op1, ref mut op2, ref mut op3)
1052            | Op::Te(ref mut op1, ref mut op2, ref mut op3)
1053            | Op::Tne(ref mut op1, ref mut op2, ref mut op3)
1054            | Op::Tl(ref mut op1, ref mut op2, ref mut op3)
1055            | Op::Tle(ref mut op1, ref mut op2, ref mut op3)
1056            | Op::Tug(ref mut op1, ref mut op2, ref mut op3)
1057            | Op::Tuge(ref mut op1, ref mut op2, ref mut op3)
1058            | Op::Tul(ref mut op1, ref mut op2, ref mut op3)
1059            | Op::Tule(ref mut op1, ref mut op2, ref mut op3)
1060            | Op::Ifs(ref mut op1, ref mut op2, ref mut op3)
1061            | Op::Js(ref mut op1, ref mut op2, ref mut op3)
1062            | Op::Vpinrm(ref mut op1, ref mut op2, ref mut op3)
1063            | Op::Vpinwm(ref mut op1, ref mut op2, ref mut op3) => vec![op1, op2, op3],
1064        }
1065    }
1066
1067    /// Returns if the instruction is volatile
1068    pub fn is_volatile(&self) -> bool {
1069        matches!(
1070            self,
1071            Op::Sfence
1072                | Op::Lfence
1073                | Op::Vemit(_)
1074                | Op::Vpinr(_)
1075                | Op::Vpinw(_)
1076                | Op::Vpinrm(_, _, _)
1077                | Op::Vpinwm(_, _, _)
1078        )
1079    }
1080
1081    /// Returns if the instruction is a branching operation
1082    pub fn is_branching(&self) -> bool {
1083        matches!(
1084            self,
1085            Op::Js(_, _, _) | Op::Jmp(_) | Op::Vexit(_) | Op::Vxcall(_)
1086        )
1087    }
1088}
1089
/// Basic block containing a linear sequence of VTIL instructions
///
/// NOTE(review): with the `serde` feature enabled, the derive serializes the
/// fields in declaration order — presumably this order mirrors the VTIL
/// container's binary layout, so confirm before reordering fields.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
pub struct BasicBlock {
    /// The virtual instruction pointer at entry
    pub vip: Vip,
    /// The stack pointer offset at entry
    pub sp_offset: i64,
    /// The stack instance index at entry
    pub sp_index: u32,
    /// Last temporary index used; [`BasicBlock::tmp`] takes this value as the
    /// new temporary's id and then increments it
    pub last_temporary_index: u32,
    /// List of instructions contained in this basic block (in order)
    pub instructions: Vec<Instruction>,
    /// Predecessor basic block entrypoint(s)
    pub prev_vip: Vec<Vip>,
    /// Successor basic block entrypoint(s)
    pub next_vip: Vec<Vip>,
}
1109
1110impl BasicBlock {
1111    /// Allocate a temporary register for this basic block
1112    pub fn tmp(&mut self, bit_count: i32) -> RegisterDesc {
1113        let reg = RegisterDesc {
1114            flags: RegisterFlags::LOCAL,
1115            combined_id: self.last_temporary_index as u64,
1116            bit_count,
1117            bit_offset: 0,
1118        };
1119        self.last_temporary_index += 1;
1120        reg
1121    }
1122
1123    /// Returns if the block is complete: terminated by a branching instruction
1124    pub fn is_complete(&self) -> bool {
1125        let instructions = &self.instructions;
1126        !instructions.is_empty() && instructions[instructions.len() - 1].op.is_branching()
1127    }
1128
1129    /// Makes a new [`BasicBlock`] connected to the current block, at the specified
1130    /// instruction pointer
1131    ///
1132    /// # Panics
1133    ///
1134    /// Panics if the block is incomplete, or `entry` is an invalid instruction
1135    /// pointer
1136    pub fn fork(&mut self, entry: Vip) -> BasicBlock {
1137        assert!(self.is_complete());
1138        assert!(entry != Vip::invalid());
1139
1140        let basic_block = BasicBlock {
1141            vip: entry,
1142            sp_offset: 0,
1143            sp_index: 0,
1144            last_temporary_index: 0,
1145            instructions: vec![],
1146            prev_vip: vec![self.vip],
1147            next_vip: vec![],
1148        };
1149
1150        self.next_vip.push(entry);
1151
1152        basic_block
1153    }
1154}
1155
/// Alias for [`RoutineConvention`] for consistent naming
///
/// Subroutine calling conventions share the exact same layout as routine
/// calling conventions; this alias exists purely for readability at use sites.
pub type SubroutineConvention = RoutineConvention;
1158
1159/// VTIL routine container
1160#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
1161#[derive(Debug)]
1162pub struct Routine {
1163    /// Header containing metadata about the VTIL container
1164    pub header: Header,
1165    /// The entry virtual instruction pointer for this VTIL routine
1166    pub vip: Vip,
1167    /// Metadata regarding the calling conventions of the VTIL routine
1168    pub routine_convention: RoutineConvention,
1169    /// Metadata regarding the calling conventions of the VTIL subroutine
1170    pub subroutine_convention: SubroutineConvention,
1171    /// All special subroutine calling conventions in the top-level VTIL routine
1172    pub spec_subroutine_conventions: Vec<SubroutineConvention>,
1173    /// Reachable [`BasicBlock`]s generated during a code-discovery analysis pass
1174    pub explored_blocks: IndexMap<Vip, BasicBlock>,
1175}