// vmi_arch_amd64/lib.rs

1//! AMD64 architecture definitions.
2
3mod address;
4mod cr;
5mod descriptor;
6mod dr;
7mod efer;
8mod event;
9mod interrupt;
10mod paging;
11mod registers;
12mod rflags;
13mod segment;
14mod translation;
15
16use vmi_core::{
17    AccessContext, AddressContext, Architecture, Gfn, MemoryAccess, Pa, Va, VmiCore, VmiDriver,
18    VmiError,
19};
20use zerocopy::FromBytes;
21
22pub use self::{
23    cr::{ControlRegister, Cr0, Cr2, Cr3, Cr4},
24    descriptor::{Gdtr, Idtr},
25    dr::{Dr0, Dr1, Dr2, Dr3, Dr6, Dr7},
26    efer::MsrEfer,
27    event::{
28        EventCpuId, EventInterrupt, EventIo, EventIoDirection, EventMemoryAccess, EventMonitor,
29        EventReason, EventSinglestep, EventWriteControlRegister, MemoryAccessFlags,
30    },
31    interrupt::{ExceptionVector, Idt, IdtAccess, IdtEntry, Interrupt, InterruptType},
32    paging::{PageTableEntry, PageTableLevel, PagingMode},
33    registers::{GpRegisters, Registers},
34    rflags::Rflags,
35    segment::{
36        DescriptorTable, DescriptorType, Granularity, OperationSize, SegmentAccess,
37        SegmentDescriptor, Selector,
38    },
39    translation::{TranslationEntries, TranslationEntry, VaTranslation},
40};
41
/// AMD64 (x86-64) architecture.
///
/// Marker type implementing [`Architecture`] for 64-bit x86 guests.
/// The implementation assumes 4 KiB base pages and a 4-level paging
/// hierarchy (PML4 → PDPT → PD → PT); see `translate_address`.
#[derive(Debug)]
pub struct Amd64;
45
46impl Architecture for Amd64 {
47    const PAGE_SIZE: u64 = 0x1000;
48    const PAGE_SHIFT: u64 = 12;
49    const PAGE_MASK: u64 = 0xFFFFFFFFFFFFF000;
50
51    const BREAKPOINT: &'static [u8] = &[0xcc];
52
53    type Registers = Registers;
54    type PageTableLevel = PageTableLevel;
55    type Interrupt = Interrupt;
56    type SpecialRegister = ControlRegister;
57
58    type EventMonitor = EventMonitor;
59    type EventReason = EventReason;
60
61    fn gfn_from_pa(pa: Pa) -> Gfn {
62        Gfn(pa.0 >> Self::PAGE_SHIFT)
63    }
64
65    fn pa_from_gfn(gfn: Gfn) -> Pa {
66        Pa(gfn.0 << Self::PAGE_SHIFT)
67    }
68
69    fn pa_offset(pa: Pa) -> u64 {
70        pa.0 & !Self::PAGE_MASK
71    }
72
73    fn va_align_down(va: Va) -> Va {
74        Self::va_align_down_for(va, PageTableLevel::Pt)
75    }
76
77    fn va_align_down_for(va: Va, level: Self::PageTableLevel) -> Va {
78        let mask = match level {
79            PageTableLevel::Pt => !0xfff,
80            PageTableLevel::Pd => !0x1fffff,
81            PageTableLevel::Pdpt => !0x3fffffff,
82            PageTableLevel::Pml4 => !0x7fffffffff,
83        };
84
85        va & mask
86    }
87
88    fn va_align_up(va: Va) -> Va {
89        Self::va_align_up_for(va, PageTableLevel::Pt)
90    }
91
92    fn va_align_up_for(va: Va, level: Self::PageTableLevel) -> Va {
93        let mask = match level {
94            PageTableLevel::Pt => 0xfff,
95            PageTableLevel::Pd => 0x1fffff,
96            PageTableLevel::Pdpt => 0x3fffffff,
97            PageTableLevel::Pml4 => 0x7fffffffff,
98        };
99
100        (va + mask) & !mask
101    }
102
103    fn va_offset(va: Va) -> u64 {
104        Self::va_offset_for(va, PageTableLevel::Pt)
105    }
106
107    fn va_offset_for(va: Va, level: Self::PageTableLevel) -> u64 {
108        match level {
109            // 4KB page (4 * 1024 - 1).
110            PageTableLevel::Pt => va.0 & 0xfff,
111
112            // 2MB page (2 * 1024 * 1024 - 1).
113            PageTableLevel::Pd => va.0 & 0x1fffff,
114
115            // 1GB page (1024 * 1024 * 1024 - 1).
116            PageTableLevel::Pdpt => va.0 & 0x3fffffff,
117
118            // 512GB page (512 * 1024 * 1024 * 1024 - 1).
119            PageTableLevel::Pml4 => va.0 & 0x7fffffffff,
120        }
121    }
122
123    fn va_index(va: Va) -> u64 {
124        Self::va_index_for(va, PageTableLevel::Pt)
125    }
126
127    fn va_index_for(va: Va, level: Self::PageTableLevel) -> u64 {
128        match level {
129            PageTableLevel::Pt => (va.0 >> 12) & 0x1ff,
130            PageTableLevel::Pd => (va.0 >> 21) & 0x1ff,
131            PageTableLevel::Pdpt => (va.0 >> 30) & 0x1ff,
132            PageTableLevel::Pml4 => (va.0 >> 39) & 0x1ff,
133        }
134    }
135
136    fn translate_address<Driver>(vmi: &VmiCore<Driver>, va: Va, root: Pa) -> Result<Pa, VmiError>
137    where
138        Driver: VmiDriver<Architecture = Self>,
139    {
140        // Read the PML4 table
141        let buffer = vmi.read_page(Self::gfn_from_pa(root))?;
142        let page_table = <[PageTableEntry]>::ref_from_bytes(&buffer).unwrap();
143
144        let pml4i = Self::va_index_for(va, PageTableLevel::Pml4) as usize;
145        let pml4e = page_table[pml4i];
146
147        if !pml4e.present() {
148            return Err(VmiError::page_fault((va, root)));
149        }
150
151        if pml4e.large() {
152            return Ok(
153                Self::pa_from_gfn(pml4e.pfn()) + Self::va_offset_for(va, PageTableLevel::Pml4)
154            );
155        }
156
157        // Read the PDPT table
158        let buffer = vmi.read_page(pml4e.pfn())?;
159        let page_table = <[PageTableEntry]>::ref_from_bytes(&buffer).unwrap();
160
161        let pdpti = Self::va_index_for(va, PageTableLevel::Pdpt) as usize;
162        let pdpte = page_table[pdpti];
163
164        if !pdpte.present() {
165            return Err(VmiError::page_fault((va, root)));
166        }
167
168        if pdpte.large() {
169            return Ok(
170                Self::pa_from_gfn(pdpte.pfn()) + Self::va_offset_for(va, PageTableLevel::Pdpt)
171            );
172        }
173
174        // Read the PD table
175        let buffer = vmi.read_page(pdpte.pfn())?;
176        let page_table = <[PageTableEntry]>::ref_from_bytes(&buffer).unwrap();
177
178        let pdi = Self::va_index_for(va, PageTableLevel::Pd) as usize;
179        let pde = page_table[pdi];
180
181        if !pde.present() {
182            return Err(VmiError::page_fault((va, root)));
183        }
184
185        if pde.large() {
186            return Ok(Self::pa_from_gfn(pde.pfn()) + Self::va_offset_for(va, PageTableLevel::Pd));
187        }
188
189        // Read the PT table
190        let buffer = vmi.read_page(pde.pfn())?;
191        let page_table = <[PageTableEntry]>::ref_from_bytes(&buffer).unwrap();
192
193        let pti = Self::va_index_for(va, PageTableLevel::Pt) as usize;
194        let pte = page_table[pti];
195
196        if !pte.present() {
197            return Err(VmiError::page_fault((va, root)));
198        }
199
200        Ok(Self::pa_from_gfn(pte.pfn()) + Self::va_offset_for(va, PageTableLevel::Pt))
201    }
202}
203
impl Amd64 {
    /// Canonicalize a virtual address.
    ///
    /// Masks the address down to its low 48 bits — the range translated by
    /// 4-level paging. NOTE(review): this truncates rather than sign-extends
    /// bit 47, so for kernel-half addresses the result is not "canonical" in
    /// the strict AMD64 sense; it only normalizes the address for a page-table
    /// walk, where bits 48..63 are ignored.
    pub fn va_canonical(va: Va) -> Va {
        // 4-level paging translates 48 bits of virtual address.
        const BITS: u64 = 48;
        const MASK: u64 = (1 << BITS) - 1;

        Va(va.0 & MASK)
    }

    /// Determine the paging mode of the processor based on control register
    /// values.
    ///
    /// # Paging Modes
    ///
    /// - **No Paging**: When paging is disabled (CR0.PG = 0)
    /// - **32-bit Paging**: Used when CR0.PG = 1 and CR4.PAE = 0
    /// - **PAE Paging**: Used when CR0.PG = 1, CR4.PAE = 1, and IA32_EFER.LME = 0
    /// - **4-level Paging**: Used when CR0.PG = 1, CR4.PAE = 1, IA32_EFER.LME = 1, and CR4.LA57 = 0
    /// - **5-level Paging**: Used when CR0.PG = 1, CR4.PAE = 1, IA32_EFER.LME = 1, and CR4.LA57 = 1
    ///
    /// If paging is disabled, the function returns `None`.
    pub fn paging_mode(registers: &Registers) -> Option<PagingMode> {
        // CR0.PG = 0: no paging at all.
        if !registers.cr0.paging() {
            return None;
        }

        // CR0.PG = 1, CR4.PAE = 0: legacy 32-bit paging.
        if !registers.cr4.physical_address_extension() {
            return Some(PagingMode::Legacy);
        }

        // CR4.PAE = 1, IA32_EFER.LME = 0: PAE paging.
        if !registers.msr_efer.long_mode_enable() {
            return Some(PagingMode::PAE);
        }

        // IA32_EFER.LME = 1, CR4.LA57 = 0: 4-level paging.
        if !registers.cr4.linear_address_57_bit() {
            return Some(PagingMode::Ia32e);
        }

        // IA32_EFER.LME = 1, CR4.LA57 = 1: 5-level paging.
        Some(PagingMode::Ia32eLA57)
    }

    /// Retrieves the Interrupt Descriptor Table (IDT) for a specific virtual
    /// CPU.
    ///
    /// Reads an [`Idt`] structure from the guest virtual address in
    /// `IDTR.base`, translated through the address space rooted at `CR3`.
    ///
    /// # Errors
    ///
    /// Returns a [`VmiError`] if the read (or the underlying address
    /// translation) fails.
    pub fn interrupt_descriptor_table<Driver>(
        vmi: &VmiCore<Driver>,
        registers: &Registers,
    ) -> Result<Idt, VmiError>
    where
        Driver: VmiDriver<Architecture = Self>,
    {
        let idtr_base = registers.idtr.base.into();
        vmi.read_struct::<Idt>((idtr_base, registers.cr3.into()))
    }

    /// Performs a page table walk to translate a virtual address to a physical
    /// address.
    ///
    /// This function implements the 4-level paging translation process, walking
    /// through the page tables to convert a virtual address to a physical
    /// address.
    ///
    /// # Process
    ///
    /// 1. Initializes the translation entries vector.
    /// 2. Ensures the virtual address is in canonical form.
    /// 3. Walks through the 4-level paging structure:
    ///    - PML4 (Page Map Level 4)
    ///    - PDPT (Page Directory Pointer Table)
    ///    - PD (Page Directory)
    ///    - PT (Page Table)
    /// 4. At each level:
    ///    - Reads the relevant page table.
    ///    - Extracts the appropriate entry.
    ///    - Checks if the entry is present.
    ///    - Checks if it's a large page (1GB or 2MB).
    ///    - If it's a large page or the last level, calculates the final
    ///      physical address.
    ///    - Otherwise, continues to the next level.
    ///
    /// # Errors
    ///
    /// If any page table read fails, the function returns a [`VaTranslation`]
    /// with the entries collected so far and `None` as the physical
    /// address.
    ///
    /// # Notes
    ///
    /// - This implementation assumes x86-64 4-level paging. It doesn't handle
    ///   5-level paging.
    /// - The function handles large pages (1GB and 2MB) as well as standard 4KB
    ///   pages.
    /// - Each step of the translation is recorded, allowing for detailed
    ///   analysis of the translation process.
    pub fn translation<Driver>(vmi: &VmiCore<Driver>, va: Va, root: Pa) -> VaTranslation
    where
        Driver: VmiDriver<Architecture = Self>,
    {
        let mut entries = TranslationEntries::new();
        // Mask off bits 48..63 so only the translated bits are used below.
        let va = Self::va_canonical(va);

        // Read the PML4 table.
        // Read failures are swallowed on purpose: the partial walk collected
        // so far is returned with `pa: None` instead of an error.
        let buffer = match vmi.read_page(Self::gfn_from_pa(root)) {
            Ok(buffer) => buffer,
            Err(_) => return VaTranslation { entries, pa: None },
        };
        let page_table = <[PageTableEntry]>::ref_from_bytes(&buffer).unwrap();
        let pml4i = Self::va_index_for(va, PageTableLevel::Pml4) as usize;
        let pml4e = page_table[pml4i];

        // Record the entry together with the physical address of the entry
        // itself (table base + index * entry size).
        entries.push(TranslationEntry {
            level: PageTableLevel::Pml4,
            entry: pml4e,
            entry_address: root + (pml4i * size_of::<PageTableEntry>()) as u64,
        });

        if !pml4e.present() {
            return VaTranslation { entries, pa: None };
        }

        if pml4e.large() {
            return VaTranslation {
                entries,
                pa: Some(
                    Self::pa_from_gfn(pml4e.pfn()) + Self::va_offset_for(va, PageTableLevel::Pml4),
                ),
            };
        }

        // Read the PDPT table.
        let buffer = match vmi.read_page(pml4e.pfn()) {
            Ok(buffer) => buffer,
            Err(_) => return VaTranslation { entries, pa: None },
        };
        let page_table = <[PageTableEntry]>::ref_from_bytes(&buffer).unwrap();
        let pdpti = Self::va_index_for(va, PageTableLevel::Pdpt) as usize;
        let pdpte = page_table[pdpti];

        entries.push(TranslationEntry {
            level: PageTableLevel::Pdpt,
            entry: pdpte,
            entry_address: Self::pa_from_gfn(pml4e.pfn())
                + (pdpti * size_of::<PageTableEntry>()) as u64,
        });

        if !pdpte.present() {
            return VaTranslation { entries, pa: None };
        }

        // Large PDPT entry maps a 1 GiB page.
        if pdpte.large() {
            return VaTranslation {
                entries,
                pa: Some(
                    Self::pa_from_gfn(pdpte.pfn()) + Self::va_offset_for(va, PageTableLevel::Pdpt),
                ),
            };
        }

        // Read the PD table.
        let buffer = match vmi.read_page(pdpte.pfn()) {
            Ok(buffer) => buffer,
            Err(_) => return VaTranslation { entries, pa: None },
        };
        let page_table = <[PageTableEntry]>::ref_from_bytes(&buffer).unwrap();
        let pdi = Self::va_index_for(va, PageTableLevel::Pd) as usize;
        let pde = page_table[pdi];

        entries.push(TranslationEntry {
            level: PageTableLevel::Pd,
            entry: pde,
            entry_address: Self::pa_from_gfn(pdpte.pfn())
                + (pdi * size_of::<PageTableEntry>()) as u64,
        });

        if !pde.present() {
            return VaTranslation { entries, pa: None };
        }

        // Large PD entry maps a 2 MiB page.
        if pde.large() {
            return VaTranslation {
                entries,
                pa: Some(
                    Self::pa_from_gfn(pde.pfn()) + Self::va_offset_for(va, PageTableLevel::Pd),
                ),
            };
        }

        // Read the PT table (final level, 4 KiB pages).
        let buffer = match vmi.read_page(pde.pfn()) {
            Ok(buffer) => buffer,
            Err(_) => return VaTranslation { entries, pa: None },
        };
        let page_table = <[PageTableEntry]>::ref_from_bytes(&buffer).unwrap();
        let pti = Self::va_index_for(va, PageTableLevel::Pt) as usize;
        let pte = page_table[pti];

        entries.push(TranslationEntry {
            level: PageTableLevel::Pt,
            entry: pte,
            entry_address: Self::pa_from_gfn(pde.pfn())
                + (pti * size_of::<PageTableEntry>()) as u64,
        });

        // Note: a non-present PTE still yields `pa: Some(..)` here; callers
        // can inspect the recorded PT entry's present bit if they care.
        VaTranslation {
            entries,
            pa: Some(Self::pa_from_gfn(pte.pfn()) + Self::va_offset_for(va, PageTableLevel::Pt)),
        }
    }
}
412
413impl vmi_core::arch::Registers for Registers {
414    type Architecture = Amd64;
415
416    type GpRegisters = GpRegisters;
417
418    fn instruction_pointer(&self) -> u64 {
419        self.rip
420    }
421
422    fn set_instruction_pointer(&mut self, ip: u64) {
423        self.rip = ip;
424    }
425
426    fn stack_pointer(&self) -> u64 {
427        self.rsp
428    }
429
430    fn set_stack_pointer(&mut self, sp: u64) {
431        self.rsp = sp;
432    }
433
434    fn result(&self) -> u64 {
435        self.rax
436    }
437
438    fn set_result(&mut self, result: u64) {
439        self.rax = result;
440    }
441
442    fn gp_registers(&self) -> GpRegisters {
443        GpRegisters {
444            rax: self.rax,
445            rbx: self.rbx,
446            rcx: self.rcx,
447            rdx: self.rdx,
448            rbp: self.rbp,
449            rsi: self.rsi,
450            rdi: self.rdi,
451            rsp: self.rsp,
452            r8: self.r8,
453            r9: self.r9,
454            r10: self.r10,
455            r11: self.r11,
456            r12: self.r12,
457            r13: self.r13,
458            r14: self.r14,
459            r15: self.r15,
460            rip: self.rip,
461            rflags: self.rflags,
462        }
463    }
464
465    fn set_gp_registers(&mut self, gp: &GpRegisters) {
466        self.rax = gp.rax;
467        self.rbx = gp.rbx;
468        self.rcx = gp.rcx;
469        self.rdx = gp.rdx;
470        self.rbp = gp.rbp;
471        self.rsi = gp.rsi;
472        self.rdi = gp.rdi;
473        self.rsp = gp.rsp;
474        self.r8 = gp.r8;
475        self.r9 = gp.r9;
476        self.r10 = gp.r10;
477        self.r11 = gp.r11;
478        self.r12 = gp.r12;
479        self.r13 = gp.r13;
480        self.r14 = gp.r14;
481        self.r15 = gp.r15;
482        self.rip = gp.rip;
483        self.rflags = gp.rflags;
484    }
485
486    fn address_width(&self) -> usize {
487        Amd64::paging_mode(self).map_or(0, PagingMode::address_width)
488    }
489
490    fn effective_address_width(&self) -> usize {
491        // IA-32e mode uses a previously unused bit in the CS descriptor.
492        // Bit 53 is defined as the 64-bit (L) flag and is used to select
493        // between 64-bit mode and compatibility mode when IA-32e mode is
494        // active (IA32_EFER.LMA = 1).
495        //
496        // — If CS.L = 0 and IA-32e mode is active, the processor is running
497        //   in compatibility mode. In this case, CS.D selects the default
498        //   size for data and addresses. If CS.D = 0, the default data and
499        //   address size is 16 bits. If CS.D = 1, the default data and address
500        //   size is 32 bits.
501        //
502        // — If CS.L = 1 and IA-32e mode is active, the only valid setting
503        //   is CS.D = 0. This setting indicates a default operand size of
504        //   32 bits and a default address size of 64 bits. The CS.L = 1 and
505        //   CS.D = 1 bit combination is reserved for future use and a #GP
506        //   fault will be generated on an attempt to use a code segment
507        //   with these bits set in IA-32e mode.
508        //
509        // [Intel SDM Vol. 3A 5.2.1 (Code-Segment Descriptor in 64-bit Mode)]
510
511        match Amd64::paging_mode(self) {
512            Some(PagingMode::Ia32e | PagingMode::Ia32eLA57) if !self.cs.access.long_mode() => 4,
513            Some(paging_mode) => paging_mode.address_width(),
514            _ => 0,
515        }
516    }
517
518    fn access_context(&self, va: Va) -> AccessContext {
519        self.address_context(va).into()
520    }
521
522    fn address_context(&self, va: Va) -> AddressContext {
523        (va, self.cr3.into()).into()
524    }
525
526    fn translation_root(&self, _va: Va) -> Pa {
527        self.cr3.into()
528    }
529
530    fn return_address<Driver>(&self, vmi: &VmiCore<Driver>) -> Result<Va, VmiError>
531    where
532        Driver: VmiDriver,
533    {
534        vmi.read_va(
535            (self.rsp.into(), self.cr3.into()),
536            self.effective_address_width(),
537        )
538    }
539}
540
impl vmi_core::arch::EventMemoryAccess for EventMemoryAccess {
    type Architecture = Amd64;

    // Physical address that triggered the memory-access event.
    fn pa(&self) -> Pa {
        self.pa
    }

    // Virtual address that triggered the memory-access event.
    fn va(&self) -> Va {
        self.va
    }

    // Kind of access (read/write/execute) that triggered the event.
    fn access(&self) -> MemoryAccess {
        self.access
    }
}
556
impl vmi_core::arch::EventInterrupt for EventInterrupt {
    type Architecture = Amd64;

    // Guest frame number associated with the interrupt event.
    fn gfn(&self) -> Gfn {
        self.gfn
    }
}
564
565impl vmi_core::arch::EventReason for EventReason {
566    type Architecture = Amd64;
567
568    fn as_memory_access(
569        &self,
570    ) -> Option<&impl vmi_core::arch::EventMemoryAccess<Architecture = Amd64>> {
571        match self {
572            EventReason::MemoryAccess(memory_access) => Some(memory_access),
573            _ => None,
574        }
575    }
576
577    fn as_interrupt(&self) -> Option<&impl vmi_core::arch::EventInterrupt<Architecture = Amd64>> {
578        match self {
579            EventReason::Interrupt(interrupt) => Some(interrupt),
580            _ => None,
581        }
582    }
583
584    fn as_software_breakpoint(
585        &self,
586    ) -> Option<&impl vmi_core::arch::EventInterrupt<Architecture = Amd64>> {
587        match self {
588            EventReason::Interrupt(interrupt)
589                if interrupt.interrupt.vector == ExceptionVector::Breakpoint
590                    && interrupt.interrupt.typ == InterruptType::SoftwareException =>
591            {
592                Some(interrupt)
593            }
594            _ => None,
595        }
596    }
597}