//! x86 32-bit PAE architecture definition (memflow/architecture/x86/x32_pae.rs).

use super::{
2 super::{ArchitectureObj, Endianess},
3 X86Architecture, X86VirtualTranslate,
4};
5
6use crate::mem::virt_translate::mmu::ArchMmuDef;
7
8use crate::types::Address;
9
/// Architecture specification for 32-bit x86 with PAE (Physical Address
/// Extension) paging enabled.
///
/// PAE translates 32-bit virtual addresses through a 3-level structure
/// (PDPT -> page directory -> page table) into a 36-bit physical address
/// space using 64-bit page table entries.
pub(super) static ARCH_SPEC: X86Architecture = X86Architecture {
    bits: 32,
    mmu: ArchMmuDef {
        // 32-bit VA layout: 2-bit PDPT index, 9-bit PD index, 9-bit PT index,
        // 12-bit page offset (2 + 9 + 9 + 12 = 32).
        virtual_address_splits: &[2, 9, 9, 12],
        // Translation may legally terminate after step 2 (a 2 MiB large page)
        // or after step 3 (a regular 4 KiB page).
        valid_final_page_steps: &[2, 3],
        // PAE widens physical addressing to 36 bits.
        address_space_bits: 36,
        endianess: Endianess::LittleEndian,
        // Virtual addresses stay 4 bytes wide, but PAE PTEs are 8 bytes.
        addr_size: 4,
        pte_size: 8,
        // Standard x86 PTE flag positions: present = bit 0.
        present_bit: |a| a.bit_at(0),
        // Writable = bit 1; `pb` appears to carry the bit accumulated from
        // parent levels down the hierarchy — TODO confirm against ArchMmuDef.
        writeable_bit: |a, pb| pb || a.bit_at(1),
        // No-execute (NX/XD) = bit 63 of the 64-bit PAE entry.
        nx_bit: |a, pb| pb || a.bit_at(63),
        // Page-size (PS) = bit 7, marking a large-page mapping.
        large_page_bit: |a| a.bit_at(7),
    }
    .into_spec(),
};
26
27pub static ARCH: ArchitectureObj = &ARCH_SPEC;
28
29pub fn new_translator(dtb: Address) -> X86VirtualTranslate {
30 X86VirtualTranslate::new(&ARCH_SPEC, dtb)
31}
32
#[cfg(test)]
mod tests {
    use crate::mem::virt_translate::mmu::ArchMmuSpec;
    use crate::types::{mem, size, Address};

    /// Convenience accessor for the PAE MMU spec under test.
    fn get_mmu_spec() -> &'static ArchMmuSpec {
        &super::ARCH_SPEC.mmu
    }

    /// PTE address masks keep physical bits up to bit 35 (36-bit address
    /// space). The first step additionally clears bits 0-4 (32-byte-aligned
    /// PDPT base); later steps clear the 12 page-offset bits.
    #[test]
    fn x86_pae_pte_bitmasks() {
        let spec = get_mmu_spec();
        let all_set = Address::invalid();

        assert_eq!(spec.pte_addr_mask(all_set, 0), Address::bit_mask(5..=35).to_umem());
        assert_eq!(spec.pte_addr_mask(all_set, 1), Address::bit_mask(12..=35).to_umem());
        assert_eq!(spec.pte_addr_mask(all_set, 2), Address::bit_mask(12..=35).to_umem());
    }

    /// Page-table sizes per level: the PDPT holds 4 entries of 8 bytes
    /// (32 bytes); deeper tables occupy a full 4 KiB page.
    #[test]
    fn x86_pae_pte_leaf_size() {
        let spec = get_mmu_spec();

        assert_eq!(spec.pt_leaf_size(0), 32);
        assert_eq!(spec.pt_leaf_size(1), size::kb(4));
    }

    /// Page sizes by termination level: 4 KiB regular pages and 2 MiB
    /// large pages.
    #[test]
    fn x86_pae_page_size_level() {
        let spec = get_mmu_spec();

        assert_eq!(spec.page_size_level(1), mem::kb(4));
        assert_eq!(spec.page_size_level(2), mem::mb(2));
    }
}