vminer_core/arch/mod.rs

pub mod aarch64;
pub use aarch64::Aarch64;

pub mod runtime;
pub use runtime::Architecture as RuntimeArchitecture;

pub mod x86_64;
pub use x86_64::X86_64;

use crate::{
    addr::MmuEntry, mask, MemoryAccessResult, PhysicalAddress, TranslationResult, VcpuError,
    VcpuResult, VirtualAddress,
};
14
/// Tests every page-aligned physical address in the first 4 GiB until one
/// satisfies the given predicate.
fn try_all_addresses(test: impl Fn(PhysicalAddress) -> bool) -> Option<PhysicalAddress> {
    log::debug!("Trying all addresses to guess kernel PGD");

    for addr in (0..u32::MAX as u64).step_by(0x1000) {
        let addr = PhysicalAddress(addr);
        if test(addr) {
            return Some(addr);
        }
    }

    None
}
27
/// Builds a predicate that checks whether a candidate kernel PGD can
/// translate a set of known kernel virtual addresses to valid physical
/// memory.
fn make_address_test<'a>(
    vcpus: &'a (impl HasVcpus + ?Sized),
    memory: &'a (impl crate::Memory + ?Sized),
    use_per_cpu: bool,
    additional: &'a [&[VirtualAddress]],
) -> impl Fn(PhysicalAddress) -> bool + 'a {
    let mut addresses = additional.concat();

    if use_per_cpu {
        addresses.reserve(vcpus.vcpus_count());

        for vcpu in vcpus.iter_vcpus() {
            match vcpus.kernel_per_cpu(vcpu) {
                Ok(Some(addr)) => addresses.push(addr),
                Ok(None) => (),
                Err(err) => log::warn!("Failed to get kernel per cpu address: {err}"),
            }
        }
    }

    move |addr| {
        addresses.iter().all(|&test_addr| {
            match vcpus.arch().virtual_to_physical(memory, addr, test_addr) {
                Ok(addr) => memory.is_valid(addr, 1),
                _ => false,
            }
        })
    }
}
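
// A minimal sketch (not part of the original API) of how the two helpers
// above can be combined to guess a kernel PGD, in the way an
// `Architecture::find_kernel_pgd` implementation might: build the predicate
// from the known kernel addresses, then brute-force every page-aligned
// physical address against it.
#[allow(dead_code)]
fn find_kernel_pgd_sketch(
    vcpus: &(impl HasVcpus + ?Sized),
    memory: &(impl crate::Memory + ?Sized),
    use_per_cpu: bool,
    additional: &[&[VirtualAddress]],
) -> Option<PhysicalAddress> {
    // A candidate PGD is accepted if it translates all known addresses
    let test = make_address_test(vcpus, memory, use_per_cpu, additional);
    try_all_addresses(test)
}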
57
/// The identifier of a vCPU within a VM.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct VcpuId(pub usize);

/// An iterator over the vCPU identifiers of a VM.
#[derive(Debug, Clone)]
pub struct VcpuIterator(core::ops::Range<usize>);

impl Iterator for VcpuIterator {
    type Item = VcpuId;

    #[inline]
    fn next(&mut self) -> Option<VcpuId> {
        self.0.next().map(VcpuId)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }
}

impl ExactSizeIterator for VcpuIterator {
    #[inline]
    fn len(&self) -> usize {
        self.0.len()
    }
}
84
/// Access to the vCPUs of a VM and their registers.
pub trait HasVcpus {
    type Arch: Architecture;

    fn arch(&self) -> Self::Arch;

    fn vcpus_count(&self) -> usize;

    #[inline]
    fn iter_vcpus(&self) -> VcpuIterator {
        VcpuIterator(0..self.vcpus_count())
    }

    fn registers(&self, vcpu: VcpuId) -> VcpuResult<<Self::Arch as Architecture>::Registers>;

    fn special_registers(
        &self,
        vcpu: VcpuId,
    ) -> VcpuResult<<Self::Arch as Architecture>::SpecialRegisters>;

    fn other_registers(
        &self,
        vcpu: VcpuId,
    ) -> VcpuResult<<Self::Arch as Architecture>::OtherRegisters>;

    #[inline]
    fn instruction_pointer(&self, vcpu: VcpuId) -> VcpuResult<VirtualAddress> {
        self.arch().instruction_pointer(self, vcpu)
    }

    #[inline]
    fn stack_pointer(&self, vcpu: VcpuId) -> VcpuResult<VirtualAddress> {
        self.arch().stack_pointer(self, vcpu)
    }

    #[inline]
    fn base_pointer(&self, vcpu: VcpuId) -> VcpuResult<Option<VirtualAddress>> {
        self.arch().base_pointer(self, vcpu)
    }

    #[inline]
    fn pgd(&self, vcpu: VcpuId) -> VcpuResult<PhysicalAddress> {
        self.arch().pgd(self, vcpu)
    }

    #[inline]
    fn kernel_per_cpu(&self, vcpu: VcpuId) -> VcpuResult<Option<VirtualAddress>> {
        self.arch().kernel_per_cpu(self, vcpu)
    }
}
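
// An illustrative helper (not in the original API) showing how `HasVcpus`
// is typically consumed: iterate over every vCPU and collect the
// instruction pointers that can be read, skipping vCPUs that return an
// error.
#[allow(dead_code)]
fn collect_instruction_pointers(
    vcpus: &(impl HasVcpus + ?Sized),
) -> alloc::vec::Vec<VirtualAddress> {
    vcpus
        .iter_vcpus()
        .filter_map(|vcpu| vcpus.instruction_pointer(vcpu).ok())
        .collect()
}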
134
/// A view of a set of vCPUs that assumes they belong to an x86_64 guest.
#[derive(Debug)]
pub struct AssumeX86_64<'a, Vcpus: ?Sized>(pub &'a Vcpus);

impl<Vcpus: HasVcpus + ?Sized> HasVcpus for AssumeX86_64<'_, Vcpus> {
    type Arch = X86_64;

    #[inline]
    fn arch(&self) -> Self::Arch {
        X86_64
    }

    #[inline]
    fn vcpus_count(&self) -> usize {
        self.0.vcpus_count()
    }

    #[inline]
    fn registers(&self, vcpu: VcpuId) -> VcpuResult<<Self::Arch as Architecture>::Registers> {
        match self.0.registers(vcpu)?.into() {
            runtime::Registers::X86_64(regs) => Ok(regs),
            _ => Err(VcpuError::BadArchitecture),
        }
    }

    #[inline]
    fn special_registers(
        &self,
        vcpu: VcpuId,
    ) -> VcpuResult<<Self::Arch as Architecture>::SpecialRegisters> {
        match self.0.special_registers(vcpu)?.into() {
            runtime::SpecialRegisters::X86_64(regs) => Ok(regs),
            _ => Err(VcpuError::BadArchitecture),
        }
    }

    #[inline]
    fn other_registers(
        &self,
        vcpu: VcpuId,
    ) -> VcpuResult<<Self::Arch as Architecture>::OtherRegisters> {
        match self.0.other_registers(vcpu)?.into() {
            runtime::OtherRegisters::X86_64(regs) => Ok(regs),
            _ => Err(VcpuError::BadArchitecture),
        }
    }

    // The other methods are intentionally not forwarded: their default
    // implementations go through the register accessors above, which check
    // the architecture.
}
183
/// A view of a set of vCPUs that assumes they belong to an AArch64 guest.
#[derive(Debug)]
pub struct AssumeAarch64<'a, Vcpus: ?Sized>(pub &'a Vcpus);

impl<Vcpus: HasVcpus + ?Sized> HasVcpus for AssumeAarch64<'_, Vcpus> {
    type Arch = Aarch64;

    #[inline]
    fn arch(&self) -> Self::Arch {
        Aarch64
    }

    #[inline]
    fn vcpus_count(&self) -> usize {
        self.0.vcpus_count()
    }

    #[inline]
    fn registers(&self, vcpu: VcpuId) -> VcpuResult<<Self::Arch as Architecture>::Registers> {
        match self.0.registers(vcpu)?.into() {
            runtime::Registers::Aarch64(regs) => Ok(regs),
            _ => Err(VcpuError::BadArchitecture),
        }
    }

    #[inline]
    fn special_registers(
        &self,
        vcpu: VcpuId,
    ) -> VcpuResult<<Self::Arch as Architecture>::SpecialRegisters> {
        match self.0.special_registers(vcpu)?.into() {
            runtime::SpecialRegisters::Aarch64(regs) => Ok(regs),
            _ => Err(VcpuError::BadArchitecture),
        }
    }

    #[inline]
    fn other_registers(
        &self,
        vcpu: VcpuId,
    ) -> VcpuResult<<Self::Arch as Architecture>::OtherRegisters> {
        match self.0.other_registers(vcpu)?.into() {
            runtime::OtherRegisters::Aarch64(regs) => Ok(regs),
            _ => Err(VcpuError::BadArchitecture),
        }
    }

    // The other methods are intentionally not forwarded: their default
    // implementations go through the register accessors above, which check
    // the architecture.
}
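
// An illustrative sketch (hypothetical helper): when the guest is known to
// be x86_64, viewing a generic vCPU set through `AssumeX86_64` gives access
// to the typed x86_64 accessors. If the guest actually runs another
// architecture, the register accessors return `VcpuError::BadArchitecture`.
#[allow(dead_code)]
fn instruction_pointer_assuming_x86_64(
    vcpus: &(impl HasVcpus + ?Sized),
    vcpu: VcpuId,
) -> VcpuResult<VirtualAddress> {
    AssumeX86_64(vcpus).instruction_pointer(vcpu)
}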
232
impl<V: HasVcpus + ?Sized> HasVcpus for alloc::sync::Arc<V> {
    type Arch = V::Arch;

    #[inline]
    fn arch(&self) -> Self::Arch {
        (**self).arch()
    }

    #[inline]
    fn vcpus_count(&self) -> usize {
        (**self).vcpus_count()
    }

    #[inline]
    fn registers(&self, vcpu: VcpuId) -> VcpuResult<<Self::Arch as Architecture>::Registers> {
        (**self).registers(vcpu)
    }

    #[inline]
    fn special_registers(
        &self,
        vcpu: VcpuId,
    ) -> VcpuResult<<Self::Arch as Architecture>::SpecialRegisters> {
        (**self).special_registers(vcpu)
    }

    #[inline]
    fn other_registers(
        &self,
        vcpu: VcpuId,
    ) -> VcpuResult<<Self::Arch as Architecture>::OtherRegisters> {
        (**self).other_registers(vcpu)
    }

    #[inline]
    fn instruction_pointer(&self, vcpu: VcpuId) -> VcpuResult<VirtualAddress> {
        (**self).instruction_pointer(vcpu)
    }

    #[inline]
    fn stack_pointer(&self, vcpu: VcpuId) -> VcpuResult<VirtualAddress> {
        (**self).stack_pointer(vcpu)
    }

    #[inline]
    fn base_pointer(&self, vcpu: VcpuId) -> VcpuResult<Option<VirtualAddress>> {
        (**self).base_pointer(vcpu)
    }

    #[inline]
    fn pgd(&self, vcpu: VcpuId) -> VcpuResult<PhysicalAddress> {
        (**self).pgd(vcpu)
    }

    #[inline]
    fn kernel_per_cpu(&self, vcpu: VcpuId) -> VcpuResult<Option<VirtualAddress>> {
        (**self).kernel_per_cpu(vcpu)
    }
}
292
/// A hardware architecture
pub trait Architecture {
    type Endian: crate::Endianness;

    type Registers: Into<runtime::Registers>;
    type SpecialRegisters: Into<runtime::SpecialRegisters>;
    type OtherRegisters: Into<runtime::OtherRegisters>;

    fn into_runtime(self) -> runtime::Architecture;

    fn endianness(&self) -> Self::Endian;

    fn virtual_to_physical<M: crate::Memory + ?Sized>(
        &self,
        memory: &M,
        mmu_addr: PhysicalAddress,
        addr: VirtualAddress,
    ) -> TranslationResult<PhysicalAddress>;

    fn find_kernel_pgd<M: crate::Memory + ?Sized>(
        &self,
        memory: &M,
        vcpus: &(impl HasVcpus<Arch = Self> + ?Sized),
        use_per_cpu: bool,
        additional: &[VirtualAddress],
    ) -> crate::VmResult<Option<PhysicalAddress>>;

    fn find_in_kernel_memory_raw<M: crate::Memory + ?Sized>(
        &self,
        memory: &M,
        mmu_addr: PhysicalAddress,
        base_search_addr: VirtualAddress,
        finder: &memchr::memmem::Finder,
        buf: &mut [u8],
    ) -> MemoryAccessResult<Option<VirtualAddress>>;

    fn find_in_kernel_memory<M: crate::Memory + ?Sized>(
        &self,
        memory: &M,
        mmu_addr: PhysicalAddress,
        needle: &[u8],
    ) -> MemoryAccessResult<Option<VirtualAddress>>;

    fn kernel_base(&self) -> VirtualAddress;

    fn instruction_pointer<Vcpus: HasVcpus<Arch = Self> + ?Sized>(
        &self,
        vcpus: &Vcpus,
        vcpu: VcpuId,
    ) -> VcpuResult<VirtualAddress>;

    fn stack_pointer<Vcpus: HasVcpus<Arch = Self> + ?Sized>(
        &self,
        vcpus: &Vcpus,
        vcpu: VcpuId,
    ) -> VcpuResult<VirtualAddress>;

    fn base_pointer<Vcpus: HasVcpus<Arch = Self> + ?Sized>(
        &self,
        vcpus: &Vcpus,
        vcpu: VcpuId,
    ) -> VcpuResult<Option<VirtualAddress>>;

    fn pgd<Vcpus: HasVcpus<Arch = Self> + ?Sized>(
        &self,
        vcpus: &Vcpus,
        vcpu: VcpuId,
    ) -> VcpuResult<PhysicalAddress>;

    fn kernel_per_cpu<Vcpus: HasVcpus<Arch = Self> + ?Sized>(
        &self,
        vcpus: &Vcpus,
        vcpu: VcpuId,
    ) -> VcpuResult<Option<VirtualAddress>>;
}
370
/// The description of how an MMU works
///
/// All architectures have similar MMUs with multiple levels of tables, so
/// this trait tries to abstract that, giving configurations the ability to
/// adapt to each architecture.
///
/// Using a trait here enables many compile-time optimisations.
trait MmuDesc {
    /// The number of significant bits in an address.
    const ADDR_BITS: u32 = 48;

    /// The bit offset at which the table index can be found, for each level.
    ///
    /// The boolean should be `true` if a large page can be encountered at
    /// this level.
    const LEVELS: &'static [(u32, bool)] = &[(39, false), (30, true), (21, true), (12, false)];

    /// Returns true if an entry is valid
    fn is_valid(mmu_entry: MmuEntry) -> bool;

    /// Returns true if an entry is a "large" one.
    ///
    /// This is required to support 2M pages for example. If a large page is
    /// encountered, address translation stops here.
    fn is_large(mmu_entry: MmuEntry) -> bool;
}
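
// A minimal sketch of a `MmuDesc` implementation (the real ones live in the
// per-architecture modules). The bit positions below assume x86_64-style
// entries: bit 0 is the "present" bit and bit 7 (PS) marks a large page.
#[allow(dead_code)]
struct SketchMmuDesc;

impl MmuDesc for SketchMmuDesc {
    // ADDR_BITS and LEVELS keep their defaults, which match 4-level paging
    // with 48-bit addresses.

    #[inline]
    fn is_valid(mmu_entry: MmuEntry) -> bool {
        mmu_entry.0 & 1 != 0
    }

    #[inline]
    fn is_large(mmu_entry: MmuEntry) -> bool {
        mmu_entry.0 & (1 << 7) != 0
    }
}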
397
/// Translates a virtual address to a physical one by walking the
/// translation tables described by `Mmu`.
fn virtual_to_physical<Mmu: MmuDesc, M: crate::Memory + ?Sized>(
    memory: &M,
    mmu_addr: PhysicalAddress,
    addr: VirtualAddress,
) -> TranslationResult<PhysicalAddress> {
    let mut mmu_entry = MmuEntry(mmu_addr.0);

    // This loop is generally unrolled and values are calculated at compile time
    for &(shift, has_huge) in Mmu::LEVELS {
        // First, retrieve the index in the table
        let table_addr = mmu_entry.take_bits(12, Mmu::ADDR_BITS);
        let index = (addr.0 >> shift) & mask(9);

        // Each entry is 64 bits (8 bytes) large. This should probably be
        // changed to support 32 bits.
        memory.read_physical(
            table_addr + 8 * index,
            bytemuck::bytes_of_mut(&mut mmu_entry),
        )?;
        if !Mmu::is_valid(mmu_entry) {
            return Err(crate::TranslationError::Invalid(mmu_entry.0));
        }

        // If we encounter a huge page, we are done
        if has_huge && Mmu::is_large(mmu_entry) {
            let base = mmu_entry.take_bits(shift, Mmu::ADDR_BITS);
            let phys_addr = base + (addr.0 & mask(shift));
            return Ok(phys_addr);
        }
    }

    let phys_addr = mmu_entry.take_bits(12, Mmu::ADDR_BITS) + (addr.0 & mask(12));
    Ok(phys_addr)
}
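
// Worked example of the index arithmetic above, assuming the default
// `LEVELS` and an arbitrary kernel address addr = 0xffff_8000_0123_4567:
//
//   (addr >> 39) & mask(9)  == 0x100  // level 0 index
//   (addr >> 30) & mask(9)  == 0x000  // level 1 index
//   (addr >> 21) & mask(9)  == 0x009  // level 2 index
//   (addr >> 12) & mask(9)  == 0x034  // level 3 index
//    addr        & mask(12) == 0x567  // offset within the 4 KiB page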
432
/// This is a recursive function to walk the translation table.
///
/// The buffer is used to avoid allocating a new one for each entry.
fn find_in_kernel_memory_inner<Mmu: MmuDesc, M: crate::Memory + ?Sized>(
    memory: &M,
    table_addr: PhysicalAddress,
    base_search_addr: VirtualAddress,
    finder: &memchr::memmem::Finder,
    buf: &mut [u8],
    levels: &[(u32, bool)],
) -> MemoryAccessResult<Option<VirtualAddress>> {
    let (shift, has_large, rest) = match levels {
        [] => return Ok(None),
        [(shift, has_large), rest @ ..] => (*shift, *has_large, rest),
    };

    let mut table = [MmuEntry(0u64); 512];
    match memory.read_physical(table_addr, bytemuck::bytes_of_mut(&mut table)) {
        Err(crate::MemoryAccessError::OutOfBounds) => return Ok(None),
        Err(err) => return Err(err),
        _ => (),
    }
    let page_size = 1 << shift;

    // The search address can be split in three parts:
    // - A prefix that will be used to build the final address
    // - An index into the current level to start searching from
    // - The rest of the address, which is given to the next level
    let prefix = VirtualAddress(base_search_addr.0 & !mask(shift + 9));
    let base_index = ((base_search_addr.0 >> shift) & mask(9)) as usize;
    let search_rest = base_search_addr.0 & mask(shift);

    // Iterate over the valid entries
    for (index, entry) in table
        .into_iter()
        .enumerate()
        .skip(base_index)
        .filter(|(_, mmu_entry)| Mmu::is_valid(*mmu_entry))
    {
        let base_addr = prefix + index as u64 * page_size;
        let offset = if index == base_index { search_rest } else { 0 };

        if rest.is_empty() || (has_large && Mmu::is_large(entry)) {
            // If this is the last level or if we encountered a large page, look
            // for the pattern in memory
            let addr = entry.take_bits(shift, Mmu::ADDR_BITS);
            match memory.search(addr + offset, page_size - offset, finder, buf) {
                Ok(Some(i)) => return Ok(Some(base_addr + offset + i)),
                Ok(None) | Err(crate::MemoryAccessError::OutOfBounds) => (),
                Err(err) => return Err(err),
            }
        } else {
            // Else call ourselves recursively
            let table_addr = entry.take_bits(12, Mmu::ADDR_BITS);
            let base_search_addr = base_addr + offset;
            let result = find_in_kernel_memory_inner::<Mmu, M>(
                memory,
                table_addr,
                base_search_addr,
                finder,
                buf,
                rest,
            )?;
            if let Some(addr) = result {
                return Ok(Some(addr));
            }
        }
    }

    Ok(None)
}
504
/// Find a pattern in kernel memory by walking the translation table starting
/// from the given address.
///
/// This will probably miss the pattern if it spans multiple pages.
fn find_in_kernel_memory_raw<Mmu: MmuDesc, M: crate::Memory + ?Sized>(
    memory: &M,
    mmu_addr: PhysicalAddress,
    base_search_addr: VirtualAddress,
    finder: &memchr::memmem::Finder,
    buf: &mut [u8],
) -> MemoryAccessResult<Option<VirtualAddress>> {
    let table_addr = MmuEntry(mmu_addr.0).take_bits(12, Mmu::ADDR_BITS);

    find_in_kernel_memory_inner::<Mmu, M>(
        memory,
        table_addr,
        base_search_addr,
        finder,
        buf,
        Mmu::LEVELS,
    )
}
527
/// Find a pattern in kernel memory by walking the translation table starting
/// from the given address.
///
/// This will probably miss the pattern if it spans multiple pages.
fn find_in_kernel_memory<Mmu: MmuDesc, M: crate::Memory + ?Sized>(
    memory: &M,
    mmu_addr: PhysicalAddress,
    needle: &[u8],
    base_search_addr: VirtualAddress,
) -> MemoryAccessResult<Option<VirtualAddress>> {
    // Scratch buffer reused for the whole walk: a 2 MiB chunk plus room for
    // the needle.
    let mut buf = alloc::vec![0; (1 << 21) + needle.len()];
    let finder = memchr::memmem::Finder::new(needle);

    find_in_kernel_memory_raw::<Mmu, M>(memory, mmu_addr, base_search_addr, &finder, &mut buf)
}
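
// A hypothetical usage sketch (not part of the module): searching for the
// start of the Linux banner string from the kernel base of a known MMU
// configuration. The needle and starting point are illustrative; `Mmu` is
// any `MmuDesc` implementation.
#[allow(dead_code)]
fn find_linux_banner_sketch<Mmu: MmuDesc, M: crate::Memory + ?Sized>(
    memory: &M,
    mmu_addr: PhysicalAddress,
    kernel_base: VirtualAddress,
) -> MemoryAccessResult<Option<VirtualAddress>> {
    find_in_kernel_memory::<Mmu, M>(memory, mmu_addr, b"Linux version ", kernel_base)
}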