vmi_core/lib.rs

//! Core VMI functionality.

pub mod arch;
mod core;
mod ctx;
mod driver;
mod error;
mod event;
mod handler;
pub mod os;
mod page;

use std::{cell::RefCell, num::NonZeroUsize, time::Duration};

use isr_macros::Field;
use lru::LruCache;
use zerocopy::{FromBytes, Immutable, IntoBytes};

pub use self::{
    arch::{Architecture, Registers},
    core::{
        AccessContext, AddressContext, Gfn, Hex, MemoryAccess, MemoryAccessOptions, Pa,
        TranslationMechanism, Va, VcpuId, View, VmiInfo, VmiVa,
    },
    ctx::{VmiContext, VmiOsContext, VmiOsState, VmiProber, VmiSession, VmiState},
    driver::VmiDriver,
    error::{PageFaults, VmiError},
    event::{VmiEvent, VmiEventFlags, VmiEventResponse, VmiEventResponseFlags},
    handler::VmiHandler,
    os::VmiOs,
    page::VmiMappedPage,
};

struct Cache {
    gfn: RefCell<LruCache<Gfn, VmiMappedPage>>,
    v2p: RefCell<LruCache<AccessContext, Pa>>,
}

impl Cache {
    const DEFAULT_SIZE: usize = 8192;

    pub fn new() -> Self {
        Self {
            gfn: RefCell::new(LruCache::new(
                NonZeroUsize::new(Self::DEFAULT_SIZE).unwrap(),
            )),
            v2p: RefCell::new(LruCache::new(
                NonZeroUsize::new(Self::DEFAULT_SIZE).unwrap(),
            )),
        }
    }
}

/// The core functionality for Virtual Machine Introspection (VMI).
pub struct VmiCore<Driver>
where
    Driver: VmiDriver,
{
    driver: Driver,
    cache: Cache,

    read_page_fn: fn(&Self, Gfn) -> Result<VmiMappedPage, VmiError>,
    translate_access_context_fn: fn(&Self, AccessContext) -> Result<Pa, VmiError>,

    read_string_length_limit: RefCell<Option<usize>>,
}

impl<Driver> VmiCore<Driver>
where
    Driver: VmiDriver,
{
    /// Creates a new `VmiCore` instance with the given driver.
    ///
    /// Both the GFN cache and the V2P cache are enabled by default,
    /// each with a capacity of 8192 entries.
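    ///
    /// # Examples
    ///
    /// A minimal construction sketch (`driver` is assumed to come from a
    /// separate driver crate implementing [`VmiDriver`]):
    ///
    /// ```ignore
    /// fn setup<D: VmiDriver>(driver: D) -> Result<VmiCore<D>, VmiError> {
    ///     Ok(VmiCore::new(driver)?
    ///         .with_gfn_cache(4096)   // override the default 8192-entry caches
    ///         .with_v2p_cache(4096))
    /// }
    /// ```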
    pub fn new(driver: Driver) -> Result<Self, VmiError> {
        Ok(Self {
            driver,
            cache: Cache::new(),
            read_page_fn: Self::read_page_cache,
            translate_access_context_fn: Self::translate_access_context_cache,
            read_string_length_limit: RefCell::new(None),
        })
    }

    /// Enables the Guest Frame Number (GFN) cache with the given capacity.
    ///
    /// The GFN cache stores the contents of recently accessed memory pages,
    /// indexed by their GFN. This can significantly improve performance when
    /// repeatedly accessing the same memory regions, as it avoids redundant
    /// reads from the virtual machine.
    ///
    /// When enabled, subsequent calls to [`read_page`] will first check
    /// the cache before querying the driver.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    ///
    /// [`read_page`]: Self::read_page
    pub fn with_gfn_cache(self, size: usize) -> Self {
        Self {
            cache: Cache {
                gfn: RefCell::new(LruCache::new(NonZeroUsize::new(size).unwrap())),
                ..self.cache
            },
            read_page_fn: Self::read_page_cache,
            ..self
        }
    }

    /// Enables the GFN cache.
    ///
    /// See [`with_gfn_cache`] for more details.
    ///
    /// [`with_gfn_cache`]: Self::with_gfn_cache
    pub fn enable_gfn_cache(&mut self) {
        self.read_page_fn = Self::read_page_cache;
    }

    /// Disables the GFN cache.
    ///
    /// Subsequent calls to [`read_page`] will bypass the cache and read
    /// directly from the virtual machine.
    ///
    /// [`read_page`]: Self::read_page
    pub fn disable_gfn_cache(&mut self) {
        self.read_page_fn = Self::read_page_nocache;
    }

    /// Resizes the GFN cache.
    ///
    /// This allows you to adjust the cache size dynamically based on your
    /// performance needs. A larger cache can improve performance for
    /// workloads with high memory locality, but consumes more memory.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    pub fn resize_gfn_cache(&mut self, size: usize) {
        self.cache
            .gfn
            .borrow_mut()
            .resize(NonZeroUsize::new(size).unwrap());
    }

    /// Removes a specific entry from the GFN cache.
    ///
    /// Returns the removed entry if it was present.
    /// This is useful for invalidating cached data that might have
    /// become stale.
    pub fn flush_gfn_cache_entry(&self, gfn: Gfn) -> Option<VmiMappedPage> {
        self.cache.gfn.borrow_mut().pop(&gfn)
    }

    /// Clears the entire GFN cache.
    pub fn flush_gfn_cache(&self) {
        self.cache.gfn.borrow_mut().clear();
    }

    ///// Retrieves metrics about the GFN cache.
    //pub fn gfn_cache_metrics(&self) -> CacheMetrics {
    //    let cache = self.cache.gfn.borrow();
    //    CacheMetrics {
    //        hits: ...,
    //        misses: ...,
    //    }
    //}

    /// Enables the virtual-to-physical (V2P) address translation cache with
    /// the given capacity.
    ///
    /// The V2P cache stores the results of recent address translations,
    /// mapping virtual addresses (represented by [`AccessContext`]) to their
    /// corresponding physical addresses ([`Pa`]). This can significantly
    /// speed up memory access operations, as address translation can be a
    /// relatively expensive operation.
    ///
    /// When enabled, [`translate_access_context`] will consult the cache
    /// before performing a full translation.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    ///
    /// [`translate_access_context`]: Self::translate_access_context
    pub fn with_v2p_cache(self, size: usize) -> Self {
        Self {
            cache: Cache {
                v2p: RefCell::new(LruCache::new(NonZeroUsize::new(size).unwrap())),
                ..self.cache
            },
            translate_access_context_fn: Self::translate_access_context_cache,
            ..self
        }
    }

    /// Enables the V2P cache.
    ///
    /// See [`with_v2p_cache`] for more details.
    ///
    /// [`with_v2p_cache`]: Self::with_v2p_cache
    pub fn enable_v2p_cache(&mut self) {
        self.translate_access_context_fn = Self::translate_access_context_cache;
    }

    /// Disables the V2P cache.
    ///
    /// Subsequent calls to [`translate_access_context`] will bypass the cache
    /// and perform a full address translation every time.
    ///
    /// [`translate_access_context`]: Self::translate_access_context
    pub fn disable_v2p_cache(&mut self) {
        self.translate_access_context_fn = Self::translate_access_context_nocache;
    }

    /// Resizes the V2P cache.
    ///
    /// This allows dynamic adjustment of the cache size to balance
    /// performance and memory usage. A larger cache can lead to better
    /// performance if address translations are frequent and exhibit
    /// good locality.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    pub fn resize_v2p_cache(&mut self, size: usize) {
        self.cache
            .v2p
            .borrow_mut()
            .resize(NonZeroUsize::new(size).unwrap());
    }

    /// Removes a specific entry from the V2P cache.
    ///
    /// Returns the removed entry if it was present.
    /// This can be used to invalidate cached translations that may have
    /// become stale due to changes in the guest's memory mapping.
    pub fn flush_v2p_cache_entry(&self, ctx: AccessContext) -> Option<Pa> {
        self.cache.v2p.borrow_mut().pop(&ctx)
    }

    /// Clears the entire V2P cache.
    ///
    /// This method is crucial for maintaining consistency when handling events.
    /// The guest operating system can modify page tables or other structures
    /// related to address translation between events. Using stale translations
    /// can lead to incorrect memory access and unexpected behavior.
    /// It is recommended to call this method at the beginning of each
    /// [`VmiHandler::handle_event`] loop to ensure that you are working with
    /// the most up-to-date address mappings.
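    ///
    /// For example, flushing at the top of an event callback (a sketch;
    /// `timeout` and `response` are placeholders):
    ///
    /// ```ignore
    /// vmi.wait_for_event(timeout, |event| {
    ///     // Guest page tables may have changed since the last event, so drop
    ///     // any stale virtual-to-physical translations before reading memory.
    ///     vmi.flush_v2p_cache();
    ///     /* ... handle the event ... */
    ///     response
    /// })?;
    /// ```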
    pub fn flush_v2p_cache(&self) {
        self.cache.v2p.borrow_mut().clear();
    }

    ///// Retrieves metrics about the V2P cache.
    //pub fn v2p_cache_metrics(&self) -> CacheMetrics {
    //    let cache = self.cache.v2p.borrow();
    //    CacheMetrics {
    //        hits: ...,
    //        misses: ...,
    //    }
    //}

    /// Sets a limit on the length of strings read by the `read_string` methods.
    /// If the limit is reached, the string will be truncated.
    pub fn with_read_string_length_limit(self, limit_in_bytes: usize) -> Self {
        Self {
            read_string_length_limit: RefCell::new(Some(limit_in_bytes)),
            ..self
        }
    }

    /// Returns the current limit on the length of strings read by the
    /// `read_string` methods.
    pub fn read_string_length_limit(&self) -> Option<usize> {
        *self.read_string_length_limit.borrow()
    }

    /// Sets a limit on the length of strings read by the `read_string` methods.
    ///
    /// This method allows you to set a maximum length (in bytes) for strings
    /// read from the virtual machine's memory. When set, string reading
    /// operations will truncate their results to this limit. This can be
    /// useful for preventing excessively long string reads, which might
    /// impact performance or consume too much memory.
    ///
    /// If the limit is reached during a string read operation, the resulting
    /// string will be truncated to the specified length.
    ///
    /// To remove the limit, call this method with `None`.
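    ///
    /// # Examples
    ///
    /// A short sketch (assuming `vmi` is a [`VmiCore`]):
    ///
    /// ```ignore
    /// vmi.set_read_string_length_limit(Some(4096)); // cap string reads at 4 KiB
    /// assert_eq!(vmi.read_string_length_limit(), Some(4096));
    /// vmi.set_read_string_length_limit(None);       // remove the limit again
    /// ```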
    pub fn set_read_string_length_limit(&self, limit: Option<usize>) {
        *self.read_string_length_limit.borrow_mut() = limit;
    }

    /// Returns the driver used by this `VmiCore` instance.
    pub fn driver(&self) -> &Driver {
        &self.driver
    }

    /// Retrieves information about the virtual machine.
    pub fn info(&self) -> Result<VmiInfo, VmiError> {
        self.driver.info()
    }

    /// Pauses the virtual machine.
    pub fn pause(&self) -> Result<(), VmiError> {
        self.driver.pause()
    }

    /// Resumes the virtual machine.
    pub fn resume(&self) -> Result<(), VmiError> {
        self.driver.resume()
    }

    /// Pauses the virtual machine and returns a guard that will resume it when
    /// dropped.
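    ///
    /// # Examples
    ///
    /// A sketch of pausing around a consistent read (`vcpu` is obtained
    /// elsewhere; the guard resumes the VM when it goes out of scope):
    ///
    /// ```ignore
    /// {
    ///     let _pause = vmi.pause_guard()?;
    ///     let regs = vmi.registers(vcpu)?;
    ///     // ... perform reads that must observe a consistent snapshot ...
    /// } // the VM resumes here
    /// ```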
    pub fn pause_guard(&self) -> Result<VmiPauseGuard<'_, Driver>, VmiError> {
        VmiPauseGuard::new(&self.driver)
    }

    /// Retrieves the current state of CPU registers for a specified virtual
    /// CPU.
    ///
    /// This method allows you to access the current values of CPU registers,
    /// which is crucial for understanding the state of the virtual machine
    /// at a given point in time.
    ///
    /// # Notes
    ///
    /// The exact structure and content of the returned registers depend on the
    /// specific architecture of the VM being introspected. Refer to the
    /// documentation of your [`Architecture`] implementation for details on
    /// how to interpret the register values.
    pub fn registers(
        &self,
        vcpu: VcpuId,
    ) -> Result<<Driver::Architecture as Architecture>::Registers, VmiError> {
        self.driver.registers(vcpu)
    }

    /// Sets the registers of a virtual CPU.
    pub fn set_registers(
        &self,
        vcpu: VcpuId,
        registers: <Driver::Architecture as Architecture>::Registers,
    ) -> Result<(), VmiError> {
        self.driver.set_registers(vcpu, registers)
    }

    /// Retrieves the memory access permissions for a specific guest frame
    /// number (GFN).
    ///
    /// The returned `MemoryAccess` indicates the current read, write, and
    /// execute permissions for the specified memory page in the given view.
    pub fn memory_access(&self, gfn: Gfn, view: View) -> Result<MemoryAccess, VmiError> {
        self.driver.memory_access(gfn, view)
    }

    /// Sets the memory access permissions for a specific guest frame number
    /// (GFN).
    ///
    /// This method allows you to modify the read, write, and execute
    /// permissions for a given memory page in the specified view.
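    ///
    /// For example, to trap writes to a page while keeping it readable and
    /// executable (a sketch; it assumes `MemoryAccess` can be combined
    /// bitflags-style and exposes `R` and `X` values):
    ///
    /// ```ignore
    /// vmi.set_memory_access(gfn, view, MemoryAccess::R | MemoryAccess::X)?;
    /// ```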
    pub fn set_memory_access(
        &self,
        gfn: Gfn,
        view: View,
        access: MemoryAccess,
    ) -> Result<(), VmiError> {
        self.driver.set_memory_access(gfn, view, access)
    }

    /// Sets the memory access permissions for a specific guest frame number
    /// (GFN) with additional options.
    ///
    /// In addition to the basic read, write, and execute permissions, this
    /// method allows you to specify additional options for the memory access.
    pub fn set_memory_access_with_options(
        &self,
        gfn: Gfn,
        view: View,
        access: MemoryAccess,
        options: MemoryAccessOptions,
    ) -> Result<(), VmiError> {
        self.driver
            .set_memory_access_with_options(gfn, view, access, options)
    }

    /// Allocates the next available guest frame number (GFN).
    ///
    /// This method finds and allocates the next free GFN after the current
    /// maximum GFN. It's useful when you need to allocate new memory pages
    /// for the VM.
    pub fn allocate_next_available_gfn(&self) -> Result<Gfn, VmiError> {
        let info = self.info()?;

        let next_available_gfn = info.max_gfn + 1;
        self.allocate_gfn(next_available_gfn)?;
        Ok(next_available_gfn)
    }

    /// Allocates a specific guest frame number (GFN).
    ///
    /// This method allows you to allocate a particular GFN. It's useful when
    /// you need to allocate a specific memory page for the VM.
    pub fn allocate_gfn(&self, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.allocate_gfn(gfn)
    }

    /// Frees a previously allocated guest frame number (GFN).
    ///
    /// This method deallocates a GFN that was previously allocated. It's
    /// important to free GFNs when they're no longer needed to prevent
    /// memory leaks in the VM.
    pub fn free_gfn(&self, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.free_gfn(gfn)
    }

    /// Returns the default view for the virtual machine.
    ///
    /// The default view typically represents the normal, unmodified state of
    /// the VM's memory.
    pub fn default_view(&self) -> View {
        self.driver.default_view()
    }

    /// Creates a new view with the specified default access permissions.
    ///
    /// Views allow for creating different perspectives of the VM's memory,
    /// which can be useful for analysis or isolation purposes. The default
    /// access permissions apply to memory pages not explicitly modified
    /// within this view.
    pub fn create_view(&self, default_access: MemoryAccess) -> Result<View, VmiError> {
        self.driver.create_view(default_access)
    }

    /// Destroys a previously created view.
    ///
    /// This method removes a view and frees associated resources. It should be
    /// called when a view is no longer needed to prevent resource leaks.
    pub fn destroy_view(&self, view: View) -> Result<(), VmiError> {
        self.driver.destroy_view(view)
    }

    /// Switches to a different view for all virtual CPUs.
    ///
    /// This method changes the current active view for all vCPUs, affecting
    /// subsequent memory operations across the entire VM. It allows for
    /// quick transitions between different memory perspectives globally.
    ///
    /// Note the difference between this method and
    /// [`VmiEventResponse::set_view()`]:
    /// - `switch_to_view()` changes the view for all vCPUs immediately.
    /// - `VmiEventResponse::set_view()` sets the view only for the specific
    ///   vCPU that received the event, and the change is applied when the event
    ///   handler returns.
    ///
    /// Use `switch_to_view()` for global view changes, and
    /// `VmiEventResponse::set_view()` for targeted, event-specific view
    /// modifications on individual vCPUs.
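    ///
    /// A short sketch of a global switch and restore (`hook_view` is assumed to
    /// have been created earlier with `create_view`):
    ///
    /// ```ignore
    /// // Switch every vCPU to the instrumented view...
    /// vmi.switch_to_view(hook_view)?;
    /// // ...and later restore the default mapping for the whole VM.
    /// vmi.switch_to_view(vmi.default_view())?;
    /// ```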
    pub fn switch_to_view(&self, view: View) -> Result<(), VmiError> {
        self.driver.switch_to_view(view)
    }

    /// Changes the mapping of a guest frame number (GFN) in a specific view.
    ///
    /// This method allows for remapping a GFN to a different physical frame
    /// within a view, enabling fine-grained control over memory layout in
    /// different views.
    ///
    /// A notable use case for this method is implementing "stealth hooks":
    /// 1. Create a new GFN and copy the contents of the original page to it.
    /// 2. Modify the new page by installing a breakpoint (e.g., 0xcc on AMD64)
    ///    at a strategic location.
    /// 3. Use this method to change the mapping of the original GFN to the new
    ///    one.
    /// 4. Set the memory access of the new GFN to non-readable.
    ///
    /// When a read access occurs:
    /// - The handler should enable single-stepping.
    /// - Switch to an unmodified view (e.g., `default_view`) to execute the
    ///   read instruction, which will read the original non-breakpoint byte.
    /// - After the single step completes, switch back to the modified view and
    ///   disable single-stepping.
    ///
    /// This technique allows for transparent breakpoints that are difficult to
    /// detect by the guest OS or applications.
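    ///
    /// A sketch of steps 1-4 above (`original_gfn`, `hook_offset`, and `view`
    /// are illustrative, the execute-only `MemoryAccess::X` value is an assumed
    /// flag name, and error handling is omitted):
    ///
    /// ```ignore
    /// // 1. Allocate a fresh GFN and copy the original page into it.
    /// let new_gfn = vmi.allocate_next_available_gfn()?;
    /// let original = vmi.read_page(original_gfn)?;
    /// vmi.driver().write_page(new_gfn, 0, &original[0..])?;
    ///
    /// // 2. Install the breakpoint (0xCC on AMD64) at the hooked location.
    /// vmi.driver().write_page(new_gfn, hook_offset, &[0xCC])?;
    ///
    /// // 3. Remap the original GFN to the modified copy in the given view.
    /// vmi.change_view_gfn(view, original_gfn, new_gfn)?;
    ///
    /// // 4. Make the new page non-readable so reads trap into the handler.
    /// vmi.set_memory_access(new_gfn, view, MemoryAccess::X)?;
    /// ```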
    pub fn change_view_gfn(&self, view: View, old_gfn: Gfn, new_gfn: Gfn) -> Result<(), VmiError> {
        self.driver.change_view_gfn(view, old_gfn, new_gfn)
    }

    /// Resets the mapping of a guest frame number (GFN) in a specific view to
    /// its original state.
    ///
    /// This method reverts any custom mapping for the specified GFN in the
    /// given view, restoring it to the default mapping.
    pub fn reset_view_gfn(&self, view: View, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.reset_view_gfn(view, gfn)
    }

    /// Enables monitoring of specific events.
    ///
    /// This method allows you to enable monitoring of specific events, such as
    /// control register writes, interrupts, or single-step execution.
    /// Monitoring events can be useful for tracking specific guest behavior or
    /// for implementing custom analysis tools.
    ///
    /// The type of event to monitor is defined by the architecture-specific
    /// [`Architecture::EventMonitor`] type.
    ///
    /// When an event occurs, it will be passed to the event callback function
    /// for processing.
    pub fn monitor_enable(
        &self,
        option: <Driver::Architecture as Architecture>::EventMonitor,
    ) -> Result<(), VmiError> {
        self.driver.monitor_enable(option)
    }

    /// Disables monitoring of specific events.
    ///
    /// This method allows you to disable monitoring of specific events that
    /// were previously enabled. It can be used to stop tracking certain
    /// hardware events or to reduce the overhead of event processing.
    ///
    /// The type of event to disable is defined by the architecture-specific
    /// [`Architecture::EventMonitor`] type.
    pub fn monitor_disable(
        &self,
        option: <Driver::Architecture as Architecture>::EventMonitor,
    ) -> Result<(), VmiError> {
        self.driver.monitor_disable(option)
    }

    /// Injects an interrupt into a specific virtual CPU.
    ///
    /// This method allows for the injection of architecture-specific interrupts
    /// into a given vCPU. It can be used to simulate hardware events or to
    /// manipulate the guest's execution flow for analysis purposes.
    ///
    /// The type of interrupt and its parameters are defined by the
    /// architecture-specific [`Architecture::Interrupt`] type.
    pub fn inject_interrupt(
        &self,
        vcpu: VcpuId,
        interrupt: <Driver::Architecture as Architecture>::Interrupt,
    ) -> Result<(), VmiError> {
        self.driver.inject_interrupt(vcpu, interrupt)
    }

    /// Returns the number of pending events.
    ///
    /// This method provides a count of events that have occurred but have not
    /// yet been processed.
    pub fn events_pending(&self) -> usize {
        self.driver.events_pending()
    }

    /// Returns the time spent processing events by the driver.
    ///
    /// This method provides a measure of the overhead introduced by event
    /// processing. It can be useful for performance tuning and
    /// understanding the impact of VMI operations on overall system
    /// performance.
    pub fn event_processing_overhead(&self) -> Duration {
        self.driver.event_processing_overhead()
    }

    /// Waits for an event to occur and processes it with the provided handler.
    ///
    /// This method blocks until an event occurs or the specified timeout is
    /// reached. When an event occurs, it is passed to the provided callback
    /// function for processing.
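    ///
    /// # Examples
    ///
    /// A minimal event loop sketch (it assumes a plain "continue" response can
    /// be built with `VmiEventResponse::default()`):
    ///
    /// ```ignore
    /// loop {
    ///     vmi.wait_for_event(Duration::from_millis(100), |event| {
    ///         // Inspect `event` here and decide how the vCPU should proceed.
    ///         VmiEventResponse::default()
    ///     })?;
    /// }
    /// ```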
    pub fn wait_for_event(
        &self,
        timeout: Duration,
        handler: impl FnMut(&VmiEvent<Driver::Architecture>) -> VmiEventResponse<Driver::Architecture>,
    ) -> Result<(), VmiError> {
        self.driver.wait_for_event(timeout, handler)
    }

    /// Resets the state of the VMI system.
    ///
    /// This method clears all event monitors, caches, and any other stateful
    /// data maintained by the VMI system. It's useful for bringing the VMI
    /// system back to a known clean state, which can be necessary when
    /// switching between different analysis tasks or recovering from error
    /// conditions.
    pub fn reset_state(&self) -> Result<(), VmiError> {
        self.driver.reset_state()
    }

    /// Reads memory from the virtual machine.
    pub fn read(&self, ctx: impl Into<AccessContext>, buffer: &mut [u8]) -> Result<(), VmiError> {
        let ctx = ctx.into();
        let mut position = 0usize;
        let mut remaining = buffer.len();

        while remaining > 0 {
            let address = self.translate_access_context(ctx + position as u64)?;
            let gfn = Driver::Architecture::gfn_from_pa(address);
            let offset = Driver::Architecture::pa_offset(address) as usize;

            let page = self.read_page(gfn)?;
            let page = &page[offset..];

            let size = std::cmp::min(remaining, page.len());
            buffer[position..position + size].copy_from_slice(&page[..size]);

            position += size;
            remaining -= size;
        }

        Ok(())
    }

    /// Writes memory to the virtual machine.
    pub fn write(&self, ctx: impl Into<AccessContext>, buffer: &[u8]) -> Result<(), VmiError> {
        let ctx = ctx.into();
        let mut position = 0usize;
        let mut remaining = buffer.len();

        let page_size = self.info()?.page_size;

        while remaining > 0 {
            let address = self.translate_access_context(ctx + position as u64)?;
            let gfn = Driver::Architecture::gfn_from_pa(address);
            let offset = Driver::Architecture::pa_offset(address);

            let size = std::cmp::min(remaining, (page_size - offset) as usize);
            let content = &buffer[position..position + size];

            self.driver.write_page(gfn, offset, content)?;

            position += size;
            remaining -= size;
        }

        Ok(())
    }

    /// Reads a single byte from the virtual machine.
    pub fn read_u8(&self, ctx: impl Into<AccessContext>) -> Result<u8, VmiError> {
        let mut buffer = [0u8; 1];
        self.read(ctx, &mut buffer)?;
        Ok(buffer[0])
    }

    /// Reads a 16-bit unsigned integer from the virtual machine.
    pub fn read_u16(&self, ctx: impl Into<AccessContext>) -> Result<u16, VmiError> {
        let mut buffer = [0u8; 2];
        self.read(ctx, &mut buffer)?;
        Ok(u16::from_le_bytes(buffer))
    }

    /// Reads a 32-bit unsigned integer from the virtual machine.
    pub fn read_u32(&self, ctx: impl Into<AccessContext>) -> Result<u32, VmiError> {
        let mut buffer = [0u8; 4];
        self.read(ctx, &mut buffer)?;
        Ok(u32::from_le_bytes(buffer))
    }

    /// Reads a 64-bit unsigned integer from the virtual machine.
    pub fn read_u64(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
        let mut buffer = [0u8; 8];
        self.read(ctx, &mut buffer)?;
        Ok(u64::from_le_bytes(buffer))
    }

    /// Reads an unsigned integer of the specified size from the virtual machine.
    ///
    /// This method reads an unsigned integer of the specified size (in bytes)
    /// from the virtual machine. Note that the size must be 1, 2, 4, or 8.
    ///
    /// The result is returned as a [`u64`] to accommodate the widest possible
    /// integer size.
    pub fn read_uint(&self, ctx: impl Into<AccessContext>, size: usize) -> Result<u64, VmiError> {
        match size {
            1 => self.read_u8(ctx).map(u64::from),
            2 => self.read_u16(ctx).map(u64::from),
            4 => self.read_u32(ctx).map(u64::from),
            8 => self.read_u64(ctx),
            _ => Err(VmiError::InvalidAddressWidth),
        }
    }

    /// Reads a field of a structure from the virtual machine.
    ///
    /// This method reads a field from the virtual machine. The field is
    /// defined by the provided [`Field`] structure, which specifies the
    /// offset and size of the field within the memory region.
    ///
    /// The result is returned as a [`u64`] to accommodate the widest possible
    /// integer size.
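    ///
    /// A usage sketch (here `process` is an [`AccessContext`] pointing at the
    /// start of the structure and `field` is a [`Field`] resolved from a symbol
    /// profile; both are illustrative):
    ///
    /// ```ignore
    /// let value = vmi.read_field(process, &field)?;
    /// ```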
    pub fn read_field(
        &self,
        ctx: impl Into<AccessContext>,
        field: &Field,
    ) -> Result<u64, VmiError> {
        self.read_uint(ctx.into() + field.offset(), field.size() as usize)
    }

    /// Reads an address-sized unsigned integer from the virtual machine.
    ///
    /// This method reads an address of the specified width (in bytes) from
    /// the given access context. It's useful when dealing with architectures
    /// that can operate in different address modes.
    pub fn read_address(
        &self,
        ctx: impl Into<AccessContext>,
        address_width: usize,
    ) -> Result<u64, VmiError> {
        match address_width {
            4 => self.read_address32(ctx),
            8 => self.read_address64(ctx),
            _ => Err(VmiError::InvalidAddressWidth),
        }
    }

    /// Reads a 32-bit address from the virtual machine.
    pub fn read_address32(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
        Ok(self.read_u32(ctx)? as u64)
    }

    /// Reads a 64-bit address from the virtual machine.
    pub fn read_address64(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
        self.read_u64(ctx)
    }

    /// Reads a virtual address from the virtual machine.
    pub fn read_va(
        &self,
        ctx: impl Into<AccessContext>,
        address_width: usize,
    ) -> Result<Va, VmiError> {
        Ok(Va(self.read_address(ctx, address_width)?))
    }

    /// Reads a 32-bit virtual address from the virtual machine.
    pub fn read_va32(&self, ctx: impl Into<AccessContext>) -> Result<Va, VmiError> {
        Ok(Va(self.read_address32(ctx)?))
    }

    /// Reads a 64-bit virtual address from the virtual machine.
    pub fn read_va64(&self, ctx: impl Into<AccessContext>) -> Result<Va, VmiError> {
        Ok(Va(self.read_address64(ctx)?))
    }

    /// Reads a null-terminated string of bytes from the virtual machine with a
    /// specified limit.
    pub fn read_string_bytes_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<Vec<u8>, VmiError> {
        let mut ctx = ctx.into();

        // read until the end of page
        let mut buffer = vec![
            0u8;
            (Driver::Architecture::PAGE_SIZE - (ctx.address & !Driver::Architecture::PAGE_MASK))
                as usize
        ];
        self.read(ctx, &mut buffer)?;

        // try to find the null terminator
        let position = buffer.iter().position(|&b| b == 0);

        if let Some(position) = position {
            buffer.truncate(limit.min(position));
            return Ok(buffer);
        }

        let mut page = [0u8; 4096_usize]; // FIXME: Driver::Architecture::PAGE_SIZE

        // Advance past the initial partial read, then continue one page at a time.
        ctx.address += buffer.len() as u64;
        loop {
            self.read(ctx, &mut page)?;
            ctx.address += page.len() as u64;

            let position = page.iter().position(|&b| b == 0);

            if let Some(position) = position {
                buffer.extend_from_slice(&page[..position]);

                if buffer.len() >= limit {
                    buffer.truncate(limit);
                }

                break;
            }

            buffer.extend_from_slice(&page);

            if buffer.len() >= limit {
                buffer.truncate(limit);
                break;
            }
        }

        Ok(buffer)
    }

    /// Reads a null-terminated string of bytes from the virtual machine.
    pub fn read_string_bytes(&self, ctx: impl Into<AccessContext>) -> Result<Vec<u8>, VmiError> {
        self.read_string_bytes_limited(
            ctx,
            self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
        )
    }

    /// Reads a null-terminated wide string (UTF-16) from the virtual machine
    /// with a specified limit.
    pub fn read_wstring_bytes_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<Vec<u16>, VmiError> {
        let mut ctx = ctx.into();

        // read until the end of page
        let mut buffer = vec![
            0u8;
            (Driver::Architecture::PAGE_SIZE - (ctx.address & !Driver::Architecture::PAGE_MASK))
                as usize
        ];
        self.read(ctx, &mut buffer)?;

        // try to find the null terminator
        let position = buffer
            .chunks_exact(2)
            .position(|chunk| chunk[0] == 0 && chunk[1] == 0);

        if let Some(position) = position {
            buffer.truncate(limit.min(position * 2));
            return Ok(buffer
                .chunks_exact(2)
                .map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]]))
                .collect());
        }

        let mut page = [0u8; 4096_usize]; // FIXME: Driver::Architecture::PAGE_SIZE

        // Advance past the initial partial read, then continue one page at a time.
        ctx.address += buffer.len() as u64;
        loop {
            self.read(ctx, &mut page)?;
            ctx.address += page.len() as u64;

            let position = page
                .chunks_exact(2)
                .position(|chunk| chunk[0] == 0 && chunk[1] == 0);

            if let Some(position) = position {
                buffer.extend_from_slice(&page[..position * 2]);

                if buffer.len() >= limit {
                    buffer.truncate(limit);
                }

                break;
            }

            buffer.extend_from_slice(&page);

            if buffer.len() >= limit {
                buffer.truncate(limit);
                break;
            }
        }

        Ok(buffer
            .chunks_exact(2)
            .map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]]))
            .collect())
    }

    /// Reads a null-terminated wide string (UTF-16) from the virtual machine.
    pub fn read_wstring_bytes(&self, ctx: impl Into<AccessContext>) -> Result<Vec<u16>, VmiError> {
        self.read_wstring_bytes_limited(
            ctx,
            self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
        )
    }

    /// Reads a null-terminated string from the virtual machine with a specified
    /// limit.
    pub fn read_string_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<String, VmiError> {
        Ok(String::from_utf8_lossy(&self.read_string_bytes_limited(ctx, limit)?).into())
    }

    /// Reads a null-terminated string from the virtual machine.
    pub fn read_string(&self, ctx: impl Into<AccessContext>) -> Result<String, VmiError> {
        self.read_string_limited(
            ctx,
            self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
        )
    }

    /// Reads a null-terminated wide string (UTF-16) from the virtual machine
    /// with a specified limit.
    pub fn read_wstring_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<String, VmiError> {
        Ok(String::from_utf16_lossy(
            &self.read_wstring_bytes_limited(ctx, limit)?,
        ))
    }

    /// Reads a null-terminated wide string (UTF-16) from the virtual machine.
    pub fn read_wstring(&self, ctx: impl Into<AccessContext>) -> Result<String, VmiError> {
        self.read_wstring_limited(
            ctx,
            self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
        )
    }

    /// Reads a struct from the virtual machine.
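    ///
    /// # Examples
    ///
    /// A sketch with a `zerocopy`-derived type (the `ListEntry` layout is
    /// purely illustrative):
    ///
    /// ```ignore
    /// use zerocopy::{FromBytes, IntoBytes};
    ///
    /// #[derive(FromBytes, IntoBytes)]
    /// #[repr(C)]
    /// struct ListEntry {
    ///     flink: u64,
    ///     blink: u64,
    /// }
    ///
    /// let entry: ListEntry = vmi.read_struct(ctx)?;
    /// ```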
    pub fn read_struct<T>(&self, ctx: impl Into<AccessContext>) -> Result<T, VmiError>
    where
        T: FromBytes + IntoBytes,
    {
        let mut result = T::new_zeroed();
        self.read(ctx, result.as_mut_bytes())?;
        Ok(result)
    }

    /// Writes a single byte to the virtual machine.
    pub fn write_u8(&self, ctx: impl Into<AccessContext>, value: u8) -> Result<(), VmiError> {
        self.write(ctx, &value.to_le_bytes())
    }

    /// Writes a 16-bit unsigned integer to the virtual machine.
    pub fn write_u16(&self, ctx: impl Into<AccessContext>, value: u16) -> Result<(), VmiError> {
        self.write(ctx, &value.to_le_bytes())
    }

    /// Writes a 32-bit unsigned integer to the virtual machine.
    pub fn write_u32(&self, ctx: impl Into<AccessContext>, value: u32) -> Result<(), VmiError> {
        self.write(ctx, &value.to_le_bytes())
    }

    /// Writes a 64-bit unsigned integer to the virtual machine.
    pub fn write_u64(&self, ctx: impl Into<AccessContext>, value: u64) -> Result<(), VmiError> {
        self.write(ctx, &value.to_le_bytes())
    }

    /// Writes a struct to the virtual machine.
    pub fn write_struct<T>(&self, ctx: impl Into<AccessContext>, value: T) -> Result<(), VmiError>
    where
        T: IntoBytes + Immutable,
    {
        self.write(ctx, value.as_bytes())
    }

    /// Translates a virtual address to a physical address.
    pub fn translate_address(&self, ctx: impl Into<AddressContext>) -> Result<Pa, VmiError> {
        self.translate_access_context(AccessContext::from(ctx.into()))
    }

    /// Translates an access context to a physical address.
    pub fn translate_access_context(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
        (self.translate_access_context_fn)(self, ctx)
    }

    /// Reads a page of memory from the virtual machine.
    pub fn read_page(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        (self.read_page_fn)(self, gfn)
    }

    /// Reads a page of memory from the virtual machine without using the cache.
    fn read_page_nocache(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        self.driver.read_page(gfn)
    }

    /// Reads a page of memory from the virtual machine, using the cache if
    /// enabled.
    fn read_page_cache(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        let mut cache = self.cache.gfn.borrow_mut();
        let value = cache.try_get_or_insert(gfn, || self.read_page_nocache(gfn))?;

        // Mapped pages are reference counted, so cloning one is cheap.
        Ok(value.clone())
    }

    /// Translates an access context to a physical address without using the
    /// cache.
    ///
    /// # Notes
    ///
    /// If [`TranslationMechanism::Paging`] is used, the `root` must be present;
    /// otherwise a [`VmiError::RootNotPresent`] error is returned.
    fn translate_access_context_nocache(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
        Ok(match ctx.mechanism {
            TranslationMechanism::Direct => Pa(ctx.address),
            TranslationMechanism::Paging { root } => match root {
                Some(root) => <Driver::Architecture as Architecture>::translate_address(
                    self,
                    ctx.address.into(),
                    root,
                )?,
                None => return Err(VmiError::RootNotPresent),
            },
        })
    }

    /// Translates an access context to a physical address, using the cache if
    /// enabled.
    fn translate_access_context_cache(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
        let mut cache = self.cache.v2p.borrow_mut();
        let value = cache.try_get_or_insert(ctx, || self.translate_access_context_nocache(ctx))?;
        Ok(*value)
    }
}

/// A guard that pauses the virtual machine on creation and resumes it on drop.
pub struct VmiPauseGuard<'a, Driver>
where
    Driver: VmiDriver,
{
    driver: &'a Driver,
}

impl<'a, Driver> VmiPauseGuard<'a, Driver>
where
    Driver: VmiDriver,
{
    /// Creates a new pause guard.
    pub fn new(driver: &'a Driver) -> Result<Self, VmiError> {
        driver.pause()?;
        Ok(Self { driver })
    }
}

impl<Driver> Drop for VmiPauseGuard<'_, Driver>
where
    Driver: VmiDriver,
{
    fn drop(&mut self) {
        if let Err(err) = self.driver.resume() {
            tracing::error!(?err, "Failed to resume the virtual machine");
        }
    }
}