vmi_core/lib.rs
1//! Core VMI functionality.
2
3pub mod arch;
4mod core;
5mod ctx;
6pub mod driver;
7mod error;
8mod event;
9mod handler;
10pub mod os;
11mod page;
12
13use std::{cell::RefCell, num::NonZeroUsize, time::Duration};
14
15use isr_macros::Field;
16use lru::LruCache;
17use zerocopy::{FromBytes, Immutable, IntoBytes};
18
19pub use self::{
20 arch::{Architecture, Registers},
21 core::{
22 AccessContext, AddressContext, Gfn, Hex, MemoryAccess, MemoryAccessOptions, Pa,
23 TranslationMechanism, Va, VcpuId, View, VmiInfo, VmiVa,
24 },
25 ctx::{VmiContext, VmiOsContext, VmiOsState, VmiProber, VmiSession, VmiState},
26 driver::{
27 VmiDriver, VmiEventControl, VmiFullDriver, VmiMemory, VmiProtection, VmiQueryProtection,
28 VmiQueryRegisters, VmiRead, VmiReadAccess, VmiRegisters, VmiSetProtection, VmiSetRegisters,
29 VmiViewControl, VmiVmControl, VmiWrite, VmiWriteAccess,
30 },
31 error::{PageFaults, VmiError},
32 event::{VmiEvent, VmiEventAction, VmiEventFlags, VmiEventResponse},
33 handler::VmiHandler,
34 os::VmiOs,
35 page::VmiMappedPage,
36};
37
/// Internal caches shared by [`VmiCore`].
///
/// Both caches use interior mutability (`RefCell`) so they can be updated
/// through the `&self` methods of [`VmiCore`].
struct Cache {
    // Page-content cache: guest frame number -> mapped page.
    gfn: RefCell<LruCache<Gfn, VmiMappedPage>>,
    // Translation cache: access context -> physical address.
    v2p: RefCell<LruCache<AccessContext, Pa>>,
}
42
43impl Cache {
44 const DEFAULT_SIZE: NonZeroUsize = NonZeroUsize::new(8192).unwrap();
45
46 pub fn new() -> Self {
47 Self {
48 gfn: RefCell::new(LruCache::new(Self::DEFAULT_SIZE)),
49 v2p: RefCell::new(LruCache::new(Self::DEFAULT_SIZE)),
50 }
51 }
52}
53
/// The core functionality for Virtual Machine Introspection (VMI).
pub struct VmiCore<Driver>
where
    Driver: VmiDriver,
{
    driver: Driver,
    // Shared GFN and V2P caches (see `Cache`).
    cache: Cache,

    // Runtime cache toggles: each field holds either the `_cache` or the
    // `_nocache` implementation, swapped by the enable/disable methods.
    read_page_fn: fn(&Self, Gfn) -> Result<VmiMappedPage, VmiError>,
    translate_access_context_fn: fn(&Self, AccessContext) -> Result<Pa, VmiError>,

    // Optional truncation limit (in bytes) applied by the `read_string*`
    // methods; `None` means unlimited.
    read_string_length_limit: RefCell<Option<usize>>,
}
67
68///////////////////////////////////////////////////////////////////////////////
69// VmiDriver
70///////////////////////////////////////////////////////////////////////////////
71
impl<Driver> VmiCore<Driver>
where
    Driver: VmiDriver,
{
    /// Returns the driver used by this `VmiCore` instance.
    pub fn driver(&self) -> &Driver {
        &self.driver
    }

    /// Retrieves information about the virtual machine.
    ///
    /// Delegates directly to the underlying driver.
    pub fn info(&self) -> Result<VmiInfo, VmiError> {
        self.driver.info()
    }
}
86
87///////////////////////////////////////////////////////////////////////////////
88// VmiRead
89///////////////////////////////////////////////////////////////////////////////
90
91impl<Driver> VmiCore<Driver>
92where
93 Driver: VmiRead,
94{
95 /// Creates a new `VmiCore` instance with the given driver.
96 ///
97 /// Both the GFN cache and the V2P cache are enabled by default,
98 /// each with a capacity of 8192 entries.
99 pub fn new(driver: Driver) -> Result<Self, VmiError> {
100 Ok(Self {
101 driver,
102 cache: Cache::new(),
103 read_page_fn: Self::read_page_cache,
104 translate_access_context_fn: Self::translate_access_context_cache,
105 read_string_length_limit: RefCell::new(None),
106 })
107 }
108
109 /// Enables the Guest Frame Number (GFN) cache.
110 ///
111 /// The GFN cache stores the contents of recently accessed memory pages,
112 /// indexed by their GFN. This can significantly improve performance when
113 /// repeatedly accessing the same memory regions, as it avoids redundant
114 /// reads from the virtual machine.
115 ///
116 /// When enabled, subsequent calls to [`read_page`] will first check
117 /// the cache before querying the driver.
118 ///
119 /// # Panics
120 ///
121 /// Panics if `size` is zero.
122 ///
123 /// [`read_page`]: Self::read_page
124 pub fn with_gfn_cache(self, size: usize) -> Self {
125 Self {
126 cache: Cache {
127 gfn: RefCell::new(LruCache::new(NonZeroUsize::new(size).unwrap())),
128 ..self.cache
129 },
130 read_page_fn: Self::read_page_cache,
131 ..self
132 }
133 }
134
    /// Enables the GFN cache.
    ///
    /// See [`with_gfn_cache`] for more details.
    ///
    /// [`with_gfn_cache`]: Self::with_gfn_cache
    pub fn enable_gfn_cache(&mut self) {
        // Route page reads through the caching implementation.
        self.read_page_fn = Self::read_page_cache;
    }

    /// Disables the GFN cache.
    ///
    /// Subsequent calls to [`read_page`] will bypass the cache and read
    /// directly from the virtual machine.
    ///
    /// Note that cached entries are retained; use [`flush_gfn_cache`] to
    /// drop them.
    ///
    /// [`read_page`]: Self::read_page
    /// [`flush_gfn_cache`]: Self::flush_gfn_cache
    pub fn disable_gfn_cache(&mut self) {
        self.read_page_fn = Self::read_page_nocache;
    }
153
    /// Resizes the GFN cache.
    ///
    /// This allows you to adjust the cache size dynamically based on your
    /// performance needs. A larger cache can improve performance for
    /// workloads with high memory locality, but consumes more memory.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    pub fn resize_gfn_cache(&mut self, size: usize) {
        // `unwrap` enforces the documented non-zero requirement.
        self.cache
            .gfn
            .borrow_mut()
            .resize(NonZeroUsize::new(size).unwrap());
    }

    /// Removes a specific entry from the GFN cache.
    ///
    /// Returns the removed entry if it was present.
    /// This is useful for invalidating cached data that might have
    /// become stale.
    pub fn flush_gfn_cache_entry(&self, gfn: Gfn) -> Option<VmiMappedPage> {
        self.cache.gfn.borrow_mut().pop(&gfn)
    }

    /// Clears the entire GFN cache.
    pub fn flush_gfn_cache(&self) {
        self.cache.gfn.borrow_mut().clear();
    }
183
184 ///// Retrieves metrics about the GFN cache.
185 //pub fn gfn_cache_metrics(&self) -> CacheMetrics {
186 // let cache = self.cache.gfn.borrow();
187 // CacheMetrics {
188 // hits: ...,
189 // misses: ...,
190 // }
191 //}
192
193 /// Enables the virtual-to-physical (V2P) address translation cache.
194 ///
195 /// The V2P cache stores the results of recent address translations,
196 /// mapping virtual addresses (represented by [`AccessContext`]) to their
197 /// corresponding physical addresses ([`Pa`]). This can significantly
198 /// speed up memory access operations, as address translation can be a
199 /// relatively expensive operation.
200 ///
201 /// When enabled, [`translate_access_context`] will consult the cache
202 /// before performing a full translation.
203 ///
204 /// # Panics
205 ///
206 /// Panics if `size` is zero.
207 ///
208 /// [`translate_access_context`]: Self::translate_access_context
209 pub fn with_v2p_cache(self, size: usize) -> Self {
210 Self {
211 cache: Cache {
212 v2p: RefCell::new(LruCache::new(NonZeroUsize::new(size).unwrap())),
213 ..self.cache
214 },
215 translate_access_context_fn: Self::translate_access_context_cache,
216 ..self
217 }
218 }
219
    /// Enables the V2P cache.
    ///
    /// See [`with_v2p_cache`] for more details.
    ///
    /// [`with_v2p_cache`]: Self::with_v2p_cache
    pub fn enable_v2p_cache(&mut self) {
        // Route translations through the caching implementation.
        self.translate_access_context_fn = Self::translate_access_context_cache;
    }

    /// Disables the V2P cache.
    ///
    /// Subsequent calls to [`translate_access_context`] will bypass the cache
    /// and perform a full address translation every time.
    ///
    /// Note that cached translations are retained; use [`flush_v2p_cache`]
    /// to drop them.
    ///
    /// [`translate_access_context`]: Self::translate_access_context
    /// [`flush_v2p_cache`]: Self::flush_v2p_cache
    pub fn disable_v2p_cache(&mut self) {
        self.translate_access_context_fn = Self::translate_access_context_nocache;
    }
238
    /// Resizes the V2P cache.
    ///
    /// This allows dynamic adjustment of the cache size to balance
    /// performance and memory usage. A larger cache can lead to better
    /// performance if address translations are frequent and exhibit
    /// good locality.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    pub fn resize_v2p_cache(&mut self, size: usize) {
        // `unwrap` enforces the documented non-zero requirement.
        self.cache
            .v2p
            .borrow_mut()
            .resize(NonZeroUsize::new(size).unwrap());
    }

    /// Removes a specific entry from the V2P cache.
    ///
    /// Returns the removed entry if it was present.
    /// This can be used to invalidate cached translations that may have
    /// become stale due to changes in the guest's memory mapping.
    pub fn flush_v2p_cache_entry(&self, ctx: AccessContext) -> Option<Pa> {
        self.cache.v2p.borrow_mut().pop(&ctx)
    }

    /// Clears the entire V2P cache.
    ///
    /// This method is crucial for maintaining consistency when handling events.
    /// The guest operating system can modify page tables or other structures
    /// related to address translation between events. Using stale translations
    /// can lead to incorrect memory access and unexpected behavior.
    /// It is recommended to call this method at the beginning of each
    /// [`VmiHandler::handle_event`] loop to ensure that you are working with
    /// the most up-to-date address mappings.
    pub fn flush_v2p_cache(&self) {
        self.cache.v2p.borrow_mut().clear();
    }
277
278 ///// Retrieves metrics about the V2P cache.
279 //pub fn v2p_cache_metrics(&self) -> CacheMetrics {
280 // let cache = self.cache.v2p.borrow();
281 // CacheMetrics {
282 // hits: ...,
283 // misses: ...,
284 // }
285 //}
286
287 /// Sets a limit on the length of strings read by the `read_string` methods.
288 /// If the limit is reached, the string will be truncated.
289 pub fn with_read_string_length_limit(self, limit_in_bytes: usize) -> Self {
290 Self {
291 read_string_length_limit: RefCell::new(Some(limit_in_bytes)),
292 ..self
293 }
294 }
295
296 /// Returns the current limit on the length of strings read by the
297 /// `read_string` methods.
298 pub fn read_string_length_limit(&self) -> Option<usize> {
299 *self.read_string_length_limit.borrow()
300 }
301
302 /// Sets a limit on the length of strings read by the `read_string` methods.
303 ///
304 /// This method allows you to set a maximum length (in bytes) for strings
305 /// read from the virtual machine's memory. When set, string reading
306 /// operations will truncate their results to this limit. This can be
307 /// useful for preventing excessively long string reads, which might
308 /// impact performance or consume too much memory.
309 ///
310 /// If the limit is reached during a string read operation, the resulting
311 /// string will be truncated to the specified length.
312 ///
313 /// To remove the limit, call this method with `None`.
314 pub fn set_read_string_length_limit(&self, limit: usize) {
315 *self.read_string_length_limit.borrow_mut() = Some(limit);
316 }
317
318 /// Reads memory from the virtual machine.
319 pub fn read(&self, ctx: impl Into<AccessContext>, buffer: &mut [u8]) -> Result<(), VmiError> {
320 let ctx = ctx.into();
321 let mut position = 0usize;
322 let mut remaining = buffer.len();
323
324 while remaining > 0 {
325 let address = self.translate_access_context(ctx + position as u64)?;
326 let gfn = Driver::Architecture::gfn_from_pa(address);
327 let offset = Driver::Architecture::pa_offset(address) as usize;
328
329 let page = self.read_page(gfn)?;
330 let page = &page[offset..];
331
332 let size = std::cmp::min(remaining, page.len());
333 buffer[position..position + size].copy_from_slice(&page[..size]);
334
335 position += size;
336 remaining -= size;
337 }
338
339 Ok(())
340 }
341
342 /// Reads a single byte from the virtual machine.
343 pub fn read_u8(&self, ctx: impl Into<AccessContext>) -> Result<u8, VmiError> {
344 let mut buffer = [0u8; 1];
345 self.read(ctx, &mut buffer)?;
346 Ok(buffer[0])
347 }
348
349 /// Reads a 16-bit unsigned integer from the virtual machine.
350 pub fn read_u16(&self, ctx: impl Into<AccessContext>) -> Result<u16, VmiError> {
351 let mut buffer = [0u8; 2];
352 self.read(ctx, &mut buffer)?;
353 Ok(u16::from_le_bytes(buffer))
354 }
355
356 /// Reads a 32-bit unsigned integer from the virtual machine.
357 pub fn read_u32(&self, ctx: impl Into<AccessContext>) -> Result<u32, VmiError> {
358 let mut buffer = [0u8; 4];
359 self.read(ctx, &mut buffer)?;
360 Ok(u32::from_le_bytes(buffer))
361 }
362
363 /// Reads a 64-bit unsigned integer from the virtual machine.
364 pub fn read_u64(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
365 let mut buffer = [0u8; 8];
366 self.read(ctx, &mut buffer)?;
367 Ok(u64::from_le_bytes(buffer))
368 }
369
370 /// Reads an unsigned integer of the specified size from the virtual machine.
371 ///
372 /// This method reads an unsigned integer of the specified size (in bytes)
373 /// from the virtual machine. Note that the size must be 1, 2, 4, or 8.
374 ///
375 /// The result is returned as a [`u64`] to accommodate the widest possible
376 /// integer size.
377 pub fn read_uint(&self, ctx: impl Into<AccessContext>, size: usize) -> Result<u64, VmiError> {
378 match size {
379 1 => self.read_u8(ctx).map(u64::from),
380 2 => self.read_u16(ctx).map(u64::from),
381 4 => self.read_u32(ctx).map(u64::from),
382 8 => self.read_u64(ctx),
383 _ => Err(VmiError::InvalidAddressWidth),
384 }
385 }
386
387 /// Reads a field of a structure from the virtual machine.
388 ///
389 /// This method reads a field from the virtual machine. The field is
390 /// defined by the provided [`Field`] structure, which specifies the
391 /// offset and size of the field within the memory region.
392 ///
393 /// The result is returned as a [`u64`] to accommodate the widest possible
394 /// integer size.
395 pub fn read_field(
396 &self,
397 ctx: impl Into<AccessContext>,
398 field: &Field,
399 ) -> Result<u64, VmiError> {
400 self.read_uint(ctx.into() + field.offset(), field.size() as usize)
401 }
402
403 /// Reads an address-sized unsigned integer from the virtual machine.
404 ///
405 /// This method reads an address of the specified width (in bytes) from
406 /// the given access context. It's useful when dealing with architectures
407 /// that can operate in different address modes.
408 pub fn read_address(
409 &self,
410 ctx: impl Into<AccessContext>,
411 address_width: usize,
412 ) -> Result<u64, VmiError> {
413 match address_width {
414 4 => self.read_address32(ctx),
415 8 => self.read_address64(ctx),
416 _ => Err(VmiError::InvalidAddressWidth),
417 }
418 }
419
420 /// Reads a 32-bit address from the virtual machine.
421 pub fn read_address32(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
422 Ok(self.read_u32(ctx)? as u64)
423 }
424
425 /// Reads a 64-bit address from the virtual machine.
426 pub fn read_address64(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
427 self.read_u64(ctx)
428 }
429
430 /// Reads a virtual address from the virtual machine.
431 pub fn read_va(
432 &self,
433 ctx: impl Into<AccessContext>,
434 address_width: usize,
435 ) -> Result<Va, VmiError> {
436 Ok(Va(self.read_address(ctx, address_width)?))
437 }
438
439 /// Reads a 32-bit virtual address from the virtual machine.
440 pub fn read_va32(&self, ctx: impl Into<AccessContext>) -> Result<Va, VmiError> {
441 Ok(Va(self.read_address32(ctx)?))
442 }
443
444 /// Reads a 64-bit virtual address from the virtual machine.
445 pub fn read_va64(&self, ctx: impl Into<AccessContext>) -> Result<Va, VmiError> {
446 Ok(Va(self.read_address64(ctx)?))
447 }
448
449 /// Reads a null-terminated string of bytes from the virtual machine with a
450 /// specified limit.
451 pub fn read_string_bytes_limited(
452 &self,
453 ctx: impl Into<AccessContext>,
454 limit: usize,
455 ) -> Result<Vec<u8>, VmiError> {
456 let mut ctx = ctx.into();
457
458 // read until the end of page
459 let mut buffer = vec![
460 0u8;
461 (Driver::Architecture::PAGE_SIZE - (ctx.address & !Driver::Architecture::PAGE_MASK))
462 as usize
463 ];
464 self.read(ctx, &mut buffer)?;
465
466 // try to find the null terminator
467 let position = buffer.iter().position(|&b| b == 0);
468
469 if let Some(position) = position {
470 buffer.truncate(limit.min(position));
471 return Ok(buffer);
472 }
473
474 let mut page = [0u8; 4096_usize]; // FIXME: Driver::Architecture::PAGE_SIZE
475 loop {
476 ctx.address += buffer.len() as u64;
477 self.read(ctx, &mut page)?;
478
479 let position = page.iter().position(|&b| b == 0);
480
481 if let Some(position) = position {
482 buffer.extend_from_slice(&page[..position]);
483
484 if buffer.len() >= limit {
485 buffer.truncate(limit);
486 }
487
488 break;
489 }
490
491 buffer.extend_from_slice(&page);
492
493 if buffer.len() >= limit {
494 buffer.truncate(limit);
495 break;
496 }
497 }
498
499 Ok(buffer)
500 }
501
502 /// Reads a null-terminated string of bytes from the virtual machine.
503 pub fn read_string_bytes(&self, ctx: impl Into<AccessContext>) -> Result<Vec<u8>, VmiError> {
504 self.read_string_bytes_limited(
505 ctx,
506 self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
507 )
508 }
509
510 /// Reads a null-terminated wide string (UTF-16) from the virtual machine
511 /// with a specified limit.
512 pub fn read_string_utf16_bytes_limited(
513 &self,
514 ctx: impl Into<AccessContext>,
515 limit: usize,
516 ) -> Result<Vec<u16>, VmiError> {
517 let mut ctx = ctx.into();
518
519 // read until the end of page
520 let mut buffer = vec![
521 0u8;
522 (Driver::Architecture::PAGE_SIZE - (ctx.address & !Driver::Architecture::PAGE_MASK))
523 as usize
524 ];
525 self.read(ctx, &mut buffer)?;
526
527 // try to find the null terminator
528 let position = buffer
529 .chunks_exact(2)
530 .position(|chunk| chunk[0] == 0 && chunk[1] == 0);
531
532 if let Some(position) = position {
533 buffer.truncate(limit.min(position * 2));
534 return Ok(buffer
535 .chunks_exact(2)
536 .map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]]))
537 .collect());
538 }
539
540 let mut page = [0u8; 4096_usize]; // FIXME: Driver::Architecture::PAGE_SIZE
541 loop {
542 ctx.address += buffer.len() as u64;
543 self.read(ctx, &mut page)?;
544
545 let position = page
546 .chunks_exact(2)
547 .position(|chunk| chunk[0] == 0 && chunk[1] == 0);
548
549 if let Some(position) = position {
550 buffer.extend_from_slice(&page[..position * 2]);
551
552 if buffer.len() >= limit {
553 buffer.truncate(limit);
554 }
555
556 break;
557 }
558
559 buffer.extend_from_slice(&page);
560
561 if buffer.len() >= limit {
562 buffer.truncate(limit);
563 break;
564 }
565 }
566
567 Ok(buffer
568 .chunks_exact(2)
569 .map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]]))
570 .collect())
571 }
572
573 /// Reads a null-terminated wide string (UTF-16) from the virtual machine.
574 pub fn read_string_utf16_bytes(
575 &self,
576 ctx: impl Into<AccessContext>,
577 ) -> Result<Vec<u16>, VmiError> {
578 self.read_string_utf16_bytes_limited(
579 ctx,
580 self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
581 )
582 }
583
584 /// Reads a null-terminated string from the virtual machine with a specified
585 /// limit.
586 pub fn read_string_limited(
587 &self,
588 ctx: impl Into<AccessContext>,
589 limit: usize,
590 ) -> Result<String, VmiError> {
591 Ok(String::from_utf8_lossy(&self.read_string_bytes_limited(ctx, limit)?).into())
592 }
593
594 /// Reads a null-terminated string from the virtual machine.
595 pub fn read_string(&self, ctx: impl Into<AccessContext>) -> Result<String, VmiError> {
596 self.read_string_limited(
597 ctx,
598 self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
599 )
600 }
601
602 /// Reads a null-terminated wide string (UTF-16) from the virtual machine
603 /// with a specified limit.
604 pub fn read_string_utf16_limited(
605 &self,
606 ctx: impl Into<AccessContext>,
607 limit: usize,
608 ) -> Result<String, VmiError> {
609 Ok(String::from_utf16_lossy(
610 &self.read_string_utf16_bytes_limited(ctx, limit)?,
611 ))
612 }
613
614 /// Reads a null-terminated wide string (UTF-16) from the virtual machine.
615 pub fn read_string_utf16(&self, ctx: impl Into<AccessContext>) -> Result<String, VmiError> {
616 self.read_string_utf16_limited(
617 ctx,
618 self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
619 )
620 }
621
    /// Reads a struct from the virtual machine.
    ///
    /// The value is zero-initialized and then filled by a raw memory read,
    /// so `T` must be a plain-old-data type (`FromBytes + IntoBytes`).
    pub fn read_struct<T>(&self, ctx: impl Into<AccessContext>) -> Result<T, VmiError>
    where
        T: FromBytes + IntoBytes,
    {
        let mut result = T::new_zeroed();
        self.read(ctx, result.as_mut_bytes())?;
        Ok(result)
    }

    /// Translates a virtual address to a physical address.
    pub fn translate_address(&self, ctx: impl Into<AddressContext>) -> Result<Pa, VmiError> {
        self.translate_access_context(AccessContext::from(ctx.into()))
    }

    /// Translates an access context to a physical address.
    ///
    /// Dispatches through `translate_access_context_fn`, which points at the
    /// cached or uncached implementation depending on the V2P cache state.
    pub fn translate_access_context(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
        (self.translate_access_context_fn)(self, ctx)
    }

    /// Reads a page of memory from the virtual machine.
    ///
    /// Dispatches through `read_page_fn`, which points at the cached or
    /// uncached implementation depending on the GFN cache state.
    pub fn read_page(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        (self.read_page_fn)(self, gfn)
    }
646
    /// Reads a page of memory from the virtual machine without using the cache.
    fn read_page_nocache(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        self.driver.read_page(gfn)
    }

    /// Reads a page of memory from the virtual machine, using the cache if
    /// enabled.
    fn read_page_cache(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        // The cache borrow is held across the driver read in the miss path;
        // `read_page_nocache` does not touch the cache, so no re-borrow occurs.
        let mut cache = self.cache.gfn.borrow_mut();
        let value = cache.try_get_or_insert(gfn, || self.read_page_nocache(gfn))?;

        // Mapped pages are reference counted, so cloning it is cheap.
        Ok(value.clone())
    }
661
662 /// Translates an access context to a physical address without using the
663 /// cache.
664 ///
665 /// # Notes
666 ///
667 /// If [`TranslationMechanism::Paging`] is used, the `root` must be present.
668 /// In case the root is not present, a [`VmiError::RootNotPresent`] error is
669 /// returned.
670 fn translate_access_context_nocache(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
671 Ok(match ctx.mechanism {
672 TranslationMechanism::Direct => Pa(ctx.address),
673 TranslationMechanism::Paging { root } => match root {
674 Some(root) => <Driver::Architecture as Architecture>::translate_address(
675 self,
676 ctx.address.into(),
677 root,
678 )?,
679 None => return Err(VmiError::RootNotPresent),
680 },
681 })
682 }
683
684 /// Translates an access context to a physical address, using the cache if
685 /// enabled.
686 fn translate_access_context_cache(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
687 let mut cache = self.cache.v2p.borrow_mut();
688 let value = cache.try_get_or_insert(ctx, || self.translate_access_context_nocache(ctx))?;
689 Ok(*value)
690 }
691}
692
693///////////////////////////////////////////////////////////////////////////////
694// VmiRead + VmiWrite
695///////////////////////////////////////////////////////////////////////////////
696
697impl<Driver> VmiCore<Driver>
698where
699 Driver: VmiRead + VmiWrite,
700{
701 /// Writes memory to the virtual machine.
702 pub fn write(&self, ctx: impl Into<AccessContext>, buffer: &[u8]) -> Result<(), VmiError> {
703 let ctx = ctx.into();
704 let mut position = 0usize;
705 let mut remaining = buffer.len();
706
707 while remaining > 0 {
708 let address = self.translate_access_context(ctx + position as u64)?;
709 let gfn = Driver::Architecture::gfn_from_pa(address);
710 let offset = Driver::Architecture::pa_offset(address);
711
712 let size = std::cmp::min(
713 remaining,
714 (Driver::Architecture::PAGE_SIZE - offset) as usize,
715 );
716 let content = &buffer[position..position + size];
717
718 self.driver.write_page(gfn, offset, content)?;
719
720 position += size;
721 remaining -= size;
722 }
723
724 Ok(())
725 }
726
727 /// Writes a single byte to the virtual machine.
728 pub fn write_u8(&self, ctx: impl Into<AccessContext>, value: u8) -> Result<(), VmiError> {
729 self.write(ctx, &value.to_le_bytes())
730 }
731
732 /// Writes a 16-bit unsigned integer to the virtual machine.
733 pub fn write_u16(&self, ctx: impl Into<AccessContext>, value: u16) -> Result<(), VmiError> {
734 self.write(ctx, &value.to_le_bytes())
735 }
736
737 /// Writes a 32-bit unsigned integer to the virtual machine.
738 pub fn write_u32(&self, ctx: impl Into<AccessContext>, value: u32) -> Result<(), VmiError> {
739 self.write(ctx, &value.to_le_bytes())
740 }
741
742 /// Writes a 64-bit unsigned integer to the virtual machine.
743 pub fn write_u64(&self, ctx: impl Into<AccessContext>, value: u64) -> Result<(), VmiError> {
744 self.write(ctx, &value.to_le_bytes())
745 }
746
747 /// Writes a struct to the virtual machine.
748 pub fn write_struct<T>(&self, ctx: impl Into<AccessContext>, value: T) -> Result<(), VmiError>
749 where
750 T: IntoBytes + Immutable,
751 {
752 self.write(ctx, value.as_bytes())
753 }
754}
755
756///////////////////////////////////////////////////////////////////////////////
757// VmiQueryProtection
758///////////////////////////////////////////////////////////////////////////////
759
impl<Driver> VmiCore<Driver>
where
    Driver: VmiQueryProtection,
{
    /// Retrieves the memory access permissions for a specific guest frame
    /// number (GFN).
    ///
    /// The returned `MemoryAccess` indicates the current read, write, and
    /// execute permissions for the specified memory page in the given view.
    ///
    /// Delegates directly to the underlying driver.
    pub fn memory_access(&self, gfn: Gfn, view: View) -> Result<MemoryAccess, VmiError> {
        self.driver.memory_access(gfn, view)
    }
}
773
774///////////////////////////////////////////////////////////////////////////////
775// VmiSetProtection
776///////////////////////////////////////////////////////////////////////////////
777
impl<Driver> VmiCore<Driver>
where
    Driver: VmiSetProtection,
{
    /// Sets the memory access permissions for a specific guest frame number
    /// (GFN).
    ///
    /// This method allows you to modify the read, write, and execute
    /// permissions for a given memory page in the specified view.
    ///
    /// Delegates directly to the underlying driver.
    pub fn set_memory_access(
        &self,
        gfn: Gfn,
        view: View,
        access: MemoryAccess,
    ) -> Result<(), VmiError> {
        self.driver.set_memory_access(gfn, view, access)
    }

    /// Sets the memory access permissions for a specific guest frame number
    /// (GFN) with additional options.
    ///
    /// In addition to the basic read, write, and execute permissions, this
    /// method allows you to specify additional options for the memory access.
    ///
    /// Delegates directly to the underlying driver.
    pub fn set_memory_access_with_options(
        &self,
        gfn: Gfn,
        view: View,
        access: MemoryAccess,
        options: MemoryAccessOptions,
    ) -> Result<(), VmiError> {
        self.driver
            .set_memory_access_with_options(gfn, view, access, options)
    }
}
812
813///////////////////////////////////////////////////////////////////////////////
814// VmiQueryRegisters
815///////////////////////////////////////////////////////////////////////////////
816
impl<Driver> VmiCore<Driver>
where
    Driver: VmiQueryRegisters,
{
    /// Retrieves the current state of CPU registers for a specified virtual
    /// CPU.
    ///
    /// This method allows you to access the current values of CPU registers,
    /// which is crucial for understanding the state of the virtual machine
    /// at a given point in time.
    ///
    /// Delegates directly to the underlying driver.
    ///
    /// # Notes
    ///
    /// The exact structure and content of the returned registers depend on the
    /// specific architecture of the VM being introspected. Refer to the
    /// documentation of your [`Architecture`] implementation for details on
    /// how to interpret the register values.
    pub fn registers(
        &self,
        vcpu: VcpuId,
    ) -> Result<<Driver::Architecture as Architecture>::Registers, VmiError> {
        self.driver.registers(vcpu)
    }
}
841
842///////////////////////////////////////////////////////////////////////////////
843// VmiSetRegisters
844///////////////////////////////////////////////////////////////////////////////
845
impl<Driver> VmiCore<Driver>
where
    Driver: VmiSetRegisters,
{
    /// Sets the registers of a virtual CPU.
    ///
    /// Delegates directly to the underlying driver.
    pub fn set_registers(
        &self,
        vcpu: VcpuId,
        registers: <Driver::Architecture as Architecture>::Registers,
    ) -> Result<(), VmiError> {
        self.driver.set_registers(vcpu, registers)
    }
}
859
860///////////////////////////////////////////////////////////////////////////////
861// VmiViewControl
862///////////////////////////////////////////////////////////////////////////////
863
impl<Driver> VmiCore<Driver>
where
    Driver: VmiViewControl,
{
    /// Returns the default view for the virtual machine.
    ///
    /// The default view typically represents the normal, unmodified state of
    /// the VM's memory.
    pub fn default_view(&self) -> View {
        self.driver.default_view()
    }

    /// Creates a new view with the specified default access permissions.
    ///
    /// Views allow for creating different perspectives of the VM's memory,
    /// which can be useful for analysis or isolation purposes. The default
    /// access permissions apply to memory pages not explicitly modified
    /// within this view.
    pub fn create_view(&self, default_access: MemoryAccess) -> Result<View, VmiError> {
        self.driver.create_view(default_access)
    }

    /// Destroys a previously created view.
    ///
    /// This method removes a view and frees associated resources. It should be
    /// called when a view is no longer needed to prevent resource leaks.
    pub fn destroy_view(&self, view: View) -> Result<(), VmiError> {
        self.driver.destroy_view(view)
    }

    /// Switches to a different view for all virtual CPUs.
    ///
    /// This method changes the current active view for all vCPUs, affecting
    /// subsequent memory operations across the entire VM. It allows for
    /// quick transitions between different memory perspectives globally.
    ///
    /// Note the difference between this method and
    /// [`VmiEventResponse::with_view()`]:
    /// - `switch_to_view()` changes the view for all vCPUs immediately.
    /// - `VmiEventResponse::with_view()` sets the view only for the specific
    ///   vCPU that received the event, and the change is applied when the event
    ///   handler returns.
    ///
    /// Use `switch_to_view()` for global view changes, and
    /// `VmiEventResponse::with_view()` for targeted, event-specific view
    /// modifications on individual vCPUs.
    pub fn switch_to_view(&self, view: View) -> Result<(), VmiError> {
        self.driver.switch_to_view(view)
    }

    /// Changes the mapping of a guest frame number (GFN) in a specific view.
    ///
    /// This method allows for remapping a GFN to a different physical frame
    /// within a view, enabling fine-grained control over memory layout in
    /// different views.
    ///
    /// A notable use case for this method is implementing "stealth hooks":
    /// 1. Create a new GFN and copy the contents of the original page to it.
    /// 2. Modify the new page by installing a breakpoint (e.g., 0xcc on AMD64)
    ///    at a strategic location.
    /// 3. Use this method to change the mapping of the original GFN to the new
    ///    one.
    /// 4. Set the memory access of the new GFN to non-readable.
    ///
    /// When a read access occurs:
    /// - The handler should enable single-stepping.
    /// - Switch to an unmodified view (e.g., `default_view`) to execute the
    ///   read instruction, which will read the original non-breakpoint byte.
    /// - Re-enable single-stepping afterwards.
    ///
    /// This technique allows for transparent breakpoints that are difficult to
    /// detect by the guest OS or applications.
    pub fn change_view_gfn(&self, view: View, old_gfn: Gfn, new_gfn: Gfn) -> Result<(), VmiError> {
        self.driver.change_view_gfn(view, old_gfn, new_gfn)
    }

    /// Resets the mapping of a guest frame number (GFN) in a specific view to
    /// its original state.
    ///
    /// This method reverts any custom mapping for the specified GFN in the
    /// given view, restoring it to the default mapping.
    pub fn reset_view_gfn(&self, view: View, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.reset_view_gfn(view, gfn)
    }
}
949
950///////////////////////////////////////////////////////////////////////////////
951// VmiEventControl
952///////////////////////////////////////////////////////////////////////////////
953
impl<Driver> VmiCore<Driver>
where
    Driver: VmiEventControl,
{
    /// Enables monitoring of specific events.
    ///
    /// This method allows you to enable monitoring of specific events, such as
    /// control register writes, interrupts, or single-step execution.
    /// Monitoring events can be useful for tracking specific guest behavior or
    /// for implementing custom analysis tools.
    ///
    /// The type of event to monitor is defined by the architecture-specific
    /// [`Architecture::EventMonitor`] type.
    ///
    /// When an event occurs, it will be passed to the event callback function
    /// for processing.
    ///
    /// Monitoring can be turned off again with [`Self::monitor_disable`].
    pub fn monitor_enable(
        &self,
        option: <Driver::Architecture as Architecture>::EventMonitor,
    ) -> Result<(), VmiError> {
        self.driver.monitor_enable(option)
    }

    /// Disables monitoring of specific events.
    ///
    /// This method allows you to disable monitoring of specific events that
    /// were previously enabled via [`Self::monitor_enable`]. It can be used to
    /// stop tracking certain hardware events or to reduce the overhead of
    /// event processing.
    ///
    /// The type of event to disable is defined by the architecture-specific
    /// [`Architecture::EventMonitor`] type.
    pub fn monitor_disable(
        &self,
        option: <Driver::Architecture as Architecture>::EventMonitor,
    ) -> Result<(), VmiError> {
        self.driver.monitor_disable(option)
    }

    /// Returns the number of pending events.
    ///
    /// This method provides a count of events that have occurred but have not
    /// yet been processed.
    pub fn events_pending(&self) -> usize {
        self.driver.events_pending()
    }

    /// Returns the time spent processing events by the driver.
    ///
    /// This method provides a measure of the overhead introduced by event
    /// processing. It can be useful for performance tuning and
    /// understanding the impact of VMI operations on overall system
    /// performance.
    pub fn event_processing_overhead(&self) -> Duration {
        self.driver.event_processing_overhead()
    }

    /// Waits for an event to occur and processes it with the provided handler.
    ///
    /// This method blocks until an event occurs or the specified timeout is
    /// reached. When an event occurs, it is passed to the provided callback
    /// function for processing. The [`VmiEventResponse`] returned by the
    /// handler is handed back to the driver.
    pub fn wait_for_event(
        &self,
        timeout: Duration,
        handler: impl FnMut(&VmiEvent<Driver::Architecture>) -> VmiEventResponse<Driver::Architecture>,
    ) -> Result<(), VmiError> {
        self.driver.wait_for_event(timeout, handler)
    }
}
1023
1024///////////////////////////////////////////////////////////////////////////////
1025// VmiVmControl
1026///////////////////////////////////////////////////////////////////////////////
1027
impl<Driver> VmiCore<Driver>
where
    Driver: VmiVmControl,
{
    /// Pauses the virtual machine.
    ///
    /// Use [`Self::resume`] to let the virtual machine run again.
    pub fn pause(&self) -> Result<(), VmiError> {
        self.driver.pause()
    }

    /// Resumes the virtual machine.
    pub fn resume(&self) -> Result<(), VmiError> {
        self.driver.resume()
    }

    /// Pauses the virtual machine and returns a guard that will resume it when
    /// dropped.
    ///
    /// Note that a failure to resume when the guard is dropped is logged
    /// rather than propagated to the caller.
    pub fn pause_guard(&self) -> Result<VmiPauseGuard<'_, Driver>, VmiError> {
        VmiPauseGuard::new(&self.driver)
    }

    /// Allocates a guest frame number (GFN).
    ///
    /// This method allocates a new GFN, with the driver responsible for
    /// choosing the specific frame to allocate. It's useful when you need
    /// to allocate new memory pages for the VM without caring about the
    /// specific location. Release the frame with [`Self::free_gfn`] once it
    /// is no longer needed.
    pub fn allocate_gfn(&self) -> Result<Gfn, VmiError> {
        self.driver.allocate_gfn()
    }

    /// Allocates a guest frame number (GFN) at a specific location.
    ///
    /// This method allows you to allocate a particular GFN. It's useful
    /// when you need to allocate a specific memory page for the VM.
    pub fn allocate_gfn_at(&self, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.allocate_gfn_at(gfn)
    }

    /// Frees a previously allocated guest frame number (GFN).
    ///
    /// This method deallocates a GFN that was previously allocated. It's
    /// important to free GFNs when they're no longer needed to prevent
    /// memory leaks in the VM.
    pub fn free_gfn(&self, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.free_gfn(gfn)
    }

    /// Injects an interrupt into a specific virtual CPU.
    ///
    /// This method allows for the injection of architecture-specific interrupts
    /// into a given vCPU. It can be used to simulate hardware events or to
    /// manipulate the guest's execution flow for analysis purposes.
    ///
    /// The type of interrupt and its parameters are defined by the
    /// architecture-specific [`Architecture::Interrupt`] type.
    pub fn inject_interrupt(
        &self,
        vcpu: VcpuId,
        interrupt: <Driver::Architecture as Architecture>::Interrupt,
    ) -> Result<(), VmiError> {
        self.driver.inject_interrupt(vcpu, interrupt)
    }

    /// Resets the state of the VMI system.
    ///
    /// This method clears all event monitors, caches, and any other stateful
    /// data maintained by the VMI system. It's useful for bringing the VMI
    /// system back to a known clean state, which can be necessary when
    /// switching between different analysis tasks or recovering from error
    /// conditions.
    pub fn reset_state(&self) -> Result<(), VmiError> {
        self.driver.reset_state()
    }
}
1102
1103/// A guard that pauses the virtual machine on creation and resumes it on drop.
1104pub struct VmiPauseGuard<'a, Driver>
1105where
1106 Driver: VmiVmControl,
1107{
1108 driver: &'a Driver,
1109}
1110
1111impl<'a, Driver> VmiPauseGuard<'a, Driver>
1112where
1113 Driver: VmiVmControl,
1114{
1115 /// Creates a new pause guard.
1116 pub fn new(driver: &'a Driver) -> Result<Self, VmiError> {
1117 driver.pause()?;
1118 Ok(Self { driver })
1119 }
1120}
1121
impl<Driver> Drop for VmiPauseGuard<'_, Driver>
where
    Driver: VmiVmControl,
{
    fn drop(&mut self) {
        // `Drop::drop` cannot return a `Result`, so a failure to resume the
        // virtual machine is logged here instead of being propagated.
        if let Err(err) = self.driver.resume() {
            tracing::error!(%err, "Failed to resume the virtual machine");
        }
    }
}