//! Core VMI functionality.

pub mod arch;
mod context;
mod core;
mod driver;
mod error;
mod event;
mod handler;
pub mod os;
mod page;
mod session;

use std::{
    cell::RefCell,
    num::NonZeroUsize,
    time::{Duration, Instant},
};

use lru::LruCache;
use zerocopy::{FromBytes, Immutable, IntoBytes};

pub use self::{
    arch::{Architecture, Registers},
    context::{VmiContext, VmiContextProber, VmiOsContext, VmiOsContextProber},
    core::{
        AccessContext, AddressContext, Gfn, Hex, MemoryAccess, Pa, TranslationMechanism, Va,
        VcpuId, View, VmiInfo,
    },
    driver::VmiDriver,
    error::{PageFault, PageFaults, VmiError},
    event::{VmiEvent, VmiEventFlags, VmiEventResponse, VmiEventResponseFlags},
    handler::VmiHandler,
    os::VmiOs,
    page::VmiMappedPage,
    session::{VmiOsSession, VmiOsSessionProber, VmiSession, VmiSessionProber},
};

struct Cache {
    gfn: RefCell<LruCache<Gfn, VmiMappedPage>>,
    v2p: RefCell<LruCache<AccessContext, Pa>>,
}

impl Cache {
    const DEFAULT_SIZE: usize = 8192;

    pub fn new() -> Self {
        Self {
            gfn: RefCell::new(LruCache::new(
                NonZeroUsize::new(Self::DEFAULT_SIZE).unwrap(),
            )),
            v2p: RefCell::new(LruCache::new(
                NonZeroUsize::new(Self::DEFAULT_SIZE).unwrap(),
            )),
        }
    }
}

/// A callback function type for handling VMI events.
///
/// This type represents a function or closure that is called when a VMI event
/// occurs. The [`VmiEventResponse`] returned by the callback determines how the
/// event is handled.
pub type VmiEventCallback<'a, Arch> = dyn FnMut(&VmiEvent<Arch>) -> VmiEventResponse<Arch> + 'a;

/// The core functionality for Virtual Machine Introspection (VMI).
pub struct VmiCore<Driver>
where
    Driver: VmiDriver,
{
    driver: Driver,
    cache: Cache,

    read_page_fn: fn(&Self, Gfn) -> Result<VmiMappedPage, VmiError>,
    translate_access_context_fn: fn(&Self, AccessContext) -> Result<Pa, VmiError>,

    read_string_length_limit: RefCell<Option<usize>>,
    created: Instant,
}

impl<Driver> VmiCore<Driver>
where
    Driver: VmiDriver,
{
    /// Creates a new VmiCore instance with the given driver.
    ///
    /// Both the GFN cache and the V2P cache are enabled by default,
    /// each with a capacity of 8192 entries.
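    ///
    /// # Examples
    ///
    /// A minimal construction sketch. `MyDriver` is a placeholder for any
    /// [`VmiDriver`] implementation; the builder calls shown below are the
    /// ones provided by this type.
    ///
    /// ```ignore
    /// // Construct the core and tune the caches and the string-read limit.
    /// let core = VmiCore::new(MyDriver::new()?)?
    ///     .with_gfn_cache(4096)                 // cache up to 4096 mapped pages
    ///     .with_v2p_cache(4096)                 // cache up to 4096 translations
    ///     .with_read_string_length_limit(4096); // truncate long string reads
    /// ```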
    pub fn new(driver: Driver) -> Result<Self, VmiError> {
        Ok(Self {
            driver,
            cache: Cache::new(),
            read_page_fn: Self::read_page_cache,
            translate_access_context_fn: Self::translate_access_context_cache,
            read_string_length_limit: RefCell::new(None),
            created: Instant::now(),
        })
    }

    /// Enables the Guest Frame Number (GFN) cache.
    ///
    /// The GFN cache stores the contents of recently accessed memory pages,
    /// indexed by their GFN. This can significantly improve performance when
    /// repeatedly accessing the same memory regions, as it avoids redundant
    /// reads from the virtual machine.
    ///
    /// When enabled, subsequent calls to [`read_page`] will first check
    /// the cache before querying the driver.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    ///
    /// [`read_page`]: Self::read_page
    pub fn with_gfn_cache(self, size: usize) -> Self {
        Self {
            cache: Cache {
                gfn: RefCell::new(LruCache::new(NonZeroUsize::new(size).unwrap())),
                ..self.cache
            },
            read_page_fn: Self::read_page_cache,
            ..self
        }
    }

    /// Enables the GFN cache.
    ///
    /// See [`with_gfn_cache`] for more details.
    ///
    /// [`with_gfn_cache`]: Self::with_gfn_cache
    pub fn enable_gfn_cache(&mut self) {
        self.read_page_fn = Self::read_page_cache;
    }

    /// Disables the GFN cache.
    ///
    /// Subsequent calls to [`read_page`] will bypass the cache and read
    /// directly from the virtual machine.
    ///
    /// [`read_page`]: Self::read_page
    pub fn disable_gfn_cache(&mut self) {
        self.read_page_fn = Self::read_page_nocache;
    }

    /// Resizes the GFN cache.
    ///
    /// This allows you to adjust the cache size dynamically based on your
    /// performance needs. A larger cache can improve performance for
    /// workloads with high memory locality, but consumes more memory.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    pub fn resize_gfn_cache(&mut self, size: usize) {
        self.cache
            .gfn
            .borrow_mut()
            .resize(NonZeroUsize::new(size).unwrap());
    }

    /// Removes a specific entry from the GFN cache.
    ///
    /// Returns the removed entry if it was present.
    /// This is useful for invalidating cached data that might have
    /// become stale.
    pub fn flush_gfn_cache_entry(&self, gfn: Gfn) -> Option<VmiMappedPage> {
        self.cache.gfn.borrow_mut().pop(&gfn)
    }

    /// Clears the entire GFN cache.
    pub fn flush_gfn_cache(&self) {
        self.cache.gfn.borrow_mut().clear();
    }

    ///// Retrieves metrics about the GFN cache.
    //pub fn gfn_cache_metrics(&self) -> CacheMetrics {
    //    let cache = self.cache.gfn.borrow();
    //    CacheMetrics {
    //        hits: ...,
    //        misses: ...,
    //    }
    //}

    /// Enables the virtual-to-physical (V2P) address translation cache.
    ///
    /// The V2P cache stores the results of recent address translations,
    /// mapping virtual addresses (represented by [`AccessContext`]) to their
    /// corresponding physical addresses ([`Pa`]). This can significantly
    /// speed up memory access operations, as address translation can be a
    /// relatively expensive operation.
    ///
    /// When enabled, [`translate_access_context`] will consult the cache
    /// before performing a full translation.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    ///
    /// [`translate_access_context`]: Self::translate_access_context
    pub fn with_v2p_cache(self, size: usize) -> Self {
        Self {
            cache: Cache {
                v2p: RefCell::new(LruCache::new(NonZeroUsize::new(size).unwrap())),
                ..self.cache
            },
            translate_access_context_fn: Self::translate_access_context_cache,
            ..self
        }
    }

    /// Enables the V2P cache.
    ///
    /// See [`with_v2p_cache`] for more details.
    ///
    /// [`with_v2p_cache`]: Self::with_v2p_cache
    pub fn enable_v2p_cache(&mut self) {
        self.translate_access_context_fn = Self::translate_access_context_cache;
    }

    /// Disables the V2P cache.
    ///
    /// Subsequent calls to [`translate_access_context`] will bypass the cache
    /// and perform a full address translation every time.
    ///
    /// [`translate_access_context`]: Self::translate_access_context
    pub fn disable_v2p_cache(&mut self) {
        self.translate_access_context_fn = Self::translate_access_context_nocache;
    }

    /// Resizes the V2P cache.
    ///
    /// This allows dynamic adjustment of the cache size to balance
    /// performance and memory usage. A larger cache can lead to better
    /// performance if address translations are frequent and exhibit
    /// good locality.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    pub fn resize_v2p_cache(&mut self, size: usize) {
        self.cache
            .v2p
            .borrow_mut()
            .resize(NonZeroUsize::new(size).unwrap());
    }

    /// Removes a specific entry from the V2P cache.
    ///
    /// Returns the removed entry if it was present.
    /// This can be used to invalidate cached translations that may have
    /// become stale due to changes in the guest's memory mapping.
    pub fn flush_v2p_cache_entry(&self, ctx: AccessContext) -> Option<Pa> {
        self.cache.v2p.borrow_mut().pop(&ctx)
    }

    /// Clears the entire V2P cache.
    ///
    /// This method is crucial for maintaining consistency when handling events.
    /// The guest operating system can modify page tables or other structures
    /// related to address translation between events. Using stale translations
    /// can lead to incorrect memory access and unexpected behavior.
    /// It is recommended to call this method at the beginning of each
    /// [`VmiHandler::handle_event`] loop to ensure that you are working with
    /// the most up-to-date address mappings.
    pub fn flush_v2p_cache(&self) {
        self.cache.v2p.borrow_mut().clear();
    }

    ///// Retrieves metrics about the V2P cache.
    //pub fn v2p_cache_metrics(&self) -> CacheMetrics {
    //    let cache = self.cache.v2p.borrow();
    //    CacheMetrics {
    //        hits: ...,
    //        misses: ...,
    //    }
    //}

    /// Sets a limit on the length of strings read by the `read_string` methods.
    /// If the limit is reached, the string will be truncated.
    pub fn with_read_string_length_limit(self, limit_in_bytes: usize) -> Self {
        Self {
            read_string_length_limit: RefCell::new(Some(limit_in_bytes)),
            ..self
        }
    }

    /// Returns the current limit on the length of strings read by the
    /// `read_string` methods.
    pub fn read_string_length_limit(&self) -> Option<usize> {
        *self.read_string_length_limit.borrow()
    }

    /// Sets a limit on the length of strings read by the `read_string` methods.
    ///
    /// This method allows you to set a maximum length (in bytes) for strings
    /// read from the virtual machine's memory. When set, string reading
    /// operations will truncate their results to this limit. This can be
    /// useful for preventing excessively long string reads, which might
    /// impact performance or consume too much memory.
    ///
    /// If the limit is reached during a string read operation, the resulting
    /// string will be truncated to the specified length.
    ///
    /// To remove the limit, call this method with `None`.
    pub fn set_read_string_length_limit(&self, limit: Option<usize>) {
        *self.read_string_length_limit.borrow_mut() = limit;
    }

    /// Returns the duration since this `VmiCore` instance was created.
    pub fn elapsed(&self) -> Duration {
        self.created.elapsed()
    }

    /// Retrieves information about the virtual machine.
    pub fn info(&self) -> Result<VmiInfo, VmiError> {
        self.driver.info()
    }

    /// Pauses the virtual machine.
    pub fn pause(&self) -> Result<(), VmiError> {
        self.driver.pause()
    }

    /// Resumes the virtual machine.
    pub fn resume(&self) -> Result<(), VmiError> {
        self.driver.resume()
    }

    /// Pauses the virtual machine and returns a guard that will resume it when
    /// dropped.
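    ///
    /// # Examples
    ///
    /// A sketch of scoped pausing; `core` and `vcpu` are assumed to exist in
    /// the surrounding code.
    ///
    /// ```ignore
    /// {
    ///     // The VM stays paused for the duration of this scope and resumes
    ///     // when `_guard` is dropped, even on an early `?` return.
    ///     let _guard = core.pause_guard()?;
    ///     let _registers = core.registers(vcpu)?;
    ///     // ... inspect a consistent snapshot of the guest state ...
    /// }
    /// ```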
    pub fn pause_guard(&self) -> Result<VmiPauseGuard<'_, Driver>, VmiError> {
        VmiPauseGuard::new(&self.driver)
    }

    /// Retrieves the current state of CPU registers for a specified virtual
    /// CPU.
    ///
    /// This method allows you to access the current values of CPU registers,
    /// which is crucial for understanding the state of the virtual machine
    /// at a given point in time.
    ///
    /// # Notes
    ///
    /// The exact structure and content of the returned registers depend on the
    /// specific architecture of the VM being introspected. Refer to the
    /// documentation of your [`Architecture`] implementation for details on
    /// how to interpret the register values.
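    ///
    /// # Examples
    ///
    /// A read-modify-write sketch; `core` and `vcpu` are assumed to exist, and
    /// the concrete register fields depend on the architecture (e.g. `rip` on
    /// AMD64), so the modification itself is elided.
    ///
    /// ```ignore
    /// let mut registers = core.registers(vcpu)?;
    /// // ... adjust architecture-specific fields on `registers` ...
    /// core.set_registers(vcpu, registers)?;
    /// ```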
    pub fn registers(
        &self,
        vcpu: VcpuId,
    ) -> Result<<Driver::Architecture as Architecture>::Registers, VmiError> {
        self.driver.registers(vcpu)
    }

    /// Sets the registers of a virtual CPU.
    pub fn set_registers(
        &self,
        vcpu: VcpuId,
        registers: <Driver::Architecture as Architecture>::Registers,
    ) -> Result<(), VmiError> {
        self.driver.set_registers(vcpu, registers)
    }

    /// Retrieves the memory access permissions for a specific guest frame
    /// number (GFN).
    ///
    /// The returned `MemoryAccess` indicates the current read, write, and
    /// execute permissions for the specified memory page in the given view.
    pub fn memory_access(&self, gfn: Gfn, view: View) -> Result<MemoryAccess, VmiError> {
        self.driver.memory_access(gfn, view)
    }

    /// Sets the memory access permissions for a specific guest frame number
    /// (GFN).
    ///
    /// This method allows you to modify the read, write, and execute
    /// permissions for a given memory page in the specified view.
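    ///
    /// # Examples
    ///
    /// A sketch of temporarily overriding page permissions; `core`, `gfn`,
    /// `view`, and `new_access` are assumed to exist in the surrounding code.
    ///
    /// ```ignore
    /// // Remember the current permissions, apply the override, and restore
    /// // them once the interesting accesses have been observed.
    /// let original = core.memory_access(gfn, view)?;
    /// core.set_memory_access(gfn, view, new_access)?;
    /// // ... let the guest run and handle the resulting memory events ...
    /// core.set_memory_access(gfn, view, original)?;
    /// ```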
    pub fn set_memory_access(
        &self,
        gfn: Gfn,
        view: View,
        access: MemoryAccess,
    ) -> Result<(), VmiError> {
        self.driver.set_memory_access(gfn, view, access)
    }

    /// Allocates the next available guest frame number (GFN).
    ///
    /// This method finds and allocates the next free GFN after the current
    /// maximum GFN. It's useful when you need to allocate new memory pages
    /// for the VM.
    pub fn allocate_next_available_gfn(&self) -> Result<Gfn, VmiError> {
        let info = self.info()?;

        let next_available_gfn = info.max_gfn + 1;
        self.allocate_gfn(next_available_gfn)?;
        Ok(next_available_gfn)
    }

    /// Allocates a specific guest frame number (GFN).
    ///
    /// This method allows you to allocate a particular GFN. It's useful when
    /// you need to allocate a specific memory page for the VM.
    pub fn allocate_gfn(&self, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.allocate_gfn(gfn)
    }

    /// Frees a previously allocated guest frame number (GFN).
    ///
    /// This method deallocates a GFN that was previously allocated. It's
    /// important to free GFNs when they're no longer needed to prevent
    /// memory leaks in the VM.
    pub fn free_gfn(&self, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.free_gfn(gfn)
    }

    /// Returns the default view for the virtual machine.
    ///
    /// The default view typically represents the normal, unmodified state of
    /// the VM's memory.
    pub fn default_view(&self) -> View {
        self.driver.default_view()
    }

    /// Creates a new view with the specified default access permissions.
    ///
    /// Views allow for creating different perspectives of the VM's memory,
    /// which can be useful for analysis or isolation purposes. The default
    /// access permissions apply to memory pages not explicitly modified
    /// within this view.
    pub fn create_view(&self, default_access: MemoryAccess) -> Result<View, VmiError> {
        self.driver.create_view(default_access)
    }

    /// Destroys a previously created view.
    ///
    /// This method removes a view and frees associated resources. It should be
    /// called when a view is no longer needed to prevent resource leaks.
    pub fn destroy_view(&self, view: View) -> Result<(), VmiError> {
        self.driver.destroy_view(view)
    }

    /// Switches to a different view for all virtual CPUs.
    ///
    /// This method changes the current active view for all vCPUs, affecting
    /// subsequent memory operations across the entire VM. It allows for
    /// quick transitions between different memory perspectives globally.
    ///
    /// Note the difference between this method and
    /// [`VmiEventResponse::set_view()`]:
    /// - `switch_to_view()` changes the view for all vCPUs immediately.
    /// - `VmiEventResponse::set_view()` sets the view only for the specific
    ///   vCPU that received the event, and the change is applied when the event
    ///   handler returns.
    ///
    /// Use `switch_to_view()` for global view changes, and
    /// `VmiEventResponse::set_view()` for targeted, event-specific view
    /// modifications on individual vCPUs.
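    ///
    /// # Examples
    ///
    /// A sketch of a view's lifecycle; `core` and `default_access` are assumed
    /// to exist in the surrounding code.
    ///
    /// ```ignore
    /// // Create a view, make it active on all vCPUs, and clean up afterwards.
    /// let view = core.create_view(default_access)?;
    /// core.switch_to_view(view)?;
    /// // ... perform introspection under the new view ...
    /// core.switch_to_view(core.default_view())?;
    /// core.destroy_view(view)?;
    /// ```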
    pub fn switch_to_view(&self, view: View) -> Result<(), VmiError> {
        self.driver.switch_to_view(view)
    }

    /// Changes the mapping of a guest frame number (GFN) in a specific view.
    ///
    /// This method allows for remapping a GFN to a different physical frame
    /// within a view, enabling fine-grained control over memory layout in
    /// different views.
    ///
    /// A notable use case for this method is implementing "stealth hooks":
    /// 1. Create a new GFN and copy the contents of the original page to it.
    /// 2. Modify the new page by installing a breakpoint (e.g., 0xcc on AMD64)
    ///    at a strategic location.
    /// 3. Use this method to change the mapping of the original GFN to the new
    ///    one.
    /// 4. Set the memory access of the new GFN to non-readable.
    ///
    /// When a read access occurs:
    /// - The handler should enable single-stepping.
    /// - Switch to an unmodified view (e.g., `default_view`) so that the read
    ///   instruction executes against the original, non-breakpoint byte.
    /// - After the single step completes, switch back to the modified view and
    ///   disable single-stepping.
    ///
    /// This technique allows for transparent breakpoints that are difficult to
    /// detect by the guest OS or applications.
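    ///
    /// # Examples
    ///
    /// A sketch of steps 1-4 above. The shadow page is assumed to have been
    /// allocated already (e.g. via [`allocate_next_available_gfn`]),
    /// `shadow_ctx` is an access context addressing its physical memory, and
    /// `non_readable` is a driver-specific [`MemoryAccess`] value without the
    /// read permission.
    ///
    /// ```ignore
    /// fn install_stealth_hook<D: VmiDriver>(
    ///     core: &VmiCore<D>,
    ///     view: View,
    ///     original_gfn: Gfn,
    ///     shadow_gfn: Gfn,
    ///     shadow_ctx: AccessContext,
    ///     hook_offset: u64,
    ///     non_readable: MemoryAccess,
    /// ) -> Result<(), VmiError> {
    ///     // 1. Copy the original page contents into the shadow page.
    ///     let original = core.read_page(original_gfn)?;
    ///     core.write(shadow_ctx, &original[..])?;
    ///
    ///     // 2. Install a breakpoint (0xcc on AMD64) at the hook location.
    ///     core.write_u8(shadow_ctx + hook_offset, 0xcc)?;
    ///
    ///     // 3. Remap the original GFN to the shadow page in this view.
    ///     core.change_view_gfn(view, original_gfn, shadow_gfn)?;
    ///
    ///     // 4. Make the shadow page non-readable so reads can be intercepted.
    ///     core.set_memory_access(shadow_gfn, view, non_readable)?;
    ///     Ok(())
    /// }
    /// ```
    ///
    /// [`allocate_next_available_gfn`]: Self::allocate_next_available_gfn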
    pub fn change_view_gfn(&self, view: View, old_gfn: Gfn, new_gfn: Gfn) -> Result<(), VmiError> {
        self.driver.change_view_gfn(view, old_gfn, new_gfn)
    }

    /// Resets the mapping of a guest frame number (GFN) in a specific view to
    /// its original state.
    ///
    /// This method reverts any custom mapping for the specified GFN in the
    /// given view, restoring it to the default mapping.
    pub fn reset_view_gfn(&self, view: View, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.reset_view_gfn(view, gfn)
    }

    /// Enables monitoring of specific events.
    pub fn monitor_enable(
        &self,
        option: <Driver::Architecture as Architecture>::EventMonitor,
    ) -> Result<(), VmiError> {
        self.driver.monitor_enable(option)
    }

    /// Disables monitoring of specific events.
    pub fn monitor_disable(
        &self,
        option: <Driver::Architecture as Architecture>::EventMonitor,
    ) -> Result<(), VmiError> {
        self.driver.monitor_disable(option)
    }

    /*
    /// Enables or disables monitoring of specific CPU registers.
    ///
    /// This method allows for setting up event triggers when certain CPU
    /// registers are accessed or modified. The specific registers that can
    /// be monitored depend on the architecture and are defined by the
    /// [`Architecture::MonitorRegisterOptions`] type.
    ///
    /// When enabled, relevant events will be passed to the event callback
    /// function.
    pub fn monitor_register(
        &self,
        enable: bool,
        options: <Driver::Architecture as Architecture>::MonitorRegisterOptions,
    ) -> Result<(), VmiError> {
        self.driver.monitor_register(enable, options)
    }

    /// Enables or disables monitoring of specific interrupts.
    ///
    /// This method sets up event triggers for specified interrupt events. The
    /// types of interrupts that can be monitored are defined by the
    /// [`Architecture::MonitorInterruptOptions`] type, which is specific to
    /// the architecture being used.
    ///
    /// When an interrupt event occurs, it will be passed to the event callback
    /// function.
    pub fn monitor_interrupt(
        &self,
        enable: bool,
        options: <Driver::Architecture as Architecture>::MonitorInterruptOptions,
    ) -> Result<(), VmiError> {
        self.driver.monitor_interrupt(enable, options)
    }

    /// Enables or disables single-step monitoring.
    ///
    /// When enabled, this method causes the VMI system to generate an event
    /// after each instruction execution in the guest. This can be useful
    /// for detailed analysis of guest behavior, but may have a significant
    /// performance impact.
    ///
    /// Single-step events will be passed to the event callback function when
    /// they occur.
    pub fn monitor_singlestep(&self, enable: bool) -> Result<(), VmiError> {
        self.driver.monitor_singlestep(enable)
    }

    /// Enables or disables monitoring of CPUID instruction execution.
    ///
    /// When enabled, this method generates an event each time a CPUID
    /// instruction is executed in the guest. This can be useful for
    /// analyzing how the guest queries CPU features or for implementing CPU
    /// feature spoofing.
    ///
    /// CPUID events will be passed to the event callback function when they
    /// occur.
    pub fn monitor_cpuid(&self, enable: bool) -> Result<(), VmiError> {
        self.driver.monitor_cpuid(enable)
    }

    /// Enables or disables monitoring of I/O port operations.
    ///
    /// When enabled, this method generates events for I/O port read and write
    /// operations performed by the guest. This can be useful for analyzing
    /// guest interactions with virtual hardware or for implementing custom
    /// virtual device behavior.
    ///
    /// I/O port events will be passed to the event callback function when they
    /// occur.
    pub fn monitor_io(&self, enable: bool) -> Result<(), VmiError> {
        self.driver.monitor_io(enable)
    }
    */

    /// Injects an interrupt into a specific virtual CPU.
    ///
    /// This method allows for the injection of architecture-specific interrupts
    /// into a given vCPU. It can be used to simulate hardware events or to
    /// manipulate the guest's execution flow for analysis purposes.
    ///
    /// The type of interrupt and its parameters are defined by the
    /// architecture-specific [`Architecture::Interrupt`] type.
    pub fn inject_interrupt(
        &self,
        vcpu: VcpuId,
        interrupt: <Driver::Architecture as Architecture>::Interrupt,
    ) -> Result<(), VmiError> {
        self.driver.inject_interrupt(vcpu, interrupt)
    }

    /// Returns the number of pending events.
    ///
    /// This method provides a count of events that have occurred but have not
    /// yet been processed.
    pub fn events_pending(&self) -> usize {
        self.driver.events_pending()
    }

    /// Returns the time spent processing events by the driver.
    ///
    /// This method provides a measure of the overhead introduced by event
    /// processing. It can be useful for performance tuning and
    /// understanding the impact of VMI operations on overall system
    /// performance.
    pub fn event_processing_overhead(&self) -> Duration {
        self.driver.event_processing_overhead()
    }

    /// Waits for an event to occur and processes it with the provided handler.
    ///
    /// This method blocks until an event occurs or the specified timeout is
    /// reached. When an event occurs, it is passed to the provided callback
    /// function for processing.
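    ///
    /// # Examples
    ///
    /// An event-loop sketch; `core` is assumed to exist, and `make_response`
    /// is a hypothetical helper that builds the architecture-specific
    /// [`VmiEventResponse`] for the event.
    ///
    /// ```ignore
    /// loop {
    ///     core.wait_for_event(
    ///         std::time::Duration::from_millis(100),
    ///         Box::new(|event| {
    ///             // Guest page tables may have changed since the last event,
    ///             // so drop stale virtual-to-physical translations first.
    ///             core.flush_v2p_cache();
    ///
    ///             // ... inspect `event`, read guest memory, etc. ...
    ///             make_response(event)
    ///         }),
    ///     )?;
    /// }
    /// ```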
    pub fn wait_for_event<'a>(
        &'a self,
        timeout: Duration,
        handler: Box<VmiEventCallback<'a, Driver::Architecture>>,
    ) -> Result<(), VmiError> {
        self.driver.wait_for_event(timeout, handler)
    }

    /// Resets the state of the VMI system.
    ///
    /// This method clears all event monitors, caches, and any other stateful
    /// data maintained by the VMI system. It's useful for bringing the VMI
    /// system back to a known clean state, which can be necessary when
    /// switching between different analysis tasks or recovering from error
    /// conditions.
    pub fn reset_state(&self) -> Result<(), VmiError> {
        self.driver.reset_state()
    }

    /// Reads memory from the virtual machine.
    pub fn read(&self, ctx: impl Into<AccessContext>, buffer: &mut [u8]) -> Result<(), VmiError> {
        let ctx = ctx.into();
        let mut position = 0usize;
        let mut remaining = buffer.len();

        while remaining > 0 {
            let address = self.translate_access_context(ctx + position as u64)?;
            let gfn = Driver::Architecture::gfn_from_pa(address);
            let offset = Driver::Architecture::pa_offset(address) as usize;

            let page = self.read_page(gfn)?;
            let page = &page[offset..];

            let size = std::cmp::min(remaining, page.len());
            buffer[position..position + size].copy_from_slice(&page[..size]);

            position += size;
            remaining -= size;
        }

        Ok(())
    }

    /// Writes memory to the virtual machine.
    pub fn write(&self, ctx: impl Into<AccessContext>, buffer: &[u8]) -> Result<(), VmiError> {
        let ctx = ctx.into();
        let mut position = 0usize;
        let mut remaining = buffer.len();

        let page_size = self.info()?.page_size;

        while remaining > 0 {
            let address = self.translate_access_context(ctx + position as u64)?;
            let gfn = Driver::Architecture::gfn_from_pa(address);
            let offset = Driver::Architecture::pa_offset(address);

            let size = std::cmp::min(remaining, (page_size - offset) as usize);
            let content = &buffer[position..position + size];

            self.driver.write_page(gfn, offset, content)?;

            position += size;
            remaining -= size;
        }

        Ok(())
    }

    /// Reads a single byte from the virtual machine.
    pub fn read_u8(&self, ctx: impl Into<AccessContext>) -> Result<u8, VmiError> {
        let mut buffer = [0u8; 1];
        self.read(ctx, &mut buffer)?;
        Ok(buffer[0])
    }

    /// Reads a 16-bit unsigned integer from the virtual machine.
    pub fn read_u16(&self, ctx: impl Into<AccessContext>) -> Result<u16, VmiError> {
        let mut buffer = [0u8; 2];
        self.read(ctx, &mut buffer)?;
        Ok(u16::from_le_bytes(buffer))
    }

    /// Reads a 32-bit unsigned integer from the virtual machine.
    pub fn read_u32(&self, ctx: impl Into<AccessContext>) -> Result<u32, VmiError> {
        let mut buffer = [0u8; 4];
        self.read(ctx, &mut buffer)?;
        Ok(u32::from_le_bytes(buffer))
    }

    /// Reads a 64-bit unsigned integer from the virtual machine.
    pub fn read_u64(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
        let mut buffer = [0u8; 8];
        self.read(ctx, &mut buffer)?;
        Ok(u64::from_le_bytes(buffer))
    }

    /// Reads an address-sized unsigned integer from the virtual machine.
    ///
    /// This method reads an address of the specified width (in bytes) from
    /// the given access context. It's useful when dealing with architectures
    /// that can operate in different address modes.
    pub fn read_address(
        &self,
        ctx: impl Into<AccessContext>,
        address_width: usize,
    ) -> Result<u64, VmiError> {
        match address_width {
            4 => self.read_address32(ctx),
            8 => self.read_address64(ctx),
            _ => Err(VmiError::InvalidAddressWidth),
        }
    }

    /// Reads a 32-bit address from the virtual machine.
    pub fn read_address32(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
        Ok(self.read_u32(ctx)? as u64)
    }

    /// Reads a 64-bit address from the virtual machine.
    pub fn read_address64(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
        self.read_u64(ctx)
    }

    /// Reads a virtual address from the virtual machine.
    pub fn read_va(
        &self,
        ctx: impl Into<AccessContext>,
        address_width: usize,
    ) -> Result<Va, VmiError> {
        Ok(Va(self.read_address(ctx, address_width)?))
    }

    /// Reads a 32-bit virtual address from the virtual machine.
    pub fn read_va32(&self, ctx: impl Into<AccessContext>) -> Result<Va, VmiError> {
        Ok(Va(self.read_address32(ctx)?))
    }

    /// Reads a 64-bit virtual address from the virtual machine.
    pub fn read_va64(&self, ctx: impl Into<AccessContext>) -> Result<Va, VmiError> {
        Ok(Va(self.read_address64(ctx)?))
    }

    /// Reads a null-terminated string of bytes from the virtual machine with a
    /// specified limit.
    pub fn read_string_bytes_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<Vec<u8>, VmiError> {
        let mut ctx = ctx.into();

        // read until the end of page
        let mut buffer = vec![
            0u8;
            (Driver::Architecture::PAGE_SIZE - (ctx.address & !Driver::Architecture::PAGE_MASK))
                as usize
        ];
        self.read(ctx, &mut buffer)?;

        // try to find the null terminator
        let position = buffer.iter().position(|&b| b == 0);

        if let Some(position) = position {
            buffer.truncate(limit.min(position));
            return Ok(buffer);
        }

        let mut page = [0u8; 4096_usize]; // FIXME: Driver::Architecture::PAGE_SIZE

        // `ctx.address` must always point at the first byte that has not been
        // read yet; the initial read covered `buffer.len()` bytes.
        ctx.address += buffer.len() as u64;

        loop {
            self.read(ctx, &mut page)?;

            // try to find the null terminator
            let position = page.iter().position(|&b| b == 0);

            if let Some(position) = position {
                buffer.extend_from_slice(&page[..position]);

                if buffer.len() >= limit {
                    buffer.truncate(limit);
                }

                break;
            }

            buffer.extend_from_slice(&page);

            if buffer.len() >= limit {
                buffer.truncate(limit);
                break;
            }

            // Advance past the page that was just consumed.
            ctx.address += page.len() as u64;
        }

        Ok(buffer)
    }

    /// Reads a null-terminated string of bytes from the virtual machine.
    pub fn read_string_bytes(&self, ctx: impl Into<AccessContext>) -> Result<Vec<u8>, VmiError> {
        self.read_string_bytes_limited(
            ctx,
            self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
        )
    }

    /// Reads a null-terminated wide string (UTF-16) from the virtual machine
    /// with a specified limit.
    pub fn read_wstring_bytes_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<Vec<u16>, VmiError> {
        let mut ctx = ctx.into();

        // read until the end of page
        let mut buffer = vec![
            0u8;
            (Driver::Architecture::PAGE_SIZE - (ctx.address & !Driver::Architecture::PAGE_MASK))
                as usize
        ];
        self.read(ctx, &mut buffer)?;

        // try to find the null terminator
        let position = buffer
            .chunks_exact(2)
            .position(|chunk| chunk[0] == 0 && chunk[1] == 0);

        if let Some(position) = position {
            buffer.truncate(limit.min(position * 2));
            return Ok(buffer
                .chunks_exact(2)
                .map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]]))
                .collect());
        }

        let mut page = [0u8; 4096_usize]; // FIXME: Driver::Architecture::PAGE_SIZE

        // `ctx.address` must always point at the first byte that has not been
        // read yet; the initial read covered `buffer.len()` bytes.
        ctx.address += buffer.len() as u64;

        loop {
            self.read(ctx, &mut page)?;

            // try to find the (two-byte) null terminator
            let position = page
                .chunks_exact(2)
                .position(|chunk| chunk[0] == 0 && chunk[1] == 0);

            if let Some(position) = position {
                buffer.extend_from_slice(&page[..position * 2]);

                if buffer.len() >= limit {
                    buffer.truncate(limit);
                }

                break;
            }

            buffer.extend_from_slice(&page);

            if buffer.len() >= limit {
                buffer.truncate(limit);
                break;
            }

            // Advance past the page that was just consumed.
            ctx.address += page.len() as u64;
        }

        Ok(buffer
            .chunks_exact(2)
            .map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]]))
            .collect())
    }

    /// Reads a null-terminated wide string (UTF-16) from the virtual machine.
    pub fn read_wstring_bytes(&self, ctx: impl Into<AccessContext>) -> Result<Vec<u16>, VmiError> {
        self.read_wstring_bytes_limited(
            ctx,
            self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
        )
    }

    /// Reads a null-terminated string from the virtual machine with a specified
    /// limit.
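    ///
    /// # Examples
    ///
    /// A short sketch; `core` and `ctx` (an access context addressing a
    /// NUL-terminated ANSI string in the guest) are assumed to exist.
    ///
    /// ```ignore
    /// // Read at most 255 bytes of the string; longer strings are truncated.
    /// let name = core.read_string_limited(ctx, 255)?;
    /// ```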
    pub fn read_string_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<String, VmiError> {
        Ok(String::from_utf8_lossy(&self.read_string_bytes_limited(ctx, limit)?).into())
    }

    /// Reads a null-terminated string from the virtual machine.
    pub fn read_string(&self, ctx: impl Into<AccessContext>) -> Result<String, VmiError> {
        self.read_string_limited(
            ctx,
            self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
        )
    }

    /// Reads a null-terminated wide string (UTF-16) from the virtual machine
    /// with a specified limit.
    pub fn read_wstring_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<String, VmiError> {
        Ok(String::from_utf16_lossy(
            &self.read_wstring_bytes_limited(ctx, limit)?,
        ))
    }

    /// Reads a null-terminated wide string (UTF-16) from the virtual machine.
    pub fn read_wstring(&self, ctx: impl Into<AccessContext>) -> Result<String, VmiError> {
        self.read_wstring_limited(
            ctx,
            self.read_string_length_limit.borrow().unwrap_or(usize::MAX),
        )
    }

    /// Reads a struct from the virtual machine.
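    ///
    /// # Examples
    ///
    /// A sketch that reads a doubly-linked list entry; `ListEntry64` is an
    /// illustrative guest layout defined here, not a type provided by this
    /// crate, and `core` and `ctx` are assumed to exist.
    ///
    /// ```ignore
    /// use zerocopy::{FromBytes, IntoBytes};
    ///
    /// // The layout must be plain-old-data for the zerocopy derives to apply.
    /// #[derive(FromBytes, IntoBytes)]
    /// #[repr(C)]
    /// struct ListEntry64 {
    ///     flink: u64,
    ///     blink: u64,
    /// }
    ///
    /// let entry: ListEntry64 = core.read_struct(ctx)?;
    /// ```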
    pub fn read_struct<T>(&self, ctx: impl Into<AccessContext>) -> Result<T, VmiError>
    where
        T: FromBytes + IntoBytes,
    {
        let mut result = T::new_zeroed();
        self.read(ctx, result.as_mut_bytes())?;
        Ok(result)
    }

    /// Writes a single byte to the virtual machine.
    pub fn write_u8(&self, ctx: impl Into<AccessContext>, value: u8) -> Result<(), VmiError> {
        self.write(ctx, &value.to_le_bytes())
    }

    /// Writes a 16-bit unsigned integer to the virtual machine.
    pub fn write_u16(&self, ctx: impl Into<AccessContext>, value: u16) -> Result<(), VmiError> {
        self.write(ctx, &value.to_le_bytes())
    }

    /// Writes a 32-bit unsigned integer to the virtual machine.
    pub fn write_u32(&self, ctx: impl Into<AccessContext>, value: u32) -> Result<(), VmiError> {
        self.write(ctx, &value.to_le_bytes())
    }

    /// Writes a 64-bit unsigned integer to the virtual machine.
    pub fn write_u64(&self, ctx: impl Into<AccessContext>, value: u64) -> Result<(), VmiError> {
        self.write(ctx, &value.to_le_bytes())
    }

    /// Writes a struct to the virtual machine.
    pub fn write_struct<T>(&self, ctx: impl Into<AccessContext>, value: T) -> Result<(), VmiError>
    where
        T: IntoBytes + Immutable,
    {
        self.write(ctx, value.as_bytes())
    }

    /// Translates a virtual address to a physical address.
    pub fn translate_address(&self, ctx: impl Into<AddressContext>) -> Result<Pa, VmiError> {
        self.translate_access_context(AccessContext::from(ctx.into()))
    }

    /// Translates an access context to a physical address.
    pub fn translate_access_context(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
        (self.translate_access_context_fn)(self, ctx)
    }

    /// Reads a page of memory from the virtual machine.
    pub fn read_page(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        (self.read_page_fn)(self, gfn)
    }

    /// Reads a page of memory from the virtual machine without using the cache.
    fn read_page_nocache(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        self.driver.read_page(gfn)
    }

    /// Reads a page of memory from the virtual machine, using the cache if
    /// enabled.
    fn read_page_cache(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        let mut cache = self.cache.gfn.borrow_mut();
        let value = cache.try_get_or_insert(gfn, || self.read_page_nocache(gfn))?;

        // Mapped pages are reference counted, so cloning one is cheap.
        Ok(value.clone())
    }

    /// Translates an access context to a physical address without using the
    /// cache.
    ///
    /// # Notes
    ///
    /// If [`TranslationMechanism::Paging`] is used, the `root` must be present.
    /// In case the root is not present, a [`VmiError::RootNotPresent`] error is
    /// returned.
    fn translate_access_context_nocache(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
        Ok(match ctx.mechanism {
            TranslationMechanism::Direct => Pa(ctx.address),
            TranslationMechanism::Paging { root } => match root {
                Some(root) => <Driver::Architecture as Architecture>::translate_address(
                    self,
                    ctx.address.into(),
                    root,
                )?,
                None => return Err(VmiError::RootNotPresent),
            },
        })
    }

    /// Translates an access context to a physical address, using the cache if
    /// enabled.
    fn translate_access_context_cache(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
        let mut cache = self.cache.v2p.borrow_mut();
        let value = cache.try_get_or_insert(ctx, || self.translate_access_context_nocache(ctx))?;
        Ok(*value)
    }
}

/// A guard that pauses the virtual machine on creation and resumes it on drop.
pub struct VmiPauseGuard<'a, Driver>
where
    Driver: VmiDriver,
{
    driver: &'a Driver,
}

impl<'a, Driver> VmiPauseGuard<'a, Driver>
where
    Driver: VmiDriver,
{
    /// Creates a new pause guard.
    pub fn new(driver: &'a Driver) -> Result<Self, VmiError> {
        driver.pause()?;
        Ok(Self { driver })
    }
}

impl<Driver> Drop for VmiPauseGuard<'_, Driver>
where
    Driver: VmiDriver,
{
    fn drop(&mut self) {
        if let Err(err) = self.driver.resume() {
            tracing::error!(?err, "Failed to resume the virtual machine");
        }
    }
}