//! `xen/devicemodel/mod.rs` — Xen device-model interface bindings.

1mod handle;
2use std::rc::Rc;
3
4use xen_sys::xendevicemodel_inject_event;
5
6pub use self::handle::XenDeviceModelHandle;
7use crate::{VcpuId, XenDomainId, XenError};
8
/// Check a C-style return code and bail out on failure.
///
/// Expands to an early `return Err(XenError::Io(..))` capturing the current
/// OS `errno` (via `std::io::Error::last_os_error()`) when the value is
/// negative, so it must be invoked inside a function returning
/// `Result<_, XenError>`. Takes any integer expression (previously only a
/// bound identifier); the expression is evaluated exactly once.
macro_rules! xc_check_error {
    ($rc:expr) => {
        if $rc < 0 {
            return Err(XenError::Io(std::io::Error::last_os_error()));
        }
    };
}
16
17/*
18 * x86 event types. This enumeration is valid for:
19 *  Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
20 *  AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
21 */
/// Event-injection type value, passed straight to the hypervisor as the
/// `event_type` argument of `xendevicemodel_inject_event` (bits [10:8] of the
/// fields listed in the comment above). The discriminants are ABI values, so
/// they are spelled out explicitly: inserting or reordering variants must
/// never silently change the encoding.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum XenX86EventType {
    /// External interrupt
    ExternalInterrupt = 0,

    /// Reserved
    Reserved = 1,

    /// NMI
    Nmi = 2,

    /// Hardware exception
    HardwareException = 3,

    /// Software interrupt (CD nn)
    SoftwareInterrupt = 4,

    /// ICEBP (F1)
    PrivilegedSoftwareException = 5,

    /// INT3 (CC), INTO (CE)
    SoftwareException = 6,
}
46
/// Exception vector.
///
/// Newtype over a raw x86 vector number; well-known vectors are available as
/// associated constants, and the raw value is public for anything not listed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct XenX86ExceptionVector(pub u8);
50
// Named constants for the architecturally defined x86 exception vectors.
// CamelCase is intentional (they read like enum variants), hence the lint
// expectation below.
#[expect(non_upper_case_globals)]
impl XenX86ExceptionVector {
    /// `#DE` — divide-by-zero / divide overflow.
    pub const DivideError: Self = Self(0);
    /// `#DB` — debug exception.
    pub const DebugException: Self = Self(1);
    /// Non-maskable interrupt.
    pub const Nmi: Self = Self(2);
    /// `#BP` — breakpoint (INT3).
    pub const Breakpoint: Self = Self(3);
    /// `#OF` — overflow (INTO).
    pub const Overflow: Self = Self(4);
    /// `#BR` — BOUND range exceeded.
    pub const BoundRange: Self = Self(5);
    /// `#UD` — invalid/undefined opcode.
    pub const InvalidOpcode: Self = Self(6);
    /// `#NM` — device not available (FPU).
    pub const DeviceNotAvailable: Self = Self(7);
    /// `#DF` — double fault.
    pub const DoubleFault: Self = Self(8);
    /// Coprocessor segment overrun (legacy).
    pub const CoprocessorSegmentOverrun: Self = Self(9);
    /// `#TS` — invalid TSS.
    pub const InvalidTss: Self = Self(10);
    /// `#NP` — segment not present.
    pub const SegmentNotPresent: Self = Self(11);
    /// `#SS` — stack-segment fault.
    pub const StackSegmentFault: Self = Self(12);
    /// `#GP` — general protection fault.
    pub const GeneralProtectionFault: Self = Self(13);
    /// `#PF` — page fault.
    pub const PageFault: Self = Self(14);
    /// Vector 15 — legacy PIC spurious interrupt vector.
    pub const PicSpuriousInterruptVector: Self = Self(15);
    /// `#MF` — x87 floating-point (maths) fault.
    pub const MathsFault: Self = Self(16);
    /// `#AC` — alignment check.
    pub const AlignmentCheck: Self = Self(17);
    /// `#MC` — machine check.
    pub const MachineCheck: Self = Self(18);
    /// `#XM` — SIMD floating-point exception.
    pub const SimdException: Self = Self(19);
    /// `#VE` — virtualisation exception (Intel EPT).
    pub const VirtualisationException: Self = Self(20);
    /// `#CP` — control-flow protection (CET).
    pub const ControlFlowProtection: Self = Self(21);
    /// `#HV` — hypervisor injection (AMD SEV-ES).
    pub const HypervisorInjection: Self = Self(28);
    /// `#VC` — VMM communication (AMD SEV-ES).
    pub const VmmCommunication: Self = Self(29);
    /// `#SX` — security exception.
    pub const SecurityException: Self = Self(30);
}
79
/// Per-domain interface to the Xen device-model (`xendevicemodel`) library.
#[derive(Debug, Clone)]
pub struct XenDeviceModel {
    // Shared underlying library handle; `Rc` lets clones of this struct
    // reuse the same open handle.
    pub(crate) handle: Rc<XenDeviceModelHandle>,
    // Domain all operations on this instance are issued against.
    domain_id: XenDomainId,
}
85
impl XenDeviceModel {
    /// Opens a new device-model handle bound to `domain_id`.
    ///
    /// # Errors
    ///
    /// Propagates the `XenError` from `XenDeviceModelHandle::new` when the
    /// underlying library handle cannot be opened.
    pub(crate) fn new(domain_id: XenDomainId) -> Result<Self, XenError> {
        Ok(Self {
            handle: Rc::new(XenDeviceModelHandle::new()?),
            domain_id,
        })
    }

    /// This function injects an event into a vCPU to take effect the next time
    /// it resumes.
    ///
    /// Set `error_code` to `!0` to skip.
    ///
    /// Set `extra` to type-specific extra data (`%cr2` for `#PF`, `pending_dbg`
    /// for `#DB`).
    ///
    /// # Errors
    ///
    /// Returns `XenError::Io` carrying the current OS `errno` when the
    /// library call reports failure (negative return code).
    pub fn inject_event(
        &self,
        vcpu: VcpuId,
        vector: XenX86ExceptionVector,
        event_type: XenX86EventType,
        error_code: u32,
        instruction_length: u8,
        extra: u64,
    ) -> Result<(), XenError> {
        // SAFETY: plain FFI call; all arguments are passed by value. The raw
        // handle `self.handle.0` is presumed valid for the lifetime of
        // `self` since it was opened in `new` — confirm against
        // `XenDeviceModelHandle`'s drop semantics.
        let rc = unsafe {
            xendevicemodel_inject_event(
                self.handle.0,
                // `as _` casts adapt to whatever integer widths the FFI
                // prototype declares.
                self.domain_id.0 as _,
                vcpu.0 as _,
                vector.0,
                event_type as _,
                error_code,
                instruction_length,
                extra,
            )
        };
        // Negative rc: convert errno into XenError::Io and early-return.
        xc_check_error!(rc);
        Ok(())
    }
}
125}