kvm_ioctls/ioctls/vm.rs
1// Copyright © 2024 Institute of Software, CAS. All rights reserved.
2//
3// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4// SPDX-License-Identifier: Apache-2.0 OR MIT
5//
6// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
7// Use of this source code is governed by a BSD-style license that can be
8// found in the THIRD-PARTY file.
9
10use kvm_bindings::*;
11use std::fs::File;
12use std::os::raw::c_void;
13use std::os::raw::{c_int, c_ulong};
14use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
15
16use crate::cap::Cap;
17use crate::ioctls::device::DeviceFd;
18use crate::ioctls::device::new_device;
19use crate::ioctls::vcpu::VcpuFd;
20use crate::ioctls::vcpu::new_vcpu;
21use crate::ioctls::{KvmRunWrapper, Result};
22use crate::kvm_ioctls::*;
23use vmm_sys_util::errno;
24use vmm_sys_util::eventfd::EventFd;
25#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
26use vmm_sys_util::ioctl::ioctl;
27#[cfg(target_arch = "x86_64")]
28use vmm_sys_util::ioctl::ioctl_with_mut_ptr;
29use vmm_sys_util::ioctl::{ioctl_with_mut_ref, ioctl_with_ref, ioctl_with_val};
30
/// An address either in programmable I/O space or in memory mapped I/O space.
///
/// The `IoEventAddress` is used for specifying the type when registering an event
/// in [register_ioevent](struct.VmFd.html#method.register_ioevent).
#[derive(Debug, Clone, Copy)]
pub enum IoEventAddress {
    /// Representation of a programmable I/O (port) address.
    Pio(u64),
    /// Representation of a memory mapped I/O address.
    Mmio(u64),
}
42
/// Helper structure for disabling datamatch.
///
/// The structure can be used as a parameter to
/// [`register_ioevent`](struct.VmFd.html#method.register_ioevent)
/// to disable filtering of events based on the datamatch flag. For details check the
/// [KVM API documentation](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
#[derive(Debug, Clone, Copy)]
pub struct NoDatamatch;
impl From<NoDatamatch> for u64 {
    /// Converts to a datamatch value of 0. Because `NoDatamatch` is a
    /// zero-sized type, `register_ioevent`/`unregister_ioevent` also leave
    /// the datamatch flag unset when given this type.
    fn from(_: NoDatamatch) -> u64 {
        0
    }
}
56
/// Wrapper over KVM VM ioctls.
#[derive(Debug)]
pub struct VmFd {
    // Owned file descriptor backing the VM; closed when the struct is dropped.
    vm: File,
    // NOTE(review): appears to be the size of the vcpu `kvm_run` mmap region,
    // set where `VmFd` is constructed (outside this chunk) — confirm.
    run_size: usize,
}
63
64impl VmFd {
65 /// Creates/modifies a guest physical memory slot.
66 ///
67 /// See the documentation for `KVM_SET_USER_MEMORY_REGION`.
68 ///
69 /// # Arguments
70 ///
71 /// * `user_memory_region` - Guest physical memory slot. For details check the
72 /// `kvm_userspace_memory_region` structure in the
73 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
74 ///
75 /// # Safety
76 ///
77 /// This function is unsafe because there is no guarantee `userspace_addr` points to a valid
78 /// memory region, nor the memory region lives as long as the kernel needs it to.
79 ///
80 /// The caller of this method must make sure that:
81 /// - the raw pointer (`userspace_addr`) points to valid memory
82 /// - the regions provided to KVM are not overlapping other memory regions.
83 ///
84 /// # Example
85 ///
86 /// ```rust
87 /// # extern crate kvm_ioctls;
88 /// extern crate kvm_bindings;
89 ///
90 /// use kvm_bindings::kvm_userspace_memory_region;
91 /// use kvm_ioctls::Kvm;
92 ///
93 /// let kvm = Kvm::new().unwrap();
94 /// let vm = kvm.create_vm().unwrap();
95 /// let mem_region = kvm_userspace_memory_region {
96 /// slot: 0,
97 /// guest_phys_addr: 0x10000 as u64,
98 /// memory_size: 0x10000 as u64,
99 /// userspace_addr: 0x0 as u64,
100 /// flags: 0,
101 /// };
102 /// unsafe {
103 /// vm.set_user_memory_region(mem_region).unwrap();
104 /// };
105 /// ```
106 pub unsafe fn set_user_memory_region(
107 &self,
108 user_memory_region: kvm_userspace_memory_region,
109 ) -> Result<()> {
110 // SAFETY: we trust the kernel and verified parameters
111 let ret =
112 unsafe { ioctl_with_ref(self, KVM_SET_USER_MEMORY_REGION(), &user_memory_region) };
113 if ret == 0 {
114 Ok(())
115 } else {
116 Err(errno::Error::last())
117 }
118 }
119
120 /// Creates/modifies a guest physical memory slot.
121 ///
122 /// See the documentation for `KVM_SET_USER_MEMORY_REGION2`.
123 ///
124 /// # Arguments
125 ///
126 /// * `user_memory_region2` - Guest physical memory slot. For details check the
127 /// `kvm_userspace_memory_region2` structure in the
128 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
129 ///
130 /// # Safety
131 ///
132 /// This function is unsafe because there is no guarantee `userspace_addr` points to a valid
133 /// memory region, nor the memory region lives as long as the kernel needs it to.
134 ///
135 /// The caller of this method must make sure that:
136 /// - the raw pointer (`userspace_addr`) points to valid memory
137 /// - the regions provided to KVM are not overlapping other memory regions.
138 /// - the guest_memfd points at a file created via KVM_CREATE_GUEST_MEMFD on
139 /// the current VM, and the target range must not be bound to any other memory region
140 ///
141 /// # Example
142 ///
143 /// On x86, create a `KVM_X86_SW_PROTECTED_VM` with a memslot that has a `guest_memfd` associated.
144 ///
145 /// ```rust
146 /// # extern crate kvm_ioctls;
147 /// extern crate kvm_bindings;
148 ///
149 /// use kvm_bindings::{
150 /// KVM_CAP_GUEST_MEMFD, KVM_CAP_USER_MEMORY2, KVM_MEM_GUEST_MEMFD, kvm_create_guest_memfd,
151 /// kvm_userspace_memory_region2,
152 /// };
153 /// use kvm_ioctls::{Cap, Kvm};
154 /// use std::os::fd::RawFd;
155 ///
156 /// let kvm = Kvm::new().unwrap();
157 /// #[cfg(target_arch = "x86_64")]
158 /// let vm = kvm
159 /// .create_vm_with_type(kvm_bindings::KVM_X86_SW_PROTECTED_VM as u64)
160 /// .unwrap();
161 /// #[cfg(not(target_arch = "x86_64"))]
162 /// let vm = kvm.create_vm().unwrap(); /* non-x86 does not yet have a vm type that supports gmem */
163 ///
164 /// let address_space = unsafe { libc::mmap(0 as _, 10000, 3, 34, -1, 0) };
165 /// let userspace_addr = address_space as *const u8 as u64;
166 ///
167 /// if !vm.check_extension(Cap::GuestMemfd) || !vm.check_extension(Cap::UserMemory2) {
168 /// return;
169 /// }
170 ///
171 /// let gmem = kvm_create_guest_memfd {
172 /// size: 0x10000,
173 /// flags: 0,
174 /// reserved: [0; 6],
175 /// };
176 ///
177 /// let fd: RawFd = unsafe { vm.create_guest_memfd(gmem).unwrap() };
178 ///
179 /// let mem_region = kvm_userspace_memory_region2 {
180 /// slot: 0,
181 /// flags: KVM_MEM_GUEST_MEMFD,
182 /// guest_phys_addr: 0x10000 as u64,
183 /// memory_size: 0x10000 as u64,
184 /// userspace_addr,
185 /// guest_memfd_offset: 0,
186 /// guest_memfd: fd as u32,
187 /// pad1: 0,
188 /// pad2: [0; 14],
189 /// };
190 /// unsafe {
191 /// vm.set_user_memory_region2(mem_region).unwrap();
192 /// };
193 /// ```
194 pub unsafe fn set_user_memory_region2(
195 &self,
196 user_memory_region2: kvm_userspace_memory_region2,
197 ) -> Result<()> {
198 // SAFETY: we trust the kernel and verified parameters
199 let ret =
200 unsafe { ioctl_with_ref(self, KVM_SET_USER_MEMORY_REGION2(), &user_memory_region2) };
201 if ret == 0 {
202 Ok(())
203 } else {
204 Err(errno::Error::last())
205 }
206 }
207
208 /// Sets the address of the three-page region in the VM's address space.
209 ///
210 /// See the documentation for `KVM_SET_TSS_ADDR`.
211 ///
212 /// # Arguments
213 ///
214 /// * `offset` - Physical address of a three-page region in the guest's physical address space.
215 ///
216 /// # Example
217 ///
218 /// ```rust
219 /// # extern crate kvm_ioctls;
220 /// # use kvm_ioctls::Kvm;
221 /// let kvm = Kvm::new().unwrap();
222 /// let vm = kvm.create_vm().unwrap();
223 /// vm.set_tss_address(0xfffb_d000).unwrap();
224 /// ```
225 #[cfg(target_arch = "x86_64")]
226 pub fn set_tss_address(&self, offset: usize) -> Result<()> {
227 // SAFETY: Safe because we know that our file is a VM fd and we verify the return result.
228 let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR(), offset as c_ulong) };
229 if ret == 0 {
230 Ok(())
231 } else {
232 Err(errno::Error::last())
233 }
234 }
235
236 /// Sets the address of the one-page region in the VM's address space.
237 ///
238 /// See the documentation for `KVM_SET_IDENTITY_MAP_ADDR`.
239 ///
240 /// # Arguments
241 ///
242 /// * `address` - Physical address of a one-page region in the guest's physical address space.
243 ///
244 /// # Example
245 ///
246 /// ```rust
247 /// # extern crate kvm_ioctls;
248 /// # use kvm_ioctls::Kvm;
249 /// let kvm = Kvm::new().unwrap();
250 /// let vm = kvm.create_vm().unwrap();
251 /// vm.set_identity_map_address(0xfffb_c000).unwrap();
252 /// ```
253 #[cfg(target_arch = "x86_64")]
254 pub fn set_identity_map_address(&self, address: u64) -> Result<()> {
255 // SAFETY: Safe because we know that our file is a VM fd and we verify the return result.
256 let ret = unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR(), &address) };
257 if ret == 0 {
258 Ok(())
259 } else {
260 Err(errno::Error::last())
261 }
262 }
263
264 /// Creates an in-kernel interrupt controller.
265 ///
266 /// See the documentation for `KVM_CREATE_IRQCHIP`.
267 ///
268 /// # Example
269 ///
270 /// ```rust
271 /// # extern crate kvm_ioctls;
272 /// # extern crate kvm_bindings;
273 /// # use kvm_ioctls::Kvm;
274 /// let kvm = Kvm::new().unwrap();
275 /// let vm = kvm.create_vm().unwrap();
276 ///
277 /// #[cfg(target_arch = "x86_64")]
278 /// vm.create_irq_chip().unwrap();
279 /// #[cfg(target_arch = "aarch64")]
280 /// {
281 /// use kvm_bindings::{
282 /// KVM_CREATE_DEVICE_TEST, kvm_create_device, kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V2,
283 /// };
284 /// let mut gic_device = kvm_create_device {
285 /// type_: kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V2,
286 /// fd: 0,
287 /// flags: KVM_CREATE_DEVICE_TEST,
288 /// };
289 /// if vm.create_device(&mut gic_device).is_ok() {
290 /// vm.create_irq_chip().unwrap();
291 /// }
292 /// }
293 /// ```
294 #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
295 pub fn create_irq_chip(&self) -> Result<()> {
296 // SAFETY: Safe because we know that our file is a VM fd and we verify the return result.
297 let ret = unsafe { ioctl(self, KVM_CREATE_IRQCHIP()) };
298 if ret == 0 {
299 Ok(())
300 } else {
301 Err(errno::Error::last())
302 }
303 }
304
305 /// X86 specific call to retrieve the state of a kernel interrupt controller.
306 ///
307 /// See the documentation for `KVM_GET_IRQCHIP` in the
308 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
309 ///
310 /// # Arguments
311 ///
312 /// * `irqchip` - `kvm_irqchip` (input/output) to be read.
313 ///
314 /// # Example
315 ///
316 /// ```rust
317 /// # extern crate kvm_bindings;
318 /// # extern crate kvm_ioctls;
319 /// # use kvm_bindings::{kvm_irqchip, KVM_IRQCHIP_PIC_MASTER};
320 /// # use kvm_ioctls::Kvm;
321 /// let kvm = Kvm::new().unwrap();
322 /// let vm = kvm.create_vm().unwrap();
323 ///
324 /// vm.create_irq_chip().unwrap();
325 /// let mut irqchip = kvm_irqchip::default();
326 /// irqchip.chip_id = KVM_IRQCHIP_PIC_MASTER;
327 /// vm.get_irqchip(&mut irqchip).unwrap();
328 /// ```
329 #[cfg(target_arch = "x86_64")]
330 pub fn get_irqchip(&self, irqchip: &mut kvm_irqchip) -> Result<()> {
331 // SAFETY: Here we trust the kernel not to read past the end of the kvm_irqchip struct.
332 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), irqchip) };
333 if ret == 0 {
334 Ok(())
335 } else {
336 Err(errno::Error::last())
337 }
338 }
339
340 /// X86 specific call to set the state of a kernel interrupt controller.
341 ///
342 /// See the documentation for `KVM_SET_IRQCHIP` in the
343 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
344 ///
345 /// # Arguments
346 ///
347 /// * `irqchip` - `kvm_irqchip` (input/output) to be written.
348 ///
349 /// # Example
350 ///
351 /// ```rust
352 /// # extern crate kvm_bindings;
353 /// # extern crate kvm_ioctls;
354 /// # use kvm_bindings::{kvm_irqchip, KVM_IRQCHIP_PIC_MASTER};
355 /// # use kvm_ioctls::Kvm;
356 /// let kvm = Kvm::new().unwrap();
357 /// let vm = kvm.create_vm().unwrap();
358 ///
359 /// vm.create_irq_chip().unwrap();
360 /// let mut irqchip = kvm_irqchip::default();
361 /// irqchip.chip_id = KVM_IRQCHIP_PIC_MASTER;
362 /// // Your `irqchip` manipulation here.
363 /// vm.set_irqchip(&mut irqchip).unwrap();
364 /// ```
365 #[cfg(target_arch = "x86_64")]
366 pub fn set_irqchip(&self, irqchip: &kvm_irqchip) -> Result<()> {
367 // SAFETY: Here we trust the kernel not to read past the end of the kvm_irqchip struct.
368 let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), irqchip) };
369 if ret == 0 {
370 Ok(())
371 } else {
372 Err(errno::Error::last())
373 }
374 }
375
376 /// Creates a PIT as per the `KVM_CREATE_PIT2` ioctl.
377 ///
378 /// # Arguments
379 ///
380 /// * pit_config - PIT configuration. For details check the `kvm_pit_config` structure in the
381 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
382 /// # Example
383 ///
384 /// ```rust
385 /// # extern crate kvm_ioctls;
386 /// extern crate kvm_bindings;
387 /// # use kvm_ioctls::Kvm;
388 /// use kvm_bindings::kvm_pit_config;
389 ///
390 /// let kvm = Kvm::new().unwrap();
391 /// let vm = kvm.create_vm().unwrap();
392 /// vm.create_irq_chip().unwrap();
393 /// let pit_config = kvm_pit_config::default();
394 /// vm.create_pit2(pit_config).unwrap();
395 /// ```
396 #[cfg(target_arch = "x86_64")]
397 pub fn create_pit2(&self, pit_config: kvm_pit_config) -> Result<()> {
398 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
399 // the correct amount of memory from our pointer, and we verify the return result.
400 let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2(), &pit_config) };
401 if ret == 0 {
402 Ok(())
403 } else {
404 Err(errno::Error::last())
405 }
406 }
407
408 /// X86 specific call to retrieve the state of the in-kernel PIT model.
409 ///
410 /// See the documentation for `KVM_GET_PIT2` in the
411 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
412 ///
413 /// # Arguments
414 ///
415 /// * `pitstate` - `kvm_pit_state2` to be read.
416 ///
417 /// # Example
418 ///
419 /// ```rust
420 /// # extern crate kvm_bindings;
421 /// # extern crate kvm_ioctls;
422 /// # use kvm_bindings::kvm_pit_config;
423 /// # use kvm_ioctls::Kvm;
424 /// let kvm = Kvm::new().unwrap();
425 /// let vm = kvm.create_vm().unwrap();
426 /// vm.create_irq_chip().unwrap();
427 ///
428 /// let pit_config = kvm_pit_config::default();
429 /// vm.create_pit2(pit_config).unwrap();
430 /// let pitstate = vm.get_pit2().unwrap();
431 /// ```
432 #[cfg(target_arch = "x86_64")]
433 pub fn get_pit2(&self) -> Result<kvm_pit_state2> {
434 let mut pitstate = Default::default();
435 // SAFETY: Here we trust the kernel not to read past the end of the kvm_pit_state2 struct.
436 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2(), &mut pitstate) };
437 if ret == 0 {
438 Ok(pitstate)
439 } else {
440 Err(errno::Error::last())
441 }
442 }
443
444 /// X86 specific call to set the state of the in-kernel PIT model.
445 ///
446 /// See the documentation for `KVM_SET_PIT2` in the
447 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
448 ///
449 /// # Arguments
450 ///
451 /// * `pitstate` - `kvm_pit_state2` to be written.
452 ///
453 /// # Example
454 ///
455 /// ```rust
456 /// # extern crate kvm_bindings;
457 /// # extern crate kvm_ioctls;
458 /// # use kvm_bindings::{kvm_pit_config, kvm_pit_state2};
459 /// # use kvm_ioctls::Kvm;
460 /// let kvm = Kvm::new().unwrap();
461 /// let vm = kvm.create_vm().unwrap();
462 /// vm.create_irq_chip().unwrap();
463 ///
464 /// let pit_config = kvm_pit_config::default();
465 /// vm.create_pit2(pit_config).unwrap();
466 /// let mut pitstate = kvm_pit_state2::default();
467 /// // Your `pitstate` manipulation here.
468 /// vm.set_pit2(&mut pitstate).unwrap();
469 /// ```
470 #[cfg(target_arch = "x86_64")]
471 pub fn set_pit2(&self, pitstate: &kvm_pit_state2) -> Result<()> {
472 // SAFETY: Here we trust the kernel not to read past the end of the kvm_pit_state2 struct.
473 let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2(), pitstate) };
474 if ret == 0 {
475 Ok(())
476 } else {
477 Err(errno::Error::last())
478 }
479 }
480
481 /// X86 specific call to retrieve the current timestamp of kvmclock.
482 ///
483 /// See the documentation for `KVM_GET_CLOCK` in the
484 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
485 ///
486 /// # Arguments
487 ///
488 /// * `clock` - `kvm_clock_data` to be read.
489 ///
490 /// # Example
491 ///
492 /// ```rust
493 /// # extern crate kvm_ioctls;
494 /// # use kvm_ioctls::Kvm;
495 /// let kvm = Kvm::new().unwrap();
496 /// let vm = kvm.create_vm().unwrap();
497 /// let clock = vm.get_clock().unwrap();
498 /// ```
499 #[cfg(target_arch = "x86_64")]
500 pub fn get_clock(&self) -> Result<kvm_clock_data> {
501 let mut clock = Default::default();
502 // SAFETY: Here we trust the kernel not to read past the end of the kvm_clock_data struct.
503 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK(), &mut clock) };
504 if ret == 0 {
505 Ok(clock)
506 } else {
507 Err(errno::Error::last())
508 }
509 }
510
511 /// X86 specific call to set the current timestamp of kvmclock.
512 ///
513 /// See the documentation for `KVM_SET_CLOCK` in the
514 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
515 ///
516 /// # Arguments
517 ///
518 /// * `clock` - `kvm_clock_data` to be written.
519 ///
520 /// # Example
521 ///
522 /// ```rust
523 /// # extern crate kvm_bindings;
524 /// # extern crate kvm_ioctls;
525 /// # use kvm_bindings::kvm_clock_data;
526 /// # use kvm_ioctls::Kvm;
527 /// let kvm = Kvm::new().unwrap();
528 /// let vm = kvm.create_vm().unwrap();
529 /// let mut clock = kvm_clock_data::default();
530 /// vm.set_clock(&mut clock).unwrap();
531 /// ```
532 #[cfg(target_arch = "x86_64")]
533 pub fn set_clock(&self, clock: &kvm_clock_data) -> Result<()> {
534 // SAFETY: Here we trust the kernel not to read past the end of the kvm_clock_data struct.
535 let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK(), clock) };
536 if ret == 0 {
537 Ok(())
538 } else {
539 Err(errno::Error::last())
540 }
541 }
542
543 /// Directly injects a MSI message as per the `KVM_SIGNAL_MSI` ioctl.
544 ///
545 /// See the documentation for `KVM_SIGNAL_MSI`.
546 ///
547 /// This ioctl returns > 0 when the MSI is successfully delivered and 0
548 /// when the guest blocked the MSI.
549 ///
550 /// # Arguments
551 ///
552 /// * kvm_msi - MSI message configuration. For details check the `kvm_msi` structure in the
553 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
554 /// # Example
555 ///
556 /// In this example, the important function signal_msi() calling into
557 /// the actual ioctl is commented out. The reason is that MSI vectors are
558 /// not chosen from the HW side (VMM). The guest OS (or anything that runs
559 /// inside the VM) is supposed to allocate the MSI vectors, and usually
560 /// communicate back through PCI configuration space. Sending a random MSI
561 /// vector through this signal_msi() function will always result in a
562 /// failure, which is why it needs to be commented out.
563 ///
564 /// ```rust
565 /// # extern crate kvm_ioctls;
566 /// extern crate kvm_bindings;
567 /// # use kvm_ioctls::Kvm;
568 /// use kvm_bindings::kvm_msi;
569 ///
570 /// let kvm = Kvm::new().unwrap();
571 /// let vm = kvm.create_vm().unwrap();
572 /// let msi = kvm_msi::default();
573 /// #[cfg(target_arch = "x86_64")]
574 /// vm.create_irq_chip().unwrap();
575 /// //vm.signal_msi(msi).unwrap();
576 /// ```
577 #[cfg(any(
578 target_arch = "x86_64",
579 target_arch = "aarch64",
580 target_arch = "riscv64"
581 ))]
582 pub fn signal_msi(&self, msi: kvm_msi) -> Result<c_int> {
583 // SAFETY: Safe because we allocated the structure and we know the kernel
584 // will read exactly the size of the structure.
585 let ret = unsafe { ioctl_with_ref(self, KVM_SIGNAL_MSI(), &msi) };
586 if ret >= 0 {
587 Ok(ret)
588 } else {
589 Err(errno::Error::last())
590 }
591 }
592
593 /// Sets the GSI routing table entries, overwriting any previously set
594 /// entries, as per the `KVM_SET_GSI_ROUTING` ioctl.
595 ///
596 /// See the documentation for `KVM_SET_GSI_ROUTING`.
597 ///
598 /// Returns an io::Error when the table could not be updated.
599 ///
600 /// # Arguments
601 ///
602 /// * kvm_irq_routing - IRQ routing configuration. Describe all routes
603 /// associated with GSI entries. For details check
604 /// the `kvm_irq_routing` and `kvm_irq_routing_entry`
605 /// structures in the
606 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
607 /// # Example
608 ///
609 /// ```rust
610 /// # extern crate kvm_ioctls;
611 /// extern crate kvm_bindings;
612 /// # use kvm_ioctls::Kvm;
613 /// use kvm_bindings::KvmIrqRouting;
614 ///
615 /// let kvm = Kvm::new().unwrap();
616 /// let vm = kvm.create_vm().unwrap();
617 ///
618 /// #[cfg(target_arch = "x86_64")]
619 /// vm.create_irq_chip().unwrap();
620 ///
621 /// #[cfg(target_arch = "riscv64")]
622 /// vm.create_device(&mut kvm_bindings::kvm_create_device {
623 /// type_: kvm_bindings::kvm_device_type_KVM_DEV_TYPE_RISCV_AIA,
624 /// fd: 0,
625 /// flags: 0,
626 /// })
627 /// .expect("Cannot create KVM vAIA device.");
628 ///
629 /// let irq_routing = KvmIrqRouting::new(0).unwrap();
630 /// vm.set_gsi_routing(&irq_routing).unwrap();
631 /// ```
632 #[cfg(any(
633 target_arch = "x86_64",
634 target_arch = "aarch64",
635 target_arch = "riscv64"
636 ))]
637 pub fn set_gsi_routing(&self, irq_routing: &KvmIrqRouting) -> Result<()> {
638 // SAFETY: Safe because we allocated the structure and we know the kernel
639 // will read exactly the size of the structure.
640 let ret =
641 unsafe { ioctl_with_ref(self, KVM_SET_GSI_ROUTING(), irq_routing.as_fam_struct_ref()) };
642 if ret == 0 {
643 Ok(())
644 } else {
645 Err(errno::Error::last())
646 }
647 }
648
649 /// Registers an event to be signaled whenever a certain address is written to.
650 ///
651 /// See the documentation for `KVM_IOEVENTFD`.
652 ///
653 /// # Arguments
654 ///
655 /// * `fd` - `EventFd` which will be signaled. When signaling, the usual `vmexit` to userspace
656 /// is prevented.
657 /// * `addr` - Address being written to.
658 /// * `datamatch` - Limits signaling `fd` to only the cases where the value being written is
659 /// equal to this parameter. The size of `datamatch` is important and it must
660 /// match the expected size of the guest's write.
661 ///
662 /// # Example
663 ///
664 /// ```rust
665 /// # extern crate kvm_ioctls;
666 /// extern crate libc;
667 /// extern crate vmm_sys_util;
668 /// # use kvm_ioctls::{IoEventAddress, Kvm, NoDatamatch};
669 /// use libc::{EFD_NONBLOCK, eventfd};
670 /// use vmm_sys_util::eventfd::EventFd;
671 /// let kvm = Kvm::new().unwrap();
672 /// let vm_fd = kvm.create_vm().unwrap();
673 /// let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
674 /// vm_fd
675 /// .register_ioevent(&evtfd, &IoEventAddress::Pio(0xf4), NoDatamatch)
676 /// .unwrap();
677 /// vm_fd
678 /// .register_ioevent(&evtfd, &IoEventAddress::Mmio(0x1000), NoDatamatch)
679 /// .unwrap();
680 /// ```
681 pub fn register_ioevent<T: Into<u64>>(
682 &self,
683 fd: &EventFd,
684 addr: &IoEventAddress,
685 datamatch: T,
686 ) -> Result<()> {
687 let mut flags = 0;
688 if std::mem::size_of::<T>() > 0 {
689 flags |= 1 << kvm_ioeventfd_flag_nr_datamatch
690 }
691 if let IoEventAddress::Pio(_) = *addr {
692 flags |= 1 << kvm_ioeventfd_flag_nr_pio
693 }
694
695 let ioeventfd = kvm_ioeventfd {
696 datamatch: datamatch.into(),
697 len: std::mem::size_of::<T>() as u32,
698 addr: match addr {
699 IoEventAddress::Pio(p) => *p,
700 IoEventAddress::Mmio(m) => *m,
701 },
702 fd: fd.as_raw_fd(),
703 flags,
704 ..Default::default()
705 };
706 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
707 // the correct amount of memory from our pointer, and we verify the return result.
708 let ret = unsafe { ioctl_with_ref(self, KVM_IOEVENTFD(), &ioeventfd) };
709 if ret == 0 {
710 Ok(())
711 } else {
712 Err(errno::Error::last())
713 }
714 }
715
716 /// Unregisters an event from a certain address it has been previously registered to.
717 ///
718 /// See the documentation for `KVM_IOEVENTFD`.
719 ///
720 /// # Arguments
721 ///
722 /// * `fd` - FD which will be unregistered.
723 /// * `addr` - Address being written to.
724 ///
725 /// # Safety
726 ///
727 /// This function is unsafe because it relies on RawFd.
728 ///
729 /// # Example
730 ///
731 /// ```rust
732 /// # extern crate kvm_ioctls;
733 /// extern crate libc;
734 /// extern crate vmm_sys_util;
735 /// # use kvm_ioctls::{IoEventAddress, Kvm, NoDatamatch};
736 /// use libc::EFD_NONBLOCK;
737 /// use vmm_sys_util::eventfd::EventFd;
738 ///
739 /// let kvm = Kvm::new().unwrap();
740 /// let vm_fd = kvm.create_vm().unwrap();
741 /// let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
742 /// let pio_addr = IoEventAddress::Pio(0xf4);
743 /// let mmio_addr = IoEventAddress::Mmio(0x1000);
744 /// vm_fd
745 /// .register_ioevent(&evtfd, &pio_addr, NoDatamatch)
746 /// .unwrap();
747 /// vm_fd
748 /// .register_ioevent(&evtfd, &mmio_addr, 0x1234u32)
749 /// .unwrap();
750 /// vm_fd
751 /// .unregister_ioevent(&evtfd, &pio_addr, NoDatamatch)
752 /// .unwrap();
753 /// vm_fd
754 /// .unregister_ioevent(&evtfd, &mmio_addr, 0x1234u32)
755 /// .unwrap();
756 /// ```
757 pub fn unregister_ioevent<T: Into<u64>>(
758 &self,
759 fd: &EventFd,
760 addr: &IoEventAddress,
761 datamatch: T,
762 ) -> Result<()> {
763 let mut flags = 1 << kvm_ioeventfd_flag_nr_deassign;
764 if std::mem::size_of::<T>() > 0 {
765 flags |= 1 << kvm_ioeventfd_flag_nr_datamatch
766 }
767 if let IoEventAddress::Pio(_) = *addr {
768 flags |= 1 << kvm_ioeventfd_flag_nr_pio
769 }
770
771 let ioeventfd = kvm_ioeventfd {
772 datamatch: datamatch.into(),
773 len: std::mem::size_of::<T>() as u32,
774 addr: match addr {
775 IoEventAddress::Pio(p) => *p,
776 IoEventAddress::Mmio(m) => *m,
777 },
778 fd: fd.as_raw_fd(),
779 flags,
780 ..Default::default()
781 };
782 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
783 // the correct amount of memory from our pointer, and we verify the return result.
784 let ret = unsafe { ioctl_with_ref(self, KVM_IOEVENTFD(), &ioeventfd) };
785 if ret == 0 {
786 Ok(())
787 } else {
788 Err(errno::Error::last())
789 }
790 }
791
792 /// Gets the bitmap of pages dirtied since the last call of this function.
793 ///
794 /// Leverages the dirty page logging feature in KVM. As a side-effect, this also resets the
795 /// bitmap inside the kernel. For the dirty log to be available, you have to set the flag
796 /// `KVM_MEM_LOG_DIRTY_PAGES` when creating guest memory regions.
797 ///
798 /// Check the documentation for `KVM_GET_DIRTY_LOG`.
799 ///
800 /// # Arguments
801 ///
802 /// * `slot` - Guest memory slot identifier.
803 /// * `memory_size` - Size of the memory region.
804 ///
805 /// # Example
806 ///
807 /// ```rust
808 /// # extern crate kvm_ioctls;
809 /// # extern crate kvm_bindings;
810 /// # use std::io::Write;
811 /// # use std::ptr::null_mut;
812 /// # use std::slice;
813 /// # use kvm_ioctls::{Kvm, VcpuExit};
814 /// # use kvm_bindings::{kvm_userspace_memory_region, KVM_MEM_LOG_DIRTY_PAGES};
815 /// # let kvm = Kvm::new().unwrap();
816 /// # let vm = kvm.create_vm().unwrap();
817 /// // This example is based on https://lwn.net/Articles/658511/.
818 /// let mem_size = 0x4000;
819 /// let guest_addr: u64 = 0x1000;
820 /// let load_addr: *mut u8 = unsafe {
821 /// libc::mmap(
822 /// null_mut(),
823 /// mem_size,
824 /// libc::PROT_READ | libc::PROT_WRITE,
825 /// libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
826 /// -1,
827 /// 0,
828 /// ) as *mut u8
829 /// };
830 ///
831 /// // Initialize a guest memory region using the flag `KVM_MEM_LOG_DIRTY_PAGES`.
832 /// let mem_region = kvm_userspace_memory_region {
833 /// slot: 0,
834 /// guest_phys_addr: guest_addr,
835 /// memory_size: mem_size as u64,
836 /// userspace_addr: load_addr as u64,
837 /// flags: KVM_MEM_LOG_DIRTY_PAGES,
838 /// };
839 /// unsafe { vm.set_user_memory_region(mem_region).unwrap() };
840 ///
841 /// #[cfg(target_arch = "x86_64")]
842 /// // ASM code that just forces a MMIO Write.
843 /// let asm_code = [0xc6, 0x06, 0x00, 0x80, 0x00];
844 /// #[cfg(target_arch = "aarch64")]
845 /// let asm_code = [
846 /// 0x01, 0x00, 0x00, 0x10, /* adr x1, <this address> */
847 /// 0x22, 0x10, 0x00, 0xb9, /* str w2, [x1, #16]; write to this page */
848 /// 0x02, 0x00, 0x00, 0xb9, /* str w2, [x0]; force MMIO exit */
849 /// 0x00, 0x00, 0x00,
850 /// 0x14, /* b <this address>; shouldn't get here, but if so loop forever */
851 /// ];
852 /// #[cfg(target_arch = "riscv64")]
853 /// let asm_code = [
854 /// 0x17, 0x03, 0x00, 0x00, // auipc t1, 0; <this address> -> t1
855 /// 0xa3, 0x23, 0x73, 0x00, // sw t2, t1 + 7; dirty current page
856 /// 0x23, 0x20, 0x75, 0x00, // sw t2, a0; trigger MMIO exit
857 /// 0x6f, 0x00, 0x00, 0x00, // j .;shouldn't get here, but if so loop forever
858 /// ];
859 ///
860 /// // Write the code in the guest memory. This will generate a dirty page.
861 /// unsafe {
862 /// let mut slice = slice::from_raw_parts_mut(load_addr, mem_size);
863 /// slice.write(&asm_code).unwrap();
864 /// }
865 ///
866 /// let mut vcpu_fd = vm.create_vcpu(0).unwrap();
867 ///
868 /// #[cfg(target_arch = "x86_64")]
869 /// {
870 /// // x86_64 specific registry setup.
871 /// let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap();
872 /// vcpu_sregs.cs.base = 0;
873 /// vcpu_sregs.cs.selector = 0;
874 /// vcpu_fd.set_sregs(&vcpu_sregs).unwrap();
875 ///
876 /// let mut vcpu_regs = vcpu_fd.get_regs().unwrap();
877 /// // Set the Instruction Pointer to the guest address where we loaded the code.
878 /// vcpu_regs.rip = guest_addr;
879 /// vcpu_regs.rax = 2;
880 /// vcpu_regs.rbx = 3;
881 /// vcpu_regs.rflags = 2;
882 /// vcpu_fd.set_regs(&vcpu_regs).unwrap();
883 /// }
884 ///
885 /// #[cfg(target_arch = "aarch64")]
886 /// {
887 /// // aarch64 specific registry setup.
888 /// let mut kvi = kvm_bindings::kvm_vcpu_init::default();
889 /// vm.get_preferred_target(&mut kvi).unwrap();
890 /// vcpu_fd.vcpu_init(&kvi).unwrap();
891 ///
892 /// let core_reg_base: u64 = 0x6030_0000_0010_0000;
893 /// let mmio_addr: u64 = guest_addr + mem_size as u64;
894 /// vcpu_fd.set_one_reg(core_reg_base + 2 * 32, &guest_addr.to_le_bytes()); // set PC
895 /// vcpu_fd.set_one_reg(core_reg_base + 2 * 0, &mmio_addr.to_le_bytes()); // set X0
896 /// }
897 ///
898 /// #[cfg(target_arch = "riscv64")]
899 /// {
900 /// let core_reg_base: u64 = 0x8030_0000_0200_0000;
901 /// let mmio_addr: u64 = guest_addr + mem_size as u64;
902 /// vcpu_fd.set_one_reg(core_reg_base, &guest_addr.to_le_bytes()); // set PC
903 /// vcpu_fd.set_one_reg(core_reg_base + 10, &mmio_addr.to_le_bytes()); // set A0
904 /// }
905 ///
906 /// loop {
907 /// match vcpu_fd.run().expect("run failed") {
908 /// VcpuExit::MmioWrite(addr, data) => {
909 /// // On x86_64, the code snippet dirties 1 page when loading the code in memory
910 /// // while on aarch64 the dirty bit comes from writing to guest_addr (current PC).
911 /// let dirty_pages_bitmap = vm.get_dirty_log(0, mem_size).unwrap();
912 /// let dirty_pages = dirty_pages_bitmap
913 /// .into_iter()
914 /// .map(|page| page.count_ones())
915 /// .fold(0, |dirty_page_count, i| dirty_page_count + i);
916 /// assert_eq!(dirty_pages, 1);
917 /// break;
918 /// }
919 /// exit_reason => panic!("unexpected exit reason: {:?}", exit_reason),
920 /// }
921 /// }
922 /// ```
923 pub fn get_dirty_log(&self, slot: u32, memory_size: usize) -> Result<Vec<u64>> {
924 // Compute the length of the bitmap needed for all dirty pages in one memory slot.
925 // One memory page is `page_size` bytes and `KVM_GET_DIRTY_LOG` returns one dirty bit for
926 // each page.
927 // SAFETY: We trust the sysconf libc function and we're calling it with a correct parameter.
928 let page_size = match unsafe { libc::sysconf(libc::_SC_PAGESIZE) } {
929 -1 => return Err(errno::Error::last()),
930 ps => ps as usize,
931 };
932
933 // For ease of access we are saving the bitmap in a u64 vector. We are using ceil to
934 // make sure we count all dirty pages even when `memory_size` is not a multiple of
935 // `page_size * 64`.
936 let bitmap_size = memory_size.div_ceil(page_size * 64);
937 let mut bitmap = vec![0u64; bitmap_size];
938 let dirtylog = kvm_dirty_log {
939 slot,
940 padding1: 0,
941 __bindgen_anon_1: kvm_dirty_log__bindgen_ty_1 {
942 dirty_bitmap: bitmap.as_mut_ptr() as *mut c_void,
943 },
944 };
945 // SAFETY: Safe because we know that our file is a VM fd, and we know that the amount of
946 // memory we allocated for the bitmap is at least one bit per page.
947 let ret = unsafe { ioctl_with_ref(self, KVM_GET_DIRTY_LOG(), &dirtylog) };
948 if ret == 0 {
949 Ok(bitmap)
950 } else {
951 Err(errno::Error::last())
952 }
953 }
954
955 /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
956 ///
957 /// # Arguments
958 ///
959 /// * `fd` - `EventFd` to be signaled.
960 /// * `gsi` - IRQ to be triggered.
961 ///
962 /// # Example
963 ///
964 /// ```rust
965 /// # extern crate kvm_ioctls;
966 /// # extern crate libc;
967 /// # extern crate vmm_sys_util;
968 /// # use kvm_ioctls::Kvm;
969 /// # use libc::EFD_NONBLOCK;
970 /// # use vmm_sys_util::eventfd::EventFd;
971 /// let kvm = Kvm::new().unwrap();
972 /// let vm = kvm.create_vm().unwrap();
973 /// let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
974 /// #[cfg(target_arch = "x86_64")]
975 /// {
976 /// vm.create_irq_chip().unwrap();
977 /// vm.register_irqfd(&evtfd, 0).unwrap();
978 /// }
979 /// ```
980 #[cfg(any(
981 target_arch = "x86_64",
982 target_arch = "aarch64",
983 target_arch = "riscv64"
984 ))]
985 pub fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> Result<()> {
986 let irqfd = kvm_irqfd {
987 fd: fd.as_raw_fd() as u32,
988 gsi,
989 ..Default::default()
990 };
991 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
992 // the correct amount of memory from our pointer, and we verify the return result.
993 let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) };
994 if ret == 0 {
995 Ok(())
996 } else {
997 Err(errno::Error::last())
998 }
999 }
1000
1001 /// Registers an event that will, when signaled, assert the `gsi` IRQ.
1002 /// If the irqchip is resampled by the guest, the IRQ is de-asserted,
1003 /// and `resamplefd` is notified.
1004 ///
1005 /// # Arguments
1006 ///
1007 /// * `fd` - `EventFd` to be signaled.
1008 /// * `resamplefd` - `EventFd`to be notified on resample.
1009 /// * `gsi` - IRQ to be triggered.
1010 ///
1011 /// # Example
1012 ///
1013 /// ```rust
1014 /// # extern crate kvm_ioctls;
1015 /// # extern crate libc;
1016 /// # extern crate vmm_sys_util;
1017 /// # use kvm_ioctls::Kvm;
1018 /// # use libc::EFD_NONBLOCK;
1019 /// # use vmm_sys_util::eventfd::EventFd;
1020 /// let kvm = Kvm::new().unwrap();
1021 /// let vm = kvm.create_vm().unwrap();
1022 /// let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
1023 /// let resamplefd = EventFd::new(EFD_NONBLOCK).unwrap();
1024 /// #[cfg(target_arch = "x86_64")]
1025 /// {
1026 /// vm.create_irq_chip().unwrap();
1027 /// vm.register_irqfd_with_resample(&evtfd, &resamplefd, 0)
1028 /// .unwrap();
1029 /// }
1030 /// ```
1031 #[cfg(any(
1032 target_arch = "x86_64",
1033 target_arch = "aarch64",
1034 target_arch = "riscv64"
1035 ))]
1036 pub fn register_irqfd_with_resample(
1037 &self,
1038 fd: &EventFd,
1039 resamplefd: &EventFd,
1040 gsi: u32,
1041 ) -> Result<()> {
1042 let irqfd = kvm_irqfd {
1043 fd: fd.as_raw_fd() as u32,
1044 resamplefd: resamplefd.as_raw_fd() as u32,
1045 gsi,
1046 flags: KVM_IRQFD_FLAG_RESAMPLE,
1047 ..Default::default()
1048 };
1049 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
1050 // the correct amount of memory from our pointer, and we verify the return result.
1051 let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) };
1052 if ret == 0 {
1053 Ok(())
1054 } else {
1055 Err(errno::Error::last())
1056 }
1057 }
1058
1059 /// Unregisters an event that will, when signaled, trigger the `gsi` IRQ.
1060 ///
1061 /// # Arguments
1062 ///
1063 /// * `fd` - `EventFd` to be signaled.
1064 /// * `gsi` - IRQ to be triggered.
1065 ///
1066 /// # Example
1067 ///
1068 /// ```rust
1069 /// # extern crate kvm_ioctls;
1070 /// # extern crate libc;
1071 /// # extern crate vmm_sys_util;
1072 /// # use kvm_ioctls::Kvm;
1073 /// # use libc::EFD_NONBLOCK;
1074 /// # use vmm_sys_util::eventfd::EventFd;
1075 /// let kvm = Kvm::new().unwrap();
1076 /// let vm = kvm.create_vm().unwrap();
1077 /// let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
1078 /// let resamplefd = EventFd::new(EFD_NONBLOCK).unwrap();
1079 /// #[cfg(target_arch = "x86_64")]
1080 /// {
1081 /// vm.create_irq_chip().unwrap();
1082 /// vm.register_irqfd(&evtfd, 0).unwrap();
1083 /// vm.unregister_irqfd(&evtfd, 0).unwrap();
1084 /// vm.register_irqfd_with_resample(&evtfd, &resamplefd, 0)
1085 /// .unwrap();
1086 /// vm.unregister_irqfd(&evtfd, 0).unwrap();
1087 /// }
1088 /// ```
1089 #[cfg(any(
1090 target_arch = "x86_64",
1091 target_arch = "aarch64",
1092 target_arch = "riscv64"
1093 ))]
1094 pub fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> Result<()> {
1095 let irqfd = kvm_irqfd {
1096 fd: fd.as_raw_fd() as u32,
1097 gsi,
1098 flags: KVM_IRQFD_FLAG_DEASSIGN,
1099 ..Default::default()
1100 };
1101 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
1102 // the correct amount of memory from our pointer, and we verify the return result.
1103 let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) };
1104 if ret == 0 {
1105 Ok(())
1106 } else {
1107 Err(errno::Error::last())
1108 }
1109 }
1110
1111 /// Sets the level on the given irq to 1 if `active` is true, and 0 otherwise.
1112 ///
1113 /// # Arguments
1114 ///
1115 /// * `irq` - IRQ to be set.
1116 /// * `active` - Level of the IRQ input.
1117 ///
1118 /// # Errors
1119 ///
1120 /// Returns an io::Error when the irq field is invalid
1121 ///
1122 /// # Examples
1123 ///
1124 /// ```rust
1125 /// # extern crate kvm_ioctls;
1126 /// # extern crate libc;
1127 /// # extern crate vmm_sys_util;
1128 /// # use kvm_ioctls::{Kvm, VmFd};
1129 /// # use libc::EFD_NONBLOCK;
1130 /// # use vmm_sys_util::eventfd::EventFd;
1131 /// fn arch_setup(vm_fd: &VmFd) {
1132 /// // Arch-specific setup:
1133 /// // For x86 architectures, it simply means calling vm.create_irq_chip().unwrap().
1134 /// # #[cfg(target_arch = "x86_64")]
1135 /// # vm_fd.create_irq_chip().unwrap();
1136 /// // For Arm architectures, the IRQ controllers need to be setup first.
1137 /// // Details please refer to the kernel documentation.
1138 /// // https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt
1139 /// # #[cfg(target_arch = "aarch64")] {
1140 /// # vm_fd.create_vcpu(0).unwrap();
1141 /// # // ... rest of setup for Arm goes here
1142 /// # }
1143 /// }
1144 ///
1145 /// let kvm = Kvm::new().unwrap();
1146 /// let vm = kvm.create_vm().unwrap();
1147 /// arch_setup(&vm);
1148 /// #[cfg(target_arch = "x86_64")]
1149 /// {
1150 /// vm.set_irq_line(4, true);
1151 /// // ...
1152 /// }
1153 /// #[cfg(target_arch = "aarch64")]
1154 /// {
1155 /// vm.set_irq_line(0x01_00_0020, true);
1156 /// // ....
1157 /// }
1158 /// ```
1159 #[cfg(any(
1160 target_arch = "x86_64",
1161 target_arch = "aarch64",
1162 target_arch = "riscv64"
1163 ))]
1164 pub fn set_irq_line(&self, irq: u32, active: bool) -> Result<()> {
1165 let mut irq_level = kvm_irq_level::default();
1166 irq_level.__bindgen_anon_1.irq = irq;
1167 irq_level.level = u32::from(active);
1168
1169 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
1170 // the correct amount of memory from our pointer, and we verify the return result.
1171 let ret = unsafe { ioctl_with_ref(self, KVM_IRQ_LINE(), &irq_level) };
1172 if ret == 0 {
1173 Ok(())
1174 } else {
1175 Err(errno::Error::last())
1176 }
1177 }
1178
1179 /// Creates a new KVM vCPU file descriptor and maps the memory corresponding
1180 /// its `kvm_run` structure.
1181 ///
1182 /// See the documentation for `KVM_CREATE_VCPU`.
1183 ///
1184 /// # Arguments
1185 ///
1186 /// * `id` - The vCPU ID.
1187 ///
1188 /// # Errors
1189 ///
1190 /// Returns an io::Error when the VM fd is invalid or the vCPU memory cannot
1191 /// be mapped correctly.
1192 ///
1193 /// # Example
1194 ///
1195 /// ```rust
1196 /// # extern crate kvm_ioctls;
1197 /// # use kvm_ioctls::Kvm;
1198 /// let kvm = Kvm::new().unwrap();
1199 /// let vm = kvm.create_vm().unwrap();
1200 /// // Create one vCPU with the ID=0.
1201 /// let vcpu = vm.create_vcpu(0);
1202 /// ```
1203 pub fn create_vcpu(&self, id: u64) -> Result<VcpuFd> {
1204 #[allow(clippy::cast_lossless)]
1205 // SAFETY: Safe because we know that vm is a VM fd and we verify the return result.
1206 let vcpu_fd = unsafe { ioctl_with_val(&self.vm, KVM_CREATE_VCPU(), id as c_ulong) };
1207 if vcpu_fd < 0 {
1208 return Err(errno::Error::last());
1209 }
1210
1211 // Wrap the vCPU now in case the following ? returns early.
1212 // SAFETY: This is safe because we verified the value of the fd and we own the fd.
1213 let vcpu = unsafe { File::from_raw_fd(vcpu_fd) };
1214
1215 let kvm_run_ptr = KvmRunWrapper::mmap_from_fd(&vcpu, self.run_size)?;
1216
1217 Ok(new_vcpu(vcpu, kvm_run_ptr))
1218 }
1219
1220 /// Creates a VcpuFd object from a vcpu RawFd.
1221 ///
1222 /// # Arguments
1223 ///
1224 /// * `fd` - the RawFd used for creating the VcpuFd object.
1225 ///
1226 /// # Safety
1227 ///
1228 /// This function is unsafe as the primitives currently returned have the contract that
1229 /// they are the sole owner of the file descriptor they are wrapping. Usage of this function
1230 /// could accidentally allow violating this contract which can cause memory unsafety in code
1231 /// that relies on it being true.
1232 ///
1233 /// The caller of this method must make sure the fd is valid and nothing else uses it.
1234 ///
1235 /// # Example
1236 ///
1237 /// ```rust
1238 /// # extern crate kvm_ioctls;
1239 /// # use std::os::unix::io::AsRawFd;
1240 /// # use kvm_ioctls::Kvm;
1241 /// let kvm = Kvm::new().unwrap();
1242 /// let vm = kvm.create_vm().unwrap();
1243 /// // Create one vCPU with the ID=0.
1244 /// let vcpu = vm.create_vcpu(0).unwrap();
1245 /// let rawfd = unsafe { libc::dup(vcpu.as_raw_fd()) };
1246 /// assert!(rawfd >= 0);
1247 /// let vcpu = unsafe { vm.create_vcpu_from_rawfd(rawfd).unwrap() };
1248 /// ```
1249 pub unsafe fn create_vcpu_from_rawfd(&self, fd: RawFd) -> Result<VcpuFd> {
1250 // SAFETY: we trust the kernel and verified parameters
1251 let vcpu = unsafe { File::from_raw_fd(fd) };
1252 let kvm_run_ptr = KvmRunWrapper::mmap_from_fd(&vcpu, self.run_size)?;
1253 Ok(new_vcpu(vcpu, kvm_run_ptr))
1254 }
1255
1256 /// Creates an emulated device in the kernel.
1257 ///
1258 /// See the documentation for `KVM_CREATE_DEVICE`.
1259 ///
1260 /// # Arguments
1261 ///
1262 /// * `device`: device configuration. For details check the `kvm_create_device` structure in the
1263 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
1264 ///
1265 /// # Example
1266 ///
1267 /// ```rust
1268 /// # extern crate kvm_ioctls;
1269 /// # extern crate kvm_bindings;
1270 /// # use kvm_ioctls::Kvm;
1271 /// use kvm_bindings::{
1272 /// KVM_CREATE_DEVICE_TEST, kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V2,
1273 /// kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V3, kvm_device_type_KVM_DEV_TYPE_RISCV_AIA,
1274 /// kvm_device_type_KVM_DEV_TYPE_VFIO,
1275 /// };
1276 /// let kvm = Kvm::new().unwrap();
1277 /// let vm = kvm.create_vm().unwrap();
1278 ///
1279 /// // Creating a device with the KVM_CREATE_DEVICE_TEST flag to check
1280 /// // whether the device type is supported. This will not create the device.
1281 /// // To create the device the flag needs to be removed.
1282 /// let mut device = kvm_bindings::kvm_create_device {
1283 /// #[cfg(target_arch = "x86_64")]
1284 /// type_: kvm_device_type_KVM_DEV_TYPE_VFIO,
1285 /// #[cfg(target_arch = "aarch64")]
1286 /// type_: kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V3,
1287 /// #[cfg(target_arch = "riscv64")]
1288 /// type_: kvm_device_type_KVM_DEV_TYPE_RISCV_AIA,
1289 /// fd: 0,
1290 /// flags: KVM_CREATE_DEVICE_TEST,
1291 /// };
1292 /// // On ARM, creating VGICv3 may fail due to hardware dependency.
1293 /// // Retry to create VGICv2 in that case.
1294 /// let device_fd = vm.create_device(&mut device).unwrap_or_else(|_| {
1295 /// #[cfg(target_arch = "x86_64")]
1296 /// panic!("Cannot create VFIO device.");
1297 /// #[cfg(target_arch = "aarch64")]
1298 /// {
1299 /// device.type_ = kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V2;
1300 /// vm.create_device(&mut device)
1301 /// .expect("Cannot create vGIC device")
1302 /// }
1303 /// #[cfg(target_arch = "riscv64")]
1304 /// panic!("Cannot create vAIA device.");
1305 /// });
1306 /// ```
1307 pub fn create_device(&self, device: &mut kvm_create_device) -> Result<DeviceFd> {
1308 // SAFETY: Safe because we are calling this with the VM fd and we trust the kernel.
1309 let ret = unsafe { ioctl_with_mut_ref(self, KVM_CREATE_DEVICE(), device) };
1310 if ret == 0 {
1311 // SAFETY: We validated the return of the function creating the fd and we trust the
1312 // kernel.
1313 Ok(new_device(unsafe { File::from_raw_fd(device.fd as i32) }))
1314 } else {
1315 Err(errno::Error::last())
1316 }
1317 }
1318
1319 /// Returns the preferred CPU target type which can be emulated by KVM on underlying host.
1320 ///
1321 /// The preferred CPU target is returned in the `kvi` parameter.
1322 /// See documentation for `KVM_ARM_PREFERRED_TARGET`.
1323 ///
1324 /// # Arguments
1325 /// * `kvi` - CPU target configuration (out). For details check the `kvm_vcpu_init`
1326 /// structure in the
1327 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
1328 ///
1329 /// # Example
1330 ///
1331 /// ```rust
1332 /// # extern crate kvm_ioctls;
1333 /// # extern crate kvm_bindings;
1334 /// # use kvm_ioctls::Kvm;
1335 /// use kvm_bindings::kvm_vcpu_init;
1336 /// let kvm = Kvm::new().unwrap();
1337 /// let vm = kvm.create_vm().unwrap();
1338 /// let mut kvi = kvm_vcpu_init::default();
1339 /// vm.get_preferred_target(&mut kvi).unwrap();
1340 /// ```
1341 #[cfg(target_arch = "aarch64")]
1342 pub fn get_preferred_target(&self, kvi: &mut kvm_vcpu_init) -> Result<()> {
1343 // SAFETY: The ioctl is safe because we allocated the struct and we know the
1344 // kernel will write exactly the size of the struct.
1345 let ret = unsafe { ioctl_with_mut_ref(self, KVM_ARM_PREFERRED_TARGET(), kvi) };
1346 if ret != 0 {
1347 return Err(errno::Error::last());
1348 }
1349 Ok(())
1350 }
1351
1352 /// Enable the specified capability as per the `KVM_ENABLE_CAP` ioctl.
1353 ///
1354 /// See the documentation for `KVM_ENABLE_CAP`.
1355 ///
1356 /// Returns an io::Error when the capability could not be enabled.
1357 ///
1358 /// # Arguments
1359 ///
1360 /// * kvm_enable_cap - KVM capability structure. For details check the `kvm_enable_cap`
1361 /// structure in the
1362 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
1363 ///
1364 /// # Example
1365 ///
1366 /// ```rust
1367 /// # extern crate kvm_ioctls;
1368 /// extern crate kvm_bindings;
1369 ///
1370 /// # use kvm_ioctls::Kvm;
1371 /// use kvm_bindings::{KVM_CAP_SPLIT_IRQCHIP, kvm_enable_cap};
1372 ///
1373 /// let kvm = Kvm::new().unwrap();
1374 /// let vm = kvm.create_vm().unwrap();
1375 /// let mut cap: kvm_enable_cap = Default::default();
1376 /// cap.cap = KVM_CAP_SPLIT_IRQCHIP;
1377 /// // As per the KVM documentation, KVM_CAP_SPLIT_IRQCHIP only emulates
1378 /// // the local APIC in kernel, expecting that a userspace IOAPIC will
1379 /// // be implemented by the VMM.
1380 /// // Along with this capability, the user needs to specify the number
1381 /// // of pins reserved for the userspace IOAPIC. This number needs to be
1382 /// // provided through the first argument of the capability structure, as
1383 /// // specified in KVM documentation:
1384 /// // args[0] - number of routes reserved for userspace IOAPICs
1385 /// //
1386 /// // Because an IOAPIC supports 24 pins, that's the reason why this test
1387 /// // picked this number as reference.
1388 /// cap.args[0] = 24;
1389 /// vm.enable_cap(&cap).unwrap();
1390 /// ```
1391 #[cfg(any(target_arch = "x86_64", target_arch = "s390x", target_arch = "powerpc"))]
1392 pub fn enable_cap(&self, cap: &kvm_enable_cap) -> Result<()> {
1393 // SAFETY: The ioctl is safe because we allocated the struct and we know the
1394 // kernel will write exactly the size of the struct.
1395 let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP(), cap) };
1396 if ret == 0 {
1397 Ok(())
1398 } else {
1399 Err(errno::Error::last())
1400 }
1401 }
1402
    /// Get the size in bytes of the memory mapping used for each vCPU's
    /// `kvm_run` structure (the value passed to `KvmRunWrapper::mmap_from_fd`
    /// when vCPUs of this VM are created).
    pub fn run_size(&self) -> usize {
        self.run_size
    }
1407
1408 /// Wrapper over `KVM_CHECK_EXTENSION`.
1409 ///
1410 /// Returns 0 if the capability is not available and a positive integer otherwise.
1411 /// See the documentation for `KVM_CHECK_EXTENSION`.
1412 ///
1413 /// # Arguments
1414 ///
1415 /// * `c` - KVM capability to check.
1416 ///
1417 /// # Example
1418 ///
1419 /// ```
1420 /// # use kvm_ioctls::Kvm;
1421 /// use kvm_ioctls::Cap;
1422 ///
1423 /// let kvm = Kvm::new().unwrap();
1424 /// let vm = kvm.create_vm().unwrap();
1425 /// assert!(vm.check_extension_int(Cap::MaxVcpus) > 0);
1426 /// ```
1427 pub fn check_extension_int(&self, c: Cap) -> i32 {
1428 self.check_extension_raw(c as c_ulong)
1429 }
1430
1431 /// Wrapper over `KVM_CHECK_EXTENSION`.
1432 ///
1433 /// Returns 0 if the capability is not available and a positive integer otherwise.
1434 /// See the documentation for `KVM_CHECK_EXTENSION`.
1435 ///
1436 /// # Arguments
1437 ///
1438 /// * `c` - KVM capability to check in a form of a raw integer.
1439 ///
1440 /// # Example
1441 ///
1442 /// ```
1443 /// # use kvm_ioctls::Kvm;
1444 /// # use std::os::raw::c_ulong;
1445 /// use kvm_ioctls::Cap;
1446 ///
1447 /// let kvm = Kvm::new().unwrap();
1448 /// let vm = kvm.create_vm().unwrap();
1449 /// assert!(vm.check_extension_raw(Cap::MaxVcpus as c_ulong) > 0);
1450 /// ```
    pub fn check_extension_raw(&self, c: c_ulong) -> i32 {
        // SAFETY: Safe because we know that our file is a VM fd.
        // If `c` is not a known kernel extension, kernel will return 0.
        unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), c) }
    }
1456
1457 /// Checks if a particular `Cap` is available.
1458 ///
1459 /// Returns true if the capability is supported and false otherwise.
1460 /// See the documentation for `KVM_CHECK_EXTENSION`.
1461 ///
1462 /// # Arguments
1463 ///
1464 /// * `c` - VM capability to check.
1465 ///
1466 /// # Example
1467 ///
1468 /// ```
1469 /// # use kvm_ioctls::Kvm;
1470 /// use kvm_ioctls::Cap;
1471 ///
1472 /// let kvm = Kvm::new().unwrap();
1473 /// let vm = kvm.create_vm().unwrap();
1474 /// // Check if `KVM_CAP_MP_STATE` is supported.
1475 /// assert!(vm.check_extension(Cap::MpState));
1476 /// ```
    pub fn check_extension(&self, c: Cap) -> bool {
        // A strictly positive return value from KVM_CHECK_EXTENSION means the
        // capability is present; 0 means unavailable.
        self.check_extension_int(c) > 0
    }
1480
1481 /// Creates an anonymous file and returns a file descriptor that refers to it.
1482 ///
1483 /// See the documentation for `KVM_CREATE_GUEST_MEMFD`.
1484 ///
1485 /// Returns an io::Error when the file could not be created.
1486 ///
1487 /// # Arguments
1488 ///
1489 /// * kvm_create_guest_memfd - KVM create guest memfd structure. For details check the
1490 /// `kvm_create_guest_memfd` structure in the
1491 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
1492 ///
1493 /// # Example
1494 ///
1495 /// ```rust
1496 /// # extern crate kvm_ioctls;
1497 /// extern crate kvm_bindings;
1498 ///
1499 /// # use kvm_ioctls::{Cap, Kvm};
1500 /// use kvm_bindings::{KVM_CAP_GUEST_MEMFD, kvm_create_guest_memfd};
1501 /// use std::os::fd::RawFd;
1502 ///
1503 /// let kvm = Kvm::new().unwrap();
1504 /// #[cfg(target_arch = "x86_64")]
1505 /// let vm = kvm
1506 /// .create_vm_with_type(kvm_bindings::KVM_X86_SW_PROTECTED_VM as u64)
1507 /// .unwrap();
1508 /// #[cfg(not(target_arch = "x86_64"))]
1509 /// let vm = kvm.create_vm().unwrap(); /* non-x86 does not yet have a vm type that supports gmem */
1510 ///
1511 /// if !vm.check_extension(Cap::GuestMemfd) {
1512 /// return;
1513 /// }
1514 ///
1515 /// let gmem = kvm_create_guest_memfd {
1516 /// size: 0x1000,
1517 /// flags: 0,
1518 /// reserved: [0; 6],
1519 /// };
1520 ///
1521 /// let guest_memfd = vm.create_guest_memfd(gmem).unwrap();
1522 /// ```
1523 pub fn create_guest_memfd(&self, gmem: kvm_create_guest_memfd) -> Result<RawFd> {
1524 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only
1525 // read the correct amount of memory from our pointer, and we verify the return result.
1526 let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_GUEST_MEMFD(), &gmem) };
1527 if ret < 0 {
1528 return Err(errno::Error::last());
1529 }
1530 Ok(ret)
1531 }
1532
1533 /// Allows userspace to set memory attributes for a range of guest physical memory.
1534 ///
1535 /// See the documentation for `KVM_SET_MEMORY_ATTRIBUTES`.
1536 ///
1537 /// Returns an io::Error when the attributes could not be set.
1538 ///
1539 /// # Arguments
1540 ///
1541 /// * kvm_memory_attributes - KVM set memory attributes structure. For details check the
1542 /// `kvm_memory_attributes` structure in the
1543 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
1544 ///
1545 /// # Example
1546 ///
1547 /// ```rust
1548 /// # extern crate kvm_ioctls;
1549 /// extern crate kvm_bindings;
1550 ///
1551 /// # use kvm_ioctls::{Cap, Kvm};
1552 /// use kvm_bindings::{
1553 /// KVM_CAP_GUEST_MEMFD, KVM_CAP_MEMORY_ATTRIBUTES, KVM_CAP_USER_MEMORY2, KVM_MEM_GUEST_MEMFD,
1554 /// KVM_MEMORY_ATTRIBUTE_PRIVATE, kvm_create_guest_memfd, kvm_memory_attributes,
1555 /// kvm_userspace_memory_region2,
1556 /// };
1557 /// use std::os::fd::RawFd;
1558 ///
1559 /// let kvm = Kvm::new().unwrap();
1560 /// #[cfg(target_arch = "x86_64")]
1561 /// let vm = kvm
1562 /// .create_vm_with_type(kvm_bindings::KVM_X86_SW_PROTECTED_VM as u64)
1563 /// .unwrap();
1564 /// #[cfg(not(target_arch = "x86_64"))]
1565 /// let vm = kvm.create_vm().unwrap(); /* non-x86 does not yet have a vm type that supports gmem */
1566 /// let gmem = kvm_create_guest_memfd {
1567 /// size: 0x10000,
1568 /// flags: 0,
1569 /// reserved: [0; 6],
1570 /// };
1571 ///
1572 /// let address_space = unsafe { libc::mmap(0 as _, 10000, 3, 34, -1, 0) };
1573 /// let userspace_addr = address_space as *const u8 as u64;
1574 ///
1575 /// if !vm.check_extension(Cap::GuestMemfd)
1576 /// || !vm.check_extension(Cap::UserMemory2)
1577 /// || !vm.check_extension(Cap::MemoryAttributes)
1578 /// {
1579 /// return;
1580 /// }
1581 ///
    /// let fd: RawFd = vm.create_guest_memfd(gmem).unwrap();
1583 /// let mem_region = kvm_userspace_memory_region2 {
1584 /// slot: 0,
1585 /// flags: KVM_MEM_GUEST_MEMFD,
1586 /// guest_phys_addr: 0x10000 as u64,
1587 /// memory_size: 0x10000 as u64,
1588 /// userspace_addr,
1589 /// guest_memfd_offset: 0,
1590 /// guest_memfd: fd as u32,
1591 /// pad1: 0,
1592 /// pad2: [0; 14],
1593 /// };
1594 /// unsafe {
1595 /// vm.set_user_memory_region2(mem_region).unwrap();
1596 /// };
1597 ///
1598 /// let attr = kvm_memory_attributes {
1599 /// address: 0x10000,
1600 /// size: 0x10000,
1601 /// attributes: KVM_MEMORY_ATTRIBUTE_PRIVATE as u64,
1602 /// flags: 0,
1603 /// };
1604 /// vm.set_memory_attributes(attr).unwrap();
1605 /// ```
1606 #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
1607 pub fn set_memory_attributes(&self, attr: kvm_memory_attributes) -> Result<()> {
1608 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
1609 // the correct amount of memory from our pointer, and we verify the return result.
1610 let ret = unsafe { ioctl_with_ref(self, KVM_SET_MEMORY_ATTRIBUTES(), &attr) };
1611 if ret == 0 {
1612 Ok(())
1613 } else {
1614 Err(errno::Error::last())
1615 }
1616 }
1617
1618 /// Issues platform-specific memory encryption commands to manage encrypted VMs if
1619 /// the platform supports creating those encrypted VMs.
1620 ///
1621 /// Currently, this ioctl is used for issuing Secure Encrypted Virtualization
1622 /// (SEV) commands on AMD Processors.
1623 ///
1624 /// See the documentation for `KVM_MEMORY_ENCRYPT_OP` in the
1625 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
1626 ///
1627 /// For SEV-specific functionality, prefer safe wrapper:
1628 /// - [`encrypt_op_sev`](Self::encrypt_op_sev)
1629 ///
1630 /// # Safety
1631 ///
1632 /// This function is unsafe because there is no guarantee `T` is valid in this context, how
1633 /// much data kernel will read from memory and where it will write data on error.
1634 ///
1635 /// # Arguments
1636 ///
1637 /// * `op` - an opaque platform specific structure.
1638 ///
1639 /// # Example
1640 #[cfg_attr(has_sev, doc = "```rust")]
1641 #[cfg_attr(not(has_sev), doc = "```rust,no_run")]
1642 /// # extern crate kvm_ioctls;
1643 /// # extern crate kvm_bindings;
1644 /// use kvm_bindings::bindings::kvm_sev_cmd;
1645 /// # use kvm_ioctls::Kvm;
1646 ///
1647 /// let kvm = Kvm::new().unwrap();
1648 /// let vm = kvm.create_vm().unwrap();
1649 ///
1650 /// // Initialize the SEV platform context.
1651 /// let mut init: kvm_sev_cmd = Default::default();
1652 /// unsafe { vm.encrypt_op(&mut init).unwrap() };
1653 /// ```
1654 #[cfg(target_arch = "x86_64")]
1655 pub unsafe fn encrypt_op<T>(&self, op: *mut T) -> Result<()> {
1656 // SAFETY: we trust the kernel and verified parameters
1657 let ret = unsafe { ioctl_with_mut_ptr(self, KVM_MEMORY_ENCRYPT_OP(), op) };
1658 if ret == 0 {
1659 Ok(())
1660 } else {
1661 Err(errno::Error::last())
1662 }
1663 }
1664
1665 /// Issue common lifecycle events of SEV guests, such as launching, running, snapshotting,
1666 /// migrating and decommissioning via `KVM_MEMORY_ENCRYPT_OP` ioctl.
1667 ///
1668 /// Kernel documentation states that this ioctl can be used for testing whether SEV is enabled
1669 /// by sending `NULL`. To do that, pass [`std::ptr::null_mut`](std::ptr::null_mut) to [`encrypt_op`](Self::encrypt_op).
1670 ///
1671 /// See the documentation for Secure Encrypted Virtualization (SEV).
1672 ///
1673 /// # Arguments
1674 ///
1675 /// * `op` - SEV-specific structure. For details check the
1676 /// [Secure Encrypted Virtualization (SEV) doc](https://www.kernel.org/doc/Documentation/virtual/kvm/amd-memory-encryption.rst).
1677 ///
1678 /// # Example
1679 #[cfg_attr(has_sev, doc = "```rust")]
1680 #[cfg_attr(not(has_sev), doc = "```rust,no_run")]
1681 /// # extern crate kvm_ioctls;
1682 /// # extern crate kvm_bindings;
1683 /// # use std::{os::raw::c_void, ptr::null_mut};
1684 /// use kvm_bindings::bindings::kvm_sev_cmd;
1685 /// # use kvm_ioctls::Kvm;
1686 ///
1687 /// let kvm = Kvm::new().unwrap();
1688 /// let vm = kvm.create_vm().unwrap();
1689 ///
1690 /// // Check whether SEV is enabled, optional.
1691 /// unsafe { vm.encrypt_op(null_mut() as *mut c_void) }.unwrap();
1692 ///
1693 /// // Initialize the SEV platform context.
1694 /// let mut init: kvm_sev_cmd = Default::default();
1695 /// vm.encrypt_op_sev(&mut init).unwrap();
1696 /// ```
    #[cfg(target_arch = "x86_64")]
    pub fn encrypt_op_sev(&self, op: &mut kvm_sev_cmd) -> Result<()> {
        // SAFETY: Safe because we know that kernel will only read the correct amount of memory
        // from our pointer and we know where it will write it (op.error). `kvm_sev_cmd` is the
        // exact structure `KVM_MEMORY_ENCRYPT_OP` expects, which upholds `encrypt_op`'s contract.
        unsafe { self.encrypt_op(op) }
    }
1703
1704 /// Register a guest memory region which may contain encrypted data.
1705 ///
1706 /// It is used in the SEV-enabled guest.
1707 ///
1708 /// See the documentation for `KVM_MEMORY_ENCRYPT_REG_REGION` in the
1709 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
1710 ///
1711 /// # Arguments
1712 ///
1713 /// * `memory_region` - Guest physical memory region.
1714 ///
1715 /// # Example
1716 #[cfg_attr(has_sev, doc = "```rust")]
1717 #[cfg_attr(not(has_sev), doc = "```rust,no_run")]
1718 /// # extern crate kvm_bindings;
1719 /// # extern crate kvm_ioctls;
1720 /// # extern crate libc;
1721 /// # use std::{fs::OpenOptions, ptr::null_mut};
1722 /// # use std::os::unix::io::AsRawFd;
1723 /// use kvm_bindings::bindings::{kvm_enc_region, kvm_sev_cmd, kvm_sev_launch_start, sev_cmd_id_KVM_SEV_LAUNCH_START};
1724 /// # use kvm_ioctls::Kvm;
1725 /// use libc;
1726 ///
1727 /// let kvm = Kvm::new().unwrap();
1728 /// let vm = kvm.create_vm().unwrap();
1729 /// let sev = OpenOptions::new()
1730 /// .read(true)
1731 /// .write(true)
1732 /// .open("/dev/sev")
1733 /// .unwrap();
1734 ///
1735 /// // Initialize the SEV platform context.
1736 /// let mut init: kvm_sev_cmd = Default::default();
1737 /// vm.encrypt_op_sev(&mut init).unwrap();
1738 ///
1739 /// // Create the memory encryption context.
1740 /// let start_data: kvm_sev_launch_start = Default::default();
1741 /// let mut start = kvm_sev_cmd {
1742 /// id: sev_cmd_id_KVM_SEV_LAUNCH_START,
1743 /// data: &start_data as *const kvm_sev_launch_start as _,
1744 /// sev_fd: sev.as_raw_fd() as _,
1745 /// ..Default::default()
1746 /// };
1747 /// vm.encrypt_op_sev(&mut start).unwrap();
1748 ///
1749 /// let addr = unsafe {
1750 /// libc::mmap(
1751 /// null_mut(),
1752 /// 4096,
1753 /// libc::PROT_READ | libc::PROT_WRITE,
1754 /// libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
1755 /// -1,
1756 /// 0,
1757 /// )
1758 /// };
1759 /// assert_ne!(addr, libc::MAP_FAILED);
1760 ///
1761 /// let memory_region = kvm_enc_region {
1762 /// addr: addr as _,
1763 /// size: 4096,
1764 /// };
1765 /// vm.register_enc_memory_region(&memory_region).unwrap();
1766 /// ```
1767 #[cfg(target_arch = "x86_64")]
1768 pub fn register_enc_memory_region(&self, memory_region: &kvm_enc_region) -> Result<()> {
1769 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
1770 // the correct amount of memory from our pointer, and we verify the return result.
1771 let ret = unsafe { ioctl_with_ref(self, KVM_MEMORY_ENCRYPT_REG_REGION(), memory_region) };
1772 if ret == 0 {
1773 Ok(())
1774 } else {
1775 Err(errno::Error::last())
1776 }
1777 }
1778
1779 /// Unregister a guest memory region registered with
1780 /// [`register_enc_memory_region`](Self::register_enc_memory_region).
1781 ///
1782 /// It is used in the SEV-enabled guest.
1783 ///
1784 /// See the documentation for `KVM_MEMORY_ENCRYPT_UNREG_REGION` in the
1785 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt).
1786 ///
1787 /// # Arguments
1788 ///
1789 /// * `memory_region` - Guest physical memory region.
1790 ///
1791 /// # Example
1792 #[cfg_attr(has_sev, doc = "```rust")]
1793 #[cfg_attr(not(has_sev), doc = "```rust,no_run")]
1794 /// # extern crate kvm_bindings;
1795 /// # extern crate kvm_ioctls;
1796 /// # extern crate libc;
1797 /// # use std::{fs::OpenOptions, ptr::null_mut};
1798 /// # use std::os::unix::io::AsRawFd;
1799 /// use kvm_bindings::bindings::{kvm_enc_region, kvm_sev_cmd, kvm_sev_launch_start, sev_cmd_id_KVM_SEV_LAUNCH_START};
1800 /// # use kvm_ioctls::Kvm;
1801 /// use libc;
1802 ///
1803 /// let kvm = Kvm::new().unwrap();
1804 /// let vm = kvm.create_vm().unwrap();
1805 /// let sev = OpenOptions::new()
1806 /// .read(true)
1807 /// .write(true)
1808 /// .open("/dev/sev")
1809 /// .unwrap();
1810 ///
1811 /// // Initialize the SEV platform context.
1812 /// let mut init: kvm_sev_cmd = Default::default();
1813 /// vm.encrypt_op_sev(&mut init).unwrap();
1814 ///
1815 /// // Create the memory encryption context.
1816 /// let start_data: kvm_sev_launch_start = Default::default();
1817 /// let mut start = kvm_sev_cmd {
1818 /// id: sev_cmd_id_KVM_SEV_LAUNCH_START,
1819 /// data: &start_data as *const kvm_sev_launch_start as _,
1820 /// sev_fd: sev.as_raw_fd() as _,
1821 /// ..Default::default()
1822 /// };
1823 /// vm.encrypt_op_sev(&mut start).unwrap();
1824 ///
1825 /// let addr = unsafe {
1826 /// libc::mmap(
1827 /// null_mut(),
1828 /// 4096,
1829 /// libc::PROT_READ | libc::PROT_WRITE,
1830 /// libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
1831 /// -1,
1832 /// 0,
1833 /// )
1834 /// };
1835 /// assert_ne!(addr, libc::MAP_FAILED);
1836 ///
1837 /// let memory_region = kvm_enc_region {
1838 /// addr: addr as _,
1839 /// size: 4096,
1840 /// };
1841 /// vm.register_enc_memory_region(&memory_region).unwrap();
1842 /// vm.unregister_enc_memory_region(&memory_region).unwrap();
1843 /// ```
1844 #[cfg(target_arch = "x86_64")]
1845 pub fn unregister_enc_memory_region(&self, memory_region: &kvm_enc_region) -> Result<()> {
1846 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
1847 // the correct amount of memory from our pointer, and we verify the return result.
1848 let ret = unsafe { ioctl_with_ref(self, KVM_MEMORY_ENCRYPT_UNREG_REGION(), memory_region) };
1849 if ret == 0 {
1850 Ok(())
1851 } else {
1852 Err(errno::Error::last())
1853 }
1854 }
1855
1856 /// Registers an address for coalesced MMIO. Write accesses to the address
1857 /// will not cause a corresponding [`VcpuExit`](crate::VcpuExit), but
1858 /// instead will be appended to the MMIO ring buffer. The [`VcpuFd`] can
1859 /// read entries in the ring buffer via [`VcpuFd::coalesced_mmio_read()`].
1860 /// If entries are not read the buffer will eventually be full,
1861 /// preventing further elements from being appended by the kernel.
1862 ///
1863 /// Needs `KVM_CAP_COALESCED_MMIO` ([`Cap::CoalescedMmio`](crate::Cap::CoalescedMmio))
1864 /// and/or `KVM_CAP_COALESCED_PIO` ([`Cap::CoalescedMmio`](crate::Cap::CoalescedPio)).
1865 ///
1866 /// See the documentation for `KVM_REGISTER_COALESCED_MMIO`.
1867 ///
1868 /// # Arguments
1869 ///
1870 /// * `addr` - Address being written to.
1871 /// * `size` - The size of the write for the mechanism to trigger.
1872 pub fn register_coalesced_mmio(&self, addr: IoEventAddress, size: u32) -> Result<()> {
1873 let (addr, pio) = match addr {
1874 IoEventAddress::Pio(addr) => (addr, 1),
1875 IoEventAddress::Mmio(addr) => (addr, 0),
1876 };
1877 let mut zone = kvm_coalesced_mmio_zone {
1878 addr,
1879 size,
1880 ..Default::default()
1881 };
1882 zone.__bindgen_anon_1.pio = pio;
1883
1884 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
1885 // the correct amount of memory from our pointer, and we verify the return result.
1886 let ret = unsafe { ioctl_with_ref(self, KVM_REGISTER_COALESCED_MMIO(), &zone) };
1887 if ret != 0 {
1888 return Err(errno::Error::last());
1889 }
1890 Ok(())
1891 }
1892
1893 /// Unregister an address that was previously registered via
1894 /// [`register_coalesced_mmio()`](VmFd::register_coalesced_mmio).
1895 ///
1896 /// See the documentation for `KVM_UNREGISTER_COALESCED_MMIO`.
1897 pub fn unregister_coalesced_mmio(&self, addr: IoEventAddress, size: u32) -> Result<()> {
1898 let (addr, pio) = match addr {
1899 IoEventAddress::Pio(addr) => (addr, 1),
1900 IoEventAddress::Mmio(addr) => (addr, 0),
1901 };
1902 let mut zone = kvm_coalesced_mmio_zone {
1903 addr,
1904 size,
1905 ..Default::default()
1906 };
1907 zone.__bindgen_anon_1.pio = pio;
1908
1909 // SAFETY: Safe because we know that our file is a VM fd, we know the kernel will only read
1910 // the correct amount of memory from our pointer, and we verify the return result.
1911 let ret = unsafe { ioctl_with_ref(self, KVM_UNREGISTER_COALESCED_MMIO(), &zone) };
1912 if ret != 0 {
1913 return Err(errno::Error::last());
1914 }
1915 Ok(())
1916 }
1917
1918 /// Sets a specified piece of vm configuration and/or state.
1919 ///
1920 /// See the documentation for `KVM_SET_DEVICE_ATTR` in
1921 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt)
1922 /// # Arguments
1923 ///
1924 /// * `device_attr` - The vm attribute to be set.
1925 ///
1926 /// # Example
1927 ///
1928 /// ```rust
1929 /// # extern crate kvm_ioctls;
1930 /// # extern crate kvm_bindings;
1931 /// # use kvm_ioctls::Kvm;
1932 /// # use kvm_bindings::{
1933 /// PSCI_0_2_FN64_BASE, kvm_smccc_filter_action_KVM_SMCCC_FILTER_FWD_TO_USER,
1934 /// KVM_ARM_VM_SMCCC_CTRL, KVM_ARM_VM_SMCCC_FILTER
1935 /// };
1936 /// let kvm = Kvm::new().unwrap();
1937 /// let vm = kvm.create_vm().unwrap();
1938 ///
1939 /// const PSCI_0_2_FN64_CPU_ON: u32 = PSCI_0_2_FN64_BASE + 3;
1940 /// let smccc_filter = kvm_bindings::kvm_smccc_filter {
1941 /// base: PSCI_0_2_FN64_CPU_ON,
1942 /// nr_functions: 1,
1943 /// action: kvm_smccc_filter_action_KVM_SMCCC_FILTER_FWD_TO_USER as u8,
1944 /// pad: [0u8; 15],
1945 /// };
1946 ///
1947 /// let dist_attr = kvm_bindings::kvm_device_attr {
1948 /// group: KVM_ARM_VM_SMCCC_CTRL,
1949 /// attr: KVM_ARM_VM_SMCCC_FILTER as u64,
1950 /// addr: &smccc_filter as *const _ as u64,
1951 /// flags: 0,
1952 /// };
1953 ///
1954 /// if (vm.has_device_attr(&dist_attr).is_ok()) {
1955 /// vm.set_device_attr(&dist_attr).unwrap();
1956 /// }
1957 /// ```
1958 #[cfg(target_arch = "aarch64")]
1959 pub fn set_device_attr(&self, device_attr: &kvm_device_attr) -> Result<()> {
1960 // SAFETY: Safe because we call this with a Vm fd and we trust the kernel.
1961 let ret = unsafe { ioctl_with_ref(self, KVM_SET_DEVICE_ATTR(), device_attr) };
1962 if ret != 0 {
1963 return Err(errno::Error::last());
1964 }
1965 Ok(())
1966 }
1967
1968 /// Tests whether a vm supports a particular attribute.
1969 ///
1970 /// See the documentation for `KVM_HAS_DEVICE_ATTR` in
1971 /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt)
1972 /// # Arguments
1973 ///
1974 /// * `device_attr` - The vm attribute to be tested. `addr` field is ignored.
1975 ///
1976 /// # Example
1977 ///
1978 /// ```rust
1979 /// # extern crate kvm_ioctls;
1980 /// # extern crate kvm_bindings;
1981 /// # use kvm_ioctls::Kvm;
1982 /// # use kvm_bindings::{
1983 /// KVM_ARM_VM_SMCCC_CTRL, KVM_ARM_VM_SMCCC_FILTER
1984 /// };
1985 /// let kvm = Kvm::new().unwrap();
1986 /// let vm = kvm.create_vm().unwrap();
1987 ///
1988 /// let dist_attr = kvm_bindings::kvm_device_attr {
1989 /// group: KVM_ARM_VM_SMCCC_CTRL,
1990 /// attr: KVM_ARM_VM_SMCCC_FILTER as u64,
1991 /// addr: 0x0,
1992 /// flags: 0,
1993 /// };
1994 ///
1995 /// vm.has_device_attr(&dist_attr);
1996 /// ```
1997 #[cfg(target_arch = "aarch64")]
1998 pub fn has_device_attr(&self, device_attr: &kvm_device_attr) -> Result<()> {
1999 // SAFETY: Safe because we call this with a Vm fd and we trust the kernel.
2000 let ret = unsafe { ioctl_with_ref(self, KVM_HAS_DEVICE_ATTR(), device_attr) };
2001 if ret != 0 {
2002 return Err(errno::Error::last());
2003 }
2004 Ok(())
2005 }
2006}
2007
/// Helper function to create a new `VmFd`.
///
/// This should not be exported as a public function because the preferred way is to use
/// `create_vm` from `Kvm`. The function cannot be part of the `VmFd` implementation because
/// then it would be exported with the public `VmFd` interface.
///
/// # Arguments
///
/// * `vm` - `File` wrapping the VM file descriptor.
/// * `run_size` - Size of the `kvm_run` mmap area; stored so vCPUs created from
///   this VM know how large a mapping to create (presumably the value reported
///   by `KVM_GET_VCPU_MMAP_SIZE` — set by the caller).
pub fn new_vmfd(vm: File, run_size: usize) -> VmFd {
    VmFd { vm, run_size }
}
2016
impl AsRawFd for VmFd {
    fn as_raw_fd(&self) -> RawFd {
        // Expose the raw VM file descriptor without transferring ownership;
        // the fd remains owned (and eventually closed) by `self.vm`.
        self.vm.as_raw_fd()
    }
}
2022
/// Create a dummy GIC device.
///
/// Tries to create a GICv3 first and falls back to a GICv2 if v3 cannot be
/// emulated by the host.
///
/// # Arguments
///
/// * `vm` - The vm file descriptor.
/// * `flags` - Flags to be passed to `KVM_CREATE_DEVICE`.
#[cfg(test)]
#[cfg(target_arch = "aarch64")]
pub(crate) fn create_gic_device(vm: &VmFd, flags: u32) -> DeviceFd {
    let mut gic_device = kvm_create_device {
        type_: kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V3,
        fd: 0,
        flags,
    };
    vm.create_device(&mut gic_device).unwrap_or_else(|_| {
        // GICv3 not available: retry with a GICv2.
        gic_device.type_ = kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V2;
        vm.create_device(&mut gic_device)
            .expect("Cannot create KVM vGIC device")
    })
}
2046
/// Set supported number of IRQs for vGIC.
///
/// # Arguments
///
/// * `vgic` - The vGIC file descriptor.
/// * `nr_irqs` - Number of IRQs.
#[cfg(test)]
#[cfg(target_arch = "aarch64")]
pub(crate) fn set_supported_nr_irqs(vgic: &DeviceFd, nr_irqs: u32) {
    let nr_irqs_attr = kvm_device_attr {
        group: KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
        attr: 0,
        addr: &nr_irqs as *const u32 as u64,
        flags: 0,
    };
    // Probe that the attribute is supported before actually applying it.
    vgic.has_device_attr(&nr_irqs_attr).unwrap();
    vgic.set_device_attr(&nr_irqs_attr).unwrap();
}
2065
/// Request the initialization of the vGIC.
///
/// # Arguments
///
/// * `vgic` - The vGIC file descriptor.
#[cfg(test)]
#[cfg(target_arch = "aarch64")]
pub(crate) fn request_gic_init(vgic: &DeviceFd) {
    let init_attr = kvm_device_attr {
        group: KVM_DEV_ARM_VGIC_GRP_CTRL,
        attr: u64::from(KVM_DEV_ARM_VGIC_CTRL_INIT),
        addr: 0,
        flags: 0,
    };
    // Probe that the attribute is supported before triggering the init.
    vgic.has_device_attr(&init_attr).unwrap();
    vgic.set_device_attr(&init_attr).unwrap();
}
2083
/// Create a dummy AIA device.
///
/// # Arguments
///
/// * `vm` - The vm file descriptor.
/// * `flags` - Flags to be passed to `KVM_CREATE_DEVICE`.
#[cfg(test)]
#[cfg(target_arch = "riscv64")]
pub(crate) fn create_aia_device(vm: &VmFd, flags: u32) -> DeviceFd {
    let mut device = kvm_create_device {
        fd: 0,
        flags,
        type_: kvm_device_type_KVM_DEV_TYPE_RISCV_AIA,
    };
    vm.create_device(&mut device)
        .expect("Cannot create KVM vAIA device")
}
2101
/// Set supported number of IRQs for vAIA.
///
/// # Arguments
///
/// * `vaia` - The vAIA file descriptor.
/// * `nr_irqs` - Number of IRQs.
#[cfg(test)]
#[cfg(target_arch = "riscv64")]
pub(crate) fn set_supported_nr_irqs(vaia: &DeviceFd, nr_irqs: u32) {
    let nr_irqs_attr = kvm_device_attr {
        group: KVM_DEV_RISCV_AIA_GRP_CONFIG,
        attr: u64::from(KVM_DEV_RISCV_AIA_CONFIG_SRCS),
        addr: &nr_irqs as *const u32 as u64,
        flags: 0,
    };
    // Probe that the attribute is supported before actually applying it.
    vaia.has_device_attr(&nr_irqs_attr).unwrap();
    vaia.set_device_attr(&nr_irqs_attr).unwrap();
}
2120
/// Request the initialization of the vAIA.
///
/// # Arguments
///
/// * `vaia` - The vAIA file descriptor.
#[cfg(test)]
#[cfg(target_arch = "riscv64")]
pub(crate) fn request_aia_init(vaia: &DeviceFd) {
    let init_attr = kvm_device_attr {
        group: KVM_DEV_RISCV_AIA_GRP_CTRL,
        attr: u64::from(KVM_DEV_RISCV_AIA_CTRL_INIT),
        addr: 0,
        flags: 0,
    };
    // Probe that the attribute is supported before triggering the init.
    vaia.has_device_attr(&init_attr).unwrap();
    vaia.set_device_attr(&init_attr).unwrap();
}
2138
2139#[cfg(test)]
2140mod tests {
2141 #![allow(clippy::undocumented_unsafe_blocks)]
2142 use super::*;
2143 use crate::Kvm;
2144
2145 #[cfg(target_arch = "x86_64")]
2146 use std::{fs::OpenOptions, os::fd::IntoRawFd, ptr::null_mut};
2147
2148 use libc::EFD_NONBLOCK;
2149
2150 #[test]
2151 fn test_set_invalid_memory() {
2152 let kvm = Kvm::new().unwrap();
2153 let vm = kvm.create_vm().unwrap();
2154 let invalid_mem_region = kvm_userspace_memory_region {
2155 slot: 0,
2156 guest_phys_addr: 0,
2157 memory_size: 0,
2158 userspace_addr: 0,
2159 flags: 0,
2160 };
2161 unsafe { vm.set_user_memory_region(invalid_mem_region) }.unwrap_err();
2162 }
2163
2164 #[test]
2165 fn test_set_invalid_memory2() {
2166 let kvm = Kvm::new().unwrap();
2167 let vm = kvm.create_vm().unwrap();
2168 let invalid_mem_region = kvm_userspace_memory_region2 {
2169 slot: 0,
2170 flags: 0,
2171 guest_phys_addr: 0,
2172 memory_size: 0,
2173 userspace_addr: 0,
2174 guest_memfd_offset: 0,
2175 guest_memfd: 0,
2176 pad1: 0,
2177 pad2: [0; 14],
2178 };
2179 unsafe { vm.set_user_memory_region2(invalid_mem_region) }.unwrap_err();
2180 }
2181
    // KVM_SET_TSS_ADDR on a freshly created VM should succeed.
    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_set_tss_address() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        vm.set_tss_address(0xfffb_d000).unwrap();
    }
2189
    // Silently skipped when KVM_CAP_SET_IDENTITY_MAP_ADDR is unavailable.
    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_set_identity_map_address() {
        let kvm = Kvm::new().unwrap();
        if kvm.check_extension(Cap::SetIdentityMapAddr) {
            let vm = kvm.create_vm().unwrap();
            vm.set_identity_map_address(0xfffb_c000).unwrap();
            vm.create_vcpu(0).unwrap();
            // Setting the identity map after creating a vCPU must fail.
            vm.set_identity_map_address(0xfffb_c000).unwrap_err();
        }
    }
2202
    // Verify that PIC state round-trips through KVM_SET_IRQCHIP / KVM_GET_IRQCHIP.
    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_irq_chip() {
        use Cap;

        let kvm = Kvm::new().unwrap();
        assert!(kvm.check_extension(Cap::Irqchip));
        let vm = kvm.create_vm().unwrap();
        vm.create_irq_chip().unwrap();

        let mut irqchip = kvm_irqchip {
            chip_id: KVM_IRQCHIP_PIC_MASTER,
            ..Default::default()
        };
        // Set the irq_base to a non-default value to check that set & get work.
        irqchip.chip.pic.irq_base = 10;
        vm.set_irqchip(&irqchip).unwrap();

        // We initialize a dummy irq chip (`other_irqchip`) in which the
        // function `get_irqchip` returns its result.
        let mut other_irqchip = kvm_irqchip {
            chip_id: KVM_IRQCHIP_PIC_MASTER,
            ..Default::default()
        };
        vm.get_irqchip(&mut other_irqchip).unwrap();

        // Safe because we know that the irqchip type is PIC.
        unsafe { assert_eq!(irqchip.chip.pic, other_irqchip.chip.pic) };
    }
2232
    // On aarch64, KVM_CREATE_IRQCHIP is expected to succeed exactly when the
    // host can emulate a GICv2 (probed via KVM_CREATE_DEVICE_TEST).
    #[test]
    #[cfg(target_arch = "aarch64")]
    fn test_irq_chip() {
        use Cap;

        let kvm = Kvm::new().unwrap();
        assert!(kvm.check_extension(Cap::Irqchip));

        let vm = kvm.create_vm().unwrap();

        // On ARM/arm64, a GICv2 is created. It's better to check ahead whether GICv2
        // can be emulated or not.
        let mut gic_device = kvm_create_device {
            type_: kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V2,
            fd: 0,
            flags: KVM_CREATE_DEVICE_TEST,
        };

        let vgic_v2_supported = vm.create_device(&mut gic_device).is_ok();
        assert_eq!(vm.create_irq_chip().is_ok(), vgic_v2_supported);
    }
2254
    // Verify PIT state round-trips through KVM_SET_PIT2 / KVM_GET_PIT2
    // (modulo the kernel-maintained load timestamps).
    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_pit2() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        assert!(kvm.check_extension(Cap::Irqchip));
        vm.create_irq_chip().unwrap();

        vm.create_pit2(kvm_pit_config::default()).unwrap();

        let pit2 = vm.get_pit2().unwrap();
        vm.set_pit2(&pit2).unwrap();
        let mut other_pit2 = vm.get_pit2().unwrap();
        // Load time will differ, let's overwrite it so we can test equality.
        other_pit2.channels[0].count_load_time = pit2.channels[0].count_load_time;
        other_pit2.channels[1].count_load_time = pit2.channels[1].count_load_time;
        other_pit2.channels[2].count_load_time = pit2.channels[2].count_load_time;
        assert_eq!(pit2, other_pit2);
    }
2274
    // KVM_SET_CLOCK rewinds the VM clock to a small value; a subsequent
    // KVM_GET_CLOCK must show time that advanced past the reset point but is
    // still below the original reading.
    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_clock() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();

        // Get current time.
        let orig = vm.get_clock().unwrap();

        // Reset time.
        let fudged = kvm_clock_data {
            clock: 10,
            ..Default::default()
        };
        vm.set_clock(&fudged).unwrap();

        // Get new time.
        let new = vm.get_clock().unwrap();

        // Verify new time has progressed but is smaller than orig time.
        assert!(fudged.clock < new.clock);
        assert!(new.clock < orig.clock);
    }
2298
    // Register the same eventfd at several PIO/MMIO addresses, with and
    // without a datamatch value of each supported width (u8..u64).
    #[test]
    fn test_register_ioevent() {
        // NoDatamatch must stay a zero-sized type; its size selects the
        // "no datamatch" path in register_ioevent.
        assert_eq!(std::mem::size_of::<NoDatamatch>(), 0);

        let kvm = Kvm::new().unwrap();
        let vm_fd = kvm.create_vm().unwrap();
        let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
        vm_fd
            .register_ioevent(&evtfd, &IoEventAddress::Pio(0xf4), NoDatamatch)
            .unwrap();
        vm_fd
            .register_ioevent(&evtfd, &IoEventAddress::Mmio(0x1000), NoDatamatch)
            .unwrap();
        vm_fd
            .register_ioevent(&evtfd, &IoEventAddress::Pio(0xc1), 0x7fu8)
            .unwrap();
        vm_fd
            .register_ioevent(&evtfd, &IoEventAddress::Pio(0xc2), 0x1337u16)
            .unwrap();
        vm_fd
            .register_ioevent(&evtfd, &IoEventAddress::Pio(0xc4), 0xdead_beefu32)
            .unwrap();
        vm_fd
            .register_ioevent(&evtfd, &IoEventAddress::Pio(0xc8), 0xdead_beef_dead_beefu64)
            .unwrap()
    }
2325
    // Unregistering must fail for addresses that were never registered and
    // succeed (once) after a matching registration.
    #[test]
    fn test_unregister_ioevent() {
        assert_eq!(std::mem::size_of::<NoDatamatch>(), 0);

        let kvm = Kvm::new().unwrap();
        let vm_fd = kvm.create_vm().unwrap();
        let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
        let pio_addr = IoEventAddress::Pio(0xf4);
        let mmio_addr = IoEventAddress::Mmio(0x1000);

        // First try to unregister addresses which have not been registered.
        vm_fd
            .unregister_ioevent(&evtfd, &pio_addr, NoDatamatch)
            .unwrap_err();
        vm_fd
            .unregister_ioevent(&evtfd, &mmio_addr, NoDatamatch)
            .unwrap_err();

        // Now register the addresses
        vm_fd
            .register_ioevent(&evtfd, &pio_addr, NoDatamatch)
            .unwrap();
        vm_fd
            .register_ioevent(&evtfd, &mmio_addr, 0x1337u16)
            .unwrap();

        // Try again unregistering the addresses. This time it should work
        // since they have been previously registered.
        vm_fd
            .unregister_ioevent(&evtfd, &pio_addr, NoDatamatch)
            .unwrap();
        vm_fd
            .unregister_ioevent(&evtfd, &mmio_addr, 0x1337u16)
            .unwrap();
    }
2361
    // Exercise irqfd (de)registration: normal register/unregister, duplicate
    // registrations, and the resample variant when the capability is present.
    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_register_unregister_irqfd() {
        let kvm = Kvm::new().unwrap();
        let vm_fd = kvm.create_vm().unwrap();
        let evtfd1 = EventFd::new(EFD_NONBLOCK).unwrap();
        let evtfd2 = EventFd::new(EFD_NONBLOCK).unwrap();
        let evtfd3 = EventFd::new(EFD_NONBLOCK).unwrap();
        let evtfd4 = EventFd::new(EFD_NONBLOCK).unwrap();
        let resamplefd = EventFd::new(EFD_NONBLOCK).unwrap();

        vm_fd.create_irq_chip().unwrap();

        vm_fd.register_irqfd(&evtfd1, 4).unwrap();
        vm_fd.register_irqfd(&evtfd2, 8).unwrap();
        vm_fd.register_irqfd(&evtfd3, 4).unwrap();
        vm_fd.unregister_irqfd(&evtfd2, 8).unwrap();
        // KVM irqfd doesn't report failure on this case:(
        vm_fd.unregister_irqfd(&evtfd2, 8).unwrap();

        // Duplicated eventfd registration.
        // On x86_64 this fails as the event fd was already matched with a GSI.
        vm_fd.register_irqfd(&evtfd3, 4).unwrap_err();
        vm_fd.register_irqfd(&evtfd3, 5).unwrap_err();
        // KVM irqfd doesn't report failure on this case:(
        vm_fd.unregister_irqfd(&evtfd3, 5).unwrap();

        if vm_fd.check_extension(Cap::IrqfdResample) {
            vm_fd
                .register_irqfd_with_resample(&evtfd4, &resamplefd, 6)
                .unwrap();
            vm_fd.unregister_irqfd(&evtfd4, 6).unwrap();
        } else {
            vm_fd
                .register_irqfd_with_resample(&evtfd4, &resamplefd, 6)
                .unwrap_err();
        }
    }
2400
    // Same irqfd scenarios as the x86_64 variant, but a vGIC must be created
    // and initialized first for irqfd registration to be possible.
    #[test]
    #[cfg(target_arch = "aarch64")]
    fn test_register_unregister_irqfd() {
        let kvm = Kvm::new().unwrap();
        let vm_fd = kvm.create_vm().unwrap();
        let evtfd1 = EventFd::new(EFD_NONBLOCK).unwrap();
        let evtfd2 = EventFd::new(EFD_NONBLOCK).unwrap();
        let evtfd3 = EventFd::new(EFD_NONBLOCK).unwrap();
        let evtfd4 = EventFd::new(EFD_NONBLOCK).unwrap();
        let resamplefd = EventFd::new(EFD_NONBLOCK).unwrap();

        // Create the vGIC device.
        let vgic_fd = create_gic_device(&vm_fd, 0);

        // GICv3 on arm/aarch64 requires an online vCPU prior to setting device attributes,
        // see: https://www.kernel.org/doc/html/latest/virt/kvm/devices/arm-vgic-v3.html
        vm_fd.create_vcpu(0).unwrap();

        // Set supported number of IRQs.
        set_supported_nr_irqs(&vgic_fd, 128);
        // Request the initialization of the vGIC.
        request_gic_init(&vgic_fd);

        vm_fd.register_irqfd(&evtfd1, 4).unwrap();
        vm_fd.register_irqfd(&evtfd2, 8).unwrap();
        vm_fd.register_irqfd(&evtfd3, 4).unwrap();
        vm_fd.unregister_irqfd(&evtfd2, 8).unwrap();
        // KVM irqfd doesn't report failure on this case:(
        vm_fd.unregister_irqfd(&evtfd2, 8).unwrap();

        // Duplicated eventfd registration.
        // On aarch64, this fails because setting up the interrupt controller is mandatory before
        // registering any IRQ.
        vm_fd.register_irqfd(&evtfd3, 4).unwrap_err();
        vm_fd.register_irqfd(&evtfd3, 5).unwrap_err();
        // KVM irqfd doesn't report failure on this case:(
        vm_fd.unregister_irqfd(&evtfd3, 5).unwrap();

        if vm_fd.check_extension(Cap::IrqfdResample) {
            vm_fd
                .register_irqfd_with_resample(&evtfd4, &resamplefd, 6)
                .unwrap();
            vm_fd.unregister_irqfd(&evtfd4, 6).unwrap();
        } else {
            vm_fd
                .register_irqfd_with_resample(&evtfd4, &resamplefd, 6)
                .unwrap_err();
        }
    }
2450
    // Same irqfd scenarios on riscv64; a fully configured vAIA (APLIC + IMSIC
    // addresses set, then initialized) is required before registering irqfds.
    #[test]
    #[cfg(target_arch = "riscv64")]
    fn test_register_unregister_irqfd() {
        let kvm = Kvm::new().unwrap();
        let vm_fd = kvm.create_vm().unwrap();
        let evtfd1 = EventFd::new(EFD_NONBLOCK).unwrap();
        let evtfd2 = EventFd::new(EFD_NONBLOCK).unwrap();
        let evtfd3 = EventFd::new(EFD_NONBLOCK).unwrap();

        // Create the vAIA device.
        let vaia_fd = create_aia_device(&vm_fd, 0);

        // AIA on riscv64 requires at least one online vCPU prior to setting
        // device attributes. Otherwise it would fail when trying to set address
        // of IMSIC.
        vm_fd.create_vcpu(0).unwrap();

        // Set maximum supported number of IRQs of the vAIA device to 128.
        set_supported_nr_irqs(&vaia_fd, 128);

        // Before request vAIA device to initialize, APLIC and IMSIC must be set
        let aplic_addr: u64 = 0x4000;
        vaia_fd
            .set_device_attr(&kvm_device_attr {
                group: KVM_DEV_RISCV_AIA_GRP_ADDR,
                attr: u64::from(KVM_DEV_RISCV_AIA_ADDR_APLIC),
                addr: &aplic_addr as *const u64 as u64,
                flags: 0,
            })
            .unwrap();
        let imsic_addr: u64 = 0x8000;
        vaia_fd
            .set_device_attr(&kvm_device_attr {
                group: KVM_DEV_RISCV_AIA_GRP_ADDR,
                attr: 1u64,
                addr: &imsic_addr as *const u64 as u64,
                flags: 0,
            })
            .unwrap();

        // Initialize valid vAIA device.
        request_aia_init(&vaia_fd);

        vm_fd.register_irqfd(&evtfd1, 4).unwrap();
        vm_fd.register_irqfd(&evtfd2, 8).unwrap();
        vm_fd.register_irqfd(&evtfd3, 4).unwrap();
        vm_fd.unregister_irqfd(&evtfd2, 8).unwrap();
        // KVM irqfd doesn't report failure on this case:(
        vm_fd.unregister_irqfd(&evtfd2, 8).unwrap();

        // Duplicated eventfd registration.
        // On riscv64 this fails as the event fd was already matched with a GSI.
        vm_fd.register_irqfd(&evtfd3, 4).unwrap_err();
        vm_fd.register_irqfd(&evtfd3, 5).unwrap_err();
        // KVM irqfd doesn't report failure on this case:(
        vm_fd.unregister_irqfd(&evtfd3, 5).unwrap();
    }
2508
    // Toggle IRQ 4 up/down/up via KVM_IRQ_LINE after creating the in-kernel
    // irqchip; each transition must succeed.
    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_set_irq_line() {
        let kvm = Kvm::new().unwrap();
        let vm_fd = kvm.create_vm().unwrap();

        vm_fd.create_irq_chip().unwrap();

        vm_fd.set_irq_line(4, true).unwrap();
        vm_fd.set_irq_line(4, false).unwrap();
        vm_fd.set_irq_line(4, true).unwrap();
    }
2521
    // Toggle SPI and PPI lines via KVM_IRQ_LINE using the packed aarch64 irq
    // field encoding (irq_type | vcpu_index | irq_id).
    #[test]
    #[cfg(target_arch = "aarch64")]
    #[allow(clippy::unusual_byte_groupings)]
    fn test_set_irq_line() {
        let kvm = Kvm::new().unwrap();
        let vm_fd = kvm.create_vm().unwrap();
        // Create a vcpu for test case 2 of the KVM_IRQ_LINE API on aarch64.
        vm_fd.create_vcpu(0).unwrap();

        // Create the vGIC device.
        let vgic_fd = create_gic_device(&vm_fd, 0);
        // Set supported number of IRQs.
        set_supported_nr_irqs(&vgic_fd, 128);
        // Request the initialization of the vGIC.
        request_gic_init(&vgic_fd);

        // On arm/aarch64, irq field is interpreted like this:
        // bits: | 31 ... 24 | 23 ... 16 | 15 ... 0 |
        // field: | irq_type | vcpu_index | irq_id |
        // The irq_type field has the following values:
        // - irq_type[0]: out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ
        // - irq_type[1]: in-kernel GIC: SPI, irq_id between 32 and 1019 (incl.) (the vcpu_index field is ignored)
        // - irq_type[2]: in-kernel GIC: PPI, irq_id between 16 and 31 (incl.)
        // Hence, using irq_type = 1, irq_id = 32 (decimal), the irq field in hex is: 0x01_00_0020
        vm_fd.set_irq_line(0x01_00_0020, true).unwrap();
        vm_fd.set_irq_line(0x01_00_0020, false).unwrap();
        vm_fd.set_irq_line(0x01_00_0020, true).unwrap();

        // Case 2: using irq_type = 2, vcpu_index = 0, irq_id = 16 (decimal), the irq field in hex is: 0x02_00_0010
        vm_fd.set_irq_line(0x02_00_0010, true).unwrap();
        vm_fd.set_irq_line(0x02_00_0010, false).unwrap();
        vm_fd.set_irq_line(0x02_00_0010, true).unwrap();
    }
2555
    // Toggle IRQ 7 via KVM_IRQ_LINE after fully configuring a vAIA device
    // (APLIC + IMSIC addresses set, then initialized).
    #[test]
    #[cfg(target_arch = "riscv64")]
    fn test_set_irq_line() {
        let kvm = Kvm::new().unwrap();
        let vm_fd = kvm.create_vm().unwrap();
        vm_fd.create_vcpu(0).unwrap();

        // Create the vAIA device.
        let vaia_fd = create_aia_device(&vm_fd, 0);
        // Set maximum supported number of IRQs of the vAIA device to 128.
        set_supported_nr_irqs(&vaia_fd, 128);

        // Before request vAIA device to initialize, APLIC and IMSIC must be set
        let aplic_addr: u64 = 0x4000;
        vaia_fd
            .set_device_attr(&kvm_device_attr {
                group: KVM_DEV_RISCV_AIA_GRP_ADDR,
                attr: u64::from(KVM_DEV_RISCV_AIA_ADDR_APLIC),
                addr: &aplic_addr as *const u64 as u64,
                flags: 0,
            })
            .unwrap();
        let imsic_addr: u64 = 0x8000;
        vaia_fd
            .set_device_attr(&kvm_device_attr {
                group: KVM_DEV_RISCV_AIA_GRP_ADDR,
                attr: 1u64,
                addr: &imsic_addr as *const u64 as u64,
                flags: 0,
            })
            .unwrap();

        // Initialize valid vAIA device.
        request_aia_init(&vaia_fd);

        vm_fd.set_irq_line(7, true).unwrap();
        vm_fd.set_irq_line(7, false).unwrap();
        vm_fd.set_irq_line(7, true).unwrap();
    }
2595
    // Every VmFd ioctl issued on an invalid file descriptor must fail with
    // EBADF rather than panic or succeed.
    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_faulty_vm_fd() {
        let badf_errno = libc::EBADF;

        // Fd -2 is never a valid descriptor, so each call below must error out.
        let faulty_vm_fd = VmFd {
            vm: unsafe { File::from_raw_fd(-2) },
            run_size: 0,
        };

        let invalid_mem_region = kvm_userspace_memory_region {
            slot: 0,
            guest_phys_addr: 0,
            memory_size: 0,
            userspace_addr: 0,
            flags: 0,
        };

        assert_eq!(
            unsafe {
                faulty_vm_fd
                    .set_user_memory_region(invalid_mem_region)
                    .unwrap_err()
                    .errno()
            },
            badf_errno
        );
        assert_eq!(
            faulty_vm_fd.set_tss_address(0).unwrap_err().errno(),
            badf_errno
        );
        assert_eq!(
            faulty_vm_fd.create_irq_chip().unwrap_err().errno(),
            badf_errno
        );
        assert_eq!(
            faulty_vm_fd
                .create_pit2(kvm_pit_config::default())
                .unwrap_err()
                .errno(),
            badf_errno
        );
        let event_fd = EventFd::new(EFD_NONBLOCK).unwrap();
        assert_eq!(
            faulty_vm_fd
                .register_ioevent(&event_fd, &IoEventAddress::Pio(0), 0u64)
                .unwrap_err()
                .errno(),
            badf_errno
        );
        assert_eq!(
            faulty_vm_fd
                .get_irqchip(&mut kvm_irqchip::default())
                .unwrap_err()
                .errno(),
            badf_errno
        );
        assert_eq!(
            faulty_vm_fd
                .set_irqchip(&kvm_irqchip::default())
                .unwrap_err()
                .errno(),
            badf_errno
        );
        assert_eq!(faulty_vm_fd.get_clock().unwrap_err().errno(), badf_errno);
        assert_eq!(
            faulty_vm_fd
                .set_clock(&kvm_clock_data::default())
                .unwrap_err()
                .errno(),
            badf_errno
        );
        assert_eq!(faulty_vm_fd.get_pit2().unwrap_err().errno(), badf_errno);
        assert_eq!(
            faulty_vm_fd
                .set_pit2(&kvm_pit_state2::default())
                .unwrap_err()
                .errno(),
            badf_errno
        );
        assert_eq!(
            faulty_vm_fd
                .register_irqfd(&event_fd, 0)
                .unwrap_err()
                .errno(),
            badf_errno
        );

        assert_eq!(
            faulty_vm_fd.create_vcpu(0).err().unwrap().errno(),
            badf_errno
        );

        assert_eq!(
            faulty_vm_fd.get_dirty_log(0, 0).unwrap_err().errno(),
            badf_errno
        );

        // Don't drop the File object, or it'll notice the file it's trying to close is
        // invalid and abort the process.
        let _ = faulty_vm_fd.vm.into_raw_fd();
    }
2698
    // KVM_ARM_PREFERRED_TARGET must fill in a kvm_vcpu_init on any host.
    #[test]
    #[cfg(target_arch = "aarch64")]
    fn test_get_preferred_target() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        let mut kvi = kvm_vcpu_init::default();
        vm.get_preferred_target(&mut kvi).unwrap();
    }
2707
    /// As explained in the example code related to signal_msi(), sending
    /// a random MSI vector will always fail because no vector has been
    /// previously allocated from the guest itself.
    #[test]
    #[cfg(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "riscv64"
    ))]
    fn test_signal_msi_failure() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        // A default (all-zero) MSI has no route set up, so the ioctl must fail.
        let msi = kvm_msi::default();
        vm.signal_msi(msi).unwrap_err();
    }
2723
    // KVM_ENABLE_CAP with an all-default (invalid) capability must fail.
    #[test]
    #[cfg(not(any(target_arch = "aarch64", target_arch = "riscv64")))]
    fn test_enable_cap_failure() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        let cap: kvm_enable_cap = Default::default();
        // Providing the `kvm_enable_cap` structure filled with default() should
        // always result in a failure as it is not a valid capability.
        vm.enable_cap(&cap).unwrap_err();
    }
2734
    // Enabling KVM_CAP_SPLIT_IRQCHIP with a sensible IOAPIC pin count succeeds.
    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_enable_split_irqchip_cap() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_SPLIT_IRQCHIP,
            ..Default::default()
        };
        // As per the KVM documentation, KVM_CAP_SPLIT_IRQCHIP only emulates
        // the local APIC in kernel, expecting that a userspace IOAPIC will
        // be implemented by the VMM.
        // Along with this capability, the user needs to specify the number
        // of pins reserved for the userspace IOAPIC. This number needs to be
        // provided through the first argument of the capability structure, as
        // specified in KVM documentation:
        // args[0] - number of routes reserved for userspace IOAPICs
        //
        // Because an IOAPIC supports 24 pins, that's the reason why this test
        // picked this number as reference.
        cap.args[0] = 24;
        vm.enable_cap(&cap).unwrap();
    }
2758
    // KVM_SET_GSI_ROUTING with an empty routing table: fails before the
    // interrupt controller exists (x86), succeeds after it is set up.
    #[test]
    #[cfg(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "riscv64"
    ))]
    fn test_set_gsi_routing() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        let irq_routing = KvmIrqRouting::new(0).unwrap();

        // Expect failure for x86 since the irqchip is not created yet.
        #[cfg(target_arch = "x86_64")]
        vm.set_gsi_routing(&irq_routing).unwrap_err();
        #[cfg(target_arch = "x86_64")]
        vm.create_irq_chip().unwrap();

        // RISC-V 64-bit expect an AIA device to be created in advance of
        // committing irq_routing table.
        #[cfg(target_arch = "riscv64")]
        create_aia_device(&vm, 0);

        vm.set_gsi_routing(&irq_routing).unwrap();
    }
2783
    // vCPU ids must be below the maximum reported by KVM_CAP_MAX_VCPU_ID.
    #[test]
    fn test_create_vcpu_different_ids() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();

        // Fails for an arbitrarily large id.
        let err = vm.create_vcpu(65537_u64).err();
        assert_eq!(err.unwrap().errno(), libc::EINVAL);

        // `max_vcpu_id - 1` is the largest valid id and must succeed.
        let max_vcpu_id = kvm.get_max_vcpu_id();
        vm.create_vcpu((max_vcpu_id - 1) as u64).unwrap();

        // Fails when input `id` = `max_vcpu_id` (first out-of-range value).
        let vcpu_err = vm.create_vcpu(max_vcpu_id as u64).err();
        assert_eq!(vcpu_err.unwrap().errno(), libc::EINVAL);
    }
2800
2801 #[test]
2802 fn test_check_extension() {
2803 let kvm = Kvm::new().unwrap();
2804 let vm = kvm.create_vm().unwrap();
2805 assert!(vm.check_extension(Cap::MpState));
2806 }
2807
2808 #[test]
2809 #[cfg(target_arch = "x86_64")]
2810 #[cfg_attr(not(has_sev), ignore)]
2811 fn test_encrypt_op_sev() {
2812 let kvm = Kvm::new().unwrap();
2813 let vm = kvm.create_vm().unwrap();
2814
2815 let mut init: kvm_sev_cmd = Default::default();
2816 vm.encrypt_op_sev(&mut init).unwrap();
2817 }
2818
    #[test]
    #[cfg(target_arch = "x86_64")]
    #[cfg_attr(not(has_sev), ignore)]
    fn test_register_unregister_enc_memory_region() {
        // The SEV firmware device is needed to drive the launch sequence.
        let sev = OpenOptions::new()
            .read(true)
            .write(true)
            .open("/dev/sev")
            .unwrap();

        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();

        // Perform SEV launch sequence according to
        // https://www.kernel.org/doc/Documentation/virtual/kvm/amd-memory-encryption.rst

        // Step 1: KVM_SEV_INIT via a zeroed command.
        let mut init: kvm_sev_cmd = Default::default();
        vm.encrypt_op_sev(&mut init).unwrap();

        // Step 2: KVM_SEV_LAUNCH_START with default launch parameters,
        // passing the SEV firmware fd along with the command.
        let start_data: kvm_sev_launch_start = Default::default();
        let mut start = kvm_sev_cmd {
            id: sev_cmd_id_KVM_SEV_LAUNCH_START,
            data: &start_data as *const kvm_sev_launch_start as _,
            sev_fd: sev.as_raw_fd() as _,
            ..Default::default()
        };
        vm.encrypt_op_sev(&mut start).unwrap();

        // Map one anonymous page to serve as the guest memory region to be
        // (un)registered below. (Intentionally never munmap'ed: the test
        // process exits right after.)
        let addr = unsafe {
            libc::mmap(
                null_mut(),
                4096,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            )
        };
        assert_ne!(addr, libc::MAP_FAILED);

        // A zeroed (addr = 0, size = 0) region is invalid for both
        // registration and unregistration.
        assert_eq!(
            vm.register_enc_memory_region(&Default::default())
                .unwrap_err()
                .errno(),
            libc::EINVAL
        );
        assert_eq!(
            vm.unregister_enc_memory_region(&Default::default())
                .unwrap_err()
                .errno(),
            libc::EINVAL
        );

        let memory_region = kvm_enc_region {
            addr: addr as _,
            size: 4096,
        };
        // Unregistering a region that was never registered fails ...
        assert_eq!(
            vm.unregister_enc_memory_region(&memory_region)
                .unwrap_err()
                .errno(),
            libc::EINVAL
        );
        // ... while a register/unregister round trip succeeds.
        vm.register_enc_memory_region(&memory_region).unwrap();
        vm.unregister_enc_memory_region(&memory_region).unwrap();
    }
2885
2886 #[test]
2887 #[cfg(target_arch = "aarch64")]
2888 fn test_set_smccc_filter() {
2889 let kvm = Kvm::new().unwrap();
2890 let vm = kvm.create_vm().unwrap();
2891
2892 const PSCI_0_2_FN64_CPU_ON: u32 = PSCI_0_2_FN64_BASE + 3;
2893 let smccc_filter = kvm_bindings::kvm_smccc_filter {
2894 base: PSCI_0_2_FN64_CPU_ON,
2895 nr_functions: 1,
2896 action: kvm_smccc_filter_action_KVM_SMCCC_FILTER_FWD_TO_USER as u8,
2897 pad: [0u8; 15],
2898 };
2899
2900 let dist_attr = kvm_bindings::kvm_device_attr {
2901 group: KVM_ARM_VM_SMCCC_CTRL,
2902 attr: KVM_ARM_VM_SMCCC_FILTER as u64,
2903 addr: &smccc_filter as *const _ as u64,
2904 flags: 0,
2905 };
2906
2907 vm.has_device_attr(&dist_attr).unwrap();
2908 vm.set_device_attr(&dist_attr).unwrap();
2909 }
2910}