pub mod arch;
mod core;
mod ctx;
pub mod driver;
mod error;
mod event;
mod handler;
pub mod os;
mod page;
use std::{cell::RefCell, num::NonZeroUsize, time::Duration};
use isr_macros::Field;
use lru::LruCache;
use zerocopy::{FromBytes, Immutable, IntoBytes};
pub use self::{
arch::{Architecture, Registers},
core::{
AccessContext, AddressContext, Gfn, Hex, MemoryAccess, MemoryAccessOptions, Pa,
TranslationMechanism, Va, VcpuId, View, VmiInfo, VmiVa,
},
ctx::{VmiContext, VmiOsContext, VmiOsState, VmiProber, VmiSession, VmiState},
driver::{
VmiDriver, VmiEventControl, VmiFullDriver, VmiMemory, VmiProtection, VmiQueryProtection,
VmiQueryRegisters, VmiRead, VmiReadAccess, VmiRegisters, VmiSetProtection, VmiSetRegisters,
VmiViewControl, VmiVmControl, VmiWrite, VmiWriteAccess,
},
error::{PageFaults, VmiError},
event::{VmiEvent, VmiEventAction, VmiEventFlags, VmiEventResponse},
handler::VmiHandler,
os::VmiOs,
page::VmiMappedPage,
};
/// Internal cache pair used by [`VmiCore`] to avoid redundant driver calls.
///
/// Both caches use interior mutability (`RefCell`) so that lookups can be
/// performed through `&self` methods on `VmiCore`.
struct Cache {
    // Maps a guest frame number to its mapped page content.
    gfn: RefCell<LruCache<Gfn, VmiMappedPage>>,
    // Maps a virtual-to-physical translation request to its result.
    v2p: RefCell<LruCache<AccessContext, Pa>>,
}
impl Cache {
    /// Default capacity (in entries) shared by both caches.
    const DEFAULT_SIZE: NonZeroUsize = NonZeroUsize::new(8192).unwrap();

    /// Creates a cache pair, each with [`Self::DEFAULT_SIZE`] capacity.
    pub fn new() -> Self {
        let gfn = RefCell::new(LruCache::new(Self::DEFAULT_SIZE));
        let v2p = RefCell::new(LruCache::new(Self::DEFAULT_SIZE));
        Self { gfn, v2p }
    }
}
/// The central entry point for virtual machine introspection.
///
/// Wraps a [`VmiDriver`] and layers caching and convenience accessors
/// (typed reads, string reads, address translation) on top of it.
pub struct VmiCore<Driver>
where
    Driver: VmiDriver,
{
    // The underlying driver performing the actual hypervisor operations.
    driver: Driver,
    // GFN-page and V2P translation caches.
    cache: Cache,
    // Indirection so the GFN cache can be enabled/disabled at runtime
    // by swapping between the cached and uncached implementations.
    read_page_fn: fn(&Self, Gfn) -> Result<VmiMappedPage, VmiError>,
    // Same indirection for the V2P translation cache.
    translate_access_context_fn: fn(&Self, AccessContext) -> Result<Pa, VmiError>,
    // Optional upper bound (in bytes) applied by the `read_string*` helpers.
    read_string_length_limit: RefCell<Option<usize>>,
}
impl<Driver: VmiDriver> VmiCore<Driver> {
    /// Returns a reference to the underlying driver.
    pub fn driver(&self) -> &Driver {
        &self.driver
    }

    /// Returns information about the virtual machine, as reported by the
    /// driver.
    pub fn info(&self) -> Result<VmiInfo, VmiError> {
        self.driver.info()
    }
}
impl<Driver> VmiCore<Driver>
where
    Driver: VmiRead,
{
    /// Creates a new `VmiCore` over `driver` with both caches enabled and
    /// no string-length limit.
    pub fn new(driver: Driver) -> Result<Self, VmiError> {
        Ok(Self {
            driver,
            cache: Cache::new(),
            read_page_fn: Self::read_page_cache,
            translate_access_context_fn: Self::translate_access_context_cache,
            read_string_length_limit: RefCell::new(None),
        })
    }

    /// Replaces the GFN cache with one of `size` entries and enables it.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    pub fn with_gfn_cache(self, size: usize) -> Self {
        Self {
            cache: Cache {
                gfn: RefCell::new(LruCache::new(NonZeroUsize::new(size).unwrap())),
                ..self.cache
            },
            read_page_fn: Self::read_page_cache,
            ..self
        }
    }

    /// Enables the GFN cache.
    pub fn enable_gfn_cache(&mut self) {
        self.read_page_fn = Self::read_page_cache;
    }

    /// Disables the GFN cache; page reads go straight to the driver.
    pub fn disable_gfn_cache(&mut self) {
        self.read_page_fn = Self::read_page_nocache;
    }

    /// Resizes the GFN cache to `size` entries.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    pub fn resize_gfn_cache(&mut self, size: usize) {
        self.cache
            .gfn
            .borrow_mut()
            .resize(NonZeroUsize::new(size).unwrap());
    }

    /// Removes and returns the cached page for `gfn`, if present.
    pub fn flush_gfn_cache_entry(&self, gfn: Gfn) -> Option<VmiMappedPage> {
        self.cache.gfn.borrow_mut().pop(&gfn)
    }

    /// Clears the entire GFN cache.
    pub fn flush_gfn_cache(&self) {
        self.cache.gfn.borrow_mut().clear();
    }

    /// Replaces the V2P cache with one of `size` entries and enables it.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    pub fn with_v2p_cache(self, size: usize) -> Self {
        Self {
            cache: Cache {
                v2p: RefCell::new(LruCache::new(NonZeroUsize::new(size).unwrap())),
                ..self.cache
            },
            translate_access_context_fn: Self::translate_access_context_cache,
            ..self
        }
    }

    /// Enables the V2P translation cache.
    pub fn enable_v2p_cache(&mut self) {
        self.translate_access_context_fn = Self::translate_access_context_cache;
    }

    /// Disables the V2P translation cache; every translation walks the
    /// page tables again.
    pub fn disable_v2p_cache(&mut self) {
        self.translate_access_context_fn = Self::translate_access_context_nocache;
    }

    /// Resizes the V2P cache to `size` entries.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    pub fn resize_v2p_cache(&mut self, size: usize) {
        self.cache
            .v2p
            .borrow_mut()
            .resize(NonZeroUsize::new(size).unwrap());
    }

    /// Removes and returns the cached translation for `ctx`, if present.
    pub fn flush_v2p_cache_entry(&self, ctx: AccessContext) -> Option<Pa> {
        self.cache.v2p.borrow_mut().pop(&ctx)
    }

    /// Clears the entire V2P cache.
    pub fn flush_v2p_cache(&self) {
        self.cache.v2p.borrow_mut().clear();
    }

    /// Sets the default string-length limit (in bytes) used by the
    /// `read_string*` helpers when no explicit limit is given.
    pub fn with_read_string_length_limit(self, limit_in_bytes: usize) -> Self {
        Self {
            read_string_length_limit: RefCell::new(Some(limit_in_bytes)),
            ..self
        }
    }

    /// Returns the current default string-length limit, if any.
    pub fn read_string_length_limit(&self) -> Option<usize> {
        *self.read_string_length_limit.borrow()
    }

    /// Sets the default string-length limit (in bytes).
    pub fn set_read_string_length_limit(&self, limit: usize) {
        *self.read_string_length_limit.borrow_mut() = Some(limit);
    }

    /// Fills `buffer` from guest memory starting at `ctx`, translating and
    /// reading page by page so the access may span page boundaries.
    pub fn read(&self, ctx: impl Into<AccessContext>, buffer: &mut [u8]) -> Result<(), VmiError> {
        let ctx = ctx.into();
        let mut position = 0usize;
        let mut remaining = buffer.len();
        while remaining > 0 {
            let address = self.translate_access_context(ctx + position as u64)?;
            let gfn = Driver::Architecture::gfn_from_pa(address);
            let offset = Driver::Architecture::pa_offset(address) as usize;
            let page = self.read_page(gfn)?;
            let page = &page[offset..];
            // Copy at most to the end of the current page.
            let size = std::cmp::min(remaining, page.len());
            buffer[position..position + size].copy_from_slice(&page[..size]);
            position += size;
            remaining -= size;
        }
        Ok(())
    }

    /// Reads a single byte.
    pub fn read_u8(&self, ctx: impl Into<AccessContext>) -> Result<u8, VmiError> {
        let mut buffer = [0u8; 1];
        self.read(ctx, &mut buffer)?;
        Ok(buffer[0])
    }

    /// Reads a little-endian `u16`.
    pub fn read_u16(&self, ctx: impl Into<AccessContext>) -> Result<u16, VmiError> {
        let mut buffer = [0u8; 2];
        self.read(ctx, &mut buffer)?;
        Ok(u16::from_le_bytes(buffer))
    }

    /// Reads a little-endian `u32`.
    pub fn read_u32(&self, ctx: impl Into<AccessContext>) -> Result<u32, VmiError> {
        let mut buffer = [0u8; 4];
        self.read(ctx, &mut buffer)?;
        Ok(u32::from_le_bytes(buffer))
    }

    /// Reads a little-endian `u64`.
    pub fn read_u64(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
        let mut buffer = [0u8; 8];
        self.read(ctx, &mut buffer)?;
        Ok(u64::from_le_bytes(buffer))
    }

    /// Reads an unsigned integer of `size` bytes (1, 2, 4 or 8),
    /// zero-extended to `u64`.
    ///
    /// # Errors
    ///
    /// Returns [`VmiError::InvalidAddressWidth`] for any other `size`.
    pub fn read_uint(&self, ctx: impl Into<AccessContext>, size: usize) -> Result<u64, VmiError> {
        match size {
            1 => self.read_u8(ctx).map(u64::from),
            2 => self.read_u16(ctx).map(u64::from),
            4 => self.read_u32(ctx).map(u64::from),
            8 => self.read_u64(ctx),
            _ => Err(VmiError::InvalidAddressWidth),
        }
    }

    /// Reads a structure field described by `field` (offset + size)
    /// relative to `ctx`.
    pub fn read_field(
        &self,
        ctx: impl Into<AccessContext>,
        field: &Field,
    ) -> Result<u64, VmiError> {
        self.read_uint(ctx.into() + field.offset(), field.size() as usize)
    }

    /// Reads an address of `address_width` bytes (4 or 8),
    /// zero-extended to `u64`.
    ///
    /// # Errors
    ///
    /// Returns [`VmiError::InvalidAddressWidth`] for any other width.
    pub fn read_address(
        &self,
        ctx: impl Into<AccessContext>,
        address_width: usize,
    ) -> Result<u64, VmiError> {
        match address_width {
            4 => self.read_address32(ctx),
            8 => self.read_address64(ctx),
            _ => Err(VmiError::InvalidAddressWidth),
        }
    }

    /// Reads a 32-bit address, zero-extended to `u64`.
    pub fn read_address32(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
        self.read_u32(ctx).map(u64::from)
    }

    /// Reads a 64-bit address.
    pub fn read_address64(&self, ctx: impl Into<AccessContext>) -> Result<u64, VmiError> {
        self.read_u64(ctx)
    }

    /// Reads a virtual address of `address_width` bytes (4 or 8).
    pub fn read_va(
        &self,
        ctx: impl Into<AccessContext>,
        address_width: usize,
    ) -> Result<Va, VmiError> {
        Ok(Va(self.read_address(ctx, address_width)?))
    }

    /// Reads a 32-bit virtual address.
    pub fn read_va32(&self, ctx: impl Into<AccessContext>) -> Result<Va, VmiError> {
        Ok(Va(self.read_address32(ctx)?))
    }

    /// Reads a 64-bit virtual address.
    pub fn read_va64(&self, ctx: impl Into<AccessContext>) -> Result<Va, VmiError> {
        Ok(Va(self.read_address64(ctx)?))
    }

    /// Reads a NUL-terminated byte string starting at `ctx`, returning at
    /// most `limit` bytes (the terminator is not included).
    ///
    /// Memory is fetched page by page so that an unmapped page past the
    /// terminator is never touched.
    pub fn read_string_bytes_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<Vec<u8>, VmiError> {
        let ctx = ctx.into();

        // First read only up to the end of the page containing `ctx`.
        let mut buffer = vec![
            0u8;
            (Driver::Architecture::PAGE_SIZE - (ctx.address & !Driver::Architecture::PAGE_MASK))
                as usize
        ];
        self.read(ctx, &mut buffer)?;

        if let Some(position) = buffer.iter().position(|&b| b == 0) {
            buffer.truncate(limit.min(position));
            return Ok(buffer);
        }

        // No terminator in the first page; stop early if the limit is
        // already satisfied instead of reading another page.
        if buffer.len() >= limit {
            buffer.truncate(limit);
            return Ok(buffer);
        }

        // Use the architecture page size rather than a hard-coded 4096.
        let mut page = vec![0u8; Driver::Architecture::PAGE_SIZE as usize];
        loop {
            // `buffer.len()` is exactly the number of bytes already read,
            // so it is the correct offset of the next unread byte.
            // (Advancing the address by `buffer.len()` *each iteration*
            // would over-advance once the buffer starts growing.)
            let offset = buffer.len() as u64;
            self.read(ctx + offset, &mut page)?;

            if let Some(position) = page.iter().position(|&b| b == 0) {
                buffer.extend_from_slice(&page[..position]);
                if buffer.len() >= limit {
                    buffer.truncate(limit);
                }
                break;
            }

            buffer.extend_from_slice(&page);
            if buffer.len() >= limit {
                buffer.truncate(limit);
                break;
            }
        }
        Ok(buffer)
    }

    /// Reads a NUL-terminated byte string, bounded by the configured
    /// default limit (unbounded if none is set).
    pub fn read_string_bytes(&self, ctx: impl Into<AccessContext>) -> Result<Vec<u8>, VmiError> {
        self.read_string_bytes_limited(
            ctx,
            self.read_string_length_limit().unwrap_or(usize::MAX),
        )
    }

    /// Reads a NUL-terminated little-endian UTF-16 string starting at
    /// `ctx`, returning at most `limit` *bytes* worth of code units (the
    /// terminator is not included).
    pub fn read_string_utf16_bytes_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<Vec<u16>, VmiError> {
        let ctx = ctx.into();

        // First read only up to the end of the page containing `ctx`.
        let mut buffer = vec![
            0u8;
            (Driver::Architecture::PAGE_SIZE - (ctx.address & !Driver::Architecture::PAGE_MASK))
                as usize
        ];
        self.read(ctx, &mut buffer)?;

        let terminator = buffer
            .chunks_exact(2)
            .position(|chunk| chunk[0] == 0 && chunk[1] == 0);

        if let Some(position) = terminator {
            buffer.truncate(limit.min(position * 2));
        }
        else if buffer.len() >= limit {
            // No terminator in the first page and the limit is already
            // satisfied; don't read another page.
            buffer.truncate(limit);
        }
        else {
            // Use the architecture page size rather than a hard-coded 4096.
            let mut page = vec![0u8; Driver::Architecture::PAGE_SIZE as usize];
            loop {
                // `buffer.len()` is exactly the number of bytes already
                // read, so it is the offset of the next unread byte.
                let offset = buffer.len() as u64;
                self.read(ctx + offset, &mut page)?;

                // A code unit may straddle the boundary between the bytes
                // already buffered and this page (when the buffered length
                // is odd), so scan the combined buffer starting at the
                // last even index instead of scanning the page alone.
                let scan_from = buffer.len() & !1usize;
                buffer.extend_from_slice(&page);

                let terminator = buffer[scan_from..]
                    .chunks_exact(2)
                    .position(|chunk| chunk[0] == 0 && chunk[1] == 0);

                if let Some(position) = terminator {
                    buffer.truncate(limit.min(scan_from + position * 2));
                    break;
                }

                if buffer.len() >= limit {
                    buffer.truncate(limit);
                    break;
                }
            }
        }

        Ok(buffer
            .chunks_exact(2)
            .map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]]))
            .collect())
    }

    /// Reads a NUL-terminated UTF-16 string, bounded by the configured
    /// default limit (unbounded if none is set).
    pub fn read_string_utf16_bytes(
        &self,
        ctx: impl Into<AccessContext>,
    ) -> Result<Vec<u16>, VmiError> {
        self.read_string_utf16_bytes_limited(
            ctx,
            self.read_string_length_limit().unwrap_or(usize::MAX),
        )
    }

    /// Reads a NUL-terminated string and converts it to a `String`
    /// (lossy UTF-8), bounded by `limit` bytes.
    pub fn read_string_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<String, VmiError> {
        Ok(String::from_utf8_lossy(&self.read_string_bytes_limited(ctx, limit)?).into())
    }

    /// Reads a NUL-terminated string (lossy UTF-8), bounded by the
    /// configured default limit.
    pub fn read_string(&self, ctx: impl Into<AccessContext>) -> Result<String, VmiError> {
        self.read_string_limited(
            ctx,
            self.read_string_length_limit().unwrap_or(usize::MAX),
        )
    }

    /// Reads a NUL-terminated UTF-16 string and converts it to a `String`
    /// (lossy), bounded by `limit` bytes.
    pub fn read_string_utf16_limited(
        &self,
        ctx: impl Into<AccessContext>,
        limit: usize,
    ) -> Result<String, VmiError> {
        Ok(String::from_utf16_lossy(
            &self.read_string_utf16_bytes_limited(ctx, limit)?,
        ))
    }

    /// Reads a NUL-terminated UTF-16 string (lossy), bounded by the
    /// configured default limit.
    pub fn read_string_utf16(&self, ctx: impl Into<AccessContext>) -> Result<String, VmiError> {
        self.read_string_utf16_limited(
            ctx,
            self.read_string_length_limit().unwrap_or(usize::MAX),
        )
    }

    /// Reads a value of type `T` by filling its zeroed byte representation
    /// from guest memory.
    pub fn read_struct<T>(&self, ctx: impl Into<AccessContext>) -> Result<T, VmiError>
    where
        T: FromBytes + IntoBytes,
    {
        let mut result = T::new_zeroed();
        self.read(ctx, result.as_mut_bytes())?;
        Ok(result)
    }

    /// Translates an [`AddressContext`] to a physical address.
    pub fn translate_address(&self, ctx: impl Into<AddressContext>) -> Result<Pa, VmiError> {
        self.translate_access_context(AccessContext::from(ctx.into()))
    }

    /// Translates an [`AccessContext`] to a physical address, using the
    /// V2P cache when enabled.
    pub fn translate_access_context(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
        (self.translate_access_context_fn)(self, ctx)
    }

    /// Reads the page for `gfn`, using the GFN cache when enabled.
    pub fn read_page(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        (self.read_page_fn)(self, gfn)
    }

    // Uncached page read: always goes to the driver.
    fn read_page_nocache(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        self.driver.read_page(gfn)
    }

    // Cached page read: consults the LRU cache, falling back to the driver
    // on a miss.
    fn read_page_cache(&self, gfn: Gfn) -> Result<VmiMappedPage, VmiError> {
        let mut cache = self.cache.gfn.borrow_mut();
        let value = cache.try_get_or_insert(gfn, || self.read_page_nocache(gfn))?;
        Ok(value.clone())
    }

    // Uncached translation: direct contexts are passed through; paging
    // contexts are walked via the architecture's translation routine.
    fn translate_access_context_nocache(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
        Ok(match ctx.mechanism {
            TranslationMechanism::Direct => Pa(ctx.address),
            TranslationMechanism::Paging { root } => match root {
                Some(root) => <Driver::Architecture as Architecture>::translate_address(
                    self,
                    ctx.address.into(),
                    root,
                )?,
                None => return Err(VmiError::RootNotPresent),
            },
        })
    }

    // Cached translation: consults the V2P LRU cache, falling back to a
    // page-table walk on a miss.
    //
    // NOTE(review): the fallback closure runs while `v2p` is mutably
    // borrowed; `Architecture::translate_address` must not re-enter this
    // cache or the `RefCell` would panic — confirm against the
    // architecture implementations.
    fn translate_access_context_cache(&self, ctx: AccessContext) -> Result<Pa, VmiError> {
        let mut cache = self.cache.v2p.borrow_mut();
        let value = cache.try_get_or_insert(ctx, || self.translate_access_context_nocache(ctx))?;
        Ok(*value)
    }
}
impl<Driver> VmiCore<Driver>
where
    Driver: VmiRead + VmiWrite,
{
    /// Writes `buffer` to guest memory starting at `ctx`, translating and
    /// writing page by page so the access may span page boundaries.
    pub fn write(&self, ctx: impl Into<AccessContext>, buffer: &[u8]) -> Result<(), VmiError> {
        let ctx = ctx.into();
        let mut written = 0usize;

        while written < buffer.len() {
            // Translate the current cursor to a physical address.
            let pa = self.translate_access_context(ctx + written as u64)?;
            let gfn = Driver::Architecture::gfn_from_pa(pa);
            let offset = Driver::Architecture::pa_offset(pa);

            // Never cross a page boundary in a single driver call.
            let room = (Driver::Architecture::PAGE_SIZE - offset) as usize;
            let chunk = std::cmp::min(buffer.len() - written, room);

            self.driver
                .write_page(gfn, offset, &buffer[written..written + chunk])?;
            written += chunk;
        }

        Ok(())
    }

    /// Writes a single byte.
    pub fn write_u8(&self, ctx: impl Into<AccessContext>, value: u8) -> Result<(), VmiError> {
        let bytes = value.to_le_bytes();
        self.write(ctx, &bytes)
    }

    /// Writes a little-endian `u16`.
    pub fn write_u16(&self, ctx: impl Into<AccessContext>, value: u16) -> Result<(), VmiError> {
        let bytes = value.to_le_bytes();
        self.write(ctx, &bytes)
    }

    /// Writes a little-endian `u32`.
    pub fn write_u32(&self, ctx: impl Into<AccessContext>, value: u32) -> Result<(), VmiError> {
        let bytes = value.to_le_bytes();
        self.write(ctx, &bytes)
    }

    /// Writes a little-endian `u64`.
    pub fn write_u64(&self, ctx: impl Into<AccessContext>, value: u64) -> Result<(), VmiError> {
        let bytes = value.to_le_bytes();
        self.write(ctx, &bytes)
    }

    /// Writes `value`'s byte representation to guest memory.
    pub fn write_struct<T>(&self, ctx: impl Into<AccessContext>, value: T) -> Result<(), VmiError>
    where
        T: IntoBytes + Immutable,
    {
        self.write(ctx, value.as_bytes())
    }
}
impl<Driver> VmiCore<Driver>
where
    Driver: VmiQueryProtection,
{
    /// Returns the memory access permissions for `gfn` in `view`.
    ///
    /// Delegates directly to the driver.
    pub fn memory_access(&self, gfn: Gfn, view: View) -> Result<MemoryAccess, VmiError> {
        self.driver.memory_access(gfn, view)
    }
}
impl<Driver> VmiCore<Driver>
where
    Driver: VmiSetProtection,
{
    /// Sets the memory access permissions for `gfn` in `view`.
    ///
    /// Delegates directly to the driver.
    pub fn set_memory_access(
        &self,
        gfn: Gfn,
        view: View,
        access: MemoryAccess,
    ) -> Result<(), VmiError> {
        self.driver.set_memory_access(gfn, view, access)
    }

    /// Sets the memory access permissions for `gfn` in `view` with
    /// additional driver-specific `options`.
    ///
    /// Delegates directly to the driver.
    pub fn set_memory_access_with_options(
        &self,
        gfn: Gfn,
        view: View,
        access: MemoryAccess,
        options: MemoryAccessOptions,
    ) -> Result<(), VmiError> {
        self.driver
            .set_memory_access_with_options(gfn, view, access, options)
    }
}
impl<Driver> VmiCore<Driver>
where
    Driver: VmiQueryRegisters,
{
    /// Returns the register state of the given virtual CPU.
    ///
    /// Delegates directly to the driver.
    pub fn registers(
        &self,
        vcpu: VcpuId,
    ) -> Result<<Driver::Architecture as Architecture>::Registers, VmiError> {
        self.driver.registers(vcpu)
    }
}
impl<Driver> VmiCore<Driver>
where
    Driver: VmiSetRegisters,
{
    /// Sets the register state of the given virtual CPU.
    ///
    /// Delegates directly to the driver.
    pub fn set_registers(
        &self,
        vcpu: VcpuId,
        registers: <Driver::Architecture as Architecture>::Registers,
    ) -> Result<(), VmiError> {
        self.driver.set_registers(vcpu, registers)
    }
}
impl<Driver> VmiCore<Driver>
where
    Driver: VmiViewControl,
{
    /// Returns the default memory view.
    pub fn default_view(&self) -> View {
        self.driver.default_view()
    }

    /// Creates a new memory view with the given default access permissions.
    pub fn create_view(&self, default_access: MemoryAccess) -> Result<View, VmiError> {
        self.driver.create_view(default_access)
    }

    /// Destroys a memory view.
    pub fn destroy_view(&self, view: View) -> Result<(), VmiError> {
        self.driver.destroy_view(view)
    }

    /// Switches execution to the given memory view.
    pub fn switch_to_view(&self, view: View) -> Result<(), VmiError> {
        self.driver.switch_to_view(view)
    }

    /// Remaps `old_gfn` to `new_gfn` within `view`.
    pub fn change_view_gfn(&self, view: View, old_gfn: Gfn, new_gfn: Gfn) -> Result<(), VmiError> {
        self.driver.change_view_gfn(view, old_gfn, new_gfn)
    }

    /// Resets any remapping of `gfn` within `view`.
    pub fn reset_view_gfn(&self, view: View, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.reset_view_gfn(view, gfn)
    }
}
impl<Driver> VmiCore<Driver>
where
    Driver: VmiEventControl,
{
    /// Enables monitoring of the given architecture-specific event source.
    pub fn monitor_enable(
        &self,
        option: <Driver::Architecture as Architecture>::EventMonitor,
    ) -> Result<(), VmiError> {
        self.driver.monitor_enable(option)
    }

    /// Disables monitoring of the given architecture-specific event source.
    pub fn monitor_disable(
        &self,
        option: <Driver::Architecture as Architecture>::EventMonitor,
    ) -> Result<(), VmiError> {
        self.driver.monitor_disable(option)
    }

    /// Returns the number of events waiting to be processed.
    pub fn events_pending(&self) -> usize {
        self.driver.events_pending()
    }

    /// Returns the time spent in event processing, as reported by the
    /// driver.
    pub fn event_processing_overhead(&self) -> Duration {
        self.driver.event_processing_overhead()
    }

    /// Waits up to `timeout` for events, invoking `handler` for each one;
    /// the handler's response is passed back to the driver.
    pub fn wait_for_event(
        &self,
        timeout: Duration,
        handler: impl FnMut(&VmiEvent<Driver::Architecture>) -> VmiEventResponse<Driver::Architecture>,
    ) -> Result<(), VmiError> {
        self.driver.wait_for_event(timeout, handler)
    }
}
impl<Driver> VmiCore<Driver>
where
    Driver: VmiVmControl,
{
    /// Pauses the virtual machine.
    pub fn pause(&self) -> Result<(), VmiError> {
        self.driver.pause()
    }

    /// Resumes the virtual machine.
    pub fn resume(&self) -> Result<(), VmiError> {
        self.driver.resume()
    }

    /// Pauses the virtual machine and returns a guard that resumes it
    /// when dropped.
    pub fn pause_guard(&self) -> Result<VmiPauseGuard<'_, Driver>, VmiError> {
        VmiPauseGuard::new(&self.driver)
    }

    /// Allocates a new guest frame and returns its number.
    pub fn allocate_gfn(&self) -> Result<Gfn, VmiError> {
        self.driver.allocate_gfn()
    }

    /// Allocates the specific guest frame `gfn`.
    pub fn allocate_gfn_at(&self, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.allocate_gfn_at(gfn)
    }

    /// Frees a previously allocated guest frame.
    pub fn free_gfn(&self, gfn: Gfn) -> Result<(), VmiError> {
        self.driver.free_gfn(gfn)
    }

    /// Injects an architecture-specific interrupt into the given
    /// virtual CPU.
    pub fn inject_interrupt(
        &self,
        vcpu: VcpuId,
        interrupt: <Driver::Architecture as Architecture>::Interrupt,
    ) -> Result<(), VmiError> {
        self.driver.inject_interrupt(vcpu, interrupt)
    }

    /// Resets the driver's introspection state for the virtual machine.
    pub fn reset_state(&self) -> Result<(), VmiError> {
        self.driver.reset_state()
    }
}
/// RAII guard that keeps the virtual machine paused for its lifetime.
///
/// Created by [`VmiPauseGuard::new`]; the VM is resumed on drop.
pub struct VmiPauseGuard<'a, Driver>
where
    Driver: VmiVmControl,
{
    // Driver used to resume the VM when the guard is dropped.
    driver: &'a Driver,
}
impl<'a, Driver> VmiPauseGuard<'a, Driver>
where
    Driver: VmiVmControl,
{
    /// Pauses the virtual machine and returns the guard.
    ///
    /// # Errors
    ///
    /// Propagates any error from the driver's `pause`; on error no guard
    /// is created and nothing will be resumed.
    pub fn new(driver: &'a Driver) -> Result<Self, VmiError> {
        driver.pause()?;
        Ok(Self { driver })
    }
}
impl<Driver> Drop for VmiPauseGuard<'_, Driver>
where
    Driver: VmiVmControl,
{
    /// Resumes the virtual machine; a failure cannot be propagated from
    /// `drop`, so it is only logged.
    fn drop(&mut self) {
        if let Err(err) = self.driver.resume() {
            tracing::error!(%err, "Failed to resume the virtual machine");
        }
    }
}