use super::{DeviceStatus, DeviceType, Transport};
use crate::{
Error, PhysAddr,
queue::{Descriptor, fake_read_write_queue},
transport::InterruptStatus,
};
use alloc::{sync::Arc, vec::Vec};
use core::{
fmt::{self, Debug, Formatter},
sync::atomic::{AtomicBool, Ordering},
time::Duration,
};
use std::{sync::Mutex, thread};
use zerocopy::{FromBytes, Immutable, IntoBytes};
/// A fake implementation of [`Transport`] for unit tests, backed by shared in-memory [`State`].
#[derive(Debug)]
pub struct FakeTransport<C> {
    /// The device type this fake transport reports to the driver.
    pub device_type: DeviceType,
    /// The maximum queue size reported for every queue index.
    pub max_queue_size: u32,
    /// The feature bits the fake device offers to the driver.
    pub device_features: u64,
    /// Shared mutable device state, so tests can observe and manipulate the
    /// device side while the driver under test holds the transport.
    /// `C` is the device-specific config space struct.
    pub state: Arc<Mutex<State<C>>>,
}
impl<C: FromBytes + Immutable + IntoBytes> Transport for FakeTransport<C> {
fn device_type(&self) -> DeviceType {
self.device_type
}
fn read_device_features(&mut self) -> u64 {
self.device_features
}
fn write_driver_features(&mut self, driver_features: u64) {
self.state.lock().unwrap().driver_features = driver_features;
}
fn max_queue_size(&mut self, _queue: u16) -> u32 {
self.max_queue_size
}
fn notify(&mut self, queue: u16) {
self.state.lock().unwrap().queues[queue as usize]
.notified
.store(true, Ordering::SeqCst);
}
fn get_status(&self) -> DeviceStatus {
self.state.lock().unwrap().status
}
fn set_status(&mut self, status: DeviceStatus) {
self.state.lock().unwrap().status = status;
}
fn set_guest_page_size(&mut self, guest_page_size: u32) {
self.state.lock().unwrap().guest_page_size = guest_page_size;
}
fn requires_legacy_layout(&self) -> bool {
false
}
fn queue_set(
&mut self,
queue: u16,
size: u32,
descriptors: PhysAddr,
driver_area: PhysAddr,
device_area: PhysAddr,
) {
let mut state = self.state.lock().unwrap();
state.queues[queue as usize].size = size;
state.queues[queue as usize].descriptors = descriptors;
state.queues[queue as usize].driver_area = driver_area;
state.queues[queue as usize].device_area = device_area;
}
fn queue_unset(&mut self, queue: u16) {
let mut state = self.state.lock().unwrap();
state.queues[queue as usize].size = 0;
state.queues[queue as usize].descriptors = 0;
state.queues[queue as usize].driver_area = 0;
state.queues[queue as usize].device_area = 0;
}
fn queue_used(&mut self, queue: u16) -> bool {
self.state.lock().unwrap().queues[queue as usize].descriptors != 0
}
fn ack_interrupt(&mut self) -> InterruptStatus {
let mut state = self.state.lock().unwrap();
let pending = state.interrupt_pending;
if pending {
state.interrupt_pending = false;
return InterruptStatus::QUEUE_INTERRUPT;
}
InterruptStatus::empty()
}
fn read_config_generation(&self) -> u32 {
self.state.lock().unwrap().config_generation
}
fn read_config_space<T: FromBytes>(&self, offset: usize) -> Result<T, Error> {
assert!(
align_of::<T>() <= 4,
"Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
align_of::<T>()
);
assert!(offset % align_of::<T>() == 0);
if size_of::<C>() < offset + size_of::<T>() {
Err(Error::ConfigSpaceTooSmall)
} else {
let state = self.state.lock().unwrap();
let bytes = &state.config_space.as_bytes()[offset..offset + size_of::<T>()];
Ok(T::read_from_bytes(bytes).unwrap())
}
}
fn write_config_space<T: Immutable + IntoBytes>(
&mut self,
offset: usize,
value: T,
) -> Result<(), Error> {
assert!(
align_of::<T>() <= 4,
"Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
align_of::<T>()
);
assert!(offset % align_of::<T>() == 0);
if size_of::<C>() < offset + size_of::<T>() {
Err(Error::ConfigSpaceTooSmall)
} else {
let mut state = self.state.lock().unwrap();
let bytes = &mut state.config_space.as_mut_bytes()[offset..offset + size_of::<T>()];
value.write_to(bytes).unwrap();
Ok(())
}
}
}
/// The mutable device-side state shared between a [`FakeTransport`] and the test code.
pub struct State<C> {
    /// The device status register last written via `set_status`.
    pub status: DeviceStatus,
    /// The feature bits last written by the driver via `write_driver_features`.
    pub driver_features: u64,
    /// The guest page size last written via `set_guest_page_size`.
    pub guest_page_size: u32,
    /// Whether an interrupt is pending; cleared by `ack_interrupt`.
    pub interrupt_pending: bool,
    /// Per-queue registers and notification flags, indexed by queue number.
    pub queues: Vec<QueueStatus>,
    /// The config generation counter returned by `read_config_generation`.
    pub config_generation: u32,
    /// The device-specific configuration space contents.
    pub config_space: C,
}
impl<C> Debug for State<C> {
    /// Formats every field, eliding `config_space` (which need not be `Debug`)
    /// behind a `"..."` placeholder.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let mut builder = f.debug_struct("State");
        builder.field("status", &self.status);
        builder.field("driver_features", &self.driver_features);
        builder.field("guest_page_size", &self.guest_page_size);
        builder.field("interrupt_pending", &self.interrupt_pending);
        builder.field("queues", &self.queues);
        builder.field("config_generation", &self.config_generation);
        builder.field("config_space", &"...");
        builder.finish()
    }
}
impl<C> State<C> {
    /// Creates a fresh state with the given queues and config space; every
    /// other field starts at its zero/empty value.
    pub const fn new(queues: Vec<QueueStatus>, config_space: C) -> Self {
        Self {
            queues,
            config_space,
            status: DeviceStatus::empty(),
            driver_features: 0,
            guest_page_size: 0,
            interrupt_pending: false,
            config_generation: 0,
        }
    }

    /// Simulates the device writing `data` to the given queue, which must
    /// already have its descriptor address configured.
    ///
    /// The request buffers popped from the queue are asserted to be empty.
    ///
    /// Panics if nothing could be read from or written to the queue.
    pub fn write_to_queue<const QUEUE_SIZE: usize>(&mut self, queue_index: u16, data: &[u8]) {
        let queue = &self.queues[usize::from(queue_index)];
        assert_ne!(queue.descriptors, 0);
        let handled = fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            |input| {
                // The driver is expected not to have sent any request payload.
                assert_eq!(input, Vec::new());
                data.to_owned()
            },
        );
        assert!(handled);
    }

    /// Simulates the device reading and returning the driver's request from
    /// the given queue, which must already be configured. Nothing is written
    /// back to the device-writable buffers.
    ///
    /// Panics if nothing could be read from the queue.
    pub fn read_from_queue<const QUEUE_SIZE: usize>(&mut self, queue_index: u16) -> Vec<u8> {
        let queue = &self.queues[usize::from(queue_index)];
        assert_ne!(queue.descriptors, 0);
        // Capture the request payload out of the handler closure.
        let mut captured = None;
        let handled = fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            |input| {
                captured = Some(input);
                Vec::new()
            },
        );
        assert!(handled);
        captured.unwrap()
    }

    /// Simulates the device popping one request from the given queue, passing
    /// it to `handler`, and writing the handler's result back as the response.
    ///
    /// Returns whether a request was actually available to process.
    pub fn read_write_queue<const QUEUE_SIZE: usize>(
        &mut self,
        queue_index: u16,
        handler: impl FnOnce(Vec<u8>) -> Vec<u8>,
    ) -> bool {
        let queue = &self.queues[usize::from(queue_index)];
        assert_ne!(queue.descriptors, 0);
        fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            handler,
        )
    }

    /// Blocks (polling every 10 ms) until the driver notifies the given queue,
    /// then clears the notified flag.
    pub fn wait_until_queue_notified(state: &Mutex<Self>, queue_index: u16) {
        loop {
            if Self::poll_queue_notified(state, queue_index) {
                break;
            }
            thread::sleep(Duration::from_millis(10));
        }
    }

    /// Checks and clears the notified flag for the given queue, returning
    /// whether the driver notified it since the last poll.
    pub fn poll_queue_notified(state: &Mutex<Self>, queue_index: u16) -> bool {
        let guard = state.lock().unwrap();
        guard.queues[usize::from(queue_index)]
            .notified
            .swap(false, Ordering::SeqCst)
    }
}
/// The fake device-side view of a single virtqueue's registers.
#[derive(Debug, Default)]
pub struct QueueStatus {
    /// The queue size the driver configured (0 when unset).
    pub size: u32,
    /// Physical address of the descriptor table (0 when unset).
    pub descriptors: PhysAddr,
    /// Physical address of the driver area / available ring (0 when unset).
    pub driver_area: PhysAddr,
    /// Physical address of the device area / used ring (0 when unset).
    pub device_area: PhysAddr,
    /// Set when the driver notifies this queue; cleared when the test polls it.
    pub notified: AtomicBool,
}