//! Utilities for mocking the driver side of a split virtio queue in tests.

use std::fmt::{self, Display};
use std::marker::PhantomData;
use std::mem::size_of;

use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_INDIRECT, VRING_DESC_F_NEXT};
use vm_memory::{
    Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError, GuestUsize,
};

use crate::defs::{VIRTQ_AVAIL_ELEMENT_SIZE, VIRTQ_AVAIL_RING_HEADER_SIZE};
use crate::{
    desc::{
        split::{Descriptor as SplitDescriptor, VirtqUsedElem},
        RawDescriptor,
    },
    DescriptorChain, Error, Queue, QueueOwnedT, QueueT,
};
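
/// Errors returned by the mock split-queue helpers in this module.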
#[derive(Debug)]
pub enum MockError {
InvalidQueueParams(Error),
InvalidIndex,
InvalidNextAvail,
GuestMem(GuestMemoryError),
}
impl Display for MockError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::MockError::*;
match self {
InvalidQueueParams(_) => write!(f, "cannot create queue due to invalid parameter"),
InvalidIndex => write!(
f,
"invalid index for pointing to an address in a region when defining a Ref object"
),
InvalidNextAvail => write!(
f,
"invalid next available descriptor chain head in the queue"
),
GuestMem(e) => write!(f, "guest memory error: {e}"),
}
}
}
impl std::error::Error for MockError {}
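
/// A typed reference to an object of type `T` located at `addr` in the guest memory `mem`.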
pub struct Ref<'a, M, T> {
mem: &'a M,
addr: GuestAddress,
phantom: PhantomData<*const T>,
}
impl<'a, M: GuestMemory, T: ByteValued> Ref<'a, M, T> {
fn new(mem: &'a M, addr: GuestAddress) -> Self {
Ref {
mem,
addr,
phantom: PhantomData,
}
}
    /// Read the `T` from guest memory, panicking on an invalid access (test-only helper).
    pub fn load(&self) -> T {
        self.mem.read_obj(self.addr).unwrap()
    }

    /// Write `val` to guest memory, panicking on an invalid access (test-only helper).
    pub fn store(&self, val: T) {
        self.mem.write_obj(val, self.addr).unwrap()
    }
}
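
/// A reference to a contiguous array of `len` objects of type `T` in guest memory.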
pub struct ArrayRef<'a, M, T> {
mem: &'a M,
addr: GuestAddress,
len: usize,
phantom: PhantomData<*const T>,
}
impl<'a, M: GuestMemory, T: ByteValued> ArrayRef<'a, M, T> {
fn new(mem: &'a M, addr: GuestAddress, len: usize) -> Self {
ArrayRef {
mem,
addr,
len,
phantom: PhantomData,
}
}
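
    /// Return a `Ref` to the element at `index`, or `MockError::InvalidIndex` when out of range.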
pub fn ref_at(&self, index: usize) -> Result<Ref<'a, M, T>, MockError> {
if index >= self.len {
return Err(MockError::InvalidIndex);
}
let addr = self
.addr
.checked_add((index * size_of::<T>()) as u64)
.unwrap();
Ok(Ref::new(self.mem, addr))
}
}
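
/// Represents one of the two rings of a split queue; the available and used rings differ
/// only in their element type `T` (`u16` descriptor indices vs. `VirtqUsedElem`).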
pub struct SplitQueueRing<'a, M, T: ByteValued> {
flags: Ref<'a, M, u16>,
idx: Ref<'a, M, u16>,
ring: ArrayRef<'a, M, T>,
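    // `used_event` for the available ring, `avail_event` for the used ring.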
event: Ref<'a, M, u16>,
}
impl<'a, M: GuestMemory, T: ByteValued> SplitQueueRing<'a, M, T> {
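    /// Create a ring at `base` with room for `len` elements, zero-initializing its `flags`,
    /// `idx`, and `event` fields in guest memory.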
pub fn new(mem: &'a M, base: GuestAddress, len: u16) -> Self {
        // Per the split-ring layout, the `event` word lives right after the ring entries,
        // whose element type is `T` (`u16` for the available ring, `VirtqUsedElem` for the
        // used ring), so the offset must be computed from `size_of::<T>()`.
        let event_addr = base
            .checked_add(4)
            .and_then(|a| a.checked_add((size_of::<T>() * len as usize) as u64))
            .unwrap();
let split_queue_ring = SplitQueueRing {
flags: Ref::new(mem, base),
idx: Ref::new(mem, base.checked_add(2).unwrap()),
ring: ArrayRef::new(mem, base.checked_add(4).unwrap(), len as usize),
event: Ref::new(mem, event_addr),
};
split_queue_ring.flags.store(0);
split_queue_ring.idx.store(0);
split_queue_ring.event.store(0);
split_queue_ring
}
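
    /// Return the guest address where the ring entries start (just past `flags` and `idx`).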
pub fn start(&self) -> GuestAddress {
self.ring.addr
}
    /// Return the first address past the end of the ring, including the trailing `event` word.
    pub fn end(&self) -> GuestAddress {
        // `ring.len` counts elements, so convert it to bytes before adding, and account
        // for the trailing `u16` event field.
        self.start()
            .checked_add((self.ring.len * size_of::<T>()) as GuestUsize)
            .and_then(|a| a.checked_add(size_of::<u16>() as GuestUsize))
            .unwrap()
    }
pub fn idx(&self) -> &Ref<'a, M, u16> {
&self.idx
}
pub fn ring(&self) -> &ArrayRef<'a, M, T> {
&self.ring
}
}
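
/// The available ring of a split queue; its elements are `u16` descriptor indices.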
pub type AvailRing<'a, M> = SplitQueueRing<'a, M, u16>;
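/// The used ring of a split queue; its elements are `VirtqUsedElem` entries.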
pub type UsedRing<'a, M> = SplitQueueRing<'a, M, VirtqUsedElem>;
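
/// A mock descriptor table with a simple free-list allocator used for building chains.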
pub struct DescriptorTable<'a, M> {
table: ArrayRef<'a, M, RawDescriptor>,
len: u16,
free_descriptors: Vec<u16>,
}
impl<'a, M: GuestMemory> DescriptorTable<'a, M> {
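    /// Create a view over `len` descriptors at `addr`, with every descriptor initially free.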
pub fn new(mem: &'a M, addr: GuestAddress, len: u16) -> Self {
let table = ArrayRef::new(mem, addr, len as usize);
let free_descriptors = (0..len).rev().collect();
DescriptorTable {
table,
len,
free_descriptors,
}
}
pub fn load(&self, index: u16) -> Result<RawDescriptor, MockError> {
self.table
.ref_at(index as usize)
.map(|load_ref| load_ref.load())
}
pub fn store(&self, index: u16, value: RawDescriptor) -> Result<(), MockError> {
self.table
.ref_at(index as usize)
.map(|store_ref| store_ref.store(value))
}
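
    /// Return the size of the descriptor table in bytes.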
pub fn total_size(&self) -> u64 {
(self.len as usize * size_of::<RawDescriptor>()) as u64
}
    /// Allocate `len` free descriptors, link them into a chain, and return the head index.
    pub fn build_chain(&mut self, len: u16) -> Result<u16, MockError> {
        // The free list is kept in reverse order, so the lowest free indices sit at its tail.
        let indices = self
            .free_descriptors
            .iter()
            .copied()
            .rev()
            .take(usize::from(len))
            .collect::<Vec<_>>();
        assert_eq!(indices.len(), len as usize);
        // Remove the consumed descriptors from the free list, so that subsequent calls hand
        // out fresh indices instead of rebuilding a chain over the same descriptors.
        self.free_descriptors
            .truncate(self.free_descriptors.len() - usize::from(len));

        for (pos, index_value) in indices.iter().copied().enumerate() {
            // Addresses and lengths are constant placeholders for now.
            let mut desc = SplitDescriptor::new(0x1000, 0x1000, 0, 0);
            if pos < indices.len() - 1 {
                // Not the last descriptor of the chain, so point it at the next one.
                desc.set_flags(VRING_DESC_F_NEXT as u16);
                desc.set_next(indices[pos + 1]);
            } else {
                desc.set_flags(0);
            }

            let desc = RawDescriptor::from(desc);
            self.store(index_value, desc)?;
        }

        Ok(indices[0])
    }
}
trait GuestAddressExt {
    fn align_up(&self, x: GuestUsize) -> GuestAddress;
}

impl GuestAddressExt for GuestAddress {
    fn align_up(&self, x: GuestUsize) -> GuestAddress {
        // Round the address up to the next multiple of `x`, which must be a power of two.
        debug_assert!(x.is_power_of_two());
        Self((self.0 + (x - 1)) & !(x - 1))
    }
}
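
/// A mock driver-side view of a split virtio queue: the descriptor table, the available
/// ring, and the used ring, all laid out in a caller-provided guest memory object.
///
/// A minimal usage sketch, not compiled here; it assumes `vm-memory`'s `backend-mmap`
/// feature (for `GuestMemoryMmap`) and that this module is exported as `virtio_queue::mock`:
///
/// ```ignore
/// use virtio_queue::mock::MockSplitQueue;
/// use virtio_queue::{Queue, QueueOwnedT};
/// use vm_memory::{GuestAddress, GuestMemoryMmap};
///
/// let mem: GuestMemoryMmap =
///     GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1_0000)]).unwrap();
/// let mut vq = MockSplitQueue::new(&mem, 16);
///
/// // Publish a three-descriptor chain through the available ring.
/// vq.add_chain(3).unwrap();
///
/// // Build the device-side queue and pop the chain the "driver" just added.
/// let mut queue = vq.create_queue::<Queue>().unwrap();
/// let chain = queue.iter(&mem).unwrap().next().unwrap();
/// assert_eq!(chain.head_index(), 0);
/// ```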
pub struct MockSplitQueue<'a, M> {
mem: &'a M,
len: u16,
desc_table_addr: GuestAddress,
desc_table: DescriptorTable<'a, M>,
avail_addr: GuestAddress,
avail: AvailRing<'a, M>,
used_addr: GuestAddress,
used: UsedRing<'a, M>,
indirect_addr: GuestAddress,
}
impl<'a, M: GuestMemory> MockSplitQueue<'a, M> {
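    /// Create a mock queue with `len` descriptors, laid out from guest physical address 0.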
pub fn new(mem: &'a M, len: u16) -> Self {
Self::create(mem, GuestAddress(0), len)
}
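
    /// Create a mock queue at `start`, placing the descriptor table, available ring, and used
    /// ring one after another, with the alignments the virtio spec requires for a split queue.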
pub fn create(mem: &'a M, start: GuestAddress, len: u16) -> Self {
const AVAIL_ALIGN: GuestUsize = 2;
const USED_ALIGN: GuestUsize = 4;
let desc_table_addr = start;
let desc_table = DescriptorTable::new(mem, desc_table_addr, len);
        // Each split descriptor occupies 16 bytes, so the available ring starts right
        // after the `len`-entry descriptor table.
        let avail_addr = start
            .checked_add(16 * len as GuestUsize)
            .unwrap()
            .align_up(AVAIL_ALIGN);
let avail = AvailRing::new(mem, avail_addr, len);
let used_addr = avail.end().align_up(USED_ALIGN);
let used = UsedRing::new(mem, used_addr, len);
let indirect_addr = GuestAddress(0x3000_0000);
MockSplitQueue {
mem,
len,
desc_table_addr,
desc_table,
avail_addr,
avail,
used_addr,
used,
indirect_addr,
}
}
pub fn start(&self) -> GuestAddress {
self.desc_table_addr
}
pub fn end(&self) -> GuestAddress {
self.used.end()
}
pub fn desc_table(&self) -> &DescriptorTable<'a, M> {
&self.desc_table
}
pub fn avail(&self) -> &AvailRing<'_, M> {
&self.avail
}
pub fn used(&self) -> &UsedRing<'_, M> {
&self.used
}
pub fn desc_table_addr(&self) -> GuestAddress {
self.desc_table_addr
}
pub fn avail_addr(&self) -> GuestAddress {
self.avail_addr
}
pub fn used_addr(&self) -> GuestAddress {
self.used_addr
}
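
    /// Publish `value` (a chain head index) in the next slot of the available ring and
    /// advance the ring's `idx` field.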
fn update_avail_idx(&mut self, value: u16) -> Result<(), MockError> {
let avail_idx = self.avail.idx.load();
self.avail.ring.ref_at(avail_idx as usize)?.store(value);
self.avail.idx.store(avail_idx.wrapping_add(1));
Ok(())
}
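
    /// Allocate a fresh indirect table of at least `len` descriptors (rounded up to a multiple
    /// of 16) at `indirect_addr`, build a chain inside it, and return the table's address.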
fn alloc_indirect_chain(&mut self, len: u16) -> Result<GuestAddress, MockError> {
let table_len = if len % 16 == 0 {
len
} else {
16 * (len / 16 + 1)
};
let mut table = DescriptorTable::new(self.mem, self.indirect_addr, table_len);
        let head_descriptor_index = table.build_chain(len)?;
        // A fresh table hands out descriptors starting from index 0.
        assert_eq!(head_descriptor_index, 0);
let table_addr = self.indirect_addr;
self.indirect_addr = self.indirect_addr.checked_add(table.total_size()).unwrap();
Ok(table_addr)
}
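
    /// Build a chain of `len` descriptors and publish its head in the available ring.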
pub fn add_chain(&mut self, len: u16) -> Result<(), MockError> {
self.desc_table
.build_chain(len)
.and_then(|head_idx| self.update_avail_idx(head_idx))
}
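
    /// Build an indirect chain of `len` descriptors and publish it through a single
    /// `VRING_DESC_F_INDIRECT` descriptor in the main table.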
pub fn add_indirect_chain(&mut self, len: u16) -> Result<(), MockError> {
let head_idx = self.desc_table.build_chain(1)?;
let indirect_addr = self.alloc_indirect_chain(len)?;
let desc = self.desc_table.load(head_idx)?;
let mut desc = SplitDescriptor::from(desc);
desc.set_flags(VRING_DESC_F_INDIRECT as u16);
desc.set_addr(indirect_addr.raw_value());
desc.set_len(u32::from(len) * size_of::<RawDescriptor>() as u32);
self.desc_table.store(head_idx, RawDescriptor::from(desc))?;
self.update_avail_idx(head_idx)
}
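
    /// Create a device-side queue object (any `QueueT` implementation) configured with this
    /// mock queue's size and ring addresses.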
pub fn create_queue<Q: QueueT>(&self) -> Result<Q, Error> {
let mut q = Q::new(self.len)?;
q.set_size(self.len);
q.set_ready(true);
        // The `QueueT` setters take each 64-bit guest address as separate low/high 32-bit halves.
        q.set_desc_table_address(
            Some(self.desc_table_addr.0 as u32),
            Some((self.desc_table_addr.0 >> 32) as u32),
        );
q.set_avail_ring_address(
Some(self.avail_addr.0 as u32),
Some((self.avail_addr.0 >> 32) as u32),
);
q.set_used_ring_address(
Some(self.used_addr.0 as u32),
Some((self.used_addr.0 >> 32) as u32),
);
Ok(q)
}
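
    /// Write `descs` into the table, publish every chain head in the available ring, and
    /// return the first resulting descriptor chain.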
pub fn build_multiple_desc_chains(
&self,
descs: &[RawDescriptor],
) -> Result<DescriptorChain<&M>, MockError> {
self.add_desc_chains(descs, 0)?;
self.create_queue::<Queue>()
.map_err(MockError::InvalidQueueParams)?
.iter(self.mem)
.map_err(MockError::InvalidQueueParams)?
.next()
.ok_or(MockError::InvalidNextAvail)
}
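
    /// Link `descs` into a single chain by overriding each descriptor's `flags` and `next`
    /// fields, then return the resulting chain.
    ///
    /// A sketch of the expected input (the addresses and lengths below are arbitrary):
    ///
    /// ```ignore
    /// let descs = vec![
    ///     RawDescriptor::from(SplitDescriptor::new(0x2000, 0x100, 0, 0)),
    ///     RawDescriptor::from(SplitDescriptor::new(0x3000, 0x100, 0, 0)),
    /// ];
    /// // `build_desc_chain` sets VRING_DESC_F_NEXT on all but the last descriptor,
    /// // so this yields one chain of length 2 starting at descriptor 0.
    /// let chain = vq.build_desc_chain(&descs).unwrap();
    /// ```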
pub fn build_desc_chain(
&self,
descs: &[RawDescriptor],
) -> Result<DescriptorChain<&M>, MockError> {
let mut modified_descs: Vec<RawDescriptor> = Vec::with_capacity(descs.len());
for (idx, desc) in descs.iter().enumerate() {
let desc = SplitDescriptor::from(*desc);
let (flags, next) = if idx == descs.len() - 1 {
(desc.flags() & !VRING_DESC_F_NEXT as u16, 0)
} else {
(desc.flags() | VRING_DESC_F_NEXT as u16, idx as u16 + 1)
};
modified_descs.push(RawDescriptor::from(SplitDescriptor::new(
desc.addr().0,
desc.len(),
flags,
next,
)));
}
self.build_multiple_desc_chains(&modified_descs[..])
}
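
    /// Write `descs` into the descriptor table starting at slot `offset` and publish the head
    /// of every chain found in `descs` in the available ring.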
pub fn add_desc_chains(&self, descs: &[RawDescriptor], offset: u16) -> Result<(), MockError> {
let mut new_entries = 0;
let avail_idx: u16 = self
.mem
.read_obj::<u16>(self.avail_addr().unchecked_add(2))
.map(u16::from_le)
.map_err(MockError::GuestMem)?;
        for (idx, desc) in descs.iter().enumerate() {
            let i = idx as u16 + offset;
            self.desc_table().store(i, *desc)?;

            // A descriptor starts a new chain if it is the first one, or if the previous
            // descriptor does not have `VRING_DESC_F_NEXT` set; only chain heads get
            // published in the available ring.
            if idx == 0
                || SplitDescriptor::from(descs[idx - 1]).flags() & VRING_DESC_F_NEXT as u16 == 0
            {
self.mem
.write_obj(
u16::to_le(i),
self.avail_addr().unchecked_add(
VIRTQ_AVAIL_RING_HEADER_SIZE
+ (avail_idx + new_entries) as u64 * VIRTQ_AVAIL_ELEMENT_SIZE,
),
)
.map_err(MockError::GuestMem)?;
new_entries += 1;
}
}
self.mem
.write_obj(
u16::to_le(avail_idx + new_entries),
self.avail_addr().unchecked_add(2),
)
.map_err(MockError::GuestMem)?;
Ok(())
}
}