use std::mem::size_of;
use std::num::Wrapping;
use std::ops::Deref;
use std::sync::atomic::{fence, Ordering};
use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};
use crate::defs::{
DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR,
VIRTQ_AVAIL_ELEMENT_SIZE, VIRTQ_AVAIL_RING_HEADER_SIZE, VIRTQ_AVAIL_RING_META_SIZE,
VIRTQ_USED_ELEMENT_SIZE, VIRTQ_USED_RING_HEADER_SIZE, VIRTQ_USED_RING_META_SIZE,
};
use crate::desc::{split::VirtqUsedElem, RawDescriptor};
use crate::{error, DescriptorChain, Error, QueueGuard, QueueOwnedT, QueueState, QueueT};
use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;
#[cfg(kani)]
mod verification;
/// Maximum number of descriptors a split virtqueue may be configured with
/// (2^15; queue sizes must also be powers of two — see `Queue::new`).
pub const MAX_QUEUE_SIZE: u16 = 32768;
/// A virtio split queue: tracks the guest addresses of the descriptor table,
/// available ring and used ring, plus the device-side processing indices.
#[derive(Debug, Default, PartialEq, Eq)]
pub struct Queue {
    // Maximum queue size supported by the device (fixed at construction).
    max_size: u16,
    // Index of the next available-ring entry the device will process.
    next_avail: Wrapping<u16>,
    // Index of the next used-ring slot the device will write.
    next_used: Wrapping<u16>,
    // Whether VIRTIO_F_EVENT_IDX-style notification suppression is in use.
    event_idx_enabled: bool,
    // Number of used-ring entries added since the last notification check;
    // consumed (reset to 0) by `needs_notification`.
    num_added: Wrapping<u16>,
    // Currently configured queue size (<= max_size, power of two).
    size: u16,
    // Set by the driver when the queue is configured and usable.
    ready: bool,
    // Guest physical address of the descriptor table.
    desc_table: GuestAddress,
    // Guest physical address of the available ring.
    avail_ring: GuestAddress,
    // Guest physical address of the used ring.
    used_ring: GuestAddress,
}
impl Queue {
    /// Set the queue size.
    ///
    /// # Errors
    ///
    /// Returns [`Error::InvalidSize`] if `size` is zero, larger than
    /// [`Queue::max_size`], or not a power of two.
    pub fn try_set_size(&mut self, size: u16) -> Result<(), Error> {
        // `is_power_of_two()` returns false for 0, so the zero case is
        // rejected by the same check.
        if size > self.max_size() || !size.is_power_of_two() {
            return Err(Error::InvalidSize);
        }
        self.size = size;
        Ok(())
    }

    /// Set the guest address of the descriptor table.
    ///
    /// # Errors
    ///
    /// Returns [`Error::InvalidDescTableAlign`] if the address is not
    /// 16-byte aligned.
    pub fn try_set_desc_table_address(&mut self, desc_table: GuestAddress) -> Result<(), Error> {
        if desc_table.mask(0xf) != 0 {
            return Err(Error::InvalidDescTableAlign);
        }
        self.desc_table = desc_table;
        Ok(())
    }

    /// Set the guest address of the available ring.
    ///
    /// # Errors
    ///
    /// Returns [`Error::InvalidAvailRingAlign`] if the address is not
    /// 2-byte aligned.
    pub fn try_set_avail_ring_address(&mut self, avail_ring: GuestAddress) -> Result<(), Error> {
        if avail_ring.mask(0x1) != 0 {
            return Err(Error::InvalidAvailRingAlign);
        }
        self.avail_ring = avail_ring;
        Ok(())
    }

    /// Set the guest address of the used ring.
    ///
    /// # Errors
    ///
    /// Returns [`Error::InvalidUsedRingAlign`] if the address is not
    /// 4-byte aligned.
    pub fn try_set_used_ring_address(&mut self, used_ring: GuestAddress) -> Result<(), Error> {
        if used_ring.mask(0x3) != 0 {
            return Err(Error::InvalidUsedRingAlign);
        }
        self.used_ring = used_ring;
        Ok(())
    }

    /// Snapshot the queue configuration and indices as a [`QueueState`].
    pub fn state(&self) -> QueueState {
        QueueState {
            max_size: self.max_size,
            next_avail: self.next_avail(),
            next_used: self.next_used(),
            event_idx_enabled: self.event_idx_enabled,
            size: self.size,
            ready: self.ready,
            desc_table: self.desc_table(),
            avail_ring: self.avail_ring(),
            used_ring: self.used_ring(),
        }
    }

    /// Write `val` (little-endian) to the `avail_event` word, which sits
    /// immediately after the used ring's element array.
    fn set_avail_event<M: GuestMemory>(
        &self,
        mem: &M,
        val: u16,
        order: Ordering,
    ) -> Result<(), Error> {
        let avail_event_offset =
            VIRTQ_USED_RING_HEADER_SIZE + VIRTQ_USED_ELEMENT_SIZE * u64::from(self.size);
        let addr = self
            .used_ring
            .checked_add(avail_event_offset)
            .ok_or(Error::AddressOverflow)?;
        mem.store(u16::to_le(val), addr, order)
            .map_err(Error::GuestMemory)
    }

    /// Write `val` (little-endian) to the used ring `flags` field (the first
    /// 16-bit word of the used ring).
    fn set_used_flags<M: GuestMemory>(
        &mut self,
        mem: &M,
        val: u16,
        order: Ordering,
    ) -> Result<(), Error> {
        mem.store(u16::to_le(val), self.used_ring, order)
            .map_err(Error::GuestMemory)
    }

    /// Enable or disable driver notifications, using the `avail_event` word
    /// when event-idx is negotiated and the used ring `flags` otherwise.
    fn set_notification<M: GuestMemory>(&mut self, mem: &M, enable: bool) -> Result<(), Error> {
        if enable {
            if self.event_idx_enabled {
                // Ask to be notified once the driver publishes anything past
                // what we have already seen.
                self.set_avail_event(mem, self.next_avail.0, Ordering::Relaxed)
            } else {
                self.set_used_flags(mem, 0, Ordering::Relaxed)
            }
        } else if !self.event_idx_enabled {
            self.set_used_flags(mem, VRING_USED_F_NO_NOTIFY as u16, Ordering::Relaxed)
        } else {
            // With event-idx enabled there is no explicit "disable" write;
            // suppression happens naturally via the used_event mechanism.
            Ok(())
        }
    }

    /// Read the driver's `used_event` word, which sits immediately after the
    /// available ring's element array.
    fn used_event<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
        let used_event_offset =
            VIRTQ_AVAIL_RING_HEADER_SIZE + u64::from(self.size) * VIRTQ_AVAIL_ELEMENT_SIZE;
        let used_event_addr = self
            .avail_ring
            .checked_add(used_event_offset)
            .ok_or(Error::AddressOverflow)?;
        mem.load(used_event_addr, order)
            .map(u16::from_le)
            .map(Wrapping)
            .map_err(Error::GuestMemory)
    }
}
impl<'a> QueueGuard<'a> for Queue {
    // This queue owns its state and needs no locking, so the "guard" is
    // simply a mutable reference to the queue itself.
    type G = &'a mut Self;
}
impl QueueT for Queue {
    /// Create a queue with the given maximum size; rejects sizes that are
    /// zero, above `MAX_QUEUE_SIZE`, or not a power of two.
    fn new(max_size: u16) -> Result<Self, Error> {
        if max_size == 0 || max_size > MAX_QUEUE_SIZE || (max_size & (max_size - 1)) != 0 {
            return Err(Error::InvalidMaxSize);
        }
        Ok(Queue {
            max_size,
            size: max_size,
            ready: false,
            desc_table: GuestAddress(DEFAULT_DESC_TABLE_ADDR),
            avail_ring: GuestAddress(DEFAULT_AVAIL_RING_ADDR),
            used_ring: GuestAddress(DEFAULT_USED_RING_ADDR),
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            event_idx_enabled: false,
            num_added: Wrapping(0),
        })
    }

    /// Check that the queue is ready and that all three ring structures fit
    /// inside the guest memory `mem` for the current queue size.
    fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool {
        let queue_size = self.size as u64;
        let desc_table = self.desc_table;
        let desc_table_size = size_of::<RawDescriptor>() as u64 * queue_size;
        let avail_ring = self.avail_ring;
        let avail_ring_size = VIRTQ_AVAIL_RING_META_SIZE + VIRTQ_AVAIL_ELEMENT_SIZE * queue_size;
        let used_ring = self.used_ring;
        let used_ring_size = VIRTQ_USED_RING_META_SIZE + VIRTQ_USED_ELEMENT_SIZE * queue_size;
        if !self.ready {
            error!("attempt to use virtio queue that is not marked ready");
            false
        } else if desc_table
            .checked_add(desc_table_size)
            .is_none_or(|v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
                desc_table.raw_value(),
                desc_table_size
            );
            false
        } else if avail_ring
            .checked_add(avail_ring_size)
            .is_none_or(|v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                avail_ring.raw_value(),
                avail_ring_size
            );
            false
        } else if used_ring
            .checked_add(used_ring_size)
            .is_none_or(|v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                used_ring.raw_value(),
                used_ring_size
            );
            false
        } else {
            true
        }
    }

    /// Return the queue to its post-construction state (not ready, default
    /// addresses, all indices zeroed).
    fn reset(&mut self) {
        self.ready = false;
        self.size = self.max_size;
        self.desc_table = GuestAddress(DEFAULT_DESC_TABLE_ADDR);
        self.avail_ring = GuestAddress(DEFAULT_AVAIL_RING_ADDR);
        self.used_ring = GuestAddress(DEFAULT_USED_RING_ADDR);
        self.next_avail = Wrapping(0);
        self.next_used = Wrapping(0);
        self.num_added = Wrapping(0);
        self.event_idx_enabled = false;
    }

    /// No real locking needed for an owned queue; hand back `&mut self`.
    fn lock(&mut self) -> <Self as QueueGuard<'_>>::G {
        self
    }

    fn max_size(&self) -> u16 {
        self.max_size
    }

    fn size(&self) -> u16 {
        self.size
    }

    /// Infallible setter used by transports: invalid sizes are logged and
    /// ignored, leaving the previous size in place.
    fn set_size(&mut self, size: u16) {
        if self.try_set_size(size).is_err() {
            error!("virtio queue with invalid size: {}", size);
        }
    }

    fn ready(&self) -> bool {
        self.ready
    }

    fn set_ready(&mut self, ready: bool) {
        self.ready = ready;
    }

    /// Update the low and/or high 32 bits of the descriptor table address;
    /// misaligned results are logged and ignored.
    fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.desc_table.0 as u32) as u64;
        let high = high.unwrap_or((self.desc_table.0 >> 32) as u32) as u64;
        let desc_table = GuestAddress((high << 32) | low);
        if self.try_set_desc_table_address(desc_table).is_err() {
            error!("virtio queue descriptor table breaks alignment constraints");
        }
    }

    /// Update the low and/or high 32 bits of the available ring address;
    /// misaligned results are logged and ignored.
    fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.avail_ring.0 as u32) as u64;
        let high = high.unwrap_or((self.avail_ring.0 >> 32) as u32) as u64;
        let avail_ring = GuestAddress((high << 32) | low);
        if self.try_set_avail_ring_address(avail_ring).is_err() {
            error!("virtio queue available ring breaks alignment constraints");
        }
    }

    /// Update the low and/or high 32 bits of the used ring address;
    /// misaligned results are logged and ignored.
    fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.used_ring.0 as u32) as u64;
        let high = high.unwrap_or((self.used_ring.0 >> 32) as u32) as u64;
        let used_ring = GuestAddress((high << 32) | low);
        if self.try_set_used_ring_address(used_ring).is_err() {
            error!("virtio queue used ring breaks alignment constraints");
        }
    }

    fn set_event_idx(&mut self, enabled: bool) {
        self.event_idx_enabled = enabled;
    }

    /// Load the driver-published available-ring `idx` field; it lives 2 bytes
    /// past the ring start (after the 16-bit `flags` word).
    fn avail_idx<M>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error>
    where
        M: GuestMemory + ?Sized,
    {
        let addr = self
            .avail_ring
            .checked_add(2)
            .ok_or(Error::AddressOverflow)?;
        mem.load(addr, order)
            .map(u16::from_le)
            .map(Wrapping)
            .map_err(Error::GuestMemory)
    }

    /// Load the used-ring `idx` field; same 2-byte offset layout as the
    /// available ring.
    fn used_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
        let addr = self
            .used_ring
            .checked_add(2)
            .ok_or(Error::AddressOverflow)?;
        mem.load(addr, order)
            .map(u16::from_le)
            .map(Wrapping)
            .map_err(Error::GuestMemory)
    }

    /// Publish a completed descriptor chain to the used ring.
    ///
    /// Writes the element first, then publishes the new `idx` with Release
    /// ordering so the driver cannot observe the index before the element.
    fn add_used<M: GuestMemory>(
        &mut self,
        mem: &M,
        head_index: u16,
        len: u32,
    ) -> Result<(), Error> {
        if head_index >= self.size {
            error!(
                "attempted to add out of bounds descriptor to used ring: {}",
                head_index
            );
            return Err(Error::InvalidDescriptorIndex);
        }
        // Ring slot is the free-running counter modulo the queue size.
        let next_used_index = u64::from(self.next_used.0 % self.size);
        let offset = VIRTQ_USED_RING_HEADER_SIZE + next_used_index * VIRTQ_USED_ELEMENT_SIZE;
        let addr = self
            .used_ring
            .checked_add(offset)
            .ok_or(Error::AddressOverflow)?;
        mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
            .map_err(Error::GuestMemory)?;
        self.next_used += Wrapping(1);
        self.num_added += Wrapping(1);
        mem.store(
            u16::to_le(self.next_used.0),
            self.used_ring
                .checked_add(2)
                .ok_or(Error::AddressOverflow)?,
            Ordering::Release,
        )
        .map_err(Error::GuestMemory)
    }

    /// Re-enable notifications, then re-check the available index; returns
    /// true if new entries were published while notifications were off (the
    /// fence orders the enable write before the re-check).
    fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
        self.set_notification(mem, true)?;
        fence(Ordering::SeqCst);
        self.avail_idx(mem, Ordering::Relaxed)
            .map(|idx| idx != self.next_avail)
    }

    fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> {
        self.set_notification(mem, false)
    }

    /// Decide whether the driver must be notified about newly used buffers.
    ///
    /// With event-idx enabled this implements the spec's vring_need_event
    /// test: notify iff `used_event` falls within the window of entries
    /// published since the last check (`num_added` is consumed here).
    fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
        let used_idx = self.next_used;
        // Order our used-ring writes before reading the driver's used_event.
        fence(Ordering::SeqCst);
        if self.event_idx_enabled {
            let used_event = self.used_event(mem, Ordering::Relaxed)?;
            let old = used_idx - self.num_added;
            self.num_added = Wrapping(0);
            // Wrapping form of: old <= used_event < used_idx.
            return Ok(used_idx - used_event - Wrapping(1) < used_idx - old);
        }
        // Without event-idx, always notify.
        Ok(true)
    }

    fn next_avail(&self) -> u16 {
        self.next_avail.0
    }

    fn set_next_avail(&mut self, next_avail: u16) {
        self.next_avail = Wrapping(next_avail);
    }

    fn next_used(&self) -> u16 {
        self.next_used.0
    }

    fn set_next_used(&mut self, next_used: u16) {
        self.next_used = Wrapping(next_used);
    }

    fn desc_table(&self) -> u64 {
        self.desc_table.0
    }

    fn avail_ring(&self) -> u64 {
        self.avail_ring.0
    }

    fn used_ring(&self) -> u64 {
        self.used_ring.0
    }

    fn event_idx_enabled(&self) -> bool {
        self.event_idx_enabled
    }

    /// Pop a single available descriptor chain, or `None` if the queue is
    /// empty or in an invalid state (iterator errors are logged).
    fn pop_descriptor_chain<M>(&mut self, mem: M) -> Option<DescriptorChain<M>>
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        match self.iter(mem) {
            Ok(mut iter) => iter.next(),
            Err(e) => {
                error!("Iterator error {}", e);
                None
            }
        }
    }
}
impl QueueOwnedT for Queue {
    /// Build an iterator over all descriptor chains currently pending in the
    /// available ring. Fails if the queue is not ready or has no configured
    /// available ring, or if the driver-published index is inconsistent.
    fn iter<M>(&mut self, mem: M) -> Result<AvailIter<'_, M>, Error>
    where
        M: Deref,
        M::Target: GuestMemory,
    {
        if !self.ready || self.avail_ring == GuestAddress(0) {
            return Err(Error::QueueNotReady);
        }
        let idx = self.avail_idx(mem.deref(), Ordering::Acquire)?;
        AvailIter::new(mem, idx, self)
    }

    /// Rewind the device's available index by one, re-exposing the most
    /// recently popped chain.
    fn go_to_previous_position(&mut self) {
        self.next_avail -= Wrapping(1);
    }
}
/// Iterator over the descriptor chains the driver has made available but the
/// device has not yet started processing.
#[derive(Debug)]
pub struct AvailIter<'b, M> {
    // Handle to guest memory (deref target implements `GuestMemory`).
    mem: M,
    // Copied queue configuration, captured when the iterator was created.
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    queue_size: u16,
    // Driver-published available index: iteration stops when reached.
    last_index: Wrapping<u16>,
    // Borrowed from the queue so progress is persisted as chains are yielded.
    next_avail: &'b mut Wrapping<u16>,
}
impl<'b, M> AvailIter<'b, M>
where
    M: Deref,
    M::Target: GuestMemory,
{
    /// Create an iterator over the entries between the queue's `next_avail`
    /// and the driver-published index `idx`.
    ///
    /// The driver can never legitimately publish more than `queue.size`
    /// entries ahead of the device, so anything beyond that is treated as a
    /// corrupt ring and rejected.
    pub(crate) fn new(mem: M, idx: Wrapping<u16>, queue: &'b mut Queue) -> Result<Self, Error> {
        let pending = (idx - queue.next_avail).0;
        if pending > queue.size {
            return Err(Error::InvalidAvailRingIndex);
        }
        Ok(AvailIter {
            mem,
            queue_size: queue.size,
            desc_table: queue.desc_table,
            avail_ring: queue.avail_ring,
            last_index: idx,
            next_avail: &mut queue.next_avail,
        })
    }

    /// Step the shared `next_avail` counter back by one so the most recently
    /// yielded chain will be produced again by a future iteration.
    pub fn go_to_previous_position(&mut self) {
        *self.next_avail = *self.next_avail - Wrapping(1);
    }
}
impl<M> Iterator for AvailIter<'_, M>
where
    M: Clone + Deref,
    M::Target: GuestMemory,
{
    type Item = DescriptorChain<M>;

    /// Yield the next available descriptor chain, advancing the shared
    /// `next_avail` counter on success.
    fn next(&mut self) -> Option<Self::Item> {
        // Caught up with the driver-published index: nothing pending.
        if *self.next_avail == self.last_index {
            return None;
        }
        // `checked_rem` (rather than `%`) avoids a division-by-zero panic
        // when the queue size is 0 (see test_regression_iterator_division).
        let elem_off =
            u64::from(self.next_avail.0.checked_rem(self.queue_size)?) * VIRTQ_AVAIL_ELEMENT_SIZE;
        let offset = VIRTQ_AVAIL_RING_HEADER_SIZE + elem_off;
        let addr = self.avail_ring.checked_add(offset)?;
        // Read the chain's head descriptor index from the available ring.
        let head_index: u16 = self
            .mem
            .load(addr, Ordering::Acquire)
            .map(u16::from_le)
            .map_err(|_| error!("Failed to read from memory {:x}", addr.raw_value()))
            .ok()?;
        *self.next_avail += Wrapping(1);
        Some(DescriptorChain::new(
            self.mem.clone(),
            self.desc_table,
            self.queue_size,
            head_index,
        ))
    }
}
#[cfg(any(test, feature = "test-utils"))]
impl PartialEq for Error {
    /// Test-only equality: two errors compare equal when their `Display`
    /// renderings match.
    fn eq(&self, other: &Self) -> bool {
        self.to_string() == other.to_string()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::defs::{DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR};
use crate::desc::{split::Descriptor as SplitDescriptor, RawDescriptor};
use crate::mock::MockSplitQueue;
use virtio_bindings::bindings::virtio_ring::{
VRING_DESC_F_NEXT, VRING_DESC_F_WRITE, VRING_USED_F_NO_NOTIFY,
};
use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
// Exercises `is_valid` together with the infallible setters: invalid sizes
// and misaligned addresses are ignored, while out-of-bounds (but aligned)
// addresses are stored and caught by `is_valid`.
#[test]
fn test_queue_is_valid() {
    let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(m, 16);
    let mut q: Queue = vq.create_queue().unwrap();
    assert!(q.is_valid(m));
    // A queue that is not marked ready must be invalid.
    q.set_ready(false);
    assert!(!q.ready());
    assert!(!q.is_valid(m));
    q.set_ready(true);
    // Rejected sizes (too big, zero, not a power of two) leave `size` as-is.
    q.set_size(q.max_size() << 1);
    assert_eq!(q.size, q.max_size());
    q.set_size(0);
    assert_eq!(q.size, q.max_size());
    q.set_size(11);
    assert_eq!(q.size, q.max_size());
    q.set_size(4);
    assert_eq!(q.size, 4);
    q.size = q.max_size();
    // Descriptor table: misaligned address ignored; aligned out-of-bounds
    // address stored but flagged by `is_valid`.
    q.set_desc_table_address(Some(0xf), None);
    assert_eq!(q.desc_table.0, vq.desc_table_addr().0);
    q.set_desc_table_address(Some(0xffff_fff0), None);
    assert_eq!(q.desc_table.0, 0xffff_fff0);
    assert!(!q.is_valid(m));
    q.set_desc_table_address(Some(0x10), None);
    assert_eq!(q.desc_table.0, 0x10);
    assert!(q.is_valid(m));
    let addr = vq.desc_table_addr().0;
    q.set_desc_table_address(Some(addr as u32), Some((addr >> 32) as u32));
    // Available ring: same pattern with 2-byte alignment.
    q.set_avail_ring_address(Some(0x1), None);
    assert_eq!(q.avail_ring.0, vq.avail_addr().0);
    q.set_avail_ring_address(Some(0xffff_fffe), None);
    assert_eq!(q.avail_ring.0, 0xffff_fffe);
    assert!(!q.is_valid(m));
    q.set_avail_ring_address(Some(0x2), None);
    assert_eq!(q.avail_ring.0, 0x2);
    assert!(q.is_valid(m));
    let addr = vq.avail_addr().0;
    q.set_avail_ring_address(Some(addr as u32), Some((addr >> 32) as u32));
    // Used ring: same pattern with 4-byte alignment.
    q.set_used_ring_address(Some(0x3), None);
    assert_eq!(q.used_ring.0, vq.used_addr().0);
    q.set_used_ring_address(Some(0xffff_fffc), None);
    assert_eq!(q.used_ring.0, 0xffff_fffc);
    assert!(!q.is_valid(m));
    q.set_used_ring_address(Some(0x4), None);
    assert_eq!(q.used_ring.0, 0x4);
    let addr = vq.used_addr().0;
    q.set_used_ring_address(Some(addr as u32), Some((addr >> 32) as u32));
    assert!(q.is_valid(m));
}
// Verifies `add_used`: out-of-range head indices are rejected without
// touching guest memory, and a valid add writes the element and publishes
// the incremented used index.
#[test]
fn test_add_used() {
    let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(mem, 16);
    let mut q: Queue = vq.create_queue().unwrap();
    assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(0));
    assert_eq!(u16::from_le(vq.used().idx().load()), 0);
    // Head index 16 is out of bounds for a 16-entry queue.
    assert!(q.add_used(mem, 16, 0x1000).is_err());
    assert_eq!(u16::from_le(vq.used().idx().load()), 0);
    q.add_used(mem, 1, 0x1000).unwrap();
    assert_eq!(q.next_used, Wrapping(1));
    assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(1));
    assert_eq!(u16::from_le(vq.used().idx().load()), 1);
    // The written used element must carry the head index and byte count.
    let x = vq.used().ring().ref_at(0).unwrap().load();
    assert_eq!(x.id(), 1);
    assert_eq!(x.len(), 0x1000);
}
// Verifies that `reset` restores every field to its post-construction value
// after the queue state has been thoroughly dirtied.
#[test]
fn test_reset_queue() {
    let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(m, 16);
    let mut q: Queue = vq.create_queue().unwrap();
    // Dirty everything `reset` is supposed to clear.
    q.set_size(8);
    q.set_desc_table_address(Some(0x5000), None);
    q.set_event_idx(true);
    q.set_next_avail(2);
    q.set_next_used(4);
    q.num_added = Wrapping(15);
    assert_eq!(q.size, 8);
    assert!(q.ready);
    assert_ne!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
    assert_ne!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
    assert_ne!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
    assert_ne!(q.next_avail, Wrapping(0));
    assert_ne!(q.next_used, Wrapping(0));
    assert_ne!(q.num_added, Wrapping(0));
    assert!(q.event_idx_enabled);
    q.reset();
    // Everything back to defaults.
    assert_eq!(q.size, 16);
    assert!(!q.ready);
    assert_eq!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
    assert_eq!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
    assert_eq!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
    assert_eq!(q.next_avail, Wrapping(0));
    assert_eq!(q.next_used, Wrapping(0));
    assert_eq!(q.num_added, Wrapping(0));
    assert!(!q.event_idx_enabled);
}
// Exercises the event-idx notification-suppression logic, including 16-bit
// wrap-around of the used index. The `avail_addr + 4 + qsize * 2` writes
// target the driver's `used_event` word (after the avail ring header and
// element array).
#[test]
fn test_needs_notification() {
    let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let qsize = 16;
    let vq = MockSplitQueue::new(mem, qsize);
    let mut q: Queue = vq.create_queue().unwrap();
    let avail_addr = vq.avail_addr();
    // Without event-idx, the device must always notify.
    for i in 0..qsize {
        q.next_used = Wrapping(i);
        assert!(q.needs_notification(mem).unwrap());
    }
    mem.write_obj::<u16>(
        u16::to_le(4),
        avail_addr.unchecked_add(4 + qsize as u64 * 2),
    )
    .unwrap();
    q.set_event_idx(true);
    // With used_event = 4, only the step that crosses index 5 (i.e. the
    // first used entry past the event) triggers a notification — at i == 5
    // and again one full 2^16 wrap later.
    let wrap = u32::from(u16::MAX) + 1;
    for i in 0..wrap + 12 {
        q.next_used = Wrapping(i as u16);
        q.num_added = Wrapping(1);
        let expected = i == 5 || i == (5 + wrap);
        assert_eq!((q.needs_notification(mem).unwrap(), i), (expected, i));
    }
    mem.write_obj::<u16>(
        u16::to_le(8),
        avail_addr.unchecked_add(4 + qsize as u64 * 2),
    )
    .unwrap();
    // num_added was consumed by the previous call, so no notification.
    assert!(!q.needs_notification(mem).unwrap());
    mem.write_obj::<u16>(
        u16::to_le(15),
        avail_addr.unchecked_add(4 + qsize as u64 * 2),
    )
    .unwrap();
    q.num_added = Wrapping(1);
    assert!(!q.needs_notification(mem).unwrap());
    q.next_used = Wrapping(15);
    q.num_added = Wrapping(1);
    assert!(!q.needs_notification(mem).unwrap());
    // Crossing used_event = 15 (next_used moves to 16) must notify once.
    q.next_used = Wrapping(16);
    q.num_added = Wrapping(1);
    assert!(q.needs_notification(mem).unwrap());
    assert!(!q.needs_notification(mem).unwrap());
    // Near the u16 boundary: crossing used_event = MAX-3 still notifies.
    mem.write_obj::<u16>(
        u16::to_le(u16::MAX - 3),
        avail_addr.unchecked_add(4 + qsize as u64 * 2),
    )
    .unwrap();
    q.next_used = Wrapping(u16::MAX - 2);
    q.num_added = Wrapping(1);
    assert!(q.needs_notification(mem).unwrap());
}
// Verifies enable/disable notification in both modes: without event-idx the
// used ring `flags` word is toggled; with event-idx, `enable_notification`
// reports whether new entries arrived while notifications were suppressed.
#[test]
fn test_enable_disable_notification() {
    let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(mem, 16);
    let mut q: Queue = vq.create_queue().unwrap();
    let used_addr = vq.used_addr();
    assert!(!q.event_idx_enabled);
    // Flag-based mode: enable clears flags, disable sets NO_NOTIFY.
    q.enable_notification(mem).unwrap();
    let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
    assert_eq!(v, 0);
    q.disable_notification(mem).unwrap();
    let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
    assert_eq!(v, VRING_USED_F_NO_NOTIFY as u16);
    q.enable_notification(mem).unwrap();
    let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
    assert_eq!(v, 0);
    // Event-idx mode: write the avail ring `idx` (offset 2) directly and
    // check enable_notification's "new entries pending" return value.
    q.set_event_idx(true);
    let avail_addr = vq.avail_addr();
    mem.write_obj::<u16>(u16::to_le(2), avail_addr.unchecked_add(2))
        .unwrap();
    assert!(q.enable_notification(mem).unwrap());
    q.next_avail = Wrapping(2);
    assert!(!q.enable_notification(mem).unwrap());
    mem.write_obj::<u16>(u16::to_le(8), avail_addr.unchecked_add(2))
        .unwrap();
    assert!(q.enable_notification(mem).unwrap());
    q.next_avail = Wrapping(8);
    assert!(!q.enable_notification(mem).unwrap());
}
// Simulates a device processing loop (disable notifications, drain the
// queue, re-enable, re-check) and asserts how many passes are needed when
// the driver publishes new chains at various points.
#[test]
fn test_consume_chains_with_notif() {
    let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(mem, 16);
    let mut q: Queue = vq.create_queue().unwrap();
    assert!(q.is_valid(mem));
    // Build 5 chains: descriptors without NEXT terminate a chain, so the
    // chains are [0..=1], [2..=4], [5..=6], [7..=8], [9..=12].
    let mut descs = Vec::new();
    for i in 0..13 {
        let flags = match i {
            1 | 4 | 6 | 8 | 12 => 0,
            _ => VRING_DESC_F_NEXT,
        };
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            (0x1000 * (i + 1)) as u64,
            0x1000,
            flags as u16,
            i + 1,
        )));
    }
    vq.add_desc_chains(&descs, 0).unwrap();
    // Only 2 chains made available initially; drained in a single pass.
    vq.avail().idx().store(u16::to_le(2));
    assert_eq!(q.next_avail(), 0);
    let mut i = 0;
    loop {
        i += 1;
        q.disable_notification(mem).unwrap();
        while let Some(chain) = q.iter(mem).unwrap().next() {
            let head_index = chain.head_index();
            let mut desc_len = 0;
            chain.for_each(|d| {
                if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                    desc_len += d.len();
                }
            });
            q.add_used(mem, head_index, desc_len).unwrap();
        }
        if !q.enable_notification(mem).unwrap() {
            break;
        }
    }
    assert_eq!(i, 1);
    assert_eq!(q.next_avail(), 2);
    assert_eq!(q.next_used(), 2);
    // A chain published mid-loop forces a second pass.
    vq.avail().idx().store(u16::to_le(3));
    i = 0;
    loop {
        i += 1;
        q.disable_notification(mem).unwrap();
        while let Some(chain) = q.iter(mem).unwrap().next() {
            let head_index = chain.head_index();
            let mut desc_len = 0;
            chain.for_each(|d| {
                if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                    desc_len += d.len();
                }
            });
            q.add_used(mem, head_index, desc_len).unwrap();
        }
        // New chain appears after draining but before re-enable.
        vq.avail().idx().store(u16::to_le(4));
        if !q.enable_notification(mem).unwrap() {
            break;
        }
    }
    assert_eq!(i, 2);
    assert_eq!(q.next_avail(), 4);
    assert_eq!(q.next_used(), 4);
    // Remaining chains are consumed and indices converge at 7.
    vq.avail().idx().store(u16::to_le(7));
    loop {
        q.disable_notification(mem).unwrap();
        while let Some(chain) = q.iter(mem).unwrap().next() {
            let head_index = chain.head_index();
            let mut desc_len = 0;
            chain.for_each(|d| {
                if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                    desc_len += d.len();
                }
            });
            q.add_used(mem, head_index, desc_len).unwrap();
        }
        if !q.enable_notification(mem).unwrap() {
            break;
        }
    }
    assert_eq!(q.next_avail(), 7);
    assert_eq!(q.next_used(), 7);
}
// After the device has consumed 3 chains, a driver that rolls the published
// avail index *backwards* (to 1) makes `next_avail` appear further ahead
// than the ring allows, so `iter` must fail.
#[test]
fn test_invalid_avail_idx() {
    let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(mem, 16);
    let mut q: Queue = vq.create_queue().unwrap();
    assert!(q.is_valid(mem));
    // Three chains: [0..=1], [2..=4], [5..=6].
    let mut descs = Vec::new();
    for i in 0..7 {
        let flags = match i {
            1 | 4 | 6 => 0,
            _ => VRING_DESC_F_NEXT,
        };
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            (0x1000 * (i + 1)) as u64,
            0x1000,
            flags as u16,
            i + 1,
        )));
    }
    vq.add_desc_chains(&descs, 0).unwrap();
    vq.avail().idx().store(u16::to_le(3));
    assert_eq!(q.next_avail(), 0);
    assert_eq!(q.next_used(), 0);
    // Standard processing loop: consume all three chains.
    loop {
        q.disable_notification(mem).unwrap();
        while let Some(chain) = q.iter(mem).unwrap().next() {
            let head_index = chain.head_index();
            let mut desc_len = 0;
            chain.for_each(|d| {
                if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                    desc_len += d.len();
                }
            });
            q.add_used(mem, head_index, desc_len).unwrap();
        }
        if !q.enable_notification(mem).unwrap() {
            break;
        }
    }
    assert_eq!(q.next_avail(), 3);
    assert_eq!(q.avail_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
    assert_eq!(q.next_used(), 3);
    assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
    assert!(q.lock().ready());
    // Malicious/buggy driver moves the published index backwards.
    vq.avail().idx().store(u16::to_le(1));
    assert!(q.iter(mem).is_err());
}
// Checks the avail-index sanity window around a wrapping `next_avail`
// (u16::MAX): up to `queue_size` pending entries are accepted, one more
// is rejected.
#[test]
fn test_iterator_and_avail_idx() {
    let queue_size = 2;
    let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(mem, queue_size);
    let mut q: Queue = vq.create_queue().unwrap();
    assert!(q.is_valid(mem));
    let mut descs = Vec::new();
    for i in 0..queue_size {
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            (0x1000 * (i + 1)) as u64,
            0x1000,
            0_u16,
            i + 1,
        )));
    }
    vq.add_desc_chains(&descs, 0).unwrap();
    // Force next_avail near the wrap point.
    q.set_next_avail(u16::MAX);
    // Exactly queue_size entries pending: accepted.
    let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size);
    vq.avail().idx().store(u16::to_le(avail_idx.0));
    assert!(q.iter(mem).is_ok());
    // Fewer than queue_size: accepted.
    let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size - 1);
    vq.avail().idx().store(u16::to_le(avail_idx.0));
    assert!(q.iter(mem).is_ok());
    // queue_size + 1 pending entries is impossible on a valid ring: rejected.
    let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size + 1);
    vq.avail().idx().store(u16::to_le(avail_idx.0));
    assert!(q.iter(mem).is_err());
}
// Walks three chains through the chain-level iterators, including the
// `writable()` / `readable()` filtered views driven by VRING_DESC_F_WRITE.
#[test]
fn test_descriptor_and_iterator() {
    let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(m, 16);
    let mut q: Queue = vq.create_queue().unwrap();
    assert!(q.is_valid(m));
    // Chains: [0..=1] (read-only), [2..=4] (all writable), [5..=6] (one
    // readable, one writable).
    let mut descs = Vec::new();
    for j in 0..7 {
        let flags = match j {
            1 | 6 => 0,
            2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
            4 => VRING_DESC_F_WRITE,
            _ => VRING_DESC_F_NEXT,
        };
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            (0x1000 * (j + 1)) as u64,
            0x1000,
            flags as u16,
            j + 1,
        )));
    }
    vq.add_desc_chains(&descs, 0).unwrap();
    let mut i = q.iter(m).unwrap();
    {
        // First chain yields its 2 descriptors, then stays exhausted.
        let c = i.next().unwrap();
        assert_eq!(c.head_index(), 0);
        let mut iter = c;
        assert!(iter.next().is_some());
        assert!(iter.next().is_some());
        assert!(iter.next().is_none());
        assert!(iter.next().is_none());
    }
    {
        // Second chain: descriptors 2 and 4 carry WRITE, 3 does not, so the
        // writable view yields 2 entries.
        let c = i.next().unwrap();
        assert_eq!(c.head_index(), 2);
        let mut iter = c.writable();
        assert!(iter.next().is_some());
        assert!(iter.next().is_some());
        assert!(iter.next().is_none());
        assert!(iter.next().is_none());
    }
    {
        // Third chain: only descriptor 6 is readable (5 is writable).
        let c = i.next().unwrap();
        assert_eq!(c.head_index(), 5);
        let mut iter = c.readable();
        assert!(iter.next().is_some());
        assert!(iter.next().is_none());
        assert!(iter.next().is_none());
    }
}
// Covers `iter` end-to-end: not-ready error, chain traversal,
// `go_to_previous_position`, and that per-chain iteration over descriptors
// with huge `len` values never yields more than 2^32 bytes total.
#[test]
fn test_iterator() {
    let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(m, 16);
    let mut q: Queue = vq.create_queue().unwrap();
    q.size = q.max_size;
    q.desc_table = vq.desc_table_addr();
    q.avail_ring = vq.avail_addr();
    q.used_ring = vq.used_addr();
    assert!(q.is_valid(m));
    {
        // A non-ready queue cannot be iterated.
        q.ready = false;
        assert!(q.iter(m).is_err());
    }
    q.ready = true;
    {
        // Two chains: [0..=1] and [2..=4].
        let mut descs = Vec::new();
        for j in 0..5u16 {
            let flags = match j {
                1 | 4 => 0,
                _ => VRING_DESC_F_NEXT,
            };
            descs.push(RawDescriptor::from(SplitDescriptor::new(
                (0x1000 * (j + 1)) as u64,
                0x1000,
                flags as u16,
                j + 1,
            )));
        }
        vq.add_desc_chains(&descs, 0).unwrap();
        let mut i = q.iter(m).unwrap();
        {
            let mut c = i.next().unwrap();
            assert_eq!(c.head_index(), 0);
            c.next().unwrap();
            assert!(c.next().is_some());
            assert!(c.next().is_none());
            assert_eq!(c.head_index(), 0);
        }
        {
            let mut c = i.next().unwrap();
            assert_eq!(c.head_index(), 2);
            c.next().unwrap();
            c.next().unwrap();
            c.next().unwrap();
            assert!(c.next().is_none());
            assert_eq!(c.head_index(), 2);
        }
        {
            // Rewinding makes the last chain (3 descriptors) available again.
            assert!(i.next().is_none());
            i.go_to_previous_position();
            let mut c = q.iter(m).unwrap().next().unwrap();
            c.next().unwrap();
            c.next().unwrap();
            c.next().unwrap();
            assert!(c.next().is_none());
        }
    }
    {
        // Total yielded length must stay within u32 even with a 0xffff_ffff
        // descriptor in the chain.
        let descs = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x1000,
                0xffff_ffff,
                VRING_DESC_F_NEXT as u16,
                1,
            )),
            RawDescriptor::from(SplitDescriptor::new(0x1000, 0x1234_5678, 0, 2)),
        ];
        vq.add_desc_chains(&descs, 0).unwrap();
        let mut yielded_bytes_by_iteration = 0_u32;
        for d in q.iter(m).unwrap().next().unwrap() {
            yielded_bytes_by_iteration = yielded_bytes_by_iteration
                .checked_add(d.len())
                .expect("iterator should not yield more than 2^32 bytes");
        }
    }
    {
        // A self-referential chain (NEXT pointing back to index 0) must also
        // terminate without exceeding the 2^32-byte bound.
        let descs = vec![RawDescriptor::from(SplitDescriptor::new(
            0x1000,
            0xffff_ffff,
            VRING_DESC_F_NEXT as u16,
            0,
        ))];
        vq.add_desc_chains(&descs, 0).unwrap();
        let mut yielded_bytes_by_iteration = 0_u32;
        for d in q.iter(m).unwrap().next().unwrap() {
            yielded_bytes_by_iteration = yielded_bytes_by_iteration
                .checked_add(d.len())
                .expect("iterator should not yield more than 2^32 bytes");
        }
    }
}
// Regression test (originally found by fuzzing): popping from a queue whose
// `size` is 0 must not panic with a division by zero in the avail-ring
// offset computation (guarded by `checked_rem` in `AvailIter::next`).
#[test]
fn test_regression_iterator_division() {
    let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(m, 1);
    // Arbitrary fuzzer-generated descriptor payload; values are irrelevant.
    let descriptors: Vec<RawDescriptor> = vec![RawDescriptor::from(SplitDescriptor::new(
        14178673876262995140,
        3301229764,
        50372,
        50372,
    ))];
    vq.build_desc_chain(&descriptors).unwrap();
    // Hand-built queue with size = 0 and garbage addresses.
    let mut q = Queue {
        max_size: 38,
        next_avail: Wrapping(0),
        next_used: Wrapping(0),
        event_idx_enabled: false,
        num_added: Wrapping(0),
        size: 0,
        ready: false,
        desc_table: GuestAddress(12837708984796196),
        avail_ring: GuestAddress(0),
        used_ring: GuestAddress(9943947977301164032),
    };
    assert!(q.pop_descriptor_chain(m).is_none());
}
// Verifies every fallible setter's error variant, and that a failed set
// leaves the previous value in place.
#[test]
fn test_setters_error_cases() {
    assert_eq!(Queue::new(15).unwrap_err(), Error::InvalidMaxSize);
    let mut q = Queue::new(16).unwrap();
    // Misaligned descriptor table (needs 16-byte alignment).
    let expected_val = q.desc_table.0;
    assert_eq!(
        q.try_set_desc_table_address(GuestAddress(0xf)).unwrap_err(),
        Error::InvalidDescTableAlign
    );
    assert_eq!(q.desc_table(), expected_val);
    // Misaligned available ring (needs 2-byte alignment).
    let expected_val = q.avail_ring.0;
    assert_eq!(
        q.try_set_avail_ring_address(GuestAddress(0x1)).unwrap_err(),
        Error::InvalidAvailRingAlign
    );
    assert_eq!(q.avail_ring(), expected_val);
    // Misaligned used ring (needs 4-byte alignment).
    let expected_val = q.used_ring.0;
    assert_eq!(
        q.try_set_used_ring_address(GuestAddress(0x3)).unwrap_err(),
        Error::InvalidUsedRingAlign
    );
    assert_eq!(q.used_ring(), expected_val);
    // Non-power-of-two size.
    let expected_val = q.size;
    assert_eq!(q.try_set_size(15).unwrap_err(), Error::InvalidSize);
    assert_eq!(q.size(), expected_val)
}
// Regression test: after `reset`, the queue points at the default (zero)
// ring addresses, so popping must yield nothing instead of reprocessing
// stale guest data — both right after reset and after only the avail ring
// address has been reconfigured.
#[test]
fn test_regression_timeout_after_reset() {
    let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(m, 1024);
    // Arbitrary fuzzer-style descriptor soup left behind in guest memory.
    let descriptors: Vec<RawDescriptor> = vec![
        RawDescriptor::from(SplitDescriptor::new(21508325467, 0, 1, 4)),
        RawDescriptor::from(SplitDescriptor::new(2097152, 4096, 3, 0)),
        RawDescriptor::from(SplitDescriptor::new(
            18374686479672737792,
            4294967295,
            65535,
            29,
        )),
        RawDescriptor::from(SplitDescriptor::new(76842670169653248, 1114115, 0, 0)),
        RawDescriptor::from(SplitDescriptor::new(16, 983040, 126, 3)),
        RawDescriptor::from(SplitDescriptor::new(897648164864, 0, 0, 0)),
        RawDescriptor::from(SplitDescriptor::new(111669149722, 0, 0, 0)),
    ];
    vq.build_multiple_desc_chains(&descriptors).unwrap();
    let mut q: Queue = vq.create_queue().unwrap();
    q.reset();
    q.set_ready(true);
    // With default ring addresses, nothing must be popped.
    let mut counter = 0;
    while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
        while desc_chain.next().is_some() {
            counter += 1;
        }
    }
    assert_eq!(counter, 0);
    q.reset();
    // Reconfiguring only the avail ring address must still yield nothing.
    q.set_avail_ring_address(Some(0x1000), None);
    assert_eq!(q.avail_ring, GuestAddress(0x1000));
    counter = 0;
    while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
        while desc_chain.next().is_some() {
            counter += 1;
        }
    }
    assert_eq!(counter, 0);
}
}