use std::mem::size_of;
use std::num::Wrapping;
use std::ops::Deref;
use std::sync::atomic::{fence, Ordering};

use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;
use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};

use crate::defs::{
    DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR,
    VIRTQ_AVAIL_ELEMENT_SIZE, VIRTQ_AVAIL_RING_HEADER_SIZE, VIRTQ_AVAIL_RING_META_SIZE,
    VIRTQ_USED_ELEMENT_SIZE, VIRTQ_USED_RING_HEADER_SIZE, VIRTQ_USED_RING_META_SIZE,
};
use crate::{
    error, Descriptor, DescriptorChain, Error, QueueGuard, QueueOwnedT, QueueState, QueueT,
    VirtqUsedElem,
};

/// The maximum queue size (in number of descriptors) permitted by the virtio
/// specification.
pub const MAX_QUEUE_SIZE: u16 = 32768;

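/// A struct describing the device side of a virtio split queue.
///
/// It holds the guest addresses of the descriptor table and of the available
/// and used rings, the device-internal `next_avail`/`next_used` positions,
/// and the bookkeeping required for the `VIRTIO_F_EVENT_IDX` notification
/// suppression feature.
///
/// A minimal device-side setup sketch (the addresses are illustrative
/// placeholders, assuming the driver configured them via the transport):
///
/// ```rust,ignore
/// use virtio_queue::{Queue, QueueT};
///
/// let mut queue = Queue::new(16).unwrap();
/// queue.set_size(16);
/// queue.set_desc_table_address(Some(0x1000), None);
/// queue.set_avail_ring_address(Some(0x2000), None);
/// queue.set_used_ring_address(Some(0x3000), None);
/// queue.set_ready(true);
/// ```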
#[derive(Debug, Default, PartialEq, Eq)]
pub struct Queue {
    /// The maximum size in elements offered by the device.
    max_size: u16,

    /// The next index of the available ring that the device will process.
    next_avail: Wrapping<u16>,

    /// The next index of the used ring that the device will write to.
    next_used: Wrapping<u16>,

    /// Whether the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
    event_idx_enabled: bool,

    /// The number of descriptor chains placed in the used ring since the
    /// driver was last notified.
    num_added: Wrapping<u16>,

    /// The queue size in elements, as configured by the driver.
    size: u16,

    /// Whether the queue has been marked ready by the driver.
    ready: bool,

    /// The guest address of the descriptor table.
    desc_table: GuestAddress,

    /// The guest address of the available ring.
    avail_ring: GuestAddress,

    /// The guest address of the used ring.
    used_ring: GuestAddress,
}

impl Queue {
    /// Sets the queue size, rejecting values that are zero, larger than
    /// `max_size`, or not a power of two (all invalid per the virtio spec).
    pub fn try_set_size(&mut self, size: u16) -> Result<(), Error> {
        if size > self.max_size() || size == 0 || (size & (size - 1)) != 0 {
            return Err(Error::InvalidSize);
        }
        self.size = size;
        Ok(())
    }

    /// Sets the descriptor table address, which must be 16-byte aligned.
    pub fn try_set_desc_table_address(&mut self, desc_table: GuestAddress) -> Result<(), Error> {
        if desc_table.mask(0xf) != 0 {
            return Err(Error::InvalidDescTableAlign);
        }
        self.desc_table = desc_table;

        Ok(())
    }

    /// Sets the available ring address, which must be 2-byte aligned.
    pub fn try_set_avail_ring_address(&mut self, avail_ring: GuestAddress) -> Result<(), Error> {
        if avail_ring.mask(0x1) != 0 {
            return Err(Error::InvalidAvailRingAlign);
        }
        self.avail_ring = avail_ring;
        Ok(())
    }

    /// Sets the used ring address, which must be 4-byte aligned.
    pub fn try_set_used_ring_address(&mut self, used_ring: GuestAddress) -> Result<(), Error> {
        if used_ring.mask(0x3) != 0 {
            return Err(Error::InvalidUsedRingAlign);
        }
        self.used_ring = used_ring;
        Ok(())
    }

    /// Returns a snapshot of the queue's configuration and internal indexes,
    /// e.g. for state save/restore.
    pub fn state(&self) -> QueueState {
        QueueState {
            max_size: self.max_size,
            next_avail: self.next_avail(),
            next_used: self.next_used(),
            event_idx_enabled: self.event_idx_enabled,
            size: self.size,
            ready: self.ready,
            desc_table: self.desc_table(),
            avail_ring: self.avail_ring(),
            used_ring: self.used_ring(),
        }
    }

    // Helper to write the `avail_event` field of the used ring (only
    // meaningful when `VIRTIO_F_EVENT_IDX` was negotiated).
    fn set_avail_event<M: GuestMemory>(
        &self,
        mem: &M,
        val: u16,
        order: Ordering,
    ) -> Result<(), Error> {
        // `avail_event` lives right after the last used ring element:
        // used_ring + 4 + 8 * size.
        let avail_event_offset =
            VIRTQ_USED_RING_HEADER_SIZE + VIRTQ_USED_ELEMENT_SIZE * u64::from(self.size);
        let addr = self
            .used_ring
            .checked_add(avail_event_offset)
            .ok_or(Error::AddressOverflow)?;

        mem.store(u16::to_le(val), addr, order)
            .map_err(Error::GuestMemory)
    }

    // Helper to write the used ring `flags` field.
    fn set_used_flags<M: GuestMemory>(
        &mut self,
        mem: &M,
        val: u16,
        order: Ordering,
    ) -> Result<(), Error> {
        mem.store(u16::to_le(val), self.used_ring, order)
            .map_err(Error::GuestMemory)
    }

    // Enables or disables driver notifications, either through the used ring
    // `flags` field or, when `VIRTIO_F_EVENT_IDX` was negotiated, through
    // `avail_event`.
    fn set_notification<M: GuestMemory>(&mut self, mem: &M, enable: bool) -> Result<(), Error> {
        if enable {
            if self.event_idx_enabled {
                // Ask to be notified once the driver reaches `next_avail`.
                self.set_avail_event(mem, self.next_avail.0, Ordering::Relaxed)
            } else {
                self.set_used_flags(mem, 0, Ordering::Relaxed)
            }
        } else if !self.event_idx_enabled {
            self.set_used_flags(mem, VRING_USED_F_NO_NOTIFY as u16, Ordering::Relaxed)
        } else {
            // With `VIRTIO_F_EVENT_IDX`, notifications stay suppressed simply
            // by not advancing `avail_event`, so there is nothing to write.
            Ok(())
        }
    }

    // Returns the driver's `used_event` value, which lives right after the
    // last available ring element: avail_ring + 4 + 2 * size (only meaningful
    // when `VIRTIO_F_EVENT_IDX` was negotiated).
    fn used_event<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
        let used_event_offset =
            VIRTQ_AVAIL_RING_HEADER_SIZE + u64::from(self.size) * VIRTQ_AVAIL_ELEMENT_SIZE;
        let used_event_addr = self
            .avail_ring
            .checked_add(used_event_offset)
            .ok_or(Error::AddressOverflow)?;

        mem.load(used_event_addr, order)
            .map(u16::from_le)
            .map(Wrapping)
            .map_err(Error::GuestMemory)
    }
}

impl<'a> QueueGuard<'a> for Queue {
    type G = &'a mut Self;
}

impl QueueT for Queue {
    fn new(max_size: u16) -> Result<Self, Error> {
        if max_size == 0 || max_size > MAX_QUEUE_SIZE || (max_size & (max_size - 1)) != 0 {
            return Err(Error::InvalidMaxSize);
        }
        Ok(Queue {
            max_size,
            size: max_size,
            ready: false,
            desc_table: GuestAddress(DEFAULT_DESC_TABLE_ADDR),
            avail_ring: GuestAddress(DEFAULT_AVAIL_RING_ADDR),
            used_ring: GuestAddress(DEFAULT_USED_RING_ADDR),
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            event_idx_enabled: false,
            num_added: Wrapping(0),
        })
    }

    fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool {
        let queue_size = self.size as u64;
        let desc_table = self.desc_table;
        let desc_table_size = size_of::<Descriptor>() as u64 * queue_size;
        let avail_ring = self.avail_ring;
        let avail_ring_size = VIRTQ_AVAIL_RING_META_SIZE + VIRTQ_AVAIL_ELEMENT_SIZE * queue_size;
        let used_ring = self.used_ring;
        let used_ring_size = VIRTQ_USED_RING_META_SIZE + VIRTQ_USED_ELEMENT_SIZE * queue_size;

        if !self.ready {
            error!("attempt to use virtio queue that is not marked ready");
            false
        } else if desc_table
            .checked_add(desc_table_size)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
                desc_table.raw_value(),
                desc_table_size
            );
            false
        } else if avail_ring
            .checked_add(avail_ring_size)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                avail_ring.raw_value(),
                avail_ring_size
            );
            false
        } else if used_ring
            .checked_add(used_ring_size)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                used_ring.raw_value(),
                used_ring_size
            );
            false
        } else {
            true
        }
    }

    fn reset(&mut self) {
        self.ready = false;
        self.size = self.max_size;
        self.desc_table = GuestAddress(DEFAULT_DESC_TABLE_ADDR);
        self.avail_ring = GuestAddress(DEFAULT_AVAIL_RING_ADDR);
        self.used_ring = GuestAddress(DEFAULT_USED_RING_ADDR);
        self.next_avail = Wrapping(0);
        self.next_used = Wrapping(0);
        self.num_added = Wrapping(0);
        self.event_idx_enabled = false;
    }

    fn lock(&mut self) -> <Self as QueueGuard>::G {
        self
    }

    fn max_size(&self) -> u16 {
        self.max_size
    }

    fn size(&self) -> u16 {
        self.size
    }

    fn set_size(&mut self, size: u16) {
        if self.try_set_size(size).is_err() {
            error!("virtio queue with invalid size: {}", size);
        }
    }

    fn ready(&self) -> bool {
        self.ready
    }

    fn set_ready(&mut self, ready: bool) {
        self.ready = ready;
    }

    fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.desc_table.0 as u32) as u64;
        let high = high.unwrap_or((self.desc_table.0 >> 32) as u32) as u64;

        let desc_table = GuestAddress((high << 32) | low);
        if self.try_set_desc_table_address(desc_table).is_err() {
            error!("virtio queue descriptor table breaks alignment constraints");
        }
    }

    fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.avail_ring.0 as u32) as u64;
        let high = high.unwrap_or((self.avail_ring.0 >> 32) as u32) as u64;

        let avail_ring = GuestAddress((high << 32) | low);
        if self.try_set_avail_ring_address(avail_ring).is_err() {
            error!("virtio queue available ring breaks alignment constraints");
        }
    }

    fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.used_ring.0 as u32) as u64;
        let high = high.unwrap_or((self.used_ring.0 >> 32) as u32) as u64;

        let used_ring = GuestAddress((high << 32) | low);
        if self.try_set_used_ring_address(used_ring).is_err() {
            error!("virtio queue used ring breaks alignment constraints");
        }
    }

    fn set_event_idx(&mut self, enabled: bool) {
        self.event_idx_enabled = enabled;
    }

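    // Layout note (virtio 1.1, "Split Virtqueues"): both the available and
    // the used ring start with a 16-bit `flags` field followed by a 16-bit
    // `idx` field, which is why the two index accessors below read at
    // offset 2 into their respective rings.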
    fn avail_idx<M>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error>
    where
        M: GuestMemory + ?Sized,
    {
        let addr = self
            .avail_ring
            .checked_add(2)
            .ok_or(Error::AddressOverflow)?;

        mem.load(addr, order)
            .map(u16::from_le)
            .map(Wrapping)
            .map_err(Error::GuestMemory)
    }

    fn used_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
        let addr = self
            .used_ring
            .checked_add(2)
            .ok_or(Error::AddressOverflow)?;

        mem.load(addr, order)
            .map(u16::from_le)
            .map(Wrapping)
            .map_err(Error::GuestMemory)
    }

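    // Each used ring entry is a `virtq_used_elem { id: u32, len: u32 }`, so
    // slot `n` lives at `used_ring + 4 + 8 * n`. `add_used` below writes the
    // element first and only then publishes the new used index with release
    // semantics.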
    fn add_used<M: GuestMemory>(
        &mut self,
        mem: &M,
        head_index: u16,
        len: u32,
    ) -> Result<(), Error> {
        if head_index >= self.size {
            error!(
                "attempted to add out of bounds descriptor to used ring: {}",
                head_index
            );
            return Err(Error::InvalidDescriptorIndex);
        }

        let next_used_index = u64::from(self.next_used.0 % self.size);
        let offset = VIRTQ_USED_RING_HEADER_SIZE + next_used_index * VIRTQ_USED_ELEMENT_SIZE;
        let addr = self
            .used_ring
            .checked_add(offset)
            .ok_or(Error::AddressOverflow)?;
        mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
            .map_err(Error::GuestMemory)?;

        self.next_used += Wrapping(1);
        self.num_added += Wrapping(1);

        // Publish the new used index; `Release` makes sure the element write
        // above is visible to the driver before the index update.
        mem.store(
            u16::to_le(self.next_used.0),
            self.used_ring
                .checked_add(2)
                .ok_or(Error::AddressOverflow)?,
            Ordering::Release,
        )
        .map_err(Error::GuestMemory)
    }

    fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
        self.set_notification(mem, true)?;
        // Re-check for chains the driver may have added while notifications
        // were still disabled; the fence orders the flag/event write above
        // before the index read below.
        fence(Ordering::SeqCst);

        self.avail_idx(mem, Ordering::Relaxed)
            .map(|idx| idx != self.next_avail)
    }

    fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> {
        self.set_notification(mem, false)
    }

    fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
        let used_idx = self.next_used;

        // Order the used ring updates before the `used_event` read below.
        fence(Ordering::SeqCst);

        if self.event_idx_enabled {
            let used_event = self.used_event(mem, Ordering::Relaxed)?;
            let old = used_idx - self.num_added;
            self.num_added = Wrapping(0);

            // The `vring_need_event` check from the virtio spec: notify only
            // if `used_event` falls in the window [old, used_idx), computed
            // with wrapping arithmetic.
            return Ok(used_idx - used_event - Wrapping(1) < used_idx - old);
        }

        Ok(true)
    }
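
    // A worked example of the `vring_need_event`-style check used in
    // `needs_notification` above, with wrapping u16 arithmetic (the concrete
    // numbers are illustrative only):
    //
    //   used_event = 4, old = 3, used_idx = 5:
    //     used_idx - used_event - 1 = 0 <  used_idx - old = 2  => notify
    //   used_event = 4, old = 5, used_idx = 6:
    //     used_idx - used_event - 1 = 1 >= used_idx - old = 1  => skip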

    fn next_avail(&self) -> u16 {
        self.next_avail.0
    }

    fn set_next_avail(&mut self, next_avail: u16) {
        self.next_avail = Wrapping(next_avail);
    }

    fn next_used(&self) -> u16 {
        self.next_used.0
    }

    fn set_next_used(&mut self, next_used: u16) {
        self.next_used = Wrapping(next_used);
    }

    fn desc_table(&self) -> u64 {
        self.desc_table.0
    }

    fn avail_ring(&self) -> u64 {
        self.avail_ring.0
    }

    fn used_ring(&self) -> u64 {
        self.used_ring.0
    }

    fn event_idx_enabled(&self) -> bool {
        self.event_idx_enabled
    }

    fn pop_descriptor_chain<M>(&mut self, mem: M) -> Option<DescriptorChain<M>>
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        match self.iter(mem) {
            Ok(mut iter) => iter.next(),
            Err(e) => {
                error!("Iterator error {}", e);
                None
            }
        }
    }
}

impl QueueOwnedT for Queue {
    fn iter<M>(&mut self, mem: M) -> Result<AvailIter<'_, M>, Error>
    where
        M: Deref,
        M::Target: GuestMemory,
    {
        // A queue that was reset still has the default available ring address
        // of 0, so it cannot be iterated even if marked ready again.
        if !self.ready || self.avail_ring == GuestAddress(0) {
            return Err(Error::QueueNotReady);
        }

        self.avail_idx(mem.deref(), Ordering::Acquire)
            .map(move |idx| AvailIter::new(mem, idx, self))?
    }

    fn go_to_previous_position(&mut self) {
        self.next_avail -= Wrapping(1);
    }
}

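/// An iterator over the descriptor chains the driver has made available.
///
/// Each call to `next` consumes one chain and advances the parent queue's
/// `next_avail` index. A typical device-side processing loop, mirroring the
/// tests below (`process_chain` is a placeholder for device-specific logic):
///
/// ```rust,ignore
/// loop {
///     q.disable_notification(mem)?;
///
///     while let Some(chain) = q.iter(mem)?.next() {
///         process_chain(chain);
///     }
///
///     if !q.enable_notification(mem)? {
///         break;
///     }
/// }
/// ```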
#[derive(Debug)]
pub struct AvailIter<'b, M> {
    mem: M,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    queue_size: u16,
    last_index: Wrapping<u16>,
    next_avail: &'b mut Wrapping<u16>,
}

impl<'b, M> AvailIter<'b, M>
where
    M: Deref,
    M::Target: GuestMemory,
{
    pub(crate) fn new(mem: M, idx: Wrapping<u16>, queue: &'b mut Queue) -> Result<Self, Error> {
        // The driver can publish at most `queue.size` chains at a time, so a
        // snapshot of the available index further ahead than that is bogus.
        if (idx - queue.next_avail).0 > queue.size {
            return Err(Error::InvalidAvailRingIndex);
        }

        Ok(AvailIter {
            mem,
            desc_table: queue.desc_table,
            avail_ring: queue.avail_ring,
            queue_size: queue.size,
            last_index: idx,
            next_avail: &mut queue.next_avail,
        })
    }

    /// Goes back one position in the available descriptor chain offered by the driver.
    pub fn go_to_previous_position(&mut self) {
        *self.next_avail -= Wrapping(1);
    }
}

impl<'b, M> Iterator for AvailIter<'b, M>
where
    M: Clone + Deref,
    M::Target: GuestMemory,
{
    type Item = DescriptorChain<M>;

    fn next(&mut self) -> Option<Self::Item> {
        if *self.next_avail == self.last_index {
            return None;
        }

        // `checked_rem` also protects against a (bogus) queue size of zero.
        let elem_off =
            u64::from(self.next_avail.0.checked_rem(self.queue_size)?) * VIRTQ_AVAIL_ELEMENT_SIZE;
        let offset = VIRTQ_AVAIL_RING_HEADER_SIZE + elem_off;

        let addr = self.avail_ring.checked_add(offset)?;
        let head_index: u16 = self
            .mem
            .load(addr, Ordering::Acquire)
            .map(u16::from_le)
            .map_err(|_| error!("Failed to read from memory {:x}", addr.raw_value()))
            .ok()?;

        *self.next_avail += Wrapping(1);

        Some(DescriptorChain::new(
            self.mem.clone(),
            self.desc_table,
            self.queue_size,
            head_index,
        ))
    }
}

#[cfg(any(test, feature = "test-utils"))]
impl PartialEq for Error {
    fn eq(&self, other: &Self) -> bool {
        // Errors are only compared in tests, so matching on the `Display`
        // output is good enough.
        format!("{}", &self) == format!("{}", other)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::defs::{DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR};
    use crate::mock::MockSplitQueue;
    use crate::Descriptor;
    use virtio_bindings::bindings::virtio_ring::{
        VRING_DESC_F_NEXT, VRING_DESC_F_WRITE, VRING_USED_F_NO_NOTIFY,
    };

    use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};

    #[test]
    fn test_queue_is_valid() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(m));

        // The queue is not valid unless marked ready.
        q.set_ready(false);
        assert!(!q.ready());
        assert!(!q.is_valid(m));
        q.set_ready(true);

        // Sizes that are zero, too large, or not a power of two are rejected,
        // leaving the previous value in place.
        q.set_size(q.max_size() << 1);
        assert_eq!(q.size, q.max_size());

        q.set_size(0);
        assert_eq!(q.size, q.max_size());

        q.set_size(11);
        assert_eq!(q.size, q.max_size());

        q.set_size(4);
        assert_eq!(q.size, 4);
        q.size = q.max_size();

        // The descriptor table must be 16-byte aligned and fit in guest memory.
        q.set_desc_table_address(Some(0xf), None);
        assert_eq!(q.desc_table.0, vq.desc_table_addr().0);
        q.set_desc_table_address(Some(0xffff_fff0), None);
        assert_eq!(q.desc_table.0, 0xffff_fff0);
        assert!(!q.is_valid(m));
        q.set_desc_table_address(Some(0x10), None);
        assert_eq!(q.desc_table.0, 0x10);
        assert!(q.is_valid(m));
        let addr = vq.desc_table_addr().0;
        q.set_desc_table_address(Some(addr as u32), Some((addr >> 32) as u32));

        // The available ring must be 2-byte aligned and fit in guest memory.
        q.set_avail_ring_address(Some(0x1), None);
        assert_eq!(q.avail_ring.0, vq.avail_addr().0);
        q.set_avail_ring_address(Some(0xffff_fffe), None);
        assert_eq!(q.avail_ring.0, 0xffff_fffe);
        assert!(!q.is_valid(m));
        q.set_avail_ring_address(Some(0x2), None);
        assert_eq!(q.avail_ring.0, 0x2);
        assert!(q.is_valid(m));
        let addr = vq.avail_addr().0;
        q.set_avail_ring_address(Some(addr as u32), Some((addr >> 32) as u32));

        // The used ring must be 4-byte aligned and fit in guest memory.
        q.set_used_ring_address(Some(0x3), None);
        assert_eq!(q.used_ring.0, vq.used_addr().0);
        q.set_used_ring_address(Some(0xffff_fffc), None);
        assert_eq!(q.used_ring.0, 0xffff_fffc);
        assert!(!q.is_valid(m));
        q.set_used_ring_address(Some(0x4), None);
        assert_eq!(q.used_ring.0, 0x4);
        let addr = vq.used_addr().0;
        q.set_used_ring_address(Some(addr as u32), Some((addr >> 32) as u32));
        assert!(q.is_valid(m));
    }

    #[test]
    fn test_add_used() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(0));
        assert_eq!(u16::from_le(vq.used().idx().load()), 0);

        // A head index outside the queue bounds must be rejected without
        // touching the used ring.
        assert!(q.add_used(mem, 16, 0x1000).is_err());
        assert_eq!(u16::from_le(vq.used().idx().load()), 0);

        q.add_used(mem, 1, 0x1000).unwrap();
        assert_eq!(q.next_used, Wrapping(1));
        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(1));
        assert_eq!(u16::from_le(vq.used().idx().load()), 1);

        let x = vq.used().ring().ref_at(0).unwrap().load();
        assert_eq!(x.id(), 1);
        assert_eq!(x.len(), 0x1000);
    }

    #[test]
    fn test_reset_queue() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        q.set_size(8);
        q.set_desc_table_address(Some(0x5000), None);
        q.set_event_idx(true);
        q.set_next_avail(2);
        q.set_next_used(4);
        q.num_added = Wrapping(15);
        assert_eq!(q.size, 8);
        assert!(q.ready);
        assert_ne!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
        assert_ne!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
        assert_ne!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
        assert_ne!(q.next_avail, Wrapping(0));
        assert_ne!(q.next_used, Wrapping(0));
        assert_ne!(q.num_added, Wrapping(0));
        assert!(q.event_idx_enabled);

        // After a reset, every field except `max_size` is back to its default.
        q.reset();
        assert_eq!(q.size, 16);
        assert!(!q.ready);
        assert_eq!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
        assert_eq!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
        assert_eq!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
        assert_eq!(q.next_avail, Wrapping(0));
        assert_eq!(q.next_used, Wrapping(0));
        assert_eq!(q.num_added, Wrapping(0));
        assert!(!q.event_idx_enabled);
    }

    #[test]
    fn test_needs_notification() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let qsize = 16;
        let vq = MockSplitQueue::new(mem, qsize);
        let mut q: Queue = vq.create_queue().unwrap();
        let avail_addr = vq.avail_addr();

        // Without VIRTIO_F_EVENT_IDX, a notification is always needed.
        for i in 0..qsize {
            q.next_used = Wrapping(i);
            assert!(q.needs_notification(mem).unwrap());
        }

        // Set `used_event` (at avail_ring + 4 + 2 * qsize) to 4.
        mem.write_obj::<u16>(
            u16::to_le(4),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();
        q.set_event_idx(true);

        let wrap = u32::from(u16::MAX) + 1;

        // With `num_added` pinned to 1, a notification is needed exactly when
        // the used index crosses `used_event` (at 5, and again after a full
        // 16-bit wrap-around).
        for i in 0..wrap + 12 {
            q.next_used = Wrapping(i as u16);
            q.num_added = Wrapping(1);
            let expected = i == 5 || i == (5 + wrap);
            assert_eq!((q.needs_notification(mem).unwrap(), i), (expected, i));
        }

        mem.write_obj::<u16>(
            u16::to_le(8),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();

        // `num_added` is 0 at this point, so the window [old, used_idx) is
        // empty and no notification is needed.
        assert!(!q.needs_notification(mem).unwrap());

        mem.write_obj::<u16>(
            u16::to_le(15),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();

        q.num_added = Wrapping(1);
        assert!(!q.needs_notification(mem).unwrap());

        q.next_used = Wrapping(15);
        q.num_added = Wrapping(1);
        assert!(!q.needs_notification(mem).unwrap());

        // Only crossing `used_event` (moving from 15 to 16) needs a
        // notification, and only once.
        q.next_used = Wrapping(16);
        q.num_added = Wrapping(1);
        assert!(q.needs_notification(mem).unwrap());

        assert!(!q.needs_notification(mem).unwrap());

        // The comparison keeps working close to the wrap-around point.
        mem.write_obj::<u16>(
            u16::to_le(u16::MAX - 3),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();
        q.next_used = Wrapping(u16::MAX - 2);
        q.num_added = Wrapping(1);
        assert!(q.needs_notification(mem).unwrap());
    }

    #[test]
    fn test_enable_disable_notification() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();
        let used_addr = vq.used_addr();

        assert!(!q.event_idx_enabled);

        // Without VIRTIO_F_EVENT_IDX, notifications are toggled through the
        // used ring `flags` field.
        q.enable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, 0);

        q.disable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, VRING_USED_F_NO_NOTIFY as u16);

        q.enable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, 0);

        // With VIRTIO_F_EVENT_IDX, `enable_notification` reports whether new
        // chains became available while notifications were off.
        q.set_event_idx(true);
        let avail_addr = vq.avail_addr();
        mem.write_obj::<u16>(u16::to_le(2), avail_addr.unchecked_add(2))
            .unwrap();

        assert!(q.enable_notification(mem).unwrap());
        q.next_avail = Wrapping(2);
        assert!(!q.enable_notification(mem).unwrap());

        mem.write_obj::<u16>(u16::to_le(8), avail_addr.unchecked_add(2))
            .unwrap();

        assert!(q.enable_notification(mem).unwrap());
        q.next_avail = Wrapping(8);
        assert!(!q.enable_notification(mem).unwrap());
    }

    #[test]
    fn test_consume_chains_with_notif() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(mem));

        // Create 5 descriptor chains: (0, 1), (2, 3, 4), (5, 6), (7, 8),
        // (9, 10, 11, 12).
        let mut descs = Vec::new();
        for i in 0..13 {
            let flags = match i {
                1 | 4 | 6 | 8 | 12 => 0,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(Descriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                flags as u16,
                i + 1,
            ));
        }

        vq.add_desc_chains(&descs, 0).unwrap();
        // Make only the first two chains available for now.
        vq.avail().idx().store(u16::to_le(2));
        assert_eq!(q.next_avail(), 0);

        let mut i = 0;

        loop {
            i += 1;
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                // Process the chain: accumulate the length of its
                // device-writable descriptors, then put it in the used ring.
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        // Both available chains are consumed in a single pass.
        assert_eq!(i, 1);
        assert_eq!(q.next_avail(), 2);
        assert_eq!(q.next_used(), 2);

        // Make one more chain available now, and another one while the device
        // is processing, to exercise the re-check in `enable_notification`.
        vq.avail().idx().store(u16::to_le(3));
        i = 0;

        loop {
            i += 1;
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }

            // Simulate the driver adding a chain while the device is
            // processing.
            vq.avail().idx().store(u16::to_le(4));
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        // A second pass is needed to pick up the chain added while processing.
        assert_eq!(i, 2);
        assert_eq!(q.next_avail(), 4);
        assert_eq!(q.next_used(), 4);

        // Make the remaining entries available and consume them as well.
        vq.avail().idx().store(u16::to_le(7));
        loop {
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        assert_eq!(q.next_avail(), 7);
        assert_eq!(q.next_used(), 7);
    }

    #[test]
    fn test_invalid_avail_idx() {
        // The driver can never legitimately report an available index that is
        // more than `queue.size` ahead of `next_avail`; iterating must fail
        // instead of processing bogus entries.
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(mem));

        // Create 3 descriptor chains: (0, 1), (2, 3, 4), (5, 6).
        let mut descs = Vec::new();
        for i in 0..7 {
            let flags = match i {
                1 | 4 | 6 => 0,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(Descriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                flags as u16,
                i + 1,
            ));
        }

        vq.add_desc_chains(&descs, 0).unwrap();
        vq.avail().idx().store(u16::to_le(3));
        assert_eq!(q.next_avail(), 0);
        assert_eq!(q.next_used(), 0);

        loop {
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        assert_eq!(q.next_avail(), 3);
        assert_eq!(q.avail_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
        assert_eq!(q.next_used(), 3);
        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
        assert!(q.lock().ready());

        // An index behind `next_avail` is more than `size` ahead in wrapping
        // arithmetic, so the iterator must report an error.
        vq.avail().idx().store(u16::to_le(1));
        assert!(q.iter(mem).is_err());
    }

    #[test]
    fn test_iterator_and_avail_idx() {
        // Check that the iterator correctly validates the available index
        // around the u16 wrap-around point.
        let queue_size = 2;
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, queue_size);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(mem));

        // Create two single-descriptor chains.
        let mut descs = Vec::new();
        for i in 0..queue_size {
            descs.push(Descriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                0_u16,
                i + 1,
            ));
        }
        vq.add_desc_chains(&descs, 0).unwrap();

        // Move the device index right before the wrap-around.
        q.set_next_avail(u16::MAX);

        // An available index up to `queue_size` ahead is accepted...
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_ok());
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size - 1);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_ok());

        // ...but one step further must be rejected.
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size + 1);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_err());
    }

    #[test]
    fn test_descriptor_and_iterator() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(m));

        // Create 3 descriptor chains: (0, 1), (2, 3, 4), (5, 6).
        let mut descs = Vec::new();
        for j in 0..7 {
            let flags = match j {
                1 | 6 => 0,
                2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
                4 => VRING_DESC_F_WRITE,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(Descriptor::new(
                (0x1000 * (j + 1)) as u64,
                0x1000,
                flags as u16,
                j + 1,
            ));
        }

        vq.add_desc_chains(&descs, 0).unwrap();

        let mut i = q.iter(m).unwrap();

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 0);

            let mut iter = c;
            assert!(iter.next().is_some());
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 2);

            // Descriptors 2 and 4 are the writable ones in this chain.
            let mut iter = c.writable();
            assert!(iter.next().is_some());
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 5);

            // Descriptor 6 is the only readable one in this chain.
            let mut iter = c.readable();
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }
    }

    #[test]
    fn test_iterator() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        q.size = q.max_size;
        q.desc_table = vq.desc_table_addr();
        q.avail_ring = vq.avail_addr();
        q.used_ring = vq.used_addr();
        assert!(q.is_valid(m));

        {
            // An iterator cannot be created from a queue that is not ready.
            q.ready = false;
            assert!(q.iter(m).is_err());
        }

        q.ready = true;

        {
            // Create 2 descriptor chains: (0, 1) and (2, 3, 4).
            let mut descs = Vec::new();
            for j in 0..5u16 {
                let flags = match j {
                    1 | 4 => 0,
                    _ => VRING_DESC_F_NEXT,
                };

                descs.push(Descriptor::new(
                    (0x1000 * (j + 1)) as u64,
                    0x1000,
                    flags as u16,
                    j + 1,
                ));
            }
            vq.add_desc_chains(&descs, 0).unwrap();

            let mut i = q.iter(m).unwrap();

            {
                let mut c = i.next().unwrap();
                assert_eq!(c.head_index(), 0);

                c.next().unwrap();
                assert!(c.next().is_some());
                assert!(c.next().is_none());
                assert_eq!(c.head_index(), 0);
            }

            {
                let mut c = i.next().unwrap();
                assert_eq!(c.head_index(), 2);

                c.next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                assert!(c.next().is_none());
                assert_eq!(c.head_index(), 2);
            }

            {
                // There are no more chains, but going back one position makes
                // the last chain available again.
                assert!(i.next().is_none());
                i.go_to_previous_position();
                let mut c = q.iter(m).unwrap().next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                assert!(c.next().is_none());
            }
        }

        {
            // A chain advertising more than 2^32 bytes in total must stop
            // yielding descriptors before the byte counter overflows.
            let descs = vec![
                Descriptor::new(0x1000, 0xffff_ffff, VRING_DESC_F_NEXT as u16, 1),
                Descriptor::new(0x1000, 0x1234_5678, 0, 2),
            ];
            vq.add_desc_chains(&descs, 0).unwrap();
            let mut yielded_bytes_by_iteration = 0_u32;
            for d in q.iter(m).unwrap().next().unwrap() {
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
                    .checked_add(d.len())
                    .expect("iterator should not yield more than 2^32 bytes");
            }
        }

        {
            // The same holds for a self-referential chain (a descriptor whose
            // `next` points back to itself).
            let descs = vec![Descriptor::new(
                0x1000,
                0xffff_ffff,
                VRING_DESC_F_NEXT as u16,
                0,
            )];
            vq.add_desc_chains(&descs, 0).unwrap();
            let mut yielded_bytes_by_iteration = 0_u32;
            for d in q.iter(m).unwrap().next().unwrap() {
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
                    .checked_add(d.len())
                    .expect("iterator should not yield more than 2^32 bytes");
            }
        }
    }

    #[test]
    fn test_regression_iterator_division() {
        // Regression test: a queue whose driver-provided state is garbage
        // (among other things, a size of 0) must yield no chains instead of
        // panicking, e.g. with a division by zero while computing ring
        // offsets.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 1);
        let descriptors: Vec<Descriptor> = vec![Descriptor::new(
            14178673876262995140,
            3301229764,
            50372,
            50372,
        )];
        vq.build_desc_chain(&descriptors).unwrap();

        let mut q = Queue {
            max_size: 38,
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            event_idx_enabled: false,
            num_added: Wrapping(0),
            size: 0,
            ready: false,
            desc_table: GuestAddress(12837708984796196),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(9943947977301164032),
        };

        assert!(q.pop_descriptor_chain(m).is_none());
    }

    #[test]
    fn test_setters_error_cases() {
        assert_eq!(Queue::new(15).unwrap_err(), Error::InvalidMaxSize);
        let mut q = Queue::new(16).unwrap();

        let expected_val = q.desc_table.0;
        assert_eq!(
            q.try_set_desc_table_address(GuestAddress(0xf)).unwrap_err(),
            Error::InvalidDescTableAlign
        );
        assert_eq!(q.desc_table(), expected_val);

        let expected_val = q.avail_ring.0;
        assert_eq!(
            q.try_set_avail_ring_address(GuestAddress(0x1)).unwrap_err(),
            Error::InvalidAvailRingAlign
        );
        assert_eq!(q.avail_ring(), expected_val);

        let expected_val = q.used_ring.0;
        assert_eq!(
            q.try_set_used_ring_address(GuestAddress(0x3)).unwrap_err(),
            Error::InvalidUsedRingAlign
        );
        assert_eq!(q.used_ring(), expected_val);

        let expected_val = q.size;
        assert_eq!(q.try_set_size(15).unwrap_err(), Error::InvalidSize);
        assert_eq!(q.size(), expected_val);
    }

    #[test]
    fn test_regression_timeout_after_reset() {
        // Regression test: after a reset, the queue must not hand out
        // descriptor chains based on stale ring contents, which previously
        // made devices appear to hang.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 1024);

        let descriptors: Vec<Descriptor> = vec![
            Descriptor::new(21508325467, 0, 1, 4),
            Descriptor::new(2097152, 4096, 3, 0),
            Descriptor::new(18374686479672737792, 4294967295, 65535, 29),
            Descriptor::new(76842670169653248, 1114115, 0, 0),
            Descriptor::new(16, 983040, 126, 3),
            Descriptor::new(897648164864, 0, 0, 0),
            Descriptor::new(111669149722, 0, 0, 0),
        ];
        vq.build_multiple_desc_chains(&descriptors).unwrap();

        let mut q: Queue = vq.create_queue().unwrap();

        // A queue that was reset and only marked ready again reports the
        // default (zero) available ring address, so no chains can be popped.
        q.reset();
        q.set_ready(true);
        let mut counter = 0;
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
            while desc_chain.next().is_some() {
                counter += 1;
            }
        }
        assert_eq!(counter, 0);

        // The same holds when only the available ring address is restored:
        // the queue is still not marked ready.
        q.reset();
        q.set_avail_ring_address(Some(0x1000), None);
        assert_eq!(q.avail_ring, GuestAddress(0x1000));
        counter = 0;
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
            while desc_chain.next().is_some() {
                counter += 1;
            }
        }
        assert_eq!(counter, 0);
    }
}