use std::mem::size_of;
use std::num::Wrapping;
use std::ops::Deref;
use std::sync::atomic::{fence, Ordering};

use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};

use crate::defs::{
    DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR,
    VIRTQ_AVAIL_ELEMENT_SIZE, VIRTQ_AVAIL_RING_HEADER_SIZE, VIRTQ_AVAIL_RING_META_SIZE,
    VIRTQ_USED_ELEMENT_SIZE, VIRTQ_USED_RING_HEADER_SIZE, VIRTQ_USED_RING_META_SIZE,
};
use crate::desc::{split::VirtqUsedElem, RawDescriptor};
use crate::{error, DescriptorChain, Error, QueueGuard, QueueOwnedT, QueueState, QueueT};
use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;

#[cfg(kani)]
mod verification;

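/// Maximum number of entries a split virtio queue can hold (the largest power of two
/// that fits in the 16-bit queue size field).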
pub const MAX_QUEUE_SIZE: u16 = 32768;

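/// Device-side state of a split virtio queue: the negotiated size, the guest addresses
/// of the descriptor table and the avail/used rings, and the bookkeeping indexes used
/// while processing descriptor chains.
///
/// A minimal setup sketch (assuming the crate's usual re-exports of `Queue` and
/// `QueueT`; guest memory wiring and ring addresses are elided):
///
/// ```rust
/// use virtio_queue::{Queue, QueueT};
///
/// let mut queue = Queue::new(16).unwrap();
/// queue.set_size(8);
/// queue.set_ready(true);
/// assert!(queue.ready());
/// assert_eq!(queue.size(), 8);
/// ```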
#[derive(Debug, Default, PartialEq, Eq)]
pub struct Queue {
    max_size: u16,
    next_avail: Wrapping<u16>,
    next_used: Wrapping<u16>,
    event_idx_enabled: bool,
    num_added: Wrapping<u16>,
    size: u16,
    ready: bool,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    used_ring: GuestAddress,
}

impl Queue {
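    /// Set the queue size, rejecting values that are zero, larger than `max_size`,
    /// or not a power of two.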
    pub fn try_set_size(&mut self, size: u16) -> Result<(), Error> {
        if size > self.max_size() || size == 0 || (size & (size - 1)) != 0 {
            return Err(Error::InvalidSize);
        }
        self.size = size;
        Ok(())
    }

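    /// Set the descriptor table address; the address must be 16-byte aligned.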
    pub fn try_set_desc_table_address(&mut self, desc_table: GuestAddress) -> Result<(), Error> {
        if desc_table.mask(0xf) != 0 {
            return Err(Error::InvalidDescTableAlign);
        }
        self.desc_table = desc_table;

        Ok(())
    }

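    /// Set the available ring address; the address must be 2-byte aligned.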
    pub fn try_set_avail_ring_address(&mut self, avail_ring: GuestAddress) -> Result<(), Error> {
        if avail_ring.mask(0x1) != 0 {
            return Err(Error::InvalidAvailRingAlign);
        }
        self.avail_ring = avail_ring;
        Ok(())
    }

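    /// Set the used ring address; the address must be 4-byte aligned.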
    pub fn try_set_used_ring_address(&mut self, used_ring: GuestAddress) -> Result<(), Error> {
        if used_ring.mask(0x3) != 0 {
            return Err(Error::InvalidUsedRingAlign);
        }
        self.used_ring = used_ring;
        Ok(())
    }

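    /// Return a `QueueState` snapshot that can be used to save and later restore the queue.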
    pub fn state(&self) -> QueueState {
        QueueState {
            max_size: self.max_size,
            next_avail: self.next_avail(),
            next_used: self.next_used(),
            event_idx_enabled: self.event_idx_enabled,
            size: self.size,
            ready: self.ready,
            desc_table: self.desc_table(),
            avail_ring: self.avail_ring(),
            used_ring: self.used_ring(),
        }
    }

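    // Write `val` to the `avail_event` field, which sits right after the used ring
    // entries; the driver reads it when the EVENT_IDX feature is negotiated.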
    fn set_avail_event<M: GuestMemory>(
        &self,
        mem: &M,
        val: u16,
        order: Ordering,
    ) -> Result<(), Error> {
        let avail_event_offset =
            VIRTQ_USED_RING_HEADER_SIZE + VIRTQ_USED_ELEMENT_SIZE * u64::from(self.size);
        let addr = self
            .used_ring
            .checked_add(avail_event_offset)
            .ok_or(Error::AddressOverflow)?;

        mem.store(u16::to_le(val), addr, order)
            .map_err(Error::GuestMemory)
    }

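    // Write `val` to the `flags` field of the used ring (the first u16 of the ring).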
    fn set_used_flags<M: GuestMemory>(
        &mut self,
        mem: &M,
        val: u16,
        order: Ordering,
    ) -> Result<(), Error> {
        mem.store(u16::to_le(val), self.used_ring, order)
            .map_err(Error::GuestMemory)
    }

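    // Enable or disable driver notifications: with EVENT_IDX negotiated this updates
    // `avail_event`, otherwise it toggles the VRING_USED_F_NO_NOTIFY flag.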
    fn set_notification<M: GuestMemory>(&mut self, mem: &M, enable: bool) -> Result<(), Error> {
        if enable {
            if self.event_idx_enabled {
                self.set_avail_event(mem, self.next_avail.0, Ordering::Relaxed)
            } else {
                self.set_used_flags(mem, 0, Ordering::Relaxed)
            }
        } else if !self.event_idx_enabled {
            self.set_used_flags(mem, VRING_USED_F_NO_NOTIFY as u16, Ordering::Relaxed)
        } else {
            Ok(())
        }
    }

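    // Read the `used_event` value published by the driver right after the available
    // ring entries; only meaningful when the EVENT_IDX feature is negotiated.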
    fn used_event<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
        let used_event_offset =
            VIRTQ_AVAIL_RING_HEADER_SIZE + u64::from(self.size) * VIRTQ_AVAIL_ELEMENT_SIZE;
        let used_event_addr = self
            .avail_ring
            .checked_add(used_event_offset)
            .ok_or(Error::AddressOverflow)?;

        mem.load(used_event_addr, order)
            .map(u16::from_le)
            .map(Wrapping)
            .map_err(Error::GuestMemory)
    }
}

impl<'a> QueueGuard<'a> for Queue {
    type G = &'a mut Self;
}

impl QueueT for Queue {
    fn new(max_size: u16) -> Result<Self, Error> {
        if max_size == 0 || max_size > MAX_QUEUE_SIZE || (max_size & (max_size - 1)) != 0 {
            return Err(Error::InvalidMaxSize);
        }
        Ok(Queue {
            max_size,
            size: max_size,
            ready: false,
            desc_table: GuestAddress(DEFAULT_DESC_TABLE_ADDR),
            avail_ring: GuestAddress(DEFAULT_AVAIL_RING_ADDR),
            used_ring: GuestAddress(DEFAULT_USED_RING_ADDR),
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            event_idx_enabled: false,
            num_added: Wrapping(0),
        })
    }

    fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool {
        let queue_size = self.size as u64;
        let desc_table = self.desc_table;
        let desc_table_size = size_of::<RawDescriptor>() as u64 * queue_size;
        let avail_ring = self.avail_ring;
        let avail_ring_size = VIRTQ_AVAIL_RING_META_SIZE + VIRTQ_AVAIL_ELEMENT_SIZE * queue_size;
        let used_ring = self.used_ring;
        let used_ring_size = VIRTQ_USED_RING_META_SIZE + VIRTQ_USED_ELEMENT_SIZE * queue_size;

        if !self.ready {
            error!("attempt to use virtio queue that is not marked ready");
            false
        } else if desc_table
            .checked_add(desc_table_size)
            .is_none_or(|v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
                desc_table.raw_value(),
                desc_table_size
            );
            false
        } else if avail_ring
            .checked_add(avail_ring_size)
            .is_none_or(|v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                avail_ring.raw_value(),
                avail_ring_size
            );
            false
        } else if used_ring
            .checked_add(used_ring_size)
            .is_none_or(|v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                used_ring.raw_value(),
                used_ring_size
            );
            false
        } else {
            true
        }
    }

    fn reset(&mut self) {
        self.ready = false;
        self.size = self.max_size;
        self.desc_table = GuestAddress(DEFAULT_DESC_TABLE_ADDR);
        self.avail_ring = GuestAddress(DEFAULT_AVAIL_RING_ADDR);
        self.used_ring = GuestAddress(DEFAULT_USED_RING_ADDR);
        self.next_avail = Wrapping(0);
        self.next_used = Wrapping(0);
        self.num_added = Wrapping(0);
        self.event_idx_enabled = false;
    }

    fn lock(&mut self) -> <Self as QueueGuard<'_>>::G {
        self
    }

    fn max_size(&self) -> u16 {
        self.max_size
    }

    fn size(&self) -> u16 {
        self.size
    }

    fn set_size(&mut self, size: u16) {
        if self.try_set_size(size).is_err() {
            error!("virtio queue with invalid size: {}", size);
        }
    }

    fn ready(&self) -> bool {
        self.ready
    }

    fn set_ready(&mut self, ready: bool) {
        self.ready = ready;
    }

    fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.desc_table.0 as u32) as u64;
        let high = high.unwrap_or((self.desc_table.0 >> 32) as u32) as u64;

        let desc_table = GuestAddress((high << 32) | low);
        if self.try_set_desc_table_address(desc_table).is_err() {
            error!("virtio queue descriptor table breaks alignment constraints");
        }
    }

    fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.avail_ring.0 as u32) as u64;
        let high = high.unwrap_or((self.avail_ring.0 >> 32) as u32) as u64;

        let avail_ring = GuestAddress((high << 32) | low);
        if self.try_set_avail_ring_address(avail_ring).is_err() {
            error!("virtio queue available ring breaks alignment constraints");
        }
    }

    fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.used_ring.0 as u32) as u64;
        let high = high.unwrap_or((self.used_ring.0 >> 32) as u32) as u64;

        let used_ring = GuestAddress((high << 32) | low);
        if self.try_set_used_ring_address(used_ring).is_err() {
            error!("virtio queue used ring breaks alignment constraints");
        }
    }

    fn set_event_idx(&mut self, enabled: bool) {
        self.event_idx_enabled = enabled;
    }

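    // The avail ring starts with `flags: u16` followed by `idx: u16`, so the index
    // published by the driver lives at offset 2 from the ring base.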
    fn avail_idx<M>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error>
    where
        M: GuestMemory + ?Sized,
    {
        let addr = self
            .avail_ring
            .checked_add(2)
            .ok_or(Error::AddressOverflow)?;

        mem.load(addr, order)
            .map(u16::from_le)
            .map(Wrapping)
            .map_err(Error::GuestMemory)
    }

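    // The used ring has the same header layout, so the device-published index also
    // lives at offset 2 from the used ring base.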
    fn used_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
        let addr = self
            .used_ring
            .checked_add(2)
            .ok_or(Error::AddressOverflow)?;

        mem.load(addr, order)
            .map(u16::from_le)
            .map(Wrapping)
            .map_err(Error::GuestMemory)
    }

    fn add_used<M: GuestMemory>(
        &mut self,
        mem: &M,
        head_index: u16,
        len: u32,
    ) -> Result<(), Error> {
        if head_index >= self.size {
            error!(
                "attempted to add out of bounds descriptor to used ring: {}",
                head_index
            );
            return Err(Error::InvalidDescriptorIndex);
        }

        let next_used_index = u64::from(self.next_used.0 % self.size);
        let offset = VIRTQ_USED_RING_HEADER_SIZE + next_used_index * VIRTQ_USED_ELEMENT_SIZE;
        let addr = self
            .used_ring
            .checked_add(offset)
            .ok_or(Error::AddressOverflow)?;
        mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
            .map_err(Error::GuestMemory)?;

        self.next_used += Wrapping(1);
        self.num_added += Wrapping(1);

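        // Publish the new used index only after the used element itself has been
        // written; the Release store pairs with the driver's read of `idx`.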
        mem.store(
            u16::to_le(self.next_used.0),
            self.used_ring
                .checked_add(2)
                .ok_or(Error::AddressOverflow)?,
            Ordering::Release,
        )
        .map_err(Error::GuestMemory)
    }

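    // Re-enables driver notifications and then re-reads the avail index; returns `true`
    // when new descriptors were published while notifications were suppressed, so the
    // caller knows to keep processing instead of waiting for the next notification.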
    fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
        self.set_notification(mem, true)?;
        fence(Ordering::SeqCst);

        self.avail_idx(mem, Ordering::Relaxed)
            .map(|idx| idx != self.next_avail)
    }

    fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> {
        self.set_notification(mem, false)
    }

    fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
        let used_idx = self.next_used;

        fence(Ordering::SeqCst);

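        // With EVENT_IDX negotiated, a notification is needed only if `used_event`
        // (written by the driver) falls within the batch of entries added since the
        // last check: this is the spec's vring_need_event(event, new, old) test,
        // (new - event - 1) < (new - old), in wrapping u16 arithmetic.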
        if self.event_idx_enabled {
            let used_event = self.used_event(mem, Ordering::Relaxed)?;
            let old = used_idx - self.num_added;
            self.num_added = Wrapping(0);

            return Ok(used_idx - used_event - Wrapping(1) < used_idx - old);
        }

        Ok(true)
    }

    fn next_avail(&self) -> u16 {
        self.next_avail.0
    }

    fn set_next_avail(&mut self, next_avail: u16) {
        self.next_avail = Wrapping(next_avail);
    }

    fn next_used(&self) -> u16 {
        self.next_used.0
    }

    fn set_next_used(&mut self, next_used: u16) {
        self.next_used = Wrapping(next_used);
    }

    fn desc_table(&self) -> u64 {
        self.desc_table.0
    }

    fn avail_ring(&self) -> u64 {
        self.avail_ring.0
    }

    fn used_ring(&self) -> u64 {
        self.used_ring.0
    }

    fn event_idx_enabled(&self) -> bool {
        self.event_idx_enabled
    }

    fn pop_descriptor_chain<M>(&mut self, mem: M) -> Option<DescriptorChain<M>>
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        match self.iter(mem) {
            Ok(mut iter) => iter.next(),
            Err(e) => {
                error!("Iterator error {}", e);
                None
            }
        }
    }
}

impl QueueOwnedT for Queue {
    fn iter<M>(&mut self, mem: M) -> Result<AvailIter<'_, M>, Error>
    where
        M: Deref,
        M::Target: GuestMemory,
    {
        if !self.ready || self.avail_ring == GuestAddress(0) {
            return Err(Error::QueueNotReady);
        }

        self.avail_idx(mem.deref(), Ordering::Acquire)
            .map(move |idx| AvailIter::new(mem, idx, self))?
    }

    fn go_to_previous_position(&mut self) {
        self.next_avail -= Wrapping(1);
    }
}

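/// An iterator over the descriptor chains the driver has made available, yielding one
/// `DescriptorChain` for each avail ring entry between the queue's `next_avail`
/// position and the index snapshot taken when the iterator was created.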
#[derive(Debug)]
pub struct AvailIter<'b, M> {
    mem: M,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    queue_size: u16,
    last_index: Wrapping<u16>,
    next_avail: &'b mut Wrapping<u16>,
}

impl<'b, M> AvailIter<'b, M>
where
    M: Deref,
    M::Target: GuestMemory,
{
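    /// Create an iterator over the available ring, failing with
    /// `Error::InvalidAvailRingIndex` if the driver-published index `idx` is more than
    /// `queue.size` entries ahead of `next_avail`.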
    pub(crate) fn new(mem: M, idx: Wrapping<u16>, queue: &'b mut Queue) -> Result<Self, Error> {
        if (idx - queue.next_avail).0 > queue.size {
            return Err(Error::InvalidAvailRingIndex);
        }

        Ok(AvailIter {
            mem,
            desc_table: queue.desc_table,
            avail_ring: queue.avail_ring,
            queue_size: queue.size,
            last_index: idx,
            next_avail: &mut queue.next_avail,
        })
    }

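    /// Step the position back by one chain, so the chain most recently returned by
    /// `next()` is yielded again on the following call.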
    pub fn go_to_previous_position(&mut self) {
        *self.next_avail -= Wrapping(1);
    }
}

impl<M> Iterator for AvailIter<'_, M>
where
    M: Clone + Deref,
    M::Target: GuestMemory,
{
    type Item = DescriptorChain<M>;

    fn next(&mut self) -> Option<Self::Item> {
        if *self.next_avail == self.last_index {
            return None;
        }

        let elem_off =
            u64::from(self.next_avail.0.checked_rem(self.queue_size)?) * VIRTQ_AVAIL_ELEMENT_SIZE;
        let offset = VIRTQ_AVAIL_RING_HEADER_SIZE + elem_off;

        let addr = self.avail_ring.checked_add(offset)?;
        let head_index: u16 = self
            .mem
            .load(addr, Ordering::Acquire)
            .map(u16::from_le)
            .map_err(|_| error!("Failed to read from memory {:x}", addr.raw_value()))
            .ok()?;

        *self.next_avail += Wrapping(1);

        Some(DescriptorChain::new(
            self.mem.clone(),
            self.desc_table,
            self.queue_size,
            head_index,
        ))
    }
}

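// Only compiled for tests and the `test-utils` feature: compare `Error` values by
// their `Display` output so tests can use `assert_eq!` on `Result<_, Error>` values.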
#[cfg(any(test, feature = "test-utils"))]
impl PartialEq for Error {
    fn eq(&self, other: &Self) -> bool {
        format!("{}", &self) == format!("{other}")
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::defs::{DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR};
    use crate::desc::{split::Descriptor as SplitDescriptor, RawDescriptor};
    use crate::mock::MockSplitQueue;
    use virtio_bindings::bindings::virtio_ring::{
        VRING_DESC_F_NEXT, VRING_DESC_F_WRITE, VRING_USED_F_NO_NOTIFY,
    };

    use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};

    #[test]
    fn test_queue_is_valid() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(m));

        q.set_ready(false);
        assert!(!q.ready());
        assert!(!q.is_valid(m));
        q.set_ready(true);

        q.set_size(q.max_size() << 1);
        assert_eq!(q.size, q.max_size());

        q.set_size(0);
        assert_eq!(q.size, q.max_size());

        q.set_size(11);
        assert_eq!(q.size, q.max_size());

        q.set_size(4);
        assert_eq!(q.size, 4);
        q.size = q.max_size();

        q.set_desc_table_address(Some(0xf), None);
        assert_eq!(q.desc_table.0, vq.desc_table_addr().0);
        q.set_desc_table_address(Some(0xffff_fff0), None);
        assert_eq!(q.desc_table.0, 0xffff_fff0);
        assert!(!q.is_valid(m));
        q.set_desc_table_address(Some(0x10), None);
        assert_eq!(q.desc_table.0, 0x10);
        assert!(q.is_valid(m));
        let addr = vq.desc_table_addr().0;
        q.set_desc_table_address(Some(addr as u32), Some((addr >> 32) as u32));

        q.set_avail_ring_address(Some(0x1), None);
        assert_eq!(q.avail_ring.0, vq.avail_addr().0);
        q.set_avail_ring_address(Some(0xffff_fffe), None);
        assert_eq!(q.avail_ring.0, 0xffff_fffe);
        assert!(!q.is_valid(m));
        q.set_avail_ring_address(Some(0x2), None);
        assert_eq!(q.avail_ring.0, 0x2);
        assert!(q.is_valid(m));
        let addr = vq.avail_addr().0;
        q.set_avail_ring_address(Some(addr as u32), Some((addr >> 32) as u32));

        q.set_used_ring_address(Some(0x3), None);
        assert_eq!(q.used_ring.0, vq.used_addr().0);
        q.set_used_ring_address(Some(0xffff_fffc), None);
        assert_eq!(q.used_ring.0, 0xffff_fffc);
        assert!(!q.is_valid(m));
        q.set_used_ring_address(Some(0x4), None);
        assert_eq!(q.used_ring.0, 0x4);
        let addr = vq.used_addr().0;
        q.set_used_ring_address(Some(addr as u32), Some((addr >> 32) as u32));
        assert!(q.is_valid(m));
    }

    #[test]
    fn test_add_used() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(0));
        assert_eq!(u16::from_le(vq.used().idx().load()), 0);

        assert!(q.add_used(mem, 16, 0x1000).is_err());
        assert_eq!(u16::from_le(vq.used().idx().load()), 0);

        q.add_used(mem, 1, 0x1000).unwrap();
        assert_eq!(q.next_used, Wrapping(1));
        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(1));
        assert_eq!(u16::from_le(vq.used().idx().load()), 1);

        let x = vq.used().ring().ref_at(0).unwrap().load();
        assert_eq!(x.id(), 1);
        assert_eq!(x.len(), 0x1000);
    }

    #[test]
    fn test_reset_queue() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        q.set_size(8);
        q.set_desc_table_address(Some(0x5000), None);
        q.set_event_idx(true);
        q.set_next_avail(2);
        q.set_next_used(4);
        q.num_added = Wrapping(15);
        assert_eq!(q.size, 8);
        assert!(q.ready);
        assert_ne!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
        assert_ne!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
        assert_ne!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
        assert_ne!(q.next_avail, Wrapping(0));
        assert_ne!(q.next_used, Wrapping(0));
        assert_ne!(q.num_added, Wrapping(0));
        assert!(q.event_idx_enabled);

        q.reset();
        assert_eq!(q.size, 16);
        assert!(!q.ready);
        assert_eq!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
        assert_eq!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
        assert_eq!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
        assert_eq!(q.next_avail, Wrapping(0));
        assert_eq!(q.next_used, Wrapping(0));
        assert_eq!(q.num_added, Wrapping(0));
        assert!(!q.event_idx_enabled);
    }

    #[test]
    fn test_needs_notification() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let qsize = 16;
        let vq = MockSplitQueue::new(mem, qsize);
        let mut q: Queue = vq.create_queue().unwrap();
        let avail_addr = vq.avail_addr();

        for i in 0..qsize {
            q.next_used = Wrapping(i);
            assert!(q.needs_notification(mem).unwrap());
        }

        mem.write_obj::<u16>(
            u16::to_le(4),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();
        q.set_event_idx(true);

        let wrap = u32::from(u16::MAX) + 1;

        for i in 0..wrap + 12 {
            q.next_used = Wrapping(i as u16);
            q.num_added = Wrapping(1);
            let expected = i == 5 || i == (5 + wrap);
            assert_eq!((q.needs_notification(mem).unwrap(), i), (expected, i));
        }

        mem.write_obj::<u16>(
            u16::to_le(8),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();

        assert!(!q.needs_notification(mem).unwrap());

        mem.write_obj::<u16>(
            u16::to_le(15),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();

        q.num_added = Wrapping(1);
        assert!(!q.needs_notification(mem).unwrap());

        q.next_used = Wrapping(15);
        q.num_added = Wrapping(1);
        assert!(!q.needs_notification(mem).unwrap());

        q.next_used = Wrapping(16);
        q.num_added = Wrapping(1);
        assert!(q.needs_notification(mem).unwrap());

        assert!(!q.needs_notification(mem).unwrap());

        mem.write_obj::<u16>(
            u16::to_le(u16::MAX - 3),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();
        q.next_used = Wrapping(u16::MAX - 2);
        q.num_added = Wrapping(1);
        assert!(q.needs_notification(mem).unwrap());
    }

    #[test]
    fn test_enable_disable_notification() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();
        let used_addr = vq.used_addr();

        assert!(!q.event_idx_enabled);

        q.enable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, 0);

        q.disable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, VRING_USED_F_NO_NOTIFY as u16);

        q.enable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, 0);

        q.set_event_idx(true);
        let avail_addr = vq.avail_addr();
        mem.write_obj::<u16>(u16::to_le(2), avail_addr.unchecked_add(2))
            .unwrap();

        assert!(q.enable_notification(mem).unwrap());
        q.next_avail = Wrapping(2);
        assert!(!q.enable_notification(mem).unwrap());

        mem.write_obj::<u16>(u16::to_le(8), avail_addr.unchecked_add(2))
            .unwrap();

        assert!(q.enable_notification(mem).unwrap());
        q.next_avail = Wrapping(8);
        assert!(!q.enable_notification(mem).unwrap());
    }

    #[test]
    fn test_consume_chains_with_notif() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(mem));

        let mut descs = Vec::new();
        for i in 0..13 {
            let flags = match i {
                1 | 4 | 6 | 8 | 12 => 0,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(RawDescriptor::from(SplitDescriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                flags as u16,
                i + 1,
            )));
        }

        vq.add_desc_chains(&descs, 0).unwrap();
        vq.avail().idx().store(u16::to_le(2));
        assert_eq!(q.next_avail(), 0);

        let mut i = 0;

        loop {
            i += 1;
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        assert_eq!(i, 1);
        assert_eq!(q.next_avail(), 2);
        assert_eq!(q.next_used(), 2);
        vq.avail().idx().store(u16::to_le(3));
        i = 0;

        loop {
            i += 1;
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }

            vq.avail().idx().store(u16::to_le(4));
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        assert_eq!(i, 2);
        assert_eq!(q.next_avail(), 4);
        assert_eq!(q.next_used(), 4);

        vq.avail().idx().store(u16::to_le(7));
        loop {
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        assert_eq!(q.next_avail(), 7);
        assert_eq!(q.next_used(), 7);
    }

    #[test]
    fn test_invalid_avail_idx() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(mem));

        let mut descs = Vec::new();
        for i in 0..7 {
            let flags = match i {
                1 | 4 | 6 => 0,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(RawDescriptor::from(SplitDescriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                flags as u16,
                i + 1,
            )));
        }

        vq.add_desc_chains(&descs, 0).unwrap();
        vq.avail().idx().store(u16::to_le(3));
        assert_eq!(q.next_avail(), 0);
        assert_eq!(q.next_used(), 0);

        loop {
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        assert_eq!(q.next_avail(), 3);
        assert_eq!(q.avail_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
        assert_eq!(q.next_used(), 3);
        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
        assert!(q.lock().ready());

        vq.avail().idx().store(u16::to_le(1));
        assert!(q.iter(mem).is_err());
    }

    #[test]
    fn test_iterator_and_avail_idx() {
        let queue_size = 2;
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, queue_size);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(mem));

        let mut descs = Vec::new();
        for i in 0..queue_size {
            descs.push(RawDescriptor::from(SplitDescriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                0_u16,
                i + 1,
            )));
        }
        vq.add_desc_chains(&descs, 0).unwrap();

        q.set_next_avail(u16::MAX);

        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_ok());
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size - 1);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_ok());

        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size + 1);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_err());
    }

    #[test]
    fn test_descriptor_and_iterator() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(m));

        let mut descs = Vec::new();
        for j in 0..7 {
            let flags = match j {
                1 | 6 => 0,
                2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
                4 => VRING_DESC_F_WRITE,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(RawDescriptor::from(SplitDescriptor::new(
                (0x1000 * (j + 1)) as u64,
                0x1000,
                flags as u16,
                j + 1,
            )));
        }

        vq.add_desc_chains(&descs, 0).unwrap();

        let mut i = q.iter(m).unwrap();

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 0);

            let mut iter = c;
            assert!(iter.next().is_some());
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 2);

            let mut iter = c.writable();
            assert!(iter.next().is_some());
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 5);

            let mut iter = c.readable();
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }
    }

    #[test]
    fn test_iterator() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        q.size = q.max_size;
        q.desc_table = vq.desc_table_addr();
        q.avail_ring = vq.avail_addr();
        q.used_ring = vq.used_addr();
        assert!(q.is_valid(m));

        {
            q.ready = false;
            assert!(q.iter(m).is_err());
        }

        q.ready = true;

        {
            let mut descs = Vec::new();
            for j in 0..5u16 {
                let flags = match j {
                    1 | 4 => 0,
                    _ => VRING_DESC_F_NEXT,
                };

                descs.push(RawDescriptor::from(SplitDescriptor::new(
                    (0x1000 * (j + 1)) as u64,
                    0x1000,
                    flags as u16,
                    j + 1,
                )));
            }
            vq.add_desc_chains(&descs, 0).unwrap();

            let mut i = q.iter(m).unwrap();

            {
                let mut c = i.next().unwrap();
                assert_eq!(c.head_index(), 0);

                c.next().unwrap();
                assert!(c.next().is_some());
                assert!(c.next().is_none());
                assert_eq!(c.head_index(), 0);
            }

            {
                let mut c = i.next().unwrap();
                assert_eq!(c.head_index(), 2);

                c.next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                assert!(c.next().is_none());
                assert_eq!(c.head_index(), 2);
            }

            {
                assert!(i.next().is_none());
                i.go_to_previous_position();
                let mut c = q.iter(m).unwrap().next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                assert!(c.next().is_none());
            }
        }

        {
            let descs = vec![
                RawDescriptor::from(SplitDescriptor::new(
                    0x1000,
                    0xffff_ffff,
                    VRING_DESC_F_NEXT as u16,
                    1,
                )),
                RawDescriptor::from(SplitDescriptor::new(0x1000, 0x1234_5678, 0, 2)),
            ];
            vq.add_desc_chains(&descs, 0).unwrap();
            let mut yielded_bytes_by_iteration = 0_u32;
            for d in q.iter(m).unwrap().next().unwrap() {
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
                    .checked_add(d.len())
                    .expect("iterator should not yield more than 2^32 bytes");
            }
        }

        {
            let descs = vec![RawDescriptor::from(SplitDescriptor::new(
                0x1000,
                0xffff_ffff,
                VRING_DESC_F_NEXT as u16,
                0,
            ))];
            vq.add_desc_chains(&descs, 0).unwrap();
            let mut yielded_bytes_by_iteration = 0_u32;
            for d in q.iter(m).unwrap().next().unwrap() {
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
                    .checked_add(d.len())
                    .expect("iterator should not yield more than 2^32 bytes");
            }
        }
    }

    #[test]
    fn test_regression_iterator_division() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 1);
        let descriptors: Vec<RawDescriptor> = vec![RawDescriptor::from(SplitDescriptor::new(
            14178673876262995140,
            3301229764,
            50372,
            50372,
        ))];
        vq.build_desc_chain(&descriptors).unwrap();

        let mut q = Queue {
            max_size: 38,
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            event_idx_enabled: false,
            num_added: Wrapping(0),
            size: 0,
            ready: false,
            desc_table: GuestAddress(12837708984796196),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(9943947977301164032),
        };

        assert!(q.pop_descriptor_chain(m).is_none());
    }

    #[test]
    fn test_setters_error_cases() {
        assert_eq!(Queue::new(15).unwrap_err(), Error::InvalidMaxSize);
        let mut q = Queue::new(16).unwrap();

        let expected_val = q.desc_table.0;
        assert_eq!(
            q.try_set_desc_table_address(GuestAddress(0xf)).unwrap_err(),
            Error::InvalidDescTableAlign
        );
        assert_eq!(q.desc_table(), expected_val);

        let expected_val = q.avail_ring.0;
        assert_eq!(
            q.try_set_avail_ring_address(GuestAddress(0x1)).unwrap_err(),
            Error::InvalidAvailRingAlign
        );
        assert_eq!(q.avail_ring(), expected_val);

        let expected_val = q.used_ring.0;
        assert_eq!(
            q.try_set_used_ring_address(GuestAddress(0x3)).unwrap_err(),
            Error::InvalidUsedRingAlign
        );
        assert_eq!(q.used_ring(), expected_val);

        let expected_val = q.size;
        assert_eq!(q.try_set_size(15).unwrap_err(), Error::InvalidSize);
        assert_eq!(q.size(), expected_val)
    }

    #[test]
    fn test_regression_timeout_after_reset() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 1024);

        let descriptors: Vec<RawDescriptor> = vec![
            RawDescriptor::from(SplitDescriptor::new(21508325467, 0, 1, 4)),
            RawDescriptor::from(SplitDescriptor::new(2097152, 4096, 3, 0)),
            RawDescriptor::from(SplitDescriptor::new(
                18374686479672737792,
                4294967295,
                65535,
                29,
            )),
            RawDescriptor::from(SplitDescriptor::new(76842670169653248, 1114115, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(16, 983040, 126, 3)),
            RawDescriptor::from(SplitDescriptor::new(897648164864, 0, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(111669149722, 0, 0, 0)),
        ];
        vq.build_multiple_desc_chains(&descriptors).unwrap();

        let mut q: Queue = vq.create_queue().unwrap();

        q.reset();
        q.set_ready(true);
        let mut counter = 0;
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
            while desc_chain.next().is_some() {
                counter += 1;
            }
        }
        assert_eq!(counter, 0);

        q.reset();
        q.set_avail_ring_address(Some(0x1000), None);
        assert_eq!(q.avail_ring, GuestAddress(0x1000));
        counter = 0;
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
            while desc_chain.next().is_some() {
                counter += 1;
            }
        }
        assert_eq!(counter, 0);
    }
}