1use std::mem::size_of;
11use std::num::Wrapping;
12use std::ops::Deref;
13use std::sync::atomic::{fence, Ordering};
14
15use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};
16
17use crate::defs::{
18 DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR,
19 VIRTQ_AVAIL_ELEMENT_SIZE, VIRTQ_AVAIL_RING_HEADER_SIZE, VIRTQ_AVAIL_RING_META_SIZE,
20 VIRTQ_USED_ELEMENT_SIZE, VIRTQ_USED_RING_HEADER_SIZE, VIRTQ_USED_RING_META_SIZE,
21};
22use crate::desc::{split::VirtqUsedElem, RawDescriptor};
23use crate::{error, DescriptorChain, Error, QueueGuard, QueueOwnedT, QueueState, QueueT};
24use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;
25
/// Largest queue size (number of descriptors) a split virtqueue may have.
///
/// The virtio specification caps split-ring queue sizes at 2^15 = 32768.
pub const MAX_QUEUE_SIZE: u16 = 32768;
28
/// A split virtio queue.
///
/// Holds both the guest-visible ring addresses configured by the driver and
/// the device-side bookkeeping indexes used while processing descriptors.
#[derive(Debug, Default, PartialEq, Eq)]
pub struct Queue {
    /// The maximum queue size offered by the device.
    max_size: u16,

    /// Index of the next available-ring entry the device will process.
    next_avail: Wrapping<u16>,

    /// Index of the used-ring slot where the next used element is written.
    next_used: Wrapping<u16>,

    /// Whether the EVENT_IDX notification mechanism has been negotiated.
    event_idx_enabled: bool,

    /// Number of used elements added since the last notification check
    /// (only meaningful when `event_idx_enabled` is set).
    num_added: Wrapping<u16>,

    /// The queue size currently configured by the driver.
    size: u16,

    /// Whether the driver has marked this queue ready for use.
    ready: bool,

    /// Guest physical address of the descriptor table.
    desc_table: GuestAddress,

    /// Guest physical address of the available ring.
    avail_ring: GuestAddress,

    /// Guest physical address of the used ring.
    used_ring: GuestAddress,
}
110
111impl Queue {
112 pub fn try_set_size(&mut self, size: u16) -> Result<(), Error> {
117 if size > self.max_size() || size == 0 || (size & (size - 1)) != 0 {
118 return Err(Error::InvalidSize);
119 }
120 self.size = size;
121 Ok(())
122 }
123
124 pub fn try_set_desc_table_address(&mut self, desc_table: GuestAddress) -> Result<(), Error> {
131 if desc_table.mask(0xf) != 0 {
132 return Err(Error::InvalidDescTableAlign);
133 }
134 self.desc_table = desc_table;
135
136 Ok(())
137 }
138
139 pub fn try_set_avail_ring_address(&mut self, avail_ring: GuestAddress) -> Result<(), Error> {
146 if avail_ring.mask(0x1) != 0 {
147 return Err(Error::InvalidAvailRingAlign);
148 }
149 self.avail_ring = avail_ring;
150 Ok(())
151 }
152
153 pub fn try_set_used_ring_address(&mut self, used_ring: GuestAddress) -> Result<(), Error> {
160 if used_ring.mask(0x3) != 0 {
161 return Err(Error::InvalidUsedRingAlign);
162 }
163 self.used_ring = used_ring;
164 Ok(())
165 }
166
167 pub fn state(&self) -> QueueState {
177 QueueState {
178 max_size: self.max_size,
179 next_avail: self.next_avail(),
180 next_used: self.next_used(),
181 event_idx_enabled: self.event_idx_enabled,
182 size: self.size,
183 ready: self.ready,
184 desc_table: self.desc_table(),
185 avail_ring: self.avail_ring(),
186 used_ring: self.used_ring(),
187 }
188 }
189
190 fn set_avail_event<M: GuestMemory>(
193 &self,
194 mem: &M,
195 val: u16,
196 order: Ordering,
197 ) -> Result<(), Error> {
198 let avail_event_offset =
201 VIRTQ_USED_RING_HEADER_SIZE + VIRTQ_USED_ELEMENT_SIZE * u64::from(self.size);
202 let addr = self
203 .used_ring
204 .checked_add(avail_event_offset)
205 .ok_or(Error::AddressOverflow)?;
206
207 mem.store(u16::to_le(val), addr, order)
208 .map_err(Error::GuestMemory)
209 }
210
211 fn set_used_flags<M: GuestMemory>(
213 &mut self,
214 mem: &M,
215 val: u16,
216 order: Ordering,
217 ) -> Result<(), Error> {
218 mem.store(u16::to_le(val), self.used_ring, order)
219 .map_err(Error::GuestMemory)
220 }
221
222 fn set_notification<M: GuestMemory>(&mut self, mem: &M, enable: bool) -> Result<(), Error> {
227 if enable {
228 if self.event_idx_enabled {
229 self.set_avail_event(mem, self.next_avail.0, Ordering::Relaxed)
233 } else {
234 self.set_used_flags(mem, 0, Ordering::Relaxed)
235 }
236 } else if !self.event_idx_enabled {
237 self.set_used_flags(mem, VRING_USED_F_NO_NOTIFY as u16, Ordering::Relaxed)
238 } else {
239 Ok(())
242 }
243 }
244
245 fn used_event<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
256 let used_event_offset =
259 VIRTQ_AVAIL_RING_HEADER_SIZE + u64::from(self.size) * VIRTQ_AVAIL_ELEMENT_SIZE;
260 let used_event_addr = self
261 .avail_ring
262 .checked_add(used_event_offset)
263 .ok_or(Error::AddressOverflow)?;
264
265 mem.load(used_event_addr, order)
266 .map(u16::from_le)
267 .map(Wrapping)
268 .map_err(Error::GuestMemory)
269 }
270}
271
// A plain `Queue` needs no locking: its guard is simply a mutable
// reference to itself.
impl<'a> QueueGuard<'a> for Queue {
    type G = &'a mut Self;
}
275
impl QueueT for Queue {
    /// Create a queue whose offered maximum size is `max_size`, which must be
    /// a non-zero power of two no larger than `MAX_QUEUE_SIZE`.
    fn new(max_size: u16) -> Result<Self, Error> {
        // `max_size == 0` is checked first so `max_size - 1` cannot underflow.
        if max_size == 0 || max_size > MAX_QUEUE_SIZE || (max_size & (max_size - 1)) != 0 {
            return Err(Error::InvalidMaxSize);
        }
        Ok(Queue {
            max_size,
            size: max_size,
            ready: false,
            desc_table: GuestAddress(DEFAULT_DESC_TABLE_ADDR),
            avail_ring: GuestAddress(DEFAULT_AVAIL_RING_ADDR),
            used_ring: GuestAddress(DEFAULT_USED_RING_ADDR),
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            event_idx_enabled: false,
            num_added: Wrapping(0),
        })
    }

    /// Check that the queue is ready and that the descriptor table and both
    /// rings lie entirely within `mem`.
    fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool {
        let queue_size = self.size as u64;
        let desc_table = self.desc_table;
        let desc_table_size = size_of::<RawDescriptor>() as u64 * queue_size;
        let avail_ring = self.avail_ring;
        let avail_ring_size = VIRTQ_AVAIL_RING_META_SIZE + VIRTQ_AVAIL_ELEMENT_SIZE * queue_size;
        let used_ring = self.used_ring;
        let used_ring_size = VIRTQ_USED_RING_META_SIZE + VIRTQ_USED_ELEMENT_SIZE * queue_size;

        if !self.ready {
            error!("attempt to use virtio queue that is not marked ready");
            false
        } else if desc_table
            .checked_add(desc_table_size)
            .is_none_or(|v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
                desc_table.raw_value(),
                desc_table_size
            );
            false
        } else if avail_ring
            .checked_add(avail_ring_size)
            .is_none_or(|v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                avail_ring.raw_value(),
                avail_ring_size
            );
            false
        } else if used_ring
            .checked_add(used_ring_size)
            .is_none_or(|v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                used_ring.raw_value(),
                used_ring_size
            );
            false
        } else {
            true
        }
    }

    /// Restore the queue to its freshly-created state (size back to
    /// `max_size`, default addresses, indexes zeroed, not ready).
    fn reset(&mut self) {
        self.ready = false;
        self.size = self.max_size;
        self.desc_table = GuestAddress(DEFAULT_DESC_TABLE_ADDR);
        self.avail_ring = GuestAddress(DEFAULT_AVAIL_RING_ADDR);
        self.used_ring = GuestAddress(DEFAULT_USED_RING_ADDR);
        self.next_avail = Wrapping(0);
        self.next_used = Wrapping(0);
        self.num_added = Wrapping(0);
        self.event_idx_enabled = false;
    }

    /// Acquire exclusive access; trivially `self` since `Queue` is unsynchronized.
    fn lock(&mut self) -> <Self as QueueGuard>::G {
        self
    }

    fn max_size(&self) -> u16 {
        self.max_size
    }

    fn size(&self) -> u16 {
        self.size
    }

    /// Infallible variant of `try_set_size`; invalid sizes are logged and ignored.
    fn set_size(&mut self, size: u16) {
        if self.try_set_size(size).is_err() {
            error!("virtio queue with invalid size: {}", size);
        }
    }

    fn ready(&self) -> bool {
        self.ready
    }

    fn set_ready(&mut self, ready: bool) {
        self.ready = ready;
    }

    /// Update the descriptor table address from its 32-bit halves; a `None`
    /// half keeps the current value. Misaligned addresses are logged and ignored.
    fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.desc_table.0 as u32) as u64;
        let high = high.unwrap_or((self.desc_table.0 >> 32) as u32) as u64;

        let desc_table = GuestAddress((high << 32) | low);
        if self.try_set_desc_table_address(desc_table).is_err() {
            error!("virtio queue descriptor table breaks alignment constraints");
        }
    }

    /// Update the available ring address from its 32-bit halves; a `None`
    /// half keeps the current value. Misaligned addresses are logged and ignored.
    fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.avail_ring.0 as u32) as u64;
        let high = high.unwrap_or((self.avail_ring.0 >> 32) as u32) as u64;

        let avail_ring = GuestAddress((high << 32) | low);
        if self.try_set_avail_ring_address(avail_ring).is_err() {
            error!("virtio queue available ring breaks alignment constraints");
        }
    }

    /// Update the used ring address from its 32-bit halves; a `None` half
    /// keeps the current value. Misaligned addresses are logged and ignored.
    fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
        let low = low.unwrap_or(self.used_ring.0 as u32) as u64;
        let high = high.unwrap_or((self.used_ring.0 >> 32) as u32) as u64;

        let used_ring = GuestAddress((high << 32) | low);
        if self.try_set_used_ring_address(used_ring).is_err() {
            error!("virtio queue used ring breaks alignment constraints");
        }
    }

    fn set_event_idx(&mut self, enabled: bool) {
        self.event_idx_enabled = enabled;
    }

    /// Read the driver-published available index (`avail_ring + 2`).
    fn avail_idx<M>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error>
    where
        M: GuestMemory + ?Sized,
    {
        let addr = self
            .avail_ring
            .checked_add(2)
            .ok_or(Error::AddressOverflow)?;

        mem.load(addr, order)
            .map(u16::from_le)
            .map(Wrapping)
            .map_err(Error::GuestMemory)
    }

    /// Read the device-published used index (`used_ring + 2`).
    fn used_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
        let addr = self
            .used_ring
            .checked_add(2)
            .ok_or(Error::AddressOverflow)?;

        mem.load(addr, order)
            .map(u16::from_le)
            .map(Wrapping)
            .map_err(Error::GuestMemory)
    }

    /// Publish a completed descriptor chain (`head_index`, bytes written
    /// `len`) to the used ring and advance the used index.
    fn add_used<M: GuestMemory>(
        &mut self,
        mem: &M,
        head_index: u16,
        len: u32,
    ) -> Result<(), Error> {
        if head_index >= self.size {
            error!(
                "attempted to add out of bounds descriptor to used ring: {}",
                head_index
            );
            return Err(Error::InvalidDescriptorIndex);
        }

        let next_used_index = u64::from(self.next_used.0 % self.size);
        let offset = VIRTQ_USED_RING_HEADER_SIZE + next_used_index * VIRTQ_USED_ELEMENT_SIZE;
        let addr = self
            .used_ring
            .checked_add(offset)
            .ok_or(Error::AddressOverflow)?;
        mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
            .map_err(Error::GuestMemory)?;

        self.next_used += Wrapping(1);
        self.num_added += Wrapping(1);

        // Release ordering: the element write above must be visible to the
        // driver before the updated used index is.
        mem.store(
            u16::to_le(self.next_used.0),
            self.used_ring
                .checked_add(2)
                .ok_or(Error::AddressOverflow)?,
            Ordering::Release,
        )
        .map_err(Error::GuestMemory)
    }

    /// Re-enable driver notifications; returns `true` when new available
    /// entries appeared meanwhile (so the caller must process them again).
    fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
        self.set_notification(mem, true)?;
        // The fence orders the notification enable before re-reading the
        // available index, closing the race with the driver.
        fence(Ordering::SeqCst);

        self.avail_idx(mem, Ordering::Relaxed)
            .map(|idx| idx != self.next_avail)
    }

    fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> {
        self.set_notification(mem, false)
    }

    /// Decide whether the driver must be notified about newly used buffers.
    /// Always `true` without EVENT_IDX; otherwise applies the virtio spec's
    /// `vring_need_event` wrapping comparison against the driver's `used_event`.
    fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
        let used_idx = self.next_used;

        // Order our used-ring updates before reading the driver's used_event.
        fence(Ordering::SeqCst);

        if self.event_idx_enabled {
            let used_event = self.used_event(mem, Ordering::Relaxed)?;
            // `old` is the used index as of the previous notification check.
            let old = used_idx - self.num_added;
            self.num_added = Wrapping(0);

            // Wrapping-aware check that used_event fell in (old, used_idx].
            return Ok(used_idx - used_event - Wrapping(1) < used_idx - old);
        }

        Ok(true)
    }

    fn next_avail(&self) -> u16 {
        self.next_avail.0
    }

    fn set_next_avail(&mut self, next_avail: u16) {
        self.next_avail = Wrapping(next_avail);
    }

    fn next_used(&self) -> u16 {
        self.next_used.0
    }

    fn set_next_used(&mut self, next_used: u16) {
        self.next_used = Wrapping(next_used);
    }

    fn desc_table(&self) -> u64 {
        self.desc_table.0
    }

    fn avail_ring(&self) -> u64 {
        self.avail_ring.0
    }

    fn used_ring(&self) -> u64 {
        self.used_ring.0
    }

    fn event_idx_enabled(&self) -> bool {
        self.event_idx_enabled
    }

    /// Pop the next available descriptor chain, or `None` when the ring is
    /// empty or the queue is not in an iterable state (errors are logged).
    fn pop_descriptor_chain<M>(&mut self, mem: M) -> Option<DescriptorChain<M>>
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        match self.iter(mem) {
            Ok(mut iter) => iter.next(),
            Err(e) => {
                error!("Iterator error {}", e);
                None
            }
        }
    }
}
585
586impl QueueOwnedT for Queue {
587 fn iter<M>(&mut self, mem: M) -> Result<AvailIter<'_, M>, Error>
588 where
589 M: Deref,
590 M::Target: GuestMemory,
591 {
592 if !self.ready || self.avail_ring == GuestAddress(0) {
596 return Err(Error::QueueNotReady);
597 }
598
599 self.avail_idx(mem.deref(), Ordering::Acquire)
600 .map(move |idx| AvailIter::new(mem, idx, self))?
601 }
602
603 fn go_to_previous_position(&mut self) {
604 self.next_avail -= Wrapping(1);
605 }
606}
607
/// An iterator over the descriptor chains currently published in a split
/// queue's available ring.
#[derive(Debug)]
pub struct AvailIter<'b, M> {
    /// Handle to the guest memory the rings live in.
    mem: M,
    /// Guest address of the descriptor table.
    desc_table: GuestAddress,
    /// Guest address of the available ring.
    avail_ring: GuestAddress,
    /// Queue size the iterator was created with.
    queue_size: u16,
    /// Available index published by the driver; iteration stops here.
    last_index: Wrapping<u16>,
    /// Borrowed queue position, advanced as chains are consumed.
    next_avail: &'b mut Wrapping<u16>,
}
684
685impl<'b, M> AvailIter<'b, M>
686where
687 M: Deref,
688 M::Target: GuestMemory,
689{
690 pub(crate) fn new(mem: M, idx: Wrapping<u16>, queue: &'b mut Queue) -> Result<Self, Error> {
699 if (idx - queue.next_avail).0 > queue.size {
706 return Err(Error::InvalidAvailRingIndex);
707 }
708
709 Ok(AvailIter {
710 mem,
711 desc_table: queue.desc_table,
712 avail_ring: queue.avail_ring,
713 queue_size: queue.size,
714 last_index: idx,
715 next_avail: &mut queue.next_avail,
716 })
717 }
718
719 pub fn go_to_previous_position(&mut self) {
727 *self.next_avail -= Wrapping(1);
728 }
729}
730
731impl<M> Iterator for AvailIter<'_, M>
732where
733 M: Clone + Deref,
734 M::Target: GuestMemory,
735{
736 type Item = DescriptorChain<M>;
737
738 fn next(&mut self) -> Option<Self::Item> {
739 if *self.next_avail == self.last_index {
740 return None;
741 }
742
743 let elem_off =
746 u64::from(self.next_avail.0.checked_rem(self.queue_size)?) * VIRTQ_AVAIL_ELEMENT_SIZE;
747 let offset = VIRTQ_AVAIL_RING_HEADER_SIZE + elem_off;
748
749 let addr = self.avail_ring.checked_add(offset)?;
750 let head_index: u16 = self
751 .mem
752 .load(addr, Ordering::Acquire)
753 .map(u16::from_le)
754 .map_err(|_| error!("Failed to read from memory {:x}", addr.raw_value()))
755 .ok()?;
756
757 *self.next_avail += Wrapping(1);
758
759 Some(DescriptorChain::new(
760 self.mem.clone(),
761 self.desc_table,
762 self.queue_size,
763 head_index,
764 ))
765 }
766}
767
#[cfg(any(test, feature = "test-utils"))]
impl PartialEq for Error {
    /// Two errors compare equal when their `Display` output matches; this is
    /// only meant for assertions in tests.
    fn eq(&self, other: &Self) -> bool {
        self.to_string() == other.to_string()
    }
}
776
#[cfg(test)]
mod tests {
    use super::*;
    use crate::defs::{DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR};
    use crate::desc::{split::Descriptor as SplitDescriptor, RawDescriptor};
    use crate::mock::MockSplitQueue;
    use virtio_bindings::bindings::virtio_ring::{
        VRING_DESC_F_NEXT, VRING_DESC_F_WRITE, VRING_USED_F_NO_NOTIFY,
    };

    use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};

    /// Exercises `is_valid` together with the size/address setters: invalid
    /// values must be rejected (old value kept) and valid ones accepted.
    #[test]
    fn test_queue_is_valid() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(m));

        // A queue that is not ready is never valid.
        q.set_ready(false);
        assert!(!q.ready());
        assert!(!q.is_valid(m));
        q.set_ready(true);

        // Too large, zero, or non-power-of-two sizes keep the old size.
        q.set_size(q.max_size() << 1);
        assert_eq!(q.size, q.max_size());

        q.set_size(0);
        assert_eq!(q.size, q.max_size());

        q.set_size(11);
        assert_eq!(q.size, q.max_size());

        q.set_size(4);
        assert_eq!(q.size, 4);
        q.size = q.max_size();

        // Misaligned desc table address is ignored; out-of-range one makes
        // the queue invalid.
        q.set_desc_table_address(Some(0xf), None);
        assert_eq!(q.desc_table.0, vq.desc_table_addr().0);
        q.set_desc_table_address(Some(0xffff_fff0), None);
        assert_eq!(q.desc_table.0, 0xffff_fff0);
        assert!(!q.is_valid(m));
        q.set_desc_table_address(Some(0x10), None);
        assert_eq!(q.desc_table.0, 0x10);
        assert!(q.is_valid(m));
        let addr = vq.desc_table_addr().0;
        q.set_desc_table_address(Some(addr as u32), Some((addr >> 32) as u32));

        // Same checks for the available ring (2-byte alignment).
        q.set_avail_ring_address(Some(0x1), None);
        assert_eq!(q.avail_ring.0, vq.avail_addr().0);
        q.set_avail_ring_address(Some(0xffff_fffe), None);
        assert_eq!(q.avail_ring.0, 0xffff_fffe);
        assert!(!q.is_valid(m));
        q.set_avail_ring_address(Some(0x2), None);
        assert_eq!(q.avail_ring.0, 0x2);
        assert!(q.is_valid(m));
        let addr = vq.avail_addr().0;
        q.set_avail_ring_address(Some(addr as u32), Some((addr >> 32) as u32));

        // Same checks for the used ring (4-byte alignment).
        q.set_used_ring_address(Some(0x3), None);
        assert_eq!(q.used_ring.0, vq.used_addr().0);
        q.set_used_ring_address(Some(0xffff_fffc), None);
        assert_eq!(q.used_ring.0, 0xffff_fffc);
        assert!(!q.is_valid(m));
        q.set_used_ring_address(Some(0x4), None);
        assert_eq!(q.used_ring.0, 0x4);
        let addr = vq.used_addr().0;
        q.set_used_ring_address(Some(addr as u32), Some((addr >> 32) as u32));
        assert!(q.is_valid(m));
    }

    /// `add_used` must reject out-of-bounds head indexes and otherwise write
    /// the element and bump the used index in guest memory.
    #[test]
    fn test_add_used() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(0));
        assert_eq!(u16::from_le(vq.used().idx().load()), 0);

        // Index 16 is out of bounds for a 16-entry queue.
        assert!(q.add_used(mem, 16, 0x1000).is_err());
        assert_eq!(u16::from_le(vq.used().idx().load()), 0);

        q.add_used(mem, 1, 0x1000).unwrap();
        assert_eq!(q.next_used, Wrapping(1));
        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(1));
        assert_eq!(u16::from_le(vq.used().idx().load()), 1);

        // The used element itself must carry the id and length we passed.
        let x = vq.used().ring().ref_at(0).unwrap().load();
        assert_eq!(x.id(), 1);
        assert_eq!(x.len(), 0x1000);
    }

    /// `reset` must restore every field to its freshly-created value.
    #[test]
    fn test_reset_queue() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        // Dirty every field first so the reset is observable.
        q.set_size(8);
        q.set_desc_table_address(Some(0x5000), None);
        q.set_event_idx(true);
        q.set_next_avail(2);
        q.set_next_used(4);
        q.num_added = Wrapping(15);
        assert_eq!(q.size, 8);
        assert!(q.ready);
        assert_ne!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
        assert_ne!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
        assert_ne!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
        assert_ne!(q.next_avail, Wrapping(0));
        assert_ne!(q.next_used, Wrapping(0));
        assert_ne!(q.num_added, Wrapping(0));
        assert!(q.event_idx_enabled);

        q.reset();
        assert_eq!(q.size, 16);
        assert!(!q.ready);
        assert_eq!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
        assert_eq!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
        assert_eq!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
        assert_eq!(q.next_avail, Wrapping(0));
        assert_eq!(q.next_used, Wrapping(0));
        assert_eq!(q.num_added, Wrapping(0));
        assert!(!q.event_idx_enabled);
    }

    /// Covers `needs_notification` both without EVENT_IDX (always true) and
    /// with it, including wrap-around of the 16-bit used index.
    #[test]
    fn test_needs_notification() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let qsize = 16;
        let vq = MockSplitQueue::new(mem, qsize);
        let mut q: Queue = vq.create_queue().unwrap();
        let avail_addr = vq.avail_addr();

        // Without EVENT_IDX the device must always notify.
        for i in 0..qsize {
            q.next_used = Wrapping(i);
            assert!(q.needs_notification(mem).unwrap());
        }

        // used_event lives at avail_ring + 4 + 2 * qsize; set it to 4.
        mem.write_obj::<u16>(
            u16::to_le(4),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();
        q.set_event_idx(true);

        let wrap = u32::from(u16::MAX) + 1;

        // Only crossing used_event (= 4) triggers a notification, including
        // after the 16-bit index wraps.
        for i in 0..wrap + 12 {
            q.next_used = Wrapping(i as u16);
            q.num_added = Wrapping(1);
            let expected = i == 5 || i == (5 + wrap);
            assert_eq!((q.needs_notification(mem).unwrap(), i), (expected, i));
        }

        mem.write_obj::<u16>(
            u16::to_le(8),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();

        assert!(!q.needs_notification(mem).unwrap());

        mem.write_obj::<u16>(
            u16::to_le(15),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();

        q.num_added = Wrapping(1);
        assert!(!q.needs_notification(mem).unwrap());

        q.next_used = Wrapping(15);
        q.num_added = Wrapping(1);
        assert!(!q.needs_notification(mem).unwrap());

        // Crossing used_event = 15 at next_used = 16 requires a notification.
        q.next_used = Wrapping(16);
        q.num_added = Wrapping(1);
        assert!(q.needs_notification(mem).unwrap());

        // num_added was reset above, so no further notification is needed.
        assert!(!q.needs_notification(mem).unwrap());

        mem.write_obj::<u16>(
            u16::to_le(u16::MAX - 3),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();
        q.next_used = Wrapping(u16::MAX - 2);
        q.num_added = Wrapping(1);
        assert!(q.needs_notification(mem).unwrap());
    }

    /// Checks the used-ring flags path and the EVENT_IDX path of
    /// `enable_notification` / `disable_notification`.
    #[test]
    fn test_enable_disable_notification() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();
        let used_addr = vq.used_addr();

        assert!(!q.event_idx_enabled);

        // Without EVENT_IDX, enable/disable toggles the used-ring flags word.
        q.enable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, 0);

        q.disable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, VRING_USED_F_NO_NOTIFY as u16);

        q.enable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, 0);

        // With EVENT_IDX, enable_notification reports whether new entries
        // appeared in the meantime.
        q.set_event_idx(true);
        let avail_addr = vq.avail_addr();
        mem.write_obj::<u16>(u16::to_le(2), avail_addr.unchecked_add(2))
            .unwrap();

        assert!(q.enable_notification(mem).unwrap());
        q.next_avail = Wrapping(2);
        assert!(!q.enable_notification(mem).unwrap());

        mem.write_obj::<u16>(u16::to_le(8), avail_addr.unchecked_add(2))
            .unwrap();

        assert!(q.enable_notification(mem).unwrap());
        q.next_avail = Wrapping(8);
        assert!(!q.enable_notification(mem).unwrap());
    }

    /// Simulates a device's disable/process/enable notification loop and
    /// verifies chains are consumed exactly once, even when the driver adds
    /// entries while the device is processing.
    #[test]
    fn test_consume_chains_with_notif() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(mem));

        // Build 5 chains: [0..=1], [2..=4], [5..=6], [7..=8], [9..=12].
        let mut descs = Vec::new();
        for i in 0..13 {
            let flags = match i {
                1 | 4 | 6 | 8 | 12 => 0,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(RawDescriptor::from(SplitDescriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                flags as u16,
                i + 1,
            )));
        }

        vq.add_desc_chains(&descs, 0).unwrap();
        // Only publish the first 2 chains for now.
        vq.avail().idx().store(u16::to_le(2));
        assert_eq!(q.next_avail(), 0);

        let mut i = 0;

        loop {
            i += 1;
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        // Nothing new appeared, so one pass suffices.
        assert_eq!(i, 1);
        assert_eq!(q.next_avail(), 2);
        assert_eq!(q.next_used(), 2);
        vq.avail().idx().store(u16::to_le(3));
        i = 0;

        loop {
            i += 1;
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }

            // The driver races in a new entry; enable_notification must
            // detect it and force a second pass.
            vq.avail().idx().store(u16::to_le(4));
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        assert_eq!(i, 2);
        assert_eq!(q.next_avail(), 4);
        assert_eq!(q.next_used(), 4);

        vq.avail().idx().store(u16::to_le(7));
        loop {
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        assert_eq!(q.next_avail(), 7);
        assert_eq!(q.next_used(), 7);
    }

    /// A driver-published available index that moves backwards (more than
    /// `size` pending entries) must make `iter` fail.
    #[test]
    fn test_invalid_avail_idx() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(mem));

        // Build 3 chains: [0..=1], [2..=4], [5..=6].
        let mut descs = Vec::new();
        for i in 0..7 {
            let flags = match i {
                1 | 4 | 6 => 0,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(RawDescriptor::from(SplitDescriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                flags as u16,
                i + 1,
            )));
        }

        vq.add_desc_chains(&descs, 0).unwrap();
        vq.avail().idx().store(u16::to_le(3));
        assert_eq!(q.next_avail(), 0);
        assert_eq!(q.next_used(), 0);

        loop {
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        assert_eq!(q.next_avail(), 3);
        assert_eq!(q.avail_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
        assert_eq!(q.next_used(), 3);
        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
        assert!(q.lock().ready());

        // Moving the index backwards (3 -> 1) makes the pending distance
        // exceed the queue size, which iter must reject.
        vq.avail().idx().store(u16::to_le(1));
        assert!(q.iter(mem).is_err());
    }

    /// The pending-entries sanity check must work across wrap-around of the
    /// device's `next_avail` position.
    #[test]
    fn test_iterator_and_avail_idx() {
        let queue_size = 2;
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, queue_size);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(mem));

        let mut descs = Vec::new();
        for i in 0..queue_size {
            descs.push(RawDescriptor::from(SplitDescriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                0_u16,
                i + 1,
            )));
        }
        vq.add_desc_chains(&descs, 0).unwrap();

        // Park next_avail right before the wrap point.
        q.set_next_avail(u16::MAX);

        // Distances of queue_size and queue_size - 1 are acceptable...
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_ok());
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size - 1);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_ok());

        // ...but queue_size + 1 pending entries are impossible.
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size + 1);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_err());
    }

    /// Walks chains via the `AvailIter` and checks the whole-chain,
    /// writable-only, and readable-only descriptor sub-iterators.
    #[test]
    fn test_descriptor_and_iterator() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        assert!(q.is_valid(m));

        // Build 3 chains: [0..=1], [2..=4] (two writable), [5..=6] (one
        // readable, one writable).
        let mut descs = Vec::new();
        for j in 0..7 {
            let flags = match j {
                1 | 6 => 0,
                2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
                4 => VRING_DESC_F_WRITE,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(RawDescriptor::from(SplitDescriptor::new(
                (0x1000 * (j + 1)) as u64,
                0x1000,
                flags as u16,
                j + 1,
            )));
        }

        vq.add_desc_chains(&descs, 0).unwrap();

        let mut i = q.iter(m).unwrap();

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 0);

            let mut iter = c;
            assert!(iter.next().is_some());
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 2);

            let mut iter = c.writable();
            assert!(iter.next().is_some());
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 5);

            let mut iter = c.readable();
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }
    }

    /// End-to-end `iter` behavior: not-ready rejection, chain traversal,
    /// `go_to_previous_position`, and that iterating a chain never yields
    /// more than 2^32 bytes in total.
    #[test]
    fn test_iterator() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        q.size = q.max_size;
        q.desc_table = vq.desc_table_addr();
        q.avail_ring = vq.avail_addr();
        q.used_ring = vq.used_addr();
        assert!(q.is_valid(m));

        {
            // A not-ready queue cannot be iterated.
            q.ready = false;
            assert!(q.iter(m).is_err());
        }

        q.ready = true;

        {
            // Two chains: [0..=1] and [2..=4].
            let mut descs = Vec::new();
            for j in 0..5u16 {
                let flags = match j {
                    1 | 4 => 0,
                    _ => VRING_DESC_F_NEXT,
                };

                descs.push(RawDescriptor::from(SplitDescriptor::new(
                    (0x1000 * (j + 1)) as u64,
                    0x1000,
                    flags as u16,
                    j + 1,
                )));
            }
            vq.add_desc_chains(&descs, 0).unwrap();

            let mut i = q.iter(m).unwrap();

            {
                let mut c = i.next().unwrap();
                assert_eq!(c.head_index(), 0);

                c.next().unwrap();
                assert!(c.next().is_some());
                assert!(c.next().is_none());
                assert_eq!(c.head_index(), 0);
            }

            {
                let mut c = i.next().unwrap();
                assert_eq!(c.head_index(), 2);

                c.next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                assert!(c.next().is_none());
                assert_eq!(c.head_index(), 2);
            }

            {
                // Rewinding lets the last chain be consumed again.
                assert!(i.next().is_none());
                i.go_to_previous_position();
                let mut c = q.iter(m).unwrap().next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                assert!(c.next().is_none());
            }
        }

        {
            // Two descriptors whose lengths sum past u32::MAX; the chain
            // iterator must stop before overflowing the total.
            let descs = vec![
                RawDescriptor::from(SplitDescriptor::new(
                    0x1000,
                    0xffff_ffff,
                    VRING_DESC_F_NEXT as u16,
                    1,
                )),
                RawDescriptor::from(SplitDescriptor::new(0x1000, 0x1234_5678, 0, 2)),
            ];
            vq.add_desc_chains(&descs, 0).unwrap();
            let mut yielded_bytes_by_iteration = 0_u32;
            for d in q.iter(m).unwrap().next().unwrap() {
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
                    .checked_add(d.len())
                    .expect("iterator should not yield more than 2^32 bytes");
            }
        }

        {
            // A self-referencing descriptor chain must not loop forever.
            let descs = vec![RawDescriptor::from(SplitDescriptor::new(
                0x1000,
                0xffff_ffff,
                VRING_DESC_F_NEXT as u16,
                0,
            ))];
            vq.add_desc_chains(&descs, 0).unwrap();
            let mut yielded_bytes_by_iteration = 0_u32;
            for d in q.iter(m).unwrap().next().unwrap() {
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
                    .checked_add(d.len())
                    .expect("iterator should not yield more than 2^32 bytes");
            }
        }
    }

    /// Regression test: a zero-size queue must not cause a division by zero
    /// in the iterator (`checked_rem` guard).
    #[test]
    fn test_regression_iterator_division() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 1);
        let descriptors: Vec<RawDescriptor> = vec![RawDescriptor::from(SplitDescriptor::new(
            14178673876262995140,
            3301229764,
            50372,
            50372,
        ))];
        vq.build_desc_chain(&descriptors).unwrap();

        // Fuzz-derived queue state with size == 0.
        let mut q = Queue {
            max_size: 38,
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            event_idx_enabled: false,
            num_added: Wrapping(0),
            size: 0,
            ready: false,
            desc_table: GuestAddress(12837708984796196),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(9943947977301164032),
        };

        assert!(q.pop_descriptor_chain(m).is_none());
    }

    /// Each `try_set_*` must return its specific error and leave the field
    /// untouched on invalid input.
    #[test]
    fn test_setters_error_cases() {
        assert_eq!(Queue::new(15).unwrap_err(), Error::InvalidMaxSize);
        let mut q = Queue::new(16).unwrap();

        let expected_val = q.desc_table.0;
        assert_eq!(
            q.try_set_desc_table_address(GuestAddress(0xf)).unwrap_err(),
            Error::InvalidDescTableAlign
        );
        assert_eq!(q.desc_table(), expected_val);

        let expected_val = q.avail_ring.0;
        assert_eq!(
            q.try_set_avail_ring_address(GuestAddress(0x1)).unwrap_err(),
            Error::InvalidAvailRingAlign
        );
        assert_eq!(q.avail_ring(), expected_val);

        let expected_val = q.used_ring.0;
        assert_eq!(
            q.try_set_used_ring_address(GuestAddress(0x3)).unwrap_err(),
            Error::InvalidUsedRingAlign
        );
        assert_eq!(q.used_ring(), expected_val);

        let expected_val = q.size;
        assert_eq!(q.try_set_size(15).unwrap_err(), Error::InvalidSize);
        assert_eq!(q.size(), expected_val)
    }

    /// Regression test: after a reset (avail_ring == 0) the queue must not be
    /// iterable, even if it is then marked ready again without being
    /// reconfigured — otherwise stale ring contents would be replayed.
    #[test]
    fn test_regression_timeout_after_reset() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 1024);

        // Fuzz-derived descriptor soup; the contents only matter in that they
        // must never be yielded after reset.
        let descriptors: Vec<RawDescriptor> = vec![
            RawDescriptor::from(SplitDescriptor::new(21508325467, 0, 1, 4)),
            RawDescriptor::from(SplitDescriptor::new(2097152, 4096, 3, 0)),
            RawDescriptor::from(SplitDescriptor::new(
                18374686479672737792,
                4294967295,
                65535,
                29,
            )),
            RawDescriptor::from(SplitDescriptor::new(76842670169653248, 1114115, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(16, 983040, 126, 3)),
            RawDescriptor::from(SplitDescriptor::new(897648164864, 0, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(111669149722, 0, 0, 0)),
        ];
        vq.build_multiple_desc_chains(&descriptors).unwrap();

        let mut q: Queue = vq.create_queue().unwrap();

        q.reset();
        q.set_ready(true);
        let mut counter = 0;
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
            while desc_chain.next().is_some() {
                counter += 1;
            }
        }
        assert_eq!(counter, 0);

        q.reset();
        // Pointing avail_ring at zeroed memory must also yield nothing.
        q.set_avail_ring_address(Some(0x1000), None);
        assert_eq!(q.avail_ring, GuestAddress(0x1000));
        counter = 0;
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
            while desc_chain.next().is_some() {
                counter += 1;
            }
        }
        assert_eq!(counter, 0);
    }
}