virtio_queue/
queue.rs

1// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2// Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved.
3// Copyright © 2019 Intel Corporation.
4// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
5// Use of this source code is governed by a BSD-style license that can be
6// found in the LICENSE-BSD-3-Clause file.
7//
8// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
9
10use std::mem::size_of;
11use std::num::Wrapping;
12use std::ops::Deref;
13use std::sync::atomic::{fence, Ordering};
14
15use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};
16
17use crate::defs::{
18    DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR,
19    VIRTQ_AVAIL_ELEMENT_SIZE, VIRTQ_AVAIL_RING_HEADER_SIZE, VIRTQ_AVAIL_RING_META_SIZE,
20    VIRTQ_USED_ELEMENT_SIZE, VIRTQ_USED_RING_HEADER_SIZE, VIRTQ_USED_RING_META_SIZE,
21};
22use crate::desc::{split::VirtqUsedElem, RawDescriptor};
23use crate::{error, DescriptorChain, Error, QueueGuard, QueueOwnedT, QueueState, QueueT};
24use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;
25
26#[cfg(kani)]
27mod verification;
28
29/// The maximum queue size as defined in the Virtio Spec.
30pub const MAX_QUEUE_SIZE: u16 = 32768;
31
32/// Struct to maintain information and manipulate a virtio queue.
33///
34/// # Example
35///
36/// ```rust
37/// use virtio_queue::{Queue, QueueOwnedT, QueueT};
38/// use vm_memory::{Bytes, GuestAddress, GuestAddressSpace, GuestMemoryMmap};
39///
40/// let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
41/// let mut queue = Queue::new(1024).unwrap();
42///
43/// // First, the driver sets up the queue; this setup is done via writes on the bus (PCI, MMIO).
44/// queue.set_size(8);
45/// queue.set_desc_table_address(Some(0x1000), None);
46/// queue.set_avail_ring_address(Some(0x2000), None);
47/// queue.set_used_ring_address(Some(0x3000), None);
48/// queue.set_event_idx(true);
49/// queue.set_ready(true);
50/// // The user should check if the queue is valid before starting to use it.
51/// assert!(queue.is_valid(&m));
52///
53/// // Here the driver would add entries in the available ring and then update the `idx` field of
54/// // the available ring (address = 0x2000 + 2).
55/// m.write_obj(3u16, GuestAddress(0x2002)).unwrap();
56///
57/// loop {
58///     queue.disable_notification(&m).unwrap();
59///
60///     // Consume entries from the available ring.
61///     while let Some(chain) = queue.iter(&m).unwrap().next() {
62///         // Process the descriptor chain, and then add an entry in the used ring and optionally
63///         // notify the driver.
64///         queue.add_used(&m, chain.head_index(), 0x100).unwrap();
65///
66///         if queue.needs_notification(&m).unwrap() {
67///             // Here we would notify the driver it has new entries in the used ring to consume.
68///         }
69///     }
70///     if !queue.enable_notification(&m).unwrap() {
71///         break;
72///     }
73/// }
74///
75/// // We can reset the queue at some point.
76/// queue.reset();
77/// // The queue should not be ready after reset.
78/// assert!(!queue.ready());
79/// ```
80#[derive(Debug, Default, PartialEq, Eq)]
81pub struct Queue {
82    /// The maximum size in elements offered by the device.
83    max_size: u16,
84
85    /// Tail position of the available ring.
86    next_avail: Wrapping<u16>,
87
88    /// Head position of the used ring.
89    next_used: Wrapping<u16>,
90
91    /// VIRTIO_F_EVENT_IDX negotiated.
92    event_idx_enabled: bool,
93
94    /// The number of descriptor chains placed in the used ring via `add_used`
95    /// since the last time `needs_notification` was called on the associated queue.
96    num_added: Wrapping<u16>,
97
98    /// The queue size in elements the driver selected.
99    size: u16,
100
101    /// Indicates if the queue is finished with configuration.
102    ready: bool,
103
104    /// Guest physical address of the descriptor table.
105    desc_table: GuestAddress,
106
107    /// Guest physical address of the available ring.
108    avail_ring: GuestAddress,
109
110    /// Guest physical address of the used ring.
111    used_ring: GuestAddress,
112}
113
114impl Queue {
115    /// Equivalent of [`QueueT::set_size`] returning an error in case of invalid size.
116    ///
117    /// This should not be directly used, as the preferred method is part of the [`QueueT`]
118    /// interface. This is a convenience function for implementing save/restore capabilities.
119    pub fn try_set_size(&mut self, size: u16) -> Result<(), Error> {
120        if size > self.max_size() || size == 0 || (size & (size - 1)) != 0 {
121            return Err(Error::InvalidSize);
122        }
123        self.size = size;
124        Ok(())
125    }
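    // A quick illustration of the check above: `size & (size - 1) == 0` holds exactly for
    // powers of two (e.g. `8 & 7 == 0`), while a value such as 12 gives `12 & 11 == 8 != 0`
    // and is rejected, as is 0 or any size larger than `max_size`.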
126
127    /// Tries to set the descriptor table address. In case of an invalid value, the address is
128    /// not updated.
129    ///
130    /// This should not be directly used, as the preferred method is
131    /// [`QueueT::set_desc_table_address`]. This is a convenience function for implementing
132    /// save/restore capabilities.
133    pub fn try_set_desc_table_address(&mut self, desc_table: GuestAddress) -> Result<(), Error> {
134        if desc_table.mask(0xf) != 0 {
135            return Err(Error::InvalidDescTableAlign);
136        }
137        self.desc_table = desc_table;
138
139        Ok(())
140    }
141
142    /// Tries to update the available ring address. In case of an invalid value, the address is
143    /// not updated.
144    ///
145    /// This should not be directly used, as the preferred method is
146    /// [`QueueT::set_avail_ring_address`]. This is a convenience function for implementing
147    /// save/restore capabilities.
148    pub fn try_set_avail_ring_address(&mut self, avail_ring: GuestAddress) -> Result<(), Error> {
149        if avail_ring.mask(0x1) != 0 {
150            return Err(Error::InvalidAvailRingAlign);
151        }
152        self.avail_ring = avail_ring;
153        Ok(())
154    }
155
156    /// Tries to update the used ring address. In case of an invalid value, the address is not
157    /// updated.
158    ///
159    /// This should not be directly used, as the preferred method is
160    /// [`QueueT::set_used_ring_address`]. This is a convenience function for implementing
161    /// save/restore capabilities.
162    pub fn try_set_used_ring_address(&mut self, used_ring: GuestAddress) -> Result<(), Error> {
163        if used_ring.mask(0x3) != 0 {
164            return Err(Error::InvalidUsedRingAlign);
165        }
166        self.used_ring = used_ring;
167        Ok(())
168    }
169
170    /// Returns the state of the `Queue`.
171    ///
172    /// This is useful for implementing save/restore capabilities.
173    /// The state does not have support for serialization, but this can be
174    /// added by VMMs locally through the use of a
175    /// [remote type](https://serde.rs/remote-derive.html).
176    ///
177    /// Alternatively, a version aware and serializable/deserializable QueueState
178    /// is available in the `virtio-queue-ser` crate.
179    pub fn state(&self) -> QueueState {
180        QueueState {
181            max_size: self.max_size,
182            next_avail: self.next_avail(),
183            next_used: self.next_used(),
184            event_idx_enabled: self.event_idx_enabled,
185            size: self.size,
186            ready: self.ready,
187            desc_table: self.desc_table(),
188            avail_ring: self.avail_ring(),
189            used_ring: self.used_ring(),
190        }
191    }
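    // A minimal save/restore sketch (illustrative only; it assumes the returned `QueueState`
    // fields are read directly and that the `QueueT` trait is in scope for the setters):
    //
    //     let saved = queue.state();
    //     // ... persist `saved`, then later rebuild the queue:
    //     let mut restored = Queue::new(saved.max_size)?;
    //     restored.try_set_size(saved.size)?;
    //     restored.try_set_desc_table_address(GuestAddress(saved.desc_table))?;
    //     restored.try_set_avail_ring_address(GuestAddress(saved.avail_ring))?;
    //     restored.try_set_used_ring_address(GuestAddress(saved.used_ring))?;
    //     restored.set_next_avail(saved.next_avail);
    //     restored.set_next_used(saved.next_used);
    //     restored.set_event_idx(saved.event_idx_enabled);
    //     restored.set_ready(saved.ready);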
192
193    // Helper method that writes `val` to the `avail_event` field of the used ring, using
194    // the provided ordering.
195    fn set_avail_event<M: GuestMemory>(
196        &self,
197        mem: &M,
198        val: u16,
199        order: Ordering,
200    ) -> Result<(), Error> {
201        // This cannot overflow a u64 since it is working with relatively small numbers compared
202        // to u64::MAX.
203        let avail_event_offset =
204            VIRTQ_USED_RING_HEADER_SIZE + VIRTQ_USED_ELEMENT_SIZE * u64::from(self.size);
205        let addr = self
206            .used_ring
207            .checked_add(avail_event_offset)
208            .ok_or(Error::AddressOverflow)?;
209
210        mem.store(u16::to_le(val), addr, order)
211            .map_err(Error::GuestMemory)
212    }
213
214    // Set the value of the `flags` field of the used ring, applying the specified ordering.
215    fn set_used_flags<M: GuestMemory>(
216        &mut self,
217        mem: &M,
218        val: u16,
219        order: Ordering,
220    ) -> Result<(), Error> {
221        mem.store(u16::to_le(val), self.used_ring, order)
222            .map_err(Error::GuestMemory)
223    }
224
225    // Write the appropriate values to enable or disable notifications from the driver.
226    //
227    // Every access in this method uses `Relaxed` ordering because a fence is added by the caller
228    // when appropriate.
229    fn set_notification<M: GuestMemory>(&mut self, mem: &M, enable: bool) -> Result<(), Error> {
230        if enable {
231            if self.event_idx_enabled {
232                // We call `set_avail_event` using the `next_avail` value, instead of reading
233                // and using the current `avail_idx` to avoid missing notifications. More
234                // details in `enable_notification`.
235                self.set_avail_event(mem, self.next_avail.0, Ordering::Relaxed)
236            } else {
237                self.set_used_flags(mem, 0, Ordering::Relaxed)
238            }
239        } else if !self.event_idx_enabled {
240            self.set_used_flags(mem, VRING_USED_F_NO_NOTIFY as u16, Ordering::Relaxed)
241        } else {
242            // Notifications are effectively disabled by default after triggering once when
243            // `VIRTIO_F_EVENT_IDX` is negotiated, so we don't do anything in that case.
244            Ok(())
245        }
246    }
247
248    // Return the value present in the used_event field of the avail ring.
249    //
250    // If the VIRTIO_F_EVENT_IDX feature bit is not negotiated, the flags field in the available
251    // ring offers a crude mechanism for the driver to inform the device that it doesn’t want
252    // interrupts when buffers are used. Otherwise virtq_avail.used_event is a more performant
253    // alternative where the driver specifies how far the device can progress before interrupting.
254    //
255    // Neither of these interrupt suppression methods is reliable, as they are not synchronized
256    // with the device, but they serve as useful optimizations. So we only ensure access to the
257    // virtq_avail.used_event is atomic, but do not need to synchronize with other memory accesses.
258    fn used_event<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
259        // This cannot overflow a u64 since it is working with relatively small numbers compared
260        // to u64::MAX.
261        let used_event_offset =
262            VIRTQ_AVAIL_RING_HEADER_SIZE + u64::from(self.size) * VIRTQ_AVAIL_ELEMENT_SIZE;
263        let used_event_addr = self
264            .avail_ring
265            .checked_add(used_event_offset)
266            .ok_or(Error::AddressOverflow)?;
267
268        mem.load(used_event_addr, order)
269            .map(u16::from_le)
270            .map(Wrapping)
271            .map_err(Error::GuestMemory)
272    }
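    // Layout reminder for the offsets computed above (split virtqueue, with the header and
    // element sizes coming from the constants in `crate::defs`):
    //
    //     avail ring: flags (2) | idx (2) | ring[size] (2 bytes each) | used_event (2)
    //     used ring:  flags (2) | idx (2) | ring[size] (8 bytes each) | avail_event (2)
    //
    // so `used_event` lives at `avail_ring + 4 + 2 * size` and `avail_event` at
    // `used_ring + 4 + 8 * size`, which is exactly what `used_event` and `set_avail_event`
    // compute.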
273}
274
275impl<'a> QueueGuard<'a> for Queue {
276    type G = &'a mut Self;
277}
278
279impl QueueT for Queue {
280    fn new(max_size: u16) -> Result<Self, Error> {
281        // We need to check that the max size is a power of 2 because we're setting this as the
282        // queue size, and the valid queue sizes are a power of 2 as per the specification.
283        if max_size == 0 || max_size > MAX_QUEUE_SIZE || (max_size & (max_size - 1)) != 0 {
284            return Err(Error::InvalidMaxSize);
285        }
286        Ok(Queue {
287            max_size,
288            size: max_size,
289            ready: false,
290            desc_table: GuestAddress(DEFAULT_DESC_TABLE_ADDR),
291            avail_ring: GuestAddress(DEFAULT_AVAIL_RING_ADDR),
292            used_ring: GuestAddress(DEFAULT_USED_RING_ADDR),
293            next_avail: Wrapping(0),
294            next_used: Wrapping(0),
295            event_idx_enabled: false,
296            num_added: Wrapping(0),
297        })
298    }
299
300    fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool {
301        let queue_size = self.size as u64;
302        let desc_table = self.desc_table;
303        // The multiplication cannot overflow a u64 since we are multiplying a u16 with a
304        // small number.
305        let desc_table_size = size_of::<RawDescriptor>() as u64 * queue_size;
306        let avail_ring = self.avail_ring;
307        // The operations below cannot overflow a u64 since they're working with relatively small
308        // numbers compared to u64::MAX.
309        let avail_ring_size = VIRTQ_AVAIL_RING_META_SIZE + VIRTQ_AVAIL_ELEMENT_SIZE * queue_size;
310        let used_ring = self.used_ring;
311        let used_ring_size = VIRTQ_USED_RING_META_SIZE + VIRTQ_USED_ELEMENT_SIZE * queue_size;
312
313        if !self.ready {
314            error!("attempt to use virtio queue that is not marked ready");
315            false
316        } else if desc_table
317            .checked_add(desc_table_size)
318            .is_none_or(|v| !mem.address_in_range(v))
319        {
320            error!(
321                "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
322                desc_table.raw_value(),
323                desc_table_size
324            );
325            false
326        } else if avail_ring
327            .checked_add(avail_ring_size)
328            .is_none_or(|v| !mem.address_in_range(v))
329        {
330            error!(
331                "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
332                avail_ring.raw_value(),
333                avail_ring_size
334            );
335            false
336        } else if used_ring
337            .checked_add(used_ring_size)
338            .is_none_or(|v| !mem.address_in_range(v))
339        {
340            error!(
341                "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
342                used_ring.raw_value(),
343                used_ring_size
344            );
345            false
346        } else {
347            true
348        }
349    }
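    // As a concrete example of the bounds checked above: with `size == 16`, a 16-byte
    // `RawDescriptor` and the ring meta sizes from `crate::defs`, the checked regions are
    // 16 * 16 = 256 bytes of descriptor table, 6 + 2 * 16 = 38 bytes of available ring and
    // 6 + 8 * 16 = 134 bytes of used ring, each of which must end inside guest memory.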
350
351    fn reset(&mut self) {
352        self.ready = false;
353        self.size = self.max_size;
354        self.desc_table = GuestAddress(DEFAULT_DESC_TABLE_ADDR);
355        self.avail_ring = GuestAddress(DEFAULT_AVAIL_RING_ADDR);
356        self.used_ring = GuestAddress(DEFAULT_USED_RING_ADDR);
357        self.next_avail = Wrapping(0);
358        self.next_used = Wrapping(0);
359        self.num_added = Wrapping(0);
360        self.event_idx_enabled = false;
361    }
362
363    fn lock(&mut self) -> <Self as QueueGuard<'_>>::G {
364        self
365    }
366
367    fn max_size(&self) -> u16 {
368        self.max_size
369    }
370
371    fn size(&self) -> u16 {
372        self.size
373    }
374
375    fn set_size(&mut self, size: u16) {
376        if self.try_set_size(size).is_err() {
377            error!("virtio queue with invalid size: {}", size);
378        }
379    }
380
381    fn ready(&self) -> bool {
382        self.ready
383    }
384
385    fn set_ready(&mut self, ready: bool) {
386        self.ready = ready;
387    }
388
389    fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>) {
390        let low = low.unwrap_or(self.desc_table.0 as u32) as u64;
391        let high = high.unwrap_or((self.desc_table.0 >> 32) as u32) as u64;
392
393        let desc_table = GuestAddress((high << 32) | low);
394        if self.try_set_desc_table_address(desc_table).is_err() {
395            error!("virtio queue descriptor table breaks alignment constraints");
396        }
397    }
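    // For illustration: the 64-bit address is assembled from the two 32-bit halves written by
    // the driver, so e.g. `low = Some(0x9000_0000)` together with `high = Some(0x1)` yields
    // `GuestAddress(0x1_9000_0000)`; passing `None` for either half keeps the previously
    // configured half of the address.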
398
399    fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
400        let low = low.unwrap_or(self.avail_ring.0 as u32) as u64;
401        let high = high.unwrap_or((self.avail_ring.0 >> 32) as u32) as u64;
402
403        let avail_ring = GuestAddress((high << 32) | low);
404        if self.try_set_avail_ring_address(avail_ring).is_err() {
405            error!("virtio queue available ring breaks alignment constraints");
406        }
407    }
408
409    fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
410        let low = low.unwrap_or(self.used_ring.0 as u32) as u64;
411        let high = high.unwrap_or((self.used_ring.0 >> 32) as u32) as u64;
412
413        let used_ring = GuestAddress((high << 32) | low);
414        if self.try_set_used_ring_address(used_ring).is_err() {
415            error!("virtio queue used ring breaks alignment constraints");
416        }
417    }
418
419    fn set_event_idx(&mut self, enabled: bool) {
420        self.event_idx_enabled = enabled;
421    }
422
423    fn avail_idx<M>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error>
424    where
425        M: GuestMemory + ?Sized,
426    {
427        let addr = self
428            .avail_ring
429            .checked_add(2)
430            .ok_or(Error::AddressOverflow)?;
431
432        mem.load(addr, order)
433            .map(u16::from_le)
434            .map(Wrapping)
435            .map_err(Error::GuestMemory)
436    }
437
438    fn used_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
439        let addr = self
440            .used_ring
441            .checked_add(2)
442            .ok_or(Error::AddressOverflow)?;
443
444        mem.load(addr, order)
445            .map(u16::from_le)
446            .map(Wrapping)
447            .map_err(Error::GuestMemory)
448    }
449
450    fn add_used<M: GuestMemory>(
451        &mut self,
452        mem: &M,
453        head_index: u16,
454        len: u32,
455    ) -> Result<(), Error> {
456        if head_index >= self.size {
457            error!(
458                "attempted to add out of bounds descriptor to used ring: {}",
459                head_index
460            );
461            return Err(Error::InvalidDescriptorIndex);
462        }
463
464        let next_used_index = u64::from(self.next_used.0 % self.size);
465        // This cannot overflow a u64 since it is working with relatively small numbers compared
466        // to u64::MAX.
467        let offset = VIRTQ_USED_RING_HEADER_SIZE + next_used_index * VIRTQ_USED_ELEMENT_SIZE;
468        let addr = self
469            .used_ring
470            .checked_add(offset)
471            .ok_or(Error::AddressOverflow)?;
472        mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
473            .map_err(Error::GuestMemory)?;
474
475        self.next_used += Wrapping(1);
476        self.num_added += Wrapping(1);
477
478        mem.store(
479            u16::to_le(self.next_used.0),
480            self.used_ring
481                .checked_add(2)
482                .ok_or(Error::AddressOverflow)?,
483            Ordering::Release,
484        )
485        .map_err(Error::GuestMemory)
486    }
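    // For illustration: with `size == 16` and `next_used == 18`, the element above lands in
    // slot `18 % 16 == 2`, i.e. at `used_ring + 4 + 2 * 8 == used_ring + 20`, and the `idx`
    // field at `used_ring + 2` is then bumped to 19 with `Release` ordering so the driver
    // observes the element before the new index.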
487
488    fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
489        self.set_notification(mem, true)?;
490        // Ensures the following read is not reordered before any previous write operation.
491        fence(Ordering::SeqCst);
492
493        // We double check here to avoid the situation where the available ring has been updated
494        // just before we re-enabled notifications, and it's possible to miss one. We compare the
495        // current `avail_idx` value to `self.next_avail` because it's where we stopped processing
496        // entries. There are situations where we intentionally avoid processing everything in the
497        // available ring (which will cause this method to return `true`), but in that case we'll
498        // probably not re-enable notifications as we already know there are pending entries.
499        self.avail_idx(mem, Ordering::Relaxed)
500            .map(|idx| idx != self.next_avail)
501    }
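    // For illustration: if `next_avail` is 7 and the driver bumps `avail_idx` from 7 to 8 just
    // before notifications are re-enabled, the check above returns `true`, so the device keeps
    // processing instead of waiting for a notification that the driver may have legitimately
    // skipped while notifications were suppressed.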
502
503    fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> {
504        self.set_notification(mem, false)
505    }
506
507    fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
508        let used_idx = self.next_used;
509
510        // Complete all the writes in add_used() before reading the event.
511        fence(Ordering::SeqCst);
512
513        // The VRING_AVAIL_F_NO_INTERRUPT flag isn't supported yet.
514
515        // When the `EVENT_IDX` feature is negotiated, the driver writes into `used_event`
516        // a value that's used by the device to determine whether a notification must
517        // be submitted after adding a descriptor chain to the used ring. According to the
518        // standard, the notification must be sent when `next_used == used_event + 1`, but
519        // various device model implementations rely on an inequality instead, most likely
520        // to also support use cases where a bunch of descriptor chains are added to the used
521        // ring first, and only afterwards the `needs_notification` logic is called. For example,
522        // the approach based on `num_added` below is taken from the Linux Kernel implementation
523        // (i.e. https://elixir.bootlin.com/linux/v5.15.35/source/drivers/virtio/virtio_ring.c#L661)
524
525        // The `old` variable below is used to determine the value of `next_used` from when
526        // `needs_notification` was called last (each `needs_notification` call resets `num_added`
527        // to zero, while each `add_used` call increments it by one). Then, the logic below
528        // uses wrapped arithmetic to see whether `used_event` can be found between `old` and
529        // `next_used` in the circular sequence space of the used ring.
530        if self.event_idx_enabled {
531            let used_event = self.used_event(mem, Ordering::Relaxed)?;
532            let old = used_idx - self.num_added;
533            self.num_added = Wrapping(0);
534
535            return Ok(used_idx - used_event - Wrapping(1) < used_idx - old);
536        }
537
538        Ok(true)
539    }
540
541    fn next_avail(&self) -> u16 {
542        self.next_avail.0
543    }
544
545    fn set_next_avail(&mut self, next_avail: u16) {
546        self.next_avail = Wrapping(next_avail);
547    }
548
549    fn next_used(&self) -> u16 {
550        self.next_used.0
551    }
552
553    fn set_next_used(&mut self, next_used: u16) {
554        self.next_used = Wrapping(next_used);
555    }
556
557    fn desc_table(&self) -> u64 {
558        self.desc_table.0
559    }
560
561    fn avail_ring(&self) -> u64 {
562        self.avail_ring.0
563    }
564
565    fn used_ring(&self) -> u64 {
566        self.used_ring.0
567    }
568
569    fn event_idx_enabled(&self) -> bool {
570        self.event_idx_enabled
571    }
572
573    fn pop_descriptor_chain<M>(&mut self, mem: M) -> Option<DescriptorChain<M>>
574    where
575        M: Clone + Deref,
576        M::Target: GuestMemory,
577    {
578        // Default, iter-based impl. Will be subsequently improved.
579        match self.iter(mem) {
580            Ok(mut iter) => iter.next(),
581            Err(e) => {
582                error!("Iterator error {}", e);
583                None
584            }
585        }
586    }
587}
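// Illustrative usage sketch (assuming a `queue: Queue` that passed `is_valid` and a
// `mem: &GuestMemoryMmap` like in the examples above); the two styles below drain the
// available ring in the same way:
//
//     while let Some(chain) = queue.pop_descriptor_chain(mem) {
//         // process `chain`, then place it in the used ring via `add_used` ...
//     }
//     // or, iterator based:
//     for chain in queue.iter(mem).unwrap() {
//         // ...
//     }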
588
589impl QueueOwnedT for Queue {
590    fn iter<M>(&mut self, mem: M) -> Result<AvailIter<'_, M>, Error>
591    where
592        M: Deref,
593        M::Target: GuestMemory,
594    {
595        // We're checking here that a reset did not happen without re-initializing the queue.
596        // TODO: In the future we might want to also check that the other parameters in the
597        // queue are valid.
598        if !self.ready || self.avail_ring == GuestAddress(0) {
599            return Err(Error::QueueNotReady);
600        }
601
602        self.avail_idx(mem.deref(), Ordering::Acquire)
603            .map(move |idx| AvailIter::new(mem, idx, self))?
604    }
605
606    fn go_to_previous_position(&mut self) {
607        self.next_avail -= Wrapping(1);
608    }
609}
610
611/// Consuming iterator over all available descriptor chain heads in the queue.
612///
613/// # Example
614///
615/// ```rust
616/// # use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE};
617/// # use virtio_queue::mock::MockSplitQueue;
618/// use virtio_queue::{desc::{split::Descriptor as SplitDescriptor, RawDescriptor}, Queue, QueueOwnedT};
619/// use vm_memory::{GuestAddress, GuestMemoryMmap};
620///
621/// # fn populate_queue(m: &GuestMemoryMmap) -> Queue {
622/// #    let vq = MockSplitQueue::new(m, 16);
623/// #    let mut q: Queue = vq.create_queue().unwrap();
624/// #
625/// #    // The chains are (0, 1), (2, 3, 4) and (5, 6).
626/// #    let mut descs = Vec::new();
627/// #    for i in 0..7 {
628/// #        let flags = match i {
629/// #            1 | 6 => 0,
630/// #            2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
631/// #            4 => VRING_DESC_F_WRITE,
632/// #            _ => VRING_DESC_F_NEXT,
633/// #        };
634/// #
635/// #        descs.push(RawDescriptor::from(SplitDescriptor::new((0x1000 * (i + 1)) as u64, 0x1000, flags as u16, i + 1)));
636/// #    }
637/// #
638/// #    vq.add_desc_chains(&descs, 0).unwrap();
639/// #    q
640/// # }
641/// let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
642/// // Populate the queue with descriptor chains and update the available ring accordingly.
643/// let mut queue = populate_queue(m);
644/// let mut i = queue.iter(m).unwrap();
645///
646/// {
647///     let mut c = i.next().unwrap();
648///     let _first_head_index = c.head_index();
649///     // We should have two descriptors in the first chain.
650///     let _desc1 = c.next().unwrap();
651///     let _desc2 = c.next().unwrap();
652/// }
653///
654/// {
655///     let c = i.next().unwrap();
656///     let _second_head_index = c.head_index();
657///
658///     let mut iter = c.writable();
659///     // We should have two writable descriptors in the second chain.
660///     let _desc1 = iter.next().unwrap();
661///     let _desc2 = iter.next().unwrap();
662/// }
663///
664/// {
665///     let c = i.next().unwrap();
666///     let _third_head_index = c.head_index();
667///
668///     let mut iter = c.readable();
669///     // We should have one readable descriptor in the third chain.
670///     let _desc1 = iter.next().unwrap();
671/// }
672/// // Let's go back one position in the available ring.
673/// i.go_to_previous_position();
674/// // We should be able to access again the third descriptor chain.
675/// let c = i.next().unwrap();
676/// let _third_head_index = c.head_index();
677/// ```
678#[derive(Debug)]
679pub struct AvailIter<'b, M> {
680    mem: M,
681    desc_table: GuestAddress,
682    avail_ring: GuestAddress,
683    queue_size: u16,
684    last_index: Wrapping<u16>,
685    next_avail: &'b mut Wrapping<u16>,
686}
687
688impl<'b, M> AvailIter<'b, M>
689where
690    M: Deref,
691    M::Target: GuestMemory,
692{
693    /// Creates a new instance of `AvailIter`.
694    ///
695    /// # Arguments
696    ///
697    /// * `mem` - the `GuestMemory` object that can be used to access the queue buffers.
698    /// * `idx` - the index of the available ring entry where the driver would put the next available descriptor chain.
699    /// * `queue` - the `Queue` object from which the needed data to create the `AvailIter` can be retrieved.
700    pub(crate) fn new(mem: M, idx: Wrapping<u16>, queue: &'b mut Queue) -> Result<Self, Error> {
701        // The number of descriptor chain heads to process should always
702        // be smaller than or equal to the queue size, as the driver should
703        // never ask the VMM to process an available ring entry more than
704        // once. Checking and reporting such incorrect driver behavior
705        // can prevent potential hanging and Denial-of-Service from
706        // happening on the VMM side.
707        if (idx - queue.next_avail).0 > queue.size {
708            return Err(Error::InvalidAvailRingIndex);
709        }
710
711        Ok(AvailIter {
712            mem,
713            desc_table: queue.desc_table,
714            avail_ring: queue.avail_ring,
715            queue_size: queue.size,
716            last_index: idx,
717            next_avail: &mut queue.next_avail,
718        })
719    }
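    // For illustration: with a queue size of 16 and `next_avail == 65534`, a driver that
    // advances `idx` to 2 produces a wrapping difference of 4, which is accepted; a (buggy)
    // driver that instead decrements `idx` to 65530 produces a wrapping difference of
    // 65532 > 16, and `InvalidAvailRingIndex` is returned.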
720
721    /// Goes back one position in the available descriptor chain offered by the driver.
722    ///
723    /// Rust does not support bidirectional iterators. This is the only way to revert the effect
724    /// of an iterator increment on the queue.
725    ///
726    /// Note: this method assumes there's only one thread manipulating the queue, so it should only
727    /// be invoked in single-threaded context.
728    pub fn go_to_previous_position(&mut self) {
729        *self.next_avail -= Wrapping(1);
730    }
731}
732
733impl<M> Iterator for AvailIter<'_, M>
734where
735    M: Clone + Deref,
736    M::Target: GuestMemory,
737{
738    type Item = DescriptorChain<M>;
739
740    fn next(&mut self) -> Option<Self::Item> {
741        if *self.next_avail == self.last_index {
742            return None;
743        }
744
745        // These two operations cannot overflow a u64 since they're working with relatively small
746        // numbers compared to u64::MAX.
747        let elem_off =
748            u64::from(self.next_avail.0.checked_rem(self.queue_size)?) * VIRTQ_AVAIL_ELEMENT_SIZE;
749        let offset = VIRTQ_AVAIL_RING_HEADER_SIZE + elem_off;
750
751        let addr = self.avail_ring.checked_add(offset)?;
752        let head_index: u16 = self
753            .mem
754            .load(addr, Ordering::Acquire)
755            .map(u16::from_le)
756            .map_err(|_| error!("Failed to read from memory {:x}", addr.raw_value()))
757            .ok()?;
758
759        *self.next_avail += Wrapping(1);
760
761        Some(DescriptorChain::new(
762            self.mem.clone(),
763            self.desc_table,
764            self.queue_size,
765            head_index,
766        ))
767    }
768}
769
770#[cfg(any(test, feature = "test-utils"))]
771// It is convenient for tests to implement `PartialEq`, but it is not a
772// proper implementation as `GuestMemory` errors cannot implement `PartialEq`.
773impl PartialEq for Error {
774    fn eq(&self, other: &Self) -> bool {
775        format!("{}", &self) == format!("{other}")
776    }
777}
778
779#[cfg(test)]
780mod tests {
781    use super::*;
782    use crate::defs::{DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR};
783    use crate::desc::{split::Descriptor as SplitDescriptor, RawDescriptor};
784    use crate::mock::MockSplitQueue;
785    use virtio_bindings::bindings::virtio_ring::{
786        VRING_DESC_F_NEXT, VRING_DESC_F_WRITE, VRING_USED_F_NO_NOTIFY,
787    };
788
789    use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
790
791    #[test]
792    fn test_queue_is_valid() {
793        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
794        let vq = MockSplitQueue::new(m, 16);
795        let mut q: Queue = vq.create_queue().unwrap();
796
797        // q is currently valid
798        assert!(q.is_valid(m));
799
800        // shouldn't be valid when not marked as ready
801        q.set_ready(false);
802        assert!(!q.ready());
803        assert!(!q.is_valid(m));
804        q.set_ready(true);
805
806        // shouldn't be allowed to set a size > max_size
807        q.set_size(q.max_size() << 1);
808        assert_eq!(q.size, q.max_size());
809
810        // or set the size to 0
811        q.set_size(0);
812        assert_eq!(q.size, q.max_size());
813
814        // or set a size which is not a power of 2
815        q.set_size(11);
816        assert_eq!(q.size, q.max_size());
817
818        // but should be allowed to set a size if 0 < size <= max_size and size is a power of two
819        q.set_size(4);
820        assert_eq!(q.size, 4);
821        q.size = q.max_size();
822
823        // shouldn't be allowed to set an address that breaks the alignment constraint
824        q.set_desc_table_address(Some(0xf), None);
825        assert_eq!(q.desc_table.0, vq.desc_table_addr().0);
826        // should be allowed to set an aligned out of bounds address
827        q.set_desc_table_address(Some(0xffff_fff0), None);
828        assert_eq!(q.desc_table.0, 0xffff_fff0);
829        // but shouldn't be valid
830        assert!(!q.is_valid(m));
831        // but should be allowed to set a valid descriptor table address
832        q.set_desc_table_address(Some(0x10), None);
833        assert_eq!(q.desc_table.0, 0x10);
834        assert!(q.is_valid(m));
835        let addr = vq.desc_table_addr().0;
836        q.set_desc_table_address(Some(addr as u32), Some((addr >> 32) as u32));
837
838        // shouldn't be allowed to set an address that breaks the alignment constraint
839        q.set_avail_ring_address(Some(0x1), None);
840        assert_eq!(q.avail_ring.0, vq.avail_addr().0);
841        // should be allowed to set an aligned out of bounds address
842        q.set_avail_ring_address(Some(0xffff_fffe), None);
843        assert_eq!(q.avail_ring.0, 0xffff_fffe);
844        // but shouldn't be valid
845        assert!(!q.is_valid(m));
846        // but should be allowed to set a valid available ring address
847        q.set_avail_ring_address(Some(0x2), None);
848        assert_eq!(q.avail_ring.0, 0x2);
849        assert!(q.is_valid(m));
850        let addr = vq.avail_addr().0;
851        q.set_avail_ring_address(Some(addr as u32), Some((addr >> 32) as u32));
852
853        // shouldn't be allowed to set an address that breaks the alignment constraint
854        q.set_used_ring_address(Some(0x3), None);
855        assert_eq!(q.used_ring.0, vq.used_addr().0);
856        // should be allowed to set an aligned out of bounds address
857        q.set_used_ring_address(Some(0xffff_fffc), None);
858        assert_eq!(q.used_ring.0, 0xffff_fffc);
859        // but shouldn't be valid
860        assert!(!q.is_valid(m));
861        // but should be allowed to set a valid used ring address
862        q.set_used_ring_address(Some(0x4), None);
863        assert_eq!(q.used_ring.0, 0x4);
864        let addr = vq.used_addr().0;
865        q.set_used_ring_address(Some(addr as u32), Some((addr >> 32) as u32));
866        assert!(q.is_valid(m));
867    }
868
869    #[test]
870    fn test_add_used() {
871        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
872        let vq = MockSplitQueue::new(mem, 16);
873        let mut q: Queue = vq.create_queue().unwrap();
874
875        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(0));
876        assert_eq!(u16::from_le(vq.used().idx().load()), 0);
877
878        // index too large
879        assert!(q.add_used(mem, 16, 0x1000).is_err());
880        assert_eq!(u16::from_le(vq.used().idx().load()), 0);
881
882        // should be ok
883        q.add_used(mem, 1, 0x1000).unwrap();
884        assert_eq!(q.next_used, Wrapping(1));
885        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(1));
886        assert_eq!(u16::from_le(vq.used().idx().load()), 1);
887
888        let x = vq.used().ring().ref_at(0).unwrap().load();
889        assert_eq!(x.id(), 1);
890        assert_eq!(x.len(), 0x1000);
891    }
892
893    #[test]
894    fn test_reset_queue() {
895        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
896        let vq = MockSplitQueue::new(m, 16);
897        let mut q: Queue = vq.create_queue().unwrap();
898
899        q.set_size(8);
900        // The address set by `MockSplitQueue` for the descriptor table is DEFAULT_DESC_TABLE_ADDR,
901        // so let's change it for testing the reset.
902        q.set_desc_table_address(Some(0x5000), None);
903        // Same for `event_idx_enabled`, `next_avail`, `next_used` and `num_added`.
904        q.set_event_idx(true);
905        q.set_next_avail(2);
906        q.set_next_used(4);
907        q.num_added = Wrapping(15);
908        assert_eq!(q.size, 8);
909        // `create_queue` also marks the queue as ready.
910        assert!(q.ready);
911        assert_ne!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
912        assert_ne!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
913        assert_ne!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
914        assert_ne!(q.next_avail, Wrapping(0));
915        assert_ne!(q.next_used, Wrapping(0));
916        assert_ne!(q.num_added, Wrapping(0));
917        assert!(q.event_idx_enabled);
918
919        q.reset();
920        assert_eq!(q.size, 16);
921        assert!(!q.ready);
922        assert_eq!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
923        assert_eq!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
924        assert_eq!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
925        assert_eq!(q.next_avail, Wrapping(0));
926        assert_eq!(q.next_used, Wrapping(0));
927        assert_eq!(q.num_added, Wrapping(0));
928        assert!(!q.event_idx_enabled);
929    }
930
931    #[test]
932    fn test_needs_notification() {
933        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
934        let qsize = 16;
935        let vq = MockSplitQueue::new(mem, qsize);
936        let mut q: Queue = vq.create_queue().unwrap();
937        let avail_addr = vq.avail_addr();
938
939        // It should always return true when EVENT_IDX isn't enabled.
940        for i in 0..qsize {
941            q.next_used = Wrapping(i);
942            assert!(q.needs_notification(mem).unwrap());
943        }
944
945        mem.write_obj::<u16>(
946            u16::to_le(4),
947            avail_addr.unchecked_add(4 + qsize as u64 * 2),
948        )
949        .unwrap();
950        q.set_event_idx(true);
951
952        // Incrementing up to this value causes a `u16` to wrap back to 0.
953        let wrap = u32::from(u16::MAX) + 1;
954
955        for i in 0..wrap + 12 {
956            q.next_used = Wrapping(i as u16);
957            // Let's test wrapping around the maximum index value as well.
958            // `num_added` needs to be at least `1` to represent the fact that new descriptor
959            // chains have been added to the used ring since the last time `needs_notification`
960            // returned.
961            q.num_added = Wrapping(1);
962            let expected = i == 5 || i == (5 + wrap);
963            assert_eq!((q.needs_notification(mem).unwrap(), i), (expected, i));
964        }
965
966        mem.write_obj::<u16>(
967            u16::to_le(8),
968            avail_addr.unchecked_add(4 + qsize as u64 * 2),
969        )
970        .unwrap();
971
972        // Returns `false` because the current `used_event` value is behind both `next_used` and
973        // the value of `next_used` at the time when `needs_notification` last returned (which is
974        // computed based on `num_added`, as described in the comments for `needs_notification`).
975        assert!(!q.needs_notification(mem).unwrap());
976
977        mem.write_obj::<u16>(
978            u16::to_le(15),
979            avail_addr.unchecked_add(4 + qsize as u64 * 2),
980        )
981        .unwrap();
982
983        q.num_added = Wrapping(1);
984        assert!(!q.needs_notification(mem).unwrap());
985
986        q.next_used = Wrapping(15);
987        q.num_added = Wrapping(1);
988        assert!(!q.needs_notification(mem).unwrap());
989
990        q.next_used = Wrapping(16);
991        q.num_added = Wrapping(1);
992        assert!(q.needs_notification(mem).unwrap());
993
994        // Calling `needs_notification` again immediately returns `false`.
995        assert!(!q.needs_notification(mem).unwrap());
996
997        mem.write_obj::<u16>(
998            u16::to_le(u16::MAX - 3),
999            avail_addr.unchecked_add(4 + qsize as u64 * 2),
1000        )
1001        .unwrap();
1002        q.next_used = Wrapping(u16::MAX - 2);
1003        q.num_added = Wrapping(1);
1004        // Returns `true` because, when looking at circular sequence of indices of the used ring,
1005        // the value we wrote in the `used_event` appears between the "old" value of `next_used`
1006        // (i.e. `next_used` - `num_added`) and the current `next_used`, thus suggesting that we
1007        // need to notify the driver.
1008        assert!(q.needs_notification(mem).unwrap());
1009    }
1010
1011    #[test]
1012    fn test_enable_disable_notification() {
1013        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1014        let vq = MockSplitQueue::new(mem, 16);
1015
1016        let mut q: Queue = vq.create_queue().unwrap();
1017        let used_addr = vq.used_addr();
1018
1019        assert!(!q.event_idx_enabled);
1020
1021        q.enable_notification(mem).unwrap();
1022        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
1023        assert_eq!(v, 0);
1024
1025        q.disable_notification(mem).unwrap();
1026        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
1027        assert_eq!(v, VRING_USED_F_NO_NOTIFY as u16);
1028
1029        q.enable_notification(mem).unwrap();
1030        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
1031        assert_eq!(v, 0);
1032
1033        q.set_event_idx(true);
1034        let avail_addr = vq.avail_addr();
1035        mem.write_obj::<u16>(u16::to_le(2), avail_addr.unchecked_add(2))
1036            .unwrap();
1037
1038        assert!(q.enable_notification(mem).unwrap());
1039        q.next_avail = Wrapping(2);
1040        assert!(!q.enable_notification(mem).unwrap());
1041
1042        mem.write_obj::<u16>(u16::to_le(8), avail_addr.unchecked_add(2))
1043            .unwrap();
1044
1045        assert!(q.enable_notification(mem).unwrap());
1046        q.next_avail = Wrapping(8);
1047        assert!(!q.enable_notification(mem).unwrap());
1048    }
1049
1050    #[test]
1051    fn test_consume_chains_with_notif() {
1052        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1053        let vq = MockSplitQueue::new(mem, 16);
1054
1055        let mut q: Queue = vq.create_queue().unwrap();
1056
1057        // q is currently valid.
1058        assert!(q.is_valid(mem));
1059
1060        // The chains are (0, 1), (2, 3, 4), (5, 6), (7, 8), (9, 10, 11, 12).
1061        let mut descs = Vec::new();
1062        for i in 0..13 {
1063            let flags = match i {
1064                1 | 4 | 6 | 8 | 12 => 0,
1065                _ => VRING_DESC_F_NEXT,
1066            };
1067
1068            descs.push(RawDescriptor::from(SplitDescriptor::new(
1069                (0x1000 * (i + 1)) as u64,
1070                0x1000,
1071                flags as u16,
1072                i + 1,
1073            )));
1074        }
1075
1076        vq.add_desc_chains(&descs, 0).unwrap();
1077        // Update the index of the chain that can be consumed to not be the last one.
1078        // This enables us to consume chains in multiple iterations as opposed to consuming
1079        // all the driver written chains at once.
1080        vq.avail().idx().store(u16::to_le(2));
1081        // No descriptor chains are consumed at this point.
1082        assert_eq!(q.next_avail(), 0);
1083
1084        let mut i = 0;
1085
1086        loop {
1087            i += 1;
1088            q.disable_notification(mem).unwrap();
1089
1090            while let Some(chain) = q.iter(mem).unwrap().next() {
1091                // Process the descriptor chain, and then add entries to the
1092                // used ring.
1093                let head_index = chain.head_index();
1094                let mut desc_len = 0;
1095                chain.for_each(|d| {
1096                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
1097                        desc_len += d.len();
1098                    }
1099                });
1100                q.add_used(mem, head_index, desc_len).unwrap();
1101            }
1102            if !q.enable_notification(mem).unwrap() {
1103                break;
1104            }
1105        }
1106        // The chains should be consumed in a single loop iteration because there's nothing updating
1107        // the `idx` field of the available ring in the meantime.
1108        assert_eq!(i, 1);
1109        // The next chain that can be consumed should have index 2.
1110        assert_eq!(q.next_avail(), 2);
1111        assert_eq!(q.next_used(), 2);
1112        // Let the device know it can consume one more chain.
1113        vq.avail().idx().store(u16::to_le(3));
1114        i = 0;
1115
1116        loop {
1117            i += 1;
1118            q.disable_notification(mem).unwrap();
1119
1120            while let Some(chain) = q.iter(mem).unwrap().next() {
1121                // Process the descriptor chain, and then add entries to the
1122                // used ring.
1123                let head_index = chain.head_index();
1124                let mut desc_len = 0;
1125                chain.for_each(|d| {
1126                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
1127                        desc_len += d.len();
1128                    }
1129                });
1130                q.add_used(mem, head_index, desc_len).unwrap();
1131            }
1132
1133            // For the simplicity of the test, we update the `idx` value of the available ring
1134            // here. Ideally this would be done on a separate thread.
1135            // Because of this update, the loop should be iterated again to consume the new
1136            // available descriptor chains.
1137            vq.avail().idx().store(u16::to_le(4));
1138            if !q.enable_notification(mem).unwrap() {
1139                break;
1140            }
1141        }
1142        assert_eq!(i, 2);
1143        // The next chain that can be consumed should have index 4.
1144        assert_eq!(q.next_avail(), 4);
1145        assert_eq!(q.next_used(), 4);
1146
1147        // Set an `idx` that is bigger than the number of entries added in the ring.
1148        // This is an allowed scenario, but the indexes of the chain will have unexpected values.
1149        vq.avail().idx().store(u16::to_le(7));
1150        loop {
1151            q.disable_notification(mem).unwrap();
1152
1153            while let Some(chain) = q.iter(mem).unwrap().next() {
1154                // Process the descriptor chain, and then add entries to the
1155                // used ring.
1156                let head_index = chain.head_index();
1157                let mut desc_len = 0;
1158                chain.for_each(|d| {
1159                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
1160                        desc_len += d.len();
1161                    }
1162                });
1163                q.add_used(mem, head_index, desc_len).unwrap();
1164            }
1165            if !q.enable_notification(mem).unwrap() {
1166                break;
1167            }
1168        }
1169        assert_eq!(q.next_avail(), 7);
1170        assert_eq!(q.next_used(), 7);
1171    }
1172
1173    #[test]
1174    fn test_invalid_avail_idx() {
1175        // This is a negative test for the following MUST from the spec: `A driver MUST NOT
1176        // decrement the available idx on a virtqueue (ie. there is no way to “unexpose” buffers).`.
1177        // We validate that for this misconfiguration, the device does not panic.
1178        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1179        let vq = MockSplitQueue::new(mem, 16);
1180
1181        let mut q: Queue = vq.create_queue().unwrap();
1182
1183        // q is currently valid.
1184        assert!(q.is_valid(mem));
1185
1186        // The chains are (0, 1), (2, 3, 4), (5, 6).
1187        let mut descs = Vec::new();
1188        for i in 0..7 {
1189            let flags = match i {
1190                1 | 4 | 6 => 0,
1191                _ => VRING_DESC_F_NEXT,
1192            };
1193
1194            descs.push(RawDescriptor::from(SplitDescriptor::new(
1195                (0x1000 * (i + 1)) as u64,
1196                0x1000,
1197                flags as u16,
1198                i + 1,
1199            )));
1200        }
1201
1202        vq.add_desc_chains(&descs, 0).unwrap();
1203        // Let the device know it can consume chains with index < 3.
1204        vq.avail().idx().store(u16::to_le(3));
1205        // No descriptor chains are consumed at this point.
1206        assert_eq!(q.next_avail(), 0);
1207        assert_eq!(q.next_used(), 0);
1208
1209        loop {
1210            q.disable_notification(mem).unwrap();
1211
1212            while let Some(chain) = q.iter(mem).unwrap().next() {
1213                // Process the descriptor chain, and then add entries to the
1214                // used ring.
1215                let head_index = chain.head_index();
1216                let mut desc_len = 0;
1217                chain.for_each(|d| {
1218                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
1219                        desc_len += d.len();
1220                    }
1221                });
1222                q.add_used(mem, head_index, desc_len).unwrap();
1223            }
1224            if !q.enable_notification(mem).unwrap() {
1225                break;
1226            }
1227        }
1228        // The next chain that can be consumed should have index 3.
1229        assert_eq!(q.next_avail(), 3);
1230        assert_eq!(q.avail_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
1231        assert_eq!(q.next_used(), 3);
1232        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
1233        assert!(q.lock().ready());
1234
1235        // Decrement `idx`, which is forbidden by the spec. We don't enforce this, but we should
1236        // test that we don't panic in case the driver decrements it.
1237        vq.avail().idx().store(u16::to_le(1));
1238        // Invalid available ring index
1239        assert!(q.iter(mem).is_err());
1240    }
1241
1242    #[test]
1243    fn test_iterator_and_avail_idx() {
1244        // This test ensures constructing a descriptor chain iterator succeeds
1245        // with valid available ring indexes, while producing an error with invalid
1246        // indexes.
1247        let queue_size = 2;
1248        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1249        let vq = MockSplitQueue::new(mem, queue_size);
1250
1251        let mut q: Queue = vq.create_queue().unwrap();
1252
1253        // q is currently valid.
1254        assert!(q.is_valid(mem));
1255
1256        // Create descriptors to fill up the queue
1257        let mut descs = Vec::new();
1258        for i in 0..queue_size {
1259            descs.push(RawDescriptor::from(SplitDescriptor::new(
1260                (0x1000 * (i + 1)) as u64,
1261                0x1000,
1262                0_u16,
1263                i + 1,
1264            )));
1265        }
1266        vq.add_desc_chains(&descs, 0).unwrap();
1267
1268        // Set the `next_avail` index to `u16::MAX` to test the wrapping scenarios.
1269        q.set_next_avail(u16::MAX);
1270
1271        // When the number of chains exposed by the driver is equal to or less than the queue
1272        // size, the available ring index is valid and constructing an iterator succeeds.
1273        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size);
1274        vq.avail().idx().store(u16::to_le(avail_idx.0));
1275        assert!(q.iter(mem).is_ok());
1276        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size - 1);
1277        vq.avail().idx().store(u16::to_le(avail_idx.0));
1278        assert!(q.iter(mem).is_ok());
1279
1280        // When the number of chains exposed by the driver is larger than the queue size, the
1281        // available ring index is invalid and constructing an iterator produces an error.
1282        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size + 1);
1283        vq.avail().idx().store(u16::to_le(avail_idx.0));
1284        assert!(q.iter(mem).is_err());
1285    }
1286
1287    #[test]
1288    fn test_descriptor_and_iterator() {
1289        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1290        let vq = MockSplitQueue::new(m, 16);
1291
1292        let mut q: Queue = vq.create_queue().unwrap();
1293
1294        // q is currently valid
1295        assert!(q.is_valid(m));
1296
1297        // the chains are (0, 1), (2, 3, 4) and (5, 6)
1298        let mut descs = Vec::new();
1299        for j in 0..7 {
1300            let flags = match j {
1301                1 | 6 => 0,
1302                2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
1303                4 => VRING_DESC_F_WRITE,
1304                _ => VRING_DESC_F_NEXT,
1305            };
1306
1307            descs.push(RawDescriptor::from(SplitDescriptor::new(
1308                (0x1000 * (j + 1)) as u64,
1309                0x1000,
1310                flags as u16,
1311                j + 1,
1312            )));
1313        }
1314
1315        vq.add_desc_chains(&descs, 0).unwrap();
1316
1317        let mut i = q.iter(m).unwrap();
1318
1319        {
1320            let c = i.next().unwrap();
1321            assert_eq!(c.head_index(), 0);
1322
1323            let mut iter = c;
1324            assert!(iter.next().is_some());
1325            assert!(iter.next().is_some());
1326            assert!(iter.next().is_none());
1327            assert!(iter.next().is_none());
1328        }
1329
1330        {
1331            let c = i.next().unwrap();
1332            assert_eq!(c.head_index(), 2);
1333
1334            let mut iter = c.writable();
1335            assert!(iter.next().is_some());
1336            assert!(iter.next().is_some());
1337            assert!(iter.next().is_none());
1338            assert!(iter.next().is_none());
1339        }
1340
1341        {
1342            let c = i.next().unwrap();
1343            assert_eq!(c.head_index(), 5);
1344
1345            let mut iter = c.readable();
1346            assert!(iter.next().is_some());
1347            assert!(iter.next().is_none());
1348            assert!(iter.next().is_none());
1349        }
1350    }
1351
1352    #[test]
1353    fn test_iterator() {
1354        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1355        let vq = MockSplitQueue::new(m, 16);
1356
1357        let mut q: Queue = vq.create_queue().unwrap();
1358
1359        q.size = q.max_size;
1360        q.desc_table = vq.desc_table_addr();
1361        q.avail_ring = vq.avail_addr();
1362        q.used_ring = vq.used_addr();
1363        assert!(q.is_valid(m));
1364
1365        {
1366            // a queue that is not ready should return an error instead of an iterator
1367            q.ready = false;
1368            assert!(q.iter(m).is_err());
1369        }
1370
1371        q.ready = true;
1372
1373        // now let's create two simple descriptor chains
1374        // the chains are (0, 1) and (2, 3, 4)
1375        {
1376            let mut descs = Vec::new();
1377            for j in 0..5u16 {
                let flags = match j {
                    1 | 4 => 0,
                    _ => VRING_DESC_F_NEXT,
                };

                descs.push(RawDescriptor::from(SplitDescriptor::new(
                    (0x1000 * (j + 1)) as u64,
                    0x1000,
                    flags as u16,
                    j + 1,
                )));
            }
            vq.add_desc_chains(&descs, 0).unwrap();

            let mut i = q.iter(m).unwrap();

            {
                let mut c = i.next().unwrap();
                assert_eq!(c.head_index(), 0);

                c.next().unwrap();
                assert!(c.next().is_some());
                assert!(c.next().is_none());
                assert_eq!(c.head_index(), 0);
            }

            {
                let mut c = i.next().unwrap();
                assert_eq!(c.head_index(), 2);

                c.next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                assert!(c.next().is_none());
                assert_eq!(c.head_index(), 2);
            }

            // also test that go_to_previous_position() works as expected
            {
                assert!(i.next().is_none());
                i.go_to_previous_position();
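                // Rewinding next_avail by one entry makes the last chain (head
                // index 2, three descriptors) available to be popped and walked again.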
                let mut c = q.iter(m).unwrap().next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                assert!(c.next().is_none());
            }
        }

        // Test that iterating a broken descriptor chain does not yield more than
        // 2^32 bytes in total (VIRTIO spec version 1.2, section 2.7.5.2: "Drivers
        // MUST NOT add a descriptor chain longer than 2^32 bytes in total").
        {
            let descs = vec![
                RawDescriptor::from(SplitDescriptor::new(
                    0x1000,
                    0xffff_ffff,
                    VRING_DESC_F_NEXT as u16,
                    1,
                )),
                RawDescriptor::from(SplitDescriptor::new(0x1000, 0x1234_5678, 0, 2)),
            ];
            vq.add_desc_chains(&descs, 0).unwrap();
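            // The two descriptor lengths add up to more than 2^32 bytes, so the
            // iterator is expected to stop before the running total can overflow a u32.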
            let mut yielded_bytes_by_iteration = 0_u32;
            for d in q.iter(m).unwrap().next().unwrap() {
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
                    .checked_add(d.len())
                    .expect("iterator should not yield more than 2^32 bytes");
            }
        }

        // Same as above, but test with a descriptor which is self-referential
        {
            let descs = vec![RawDescriptor::from(SplitDescriptor::new(
                0x1000,
                0xffff_ffff,
                VRING_DESC_F_NEXT as u16,
                0,
            ))];
            vq.add_desc_chains(&descs, 0).unwrap();
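            // Descriptor 0 sets VRING_DESC_F_NEXT with `next == 0`, i.e. it points
            // back to itself; iteration must still terminate before the yielded byte
            // total can overflow a u32.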
            let mut yielded_bytes_by_iteration = 0_u32;
            for d in q.iter(m).unwrap().next().unwrap() {
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
                    .checked_add(d.len())
                    .expect("iterator should not yield more than 2^32 bytes");
            }
        }
    }

    #[test]
    fn test_regression_iterator_division() {
        // This is a regression test checking that the iterator does not try to divide
        // by 0 when the queue size is 0.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 1);
        // This input was generated by the fuzzer, both for the queue state and the descriptor.
        let descriptors: Vec<RawDescriptor> = vec![RawDescriptor::from(SplitDescriptor::new(
            14178673876262995140,
            3301229764,
            50372,
            50372,
        ))];
        vq.build_desc_chain(&descriptors).unwrap();

        let mut q = Queue {
            max_size: 38,
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            event_idx_enabled: false,
            num_added: Wrapping(0),
            size: 0,
            ready: false,
            desc_table: GuestAddress(12837708984796196),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(9943947977301164032),
        };

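        // With `size` set to 0 and `ready` set to false, popping must simply return
        // `None` instead of attempting a division/modulo by the zero queue size.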
        assert!(q.pop_descriptor_chain(m).is_none());
    }

    #[test]
    fn test_setters_error_cases() {
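        // Queue sizes must be powers of two, the descriptor table must be 16-byte
        // aligned, the available ring 2-byte aligned, and the used ring 4-byte
        // aligned; a rejected setter must leave the previous value untouched.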
        assert_eq!(Queue::new(15).unwrap_err(), Error::InvalidMaxSize);
        let mut q = Queue::new(16).unwrap();

        let expected_val = q.desc_table.0;
        assert_eq!(
            q.try_set_desc_table_address(GuestAddress(0xf)).unwrap_err(),
            Error::InvalidDescTableAlign
        );
        assert_eq!(q.desc_table(), expected_val);

        let expected_val = q.avail_ring.0;
        assert_eq!(
            q.try_set_avail_ring_address(GuestAddress(0x1)).unwrap_err(),
            Error::InvalidAvailRingAlign
        );
        assert_eq!(q.avail_ring(), expected_val);

        let expected_val = q.used_ring.0;
        assert_eq!(
            q.try_set_used_ring_address(GuestAddress(0x3)).unwrap_err(),
            Error::InvalidUsedRingAlign
        );
        assert_eq!(q.used_ring(), expected_val);

        let expected_val = q.size;
        assert_eq!(q.try_set_size(15).unwrap_err(), Error::InvalidSize);
        assert_eq!(q.size(), expected_val)
    }

    #[test]
    // This is a regression test for a fuzzing finding. If the driver requests a reset of the
    // device but then does not re-initialize the queue, a subsequent call to process a
    // request should yield no descriptors to process. Before this fix we were processing
    // descriptors that had been added to the queue earlier, ending up with 255 descriptors
    // per chain.
    fn test_regression_timeout_after_reset() {
        // The input below was generated by libfuzzer and adapted for this test.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 1024);

        // The descriptor values below come directly from the fuzzer input.
        let descriptors: Vec<RawDescriptor> = vec![
            RawDescriptor::from(SplitDescriptor::new(21508325467, 0, 1, 4)),
            RawDescriptor::from(SplitDescriptor::new(2097152, 4096, 3, 0)),
            RawDescriptor::from(SplitDescriptor::new(
                18374686479672737792,
                4294967295,
                65535,
                29,
            )),
            RawDescriptor::from(SplitDescriptor::new(76842670169653248, 1114115, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(16, 983040, 126, 3)),
            RawDescriptor::from(SplitDescriptor::new(897648164864, 0, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(111669149722, 0, 0, 0)),
        ];
        vq.build_multiple_desc_chains(&descriptors).unwrap();

        let mut q: Queue = vq.create_queue().unwrap();

        // Setting the queue to ready should not allow consuming descriptors after reset.
        q.reset();
        q.set_ready(true);
        let mut counter = 0;
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
            // Walk the whole chain to check that there are no side effects
            // in terms of memory & execution time.
            while desc_chain.next().is_some() {
                counter += 1;
            }
        }
        assert_eq!(counter, 0);

        // Setting the available ring address to a valid value should not allow consuming descriptors after reset.
        q.reset();
        q.set_avail_ring_address(Some(0x1000), None);
        assert_eq!(q.avail_ring, GuestAddress(0x1000));
        counter = 0;
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
            // Walk the whole chain to check that there are no side effects
            // in terms of memory & execution time.
            while desc_chain.next().is_some() {
                counter += 1;
            }
        }
        assert_eq!(counter, 0);
    }
}