// virtio_queue/chain.rs

// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Copyright © 2019 Intel Corporation
//
// Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

13use std::fmt::{self, Debug};
14use std::mem::size_of;
15use std::ops::Deref;
16
17use vm_memory::bitmap::{BitmapSlice, WithBitmapSlice};
18use vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryRegion};
19
20use crate::{Descriptor, Error, Reader, Writer};
21use virtio_bindings::bindings::virtio_ring::VRING_DESC_ALIGN_SIZE;
22
/// A virtio descriptor chain.
///
/// Iterates over the descriptors that make up a single guest-supplied buffer
/// chain, starting at `head_index` in the descriptor table.
#[derive(Clone, Debug)]
pub struct DescriptorChain<M> {
    // Object used to access guest memory (e.g. `&GuestMemoryMmap`); only needs to `Deref`
    // to a `GuestMemory` implementation.
    mem: M,
    // Guest physical address of the descriptor table currently being walked; replaced by
    // the indirect table's address when an indirect descriptor is encountered.
    desc_table: GuestAddress,
    // Number of entries in the current descriptor table; also the upper bound on chain length.
    queue_size: u16,
    // Index of the first descriptor in the chain.
    head_index: u16,
    // Index of the next descriptor the iterator will read.
    next_index: u16,
    // Remaining number of descriptors the iterator may still yield; prevents endless
    // iteration over maliciously looped chains.
    ttl: u16,
    // Total bytes covered by descriptors yielded so far; the spec caps a chain at 2^32 bytes.
    yielded_bytes: u32,
    // True while iterating an indirect descriptor table (nested indirection is rejected).
    is_indirect: bool,
}
35
36impl<M> DescriptorChain<M>
37where
38    M: Deref,
39    M::Target: GuestMemory,
40{
41    fn with_ttl(
42        mem: M,
43        desc_table: GuestAddress,
44        queue_size: u16,
45        ttl: u16,
46        head_index: u16,
47    ) -> Self {
48        DescriptorChain {
49            mem,
50            desc_table,
51            queue_size,
52            head_index,
53            next_index: head_index,
54            ttl,
55            is_indirect: false,
56            yielded_bytes: 0,
57        }
58    }
59
60    /// Create a new `DescriptorChain` instance.
61    ///
62    /// # Arguments
63    /// * `mem` - the `GuestMemory` object that can be used to access the buffers pointed to by the
64    ///           descriptor chain.
65    /// * `desc_table` - the address of the descriptor table.
66    /// * `queue_size` - the size of the queue, which is also the maximum size of a descriptor
67    ///                  chain.
68    /// * `head_index` - the descriptor index of the chain head.
69    pub(crate) fn new(mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
70        Self::with_ttl(mem, desc_table, queue_size, queue_size, head_index)
71    }
72
73    /// Get the descriptor index of the chain head.
74    pub fn head_index(&self) -> u16 {
75        self.head_index
76    }
77
78    /// Return a `GuestMemory` object that can be used to access the buffers pointed to by the
79    /// descriptor chain.
80    pub fn memory(&self) -> &M::Target {
81        self.mem.deref()
82    }
83
84    /// Return an iterator that only yields the readable descriptors in the chain.
85    pub fn readable(self) -> DescriptorChainRwIter<M> {
86        DescriptorChainRwIter {
87            chain: self,
88            writable: false,
89        }
90    }
91
92    /// Return a new instance of Writer
93    pub fn writer<'a, B: BitmapSlice>(self, mem: &'a M::Target) -> Result<Writer<'a, B>, Error>
94    where
95        M::Target: Sized,
96        <<M::Target as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
97    {
98        Writer::new(mem, self).map_err(|_| Error::InvalidChain)
99    }
100
101    /// Return a new instance of Reader
102    pub fn reader<'a, B: BitmapSlice>(self, mem: &'a M::Target) -> Result<Reader<'a, B>, Error>
103    where
104        M::Target: Sized,
105        <<M::Target as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
106    {
107        Reader::new(mem, self).map_err(|_| Error::InvalidChain)
108    }
109
110    /// Return an iterator that only yields the writable descriptors in the chain.
111    pub fn writable(self) -> DescriptorChainRwIter<M> {
112        DescriptorChainRwIter {
113            chain: self,
114            writable: true,
115        }
116    }
117
118    // Alters the internal state of the `DescriptorChain` to switch iterating over an
119    // indirect descriptor table defined by `desc`.
120    fn switch_to_indirect_table(&mut self, desc: Descriptor) -> Result<(), Error> {
121        // Check the VIRTQ_DESC_F_INDIRECT flag (i.e., is_indirect) is not set inside
122        // an indirect descriptor.
123        // (see VIRTIO Spec, Section 2.6.5.3.1 Driver Requirements: Indirect Descriptors)
124        if self.is_indirect {
125            return Err(Error::InvalidIndirectDescriptor);
126        }
127
128        // Alignment requirements for vring elements start from virtio 1.0,
129        // but this is not necessary for address of indirect descriptor.
130        if desc.len() & (VRING_DESC_ALIGN_SIZE - 1) != 0 {
131            return Err(Error::InvalidIndirectDescriptorTable);
132        }
133
134        // It is safe to do a plain division since we checked above that desc.len() is a multiple of
135        // VRING_DESC_ALIGN_SIZE, and VRING_DESC_ALIGN_SIZE is != 0.
136        let table_len = desc.len() / VRING_DESC_ALIGN_SIZE;
137        if table_len > u32::from(u16::MAX) {
138            return Err(Error::InvalidIndirectDescriptorTable);
139        }
140
141        self.desc_table = desc.addr();
142        // try_from cannot fail as we've checked table_len above
143        self.queue_size = u16::try_from(table_len).expect("invalid table_len");
144        self.next_index = 0;
145        self.ttl = self.queue_size;
146        self.is_indirect = true;
147
148        Ok(())
149    }
150}
151
152impl<M> Iterator for DescriptorChain<M>
153where
154    M: Deref,
155    M::Target: GuestMemory,
156{
157    type Item = Descriptor;
158
159    /// Return the next descriptor in this descriptor chain, if there is one.
160    ///
161    /// Note that this is distinct from the next descriptor chain returned by
162    /// [`AvailIter`](struct.AvailIter.html), which is the head of the next
163    /// _available_ descriptor chain.
164    fn next(&mut self) -> Option<Self::Item> {
165        if self.ttl == 0 || self.next_index >= self.queue_size {
166            return None;
167        }
168
169        let desc_addr = self
170            .desc_table
171            // The multiplication can not overflow an u64 since we are multiplying an u16 with a
172            // small number.
173            .checked_add(self.next_index as u64 * size_of::<Descriptor>() as u64)?;
174
175        // The guest device driver should not touch the descriptor once submitted, so it's safe
176        // to use read_obj() here.
177        let desc = self.mem.read_obj::<Descriptor>(desc_addr).ok()?;
178
179        if desc.refers_to_indirect_table() {
180            self.switch_to_indirect_table(desc).ok()?;
181            return self.next();
182        }
183
184        // constructing a chain that is longer than 2^32 bytes is illegal,
185        // let's terminate the iteration if something violated this.
186        // (VIRTIO v1.2, 2.7.5.2: "Drivers MUST NOT add a descriptor chain
187        // longer than 2^32 bytes in total;")
188        match self.yielded_bytes.checked_add(desc.len()) {
189            Some(yielded_bytes) => self.yielded_bytes = yielded_bytes,
190            None => return None,
191        };
192
193        if desc.has_next() {
194            self.next_index = desc.next();
195            // It's ok to decrement `self.ttl` here because we check at the start of the method
196            // that it's greater than 0.
197            self.ttl -= 1;
198        } else {
199            self.ttl = 0;
200        }
201
202        Some(desc)
203    }
204}
205
/// An iterator for readable or writable descriptors.
#[derive(Clone)]
pub struct DescriptorChainRwIter<M> {
    // Underlying chain whose descriptors are filtered by direction.
    chain: DescriptorChain<M>,
    // When true, yield only write-only descriptors; otherwise only readable ones.
    writable: bool,
}
212
213impl<M> Iterator for DescriptorChainRwIter<M>
214where
215    M: Deref,
216    M::Target: GuestMemory,
217{
218    type Item = Descriptor;
219
220    /// Return the next readable/writeable descriptor (depending on the `writable` value) in this
221    /// descriptor chain, if there is one.
222    ///
223    /// Note that this is distinct from the next descriptor chain returned by
224    /// [`AvailIter`](struct.AvailIter.html), which is the head of the next
225    /// _available_ descriptor chain.
226    fn next(&mut self) -> Option<Self::Item> {
227        loop {
228            match self.chain.next() {
229                Some(v) => {
230                    if v.is_write_only() == self.writable {
231                        return Some(v);
232                    }
233                }
234                None => return None,
235            }
236        }
237    }
238}
239
240// We can't derive Debug, because rustc doesn't generate the `M::T: Debug` constraint
241impl<M> Debug for DescriptorChainRwIter<M>
242where
243    M: Debug,
244{
245    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
246        f.debug_struct("DescriptorChainRwIter")
247            .field("chain", &self.chain)
248            .field("writable", &self.writable)
249            .finish()
250    }
251}
252
#[cfg(test)]
mod tests {
    use super::*;
    use crate::mock::{DescriptorTable, MockSplitQueue};
    use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_INDIRECT, VRING_DESC_F_NEXT};
    use vm_memory::GuestMemoryMmap;

    #[test]
    fn test_checked_new_descriptor_chain() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        // Sanity check: the mock queue layout ends below the buffer addresses used later.
        assert!(vq.end().0 < 0x1000);

        // index >= queue_size: the iterator must be immediately exhausted.
        assert!(
            DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 16)
                .next()
                .is_none()
        );

        // desc_table address is way off (outside guest memory), so reading fails.
        assert!(
            DescriptorChain::<&GuestMemoryMmap>::new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0)
                .next()
                .is_none()
        );

        {
            // the first desc has a normal len, and the next_descriptor flag is set
            // but the index of the next descriptor is too large
            let desc = Descriptor::new(0x1000, 0x1000, VRING_DESC_F_NEXT as u16, 16);
            vq.desc_table().store(0, desc).unwrap();

            let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
            c.next().unwrap();
            assert!(c.next().is_none());
        }

        // finally, let's test an ok chain
        {
            let desc = Descriptor::new(0x1000, 0x1000, VRING_DESC_F_NEXT as u16, 1);
            vq.desc_table().store(0, desc).unwrap();

            let desc = Descriptor::new(0x2000, 0x1000, 0, 0);
            vq.desc_table().store(1, desc).unwrap();

            let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);

            // `memory()` must return the very same `GuestMemory` object the chain was built with.
            assert_eq!(
                c.memory() as *const GuestMemoryMmap,
                m as *const GuestMemoryMmap
            );

            assert_eq!(c.desc_table, vq.start());
            assert_eq!(c.queue_size, 16);
            assert_eq!(c.ttl, c.queue_size);

            let desc = c.next().unwrap();
            assert_eq!(desc.addr(), GuestAddress(0x1000));
            assert_eq!(desc.len(), 0x1000);
            assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
            assert_eq!(desc.next(), 1);
            // `ttl` decreases by one per descriptor yielded.
            assert_eq!(c.ttl, c.queue_size - 1);

            assert!(c.next().is_some());
            // The descriptor above was the last from the chain, so `ttl` should be 0 now.
            assert_eq!(c.ttl, 0);
            assert!(c.next().is_none());
            assert_eq!(c.ttl, 0);
        }
    }

    #[test]
    fn test_ttl_wrap_around() {
        const QUEUE_SIZE: u16 = 16;

        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x100000)]).unwrap();
        let vq = MockSplitQueue::new(m, QUEUE_SIZE);

        // Populate the entire descriptor table with entries. Only the last one should not have the
        // VIRTQ_DESC_F_NEXT set.
        for i in 0..QUEUE_SIZE - 1 {
            let desc = Descriptor::new(
                0x1000 * (i + 1) as u64,
                0x1000,
                VRING_DESC_F_NEXT as u16,
                i + 1,
            );
            vq.desc_table().store(i, desc).unwrap();
        }
        let desc = Descriptor::new((0x1000 * 16) as u64, 0x1000, 0, 0);
        vq.desc_table().store(QUEUE_SIZE - 1, desc).unwrap();

        let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), QUEUE_SIZE, 0);
        assert_eq!(c.ttl, c.queue_size);

        // Validate that `ttl` wraps around even when the entire descriptor table is populated.
        for i in 0..QUEUE_SIZE {
            let _desc = c.next().unwrap();
            assert_eq!(c.ttl, c.queue_size - i - 1);
        }
        assert!(c.next().is_none());
    }

    #[test]
    fn test_new_from_indirect_descriptor() {
        // This is testing that chaining an indirect table works as expected. It is also a negative
        // test for the following requirement from the spec:
        // `A driver MUST NOT set both VIRTQ_DESC_F_INDIRECT and VIRTQ_DESC_F_NEXT in flags.`. In
        // case the driver is setting both of these flags, we check that the device doesn't panic.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let dtable = vq.desc_table();

        // Create a chain with one normal descriptor and one pointing to an indirect table.
        let desc = Descriptor::new(0x6000, 0x1000, VRING_DESC_F_NEXT as u16, 1);
        dtable.store(0, desc).unwrap();
        // The spec forbids setting both VIRTQ_DESC_F_INDIRECT and VIRTQ_DESC_F_NEXT in flags. We do
        // not currently enforce this rule, we just ignore the VIRTQ_DESC_F_NEXT flag.
        let desc = Descriptor::new(
            0x7000,
            0x1000,
            (VRING_DESC_F_INDIRECT | VRING_DESC_F_NEXT) as u16,
            2,
        );
        dtable.store(1, desc).unwrap();
        let desc = Descriptor::new(0x8000, 0x1000, 0, 0);
        dtable.store(2, desc).unwrap();

        let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);

        // create an indirect table with 4 chained descriptors
        let idtable = DescriptorTable::new(m, GuestAddress(0x7000), 4);
        for i in 0..4u16 {
            let desc: Descriptor = if i < 3 {
                Descriptor::new(0x1000 * i as u64, 0x1000, VRING_DESC_F_NEXT as u16, i + 1)
            } else {
                Descriptor::new(0x1000 * i as u64, 0x1000, 0, 0)
            };
            idtable.store(i, desc).unwrap();
        }

        assert_eq!(c.head_index(), 0);
        // Consume the first descriptor.
        c.next().unwrap();

        // The chain logic hasn't parsed the indirect descriptor yet.
        assert!(!c.is_indirect);

        // Try to iterate through the indirect descriptor chain.
        for i in 0..4 {
            let desc = c.next().unwrap();
            assert!(c.is_indirect);
            if i < 3 {
                assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
                assert_eq!(desc.next(), i + 1);
            }
        }
        // Even though we added a new descriptor after the one that is pointing to the indirect
        // table, this descriptor won't be available when parsing the chain.
        assert!(c.next().is_none());
    }

    #[test]
    fn test_indirect_descriptor_address_noaligned() {
        // Alignment requirements for vring elements start from virtio 1.0,
        // but this is not necessary for address of indirect descriptor.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let dtable = vq.desc_table();

        // Create a chain with a descriptor pointing to an indirect table with unaligned address.
        let desc = Descriptor::new(
            0x7001,
            0x1000,
            (VRING_DESC_F_INDIRECT | VRING_DESC_F_NEXT) as u16,
            2,
        );
        dtable.store(0, desc).unwrap();

        let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);

        // Create an indirect table with 4 chained descriptors.
        let idtable = DescriptorTable::new(m, GuestAddress(0x7001), 4);
        for i in 0..4u16 {
            let desc: Descriptor = if i < 3 {
                Descriptor::new(0x1000 * i as u64, 0x1000, VRING_DESC_F_NEXT as u16, i + 1)
            } else {
                Descriptor::new(0x1000 * i as u64, 0x1000, 0, 0)
            };
            idtable.store(i, desc).unwrap();
        }

        // Try to iterate through the indirect descriptor chain.
        for i in 0..4 {
            let desc = c.next().unwrap();
            assert!(c.is_indirect);
            if i < 3 {
                assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
                assert_eq!(desc.next(), i + 1);
            }
        }
    }

    #[test]
    fn test_indirect_descriptor_err() {
        // We are testing here different misconfigurations of the indirect table. For these error
        // case scenarios, the iterator over the descriptor chain won't return a new descriptor.
        {
            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
            let vq = MockSplitQueue::new(m, 16);

            // Create a chain with a descriptor pointing to an invalid indirect table: len not a
            // multiple of descriptor size.
            let desc = Descriptor::new(0x1000, 0x1001, VRING_DESC_F_INDIRECT as u16, 0);
            vq.desc_table().store(0, desc).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);

            assert!(c.next().is_none());
        }

        {
            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
            let vq = MockSplitQueue::new(m, 16);

            // Create a chain with a descriptor pointing to an invalid indirect table: table len >
            // u16::MAX.
            let desc = Descriptor::new(
                0x1000,
                (u16::MAX as u32 + 1) * VRING_DESC_ALIGN_SIZE,
                VRING_DESC_F_INDIRECT as u16,
                0,
            );
            vq.desc_table().store(0, desc).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);

            assert!(c.next().is_none());
        }

        {
            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
            let vq = MockSplitQueue::new(m, 16);

            // Create a chain with a descriptor pointing to an indirect table.
            let desc = Descriptor::new(0x1000, 0x1000, VRING_DESC_F_INDIRECT as u16, 0);
            vq.desc_table().store(0, desc).unwrap();
            // It's ok for an indirect descriptor to have flags = 0.
            let desc = Descriptor::new(0x3000, 0x1000, 0, 0);
            m.write_obj(desc, GuestAddress(0x1000)).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);
            assert!(c.next().is_some());

            // But it's not allowed to have an indirect descriptor that points to another indirect
            // table.
            let desc = Descriptor::new(0x3000, 0x1000, VRING_DESC_F_INDIRECT as u16, 0);
            m.write_obj(desc, GuestAddress(0x1000)).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);

            assert!(c.next().is_none());
        }
    }
}