// virtio_queue/chain.rs

// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Copyright © 2019 Intel Corporation
//
// Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use std::fmt::{self, Debug};
use std::mem::size_of;
use std::ops::Deref;

use vm_memory::bitmap::{BitmapSlice, WithBitmapSlice};
use vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryRegion};

use crate::{desc::split::Descriptor, Error, Reader, Writer};
use virtio_bindings::bindings::virtio_ring::VRING_DESC_ALIGN_SIZE;

23/// A virtio descriptor chain.
24#[derive(Clone, Debug)]
25pub struct DescriptorChain<M> {
26    mem: M,
27    desc_table: GuestAddress,
28    queue_size: u16,
29    head_index: u16,
30    next_index: u16,
31    ttl: u16,
32    yielded_bytes: u32,
33    is_indirect: bool,
34}
35
36impl<M> DescriptorChain<M>
37where
38    M: Deref,
39    M::Target: GuestMemory,
40{
41    fn with_ttl(
42        mem: M,
43        desc_table: GuestAddress,
44        queue_size: u16,
45        ttl: u16,
46        head_index: u16,
47    ) -> Self {
48        DescriptorChain {
49            mem,
50            desc_table,
51            queue_size,
52            head_index,
53            next_index: head_index,
54            ttl,
55            is_indirect: false,
56            yielded_bytes: 0,
57        }
58    }
59
60    /// Create a new `DescriptorChain` instance.
61    ///
62    /// # Arguments
63    /// * `mem` - the `GuestMemory` object that can be used to access the buffers pointed to by the
64    ///           descriptor chain.
65    /// * `desc_table` - the address of the descriptor table.
66    /// * `queue_size` - the size of the queue, which is also the maximum size of a descriptor
67    ///                  chain.
68    /// * `head_index` - the descriptor index of the chain head.
69    pub(crate) fn new(mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
70        Self::with_ttl(mem, desc_table, queue_size, queue_size, head_index)
71    }
72
73    /// Get the descriptor index of the chain head.
74    pub fn head_index(&self) -> u16 {
75        self.head_index
76    }
77
78    /// Return a `GuestMemory` object that can be used to access the buffers pointed to by the
79    /// descriptor chain.
80    pub fn memory(&self) -> &M::Target {
81        self.mem.deref()
82    }
83
84    /// Return an iterator that only yields the readable descriptors in the chain.
85    pub fn readable(self) -> DescriptorChainRwIter<M> {
86        DescriptorChainRwIter {
87            chain: self,
88            writable: false,
89        }
90    }
91
92    /// Return a new instance of Writer
93    pub fn writer<'a, B: BitmapSlice>(self, mem: &'a M::Target) -> Result<Writer<'a, B>, Error>
94    where
95        M::Target: Sized,
96        <<M::Target as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
97    {
98        Writer::new(mem, self).map_err(|_| Error::InvalidChain)
99    }
100
101    /// Return a new instance of Reader
102    pub fn reader<'a, B: BitmapSlice>(self, mem: &'a M::Target) -> Result<Reader<'a, B>, Error>
103    where
104        M::Target: Sized,
105        <<M::Target as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
106    {
107        Reader::new(mem, self).map_err(|_| Error::InvalidChain)
108    }
109
110    /// Return an iterator that only yields the writable descriptors in the chain.
111    pub fn writable(self) -> DescriptorChainRwIter<M> {
112        DescriptorChainRwIter {
113            chain: self,
114            writable: true,
115        }
116    }
117
118    // Alters the internal state of the `DescriptorChain` to switch iterating over an
119    // indirect descriptor table defined by `desc`.
120    fn switch_to_indirect_table(&mut self, desc: Descriptor) -> Result<(), Error> {
121        // Check the VIRTQ_DESC_F_INDIRECT flag (i.e., is_indirect) is not set inside
122        // an indirect descriptor.
123        // (see VIRTIO Spec, Section 2.6.5.3.1 Driver Requirements: Indirect Descriptors)
124        if self.is_indirect {
125            return Err(Error::InvalidIndirectDescriptor);
126        }
127
128        // Alignment requirements for vring elements start from virtio 1.0,
129        // but this is not necessary for address of indirect descriptor.
130        if desc.len() & (VRING_DESC_ALIGN_SIZE - 1) != 0 {
131            return Err(Error::InvalidIndirectDescriptorTable);
132        }
133
134        // It is safe to do a plain division since we checked above that desc.len() is a multiple of
135        // VRING_DESC_ALIGN_SIZE, and VRING_DESC_ALIGN_SIZE is != 0.
136        let table_len = desc.len() / VRING_DESC_ALIGN_SIZE;
137        if table_len > u32::from(u16::MAX) {
138            return Err(Error::InvalidIndirectDescriptorTable);
139        }
140
141        self.desc_table = desc.addr();
142        // try_from cannot fail as we've checked table_len above
143        self.queue_size = u16::try_from(table_len).expect("invalid table_len");
144        self.next_index = 0;
145        self.ttl = self.queue_size;
146        self.is_indirect = true;
147
148        Ok(())
149    }
150}
151
152impl<M> Iterator for DescriptorChain<M>
153where
154    M: Deref,
155    M::Target: GuestMemory,
156{
157    type Item = Descriptor;
158
159    /// Return the next descriptor in this descriptor chain, if there is one.
160    ///
161    /// Note that this is distinct from the next descriptor chain returned by
162    /// [`AvailIter`](struct.AvailIter.html), which is the head of the next
163    /// _available_ descriptor chain.
164    fn next(&mut self) -> Option<Self::Item> {
165        if self.ttl == 0 || self.next_index >= self.queue_size {
166            return None;
167        }
168
169        let desc_addr = self
170            .desc_table
171            // The multiplication can not overflow an u64 since we are multiplying an u16 with a
172            // small number.
173            .checked_add(self.next_index as u64 * size_of::<Descriptor>() as u64)?;
174
175        // The guest device driver should not touch the descriptor once submitted, so it's safe
176        // to use read_obj() here.
177        let desc = self.mem.read_obj::<Descriptor>(desc_addr).ok()?;
178
179        if desc.refers_to_indirect_table() {
180            self.switch_to_indirect_table(desc).ok()?;
181            return self.next();
182        }
183
184        // constructing a chain that is longer than 2^32 bytes is illegal,
185        // let's terminate the iteration if something violated this.
186        // (VIRTIO v1.2, 2.7.5.2: "Drivers MUST NOT add a descriptor chain
187        // longer than 2^32 bytes in total;")
188        match self.yielded_bytes.checked_add(desc.len()) {
189            Some(yielded_bytes) => self.yielded_bytes = yielded_bytes,
190            None => return None,
191        };
192
193        if desc.has_next() {
194            self.next_index = desc.next();
195            // It's ok to decrement `self.ttl` here because we check at the start of the method
196            // that it's greater than 0.
197            self.ttl -= 1;
198        } else {
199            self.ttl = 0;
200        }
201
202        Some(desc)
203    }
204}
205
206/// An iterator for readable or writable descriptors.
207#[derive(Clone)]
208pub struct DescriptorChainRwIter<M> {
209    chain: DescriptorChain<M>,
210    writable: bool,
211}
212
213impl<M> Iterator for DescriptorChainRwIter<M>
214where
215    M: Deref,
216    M::Target: GuestMemory,
217{
218    type Item = Descriptor;
219
220    /// Return the next readable/writeable descriptor (depending on the `writable` value) in this
221    /// descriptor chain, if there is one.
222    ///
223    /// Note that this is distinct from the next descriptor chain returned by
224    /// [`AvailIter`](struct.AvailIter.html), which is the head of the next
225    /// _available_ descriptor chain.
226    fn next(&mut self) -> Option<Self::Item> {
227        loop {
228            match self.chain.next() {
229                Some(v) => {
230                    if v.is_write_only() == self.writable {
231                        return Some(v);
232                    }
233                }
234                None => return None,
235            }
236        }
237    }
238}
239
240// We can't derive Debug, because rustc doesn't generate the `M::T: Debug` constraint
241impl<M> Debug for DescriptorChainRwIter<M>
242where
243    M: Debug,
244{
245    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
246        f.debug_struct("DescriptorChainRwIter")
247            .field("chain", &self.chain)
248            .field("writable", &self.writable)
249            .finish()
250    }
251}
252
253#[cfg(test)]
254mod tests {
255    use super::*;
256    use crate::desc::{split::Descriptor as SplitDescriptor, RawDescriptor};
257    use crate::mock::{DescriptorTable, MockSplitQueue};
258    use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_INDIRECT, VRING_DESC_F_NEXT};
259    use vm_memory::GuestMemoryMmap;
260
261    #[test]
262    fn test_checked_new_descriptor_chain() {
263        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
264        let vq = MockSplitQueue::new(m, 16);
265
266        assert!(vq.end().0 < 0x1000);
267
268        // index >= queue_size
269        assert!(
270            DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 16)
271                .next()
272                .is_none()
273        );
274
275        // desc_table address is way off
276        assert!(
277            DescriptorChain::<&GuestMemoryMmap>::new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0)
278                .next()
279                .is_none()
280        );
281
282        {
283            // the first desc has a normal len, and the next_descriptor flag is set
284            // but the the index of the next descriptor is too large
285            let desc = RawDescriptor::from(SplitDescriptor::new(
286                0x1000,
287                0x1000,
288                VRING_DESC_F_NEXT as u16,
289                16,
290            ));
291            vq.desc_table().store(0, desc).unwrap();
292
293            let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
294            c.next().unwrap();
295            assert!(c.next().is_none());
296        }
297
298        // finally, let's test an ok chain
299        {
300            let desc = RawDescriptor::from(SplitDescriptor::new(
301                0x1000,
302                0x1000,
303                VRING_DESC_F_NEXT as u16,
304                1,
305            ));
306            vq.desc_table().store(0, desc).unwrap();
307
308            let desc = RawDescriptor::from(SplitDescriptor::new(0x2000, 0x1000, 0, 0));
309            vq.desc_table().store(1, desc).unwrap();
310
311            let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
312
313            assert_eq!(
314                c.memory() as *const GuestMemoryMmap,
315                m as *const GuestMemoryMmap
316            );
317
318            assert_eq!(c.desc_table, vq.start());
319            assert_eq!(c.queue_size, 16);
320            assert_eq!(c.ttl, c.queue_size);
321
322            let desc = c.next().unwrap();
323            assert_eq!(desc.addr(), GuestAddress(0x1000));
324            assert_eq!(desc.len(), 0x1000);
325            assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
326            assert_eq!(desc.next(), 1);
327            assert_eq!(c.ttl, c.queue_size - 1);
328
329            assert!(c.next().is_some());
330            // The descriptor above was the last from the chain, so `ttl` should be 0 now.
331            assert_eq!(c.ttl, 0);
332            assert!(c.next().is_none());
333            assert_eq!(c.ttl, 0);
334        }
335    }
336
337    #[test]
338    fn test_ttl_wrap_around() {
339        const QUEUE_SIZE: u16 = 16;
340
341        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x100000)]).unwrap();
342        let vq = MockSplitQueue::new(m, QUEUE_SIZE);
343
344        // Populate the entire descriptor table with entries. Only the last one should not have the
345        // VIRTQ_DESC_F_NEXT set.
346        for i in 0..QUEUE_SIZE - 1 {
347            let desc = RawDescriptor::from(SplitDescriptor::new(
348                0x1000 * (i + 1) as u64,
349                0x1000,
350                VRING_DESC_F_NEXT as u16,
351                i + 1,
352            ));
353            vq.desc_table().store(i, desc).unwrap();
354        }
355        let desc = RawDescriptor::from(SplitDescriptor::new((0x1000 * 16) as u64, 0x1000, 0, 0));
356        vq.desc_table().store(QUEUE_SIZE - 1, desc).unwrap();
357
358        let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), QUEUE_SIZE, 0);
359        assert_eq!(c.ttl, c.queue_size);
360
361        // Validate that `ttl` wraps around even when the entire descriptor table is populated.
362        for i in 0..QUEUE_SIZE {
363            let _desc = c.next().unwrap();
364            assert_eq!(c.ttl, c.queue_size - i - 1);
365        }
366        assert!(c.next().is_none());
367    }
368
369    #[test]
370    fn test_new_from_indirect_descriptor() {
371        // This is testing that chaining an indirect table works as expected. It is also a negative
372        // test for the following requirement from the spec:
373        // `A driver MUST NOT set both VIRTQ_DESC_F_INDIRECT and VIRTQ_DESC_F_NEXT in flags.`. In
374        // case the driver is setting both of these flags, we check that the device doesn't panic.
375        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
376        let vq = MockSplitQueue::new(m, 16);
377        let dtable = vq.desc_table();
378
379        // Create a chain with one normal descriptor and one pointing to an indirect table.
380        let desc = RawDescriptor::from(SplitDescriptor::new(
381            0x6000,
382            0x1000,
383            VRING_DESC_F_NEXT as u16,
384            1,
385        ));
386        dtable.store(0, desc).unwrap();
387        // The spec forbids setting both VIRTQ_DESC_F_INDIRECT and VIRTQ_DESC_F_NEXT in flags. We do
388        // not currently enforce this rule, we just ignore the VIRTQ_DESC_F_NEXT flag.
389        let desc = RawDescriptor::from(SplitDescriptor::new(
390            0x7000,
391            0x1000,
392            (VRING_DESC_F_INDIRECT | VRING_DESC_F_NEXT) as u16,
393            2,
394        ));
395        dtable.store(1, desc).unwrap();
396        let desc = RawDescriptor::from(SplitDescriptor::new(0x8000, 0x1000, 0, 0));
397        dtable.store(2, desc).unwrap();
398
399        let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);
400
401        // create an indirect table with 4 chained descriptors
402        let idtable = DescriptorTable::new(m, GuestAddress(0x7000), 4);
403        for i in 0..4u16 {
404            let desc: RawDescriptor = if i < 3 {
405                RawDescriptor::from(SplitDescriptor::new(
406                    0x1000 * i as u64,
407                    0x1000,
408                    VRING_DESC_F_NEXT as u16,
409                    i + 1,
410                ))
411            } else {
412                RawDescriptor::from(SplitDescriptor::new(0x1000 * i as u64, 0x1000, 0, 0))
413            };
414            idtable.store(i, desc).unwrap();
415        }
416
417        assert_eq!(c.head_index(), 0);
418        // Consume the first descriptor.
419        c.next().unwrap();
420
421        // The chain logic hasn't parsed the indirect descriptor yet.
422        assert!(!c.is_indirect);
423
424        // Try to iterate through the indirect descriptor chain.
425        for i in 0..4 {
426            let desc = c.next().unwrap();
427            assert!(c.is_indirect);
428            if i < 3 {
429                assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
430                assert_eq!(desc.next(), i + 1);
431            }
432        }
433        // Even though we added a new descriptor after the one that is pointing to the indirect
434        // table, this descriptor won't be available when parsing the chain.
435        assert!(c.next().is_none());
436    }
437
438    #[test]
439    fn test_indirect_descriptor_address_noaligned() {
440        // Alignment requirements for vring elements start from virtio 1.0,
441        // but this is not necessary for address of indirect descriptor.
442        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
443        let vq = MockSplitQueue::new(m, 16);
444        let dtable = vq.desc_table();
445
446        // Create a chain with a descriptor pointing to an indirect table with unaligned address.
447        let desc = RawDescriptor::from(SplitDescriptor::new(
448            0x7001,
449            0x1000,
450            (VRING_DESC_F_INDIRECT | VRING_DESC_F_NEXT) as u16,
451            2,
452        ));
453        dtable.store(0, desc).unwrap();
454
455        let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);
456
457        // Create an indirect table with 4 chained descriptors.
458        let idtable = DescriptorTable::new(m, GuestAddress(0x7001), 4);
459        for i in 0..4u16 {
460            let desc: RawDescriptor = if i < 3 {
461                RawDescriptor::from(SplitDescriptor::new(
462                    0x1000 * i as u64,
463                    0x1000,
464                    VRING_DESC_F_NEXT as u16,
465                    i + 1,
466                ))
467            } else {
468                RawDescriptor::from(SplitDescriptor::new(0x1000 * i as u64, 0x1000, 0, 0))
469            };
470            idtable.store(i, desc).unwrap();
471        }
472
473        // Try to iterate through the indirect descriptor chain.
474        for i in 0..4 {
475            let desc = c.next().unwrap();
476            assert!(c.is_indirect);
477            if i < 3 {
478                assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
479                assert_eq!(desc.next(), i + 1);
480            }
481        }
482    }
483
484    #[test]
485    fn test_indirect_descriptor_err() {
486        // We are testing here different misconfigurations of the indirect table. For these error
487        // case scenarios, the iterator over the descriptor chain won't return a new descriptor.
488        {
489            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
490            let vq = MockSplitQueue::new(m, 16);
491
492            // Create a chain with a descriptor pointing to an invalid indirect table: len not a
493            // multiple of descriptor size.
494            let desc = RawDescriptor::from(SplitDescriptor::new(
495                0x1000,
496                0x1001,
497                VRING_DESC_F_INDIRECT as u16,
498                0,
499            ));
500            vq.desc_table().store(0, desc).unwrap();
501
502            let mut c: DescriptorChain<&GuestMemoryMmap> =
503                DescriptorChain::new(m, vq.start(), 16, 0);
504
505            assert!(c.next().is_none());
506        }
507
508        {
509            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
510            let vq = MockSplitQueue::new(m, 16);
511
512            // Create a chain with a descriptor pointing to an invalid indirect table: table len >
513            // u16::MAX.
514            let desc = RawDescriptor::from(SplitDescriptor::new(
515                0x1000,
516                (u16::MAX as u32 + 1) * VRING_DESC_ALIGN_SIZE,
517                VRING_DESC_F_INDIRECT as u16,
518                0,
519            ));
520            vq.desc_table().store(0, desc).unwrap();
521
522            let mut c: DescriptorChain<&GuestMemoryMmap> =
523                DescriptorChain::new(m, vq.start(), 16, 0);
524
525            assert!(c.next().is_none());
526        }
527
528        {
529            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
530            let vq = MockSplitQueue::new(m, 16);
531
532            // Create a chain with a descriptor pointing to an indirect table.
533            let desc = RawDescriptor::from(SplitDescriptor::new(
534                0x1000,
535                0x1000,
536                VRING_DESC_F_INDIRECT as u16,
537                0,
538            ));
539            vq.desc_table().store(0, desc).unwrap();
540            // It's ok for an indirect descriptor to have flags = 0.
541            let desc = RawDescriptor::from(SplitDescriptor::new(0x3000, 0x1000, 0, 0));
542            m.write_obj(desc, GuestAddress(0x1000)).unwrap();
543
544            let mut c: DescriptorChain<&GuestMemoryMmap> =
545                DescriptorChain::new(m, vq.start(), 16, 0);
546            assert!(c.next().is_some());
547
548            // But it's not allowed to have an indirect descriptor that points to another indirect
549            // table.
550            let desc = RawDescriptor::from(SplitDescriptor::new(
551                0x3000,
552                0x1000,
553                VRING_DESC_F_INDIRECT as u16,
554                0,
555            ));
556            m.write_obj(desc, GuestAddress(0x1000)).unwrap();
557
558            let mut c: DescriptorChain<&GuestMemoryMmap> =
559                DescriptorChain::new(m, vq.start(), 16, 0);
560
561            assert!(c.next().is_none());
562        }
563    }
564}