virtio_queue/
chain.rs

1// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE-BSD-3-Clause file.
4//
5// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
6//
7// Copyright © 2019 Intel Corporation
8//
9// Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved.
10//
11// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
12
13use std::fmt::{self, Debug};
14use std::mem::size_of;
15use std::ops::Deref;
16
17use vm_memory::bitmap::{BitmapSlice, WithBitmapSlice};
18use vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryRegion};
19
20use crate::{desc::split::Descriptor, Error, Reader, Writer};
21use virtio_bindings::bindings::virtio_ring::VRING_DESC_ALIGN_SIZE;
22
/// A virtio descriptor chain.
#[derive(Clone, Debug)]
pub struct DescriptorChain<M> {
    // Object granting access to guest memory. Generic so callers can supply a
    // plain reference or a smart-pointer-like wrapper (anything that derefs to
    // a `GuestMemory`).
    mem: M,
    // Guest physical address of the descriptor table currently being walked.
    // Rewritten to the indirect table address by `switch_to_indirect_table`.
    desc_table: GuestAddress,
    // Number of entries in the current descriptor table; also the upper bound
    // on how many descriptors a chain may contain.
    queue_size: u16,
    // Index of the chain's head descriptor (fixed for the chain's lifetime).
    head_index: u16,
    // Index of the next descriptor the iterator will read.
    next_index: u16,
    // Descriptors still allowed to be yielded; decremented while iterating so
    // a looped/malformed chain cannot make iteration run forever.
    ttl: u16,
    // Running total of buffer bytes yielded so far; the virtio spec forbids
    // chains longer than 2^32 bytes in total.
    yielded_bytes: u32,
    // True once iteration has switched into an indirect descriptor table.
    is_indirect: bool,
}
35
36impl<M> DescriptorChain<M>
37where
38    M: Deref,
39    M::Target: GuestMemory,
40{
41    fn with_ttl(
42        mem: M,
43        desc_table: GuestAddress,
44        queue_size: u16,
45        ttl: u16,
46        head_index: u16,
47    ) -> Self {
48        DescriptorChain {
49            mem,
50            desc_table,
51            queue_size,
52            head_index,
53            next_index: head_index,
54            ttl,
55            is_indirect: false,
56            yielded_bytes: 0,
57        }
58    }
59
60    /// Create a new `DescriptorChain` instance.
61    ///
62    /// # Arguments
63    ///
64    /// * `mem` - the `GuestMemory` object that can be used to access the buffers pointed to by the descriptor chain.
65    /// * `desc_table` - the address of the descriptor table.
66    /// * `queue_size` - the size of the queue, which is also the maximum size of a descriptor chain.
67    /// * `head_index` - the descriptor index of the chain head.
68    pub(crate) fn new(mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
69        Self::with_ttl(mem, desc_table, queue_size, queue_size, head_index)
70    }
71
72    /// Get the descriptor index of the chain head.
73    pub fn head_index(&self) -> u16 {
74        self.head_index
75    }
76
77    /// Return a `GuestMemory` object that can be used to access the buffers pointed to by the
78    /// descriptor chain.
79    pub fn memory(&self) -> &M::Target {
80        self.mem.deref()
81    }
82
83    /// Return an iterator that only yields the readable descriptors in the chain.
84    pub fn readable(self) -> DescriptorChainRwIter<M> {
85        DescriptorChainRwIter {
86            chain: self,
87            writable: false,
88        }
89    }
90
91    /// Return a new instance of Writer
92    pub fn writer<'a, B: BitmapSlice>(self, mem: &'a M::Target) -> Result<Writer<'a, B>, Error>
93    where
94        M::Target: Sized,
95        <<M::Target as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
96    {
97        Writer::new(mem, self).map_err(|_| Error::InvalidChain)
98    }
99
100    /// Return a new instance of Reader
101    pub fn reader<'a, B: BitmapSlice>(self, mem: &'a M::Target) -> Result<Reader<'a, B>, Error>
102    where
103        M::Target: Sized,
104        <<M::Target as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
105    {
106        Reader::new(mem, self).map_err(|_| Error::InvalidChain)
107    }
108
109    /// Return an iterator that only yields the writable descriptors in the chain.
110    pub fn writable(self) -> DescriptorChainRwIter<M> {
111        DescriptorChainRwIter {
112            chain: self,
113            writable: true,
114        }
115    }
116
117    // Alters the internal state of the `DescriptorChain` to switch iterating over an
118    // indirect descriptor table defined by `desc`.
119    fn switch_to_indirect_table(&mut self, desc: Descriptor) -> Result<(), Error> {
120        // Check the VIRTQ_DESC_F_INDIRECT flag (i.e., is_indirect) is not set inside
121        // an indirect descriptor.
122        // (see VIRTIO Spec, Section 2.6.5.3.1 Driver Requirements: Indirect Descriptors)
123        if self.is_indirect {
124            return Err(Error::InvalidIndirectDescriptor);
125        }
126
127        // Alignment requirements for vring elements start from virtio 1.0,
128        // but this is not necessary for address of indirect descriptor.
129        if desc.len() & (VRING_DESC_ALIGN_SIZE - 1) != 0 {
130            return Err(Error::InvalidIndirectDescriptorTable);
131        }
132
133        // It is safe to do a plain division since we checked above that desc.len() is a multiple of
134        // VRING_DESC_ALIGN_SIZE, and VRING_DESC_ALIGN_SIZE is != 0.
135        let table_len = desc.len() / VRING_DESC_ALIGN_SIZE;
136        if table_len > u32::from(u16::MAX) {
137            return Err(Error::InvalidIndirectDescriptorTable);
138        }
139
140        self.desc_table = desc.addr();
141        // try_from cannot fail as we've checked table_len above
142        self.queue_size = u16::try_from(table_len).expect("invalid table_len");
143        self.next_index = 0;
144        self.ttl = self.queue_size;
145        self.is_indirect = true;
146
147        Ok(())
148    }
149}
150
151impl<M> Iterator for DescriptorChain<M>
152where
153    M: Deref,
154    M::Target: GuestMemory,
155{
156    type Item = Descriptor;
157
158    /// Return the next descriptor in this descriptor chain, if there is one.
159    ///
160    /// Note that this is distinct from the next descriptor chain returned by
161    /// [`AvailIter`](struct.AvailIter.html), which is the head of the next
162    /// _available_ descriptor chain.
163    fn next(&mut self) -> Option<Self::Item> {
164        if self.ttl == 0 || self.next_index >= self.queue_size {
165            return None;
166        }
167
168        let desc_addr = self
169            .desc_table
170            // The multiplication can not overflow an u64 since we are multiplying an u16 with a
171            // small number.
172            .checked_add(self.next_index as u64 * size_of::<Descriptor>() as u64)?;
173
174        // The guest device driver should not touch the descriptor once submitted, so it's safe
175        // to use read_obj() here.
176        let desc = self.mem.read_obj::<Descriptor>(desc_addr).ok()?;
177
178        if desc.refers_to_indirect_table() {
179            self.switch_to_indirect_table(desc).ok()?;
180            return self.next();
181        }
182
183        // constructing a chain that is longer than 2^32 bytes is illegal,
184        // let's terminate the iteration if something violated this.
185        // (VIRTIO v1.2, 2.7.5.2: "Drivers MUST NOT add a descriptor chain
186        // longer than 2^32 bytes in total;")
187        match self.yielded_bytes.checked_add(desc.len()) {
188            Some(yielded_bytes) => self.yielded_bytes = yielded_bytes,
189            None => return None,
190        };
191
192        if desc.has_next() {
193            self.next_index = desc.next();
194            // It's ok to decrement `self.ttl` here because we check at the start of the method
195            // that it's greater than 0.
196            self.ttl -= 1;
197        } else {
198            self.ttl = 0;
199        }
200
201        Some(desc)
202    }
203}
204
/// An iterator for readable or writable descriptors.
#[derive(Clone)]
pub struct DescriptorChainRwIter<M> {
    // Underlying chain whose descriptors are being filtered.
    chain: DescriptorChain<M>,
    // When true, yield only device-writable descriptors; when false, yield
    // only device-readable ones.
    writable: bool,
}
211
212impl<M> Iterator for DescriptorChainRwIter<M>
213where
214    M: Deref,
215    M::Target: GuestMemory,
216{
217    type Item = Descriptor;
218
219    /// Return the next readable/writeable descriptor (depending on the `writable` value) in this
220    /// descriptor chain, if there is one.
221    ///
222    /// Note that this is distinct from the next descriptor chain returned by
223    /// [`AvailIter`](struct.AvailIter.html), which is the head of the next
224    /// _available_ descriptor chain.
225    fn next(&mut self) -> Option<Self::Item> {
226        loop {
227            match self.chain.next() {
228                Some(v) => {
229                    if v.is_write_only() == self.writable {
230                        return Some(v);
231                    }
232                }
233                None => return None,
234            }
235        }
236    }
237}
238
// We can't derive Debug, because rustc doesn't generate the `M::T: Debug` constraint
impl<M> Debug for DescriptorChainRwIter<M>
where
    M: Debug,
{
    // Mirrors what a derived Debug would print: the wrapped chain plus the
    // readable/writable selector flag.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("DescriptorChainRwIter")
            .field("chain", &self.chain)
            .field("writable", &self.writable)
            .finish()
    }
}
251
#[cfg(test)]
mod tests {
    use super::*;
    use crate::desc::{split::Descriptor as SplitDescriptor, RawDescriptor};
    use crate::mock::{DescriptorTable, MockSplitQueue};
    use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_INDIRECT, VRING_DESC_F_NEXT};
    use vm_memory::GuestMemoryMmap;

    #[test]
    fn test_checked_new_descriptor_chain() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        // Sanity check: the mock queue structures live below the buffer
        // addresses (0x1000+) used by the descriptors stored later.
        assert!(vq.end().0 < 0x1000);

        // index >= queue_size: the iterator must yield nothing.
        assert!(
            DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 16)
                .next()
                .is_none()
        );

        // desc_table address is way off (outside guest memory), so reading the
        // first descriptor fails and the iterator yields nothing.
        assert!(
            DescriptorChain::<&GuestMemoryMmap>::new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0)
                .next()
                .is_none()
        );

        {
            // the first desc has a normal len, and the next_descriptor flag is set
            // but the index of the next descriptor is too large
            let desc = RawDescriptor::from(SplitDescriptor::new(
                0x1000,
                0x1000,
                VRING_DESC_F_NEXT as u16,
                16,
            ));
            vq.desc_table().store(0, desc).unwrap();

            let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
            c.next().unwrap();
            // next_index == 16 >= queue_size, so iteration stops here.
            assert!(c.next().is_none());
        }

        // finally, let's test an ok chain
        {
            let desc = RawDescriptor::from(SplitDescriptor::new(
                0x1000,
                0x1000,
                VRING_DESC_F_NEXT as u16,
                1,
            ));
            vq.desc_table().store(0, desc).unwrap();

            let desc = RawDescriptor::from(SplitDescriptor::new(0x2000, 0x1000, 0, 0));
            vq.desc_table().store(1, desc).unwrap();

            let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);

            // memory() must hand back the exact same GuestMemoryMmap object.
            assert_eq!(
                c.memory() as *const GuestMemoryMmap,
                m as *const GuestMemoryMmap
            );

            assert_eq!(c.desc_table, vq.start());
            assert_eq!(c.queue_size, 16);
            // A fresh chain starts with ttl equal to the queue size.
            assert_eq!(c.ttl, c.queue_size);

            let desc = c.next().unwrap();
            assert_eq!(desc.addr(), GuestAddress(0x1000));
            assert_eq!(desc.len(), 0x1000);
            assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
            assert_eq!(desc.next(), 1);
            // ttl decremented once for the non-final descriptor.
            assert_eq!(c.ttl, c.queue_size - 1);

            assert!(c.next().is_some());
            // The descriptor above was the last from the chain, so `ttl` should be 0 now.
            assert_eq!(c.ttl, 0);
            assert!(c.next().is_none());
            assert_eq!(c.ttl, 0);
        }
    }

    #[test]
    fn test_ttl_wrap_around() {
        const QUEUE_SIZE: u16 = 16;

        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x100000)]).unwrap();
        let vq = MockSplitQueue::new(m, QUEUE_SIZE);

        // Populate the entire descriptor table with entries. Only the last one should not have the
        // VIRTQ_DESC_F_NEXT set.
        for i in 0..QUEUE_SIZE - 1 {
            let desc = RawDescriptor::from(SplitDescriptor::new(
                0x1000 * (i + 1) as u64,
                0x1000,
                VRING_DESC_F_NEXT as u16,
                i + 1,
            ));
            vq.desc_table().store(i, desc).unwrap();
        }
        let desc = RawDescriptor::from(SplitDescriptor::new((0x1000 * 16) as u64, 0x1000, 0, 0));
        vq.desc_table().store(QUEUE_SIZE - 1, desc).unwrap();

        let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), QUEUE_SIZE, 0);
        assert_eq!(c.ttl, c.queue_size);

        // Validate that `ttl` wraps around even when the entire descriptor table is populated.
        for i in 0..QUEUE_SIZE {
            let _desc = c.next().unwrap();
            assert_eq!(c.ttl, c.queue_size - i - 1);
        }
        // ttl reached 0 exactly when the table was exhausted.
        assert!(c.next().is_none());
    }

    #[test]
    fn test_new_from_indirect_descriptor() {
        // This is testing that chaining an indirect table works as expected. It is also a negative
        // test for the following requirement from the spec:
        // `A driver MUST NOT set both VIRTQ_DESC_F_INDIRECT and VIRTQ_DESC_F_NEXT in flags.`. In
        // case the driver is setting both of these flags, we check that the device doesn't panic.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let dtable = vq.desc_table();

        // Create a chain with one normal descriptor and one pointing to an indirect table.
        let desc = RawDescriptor::from(SplitDescriptor::new(
            0x6000,
            0x1000,
            VRING_DESC_F_NEXT as u16,
            1,
        ));
        dtable.store(0, desc).unwrap();
        // The spec forbids setting both VIRTQ_DESC_F_INDIRECT and VIRTQ_DESC_F_NEXT in flags. We do
        // not currently enforce this rule, we just ignore the VIRTQ_DESC_F_NEXT flag.
        let desc = RawDescriptor::from(SplitDescriptor::new(
            0x7000,
            0x1000,
            (VRING_DESC_F_INDIRECT | VRING_DESC_F_NEXT) as u16,
            2,
        ));
        dtable.store(1, desc).unwrap();
        let desc = RawDescriptor::from(SplitDescriptor::new(0x8000, 0x1000, 0, 0));
        dtable.store(2, desc).unwrap();

        let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);

        // create an indirect table with 4 chained descriptors
        let idtable = DescriptorTable::new(m, GuestAddress(0x7000), 4);
        for i in 0..4u16 {
            let desc: RawDescriptor = if i < 3 {
                RawDescriptor::from(SplitDescriptor::new(
                    0x1000 * i as u64,
                    0x1000,
                    VRING_DESC_F_NEXT as u16,
                    i + 1,
                ))
            } else {
                RawDescriptor::from(SplitDescriptor::new(0x1000 * i as u64, 0x1000, 0, 0))
            };
            idtable.store(i, desc).unwrap();
        }

        assert_eq!(c.head_index(), 0);
        // Consume the first descriptor.
        c.next().unwrap();

        // The chain logic hasn't parsed the indirect descriptor yet.
        assert!(!c.is_indirect);

        // Try to iterate through the indirect descriptor chain.
        for i in 0..4 {
            let desc = c.next().unwrap();
            assert!(c.is_indirect);
            if i < 3 {
                assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
                assert_eq!(desc.next(), i + 1);
            }
        }
        // Even though we added a new descriptor after the one that is pointing to the indirect
        // table, this descriptor won't be available when parsing the chain.
        assert!(c.next().is_none());
    }

    #[test]
    fn test_indirect_descriptor_address_noaligned() {
        // Alignment requirements for vring elements start from virtio 1.0,
        // but this is not necessary for address of indirect descriptor.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let dtable = vq.desc_table();

        // Create a chain with a descriptor pointing to an indirect table with unaligned address.
        let desc = RawDescriptor::from(SplitDescriptor::new(
            0x7001,
            0x1000,
            (VRING_DESC_F_INDIRECT | VRING_DESC_F_NEXT) as u16,
            2,
        ));
        dtable.store(0, desc).unwrap();

        let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);

        // Create an indirect table with 4 chained descriptors.
        let idtable = DescriptorTable::new(m, GuestAddress(0x7001), 4);
        for i in 0..4u16 {
            let desc: RawDescriptor = if i < 3 {
                RawDescriptor::from(SplitDescriptor::new(
                    0x1000 * i as u64,
                    0x1000,
                    VRING_DESC_F_NEXT as u16,
                    i + 1,
                ))
            } else {
                RawDescriptor::from(SplitDescriptor::new(0x1000 * i as u64, 0x1000, 0, 0))
            };
            idtable.store(i, desc).unwrap();
        }

        // Try to iterate through the indirect descriptor chain.
        // The unaligned table address must not prevent iteration.
        for i in 0..4 {
            let desc = c.next().unwrap();
            assert!(c.is_indirect);
            if i < 3 {
                assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
                assert_eq!(desc.next(), i + 1);
            }
        }
    }

    #[test]
    fn test_indirect_descriptor_err() {
        // We are testing here different misconfigurations of the indirect table. For these error
        // case scenarios, the iterator over the descriptor chain won't return a new descriptor.
        {
            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
            let vq = MockSplitQueue::new(m, 16);

            // Create a chain with a descriptor pointing to an invalid indirect table: len not a
            // multiple of descriptor size.
            let desc = RawDescriptor::from(SplitDescriptor::new(
                0x1000,
                0x1001,
                VRING_DESC_F_INDIRECT as u16,
                0,
            ));
            vq.desc_table().store(0, desc).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);

            assert!(c.next().is_none());
        }

        {
            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
            let vq = MockSplitQueue::new(m, 16);

            // Create a chain with a descriptor pointing to an invalid indirect table: table len >
            // u16::MAX.
            let desc = RawDescriptor::from(SplitDescriptor::new(
                0x1000,
                (u16::MAX as u32 + 1) * VRING_DESC_ALIGN_SIZE,
                VRING_DESC_F_INDIRECT as u16,
                0,
            ));
            vq.desc_table().store(0, desc).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);

            assert!(c.next().is_none());
        }

        {
            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
            let vq = MockSplitQueue::new(m, 16);

            // Create a chain with a descriptor pointing to an indirect table.
            let desc = RawDescriptor::from(SplitDescriptor::new(
                0x1000,
                0x1000,
                VRING_DESC_F_INDIRECT as u16,
                0,
            ));
            vq.desc_table().store(0, desc).unwrap();
            // It's ok for an indirect descriptor to have flags = 0.
            let desc = RawDescriptor::from(SplitDescriptor::new(0x3000, 0x1000, 0, 0));
            m.write_obj(desc, GuestAddress(0x1000)).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);
            assert!(c.next().is_some());

            // But it's not allowed to have an indirect descriptor that points to another indirect
            // table.
            let desc = RawDescriptor::from(SplitDescriptor::new(
                0x3000,
                0x1000,
                VRING_DESC_F_INDIRECT as u16,
                0,
            ));
            m.write_obj(desc, GuestAddress(0x1000)).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);

            assert!(c.next().is_none());
        }
    }
}