virtio_queue/desc/packed.rs

//! Packed virtqueue descriptor definitions.
use virtio_bindings::bindings::virtio_ring::{
    VRING_DESC_F_INDIRECT, VRING_DESC_F_NEXT, VRING_DESC_F_WRITE,
};
use vm_memory::{ByteValued, GuestAddress, Le16, Le32, Le64};

/// A virtio packed descriptor with C representation.
#[repr(C)]
#[derive(Default, Clone, Copy, Debug)]
pub struct Descriptor {
    /// Guest physical address of device specific data.
    addr: Le64,
    /// Length of device specific data.
    len: Le32,
    /// Buffer ID associated with the descriptor chain.
    id: Le16,
    /// Includes next, write, and indirect bits.
    flags: Le16,
}

#[allow(clippy::len_without_is_empty)]
impl Descriptor {
    /// Return the guest physical address of the descriptor buffer.
    pub fn addr(&self) -> GuestAddress {
        GuestAddress(self.addr.into())
    }

    /// Return the length of the descriptor buffer.
    pub fn len(&self) -> u32 {
        self.len.into()
    }

    /// Return the flags for this descriptor, including next, write and indirect bits.
    pub fn flags(&self) -> u16 {
        self.flags.into()
    }

    /// Return the buffer ID of the descriptor.
    pub fn id(&self) -> u16 {
        self.id.into()
    }

    /// Check whether this descriptor refers to a buffer containing an indirect descriptor table.
    pub fn refers_to_indirect_table(&self) -> bool {
        self.flags() & VRING_DESC_F_INDIRECT as u16 != 0
    }

    /// Check whether the `VIRTQ_DESC_F_NEXT` flag is set for this descriptor.
    pub fn has_next(&self) -> bool {
        self.flags() & VRING_DESC_F_NEXT as u16 != 0
    }

    /// Check if the driver designated this as a write only descriptor.
    ///
    /// If this is false, this descriptor is read only.
    /// Write only means the emulated device can write and the driver can read.
    pub fn is_write_only(&self) -> bool {
        self.flags() & VRING_DESC_F_WRITE as u16 != 0
    }
}

impl Descriptor {
    /// Create a new descriptor.
    ///
    /// # Arguments
    /// * `addr` - the guest physical address of the descriptor buffer.
    /// * `len` - the length of the descriptor buffer.
    /// * `id` - the buffer ID of the descriptor.
    /// * `flags` - the `flags` for the descriptor.
    pub fn new(addr: u64, len: u32, id: u16, flags: u16) -> Self {
        Descriptor {
            addr: addr.into(),
            len: len.into(),
            id: id.into(),
            flags: flags.into(),
        }
    }

    /// Set the guest physical address of the descriptor buffer.
    pub fn set_addr(&mut self, addr: u64) {
        self.addr = addr.into();
    }

    /// Set the length of the descriptor buffer.
    pub fn set_len(&mut self, len: u32) {
        self.len = len.into();
    }

    /// Set the flags for this descriptor.
    pub fn set_flags(&mut self, flags: u16) {
        self.flags = flags.into();
    }

    /// Set the buffer ID of the descriptor.
    pub fn set_id(&mut self, id: u16) {
        self.id = id.into();
    }
}

// SAFETY: This is safe because `Descriptor` contains only wrappers over POD types and
// all accesses through the safe `vm-memory` API will validate any garbage that could be
// included in there.
unsafe impl ByteValued for Descriptor {}

/// A packed descriptor event with C representation.
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct PackedDescEvent {
    off_wrap: Le16,
    flags: Le16,
}

impl PackedDescEvent {
    /// Create a new `PackedDescEvent` instance.
    ///
    /// # Arguments
    /// * `off_wrap` - the descriptor event offset and wrap counter.
    /// * `flags` - the descriptor event flags.
    #[allow(unused)]
    pub(crate) fn new(off_wrap: u16, flags: u16) -> Self {
        PackedDescEvent {
            off_wrap: off_wrap.into(),
            flags: flags.into(),
        }
    }
}

// SAFETY: This is safe because `PackedDescEvent` contains only wrappers over POD types and
// all accesses through the safe `vm-memory` API will validate any garbage that could be
// included in there.
unsafe impl ByteValued for PackedDescEvent {}

#[cfg(test)]
mod tests {
    use super::*;
    use memoffset::offset_of;
    use std::mem::{align_of, size_of};

    #[test]
    fn test_descriptor_offset() {
        assert_eq!(size_of::<Descriptor>(), 16);
        assert_eq!(offset_of!(Descriptor, addr), 0);
        assert_eq!(offset_of!(Descriptor, len), 8);
        assert_eq!(offset_of!(Descriptor, id), 12);
        assert_eq!(offset_of!(Descriptor, flags), 14);
        assert!(align_of::<Descriptor>() <= 16);
    }

    #[test]
    fn test_descriptor_getter_setter() {
        let mut desc = Descriptor::new(0, 0, 0, 0);

        desc.set_addr(0x1000);
        assert_eq!(desc.addr(), GuestAddress(0x1000));
        desc.set_len(0x2000);
        assert_eq!(desc.len(), 0x2000);
        desc.set_flags(VRING_DESC_F_NEXT as u16);
        assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
        assert!(desc.has_next());
        assert!(!desc.is_write_only());
        assert!(!desc.refers_to_indirect_table());
        desc.set_flags(VRING_DESC_F_WRITE as u16);
        assert_eq!(desc.flags(), VRING_DESC_F_WRITE as u16);
        assert!(!desc.has_next());
        assert!(desc.is_write_only());
        assert!(!desc.refers_to_indirect_table());
        desc.set_flags(VRING_DESC_F_INDIRECT as u16);
        assert_eq!(desc.flags(), VRING_DESC_F_INDIRECT as u16);
        assert!(!desc.is_write_only());
        assert!(desc.refers_to_indirect_table());
        desc.set_id(1);
        assert_eq!(desc.id(), 1);
    }

    #[test]
    fn test_descriptor_copy() {
        let e1 = Descriptor::new(1, 2, 0, 3);
        let mut e2 = Descriptor::default();

        e2.as_mut_slice().copy_from_slice(e1.as_slice());
        assert_eq!(e1.addr(), e2.addr());
        assert_eq!(e1.len(), e2.len());
        assert_eq!(e1.id(), e2.id());
        assert_eq!(e1.flags(), e2.flags());
    }
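
    // Illustrative round-trip through guest memory: since `Descriptor` is `ByteValued`, it
    // can be copied in and out of guest memory through vm-memory's safe `Bytes` API. This
    // sketch assumes vm-memory's `backend-mmap` feature (providing `GuestMemoryMmap`) is
    // enabled for tests; adapt or drop it if that feature is unavailable.
    #[test]
    fn test_descriptor_guest_memory_round_trip() {
        use vm_memory::{Bytes, GuestMemoryMmap};

        let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
        let desc = Descriptor::new(0x100, 0x200, 3, VRING_DESC_F_WRITE as u16);

        // Copy the descriptor into guest memory and read it back byte-for-byte.
        mem.write_obj(desc, GuestAddress(0x10)).unwrap();
        let copy: Descriptor = mem.read_obj(GuestAddress(0x10)).unwrap();

        assert_eq!(copy.addr(), GuestAddress(0x100));
        assert_eq!(copy.len(), 0x200);
        assert_eq!(copy.id(), 3);
        assert!(copy.is_write_only());
    }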

    #[test]
    fn test_packed_desc_event_offset() {
        assert_eq!(offset_of!(PackedDescEvent, off_wrap), 0);
        assert_eq!(offset_of!(PackedDescEvent, flags), 2);
        assert_eq!(size_of::<PackedDescEvent>(), 4);
    }

    #[test]
    fn test_packed_desc_event_copy() {
        let e1 = PackedDescEvent::new(1, 2);
        let mut e2 = PackedDescEvent::new(0, 0);

        e2.as_mut_slice().copy_from_slice(e1.as_slice());
        assert_eq!(e1.off_wrap, e2.off_wrap);
        assert_eq!(e1.flags, e2.flags);
    }
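
    // Illustrative sketch of the `off_wrap` layout from the packed virtqueue event
    // suppression format in the virtio 1.1 spec: bits 0-14 hold the descriptor ring offset
    // and bit 15 holds the wrap counter. The bit positions are written as literals here
    // rather than through virtio-bindings constants, which is an assumption of this example.
    #[test]
    fn test_packed_desc_event_off_wrap_encoding() {
        let offset: u16 = 5;
        let wrap_counter: u16 = 1;
        let off_wrap = offset | (wrap_counter << 15);

        let event = PackedDescEvent::new(off_wrap, 0);
        // The stored little-endian field round-trips the encoded value.
        assert_eq!(u16::from(event.off_wrap), off_wrap);
        assert_eq!(u16::from(event.off_wrap) & 0x7fff, offset);
        assert_eq!(u16::from(event.off_wrap) >> 15, wrap_counter);
    }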
}