axvirtio_common/queue/available.rs

use crate::constants::*;
use crate::error::{VirtioError, VirtioResult};
use alloc::sync::Arc;
use axaddrspace::GuestMemoryAccessor;
use axaddrspace::GuestPhysAddr;

/// VirtIO available ring header structure.
///
/// This structure represents the memory layout of the available ring header
/// in guest memory according to the VirtIO specification. It is a simple
/// C-compatible data structure that directly maps to guest memory.
///
/// The complete available ring in guest memory consists of:
/// 1. This header structure (VirtQueueAvail)
/// 2. An array of descriptor indices (ring[queue_size])
/// 3. An optional used_event field (if VIRTIO_F_EVENT_IDX is negotiated)
///
/// This structure is used by `AvailableRing` to read/write the header portion
/// of the available ring through the guest memory accessor.
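///
/// # Example
///
/// A minimal sketch of header flag handling (standalone; no guest memory
/// involved):
///
/// ```ignore
/// let mut hdr = VirtQueueAvail::new();
/// assert!(!hdr.no_interrupt());
/// hdr.set_no_interrupt(true);
/// assert!(hdr.no_interrupt());
/// hdr.set_no_interrupt(false);
/// assert!(!hdr.no_interrupt());
/// ```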
#[repr(C)]
#[derive(Debug, Clone, Copy, Default)]
pub struct VirtQueueAvail {
    /// Flags
    pub flags: u16,
    /// Index of the next available descriptor
    pub idx: u16,
}

impl VirtQueueAvail {
    /// Create a new available ring header
    pub fn new() -> Self {
        Self { flags: 0, idx: 0 }
    }

    /// Check if interrupts are disabled
    pub fn no_interrupt(&self) -> bool {
        (self.flags & VIRTQ_AVAIL_F_NO_INTERRUPT) != 0
    }

    /// Set the no interrupt flag
    pub fn set_no_interrupt(&mut self, no_interrupt: bool) {
        if no_interrupt {
            self.flags |= VIRTQ_AVAIL_F_NO_INTERRUPT;
        } else {
            self.flags &= !VIRTQ_AVAIL_F_NO_INTERRUPT;
        }
    }
}

/// Available ring management structure.
///
/// This structure provides a high-level interface for managing the VirtIO
/// available ring in guest memory. It wraps the guest memory accessor and
/// provides methods to read/write various parts of the available ring:
/// - The header (VirtQueueAvail structure)
/// - The ring array of descriptor indices
/// - The used_event field (if VIRTIO_F_EVENT_IDX is negotiated)
///
/// Relationship with VirtQueueAvail:
/// - VirtQueueAvail defines the memory layout of the available ring header
/// - AvailableRing uses VirtQueueAvail to access the header in guest memory
/// - AvailableRing manages the entire available ring structure, not just the header
///
/// Memory Layout:
/// ```text
/// base_addr -> +-------------------+
///              | VirtQueueAvail    |  (flags + idx)
///              +-------------------+
///              | ring[0]           |  (descriptor index)
///              | ring[1]           |
///              | ...               |
///              | ring[queue_size-1]|
///              +-------------------+
///              | used_event        |  (optional, if event_idx enabled)
///              +-------------------+
/// ```
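///
/// # Example
///
/// A device-side poll loop sketch (hypothetical: `accessor` is any
/// `GuestMemoryAccessor` implementation, `avail_base` is the guest-physical
/// base of the ring, and the surrounding function returns `VirtioResult`):
///
/// ```ignore
/// let mut ring = AvailableRing::new(avail_base, 256, accessor);
/// let current = ring.read_avail_idx()?;
/// while ring.last_avail_idx != current {
///     // Each ring entry holds the head index of a descriptor chain.
///     let head = ring.read_avail_ring_entry(ring.last_avail_idx)?;
///     // ... process the descriptor chain starting at `head` ...
///     ring.update_last_avail_idx(ring.last_avail_idx.wrapping_add(1));
/// }
/// ```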
#[derive(Debug, Clone)]
pub struct AvailableRing<T: GuestMemoryAccessor + Clone> {
    /// Base address of the available ring
    pub base_addr: GuestPhysAddr,
    /// Queue size
    pub size: u16,
    /// Last seen available index
    pub last_avail_idx: u16,
    /// Guest memory accessor
    accessor: Arc<T>,
}

impl<T: GuestMemoryAccessor + Clone> AvailableRing<T> {
    /// Create a new available ring
    pub fn new(base_addr: GuestPhysAddr, size: u16, accessor: Arc<T>) -> Self {
        Self {
            base_addr,
            size,
            last_avail_idx: 0,
            accessor,
        }
    }

    /// Get the address of the available ring header
    pub fn header_addr(&self) -> GuestPhysAddr {
        self.base_addr
    }

    /// Get the address of the ring array
    pub fn ring_addr(&self) -> GuestPhysAddr {
        self.base_addr + core::mem::size_of::<VirtQueueAvail>()
    }

    /// Get the address of a specific ring entry
    pub fn ring_entry_addr(&self, index: u16) -> Option<GuestPhysAddr> {
        if index >= self.size {
            return None;
        }

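        // Each ring entry is a 2-byte (u16) descriptor index.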
        let offset = core::mem::size_of::<VirtQueueAvail>() + (index as usize * 2);
        Some(self.base_addr + offset)
    }

    /// Get the address of the used event field (if event_idx is enabled)
    pub fn used_event_addr(&self) -> GuestPhysAddr {
        let offset = core::mem::size_of::<VirtQueueAvail>() + (self.size as usize * 2);
        self.base_addr + offset
    }

    /// Calculate the total size of the available ring
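    ///
    /// For example, a 256-entry queue occupies 4 (header) + 256 * 2 (ring)
    /// + 2 (used_event) = 518 bytes.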
    pub fn total_size(&self) -> usize {
        core::mem::size_of::<VirtQueueAvail>() + (self.size as usize * 2) + 2
    }

    /// Check if the available ring is valid
    pub fn is_valid(&self) -> bool {
        self.base_addr.as_usize() != 0 && self.size > 0
    }

    /// Check if there are new available descriptors
    pub fn has_new_avail(&self, current_idx: u16) -> bool {
        current_idx != self.last_avail_idx
    }

    /// Update the last seen available index
    pub fn update_last_avail_idx(&mut self, idx: u16) {
        self.last_avail_idx = idx;
    }

    /// Read the available ring header
    pub fn read_avail_header(&self) -> VirtioResult<VirtQueueAvail> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        self.accessor
            .read_obj(self.base_addr)
            .map_err(|_| VirtioError::InvalidAddress)
    }

    /// Write the available ring header
    pub fn write_avail_header(&self, header: &VirtQueueAvail) -> VirtioResult<()> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        // VirtQueueAvail is Copy; write it by value to match the by-value
        // `write_obj` calls used elsewhere in this module.
        self.accessor
            .write_obj(self.base_addr, *header)
            .map_err(|_| VirtioError::InvalidAddress)
    }

    /// Read the current available index from guest memory
    pub fn read_avail_idx(&self) -> VirtioResult<u16> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        // The idx field sits 2 bytes into the header, after the flags field.
        let idx_addr = self.base_addr + 2;
        self.accessor
            .read_obj(idx_addr)
            .map_err(|_| VirtioError::InvalidAddress)
    }

    /// Get the available index (convenience alias for `read_avail_idx`)
    pub fn get_avail_idx(&self) -> VirtioResult<u16> {
        self.read_avail_idx()
    }

    /// Read a descriptor index from the available ring
    pub fn read_avail_ring_entry(&self, ring_index: u16) -> VirtioResult<u16> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

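        // The caller may pass a free-running index; wrap it into the ring.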
        let entry_addr = self
            .ring_entry_addr(ring_index % self.size)
            .ok_or(VirtioError::InvalidQueue)?;

        self.accessor
            .read_obj(entry_addr)
            .map_err(|_| VirtioError::InvalidAddress)
    }

    /// Write a descriptor index to the available ring
    pub fn write_avail_ring_entry(&self, ring_index: u16, desc_index: u16) -> VirtioResult<()> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        let entry_addr = self
            .ring_entry_addr(ring_index % self.size)
            .ok_or(VirtioError::InvalidQueue)?;

        self.accessor
            .write_obj(entry_addr, desc_index)
            .map_err(|_| VirtioError::InvalidAddress)?;

        Ok(())
    }

    /// Get the number of available descriptors since last check
    pub fn get_available_count(&self) -> VirtioResult<u16> {
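        // The guest's idx is free-running modulo 2^16, so wrapping
        // subtraction yields a correct count even across wrap-around.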
        let current_idx = self.read_avail_idx()?;
        Ok(current_idx.wrapping_sub(self.last_avail_idx))
    }

    /// Check if interrupts are suppressed
    pub fn interrupts_suppressed(&self) -> VirtioResult<bool> {
        let header = self.read_avail_header()?;
        Ok(header.no_interrupt())
    }

    /// Set interrupt suppression
    pub fn set_interrupt_suppression(&self, suppress: bool) -> VirtioResult<()> {
        let mut header = self.read_avail_header()?;
        header.set_no_interrupt(suppress);
        self.write_avail_header(&header)?;
        Ok(())
    }

    /// Read the used event field (for event_idx feature)
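    ///
    /// With VIRTIO_F_EVENT_IDX negotiated, the driver stores here the used-ring
    /// index at which it next wants to be notified, allowing the device to
    /// delay interrupts until that point.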
    pub fn read_used_event(&self) -> VirtioResult<u16> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        let event_addr = self.used_event_addr();
        self.accessor
            .read_obj(event_addr)
            .map_err(|_| VirtioError::InvalidAddress)
    }

    /// Write the used event field (for event_idx feature)
    pub fn write_used_event(&self, event: u16) -> VirtioResult<()> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        let event_addr = self.used_event_addr();
        self.accessor
            .write_obj(event_addr, event)
            .map_err(|_| VirtioError::InvalidAddress)
    }
}