axvirtio_common/queue/available.rs

use crate::constants::*;
use crate::error::{VirtioError, VirtioResult};
use alloc::sync::Arc;
use axaddrspace::{GuestMemoryAccessor, GuestPhysAddr};

/// In-memory layout of the available ring header as defined by the VirtIO
/// specification: a 16-bit `flags` field followed by a 16-bit `idx` field.
/// The ring entries and the trailing `used_event` field follow this header
/// in guest memory.
#[repr(C)]
#[derive(Debug, Clone, Copy, Default)]
pub struct VirtQueueAvail {
    /// Ring flags; `VIRTQ_AVAIL_F_NO_INTERRUPT` asks the device to suppress
    /// used-buffer notifications.
    pub flags: u16,
    /// Index at which the driver will place the next available descriptor head.
    pub idx: u16,
}

impl VirtQueueAvail {
    /// Creates a header with both fields cleared.
    pub fn new() -> Self {
        Self { flags: 0, idx: 0 }
    }

    /// Returns `true` if the driver has asked the device not to send
    /// used-buffer notifications.
    pub fn no_interrupt(&self) -> bool {
        (self.flags & VIRTQ_AVAIL_F_NO_INTERRUPT) != 0
    }

    /// Sets or clears the notification-suppression flag.
    pub fn set_no_interrupt(&mut self, no_interrupt: bool) {
        if no_interrupt {
            self.flags |= VIRTQ_AVAIL_F_NO_INTERRUPT;
        } else {
            self.flags &= !VIRTQ_AVAIL_F_NO_INTERRUPT;
        }
    }
}

/// Device-side view of a virtqueue's available ring, accessed through a
/// guest-memory accessor rather than a direct mapping.
#[derive(Debug, Clone)]
pub struct AvailableRing<T: GuestMemoryAccessor + Clone> {
    /// Guest-physical address of the ring (start of the header).
    pub base_addr: GuestPhysAddr,
    /// Number of entries in the ring (the queue size).
    pub size: u16,
    /// Index of the last available entry this side has consumed.
    pub last_avail_idx: u16,
    /// Accessor used for all reads and writes of guest memory.
    accessor: Arc<T>,
}

impl<T: GuestMemoryAccessor + Clone> AvailableRing<T> {
    /// Creates a ring view at `base_addr` with `size` entries, starting with
    /// no entries consumed.
    pub fn new(base_addr: GuestPhysAddr, size: u16, accessor: Arc<T>) -> Self {
        Self {
            base_addr,
            size,
            last_avail_idx: 0,
            accessor,
        }
    }

    /// Guest-physical address of the ring header (`flags`/`idx`).
    pub fn header_addr(&self) -> GuestPhysAddr {
        self.base_addr
    }

    /// Guest-physical address of the first ring entry, which follows the
    /// header directly.
    pub fn ring_addr(&self) -> GuestPhysAddr {
        self.base_addr + core::mem::size_of::<VirtQueueAvail>()
    }

    /// Guest-physical address of the ring entry at `index`, or `None` if the
    /// index is out of range. Each entry is a 16-bit descriptor head index.
    pub fn ring_entry_addr(&self, index: u16) -> Option<GuestPhysAddr> {
        if index >= self.size {
            return None;
        }

        let offset = core::mem::size_of::<VirtQueueAvail>() + (index as usize * 2);
        Some(self.base_addr + offset)
    }

    /// Guest-physical address of the `used_event` field, which sits right
    /// after the last ring entry.
    pub fn used_event_addr(&self) -> GuestPhysAddr {
        let offset = core::mem::size_of::<VirtQueueAvail>() + (self.size as usize * 2);
        self.base_addr + offset
    }

    /// Total size in bytes of the available ring: the header, `size` 16-bit
    /// ring entries, and the trailing 16-bit `used_event` field.
    pub fn total_size(&self) -> usize {
        core::mem::size_of::<VirtQueueAvail>() + (self.size as usize * 2) + 2
    }

    /// Returns `true` once the ring has a non-zero base address and size.
    pub fn is_valid(&self) -> bool {
        self.base_addr.as_usize() != 0 && self.size > 0
    }

    /// Returns `true` if the guest's `idx` differs from the last index this
    /// side has consumed, i.e. new buffers are available.
    pub fn has_new_avail(&self, current_idx: u16) -> bool {
        current_idx != self.last_avail_idx
    }

    /// Records how far the available ring has been consumed.
    pub fn update_last_avail_idx(&mut self, idx: u16) {
        self.last_avail_idx = idx;
    }

    /// Reads the ring header (`flags` and `idx`) from guest memory.
    pub fn read_avail_header(&self) -> VirtioResult<VirtQueueAvail> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        self.accessor
            .read_obj(self.base_addr)
            .map_err(|_| VirtioError::InvalidAddress)
    }

    /// Writes the ring header back to guest memory.
    pub fn write_avail_header(&self, header: &VirtQueueAvail) -> VirtioResult<()> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        self.accessor
            .write_obj(self.base_addr, *header)
            .map_err(|_| VirtioError::InvalidAddress)
    }

    /// Reads the driver's current `idx` field; it lives at offset 2 in the
    /// header, right after the 16-bit `flags` field.
    pub fn read_avail_idx(&self) -> VirtioResult<u16> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        let idx_addr = self.base_addr + 2;
        self.accessor
            .read_obj(idx_addr)
            .map_err(|_| VirtioError::InvalidAddress)
    }

    /// Convenience alias for [`Self::read_avail_idx`].
    pub fn get_avail_idx(&self) -> VirtioResult<u16> {
        self.read_avail_idx()
    }

    /// Reads the descriptor head index stored at `ring_index`; the index is
    /// reduced modulo the queue size before the lookup.
    pub fn read_avail_ring_entry(&self, ring_index: u16) -> VirtioResult<u16> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        let entry_addr = self
            .ring_entry_addr(ring_index % self.size)
            .ok_or(VirtioError::InvalidQueue)?;

        self.accessor
            .read_obj(entry_addr)
            .map_err(|_| VirtioError::InvalidAddress)
    }

    /// Writes `desc_index` into the ring slot at `ring_index` (modulo the
    /// queue size).
    pub fn write_avail_ring_entry(&self, ring_index: u16, desc_index: u16) -> VirtioResult<()> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        let entry_addr = self
            .ring_entry_addr(ring_index % self.size)
            .ok_or(VirtioError::InvalidQueue)?;

        self.accessor
            .write_obj(entry_addr, desc_index)
            .map_err(|_| VirtioError::InvalidAddress)?;

        Ok(())
    }

    /// Number of entries the driver has published but this side has not yet
    /// consumed, computed with wrapping arithmetic on the 16-bit indices.
    pub fn get_available_count(&self) -> VirtioResult<u16> {
        let current_idx = self.read_avail_idx()?;
        Ok(current_idx.wrapping_sub(self.last_avail_idx))
    }

    /// Returns `true` if the driver has set `VIRTQ_AVAIL_F_NO_INTERRUPT`.
    pub fn interrupts_suppressed(&self) -> VirtioResult<bool> {
        let header = self.read_avail_header()?;
        Ok(header.no_interrupt())
    }

    /// Sets or clears the notification-suppression flag via a
    /// read-modify-write of the header.
    pub fn set_interrupt_suppression(&self, suppress: bool) -> VirtioResult<()> {
        let mut header = self.read_avail_header()?;
        header.set_no_interrupt(suppress);
        self.write_avail_header(&header)?;
        Ok(())
    }

    /// Reads the `used_event` field (meaningful when `VIRTIO_F_EVENT_IDX` is
    /// negotiated) from the end of the ring.
    pub fn read_used_event(&self) -> VirtioResult<u16> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        let event_addr = self.used_event_addr();
        self.accessor
            .read_obj(event_addr)
            .map_err(|_| VirtioError::InvalidAddress)
    }

    /// Writes the `used_event` field at the end of the ring.
    pub fn write_used_event(&self, event: u16) -> VirtioResult<()> {
        if !self.is_valid() {
            return Err(VirtioError::QueueNotReady);
        }

        let event_addr = self.used_event_addr();
        self.accessor
            .write_obj(event_addr, event)
            .map_err(|_| VirtioError::InvalidAddress)
    }
}
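
// --- Usage sketch (not part of the original module) --------------------------
// A minimal sketch of how a device backend might drain newly published
// descriptor heads from the available ring using the methods above. The
// function name `drain_available_example` and the use of `alloc::vec::Vec`
// are assumptions made for this illustration only.
#[allow(dead_code)]
fn drain_available_example<T: GuestMemoryAccessor + Clone>(
    ring: &mut AvailableRing<T>,
) -> VirtioResult<alloc::vec::Vec<u16>> {
    let mut heads = alloc::vec::Vec::new();
    // Entries published by the driver since the last call.
    let count = ring.get_available_count()?;
    for i in 0..count {
        // Ring slots wrap modulo the queue size; read_avail_ring_entry
        // applies the reduction internally.
        let ring_index = ring.last_avail_idx.wrapping_add(i);
        heads.push(ring.read_avail_ring_entry(ring_index)?);
    }
    // Advance the consumed index so the next call only sees new entries.
    let next = ring.last_avail_idx.wrapping_add(count);
    ring.update_last_avail_idx(next);
    Ok(heads)
}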