use std::fs::File;
use std::io;
use std::ops::{Deref, DerefMut};
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::result::Result;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};

use virtio_queue::{Error as VirtQueError, Queue, QueueT};
use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;

/// Trait for objects returned by `VringT::get_ref()`.
pub trait VringStateGuard<'a, M: GuestAddressSpace> {
    /// Type of the guard object holding a reference to the `VringState` object.
    type G: Deref<Target = VringState<M>>;
}

/// Trait for objects returned by `VringT::get_mut()`.
pub trait VringStateMutGuard<'a, M: GuestAddressSpace> {
    /// Type of the guard object holding a mutable reference to the `VringState` object.
    type G: DerefMut<Target = VringState<M>>;
}

/// Trait for vring objects, providing the operations a vhost-user backend needs to
/// service a virtio queue.
pub trait VringT<M: GuestAddressSpace>:
    for<'a> VringStateGuard<'a, M> + for<'a> VringStateMutGuard<'a, M>
{
    /// Create a new instance of Vring.
    fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError>
    where
        Self: Sized;

    /// Get an immutable reference to the vring state.
    fn get_ref(&self) -> <Self as VringStateGuard<M>>::G;

    /// Get a mutable reference to the vring state.
    fn get_mut(&self) -> <Self as VringStateMutGuard<M>>::G;

    /// Add a used descriptor into the used queue.
    fn add_used(&self, desc_index: u16, len: u32) -> Result<(), VirtQueError>;

    /// Notify the vhost-user frontend that used descriptors have been put into the used queue.
    fn signal_used_queue(&self) -> io::Result<()>;

    /// Enable event notification for the queue.
    fn enable_notification(&self) -> Result<bool, VirtQueError>;

    /// Disable event notification for the queue.
    fn disable_notification(&self) -> Result<(), VirtQueError>;

    /// Check whether a notification to the guest is needed.
    fn needs_notification(&self) -> Result<bool, VirtQueError>;

    /// Set the vring enabled state.
    fn set_enabled(&self, enabled: bool);

    /// Set the queue addresses for the descriptor table, available ring and used ring.
    fn set_queue_info(
        &self,
        desc_table: u64,
        avail_ring: u64,
        used_ring: u64,
    ) -> Result<(), VirtQueError>;

    /// Get the next available index of the queue.
    fn queue_next_avail(&self) -> u16;

    /// Set the next available index of the queue.
    fn set_queue_next_avail(&self, base: u16);

    /// Set the next used index of the queue.
    fn set_queue_next_used(&self, idx: u16);

    /// Get the index of the next entry in the used ring.
    fn queue_used_idx(&self) -> Result<u16, VirtQueError>;

    /// Set the configured queue size.
    fn set_queue_size(&self, num: u16);

    /// Enable or disable the event index feature for the queue.
    fn set_queue_event_idx(&self, enabled: bool);

    /// Set the queue ready state.
    fn set_queue_ready(&self, ready: bool);

    /// Set the kick eventfd for the queue.
    fn set_kick(&self, file: Option<File>);

    /// Read an event from the kick eventfd and return whether the vring is enabled.
    fn read_kick(&self) -> io::Result<bool>;

    /// Set the call eventfd for the queue.
    fn set_call(&self, file: Option<File>);

    /// Set the err eventfd for the queue.
    fn set_err(&self, file: Option<File>);
}

/// Struct to maintain raw state information for a vhost-user queue.
///
/// This struct maintains all information of a virtio queue, and it can be used as a
/// `VringT` object in single-threaded contexts.
pub struct VringState<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
    queue: Queue,
    kick: Option<EventFd>,
    call: Option<EventFd>,
    err: Option<EventFd>,
    enabled: bool,
    mem: M,
}

impl<M: GuestAddressSpace> VringState<M> {
    /// Create a new instance of Vring.
    fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError> {
        Ok(VringState {
            queue: Queue::new(max_queue_size)?,
            kick: None,
            call: None,
            err: None,
            enabled: false,
            mem,
        })
    }

    /// Get an immutable reference to the underlying raw `Queue` object.
    pub fn get_queue(&self) -> &Queue {
        &self.queue
    }

    /// Get a mutable reference to the underlying raw `Queue` object.
    pub fn get_queue_mut(&mut self) -> &mut Queue {
        &mut self.queue
    }

    /// Add a used descriptor into the used queue.
    pub fn add_used(&mut self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
        self.queue
            .add_used(self.mem.memory().deref(), desc_index, len)
    }

    /// Notify the vhost-user frontend that used descriptors have been put into the used queue.
    pub fn signal_used_queue(&self) -> io::Result<()> {
        if let Some(call) = self.call.as_ref() {
            call.write(1)
        } else {
            Ok(())
        }
    }

    /// Enable event notification for the queue.
    pub fn enable_notification(&mut self) -> Result<bool, VirtQueError> {
        self.queue.enable_notification(self.mem.memory().deref())
    }

    /// Disable event notification for the queue.
    pub fn disable_notification(&mut self) -> Result<(), VirtQueError> {
        self.queue.disable_notification(self.mem.memory().deref())
    }

    /// Check whether a notification to the guest is needed.
    pub fn needs_notification(&mut self) -> Result<bool, VirtQueError> {
        self.queue.needs_notification(self.mem.memory().deref())
    }

    /// Set the vring enabled state.
    pub fn set_enabled(&mut self, enabled: bool) {
        self.enabled = enabled;
    }

    /// Set the queue addresses for the descriptor table, available ring and used ring.
    pub fn set_queue_info(
        &mut self,
        desc_table: u64,
        avail_ring: u64,
        used_ring: u64,
    ) -> Result<(), VirtQueError> {
        self.queue
            .try_set_desc_table_address(GuestAddress(desc_table))?;
        self.queue
            .try_set_avail_ring_address(GuestAddress(avail_ring))?;
        self.queue
            .try_set_used_ring_address(GuestAddress(used_ring))
    }

    /// Get the next available index of the queue.
    fn queue_next_avail(&self) -> u16 {
        self.queue.next_avail()
    }

    /// Set the next available index of the queue.
    fn set_queue_next_avail(&mut self, base: u16) {
        self.queue.set_next_avail(base);
    }

    /// Set the next used index of the queue.
    fn set_queue_next_used(&mut self, idx: u16) {
        self.queue.set_next_used(idx);
    }

    /// Get the index of the next entry in the used ring.
    fn queue_used_idx(&self) -> Result<u16, VirtQueError> {
        self.queue
            .used_idx(self.mem.memory().deref(), Ordering::Relaxed)
            .map(|idx| idx.0)
    }

    /// Set the configured queue size.
    fn set_queue_size(&mut self, num: u16) {
        self.queue.set_size(num);
    }

    /// Enable or disable the event index feature for the queue.
    fn set_queue_event_idx(&mut self, enabled: bool) {
        self.queue.set_event_idx(enabled);
    }

    /// Set the queue ready state.
    fn set_queue_ready(&mut self, ready: bool) {
        self.queue.set_ready(ready);
    }

    /// Get the `EventFd` for kick.
    pub fn get_kick(&self) -> &Option<EventFd> {
        &self.kick
    }

    /// Set the `EventFd` for kick.
    fn set_kick(&mut self, file: Option<File>) {
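        // SAFETY: `EventFd` requires sole ownership of its fd, and `File` gives up
        // ownership of the fd via `into_raw_fd()`, so taking over the fd here is safe.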
        self.kick = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
    }

    /// Read an event from the kick `EventFd` and return whether the vring is enabled.
    fn read_kick(&self) -> io::Result<bool> {
        if let Some(kick) = &self.kick {
            kick.read()?;
        }

        Ok(self.enabled)
    }

    /// Set the `EventFd` for call.
    fn set_call(&mut self, file: Option<File>) {
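        // SAFETY: see the comment in set_kick().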
        self.call = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
    }

    /// Get the `EventFd` for call.
    pub fn get_call(&self) -> &Option<EventFd> {
        &self.call
    }

    /// Set the `EventFd` for err.
    fn set_err(&mut self, file: Option<File>) {
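        // SAFETY: see the comment in set_kick().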
        self.err = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
    }
}
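
/// A `VringState` object protected by a `Mutex` for multi-threaded contexts.
///
/// A minimal usage sketch (assuming this module is compiled into the
/// `vhost_user_backend` crate, which re-exports `VringMutex` and the `VringT` trait):
///
/// ```no_run
/// use vhost_user_backend::{VringMutex, VringT};
/// use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
///
/// // Build a guest memory object backing the queue rings.
/// let mem = GuestMemoryAtomic::new(
///     GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
/// );
/// let vring = VringMutex::new(mem, 0x1000).unwrap();
///
/// // Configure the queue before marking it ready.
/// vring.set_queue_size(0x200);
/// vring.set_queue_info(0x100100, 0x100200, 0x100300).unwrap();
/// vring.set_queue_ready(true);
/// ```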
#[derive(Clone)]
pub struct VringMutex<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
    state: Arc<Mutex<VringState<M>>>,
}

impl<M: GuestAddressSpace> VringMutex<M> {
    /// Get a locked guard to the underlying raw `VringState` object.
    fn lock(&self) -> MutexGuard<'_, VringState<M>> {
        self.state.lock().unwrap()
    }
}

impl<'a, M: 'a + GuestAddressSpace> VringStateGuard<'a, M> for VringMutex<M> {
    type G = MutexGuard<'a, VringState<M>>;
}

impl<'a, M: 'a + GuestAddressSpace> VringStateMutGuard<'a, M> for VringMutex<M> {
    type G = MutexGuard<'a, VringState<M>>;
}

impl<M: 'static + GuestAddressSpace> VringT<M> for VringMutex<M> {
    fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError> {
        Ok(VringMutex {
            state: Arc::new(Mutex::new(VringState::new(mem, max_queue_size)?)),
        })
    }

    fn get_ref(&self) -> <Self as VringStateGuard<'_, M>>::G {
        self.state.lock().unwrap()
    }

    fn get_mut(&self) -> <Self as VringStateMutGuard<'_, M>>::G {
        self.lock()
    }

    fn add_used(&self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
        self.lock().add_used(desc_index, len)
    }

    fn signal_used_queue(&self) -> io::Result<()> {
        self.get_ref().signal_used_queue()
    }

    fn enable_notification(&self) -> Result<bool, VirtQueError> {
        self.lock().enable_notification()
    }

    fn disable_notification(&self) -> Result<(), VirtQueError> {
        self.lock().disable_notification()
    }

    fn needs_notification(&self) -> Result<bool, VirtQueError> {
        self.lock().needs_notification()
    }

    fn set_enabled(&self, enabled: bool) {
        self.lock().set_enabled(enabled)
    }

    fn set_queue_info(
        &self,
        desc_table: u64,
        avail_ring: u64,
        used_ring: u64,
    ) -> Result<(), VirtQueError> {
        self.lock()
            .set_queue_info(desc_table, avail_ring, used_ring)
    }

    fn queue_next_avail(&self) -> u16 {
        self.get_ref().queue_next_avail()
    }

    fn set_queue_next_avail(&self, base: u16) {
        self.lock().set_queue_next_avail(base)
    }

    fn set_queue_next_used(&self, idx: u16) {
        self.lock().set_queue_next_used(idx)
    }

    fn queue_used_idx(&self) -> Result<u16, VirtQueError> {
        self.lock().queue_used_idx()
    }

    fn set_queue_size(&self, num: u16) {
        self.lock().set_queue_size(num);
    }

    fn set_queue_event_idx(&self, enabled: bool) {
        self.lock().set_queue_event_idx(enabled);
    }

    fn set_queue_ready(&self, ready: bool) {
        self.lock().set_queue_ready(ready);
    }

    fn set_kick(&self, file: Option<File>) {
        self.lock().set_kick(file);
    }

    fn read_kick(&self) -> io::Result<bool> {
        self.get_ref().read_kick()
    }

    fn set_call(&self, file: Option<File>) {
        self.lock().set_call(file)
    }

    fn set_err(&self, file: Option<File>) {
        self.lock().set_err(file)
    }
}
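
/// A `VringState` object protected by a `RwLock` for multi-threaded contexts.
///
/// Usage mirrors [`VringMutex`]: read-only operations such as `signal_used_queue()` take
/// the read lock, while mutating operations take the write lock.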
#[derive(Clone)]
pub struct VringRwLock<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
    state: Arc<RwLock<VringState<M>>>,
}

impl<M: GuestAddressSpace> VringRwLock<M> {
    /// Get a write guard to the underlying raw `VringState` object.
    fn write_lock(&self) -> RwLockWriteGuard<'_, VringState<M>> {
        self.state.write().unwrap()
    }
}

impl<'a, M: 'a + GuestAddressSpace> VringStateGuard<'a, M> for VringRwLock<M> {
    type G = RwLockReadGuard<'a, VringState<M>>;
}

impl<'a, M: 'a + GuestAddressSpace> VringStateMutGuard<'a, M> for VringRwLock<M> {
    type G = RwLockWriteGuard<'a, VringState<M>>;
}

impl<M: 'static + GuestAddressSpace> VringT<M> for VringRwLock<M> {
    fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError> {
        Ok(VringRwLock {
            state: Arc::new(RwLock::new(VringState::new(mem, max_queue_size)?)),
        })
    }

    fn get_ref(&self) -> <Self as VringStateGuard<'_, M>>::G {
        self.state.read().unwrap()
    }

    fn get_mut(&self) -> <Self as VringStateMutGuard<'_, M>>::G {
        self.write_lock()
    }

    fn add_used(&self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
        self.write_lock().add_used(desc_index, len)
    }

    fn signal_used_queue(&self) -> io::Result<()> {
        self.get_ref().signal_used_queue()
    }

    fn enable_notification(&self) -> Result<bool, VirtQueError> {
        self.write_lock().enable_notification()
    }

    fn disable_notification(&self) -> Result<(), VirtQueError> {
        self.write_lock().disable_notification()
    }

    fn needs_notification(&self) -> Result<bool, VirtQueError> {
        self.write_lock().needs_notification()
    }

    fn set_enabled(&self, enabled: bool) {
        self.write_lock().set_enabled(enabled)
    }

    fn set_queue_info(
        &self,
        desc_table: u64,
        avail_ring: u64,
        used_ring: u64,
    ) -> Result<(), VirtQueError> {
        self.write_lock()
            .set_queue_info(desc_table, avail_ring, used_ring)
    }

    fn queue_next_avail(&self) -> u16 {
        self.get_ref().queue_next_avail()
    }

    fn set_queue_next_avail(&self, base: u16) {
        self.write_lock().set_queue_next_avail(base)
    }

    fn set_queue_next_used(&self, idx: u16) {
        self.write_lock().set_queue_next_used(idx)
    }

    fn queue_used_idx(&self) -> Result<u16, VirtQueError> {
        self.get_ref().queue_used_idx()
    }

    fn set_queue_size(&self, num: u16) {
        self.write_lock().set_queue_size(num);
    }

    fn set_queue_event_idx(&self, enabled: bool) {
        self.write_lock().set_queue_event_idx(enabled);
    }

    fn set_queue_ready(&self, ready: bool) {
        self.write_lock().set_queue_ready(ready);
    }

    fn set_kick(&self, file: Option<File>) {
        self.write_lock().set_kick(file);
    }

    fn read_kick(&self) -> io::Result<bool> {
        self.get_ref().read_kick()
    }

    fn set_call(&self, file: Option<File>) {
        self.write_lock().set_call(file)
    }

    fn set_err(&self, file: Option<File>) {
        self.write_lock().set_err(file)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::os::unix::io::AsRawFd;
    use vm_memory::bitmap::AtomicBitmap;
    use vmm_sys_util::eventfd::EventFd;

    #[test]
    fn test_new_vring() {
        let mem = GuestMemoryAtomic::new(
            GuestMemoryMmap::<AtomicBitmap>::from_ranges(&[(GuestAddress(0x100000), 0x10000)])
                .unwrap(),
        );
        let vring = VringMutex::new(mem, 0x1000).unwrap();

        assert!(vring.get_ref().get_kick().is_none());
        assert!(!vring.get_mut().enabled);
        assert!(!vring.lock().queue.ready());
        assert!(!vring.lock().queue.event_idx_enabled());

        vring.set_enabled(true);
        assert!(vring.get_ref().enabled);

        vring.set_queue_info(0x100100, 0x100200, 0x100300).unwrap();
        assert_eq!(vring.lock().get_queue().desc_table(), 0x100100);
        assert_eq!(vring.lock().get_queue().avail_ring(), 0x100200);
        assert_eq!(vring.lock().get_queue().used_ring(), 0x100300);

        assert_eq!(vring.queue_next_avail(), 0);
        vring.set_queue_next_avail(0x20);
        assert_eq!(vring.queue_next_avail(), 0x20);

        vring.set_queue_size(0x200);
        assert_eq!(vring.lock().queue.size(), 0x200);

        vring.set_queue_event_idx(true);
        assert!(vring.lock().queue.event_idx_enabled());

        vring.set_queue_ready(true);
        assert!(vring.lock().queue.ready());
    }

    #[test]
    fn test_vring_set_fd() {
        let mem = GuestMemoryAtomic::new(
            GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
        );
        let vring = VringMutex::new(mem, 0x1000).unwrap();

        vring.set_enabled(true);
        assert!(vring.get_ref().enabled);

        let eventfd = EventFd::new(0).unwrap();
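        // SAFETY: `file` shares the fd owned by `eventfd`; `eventfd` is leaked with
        // `std::mem::forget()` below, so the fd is not closed twice.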
        let file = unsafe { File::from_raw_fd(eventfd.as_raw_fd()) };
        assert!(vring.get_mut().kick.is_none());
        assert!(vring.read_kick().unwrap());
        vring.set_kick(Some(file));
        eventfd.write(1).unwrap();
        assert!(vring.read_kick().unwrap());
        assert!(vring.get_ref().kick.is_some());
        vring.set_kick(None);
        assert!(vring.get_ref().kick.is_none());
        std::mem::forget(eventfd);

        let eventfd = EventFd::new(0).unwrap();
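        // SAFETY: as above, `eventfd` is leaked below to avoid double-closing the shared fd.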
        let file = unsafe { File::from_raw_fd(eventfd.as_raw_fd()) };
        assert!(vring.get_ref().call.is_none());
        vring.set_call(Some(file));
        assert!(vring.get_ref().call.is_some());
        vring.set_call(None);
        assert!(vring.get_ref().call.is_none());
        std::mem::forget(eventfd);

        let eventfd = EventFd::new(0).unwrap();
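        // SAFETY: as above, `eventfd` is leaked below to avoid double-closing the shared fd.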
        let file = unsafe { File::from_raw_fd(eventfd.as_raw_fd()) };
        assert!(vring.get_ref().err.is_none());
        vring.set_err(Some(file));
        assert!(vring.get_ref().err.is_some());
        vring.set_err(None);
        assert!(vring.get_ref().err.is_none());
        std::mem::forget(eventfd);
    }
}