1use std::fs::File;
22use std::io::Result;
23use std::ops::Deref;
24use std::sync::{Arc, Mutex, RwLock};
25
26use vhost::vhost_user::message::{
27 VhostTransferStateDirection, VhostTransferStatePhase, VhostUserProtocolFeatures,
28 VhostUserSharedMsg,
29};
30use vhost::vhost_user::Backend;
31use vm_memory::bitmap::Bitmap;
32use vmm_sys_util::epoll::EventSet;
33use vmm_sys_util::eventfd::EventFd;
34
35use vhost::vhost_user::GpuBackend;
36
37use super::vring::VringT;
38use super::GM;
39
/// Trait for vhost-user backend services that use interior mutability.
///
/// Every method takes `&self`, so an implementation that keeps mutable state
/// must synchronize internally; see [`VhostUserBackendMut`] for the
/// `&mut self` variant that can instead be wrapped in a `Mutex`/`RwLock`.
pub trait VhostUserBackend: Send + Sync {
    /// Bitmap type used to log writes to guest memory pages.
    type Bitmap: Bitmap + 'static;
    /// Concrete vring type this backend processes.
    type Vring: VringT<GM<Self::Bitmap>>;

    /// Number of queues the device supports.
    fn num_queues(&self) -> usize;

    /// Maximum number of entries in a single queue.
    fn max_queue_size(&self) -> usize;

    /// Virtio feature bits offered by the device.
    fn features(&self) -> u64;

    /// Notification that the frontend acknowledged `_features`.
    ///
    /// The default implementation ignores the acknowledgement.
    fn acked_features(&self, _features: u64) {}

    /// Vhost-user protocol features supported by the backend.
    fn protocol_features(&self) -> VhostUserProtocolFeatures;

    /// Reset the device to its initial state. The default is a no-op.
    fn reset_device(&self) {}

    /// Enable or disable the event-index notification mode for the queues.
    fn set_event_idx(&self, enabled: bool);

    /// Read `_size` bytes of the device configuration space at `_offset`.
    ///
    /// The default implementation reports an empty configuration space.
    fn get_config(&self, _offset: u32, _size: u32) -> Vec<u8> {
        Vec::new()
    }

    /// Write `_buf` into the device configuration space at `_offset`.
    ///
    /// The default implementation accepts and discards the data.
    fn set_config(&self, _offset: u32, _buf: &[u8]) -> Result<()> {
        Ok(())
    }

    /// Replace the backend's view of the guest memory map.
    fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()>;

    /// Provide the channel used to send requests back to the frontend.
    /// The default implementation drops it.
    fn set_backend_req_fd(&self, _backend: Backend) {}

    /// Return a file backing the shared object identified by `_uuid`.
    ///
    /// The default implementation reports the operation as unsupported.
    fn get_shared_object(&self, _uuid: VhostUserSharedMsg) -> Result<File> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support get shared object",
        ))
    }

    /// Accept the GPU socket carried by `VHOST_USER_GPU_SET_SOCKET`.
    ///
    /// The default implementation reports the operation as unsupported.
    fn set_gpu_socket(&self, _gpu_backend: GpuBackend) -> Result<()> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "backend does not support set_gpu_socket() / VHOST_USER_GPU_SET_SOCKET",
        ))
    }

    /// Queue-to-thread assignment: entry `i` is a bitmask of the queues
    /// served by worker thread `i`.
    ///
    /// The default (a single all-ones mask) serves every queue from one
    /// worker thread.
    fn queues_per_thread(&self) -> Vec<u64> {
        vec![0xffff_ffff]
    }

    /// EventFd that, when signalled, asks worker thread `_thread_index` to
    /// exit its event loop. The default provides no exit event.
    fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
        None
    }

    /// Handle `device_event` with readiness flags `evset` on the vrings
    /// owned by worker thread `thread_id`.
    fn handle_event(
        &self,
        device_event: u16,
        evset: EventSet,
        vrings: &[Self::Vring],
        thread_id: usize,
    ) -> Result<()>;

    /// Start a device state transfer over `_file` for the given
    /// `_direction`/`_phase`; may return a replacement file descriptor.
    ///
    /// The default implementation reports state transfer as unsupported.
    fn set_device_state_fd(
        &self,
        _direction: VhostTransferStateDirection,
        _phase: VhostTransferStatePhase,
        _file: File,
    ) -> Result<Option<File>> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support state transfer",
        ))
    }

    /// Report whether the preceding device state transfer succeeded.
    ///
    /// The default implementation reports state transfer as unsupported.
    fn check_device_state(&self) -> Result<()> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support state transfer",
        ))
    }
}
183
/// Mutable-state variant of [`VhostUserBackend`].
///
/// State-changing methods take `&mut self`; wrap an implementation in a
/// `Mutex` or `RwLock` to obtain a [`VhostUserBackend`] via the blanket
/// impls in this module.
pub trait VhostUserBackendMut: Send + Sync {
    /// Bitmap type used to log writes to guest memory pages.
    type Bitmap: Bitmap + 'static;
    /// Concrete vring type this backend processes.
    type Vring: VringT<GM<Self::Bitmap>>;

    /// Number of queues the device supports.
    fn num_queues(&self) -> usize;

    /// Maximum number of entries in a single queue.
    fn max_queue_size(&self) -> usize;

    /// Virtio feature bits offered by the device.
    fn features(&self) -> u64;

    /// Notification that the frontend acknowledged `_features`.
    ///
    /// The default implementation ignores the acknowledgement.
    fn acked_features(&mut self, _features: u64) {}

    /// Vhost-user protocol features supported by the backend.
    fn protocol_features(&self) -> VhostUserProtocolFeatures;

    /// Reset the device to its initial state. The default is a no-op.
    fn reset_device(&mut self) {}

    /// Enable or disable the event-index notification mode for the queues.
    fn set_event_idx(&mut self, enabled: bool);

    /// Read `_size` bytes of the device configuration space at `_offset`.
    ///
    /// The default implementation reports an empty configuration space.
    fn get_config(&self, _offset: u32, _size: u32) -> Vec<u8> {
        Vec::new()
    }

    /// Write `_buf` into the device configuration space at `_offset`.
    ///
    /// The default implementation accepts and discards the data.
    fn set_config(&mut self, _offset: u32, _buf: &[u8]) -> Result<()> {
        Ok(())
    }

    /// Replace the backend's view of the guest memory map.
    fn update_memory(&mut self, mem: GM<Self::Bitmap>) -> Result<()>;

    /// Provide the channel used to send requests back to the frontend.
    /// The default implementation drops it.
    fn set_backend_req_fd(&mut self, _backend: Backend) {}

    /// Return a file backing the shared object identified by `_uuid`.
    ///
    /// The default implementation reports the operation as unsupported.
    fn get_shared_object(&mut self, _uuid: VhostUserSharedMsg) -> Result<File> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support get shared object",
        ))
    }

    /// Accept the GPU socket carried by `VHOST_USER_GPU_SET_SOCKET`.
    ///
    /// The default implementation reports the operation as unsupported.
    fn set_gpu_socket(&mut self, _gpu_backend: GpuBackend) -> Result<()> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "backend does not support set_gpu_socket() / VHOST_USER_GPU_SET_SOCKET",
        ))
    }

    /// Queue-to-thread assignment: entry `i` is a bitmask of the queues
    /// served by worker thread `i`.
    ///
    /// The default (a single all-ones mask) serves every queue from one
    /// worker thread.
    fn queues_per_thread(&self) -> Vec<u64> {
        vec![0xffff_ffff]
    }

    /// EventFd that, when signalled, asks worker thread `_thread_index` to
    /// exit its event loop. The default provides no exit event.
    fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
        None
    }

    /// Handle `device_event` with readiness flags `evset` on the vrings
    /// owned by worker thread `thread_id`.
    fn handle_event(
        &mut self,
        device_event: u16,
        evset: EventSet,
        vrings: &[Self::Vring],
        thread_id: usize,
    ) -> Result<()>;

    /// Start a device state transfer over `_file` for the given
    /// `_direction`/`_phase`; may return a replacement file descriptor.
    ///
    /// The default implementation reports state transfer as unsupported.
    fn set_device_state_fd(
        &mut self,
        _direction: VhostTransferStateDirection,
        _phase: VhostTransferStatePhase,
        _file: File,
    ) -> Result<Option<File>> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support state transfer",
        ))
    }

    /// Report whether the preceding device state transfer succeeded.
    ///
    /// The default implementation reports state transfer as unsupported.
    fn check_device_state(&self) -> Result<()> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support state transfer",
        ))
    }
}
324
325impl<T: VhostUserBackend> VhostUserBackend for Arc<T> {
326 type Bitmap = T::Bitmap;
327 type Vring = T::Vring;
328
329 fn num_queues(&self) -> usize {
330 self.deref().num_queues()
331 }
332
333 fn max_queue_size(&self) -> usize {
334 self.deref().max_queue_size()
335 }
336
337 fn features(&self) -> u64 {
338 self.deref().features()
339 }
340
341 fn acked_features(&self, features: u64) {
342 self.deref().acked_features(features)
343 }
344
345 fn protocol_features(&self) -> VhostUserProtocolFeatures {
346 self.deref().protocol_features()
347 }
348
349 fn reset_device(&self) {
350 self.deref().reset_device()
351 }
352
353 fn set_event_idx(&self, enabled: bool) {
354 self.deref().set_event_idx(enabled)
355 }
356
357 fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
358 self.deref().get_config(offset, size)
359 }
360
361 fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> {
362 self.deref().set_config(offset, buf)
363 }
364
365 fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()> {
366 self.deref().update_memory(mem)
367 }
368
369 fn set_backend_req_fd(&self, backend: Backend) {
370 self.deref().set_backend_req_fd(backend)
371 }
372
373 fn get_shared_object(&self, uuid: VhostUserSharedMsg) -> Result<File> {
374 self.deref().get_shared_object(uuid)
375 }
376
377 fn set_gpu_socket(&self, gpu_backend: GpuBackend) -> Result<()> {
378 self.deref().set_gpu_socket(gpu_backend)
379 }
380
381 fn queues_per_thread(&self) -> Vec<u64> {
382 self.deref().queues_per_thread()
383 }
384
385 fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
386 self.deref().exit_event(thread_index)
387 }
388
389 fn handle_event(
390 &self,
391 device_event: u16,
392 evset: EventSet,
393 vrings: &[Self::Vring],
394 thread_id: usize,
395 ) -> Result<()> {
396 self.deref()
397 .handle_event(device_event, evset, vrings, thread_id)
398 }
399
400 fn set_device_state_fd(
401 &self,
402 direction: VhostTransferStateDirection,
403 phase: VhostTransferStatePhase,
404 file: File,
405 ) -> Result<Option<File>> {
406 self.deref().set_device_state_fd(direction, phase, file)
407 }
408
409 fn check_device_state(&self) -> Result<()> {
410 self.deref().check_device_state()
411 }
412}
413
414impl<T: VhostUserBackendMut> VhostUserBackend for Mutex<T> {
415 type Bitmap = T::Bitmap;
416 type Vring = T::Vring;
417
418 fn num_queues(&self) -> usize {
419 self.lock().unwrap().num_queues()
420 }
421
422 fn max_queue_size(&self) -> usize {
423 self.lock().unwrap().max_queue_size()
424 }
425
426 fn features(&self) -> u64 {
427 self.lock().unwrap().features()
428 }
429
430 fn acked_features(&self, features: u64) {
431 self.lock().unwrap().acked_features(features)
432 }
433
434 fn protocol_features(&self) -> VhostUserProtocolFeatures {
435 self.lock().unwrap().protocol_features()
436 }
437
438 fn reset_device(&self) {
439 self.lock().unwrap().reset_device()
440 }
441
442 fn set_event_idx(&self, enabled: bool) {
443 self.lock().unwrap().set_event_idx(enabled)
444 }
445
446 fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
447 self.lock().unwrap().get_config(offset, size)
448 }
449
450 fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> {
451 self.lock().unwrap().set_config(offset, buf)
452 }
453
454 fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()> {
455 self.lock().unwrap().update_memory(mem)
456 }
457
458 fn set_backend_req_fd(&self, backend: Backend) {
459 self.lock().unwrap().set_backend_req_fd(backend)
460 }
461
462 fn get_shared_object(&self, uuid: VhostUserSharedMsg) -> Result<File> {
463 self.lock().unwrap().get_shared_object(uuid)
464 }
465
466 fn set_gpu_socket(&self, gpu_backend: GpuBackend) -> Result<()> {
467 self.lock().unwrap().set_gpu_socket(gpu_backend)
468 }
469
470 fn queues_per_thread(&self) -> Vec<u64> {
471 self.lock().unwrap().queues_per_thread()
472 }
473
474 fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
475 self.lock().unwrap().exit_event(thread_index)
476 }
477
478 fn handle_event(
479 &self,
480 device_event: u16,
481 evset: EventSet,
482 vrings: &[Self::Vring],
483 thread_id: usize,
484 ) -> Result<()> {
485 self.lock()
486 .unwrap()
487 .handle_event(device_event, evset, vrings, thread_id)
488 }
489
490 fn set_device_state_fd(
491 &self,
492 direction: VhostTransferStateDirection,
493 phase: VhostTransferStatePhase,
494 file: File,
495 ) -> Result<Option<File>> {
496 self.lock()
497 .unwrap()
498 .set_device_state_fd(direction, phase, file)
499 }
500
501 fn check_device_state(&self) -> Result<()> {
502 self.lock().unwrap().check_device_state()
503 }
504}
505
506impl<T: VhostUserBackendMut> VhostUserBackend for RwLock<T> {
507 type Bitmap = T::Bitmap;
508 type Vring = T::Vring;
509
510 fn num_queues(&self) -> usize {
511 self.read().unwrap().num_queues()
512 }
513
514 fn max_queue_size(&self) -> usize {
515 self.read().unwrap().max_queue_size()
516 }
517
518 fn features(&self) -> u64 {
519 self.read().unwrap().features()
520 }
521
522 fn acked_features(&self, features: u64) {
523 self.write().unwrap().acked_features(features)
524 }
525
526 fn protocol_features(&self) -> VhostUserProtocolFeatures {
527 self.read().unwrap().protocol_features()
528 }
529
530 fn reset_device(&self) {
531 self.write().unwrap().reset_device()
532 }
533
534 fn set_event_idx(&self, enabled: bool) {
535 self.write().unwrap().set_event_idx(enabled)
536 }
537
538 fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
539 self.read().unwrap().get_config(offset, size)
540 }
541
542 fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> {
543 self.write().unwrap().set_config(offset, buf)
544 }
545
546 fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()> {
547 self.write().unwrap().update_memory(mem)
548 }
549
550 fn set_backend_req_fd(&self, backend: Backend) {
551 self.write().unwrap().set_backend_req_fd(backend)
552 }
553
554 fn get_shared_object(&self, uuid: VhostUserSharedMsg) -> Result<File> {
555 self.write().unwrap().get_shared_object(uuid)
556 }
557
558 fn set_gpu_socket(&self, gpu_backend: GpuBackend) -> Result<()> {
559 self.write().unwrap().set_gpu_socket(gpu_backend)
560 }
561
562 fn queues_per_thread(&self) -> Vec<u64> {
563 self.read().unwrap().queues_per_thread()
564 }
565
566 fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
567 self.read().unwrap().exit_event(thread_index)
568 }
569
570 fn handle_event(
571 &self,
572 device_event: u16,
573 evset: EventSet,
574 vrings: &[Self::Vring],
575 thread_id: usize,
576 ) -> Result<()> {
577 self.write()
578 .unwrap()
579 .handle_event(device_event, evset, vrings, thread_id)
580 }
581
582 fn set_device_state_fd(
583 &self,
584 direction: VhostTransferStateDirection,
585 phase: VhostTransferStatePhase,
586 file: File,
587 ) -> Result<Option<File>> {
588 self.write()
589 .unwrap()
590 .set_device_state_fd(direction, phase, file)
591 }
592
593 fn check_device_state(&self) -> Result<()> {
594 self.read().unwrap().check_device_state()
595 }
596}
597
#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::VringRwLock;
    use libc::EFD_NONBLOCK;
    use std::sync::Mutex;
    use uuid::Uuid;
    use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};

    /// Minimal `VhostUserBackendMut` implementation used to exercise the
    /// `Mutex`/`RwLock` blanket impls in this module.
    pub struct MockVhostBackend {
        // Number of handle_event() calls observed.
        events: u64,
        // Last value received via set_event_idx().
        event_idx: bool,
        // Last value received via acked_features().
        acked_features: u64,
        // One exit eventfd per worker thread (see queues_per_thread()).
        exit_event_fds: Vec<EventFd>,
    }

    impl MockVhostBackend {
        /// Build a mock backend with zeroed state and one non-blocking exit
        /// eventfd per worker thread.
        pub fn new() -> Self {
            let mut backend = MockVhostBackend {
                events: 0,
                event_idx: false,
                acked_features: 0,
                exit_event_fds: vec![],
            };

            // queues_per_thread() determines the worker-thread count, so
            // allocate exactly one exit eventfd for each thread.
            backend.exit_event_fds = (0..backend.queues_per_thread().len())
                .map(|_| EventFd::new(EFD_NONBLOCK).unwrap())
                .collect();

            backend
        }
    }

    impl VhostUserBackendMut for MockVhostBackend {
        type Bitmap = ();
        type Vring = VringRwLock;

        fn num_queues(&self) -> usize {
            2
        }

        fn max_queue_size(&self) -> usize {
            256
        }

        fn features(&self) -> u64 {
            0xffff_ffff_ffff_ffff
        }

        fn acked_features(&mut self, features: u64) {
            self.acked_features = features;
        }

        fn protocol_features(&self) -> VhostUserProtocolFeatures {
            VhostUserProtocolFeatures::all()
        }

        fn reset_device(&mut self) {
            self.event_idx = false;
            self.events = 0;
            self.acked_features = 0;
        }

        fn set_event_idx(&mut self, enabled: bool) {
            self.event_idx = enabled;
        }

        // The tests below always query offset 0x200 / size 8; assert that
        // here so an unexpected access fails loudly inside the backend.
        fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
            assert_eq!(offset, 0x200);
            assert_eq!(size, 8);

            vec![0xa5u8; 8]
        }

        fn set_config(&mut self, offset: u32, buf: &[u8]) -> Result<()> {
            assert_eq!(offset, 0x200);
            assert_eq!(buf.len(), 8);
            assert_eq!(buf, &[0xa5u8; 8]);

            Ok(())
        }

        fn update_memory(&mut self, _atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>) -> Result<()> {
            Ok(())
        }

        fn set_backend_req_fd(&mut self, _backend: Backend) {}

        // Hand back a throwaway temp file in place of a real shared object.
        fn get_shared_object(&mut self, _uuid: VhostUserSharedMsg) -> Result<File> {
            let file = tempfile::tempfile().unwrap();
            Ok(file)
        }

        // Two worker threads, one queue each.
        fn queues_per_thread(&self) -> Vec<u64> {
            vec![1, 1]
        }

        fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
            // Returns None for an out-of-range thread index via `?`.
            Some(
                self.exit_event_fds
                    .get(thread_index)?
                    .try_clone()
                    .expect("Could not clone exit eventfd"),
            )
        }

        fn handle_event(
            &mut self,
            _device_event: u16,
            _evset: EventSet,
            _vrings: &[VringRwLock],
            _thread_id: usize,
        ) -> Result<()> {
            self.events += 1;

            Ok(())
        }
    }

    // Drive the mock through `Arc<Mutex<_>>`, i.e. the
    // `VhostUserBackend for Mutex<T>` blanket impl.
    #[test]
    fn test_new_mock_backend_mutex() {
        let backend = Arc::new(Mutex::new(MockVhostBackend::new()));

        assert_eq!(backend.num_queues(), 2);
        assert_eq!(backend.max_queue_size(), 256);
        assert_eq!(backend.features(), 0xffff_ffff_ffff_ffff);
        assert_eq!(
            backend.protocol_features(),
            VhostUserProtocolFeatures::all()
        );
        assert_eq!(backend.queues_per_thread(), [1, 1]);

        assert_eq!(backend.get_config(0x200, 8), vec![0xa5; 8]);
        backend.set_config(0x200, &[0xa5; 8]).unwrap();

        backend.acked_features(0xffff);
        assert_eq!(backend.lock().unwrap().acked_features, 0xffff);

        backend.set_event_idx(true);
        assert!(backend.lock().unwrap().event_idx);

        let _ = backend.exit_event(0).unwrap();

        let uuid = VhostUserSharedMsg {
            uuid: Uuid::new_v4(),
        };
        backend.get_shared_object(uuid).unwrap();

        let mem = GuestMemoryAtomic::new(
            GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
        );
        backend.update_memory(mem).unwrap();

        // reset_device() must clear all mutable state.
        backend.reset_device();
        assert!(backend.lock().unwrap().events == 0);
        assert!(!backend.lock().unwrap().event_idx);
        assert!(backend.lock().unwrap().acked_features == 0);
    }

    // Same scenario through `Arc<RwLock<_>>`, i.e. the
    // `VhostUserBackend for RwLock<T>` blanket impl; additionally drives
    // handle_event() with a real vring.
    #[test]
    fn test_new_mock_backend_rwlock() {
        let backend = Arc::new(RwLock::new(MockVhostBackend::new()));

        assert_eq!(backend.num_queues(), 2);
        assert_eq!(backend.max_queue_size(), 256);
        assert_eq!(backend.features(), 0xffff_ffff_ffff_ffff);
        assert_eq!(
            backend.protocol_features(),
            VhostUserProtocolFeatures::all()
        );
        assert_eq!(backend.queues_per_thread(), [1, 1]);

        assert_eq!(backend.get_config(0x200, 8), vec![0xa5; 8]);
        backend.set_config(0x200, &[0xa5; 8]).unwrap();

        backend.acked_features(0xffff);
        assert_eq!(backend.read().unwrap().acked_features, 0xffff);

        backend.set_event_idx(true);
        assert!(backend.read().unwrap().event_idx);

        let _ = backend.exit_event(0).unwrap();

        let mem = GuestMemoryAtomic::new(
            GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
        );
        backend.update_memory(mem.clone()).unwrap();

        let uuid = VhostUserSharedMsg {
            uuid: Uuid::new_v4(),
        };
        backend.get_shared_object(uuid).unwrap();

        let vring = VringRwLock::new(mem, 0x1000).unwrap();
        backend
            .handle_event(0x1, EventSet::IN, &[vring], 0)
            .unwrap();

        // reset_device() must clear all mutable state.
        backend.reset_device();
        assert!(backend.read().unwrap().events == 0);
        assert!(!backend.read().unwrap().event_idx);
        assert!(backend.read().unwrap().acked_features == 0);
    }
}