vhost_user_backend/
backend.rs

1// Copyright 2019 Intel Corporation. All Rights Reserved.
2// Copyright 2019-2021 Alibaba Cloud. All rights reserved.
3//
4// SPDX-License-Identifier: Apache-2.0
5
//! Traits for vhost user backend servers to implement virtio data plane services.
7//!
8//! Define two traits for vhost user backend servers to implement virtio data plane services.
9//! The only difference between the two traits is mutability. The [VhostUserBackend] trait is
10//! designed with interior mutability, so the implementor may choose the suitable way to protect
11//! itself from concurrent accesses. The [VhostUserBackendMut] is designed without interior
12//! mutability, and an implementation of:
13//! ```ignore
14//! impl<T: VhostUserBackendMut> VhostUserBackend for RwLock<T> { }
15//! ```
16//! is provided for convenience.
17//!
18//! [VhostUserBackend]: trait.VhostUserBackend.html
19//! [VhostUserBackendMut]: trait.VhostUserBackendMut.html
20
21use std::fs::File;
22use std::io::Result;
23use std::ops::Deref;
24use std::sync::{Arc, Mutex, RwLock};
25
26use vhost::vhost_user::message::{
27    VhostTransferStateDirection, VhostTransferStatePhase, VhostUserProtocolFeatures,
28    VhostUserSharedMsg,
29};
30use vhost::vhost_user::Backend;
31use vm_memory::bitmap::Bitmap;
32use vmm_sys_util::epoll::EventSet;
33use vmm_sys_util::eventfd::EventFd;
34
35use vhost::vhost_user::GpuBackend;
36
37use super::vring::VringT;
38use super::GM;
39
/// Trait with interior mutability for vhost user backend servers to implement concrete services.
///
/// To support multi-threading and asynchronous IO, we enforce `Send + Sync` bound.
pub trait VhostUserBackend: Send + Sync {
    /// Bitmap type used by the guest memory type to track dirty pages.
    type Bitmap: Bitmap + 'static;
    /// Vring type used to access the guest's virtqueues.
    type Vring: VringT<GM<Self::Bitmap>>;

    /// Get number of queues supported.
    fn num_queues(&self) -> usize;

    /// Get maximum queue size supported.
    fn max_queue_size(&self) -> usize;

    /// Get available virtio features.
    fn features(&self) -> u64;

    /// Set acknowledged virtio features.
    ///
    /// A default no-op implementation is provided for backends that do not need to track the
    /// acknowledged features.
    fn acked_features(&self, _features: u64) {}

    /// Get available vhost protocol features.
    fn protocol_features(&self) -> VhostUserProtocolFeatures;

    /// Reset the emulated device state.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn reset_device(&self) {}

    /// Enable or disable the virtio EVENT_IDX feature
    fn set_event_idx(&self, enabled: bool);

    /// Get virtio device configuration.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn get_config(&self, _offset: u32, _size: u32) -> Vec<u8> {
        Vec::new()
    }

    /// Set virtio device configuration.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn set_config(&self, _offset: u32, _buf: &[u8]) -> Result<()> {
        Ok(())
    }

    /// Update guest memory regions.
    fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()>;

    /// Set handler for communicating with the frontend by the backend communication channel.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn set_backend_req_fd(&self, _backend: Backend) {}

    /// This method retrieves a file descriptor for a shared object, identified by a unique UUID,
    /// which can be used by the front-end for DMA. If the shared object is found, it must return
    /// a File that the frontend can use. If the shared object does not exist, the backend should
    /// return an error (indicating no file descriptor is available).
    ///
    /// This function returns a `Result`, returning an error if the backend does not implement this
    /// function.
    fn get_shared_object(&self, _uuid: VhostUserSharedMsg) -> Result<File> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support get shared object",
        ))
    }

    /// Set handler for communicating with the frontend by the gpu specific backend communication
    /// channel.
    ///
    /// This function returns a `Result`, returning an error if the backend does not implement this
    /// function.
    fn set_gpu_socket(&self, _gpu_backend: GpuBackend) -> Result<()> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "backend does not support set_gpu_socket() / VHOST_USER_GPU_SET_SOCKET",
        ))
    }

    /// Get the map to map queue index to worker thread index.
    ///
    /// A return value of [2, 2, 4] means: the first two queues will be handled by worker thread 0,
    /// the following two queues will be handled by worker thread 1, and the last four queues will
    /// be handled by worker thread 2.
    ///
    /// The default implementation assigns all queues to a single worker thread.
    fn queues_per_thread(&self) -> Vec<u64> {
        vec![0xffff_ffff]
    }

    /// Provide an optional exit EventFd for the specified worker thread.
    ///
    /// The returned `EventFd` will be monitored for IO events. When the
    /// returned EventFd is written to, the worker thread will exit.
    fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
        None
    }

    /// Handle IO events for backend registered file descriptors.
    ///
    /// This function gets called if the backend registered some additional listeners onto specific
    /// file descriptors. The library can handle virtqueues on its own, but does not know what to
    /// do with events happening on custom listeners.
    fn handle_event(
        &self,
        device_event: u16,
        evset: EventSet,
        vrings: &[Self::Vring],
        thread_id: usize,
    ) -> Result<()>;

    /// Initiate transfer of internal state for the purpose of migration to/from the back-end.
    ///
    /// Depending on `direction`, the state should either be saved (i.e. serialized and written to
    /// `file`) or loaded (i.e. read from `file` and deserialized). The back-end can choose to use
    /// a different channel than file. If so, it must return a File that the front-end can use.
    /// Note that this function must not block during transfer, i.e. I/O to/from `file` must be
    /// done outside of this function.
    fn set_device_state_fd(
        &self,
        _direction: VhostTransferStateDirection,
        _phase: VhostTransferStatePhase,
        _file: File,
    ) -> Result<Option<File>> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support state transfer",
        ))
    }

    /// After transferring internal state, check for any resulting errors, including potential
    /// deserialization errors when loading state.
    ///
    /// Although this function returns a `Result`, the front-end will not receive any details about
    /// this error.
    fn check_device_state(&self) -> Result<()> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support state transfer",
        ))
    }
}
183
/// Trait without interior mutability for vhost user backend servers to implement concrete services.
pub trait VhostUserBackendMut: Send + Sync {
    /// Bitmap type used by the guest memory type to track dirty pages.
    type Bitmap: Bitmap + 'static;
    /// Vring type used to access the guest's virtqueues.
    type Vring: VringT<GM<Self::Bitmap>>;

    /// Get number of queues supported.
    fn num_queues(&self) -> usize;

    /// Get maximum queue size supported.
    fn max_queue_size(&self) -> usize;

    /// Get available virtio features.
    fn features(&self) -> u64;

    /// Set acknowledged virtio features.
    ///
    /// A default no-op implementation is provided for backends that do not need to track the
    /// acknowledged features.
    fn acked_features(&mut self, _features: u64) {}

    /// Get available vhost protocol features.
    fn protocol_features(&self) -> VhostUserProtocolFeatures;

    /// Reset the emulated device state.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn reset_device(&mut self) {}

    /// Enable or disable the virtio EVENT_IDX feature
    fn set_event_idx(&mut self, enabled: bool);

    /// Get virtio device configuration.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn get_config(&self, _offset: u32, _size: u32) -> Vec<u8> {
        Vec::new()
    }

    /// Set virtio device configuration.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn set_config(&mut self, _offset: u32, _buf: &[u8]) -> Result<()> {
        Ok(())
    }

    /// Update guest memory regions.
    fn update_memory(&mut self, mem: GM<Self::Bitmap>) -> Result<()>;

    /// Set handler for communicating with the frontend by the backend communication channel.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn set_backend_req_fd(&mut self, _backend: Backend) {}

    /// This method retrieves a file descriptor for a shared object, identified by a unique UUID,
    /// which can be used by the front-end for DMA. If the shared object is found, it must return
    /// a File that the frontend can use. If the shared object does not exist, the backend should
    /// return an error (indicating no file descriptor is available).
    ///
    /// This function returns a `Result`, returning an error if the backend does not implement this
    /// function.
    fn get_shared_object(&mut self, _uuid: VhostUserSharedMsg) -> Result<File> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support get shared object",
        ))
    }

    /// Set handler for communicating with the frontend by the gpu specific backend communication
    /// channel.
    ///
    /// This function returns a `Result`, returning an error if the backend does not implement this
    /// function.
    fn set_gpu_socket(&mut self, _gpu_backend: GpuBackend) -> Result<()> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "backend does not support set_gpu_socket() / VHOST_USER_GPU_SET_SOCKET",
        ))
    }

    /// Get the map to map queue index to worker thread index.
    ///
    /// A return value of [2, 2, 4] means: the first two queues will be handled by worker thread 0,
    /// the following two queues will be handled by worker thread 1, and the last four queues will
    /// be handled by worker thread 2.
    ///
    /// The default implementation assigns all queues to a single worker thread.
    fn queues_per_thread(&self) -> Vec<u64> {
        vec![0xffff_ffff]
    }

    /// Provide an optional exit EventFd for the specified worker thread.
    ///
    /// The returned `EventFd` will be monitored for IO events. When the
    /// returned EventFd is written to, the worker thread will exit.
    fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
        None
    }

    /// Handle IO events for backend registered file descriptors.
    ///
    /// This function gets called if the backend registered some additional listeners onto specific
    /// file descriptors. The library can handle virtqueues on its own, but does not know what to
    /// do with events happening on custom listeners.
    fn handle_event(
        &mut self,
        device_event: u16,
        evset: EventSet,
        vrings: &[Self::Vring],
        thread_id: usize,
    ) -> Result<()>;

    /// Initiate transfer of internal state for the purpose of migration to/from the back-end.
    ///
    /// Depending on `direction`, the state should either be saved (i.e. serialized and written to
    /// `file`) or loaded (i.e. read from `file` and deserialized).  Note that this function must
    /// not block during transfer, i.e. I/O to/from `file` must be done outside of this function.
    fn set_device_state_fd(
        &mut self,
        _direction: VhostTransferStateDirection,
        _phase: VhostTransferStatePhase,
        _file: File,
    ) -> Result<Option<File>> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support state transfer",
        ))
    }

    /// After transferring internal state, check for any resulting errors, including potential
    /// deserialization errors when loading state.
    ///
    /// Although this function returns a `Result`, the front-end will not receive any details about
    /// this error.
    fn check_device_state(&self) -> Result<()> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "back end does not support state transfer",
        ))
    }
}
324
325impl<T: VhostUserBackend> VhostUserBackend for Arc<T> {
326    type Bitmap = T::Bitmap;
327    type Vring = T::Vring;
328
329    fn num_queues(&self) -> usize {
330        self.deref().num_queues()
331    }
332
333    fn max_queue_size(&self) -> usize {
334        self.deref().max_queue_size()
335    }
336
337    fn features(&self) -> u64 {
338        self.deref().features()
339    }
340
341    fn acked_features(&self, features: u64) {
342        self.deref().acked_features(features)
343    }
344
345    fn protocol_features(&self) -> VhostUserProtocolFeatures {
346        self.deref().protocol_features()
347    }
348
349    fn reset_device(&self) {
350        self.deref().reset_device()
351    }
352
353    fn set_event_idx(&self, enabled: bool) {
354        self.deref().set_event_idx(enabled)
355    }
356
357    fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
358        self.deref().get_config(offset, size)
359    }
360
361    fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> {
362        self.deref().set_config(offset, buf)
363    }
364
365    fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()> {
366        self.deref().update_memory(mem)
367    }
368
369    fn set_backend_req_fd(&self, backend: Backend) {
370        self.deref().set_backend_req_fd(backend)
371    }
372
373    fn get_shared_object(&self, uuid: VhostUserSharedMsg) -> Result<File> {
374        self.deref().get_shared_object(uuid)
375    }
376
377    fn set_gpu_socket(&self, gpu_backend: GpuBackend) -> Result<()> {
378        self.deref().set_gpu_socket(gpu_backend)
379    }
380
381    fn queues_per_thread(&self) -> Vec<u64> {
382        self.deref().queues_per_thread()
383    }
384
385    fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
386        self.deref().exit_event(thread_index)
387    }
388
389    fn handle_event(
390        &self,
391        device_event: u16,
392        evset: EventSet,
393        vrings: &[Self::Vring],
394        thread_id: usize,
395    ) -> Result<()> {
396        self.deref()
397            .handle_event(device_event, evset, vrings, thread_id)
398    }
399
400    fn set_device_state_fd(
401        &self,
402        direction: VhostTransferStateDirection,
403        phase: VhostTransferStatePhase,
404        file: File,
405    ) -> Result<Option<File>> {
406        self.deref().set_device_state_fd(direction, phase, file)
407    }
408
409    fn check_device_state(&self) -> Result<()> {
410        self.deref().check_device_state()
411    }
412}
413
414impl<T: VhostUserBackendMut> VhostUserBackend for Mutex<T> {
415    type Bitmap = T::Bitmap;
416    type Vring = T::Vring;
417
418    fn num_queues(&self) -> usize {
419        self.lock().unwrap().num_queues()
420    }
421
422    fn max_queue_size(&self) -> usize {
423        self.lock().unwrap().max_queue_size()
424    }
425
426    fn features(&self) -> u64 {
427        self.lock().unwrap().features()
428    }
429
430    fn acked_features(&self, features: u64) {
431        self.lock().unwrap().acked_features(features)
432    }
433
434    fn protocol_features(&self) -> VhostUserProtocolFeatures {
435        self.lock().unwrap().protocol_features()
436    }
437
438    fn reset_device(&self) {
439        self.lock().unwrap().reset_device()
440    }
441
442    fn set_event_idx(&self, enabled: bool) {
443        self.lock().unwrap().set_event_idx(enabled)
444    }
445
446    fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
447        self.lock().unwrap().get_config(offset, size)
448    }
449
450    fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> {
451        self.lock().unwrap().set_config(offset, buf)
452    }
453
454    fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()> {
455        self.lock().unwrap().update_memory(mem)
456    }
457
458    fn set_backend_req_fd(&self, backend: Backend) {
459        self.lock().unwrap().set_backend_req_fd(backend)
460    }
461
462    fn get_shared_object(&self, uuid: VhostUserSharedMsg) -> Result<File> {
463        self.lock().unwrap().get_shared_object(uuid)
464    }
465
466    fn set_gpu_socket(&self, gpu_backend: GpuBackend) -> Result<()> {
467        self.lock().unwrap().set_gpu_socket(gpu_backend)
468    }
469
470    fn queues_per_thread(&self) -> Vec<u64> {
471        self.lock().unwrap().queues_per_thread()
472    }
473
474    fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
475        self.lock().unwrap().exit_event(thread_index)
476    }
477
478    fn handle_event(
479        &self,
480        device_event: u16,
481        evset: EventSet,
482        vrings: &[Self::Vring],
483        thread_id: usize,
484    ) -> Result<()> {
485        self.lock()
486            .unwrap()
487            .handle_event(device_event, evset, vrings, thread_id)
488    }
489
490    fn set_device_state_fd(
491        &self,
492        direction: VhostTransferStateDirection,
493        phase: VhostTransferStatePhase,
494        file: File,
495    ) -> Result<Option<File>> {
496        self.lock()
497            .unwrap()
498            .set_device_state_fd(direction, phase, file)
499    }
500
501    fn check_device_state(&self) -> Result<()> {
502        self.lock().unwrap().check_device_state()
503    }
504}
505
506impl<T: VhostUserBackendMut> VhostUserBackend for RwLock<T> {
507    type Bitmap = T::Bitmap;
508    type Vring = T::Vring;
509
510    fn num_queues(&self) -> usize {
511        self.read().unwrap().num_queues()
512    }
513
514    fn max_queue_size(&self) -> usize {
515        self.read().unwrap().max_queue_size()
516    }
517
518    fn features(&self) -> u64 {
519        self.read().unwrap().features()
520    }
521
522    fn acked_features(&self, features: u64) {
523        self.write().unwrap().acked_features(features)
524    }
525
526    fn protocol_features(&self) -> VhostUserProtocolFeatures {
527        self.read().unwrap().protocol_features()
528    }
529
530    fn reset_device(&self) {
531        self.write().unwrap().reset_device()
532    }
533
534    fn set_event_idx(&self, enabled: bool) {
535        self.write().unwrap().set_event_idx(enabled)
536    }
537
538    fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
539        self.read().unwrap().get_config(offset, size)
540    }
541
542    fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> {
543        self.write().unwrap().set_config(offset, buf)
544    }
545
546    fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()> {
547        self.write().unwrap().update_memory(mem)
548    }
549
550    fn set_backend_req_fd(&self, backend: Backend) {
551        self.write().unwrap().set_backend_req_fd(backend)
552    }
553
554    fn get_shared_object(&self, uuid: VhostUserSharedMsg) -> Result<File> {
555        self.write().unwrap().get_shared_object(uuid)
556    }
557
558    fn set_gpu_socket(&self, gpu_backend: GpuBackend) -> Result<()> {
559        self.write().unwrap().set_gpu_socket(gpu_backend)
560    }
561
562    fn queues_per_thread(&self) -> Vec<u64> {
563        self.read().unwrap().queues_per_thread()
564    }
565
566    fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
567        self.read().unwrap().exit_event(thread_index)
568    }
569
570    fn handle_event(
571        &self,
572        device_event: u16,
573        evset: EventSet,
574        vrings: &[Self::Vring],
575        thread_id: usize,
576    ) -> Result<()> {
577        self.write()
578            .unwrap()
579            .handle_event(device_event, evset, vrings, thread_id)
580    }
581
582    fn set_device_state_fd(
583        &self,
584        direction: VhostTransferStateDirection,
585        phase: VhostTransferStatePhase,
586        file: File,
587    ) -> Result<Option<File>> {
588        self.write()
589            .unwrap()
590            .set_device_state_fd(direction, phase, file)
591    }
592
593    fn check_device_state(&self) -> Result<()> {
594        self.read().unwrap().check_device_state()
595    }
596}
597
598#[cfg(test)]
599pub mod tests {
600    use super::*;
601    use crate::VringRwLock;
602    use libc::EFD_NONBLOCK;
603    use std::sync::Mutex;
604    use uuid::Uuid;
605    use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
606
    /// Mock backend used by the unit tests below to exercise the blanket
    /// `VhostUserBackend` implementations for `Mutex<T>` and `RwLock<T>`.
    pub struct MockVhostBackend {
        /// Number of `handle_event()` invocations observed so far.
        events: u64,
        /// Last value passed to `set_event_idx()`.
        event_idx: bool,
        /// Last feature bits passed to `acked_features()`.
        acked_features: u64,
        /// One exit eventfd per worker thread, sized from `queues_per_thread()`.
        exit_event_fds: Vec<EventFd>,
    }
613
614    impl MockVhostBackend {
615        pub fn new() -> Self {
616            let mut backend = MockVhostBackend {
617                events: 0,
618                event_idx: false,
619                acked_features: 0,
620                exit_event_fds: vec![],
621            };
622
623            // Create a event_fd for each thread. We make it NONBLOCKing in
624            // order to allow tests maximum flexibility in checking whether
625            // signals arrived or not.
626            backend.exit_event_fds = (0..backend.queues_per_thread().len())
627                .map(|_| EventFd::new(EFD_NONBLOCK).unwrap())
628                .collect();
629
630            backend
631        }
632    }
633
634    impl VhostUserBackendMut for MockVhostBackend {
635        type Bitmap = ();
636        type Vring = VringRwLock;
637
638        fn num_queues(&self) -> usize {
639            2
640        }
641
642        fn max_queue_size(&self) -> usize {
643            256
644        }
645
646        fn features(&self) -> u64 {
647            0xffff_ffff_ffff_ffff
648        }
649
650        fn acked_features(&mut self, features: u64) {
651            self.acked_features = features;
652        }
653
654        fn protocol_features(&self) -> VhostUserProtocolFeatures {
655            VhostUserProtocolFeatures::all()
656        }
657
658        fn reset_device(&mut self) {
659            self.event_idx = false;
660            self.events = 0;
661            self.acked_features = 0;
662        }
663
664        fn set_event_idx(&mut self, enabled: bool) {
665            self.event_idx = enabled;
666        }
667
668        fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
669            assert_eq!(offset, 0x200);
670            assert_eq!(size, 8);
671
672            vec![0xa5u8; 8]
673        }
674
675        fn set_config(&mut self, offset: u32, buf: &[u8]) -> Result<()> {
676            assert_eq!(offset, 0x200);
677            assert_eq!(buf.len(), 8);
678            assert_eq!(buf, &[0xa5u8; 8]);
679
680            Ok(())
681        }
682
683        fn update_memory(&mut self, _atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>) -> Result<()> {
684            Ok(())
685        }
686
687        fn set_backend_req_fd(&mut self, _backend: Backend) {}
688
689        fn get_shared_object(&mut self, _uuid: VhostUserSharedMsg) -> Result<File> {
690            let file = tempfile::tempfile().unwrap();
691            Ok(file)
692        }
693
694        fn queues_per_thread(&self) -> Vec<u64> {
695            vec![1, 1]
696        }
697
698        fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
699            Some(
700                self.exit_event_fds
701                    .get(thread_index)?
702                    .try_clone()
703                    .expect("Could not clone exit eventfd"),
704            )
705        }
706
707        fn handle_event(
708            &mut self,
709            _device_event: u16,
710            _evset: EventSet,
711            _vrings: &[VringRwLock],
712            _thread_id: usize,
713        ) -> Result<()> {
714            self.events += 1;
715
716            Ok(())
717        }
718    }
719
720    #[test]
721    fn test_new_mock_backend_mutex() {
722        let backend = Arc::new(Mutex::new(MockVhostBackend::new()));
723
724        assert_eq!(backend.num_queues(), 2);
725        assert_eq!(backend.max_queue_size(), 256);
726        assert_eq!(backend.features(), 0xffff_ffff_ffff_ffff);
727        assert_eq!(
728            backend.protocol_features(),
729            VhostUserProtocolFeatures::all()
730        );
731        assert_eq!(backend.queues_per_thread(), [1, 1]);
732
733        assert_eq!(backend.get_config(0x200, 8), vec![0xa5; 8]);
734        backend.set_config(0x200, &[0xa5; 8]).unwrap();
735
736        backend.acked_features(0xffff);
737        assert_eq!(backend.lock().unwrap().acked_features, 0xffff);
738
739        backend.set_event_idx(true);
740        assert!(backend.lock().unwrap().event_idx);
741
742        let _ = backend.exit_event(0).unwrap();
743
744        let uuid = VhostUserSharedMsg {
745            uuid: Uuid::new_v4(),
746        };
747        backend.get_shared_object(uuid).unwrap();
748
749        let mem = GuestMemoryAtomic::new(
750            GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
751        );
752        backend.update_memory(mem).unwrap();
753
754        backend.reset_device();
755        assert!(backend.lock().unwrap().events == 0);
756        assert!(!backend.lock().unwrap().event_idx);
757        assert!(backend.lock().unwrap().acked_features == 0);
758    }
759
760    #[test]
761    fn test_new_mock_backend_rwlock() {
762        let backend = Arc::new(RwLock::new(MockVhostBackend::new()));
763
764        assert_eq!(backend.num_queues(), 2);
765        assert_eq!(backend.max_queue_size(), 256);
766        assert_eq!(backend.features(), 0xffff_ffff_ffff_ffff);
767        assert_eq!(
768            backend.protocol_features(),
769            VhostUserProtocolFeatures::all()
770        );
771        assert_eq!(backend.queues_per_thread(), [1, 1]);
772
773        assert_eq!(backend.get_config(0x200, 8), vec![0xa5; 8]);
774        backend.set_config(0x200, &[0xa5; 8]).unwrap();
775
776        backend.acked_features(0xffff);
777        assert_eq!(backend.read().unwrap().acked_features, 0xffff);
778
779        backend.set_event_idx(true);
780        assert!(backend.read().unwrap().event_idx);
781
782        let _ = backend.exit_event(0).unwrap();
783
784        let mem = GuestMemoryAtomic::new(
785            GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
786        );
787        backend.update_memory(mem.clone()).unwrap();
788
789        let uuid = VhostUserSharedMsg {
790            uuid: Uuid::new_v4(),
791        };
792        backend.get_shared_object(uuid).unwrap();
793
794        let vring = VringRwLock::new(mem, 0x1000).unwrap();
795        backend
796            .handle_event(0x1, EventSet::IN, &[vring], 0)
797            .unwrap();
798
799        backend.reset_device();
800        assert!(backend.read().unwrap().events == 0);
801        assert!(!backend.read().unwrap().event_idx);
802        assert!(backend.read().unwrap().acked_features == 0);
803    }
804}