1use std::fmt::{self, Display};
36use std::ops::Deref;
37
38use virtio_queue::DescriptorChain;
39use vm_memory::bitmap::{BitmapSlice, WithBitmapSlice};
40use vm_memory::{
41 Address, ByteValued, Bytes, GuestMemory, GuestMemoryError, GuestMemoryRegion, Le16, Le32, Le64,
42 VolatileMemoryError, VolatileSlice,
43};
44
/// Errors that can occur while parsing or building a vsock packet from a
/// virtio descriptor chain, or while accessing its backing memory.
#[derive(Debug)]
pub enum Error {
    /// The descriptor chain ended before all required buffers were found.
    DescriptorChainTooShort,
    /// A descriptor points to a buffer smaller than required.
    DescriptorLengthTooSmall,
    /// A descriptor points to a buffer larger than allowed.
    DescriptorLengthTooLong,
    /// A raw header byte slice has a size different from `PKT_HEADER_SIZE`.
    InvalidHeaderInputSize(usize),
    /// The header `len` field exceeds the maximum allowed data size.
    InvalidHeaderLen(u32),
    /// A guest memory access failed.
    InvalidMemoryAccess(GuestMemoryError),
    /// A volatile memory access failed.
    InvalidVolatileAccess(VolatileMemoryError),
    /// Found a read-only descriptor where a write-only one was expected.
    UnexpectedReadOnlyDescriptor,
    /// Found a write-only descriptor where a read-only one was expected.
    UnexpectedWriteOnlyDescriptor,
}
67
// Empty impl: no `source()` override; the wrapped errors are surfaced through
// the `Display` implementation below instead.
impl std::error::Error for Error {}
69
70impl Display for Error {
71 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
72 match self {
73 Error::DescriptorChainTooShort => {
74 write!(f, "There are not enough descriptors in the chain.")
75 }
76 Error::DescriptorLengthTooSmall => write!(
77 f,
78 "The descriptor is pointing to a buffer that has a smaller length than expected."
79 ),
80 Error::DescriptorLengthTooLong => write!(
81 f,
82 "The descriptor is pointing to a buffer that has a longer length than expected."
83 ),
84 Error::InvalidHeaderInputSize(size) => {
85 write!(f, "Invalid header input size: {}", size)
86 }
87 Error::InvalidHeaderLen(size) => {
88 write!(f, "Invalid header `len` field value: {}", size)
89 }
90 Error::InvalidMemoryAccess(error) => {
91 write!(f, "Invalid guest memory access: {}", error)
92 }
93 Error::InvalidVolatileAccess(error) => {
94 write!(f, "Invalid volatile memory access: {}", error)
95 }
96 Error::UnexpectedReadOnlyDescriptor => {
97 write!(f, "Unexpected read-only descriptor.")
98 }
99 Error::UnexpectedWriteOnlyDescriptor => {
100 write!(f, "Unexpected write-only descriptor.")
101 }
102 }
103 }
104}
105
/// The vsock packet header as it appears in guest memory.
///
/// All fields are stored little-endian. `#[repr(C, packed)]` pins the field
/// order and removes padding so the in-memory layout matches the byte offsets
/// in the `*_OFFSET` constants below exactly.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct PacketHeader {
    // Source/destination context IDs and ports of the connection.
    src_cid: Le64,
    dst_cid: Le64,
    src_port: Le32,
    dst_port: Le32,
    // Length in bytes of the payload that accompanies this header.
    len: Le32,
    // NOTE(review): `type_`, `op` and `flags` semantics come from the
    // virtio-vsock spec; they are opaque pass-through values in this file.
    type_: Le16,
    op: Le16,
    flags: Le32,
    // Flow-control counters exchanged between the peers (see virtio-vsock
    // spec; not interpreted here).
    buf_alloc: Le32,
    fwd_cnt: Le32,
}
121
// SAFETY: `PacketHeader` is `#[repr(C, packed)]` and contains only plain
// little-endian integer fields, so it has no padding bytes and every byte
// pattern is a valid value, which is what `ByteValued` requires.
unsafe impl ByteValued for PacketHeader {}

/// Size in bytes of the vsock packet header.
pub const PKT_HEADER_SIZE: usize = std::mem::size_of::<PacketHeader>();
135
// Byte offset of each `PacketHeader` field inside the packed header. These are
// used by `set_header_field!` to mirror field updates into the backing
// `header_slice`, and must stay in sync with the struct layout above.
const SRC_CID_OFFSET: usize = 0;
const DST_CID_OFFSET: usize = 8;
const SRC_PORT_OFFSET: usize = 16;
const DST_PORT_OFFSET: usize = 20;
const LEN_OFFSET: usize = 24;
const TYPE_OFFSET: usize = 28;
const OP_OFFSET: usize = 30;
const FLAGS_OFFSET: usize = 32;
const BUF_ALLOC_OFFSET: usize = 36;
const FWD_CNT_OFFSET: usize = 40;

/// Convenience alias for results carrying this module's [`Error`].
pub type Result<T> = std::result::Result<T, Error>;
150
/// A vsock packet whose header and payload live in (guest or caller-provided)
/// memory.
///
/// `header` is a local copy of the header bytes; the setter methods keep it in
/// sync with `header_slice`, so reads can be served from the copy without
/// touching the backing memory.
#[derive(Debug)]
pub struct VsockPacket<'a, B: BitmapSlice> {
    // Volatile view over the header bytes in the backing memory.
    header_slice: VolatileSlice<'a, B>,
    // Cached copy of the header; getters read from here.
    header: PacketHeader,
    // Volatile view over the payload, if the packet carries any data.
    data_slice: Option<VolatileSlice<'a, B>>,
}
165
/// Updates a header field both in the cached `header` copy and, as
/// little-endian bytes, in the backing `header_slice`, keeping the two
/// representations consistent.
///
/// Panics if `$offset` is out of bounds for the header slice (the `write`
/// result is `unwrap`ed). All in-crate call sites pass the `*_OFFSET`
/// constants, which are within `PKT_HEADER_SIZE`.
macro_rules! set_header_field {
    ($packet:ident, $field:ident, $offset:ident, $value:ident) => {
        $packet.header.$field = $value.into();
        $packet
            .header_slice
            .write(&$value.to_le_bytes(), $offset)
            .unwrap();
    };
}
179
180impl<'a, B: BitmapSlice> VsockPacket<'a, B> {
    /// Returns the volatile slice covering the packet header bytes.
    pub fn header_slice(&self) -> &VolatileSlice<'a, B> {
        &self.header_slice
    }
185
    /// Returns the volatile slice covering the packet payload, if any.
    pub fn data_slice(&self) -> Option<&VolatileSlice<'a, B>> {
        self.data_slice.as_ref()
    }
190
191 pub fn set_header_from_raw(&mut self, bytes: &[u8]) -> Result<()> {
225 if bytes.len() != PKT_HEADER_SIZE {
226 return Err(Error::InvalidHeaderInputSize(bytes.len()));
227 }
228 self.header_slice
229 .write(bytes, 0)
230 .map_err(Error::InvalidVolatileAccess)?;
231 let header = self
232 .header_slice()
233 .read_obj::<PacketHeader>(0)
234 .map_err(Error::InvalidVolatileAccess)?;
235 self.header = header;
236 Ok(())
237 }
238
    /// Returns the source CID from the cached header.
    pub fn src_cid(&self) -> u64 {
        self.header.src_cid.into()
    }

    /// Sets the source CID; returns `&mut Self` so setters can be chained.
    pub fn set_src_cid(&mut self, cid: u64) -> &mut Self {
        set_header_field!(self, src_cid, SRC_CID_OFFSET, cid);
        self
    }

    /// Returns the destination CID from the cached header.
    pub fn dst_cid(&self) -> u64 {
        self.header.dst_cid.into()
    }

    /// Sets the destination CID.
    pub fn set_dst_cid(&mut self, cid: u64) -> &mut Self {
        set_header_field!(self, dst_cid, DST_CID_OFFSET, cid);
        self
    }

    /// Returns the source port from the cached header.
    pub fn src_port(&self) -> u32 {
        self.header.src_port.into()
    }

    /// Sets the source port.
    pub fn set_src_port(&mut self, port: u32) -> &mut Self {
        set_header_field!(self, src_port, SRC_PORT_OFFSET, port);
        self
    }

    /// Returns the destination port from the cached header.
    pub fn dst_port(&self) -> u32 {
        self.header.dst_port.into()
    }

    /// Sets the destination port.
    pub fn set_dst_port(&mut self, port: u32) -> &mut Self {
        set_header_field!(self, dst_port, DST_PORT_OFFSET, port);
        self
    }

    /// Returns the payload length declared in the header.
    pub fn len(&self) -> u32 {
        self.header.len.into()
    }

    /// Returns `true` if the header declares no payload.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Sets the payload length field.
    pub fn set_len(&mut self, len: u32) -> &mut Self {
        set_header_field!(self, len, LEN_OFFSET, len);
        self
    }

    /// Returns the socket `type` field of the header.
    pub fn type_(&self) -> u16 {
        self.header.type_.into()
    }

    /// Sets the socket `type` field.
    pub fn set_type(&mut self, type_: u16) -> &mut Self {
        set_header_field!(self, type_, TYPE_OFFSET, type_);
        self
    }

    /// Returns the operation (`op`) field of the header.
    pub fn op(&self) -> u16 {
        self.header.op.into()
    }

    /// Sets the operation (`op`) field.
    pub fn set_op(&mut self, op: u16) -> &mut Self {
        set_header_field!(self, op, OP_OFFSET, op);
        self
    }

    /// Returns the flags field of the header.
    pub fn flags(&self) -> u32 {
        self.header.flags.into()
    }

    /// Replaces the whole flags field with `flags`.
    pub fn set_flags(&mut self, flags: u32) -> &mut Self {
        set_header_field!(self, flags, FLAGS_OFFSET, flags);
        self
    }

    /// ORs `flag` into the current flags value (does not clear other bits).
    pub fn set_flag(&mut self, flag: u32) -> &mut Self {
        self.set_flags(self.flags() | flag);
        self
    }

    /// Returns the `buf_alloc` flow-control field.
    pub fn buf_alloc(&self) -> u32 {
        self.header.buf_alloc.into()
    }

    /// Sets the `buf_alloc` flow-control field.
    pub fn set_buf_alloc(&mut self, buf_alloc: u32) -> &mut Self {
        set_header_field!(self, buf_alloc, BUF_ALLOC_OFFSET, buf_alloc);
        self
    }

    /// Returns the `fwd_cnt` flow-control field.
    pub fn fwd_cnt(&self) -> u32 {
        self.header.fwd_cnt.into()
    }

    /// Sets the `fwd_cnt` flow-control field.
    pub fn set_fwd_cnt(&mut self, fwd_cnt: u32) -> &mut Self {
        set_header_field!(self, fwd_cnt, FWD_CNT_OFFSET, fwd_cnt);
        self
    }
359
    /// Builds a packet from a TX (driver -> device) descriptor chain.
    ///
    /// TX buffers are read-only for the device, so every descriptor in the
    /// chain must be read-only. The first descriptor must hold at least the
    /// packet header; the payload (if the header's `len` is non-zero) either
    /// follows the header in the same, single descriptor, or occupies the
    /// next descriptor of the chain. The header's `len` is validated against
    /// `max_data_size`.
    ///
    /// # Errors
    ///
    /// Returns the corresponding `Error` variant when the chain is too short,
    /// a descriptor has the wrong access mode or size, the declared payload
    /// exceeds `max_data_size`, or guest memory cannot be accessed.
    pub fn from_tx_virtq_chain<M, T>(
        mem: &'a M,
        desc_chain: &mut DescriptorChain<T>,
        max_data_size: u32,
    ) -> Result<Self>
    where
        M: GuestMemory,
        <<M as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
        T: Deref,
        T::Target: GuestMemory,
    {
        let chain_head = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
        // The device only reads TX buffers, so the head must be read-only.
        if chain_head.is_write_only() {
            return Err(Error::UnexpectedWriteOnlyDescriptor);
        }

        // The head must contain at least the full header.
        if (chain_head.len() as usize) < PKT_HEADER_SIZE {
            return Err(Error::DescriptorLengthTooSmall);
        }

        let header_slice = mem
            .get_slice(chain_head.addr(), PKT_HEADER_SIZE)
            .map_err(Error::InvalidMemoryAccess)?;

        // Snapshot the header into the local cache; getters read this copy.
        let header = mem
            .read_obj(chain_head.addr())
            .map_err(Error::InvalidMemoryAccess)?;

        let mut pkt = Self {
            header_slice,
            header,
            data_slice: None,
        };

        // Control packets carry no payload; we are done.
        if pkt.is_empty() {
            return Ok(pkt);
        }

        // The guest-controlled `len` field must not exceed the configured cap.
        if pkt.len() > max_data_size {
            return Err(Error::InvalidHeaderLen(pkt.len()));
        }

        // Single-descriptor layout: the payload sits right after the header in
        // the head descriptor, provided enough bytes remain there. Otherwise
        // the payload must be in the next (read-only) descriptor.
        let data_slice =
            if !chain_head.has_next() && chain_head.len() - PKT_HEADER_SIZE as u32 >= pkt.len() {
                mem.get_slice(
                    chain_head
                        .addr()
                        // NOTE(review): an address overflow here is reported as
                        // DescriptorLengthTooSmall — arguably a misnomer, but
                        // callers only see an error either way.
                        .checked_add(PKT_HEADER_SIZE as u64)
                        .ok_or(Error::DescriptorLengthTooSmall)?,
                    pkt.len() as usize,
                )
                .map_err(Error::InvalidMemoryAccess)?
            } else {
                if !chain_head.has_next() {
                    return Err(Error::DescriptorChainTooShort);
                }

                let data_desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;

                if data_desc.is_write_only() {
                    return Err(Error::UnexpectedWriteOnlyDescriptor);
                }

                // The data descriptor must hold at least the declared payload;
                // only `len` bytes of it are mapped.
                if data_desc.len() < pkt.len() {
                    return Err(Error::DescriptorLengthTooSmall);
                }

                mem.get_slice(data_desc.addr(), pkt.len() as usize)
                    .map_err(Error::InvalidMemoryAccess)?
            };

        pkt.data_slice = Some(data_slice);
        Ok(pkt)
    }
500
    /// Builds a packet from an RX (device -> driver) descriptor chain.
    ///
    /// RX buffers are written by the device, so every descriptor must be
    /// write-only. The first descriptor must hold at least the header; the
    /// payload buffer is either the remainder of a single descriptor or the
    /// next descriptor in the chain. The cached header is left defaulted —
    /// the device fills it in via the setters before use.
    ///
    /// # Errors
    ///
    /// Returns the corresponding `Error` variant when the chain is too short,
    /// a descriptor has the wrong access mode or size, or guest memory cannot
    /// be accessed.
    pub fn from_rx_virtq_chain<M, T>(
        mem: &'a M,
        desc_chain: &mut DescriptorChain<T>,
        max_data_size: u32,
    ) -> Result<Self>
    where
        M: GuestMemory,
        <<M as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
        T: Deref,
        T::Target: GuestMemory,
    {
        let chain_head = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
        // The device writes RX buffers, so the head must be write-only.
        if !chain_head.is_write_only() {
            return Err(Error::UnexpectedReadOnlyDescriptor);
        }

        if (chain_head.len() as usize) < PKT_HEADER_SIZE {
            return Err(Error::DescriptorLengthTooSmall);
        }

        let header_slice = mem
            .get_slice(chain_head.addr(), PKT_HEADER_SIZE)
            .map_err(Error::InvalidMemoryAccess)?;

        // Single-descriptor layout: payload is whatever follows the header in
        // the head descriptor. NOTE(review): unlike the two-descriptor branch
        // below, this path does not check the payload size against
        // `max_data_size` — confirm whether that is intended.
        let data_slice = if !chain_head.has_next() && chain_head.len() as usize > PKT_HEADER_SIZE {
            mem.get_slice(
                chain_head
                    .addr()
                    .checked_add(PKT_HEADER_SIZE as u64)
                    .ok_or(Error::DescriptorLengthTooSmall)?,
                chain_head.len() as usize - PKT_HEADER_SIZE,
            )
            .map_err(Error::InvalidMemoryAccess)?
        } else {
            if !chain_head.has_next() {
                return Err(Error::DescriptorChainTooShort);
            }

            let data_desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;

            if !data_desc.is_write_only() {
                return Err(Error::UnexpectedReadOnlyDescriptor);
            }

            if data_desc.len() > max_data_size {
                return Err(Error::DescriptorLengthTooLong);
            }

            // The whole data descriptor is mapped as the payload buffer.
            mem.get_slice(data_desc.addr(), data_desc.len() as usize)
                .map_err(Error::InvalidMemoryAccess)?
        };

        Ok(Self {
            header_slice,
            // RX packets start with an empty header; the device fills it in.
            header: Default::default(),
            data_slice: Some(data_slice),
        })
    }
643}
644
impl<'a> VsockPacket<'a, ()> {
    /// Builds a packet over caller-provided byte buffers, without dirty-bitmap
    /// tracking (the `()` bitmap type).
    ///
    /// Returns `InvalidHeaderInputSize` if `header` is not exactly
    /// `PKT_HEADER_SIZE` bytes long.
    ///
    /// # Safety
    ///
    /// The returned packet stores raw pointers into `header` and `data`
    /// (via `VolatileSlice::new`), with an unconstrained caller-chosen
    /// lifetime `'a`. The caller must ensure both buffers remain valid and
    /// are not aliased mutably elsewhere for as long as the packet is used.
    pub unsafe fn new(header: &mut [u8], data: Option<&mut [u8]>) -> Result<VsockPacket<'a, ()>> {
        if header.len() != PKT_HEADER_SIZE {
            return Err(Error::InvalidHeaderInputSize(header.len()));
        }
        Ok(VsockPacket {
            header_slice: VolatileSlice::new(header.as_mut_ptr(), PKT_HEADER_SIZE),
            // The cached header starts zeroed; callers populate it via setters
            // or `set_header_from_raw`.
            header: Default::default(),
            data_slice: data.map(|data| VolatileSlice::new(data.as_mut_ptr(), data.len())),
        })
    }
}
679
680#[cfg(test)]
681mod tests {
682 use super::*;
683
684 use vm_memory::{GuestAddress, GuestMemoryMmap};
685
686 use virtio_bindings::bindings::virtio_ring::VRING_DESC_F_WRITE;
687 use virtio_queue::desc::{split::Descriptor as SplitDescriptor, RawDescriptor};
688 use virtio_queue::mock::MockSplitQueue;
689
    // Test-only `PartialEq` so the tests can `assert_eq!` on errors. The
    // memory-access variants wrap error types compared via their `Display`
    // output, since they don't expose structural equality here.
    impl PartialEq for Error {
        fn eq(&self, other: &Self) -> bool {
            use self::Error::*;
            match (self, other) {
                (DescriptorChainTooShort, DescriptorChainTooShort) => true,
                (DescriptorLengthTooSmall, DescriptorLengthTooSmall) => true,
                (DescriptorLengthTooLong, DescriptorLengthTooLong) => true,
                (InvalidHeaderInputSize(size), InvalidHeaderInputSize(other_size)) => {
                    size == other_size
                }
                (InvalidHeaderLen(size), InvalidHeaderLen(other_size)) => size == other_size,
                (InvalidMemoryAccess(ref e), InvalidMemoryAccess(ref other_e)) => {
                    format!("{}", e).eq(&format!("{}", other_e))
                }
                (InvalidVolatileAccess(ref e), InvalidVolatileAccess(ref other_e)) => {
                    format!("{}", e).eq(&format!("{}", other_e))
                }
                (UnexpectedReadOnlyDescriptor, UnexpectedReadOnlyDescriptor) => true,
                (UnexpectedWriteOnlyDescriptor, UnexpectedWriteOnlyDescriptor) => true,
                _ => false,
            }
        }
    }
713
    // Arbitrary fixture values for exercising the header getters/setters.
    const SRC_CID: u64 = 1;
    const DST_CID: u64 = 2;
    const SRC_PORT: u32 = 3;
    const DST_PORT: u32 = 4;
    // Payload length written into the header's `len` field by the tests.
    const LEN: u32 = 16;
    const TYPE: u16 = 5;
    const OP: u16 = 6;
    const FLAGS: u32 = 7;
    const FLAG: u32 = 8;
    const BUF_ALLOC: u32 = 256;
    const FWD_CNT: u32 = 9;

    // Cap passed as `max_data_size` to the chain constructors.
    const MAX_PKT_BUF_SIZE: u32 = 64 * 1024;
728
    // Exercises `from_rx_virtq_chain` across its error paths and both valid
    // descriptor layouts (two descriptors, and a single descriptor holding
    // header + payload).
    #[test]
    fn test_from_rx_virtq_chain() {
        let mem: GuestMemoryMmap =
            GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000_0000)]).unwrap();

        // Head descriptor is read-only: RX requires write-only buffers.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(0x10_0000, 0x100, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(
                0x20_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let queue = MockSplitQueue::new(&mem, 16);
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::UnexpectedReadOnlyDescriptor
        );

        // Head descriptor one byte too small to hold the header.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x10_0000,
                PKT_HEADER_SIZE as u32 - 1,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                0x20_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::DescriptorLengthTooSmall
        );

        // Data descriptor exceeds `max_data_size`.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x10_0000,
                PKT_HEADER_SIZE as u32,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                0x20_0000,
                MAX_PKT_BUF_SIZE + 1,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::DescriptorLengthTooLong
        );

        // Header-only descriptor with no room for data and no next descriptor.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x10_0000,
                PKT_HEADER_SIZE as u32,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::DescriptorChainTooShort
        );

        // Read-only head again (different layout) for completeness.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(0x10_0000, 0x100, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(
                0x20_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::UnexpectedReadOnlyDescriptor
        );

        // Shrink guest memory so out-of-bounds addresses can be exercised.
        let mem: GuestMemoryMmap =
            GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10_0004)]).unwrap();

        // Header descriptor straddles the end of guest memory.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x10_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                0x20_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let queue = MockSplitQueue::new(&mem, 16);
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::InvalidMemoryAccess(GuestMemoryError::InvalidBackendAddress)
        );

        // Header descriptor entirely outside guest memory.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x20_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                0x30_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::InvalidMemoryAccess(GuestMemoryError::InvalidGuestAddress(GuestAddress(
                0x20_0000
            )))
        );

        // Valid header, but data descriptor is read-only.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x5_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(0x8_0000, 0x100, 0, 0)),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::UnexpectedReadOnlyDescriptor
        );
        // Data descriptor straddles the end of guest memory.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x5_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                0x10_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::InvalidMemoryAccess(GuestMemoryError::InvalidBackendAddress)
        );

        // Data descriptor entirely outside guest memory.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x5_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                0x20_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::InvalidMemoryAccess(GuestMemoryError::InvalidGuestAddress(GuestAddress(
                0x20_0000
            )))
        );

        // Valid two-descriptor layout: header in the first, data in the second.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x5_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                0x8_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();

        let packet = VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap();
        // RX leaves the cached header defaulted.
        assert_eq!(packet.header, PacketHeader::default());
        let header = packet.header_slice();
        assert_eq!(
            header.ptr_guard().as_ptr(),
            mem.get_host_address(GuestAddress(0x5_0000)).unwrap()
        );
        assert_eq!(header.len(), PKT_HEADER_SIZE);

        let data = packet.data_slice().unwrap();
        assert_eq!(
            data.ptr_guard().as_ptr(),
            mem.get_host_address(GuestAddress(0x8_0000)).unwrap()
        );
        assert_eq!(data.len(), 0x100);

        // The chain was fully consumed above; parsing again must fail.
        assert_eq!(
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::DescriptorChainTooShort
        );

        // Valid single-descriptor layout: header + data in one buffer.
        let v = vec![RawDescriptor::from(SplitDescriptor::new(
            0x5_0000,
            PKT_HEADER_SIZE as u32 + 0x100,
            VRING_DESC_F_WRITE as u16,
            0,
        ))];
        let mut chain = queue.build_desc_chain(&v).unwrap();

        let packet = VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap();
        assert_eq!(packet.header, PacketHeader::default());
        let header = packet.header_slice();
        assert_eq!(
            header.ptr_guard().as_ptr(),
            mem.get_host_address(GuestAddress(0x5_0000)).unwrap()
        );
        assert_eq!(header.len(), PKT_HEADER_SIZE);

        // Data starts immediately after the header within the same buffer.
        let data = packet.data_slice().unwrap();
        assert_eq!(
            data.ptr_guard().as_ptr(),
            mem.get_host_address(GuestAddress(0x5_0000 + PKT_HEADER_SIZE as u64))
                .unwrap()
        );
        assert_eq!(data.len(), 0x100);
    }
997
    // Exercises `from_tx_virtq_chain` across its error paths and both valid
    // descriptor layouts (separate data descriptor, and a single descriptor
    // carrying header + payload).
    #[test]
    fn test_from_tx_virtq_chain() {
        let mem: GuestMemoryMmap =
            GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000_0000)]).unwrap();

        // Head descriptor is write-only: TX requires read-only buffers.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x10_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(0x20_0000, 0x100, 0, 0)),
        ];
        let queue = MockSplitQueue::new(&mem, 16);
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::UnexpectedWriteOnlyDescriptor
        );

        // Head descriptor one byte too small to hold the header.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x10_0000,
                PKT_HEADER_SIZE as u32 - 1,
                0,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(0x20_0000, 0x100, 0, 0)),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::DescriptorLengthTooSmall
        );

        // Header-only packet (`len == 0`): no data descriptor needed.
        let v = vec![RawDescriptor::from(SplitDescriptor::new(
            0x10_0000,
            PKT_HEADER_SIZE as u32,
            0,
            0,
        ))];
        let mut chain = queue.build_desc_chain(&v).unwrap();

        let header = PacketHeader {
            src_cid: SRC_CID.into(),
            dst_cid: DST_CID.into(),
            src_port: SRC_PORT.into(),
            dst_port: DST_PORT.into(),
            len: 0.into(),
            type_: 0.into(),
            op: 0.into(),
            flags: 0.into(),
            buf_alloc: 0.into(),
            fwd_cnt: 0.into(),
        };
        mem.write_obj(header, GuestAddress(0x10_0000)).unwrap();

        let packet = VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap();
        assert_eq!(packet.header, header);
        let header_slice = packet.header_slice();
        assert_eq!(
            header_slice.ptr_guard().as_ptr(),
            mem.get_host_address(GuestAddress(0x10_0000)).unwrap()
        );
        assert_eq!(header_slice.len(), PKT_HEADER_SIZE);
        assert!(packet.data_slice().is_none());

        // Shrink guest memory so out-of-bounds addresses can be exercised.
        let mem: GuestMemoryMmap =
            GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10_0004)]).unwrap();

        // Header descriptor straddles the end of guest memory.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(0x10_0000, 0x100, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(0x20_0000, 0x100, 0, 0)),
        ];
        let queue = MockSplitQueue::new(&mem, 16);
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::InvalidMemoryAccess(GuestMemoryError::InvalidBackendAddress)
        );

        // Header descriptor entirely outside guest memory.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(0x20_0000, 0x100, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(0x30_0000, 0x100, 0, 0)),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::InvalidMemoryAccess(GuestMemoryError::InvalidGuestAddress(GuestAddress(
                0x20_0000
            )))
        );

        // Header declares a payload larger than `max_data_size`.
        let header = PacketHeader {
            src_cid: SRC_CID.into(),
            dst_cid: DST_CID.into(),
            src_port: SRC_PORT.into(),
            dst_port: DST_PORT.into(),
            len: (MAX_PKT_BUF_SIZE + 1).into(),
            type_: 0.into(),
            op: 0.into(),
            flags: 0.into(),
            buf_alloc: 0.into(),
            fwd_cnt: 0.into(),
        };
        mem.write_obj(header, GuestAddress(0x5_0000)).unwrap();
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(0x5_0000, 0x100, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(0x8_0000, 0x100, 0, 0)),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::InvalidHeaderLen(MAX_PKT_BUF_SIZE + 1)
        );

        // Non-zero `len` but a header-only descriptor and no next descriptor.
        let header = PacketHeader {
            src_cid: SRC_CID.into(),
            dst_cid: DST_CID.into(),
            src_port: SRC_PORT.into(),
            dst_port: DST_PORT.into(),
            len: LEN.into(),
            type_: 0.into(),
            op: 0.into(),
            flags: 0.into(),
            buf_alloc: 0.into(),
            fwd_cnt: 0.into(),
        };
        mem.write_obj(header, GuestAddress(0x5_0000)).unwrap();
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(0x5_0000, PKT_HEADER_SIZE as u32, 0, 0)),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::DescriptorChainTooShort
        );

        // Data descriptor straddles the end of guest memory.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(0x5_0000, 0x100, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(0x10_0000, 0x100, 0, 0)),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::InvalidMemoryAccess(GuestMemoryError::InvalidBackendAddress)
        );

        // Data descriptor entirely outside guest memory.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(0x5_0000, 0x100, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(0x20_0000, 0x100, 0, 0)),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::InvalidMemoryAccess(GuestMemoryError::InvalidGuestAddress(GuestAddress(
                0x20_0000
            )))
        );

        // Data descriptor is write-only: not allowed on the TX path.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(0x5_0000, 0x100, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(
                0x8_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::UnexpectedWriteOnlyDescriptor
        );

        // Data descriptor smaller than the declared payload.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(0x5_0000, 0x100, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(0x8_0000, LEN - 1, 0, 0)),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();
        assert_eq!(
            VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::DescriptorLengthTooSmall
        );

        // Valid two-descriptor layout: header in the first, data in the second.
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(0x5_0000, 0x100, 0, 0)),
            RawDescriptor::from(SplitDescriptor::new(0x8_0000, 0x100, 0, 0)),
        ];
        let mut chain = queue.build_desc_chain(&v).unwrap();

        let packet = VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap();
        assert_eq!(packet.header, header);
        let header_slice = packet.header_slice();
        assert_eq!(
            header_slice.ptr_guard().as_ptr(),
            mem.get_host_address(GuestAddress(0x5_0000)).unwrap()
        );
        assert_eq!(header_slice.len(), PKT_HEADER_SIZE);
        assert_eq!(packet.len(), LEN);

        // Only the declared `LEN` bytes of the data descriptor are mapped.
        let data = packet.data_slice().unwrap();
        assert_eq!(
            data.ptr_guard().as_ptr(),
            mem.get_host_address(GuestAddress(0x8_0000)).unwrap()
        );
        assert_eq!(data.len(), LEN as usize);

        // The chain was fully consumed above; parsing again must fail.
        assert_eq!(
            VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap_err(),
            Error::DescriptorChainTooShort
        );

        // Valid single-descriptor layout: header + payload in one buffer.
        let v = vec![RawDescriptor::from(SplitDescriptor::new(
            0x5_0000,
            PKT_HEADER_SIZE as u32 + 0x100,
            0,
            0,
        ))];
        let mut chain = queue.build_desc_chain(&v).unwrap();

        let packet = VsockPacket::from_tx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap();
        assert_eq!(packet.header, header);
        let header_slice = packet.header_slice();
        assert_eq!(
            header_slice.ptr_guard().as_ptr(),
            mem.get_host_address(GuestAddress(0x5_0000)).unwrap()
        );
        assert_eq!(header_slice.len(), PKT_HEADER_SIZE);
        assert_eq!(packet.len(), LEN);

        // Data starts immediately after the header within the same buffer.
        let data = packet.data_slice().unwrap();
        assert_eq!(
            data.ptr_guard().as_ptr(),
            mem.get_host_address(GuestAddress(0x5_0000 + PKT_HEADER_SIZE as u64))
                .unwrap()
        );
        assert_eq!(data.len(), LEN as usize);
    }
1259
    // Verifies that every setter updates both the cached header copy and the
    // backing header slice, and that each getter reads the value back.
    #[test]
    fn test_header_set_get() {
        let mem: GuestMemoryMmap =
            GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x30_0000)]).unwrap();
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x10_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                0x20_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let queue = MockSplitQueue::new(&mem, 16);
        let mut chain = queue.build_desc_chain(&v).unwrap();

        let mut packet =
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap();
        // Chain every setter; `set_flag` ORs on top of `set_flags`.
        packet
            .set_src_cid(SRC_CID)
            .set_dst_cid(DST_CID)
            .set_src_port(SRC_PORT)
            .set_dst_port(DST_PORT)
            .set_len(LEN)
            .set_type(TYPE)
            .set_op(OP)
            .set_flags(FLAGS)
            .set_flag(FLAG)
            .set_buf_alloc(BUF_ALLOC)
            .set_fwd_cnt(FWD_CNT);

        // Getters read the cached copy.
        assert_eq!(packet.flags(), FLAGS | FLAG);
        assert_eq!(packet.op(), OP);
        assert_eq!(packet.type_(), TYPE);
        assert_eq!(packet.dst_cid(), DST_CID);
        assert_eq!(packet.dst_port(), DST_PORT);
        assert_eq!(packet.src_cid(), SRC_CID);
        assert_eq!(packet.src_port(), SRC_PORT);
        assert_eq!(packet.fwd_cnt(), FWD_CNT);
        assert_eq!(packet.len(), LEN);
        assert_eq!(packet.buf_alloc(), BUF_ALLOC);

        let expected_header = PacketHeader {
            src_cid: SRC_CID.into(),
            dst_cid: DST_CID.into(),
            src_port: SRC_PORT.into(),
            dst_port: DST_PORT.into(),
            len: LEN.into(),
            type_: TYPE.into(),
            op: OP.into(),
            flags: (FLAGS | FLAG).into(),
            buf_alloc: BUF_ALLOC.into(),
            fwd_cnt: FWD_CNT.into(),
        };

        assert_eq!(packet.header, expected_header);
        // The backing slice holds little-endian bytes at the field offsets.
        assert_eq!(
            u64::from_le(
                packet
                    .header_slice()
                    .read_obj::<u64>(SRC_CID_OFFSET)
                    .unwrap()
            ),
            SRC_CID
        );
        assert_eq!(
            u64::from_le(
                packet
                    .header_slice()
                    .read_obj::<u64>(DST_CID_OFFSET)
                    .unwrap()
            ),
            DST_CID
        );
        assert_eq!(
            u32::from_le(
                packet
                    .header_slice()
                    .read_obj::<u32>(SRC_PORT_OFFSET)
                    .unwrap()
            ),
            SRC_PORT
        );
        assert_eq!(
            u32::from_le(
                packet
                    .header_slice()
                    .read_obj::<u32>(DST_PORT_OFFSET)
                    .unwrap()
            ),
            DST_PORT,
        );
        assert_eq!(
            u32::from_le(packet.header_slice().read_obj::<u32>(LEN_OFFSET).unwrap()),
            LEN
        );
        assert_eq!(
            u16::from_le(packet.header_slice().read_obj::<u16>(TYPE_OFFSET).unwrap()),
            TYPE
        );
        assert_eq!(
            u16::from_le(packet.header_slice().read_obj::<u16>(OP_OFFSET).unwrap()),
            OP
        );
        assert_eq!(
            u32::from_le(packet.header_slice().read_obj::<u32>(FLAGS_OFFSET).unwrap()),
            FLAGS | FLAG
        );
        assert_eq!(
            u32::from_le(
                packet
                    .header_slice()
                    .read_obj::<u32>(BUF_ALLOC_OFFSET)
                    .unwrap()
            ),
            BUF_ALLOC
        );
        assert_eq!(
            u32::from_le(
                packet
                    .header_slice()
                    .read_obj::<u32>(FWD_CNT_OFFSET)
                    .unwrap()
            ),
            FWD_CNT
        );
    }
1393
    // Verifies `set_header_from_raw`: a correctly sized byte slice updates
    // both the backing memory and the cached copy; a wrong size is rejected.
    #[test]
    fn test_set_header_from_raw() {
        let mem: GuestMemoryMmap =
            GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x30_0000)]).unwrap();
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x10_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                0x20_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let queue = MockSplitQueue::new(&mem, 16);
        let mut chain = queue.build_desc_chain(&v).unwrap();

        let mut packet =
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap();

        let header = PacketHeader {
            src_cid: SRC_CID.into(),
            dst_cid: DST_CID.into(),
            src_port: SRC_PORT.into(),
            dst_port: DST_PORT.into(),
            len: LEN.into(),
            type_: TYPE.into(),
            op: OP.into(),
            flags: (FLAGS | FLAG).into(),
            buf_alloc: BUF_ALLOC.into(),
            fwd_cnt: FWD_CNT.into(),
        };

        // View the packed header as raw bytes (no padding: repr(C, packed)).
        let slice = unsafe {
            std::slice::from_raw_parts(
                (&header as *const PacketHeader) as *const u8,
                std::mem::size_of::<PacketHeader>(),
            )
        };
        assert_eq!(packet.header, PacketHeader::default());
        packet.set_header_from_raw(slice).unwrap();
        assert_eq!(packet.header, header);
        let header_from_slice: PacketHeader = packet.header_slice().read_obj(0).unwrap();
        assert_eq!(header_from_slice, header);

        // One byte short of a full header must be rejected.
        let invalid_slice = [0; PKT_HEADER_SIZE - 1];
        assert_eq!(
            packet.set_header_from_raw(&invalid_slice).unwrap_err(),
            Error::InvalidHeaderInputSize(PKT_HEADER_SIZE - 1)
        );
    }
1451
    // Verifies `VsockPacket::new` over plain byte buffers: slice pointers and
    // lengths, the defaulted cached header, and the size validation.
    #[test]
    fn test_packet_new() {
        let mut pkt_raw = [0u8; PKT_HEADER_SIZE + LEN as usize];
        let (hdr_raw, data_raw) = pkt_raw.split_at_mut(PKT_HEADER_SIZE);
        // SAFETY (test): the buffers outlive the packet within this function.
        let packet = unsafe { VsockPacket::new(hdr_raw, Some(data_raw)).unwrap() };
        assert_eq!(
            packet.header_slice.ptr_guard().as_ptr(),
            hdr_raw.as_mut_ptr(),
        );
        assert_eq!(packet.header_slice.len(), PKT_HEADER_SIZE);
        assert_eq!(packet.header, PacketHeader::default());
        assert_eq!(
            packet.data_slice.unwrap().ptr_guard().as_ptr(),
            data_raw.as_mut_ptr(),
        );
        assert_eq!(packet.data_slice.unwrap().len(), LEN as usize);

        // A header-only packet carries no data slice.
        let packet = unsafe { VsockPacket::new(hdr_raw, None).unwrap() };
        assert_eq!(
            packet.header_slice.ptr_guard().as_ptr(),
            hdr_raw.as_mut_ptr(),
        );
        assert_eq!(packet.header, PacketHeader::default());
        assert!(packet.data_slice.is_none());

        // A header buffer of the wrong size must be rejected.
        let mut hdr_raw = [0u8; PKT_HEADER_SIZE - 1];
        assert_eq!(
            unsafe { VsockPacket::new(&mut hdr_raw, None).unwrap_err() },
            Error::InvalidHeaderInputSize(PKT_HEADER_SIZE - 1)
        );
    }
1488
    // Documents the panic contract of `set_header_field!`: an offset past the
    // header slice makes the internal `write(...).unwrap()` panic.
    #[test]
    #[should_panic]
    fn test_set_header_field_with_invalid_offset() {
        // Past the end of the 44-byte header slice.
        const INVALID_OFFSET: usize = 50;

        let mem: GuestMemoryMmap =
            GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x30_0000)]).unwrap();
        let v = vec![
            RawDescriptor::from(SplitDescriptor::new(
                0x10_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                0x20_0000,
                0x100,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        let queue = MockSplitQueue::new(&mem, 16);
        let mut chain = queue.build_desc_chain(&v).unwrap();

        let mut packet =
            VsockPacket::from_rx_virtq_chain(&mem, &mut chain, MAX_PKT_BUF_SIZE).unwrap();
        set_header_field!(packet, src_cid, INVALID_OFFSET, SRC_CID);
    }
1519}