use super::device::*;
use crate::vmm::net_config::NetConfig;
use virtio_bindings::virtio_config::{
VIRTIO_CONFIG_S_DRIVER, VIRTIO_CONFIG_S_FEATURES_OK, VIRTIO_F_VERSION_1,
};
use virtio_bindings::virtio_ids::VIRTIO_ID_NET;
use virtio_bindings::virtio_mmio::{
VIRTIO_MMIO_DEVICE_FEATURES, VIRTIO_MMIO_DEVICE_FEATURES_SEL, VIRTIO_MMIO_DEVICE_ID,
VIRTIO_MMIO_DRIVER_FEATURES, VIRTIO_MMIO_DRIVER_FEATURES_SEL, VIRTIO_MMIO_INT_VRING,
VIRTIO_MMIO_INTERRUPT_ACK, VIRTIO_MMIO_INTERRUPT_STATUS, VIRTIO_MMIO_MAGIC_VALUE,
VIRTIO_MMIO_QUEUE_AVAIL_LOW, VIRTIO_MMIO_QUEUE_DESC_LOW, VIRTIO_MMIO_QUEUE_NOTIFY,
VIRTIO_MMIO_QUEUE_NUM, VIRTIO_MMIO_QUEUE_NUM_MAX, VIRTIO_MMIO_QUEUE_READY,
VIRTIO_MMIO_QUEUE_SEL, VIRTIO_MMIO_QUEUE_USED_LOW, VIRTIO_MMIO_STATUS, VIRTIO_MMIO_VENDOR_ID,
VIRTIO_MMIO_VERSION,
};
use virtio_bindings::virtio_net::VIRTIO_NET_F_MAC;
use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
/// Reads a 32-bit little-endian MMIO register from the device.
fn read_reg(dev: &VirtioNet, offset: u32) -> u32 {
    let mut raw = [0u8; 4];
    dev.mmio_read(u64::from(offset), &mut raw);
    u32::from_le_bytes(raw)
}
/// Writes `val` to a 32-bit little-endian MMIO register.
fn write_reg(dev: &mut VirtioNet, offset: u32, val: u32) {
    let bytes = val.to_le_bytes();
    dev.mmio_write(u64::from(offset), &bytes);
}
/// Drives the virtio status handshake up to (and including) the FEATURES_OK
/// write, accepting MAC in the low feature word and VERSION_1 in the high one.
fn init_until_features_ok(dev: &mut VirtioNet) {
    // ACKNOWLEDGE then DRIVER, per the virtio initialization sequence.
    write_reg(dev, VIRTIO_MMIO_STATUS, S_ACK);
    write_reg(dev, VIRTIO_MMIO_STATUS, S_DRV);
    // Low 32 feature bits: accept VIRTIO_NET_F_MAC.
    write_reg(dev, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
    write_reg(dev, VIRTIO_MMIO_DRIVER_FEATURES, 1u32 << VIRTIO_NET_F_MAC);
    // High 32 feature bits: accept VIRTIO_F_VERSION_1 (overall bit 32).
    write_reg(dev, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
    let ver1_hi = 1u32 << (VIRTIO_F_VERSION_1 - 32);
    write_reg(dev, VIRTIO_MMIO_DRIVER_FEATURES, ver1_hi);
    write_reg(dev, VIRTIO_MMIO_STATUS, S_FEAT);
}
#[test]
fn magic_version_device_id() {
    let net = VirtioNet::new(NetConfig::default());
    // 0x7472_6976 is "virt" in little-endian ASCII.
    assert_eq!(read_reg(&net, VIRTIO_MMIO_MAGIC_VALUE), 0x7472_6976);
    // Version 2 identifies the modern (non-legacy) virtio-mmio transport.
    assert_eq!(read_reg(&net, VIRTIO_MMIO_VERSION), 2);
    assert_eq!(read_reg(&net, VIRTIO_MMIO_DEVICE_ID), VIRTIO_ID_NET);
    assert_eq!(read_reg(&net, VIRTIO_MMIO_VENDOR_ID), 0);
}
#[test]
fn device_features_advertises_version_1_and_mac() {
    let mut net = VirtioNet::new(NetConfig::default());
    // Read both 32-bit device feature words and stitch them into one u64.
    write_reg(&mut net, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 0);
    let low = read_reg(&net, VIRTIO_MMIO_DEVICE_FEATURES);
    write_reg(&mut net, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 1);
    let high = read_reg(&net, VIRTIO_MMIO_DEVICE_FEATURES);
    let feats = (u64::from(high) << 32) | u64::from(low);
    assert_ne!(
        feats & (1u64 << VIRTIO_F_VERSION_1),
        0,
        "VIRTIO_F_VERSION_1 must be advertised (forces 12-byte mrg_rxbuf hdr)",
    );
    assert_ne!(
        feats & (1u64 << VIRTIO_NET_F_MAC),
        0,
        "VIRTIO_NET_F_MAC must be advertised (deterministic MAC)",
    );
}
#[test]
fn device_features_does_not_advertise_unsupported_bits() {
    use virtio_bindings::virtio_net::{
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_MQ, VIRTIO_NET_F_MRG_RXBUF,
        VIRTIO_NET_F_MTU, VIRTIO_NET_F_STATUS,
    };
    let mut net = VirtioNet::new(NetConfig::default());
    // Assemble the full 64-bit device feature set from the two words.
    write_reg(&mut net, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 0);
    let low = read_reg(&net, VIRTIO_MMIO_DEVICE_FEATURES);
    write_reg(&mut net, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 1);
    let high = read_reg(&net, VIRTIO_MMIO_DEVICE_FEATURES);
    let feats = (u64::from(high) << 32) | u64::from(low);
    // None of these feature bits may appear in the v0 offer.
    let forbidden = [
        (VIRTIO_NET_F_CSUM, "CSUM"),
        (VIRTIO_NET_F_MRG_RXBUF, "MRG_RXBUF"),
        (VIRTIO_NET_F_STATUS, "STATUS"),
        (VIRTIO_NET_F_CTRL_VQ, "CTRL_VQ"),
        (VIRTIO_NET_F_MQ, "MQ"),
        (VIRTIO_NET_F_MTU, "MTU"),
    ];
    for (bit, name) in forbidden {
        assert_eq!(
            feats & (1u64 << bit),
            0,
            "v0 must not advertise VIRTIO_NET_F_{name}",
        );
    }
}
#[test]
fn status_state_machine_walks_phases() {
    let mut net = VirtioNet::new(NetConfig::default());
    // A fresh device starts with a cleared status register.
    assert_eq!(read_reg(&net, VIRTIO_MMIO_STATUS), 0);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_ACK);
    assert_eq!(read_reg(&net, VIRTIO_MMIO_STATUS), S_ACK);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_DRV);
    assert_eq!(read_reg(&net, VIRTIO_MMIO_STATUS), S_DRV);
    // Jumping straight to DRIVER_OK (skipping FEATURES_OK) must not stick.
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    assert_eq!(
        read_reg(&net, VIRTIO_MMIO_STATUS),
        S_DRV,
        "skip FEATURES_OK must be rejected"
    );
    // Dropping back to ACKNOWLEDGE-only would clear the DRIVER bit.
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_ACK);
    assert_eq!(
        read_reg(&net, VIRTIO_MMIO_STATUS),
        S_DRV,
        "clearing DRIVER bit must be rejected"
    );
}
#[test]
fn status_skip_acknowledge_rejected() {
    let mut net = VirtioNet::new(NetConfig::default());
    // DRIVER as the very first status write skips the ACKNOWLEDGE phase.
    write_reg(&mut net, VIRTIO_MMIO_STATUS, VIRTIO_CONFIG_S_DRIVER);
    assert_eq!(
        read_reg(&net, VIRTIO_MMIO_STATUS),
        0,
        "DRIVER without prior ACKNOWLEDGE must be rejected"
    );
}
#[test]
fn status_reset_via_zero() {
    let mut net = VirtioNet::new(NetConfig::default());
    init_until_features_ok(&mut net);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    assert_eq!(read_reg(&net, VIRTIO_MMIO_STATUS), S_OK);
    // Writing zero is the virtio reset request: status must read back as 0.
    write_reg(&mut net, VIRTIO_MMIO_STATUS, 0);
    assert_eq!(read_reg(&net, VIRTIO_MMIO_STATUS), 0);
}
#[test]
fn driver_features_gated_by_status() {
    let mut net = VirtioNet::new(NetConfig::default());
    // Garbage feature write before DRIVER is set; if this were latched, the
    // unoffered 0xDEAD bits would make the FEATURES_OK check below fail.
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES, 0xDEAD);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_ACK);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_DRV);
    // After DRIVER: accept VERSION_1 in the high feature word.
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
    let ver1_hi = 1u32 << (VIRTIO_F_VERSION_1 - 32);
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES, ver1_hi);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_FEAT);
    // FEATURES_OK sticks, proving the early write was ignored.
    assert_eq!(read_reg(&net, VIRTIO_MMIO_STATUS), S_FEAT);
}
#[test]
fn features_ok_rejected_without_version_1() {
    use virtio_bindings::virtio_config::VIRTIO_CONFIG_S_FAILED;
    let mut net = VirtioNet::new(NetConfig::default());
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_ACK);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_DRV);
    // Accept only MAC in the low word and never touch the high word, so
    // VIRTIO_F_VERSION_1 (bit 32) is left unaccepted.
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES, 1u32 << VIRTIO_NET_F_MAC);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_FEAT);
    let status = read_reg(&net, VIRTIO_MMIO_STATUS);
    assert_eq!(
        status & VIRTIO_CONFIG_S_FEATURES_OK,
        0,
        "FEATURES_OK must NOT be set when VERSION_1 is missing",
    );
    assert_ne!(
        status & VIRTIO_CONFIG_S_FAILED,
        0,
        "FAILED bit must be set when the driver fails to negotiate VERSION_1",
    );
}
#[test]
fn queue_config_rejected_after_driver_ok() {
    let mut net = VirtioNet::new(NetConfig::default());
    init_until_features_ok(&mut net);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    // Once DRIVER_OK is latched, queue registers must be frozen.
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_SEL, 0);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NUM, 64);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_READY, 1);
    assert_eq!(
        read_reg(&net, VIRTIO_MMIO_QUEUE_READY),
        0,
        "queue config writes after DRIVER_OK must not take effect"
    );
}
#[test]
fn queue_num_max_is_256_for_both_queues() {
    let mut net = VirtioNet::new(NetConfig::default());
    // Both real queues (index 0 and 1) report the same maximum size.
    for queue in [0u32, 1] {
        write_reg(&mut net, VIRTIO_MMIO_QUEUE_SEL, queue);
        assert_eq!(
            read_reg(&net, VIRTIO_MMIO_QUEUE_NUM_MAX),
            QUEUE_MAX_SIZE as u32
        );
    }
    // Selecting a queue index beyond the device's queues reports max=0.
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_SEL, 2);
    assert_eq!(
        read_reg(&net, VIRTIO_MMIO_QUEUE_NUM_MAX),
        0,
        "queue index >= NUM_QUEUES must report max=0"
    );
}
#[test]
fn config_space_serves_mac_at_offset_0() {
    let mac = [0x52, 0x54, 0x00, 0x12, 0x34, 0x56];
    let net = VirtioNet::new(NetConfig::default().mac(mac));
    // Config space starts at MMIO offset 0x100; the MAC sits at its base.
    let mut got = [0u8; 6];
    net.mmio_read(0x100, &mut got);
    assert_eq!(
        got, mac,
        "config offset 0x100 must serve the configured MAC bytes",
    );
}
#[test]
fn config_space_serves_zeros_past_layout() {
    let net = VirtioNet::new(NetConfig::default());
    // Just past the populated config layout.
    let mut near = [0u8; 4];
    net.mmio_read(0x100 + VIRTIO_NET_CONFIG_SIZE as u64, &mut near);
    assert_eq!(near, [0, 0, 0, 0], "reads past populated layout return zero");
    // Well past the populated config layout.
    let mut far = [0u8; 8];
    net.mmio_read(0x100 + VIRTIO_NET_CONFIG_SIZE as u64 + 16, &mut far);
    assert_eq!(far, [0u8; 8], "reads far past populated layout return zero");
}
#[test]
fn config_space_mac_byte_order_matches_kernel_uapi() {
    let mac = [0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF];
    let net = VirtioNet::new(NetConfig::default().mac(mac));
    // Byte 0 of config space is mac[0] and byte 5 is mac[5] — no swapping.
    let mut first = [0u8; 1];
    net.mmio_read(0x100, &mut first);
    assert_eq!(first[0], 0xAA);
    let mut last = [0u8; 1];
    net.mmio_read(0x100 + 5, &mut last);
    assert_eq!(last[0], 0xFF);
}
#[test]
fn config_space_writes_silently_ignored() {
    let mac = [0x02, 0x00, 0x00, 0x00, 0x00, 0x01];
    let mut net = VirtioNet::new(NetConfig::default().mac(mac));
    // Attempt to clobber the MAC through config space.
    net.mmio_write(0x100, &[0xff, 0xff, 0xff, 0xff]);
    let mut got = [0u8; 6];
    net.mmio_read(0x100, &mut got);
    assert_eq!(
        got, mac,
        "config-space writes must be silently ignored (device is not driver-configurable)",
    );
}
#[test]
fn interrupt_ack_clears_bits() {
    let mut net = VirtioNet::new(NetConfig::default());
    let (gmem, layout) = build_test_memory();
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    // One TX frame plus one RX buffer: the loopback delivery raises INT_VRING.
    place_tx_chain(&gmem, &layout, &payload_42_bytes());
    place_rx_chain(&gmem, &layout);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let raised = read_reg(&net, VIRTIO_MMIO_INTERRUPT_STATUS);
    assert_ne!(
        raised & VIRTIO_MMIO_INT_VRING,
        0,
        "INT_VRING must be set after a successful loopback delivery"
    );
    // Acknowledge exactly that bit; it must drop out of the status register.
    write_reg(&mut net, VIRTIO_MMIO_INTERRUPT_ACK, VIRTIO_MMIO_INT_VRING);
    assert_eq!(
        read_reg(&net, VIRTIO_MMIO_INTERRUPT_STATUS) & VIRTIO_MMIO_INT_VRING,
        0,
        "INT_VRING must be cleared after ACK"
    );
}
// Guest-physical layout for the loopback tests: 1 MiB of RAM with each
// virtqueue structure and data buffer parked on its own disjoint page.
// (Fix: the first two consts were jammed onto one line, violating rustfmt.)
const GUEST_MEM_SIZE: usize = 0x10_0000;
const TX_DESC_TABLE_BASE: u64 = 0x1000;
const TX_AVAIL_RING_BASE: u64 = 0x2000;
const TX_USED_RING_BASE: u64 = 0x3000;
const TX_HEADER_BUF: u64 = 0x4000;
const TX_FRAME_BUF: u64 = 0x5000;
const RX_DESC_TABLE_BASE: u64 = 0x6000;
const RX_AVAIL_RING_BASE: u64 = 0x7000;
const RX_USED_RING_BASE: u64 = 0x8000;
const RX_BUF: u64 = 0x9000;
/// Guest-physical addresses of the TX/RX virtqueue structures and data
/// buffers used by the loopback tests (populated from the `*_BASE` consts).
struct TestLayout {
    /// TX descriptor table base.
    tx_desc: u64,
    /// TX available ring base.
    tx_avail: u64,
    /// TX used ring base.
    tx_used: u64,
    /// Standalone TX header buffer (currently unused by the chains, hence
    /// the dead_code allowance).
    #[allow(dead_code)]
    tx_hdr_buf: u64,
    /// TX buffer holding the virtio-net header followed by the frame payload.
    tx_frame_buf: u64,
    /// RX descriptor table base.
    rx_desc: u64,
    /// RX available ring base.
    rx_avail: u64,
    /// RX used ring base.
    rx_used: u64,
    /// RX data buffer the device writes delivered frames into.
    rx_buf: u64,
}
/// Builds the canonical guest-memory layout from the `*_BASE` constants.
fn test_layout() -> TestLayout {
    TestLayout {
        rx_desc: RX_DESC_TABLE_BASE,
        rx_avail: RX_AVAIL_RING_BASE,
        rx_used: RX_USED_RING_BASE,
        rx_buf: RX_BUF,
        tx_desc: TX_DESC_TABLE_BASE,
        tx_avail: TX_AVAIL_RING_BASE,
        tx_used: TX_USED_RING_BASE,
        tx_hdr_buf: TX_HEADER_BUF,
        tx_frame_buf: TX_FRAME_BUF,
    }
}
/// Allocates a flat guest memory map of `GUEST_MEM_SIZE` bytes starting at
/// GPA 0 and pairs it with the canonical test layout.
fn build_test_memory() -> (GuestMemoryMmap, TestLayout) {
    let ranges = [(GuestAddress(0), GUEST_MEM_SIZE)];
    let gmem = GuestMemoryMmap::<()>::from_ranges(&ranges).unwrap();
    (gmem, test_layout())
}
/// Programs both virtqueues (size 4) with the layout's ring addresses and
/// marks each ready. Only the *_LOW address registers are written; every ring
/// in the test layout sits below 4 GiB.
fn program_queues(dev: &mut VirtioNet, layout: &TestLayout) {
    // RX first, then TX — same order the tests have always used.
    let queues = [
        (RXQ as u32, layout.rx_desc, layout.rx_avail, layout.rx_used),
        (TXQ as u32, layout.tx_desc, layout.tx_avail, layout.tx_used),
    ];
    for (sel, desc, avail, used) in queues {
        write_reg(dev, VIRTIO_MMIO_QUEUE_SEL, sel);
        write_reg(dev, VIRTIO_MMIO_QUEUE_NUM, 4);
        write_reg(dev, VIRTIO_MMIO_QUEUE_DESC_LOW, desc as u32);
        write_reg(dev, VIRTIO_MMIO_QUEUE_AVAIL_LOW, avail as u32);
        write_reg(dev, VIRTIO_MMIO_QUEUE_USED_LOW, used as u32);
        write_reg(dev, VIRTIO_MMIO_QUEUE_READY, 1);
    }
}
/// Serializes one 16-byte virtq descriptor (addr/len/flags/next, all
/// little-endian) into slot `idx` of the table at `table_base`.
fn write_desc(
    mem: &GuestMemoryMmap,
    table_base: u64,
    idx: u16,
    addr: u64,
    len: u32,
    flags: u16,
    next: u16,
) {
    let mut desc = [0u8; 16];
    desc[..8].copy_from_slice(&addr.to_le_bytes());
    desc[8..12].copy_from_slice(&len.to_le_bytes());
    desc[12..14].copy_from_slice(&flags.to_le_bytes());
    desc[14..].copy_from_slice(&next.to_le_bytes());
    let slot = table_base + u64::from(idx) * 16;
    mem.write_slice(&desc, GuestAddress(slot)).unwrap();
}
/// Publishes descriptor chain `head_idx` at slot `ring_pos` of an avail ring,
/// then bumps the ring's idx field to `ring_pos + 1`.
fn publish_avail(mem: &GuestMemoryMmap, avail_base: u64, head_idx: u16, ring_pos: u16) {
    // Split-ring avail layout: flags (u16), idx (u16), then ring[] of u16.
    let slot = avail_base + 4 + u64::from(ring_pos) * 2;
    mem.write_slice(&head_idx.to_le_bytes(), GuestAddress(slot))
        .unwrap();
    let new_idx = ring_pos + 1;
    mem.write_slice(&new_idx.to_le_bytes(), GuestAddress(avail_base + 2))
        .unwrap();
}
/// Builds a 42-byte payload of ascending values 0..=41.
fn payload_42_bytes() -> Vec<u8> {
    let mut payload = Vec::with_capacity(42);
    for byte in 0..42u8 {
        payload.push(byte);
    }
    payload
}
/// Stages a single-descriptor TX chain — a zeroed virtio-net header followed
/// by `payload` — and publishes it at avail slot 0.
fn place_tx_chain(mem: &GuestMemoryMmap, layout: &TestLayout, payload: &[u8]) {
    let hdr = [0u8; VIRTIO_NET_HDR_LEN];
    mem.write_slice(&hdr, GuestAddress(layout.tx_frame_buf))
        .unwrap();
    let frame_gpa = layout.tx_frame_buf + VIRTIO_NET_HDR_LEN as u64;
    mem.write_slice(payload, GuestAddress(frame_gpa)).unwrap();
    let chain_len = (VIRTIO_NET_HDR_LEN + payload.len()) as u32;
    // flags=0: device-readable, no NEXT link.
    write_desc(mem, layout.tx_desc, 0, layout.tx_frame_buf, chain_len, 0, 0);
    publish_avail(mem, layout.tx_avail, 0, 0);
}
/// Stages one 256-byte device-writable RX buffer (flags=2 is the WRITE bit)
/// and publishes it at avail slot 0.
fn place_rx_chain(mem: &GuestMemoryMmap, layout: &TestLayout) {
    write_desc(mem, layout.rx_desc, 0, layout.rx_buf, 256, 2, 0);
    publish_avail(mem, layout.rx_avail, 0, 0);
}
#[test]
fn loopback_delivers_tx_payload_to_rx_with_zero_header() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    let payload = payload_42_bytes();
    place_tx_chain(&gmem, &layout, &payload);
    place_rx_chain(&gmem, &layout);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    // Pull back what the device wrote into the RX buffer.
    let mut delivered = vec![0u8; VIRTIO_NET_HDR_LEN + payload.len()];
    gmem.read_slice(&mut delivered, GuestAddress(layout.rx_buf))
        .unwrap();
    // Expected header: all zeroes except num_buffers=1 (LE u16 at offset 10).
    let mut expected_hdr = [0u8; VIRTIO_NET_HDR_LEN];
    expected_hdr[10..12].copy_from_slice(&1u16.to_le_bytes());
    assert_eq!(
        &delivered[..VIRTIO_NET_HDR_LEN],
        &expected_hdr,
        "RX virtio header must be zero-filled with num_buffers=1 LE u16 at offset 10"
    );
    assert_eq!(
        &delivered[VIRTIO_NET_HDR_LEN..],
        payload.as_slice(),
        "RX frame bytes must match the TX payload"
    );
    // Counter cross-check: one packet each way; byte counts exclude the header.
    let stats = net.counters();
    assert_eq!(stats.tx_packets(), 1);
    assert_eq!(stats.rx_packets(), 1);
    assert_eq!(stats.tx_bytes(), payload.len() as u64);
    assert_eq!(stats.rx_bytes(), payload.len() as u64);
    assert_eq!(stats.tx_chain_invalid(), 0);
    assert_eq!(stats.rx_chain_invalid(), 0);
    assert_eq!(stats.tx_dropped_no_rx_buffer(), 0);
}
#[test]
fn loopback_drops_tx_when_rx_queue_empty() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    // Stage a TX frame but deliberately publish no RX buffer.
    let payload = payload_42_bytes();
    place_tx_chain(&gmem, &layout, &payload);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let stats = net.counters();
    assert_eq!(
        stats.tx_packets(),
        1,
        "TX add_used succeeded → tx_packets bumps"
    );
    assert_eq!(stats.tx_bytes(), payload.len() as u64);
    assert_eq!(stats.rx_packets(), 0, "no RX delivery when queue empty");
    assert_eq!(stats.rx_bytes(), 0);
    assert_eq!(
        stats.tx_dropped_no_rx_buffer(),
        1,
        "must record TX drop when RX queue empty"
    );
    assert_eq!(
        stats.rx_chain_invalid(),
        0,
        "queue was empty, not malformed"
    );
    assert_eq!(stats.tx_add_used_failures(), 0);
    assert_eq!(stats.rx_add_used_failures(), 0);
    let kicks = net.irq_evt().read().unwrap_or(0);
    assert_eq!(kicks, 1, "TX add_used advance must produce one irqfd kick");
}
#[test]
fn tx_chain_with_only_header_produces_zero_frame_loopback() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    // The TX chain is exactly one virtio-net header long: a zero-byte frame.
    let hdr = [0u8; VIRTIO_NET_HDR_LEN];
    gmem.write_slice(&hdr, GuestAddress(layout.tx_frame_buf))
        .unwrap();
    let hdr_len = VIRTIO_NET_HDR_LEN as u32;
    write_desc(&gmem, layout.tx_desc, 0, layout.tx_frame_buf, hdr_len, 0, 0);
    publish_avail(&gmem, layout.tx_avail, 0, 0);
    place_rx_chain(&gmem, &layout);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    // Counted as a packet on both sides, but with zero payload bytes.
    let stats = net.counters();
    assert_eq!(stats.tx_packets(), 1);
    assert_eq!(stats.rx_packets(), 1);
    assert_eq!(stats.tx_bytes(), 0);
    assert_eq!(stats.rx_bytes(), 0);
    assert_eq!(stats.tx_chain_invalid(), 0);
}
#[test]
fn tx_chain_shorter_than_header_marked_invalid() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    // An 8-byte chain cannot even hold the virtio-net header.
    write_desc(&gmem, layout.tx_desc, 0, layout.tx_frame_buf, 8, 0, 0);
    publish_avail(&gmem, layout.tx_avail, 0, 0);
    place_rx_chain(&gmem, &layout);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let stats = net.counters();
    assert_eq!(stats.tx_chain_invalid(), 1);
    assert_eq!(stats.tx_packets(), 0);
}
#[test]
fn tx_chain_with_write_only_descriptor_marked_invalid() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    // flags=2 (the WRITE bit) is the wrong direction for a TX descriptor.
    write_desc(&gmem, layout.tx_desc, 0, layout.tx_frame_buf, 100, 2, 0);
    publish_avail(&gmem, layout.tx_avail, 0, 0);
    place_rx_chain(&gmem, &layout);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let stats = net.counters();
    assert_eq!(stats.tx_chain_invalid(), 1);
    assert_eq!(stats.tx_packets(), 0);
}
#[test]
fn rx_chain_with_read_only_descriptor_marked_invalid() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    place_tx_chain(&gmem, &layout, &payload_42_bytes());
    // flags=0 makes the RX descriptor device-readable — the wrong direction
    // for a queue the device must write into.
    write_desc(&gmem, layout.rx_desc, 0, layout.rx_buf, 256, 0, 0);
    publish_avail(&gmem, layout.rx_avail, 0, 0);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let stats = net.counters();
    assert_eq!(
        stats.rx_chain_invalid(),
        1,
        "RX direction violation must be flagged"
    );
    assert_eq!(
        stats.rx_write_failed(),
        0,
        "shape rejection must NOT also bump rx_write_failed",
    );
    assert_eq!(
        stats.tx_dropped_no_rx_buffer(),
        0,
        "must NOT also bump tx_dropped_no_rx_buffer (RX queue was non-empty, just malformed)",
    );
    assert_eq!(
        stats.tx_packets(),
        1,
        "TX add_used succeeded → tx_packets bumps"
    );
    assert_eq!(stats.rx_packets(), 0, "no successful RX delivery");
    assert_eq!(
        stats.rx_add_used_failures(),
        0,
        "RX add_used succeeded (recycled with len=0)"
    );
    let kicks = net.irq_evt().read().unwrap_or(0);
    assert_eq!(
        kicks, 1,
        "used-ring advance from RX recycle + TX completion must trigger one coalesced kick",
    );
}
#[test]
fn rx_chain_with_unmapped_gpa_bumps_rx_write_failed() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    place_tx_chain(&gmem, &layout, &payload_42_bytes());
    // A well-shaped, writable RX descriptor whose buffer address points past
    // the end of guest memory.
    let bad_gpa = (GUEST_MEM_SIZE as u64) + 0x1000;
    write_desc(&gmem, layout.rx_desc, 0, bad_gpa, 256, 2, 0);
    publish_avail(&gmem, layout.rx_avail, 0, 0);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let stats = net.counters();
    assert_eq!(
        stats.rx_write_failed(),
        1,
        "GPA-unmapped header write must bump rx_write_failed",
    );
    assert_eq!(
        stats.rx_chain_invalid(),
        0,
        "GPA write failure must NOT also bump rx_chain_invalid (chain shape was valid)",
    );
    assert_eq!(
        stats.tx_dropped_no_rx_buffer(),
        0,
        "RX queue was non-empty (just write-broken) — must NOT bump tx_dropped_no_rx_buffer",
    );
    assert_eq!(
        stats.tx_packets(),
        1,
        "TX add_used succeeded → tx_packets bumps",
    );
    assert_eq!(stats.rx_packets(), 0, "no successful RX delivery");
    assert_eq!(
        stats.rx_add_used_failures(),
        0,
        "RX recycle add_used succeeded — used-ring is in mapped memory, only the descriptor's payload GPA was unmapped",
    );
    let kicks = net.irq_evt().read().unwrap_or(0);
    assert_eq!(
        kicks, 1,
        "TX completion + RX recycle used-ring advances coalesce into a single irqfd kick",
    );
}
#[test]
fn rx_chain_with_unmapped_gpa_on_frame_bumps_rx_write_failed() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    place_tx_chain(&gmem, &layout, &payload_42_bytes());
    // Two-descriptor RX chain: a valid writable header descriptor
    // (flags NEXT|WRITE = 1|2) linking to a frame descriptor whose buffer
    // lies past the end of guest memory.
    let bad_gpa = (GUEST_MEM_SIZE as u64) + 0x1000;
    let hdr_len = VIRTIO_NET_HDR_LEN as u32;
    write_desc(&gmem, layout.rx_desc, 0, layout.rx_buf, hdr_len, 1 | 2, 1);
    write_desc(&gmem, layout.rx_desc, 1, bad_gpa, 256, 2, 0);
    publish_avail(&gmem, layout.rx_avail, 0, 0);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let stats = net.counters();
    assert_eq!(
        stats.rx_write_failed(),
        1,
        "frame-walk write_slice failure must bump rx_write_failed",
    );
    assert_eq!(
        stats.rx_chain_invalid(),
        0,
        "frame-walk write failure must NOT bump rx_chain_invalid (chain shape was valid)",
    );
    assert_eq!(stats.rx_packets(), 0, "no successful RX delivery");
    assert_eq!(stats.tx_packets(), 1);
}
#[test]
fn rx_write_failed_initially_zero() {
    // A freshly constructed device starts with rx_write_failed at zero.
    let net = VirtioNet::new(NetConfig::default());
    assert_eq!(net.counters().rx_write_failed(), 0);
}
/// Two TX chains published under a single avail-ring idx bump must both be
/// delivered into the two waiting RX buffers on one QUEUE_NOTIFY kick.
#[test]
fn loopback_two_frames_in_one_kick() {
    let (mem, layout) = build_test_memory();
    let mut dev = VirtioNet::new(NetConfig::default());
    dev.set_mem(mem.clone());
    init_until_features_ok(&mut dev);
    program_queues(&mut dev, &layout);
    write_reg(&mut dev, VIRTIO_MMIO_STATUS, S_OK);
    // TX chain 0: zeroed virtio-net header + payload0 at the standard frame buffer.
    let payload0: Vec<u8> = (10..30u8).collect();
    let zero_hdr = [0u8; VIRTIO_NET_HDR_LEN];
    mem.write_slice(&zero_hdr, GuestAddress(layout.tx_frame_buf))
        .unwrap();
    mem.write_slice(
        &payload0,
        GuestAddress(layout.tx_frame_buf + VIRTIO_NET_HDR_LEN as u64),
    )
    .unwrap();
    write_desc(
        &mem,
        layout.tx_desc,
        0,
        layout.tx_frame_buf,
        (VIRTIO_NET_HDR_LEN + payload0.len()) as u32,
        0,
        0,
    );
    // TX chain 1: zeroed header + payload1, 0x800 bytes past chain 0's buffer.
    let chain1_buf = layout.tx_frame_buf + 0x800;
    let payload1: Vec<u8> = (50..70u8).collect();
    mem.write_slice(&zero_hdr, GuestAddress(chain1_buf))
        .unwrap();
    mem.write_slice(
        &payload1,
        GuestAddress(chain1_buf + VIRTIO_NET_HDR_LEN as u64),
    )
    .unwrap();
    write_desc(
        &mem,
        layout.tx_desc,
        1,
        chain1_buf,
        (VIRTIO_NET_HDR_LEN + payload1.len()) as u32,
        0,
        0,
    );
    // Publish both TX heads (ring[0]=0, ring[1]=1) and set avail.idx=2 in one
    // shot, so a single notify finds two pending chains.
    let avail_idx_off = layout.tx_avail + 2;
    let ring_off = layout.tx_avail + 4;
    mem.write_slice(&0u16.to_le_bytes(), GuestAddress(ring_off))
        .unwrap();
    mem.write_slice(&1u16.to_le_bytes(), GuestAddress(ring_off + 2))
        .unwrap();
    mem.write_slice(&2u16.to_le_bytes(), GuestAddress(avail_idx_off))
        .unwrap();
    // Two writable RX buffers (flags=2), published the same way (avail.idx=2).
    write_desc(&mem, layout.rx_desc, 0, layout.rx_buf, 256, 2, 0);
    write_desc(&mem, layout.rx_desc, 1, layout.rx_buf + 0x400, 256, 2, 0);
    let avail_idx_off = layout.rx_avail + 2;
    let ring_off = layout.rx_avail + 4;
    mem.write_slice(&0u16.to_le_bytes(), GuestAddress(ring_off))
        .unwrap();
    mem.write_slice(&1u16.to_le_bytes(), GuestAddress(ring_off + 2))
        .unwrap();
    mem.write_slice(&2u16.to_le_bytes(), GuestAddress(avail_idx_off))
        .unwrap();
    // One kick drains both chains.
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let counters = dev.counters();
    assert_eq!(counters.tx_packets(), 2);
    assert_eq!(counters.rx_packets(), 2);
    assert_eq!(
        counters.tx_bytes(),
        (payload0.len() + payload1.len()) as u64
    );
    // Frame 0 landed in the first RX buffer...
    let mut rx0 = vec![0u8; VIRTIO_NET_HDR_LEN + payload0.len()];
    mem.read_slice(&mut rx0, GuestAddress(layout.rx_buf))
        .unwrap();
    assert_eq!(&rx0[VIRTIO_NET_HDR_LEN..], payload0.as_slice());
    // ...and frame 1 in the second.
    let mut rx1 = vec![0u8; VIRTIO_NET_HDR_LEN + payload1.len()];
    mem.read_slice(&mut rx1, GuestAddress(layout.rx_buf + 0x400))
        .unwrap();
    assert_eq!(&rx1[VIRTIO_NET_HDR_LEN..], payload1.as_slice());
}
#[test]
fn loopback_emits_single_irqfd_kick_for_drain() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    place_tx_chain(&gmem, &layout, &payload_42_bytes());
    place_rx_chain(&gmem, &layout);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    // The eventfd counter records how many kicks were written.
    let kicks = net.irq_evt().read().unwrap();
    assert_eq!(
        kicks, 1,
        "single drain must produce exactly one irqfd write"
    );
}
#[test]
fn reset_clears_state_but_preserves_counters() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    place_tx_chain(&gmem, &layout, &payload_42_bytes());
    place_rx_chain(&gmem, &layout);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    assert_eq!(net.counters().tx_packets(), 1);
    // Status write of zero resets the device state...
    write_reg(&mut net, VIRTIO_MMIO_STATUS, 0);
    assert_eq!(read_reg(&net, VIRTIO_MMIO_STATUS), 0);
    // ...but the telemetry counters must keep their values.
    assert_eq!(
        net.counters().tx_packets(),
        1,
        "counters survive reset (operator-observability invariant)",
    );
}
#[test]
fn non_4byte_register_read_returns_ff() {
    let net = VirtioNet::new(NetConfig::default());
    // A 2-byte register read is an invalid access width: answered all-ones.
    let mut half = [0u8; 2];
    net.mmio_read(0, &mut half);
    assert_eq!(half, [0xff, 0xff]);
}
#[test]
fn non_4byte_register_write_ignored() {
    let mut net = VirtioNet::new(NetConfig::default());
    // A 2-byte register write is an invalid access width: must be dropped.
    net.mmio_write(VIRTIO_MMIO_STATUS as u64, &[0x01, 0x00]);
    assert_eq!(read_reg(&net, VIRTIO_MMIO_STATUS), 0);
}
#[test]
fn unknown_register_returns_zero() {
    // 0xC0 is not a defined virtio-mmio register offset.
    let net = VirtioNet::new(NetConfig::default());
    assert_eq!(read_reg(&net, 0xC0), 0);
}
#[test]
fn unknown_register_write_ignored() {
    let mut net = VirtioNet::new(NetConfig::default());
    // Writing an undefined offset must not disturb device state.
    write_reg(&mut net, 0xC0, 0xDEAD);
    assert_eq!(read_reg(&net, VIRTIO_MMIO_STATUS), 0);
}
#[test]
fn tx_add_used_failures_initially_zero() {
    // A fresh device starts with both add_used failure counters at zero.
    let net = VirtioNet::new(NetConfig::default());
    let stats = net.counters();
    assert_eq!(stats.tx_add_used_failures(), 0);
    assert_eq!(stats.rx_add_used_failures(), 0);
}
#[test]
fn tx_add_used_failures_distinct_from_tx_chain_invalid() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    // Write-only TX descriptor (flags=2): the chain shape is invalid while
    // the used-ring machinery itself stays healthy.
    write_desc(&gmem, layout.tx_desc, 0, layout.tx_frame_buf, 100, 2, 0);
    publish_avail(&gmem, layout.tx_avail, 0, 0);
    place_rx_chain(&gmem, &layout);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let stats = net.counters();
    assert_eq!(
        stats.tx_chain_invalid(),
        1,
        "chain-shape rejection counted"
    );
    assert_eq!(
        stats.tx_add_used_failures(),
        0,
        "add_used succeeded; queue-state counter must NOT bump",
    );
    assert_eq!(stats.rx_add_used_failures(), 0);
}
#[test]
fn features_ok_rejected_when_driver_accepts_unoffered_bit() {
    use virtio_bindings::virtio_config::VIRTIO_CONFIG_S_FAILED;
    use virtio_bindings::virtio_net::VIRTIO_NET_F_MQ;
    let mut net = VirtioNet::new(NetConfig::default());
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_ACK);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_DRV);
    // MQ is never offered by this device; accepting it violates the subset
    // rule even though VERSION_1 is also accepted below.
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES, 1u32 << VIRTIO_NET_F_MQ);
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
    let ver1_hi = 1u32 << (VIRTIO_F_VERSION_1 - 32);
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES, ver1_hi);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_FEAT);
    let status = read_reg(&net, VIRTIO_MMIO_STATUS);
    assert_eq!(
        status & VIRTIO_CONFIG_S_FEATURES_OK,
        0,
        "FEATURES_OK must NOT be set when driver accepts an unoffered bit",
    );
    assert_ne!(
        status & VIRTIO_CONFIG_S_FAILED,
        0,
        "FAILED bit must be set on subset-rule violation",
    );
}
#[test]
fn features_ok_accepted_with_only_offered_bits() {
    use virtio_bindings::virtio_config::VIRTIO_CONFIG_S_FAILED;
    use virtio_bindings::virtio_net::VIRTIO_NET_F_MAC;
    let mut net = VirtioNet::new(NetConfig::default());
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_ACK);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_DRV);
    // MAC (low word) and VERSION_1 (high word) are both offered; a clean
    // subset must pass the FEATURES_OK gate.
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES, 1u32 << VIRTIO_NET_F_MAC);
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
    let ver1_hi = 1u32 << (VIRTIO_F_VERSION_1 - 32);
    write_reg(&mut net, VIRTIO_MMIO_DRIVER_FEATURES, ver1_hi);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_FEAT);
    let status = read_reg(&net, VIRTIO_MMIO_STATUS);
    assert_ne!(
        status & VIRTIO_CONFIG_S_FEATURES_OK,
        0,
        "FEATURES_OK must be set when driver accepts only offered bits",
    );
    assert_eq!(
        status & VIRTIO_CONFIG_S_FAILED,
        0,
        "FAILED bit must NOT be set on a clean subset",
    );
}
#[test]
fn tx_chain_with_address_overflow_dropped_gracefully() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    // addr + len wraps past u64::MAX — a hostile descriptor the device must
    // reject without panicking.
    write_desc(&gmem, layout.tx_desc, 0, u64::MAX - 11, 24, 0, 0);
    publish_avail(&gmem, layout.tx_avail, 0, 0);
    place_rx_chain(&gmem, &layout);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let stats = net.counters();
    assert_eq!(
        stats.tx_chain_invalid(),
        1,
        "GPA-overflow chain must bump tx_chain_invalid (graceful drop)",
    );
    assert_eq!(stats.tx_packets(), 0, "no TX completion on overflow");
    assert_eq!(stats.rx_packets(), 0, "no RX delivery on dropped chain");
}
#[test]
fn tx_kick_before_driver_ok_ignored() {
    let (gmem, layout) = build_test_memory();
    let mut net = VirtioNet::new(NetConfig::default());
    net.set_mem(gmem.clone());
    init_until_features_ok(&mut net);
    program_queues(&mut net, &layout);
    place_tx_chain(&gmem, &layout, &payload_42_bytes());
    place_rx_chain(&gmem, &layout);
    // Kick while the device is still at FEATURES_OK: it must stay inert.
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let stats = net.counters();
    assert_eq!(
        stats.tx_packets(),
        0,
        "kick before DRIVER_OK must not advance counters"
    );
    assert_eq!(stats.rx_packets(), 0);
    assert_eq!(stats.tx_chain_invalid(), 0);
    assert_eq!(stats.rx_chain_invalid(), 0);
    assert_eq!(stats.tx_dropped_no_rx_buffer(), 0);
    assert_eq!(stats.tx_add_used_failures(), 0);
    assert_eq!(stats.rx_add_used_failures(), 0);
    assert!(
        net.irq_evt().read().is_err(),
        "no irqfd kick when device is pre-DRIVER_OK"
    );
    // After DRIVER_OK the same queued chain is processed on the next kick.
    write_reg(&mut net, VIRTIO_MMIO_STATUS, S_OK);
    write_reg(&mut net, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
    let stats = net.counters();
    assert_eq!(
        stats.tx_packets(),
        1,
        "post-DRIVER_OK kick processes the queued chain",
    );
}