#![cfg(test)]
use super::device::*;
use crate::vmm::net_config::NetConfig;
use virtio_bindings::virtio_config::{VIRTIO_CONFIG_S_NEEDS_RESET, VIRTIO_F_VERSION_1};
use virtio_bindings::virtio_mmio::{
VIRTIO_MMIO_DRIVER_FEATURES, VIRTIO_MMIO_DRIVER_FEATURES_SEL, VIRTIO_MMIO_INT_CONFIG,
VIRTIO_MMIO_INTERRUPT_STATUS, VIRTIO_MMIO_QUEUE_AVAIL_LOW, VIRTIO_MMIO_QUEUE_DESC_LOW,
VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_QUEUE_NUM, VIRTIO_MMIO_QUEUE_READY,
VIRTIO_MMIO_QUEUE_SEL, VIRTIO_MMIO_QUEUE_USED_LOW, VIRTIO_MMIO_STATUS,
};
use virtio_bindings::virtio_net::VIRTIO_NET_F_MAC;
use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
// Guest-physical layout for the poison tests. Everything lives in one 1 MiB
// identity-mapped range. A 256-entry descriptor table is exactly 4 KiB
// (256 * 16), so TX_DESC_BASE..TX_AVAIL_BASE holds the whole TX table.
const GUEST_MEM_SIZE: usize = 0x10_0000;
const TX_DESC_BASE: u64 = 0x1000;
const TX_AVAIL_BASE: u64 = 0x2000;
const TX_USED_BASE: u64 = 0x3000;
// Single TX frame buffer (virtio-net header + payload, contiguous).
const TX_FRAME_BUF: u64 = 0x4000;
const RX_DESC_BASE: u64 = 0x6000;
const RX_AVAIL_BASE: u64 = 0x7000;
const RX_USED_BASE: u64 = 0x8000;
const RX_BUF: u64 = 0x9000;
// Queue depth programmed into both rings.
const QUEUE_SIZE: u16 = 256;
// Split-vring descriptor flag: buffer is device-writable (virtio spec value 2).
const VRING_DESC_F_WRITE: u16 = 2;
/// Read a 32-bit little-endian value from a device MMIO register.
fn read_reg(dev: &VirtioNet, offset: u32) -> u32 {
    let mut bytes = [0u8; 4];
    dev.mmio_read(u64::from(offset), &mut bytes);
    u32::from_le_bytes(bytes)
}
/// Write a 32-bit value to a device MMIO register, little-endian.
fn write_reg(dev: &mut VirtioNet, offset: u32, val: u32) {
    let bytes = val.to_le_bytes();
    dev.mmio_write(u64::from(offset), &bytes);
}
/// Drive the device through the virtio status handshake up to FEATURES_OK:
/// ACKNOWLEDGE -> DRIVER -> feature negotiation -> FEATURES_OK. Stops short
/// of DRIVER_OK so callers can program queues first.
fn init_until_features_ok(dev: &mut VirtioNet) {
    write_reg(dev, VIRTIO_MMIO_STATUS, S_ACK);
    write_reg(dev, VIRTIO_MMIO_STATUS, S_DRV);
    // Low 32 feature bits: accept only VIRTIO_NET_F_MAC.
    write_reg(dev, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
    write_reg(dev, VIRTIO_MMIO_DRIVER_FEATURES, 1u32 << VIRTIO_NET_F_MAC);
    // High 32 feature bits: accept VIRTIO_F_VERSION_1 (overall bit 32, so
    // shifted down by 32 for the upper selector word).
    write_reg(dev, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
    let hi_features = 1u32 << (VIRTIO_F_VERSION_1 - 32);
    write_reg(dev, VIRTIO_MMIO_DRIVER_FEATURES, hi_features);
    write_reg(dev, VIRTIO_MMIO_STATUS, S_FEAT);
}
/// Program both virtqueues (RX first, then TX) with their ring addresses and
/// mark each ready. Only the low address halves are written; every ring in
/// this fixture sits well below 4 GiB.
fn program_queues(dev: &mut VirtioNet) {
    let layouts = [
        (RXQ as u32, RX_DESC_BASE, RX_AVAIL_BASE, RX_USED_BASE),
        (TXQ as u32, TX_DESC_BASE, TX_AVAIL_BASE, TX_USED_BASE),
    ];
    for (queue, desc, avail, used) in layouts {
        write_reg(dev, VIRTIO_MMIO_QUEUE_SEL, queue);
        write_reg(dev, VIRTIO_MMIO_QUEUE_NUM, QUEUE_SIZE as u32);
        write_reg(dev, VIRTIO_MMIO_QUEUE_DESC_LOW, desc as u32);
        write_reg(dev, VIRTIO_MMIO_QUEUE_AVAIL_LOW, avail as u32);
        write_reg(dev, VIRTIO_MMIO_QUEUE_USED_LOW, used as u32);
        write_reg(dev, VIRTIO_MMIO_QUEUE_READY, 1);
    }
}
/// Build a fully initialised device plus its backing guest memory: features
/// negotiated, both queues programmed, and DRIVER_OK set.
fn build_fixture() -> (VirtioNet, GuestMemoryMmap) {
    let guest_mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), GUEST_MEM_SIZE)])
        .expect("create poison-test guest mem");
    let mut device = VirtioNet::new(NetConfig::default());
    device.set_mem(guest_mem.clone());
    init_until_features_ok(&mut device);
    program_queues(&mut device);
    write_reg(&mut device, VIRTIO_MMIO_STATUS, S_OK);
    (device, guest_mem)
}
/// Plant one 16-byte split-vring descriptor at slot `idx` of the table at
/// `table_base`. Layout is addr:u64 | len:u32 | flags:u16 | next:u16, all
/// little-endian.
fn write_desc(
    mem: &GuestMemoryMmap,
    table_base: u64,
    idx: u16,
    addr: u64,
    len: u32,
    flags: u16,
    next: u16,
) {
    let slot = table_base + u64::from(idx) * 16;
    let mut desc = [0u8; 16];
    desc[..8].copy_from_slice(&addr.to_le_bytes());
    desc[8..12].copy_from_slice(&len.to_le_bytes());
    desc[12..14].copy_from_slice(&flags.to_le_bytes());
    desc[14..].copy_from_slice(&next.to_le_bytes());
    mem.write_slice(&desc, GuestAddress(slot))
        .expect("plant descriptor");
}
/// Stage one single-descriptor TX chain: a zeroed virtio-net header followed
/// by a 12-byte 0xAA payload, published through avail ring slot 0 with
/// avail.idx = 1.
fn place_tx_chain(mem: &GuestMemoryMmap) {
    // Frame = zeroed header + payload, contiguous starting at TX_FRAME_BUF.
    mem.write_slice(&[0u8; VIRTIO_NET_HDR_LEN], GuestAddress(TX_FRAME_BUF))
        .unwrap();
    let payload = [0xAAu8; 12];
    let payload_gpa = GuestAddress(TX_FRAME_BUF + VIRTIO_NET_HDR_LEN as u64);
    mem.write_slice(&payload, payload_gpa).unwrap();
    // One device-readable descriptor covering header + payload.
    let frame_len = (VIRTIO_NET_HDR_LEN + payload.len()) as u32;
    write_desc(mem, TX_DESC_BASE, 0, TX_FRAME_BUF, frame_len, 0, 0);
    // avail.ring[0] = 0 (ring entries start at offset 4), then publish by
    // writing avail.idx = 1 (offset 2).
    mem.write_slice(&0u16.to_le_bytes(), GuestAddress(TX_AVAIL_BASE + 4))
        .unwrap();
    mem.write_slice(&1u16.to_le_bytes(), GuestAddress(TX_AVAIL_BASE + 2))
        .unwrap();
}
/// Stage one device-writable 64-byte RX buffer, published through avail ring
/// slot 0 with avail.idx = 1.
fn place_rx_chain(mem: &GuestMemoryMmap) {
    write_desc(mem, RX_DESC_BASE, 0, RX_BUF, 64, VRING_DESC_F_WRITE, 0);
    // avail.ring[0] = 0 (ring entries start at offset 4), then publish by
    // writing avail.idx = 1 (offset 2).
    mem.write_slice(&0u16.to_le_bytes(), GuestAddress(RX_AVAIL_BASE + 4))
        .unwrap();
    mem.write_slice(&1u16.to_le_bytes(), GuestAddress(RX_AVAIL_BASE + 2))
        .unwrap();
}
/// Overwrite avail.idx (offset 2 into the avail ring) with a hostile value
/// far beyond anything actually published, simulating a malicious guest.
fn poison_avail_idx(mem: &GuestMemoryMmap, avail_base: u64, bogus_idx: u16) {
    let idx_gpa = GuestAddress(avail_base + 2);
    mem.write_obj(bogus_idx, idx_gpa)
        .expect("plant bogus avail.idx");
}
// Full TX poison lifecycle: a hostile avail.idx on the TX queue must bump the
// counter once, set NEEDS_RESET + INT_CONFIG, and fire the irqfd; re-kicks of
// the poisoned queue are inert; a STATUS=0 reset clears the poison state and
// lets the queue service normally again (counter stays cumulative).
#[test]
fn tx_hostile_avail_idx_poisons_queue_and_signals() {
let (mut dev, mem) = build_fixture();
// --- Phase 1: baseline — fresh device has a clean counter and no
// NEEDS_RESET / INT_CONFIG bits set. ---
assert_eq!(
dev.counters().invalid_avail_idx_count(),
0,
"fresh device must have zero InvalidAvailRingIndex events",
);
assert_eq!(
read_reg(&dev, VIRTIO_MMIO_STATUS) & VIRTIO_CONFIG_S_NEEDS_RESET,
0,
"fresh device must not have NEEDS_RESET set",
);
assert_eq!(
read_reg(&dev, VIRTIO_MMIO_INTERRUPT_STATUS) & VIRTIO_MMIO_INT_CONFIG,
0,
"fresh device must not have INT_CONFIG set",
);
// --- Phase 2: stage valid chains, then corrupt TX avail.idx (1000 is far
// past the single published entry) and kick TX. ---
place_tx_chain(&mem);
place_rx_chain(&mem);
poison_avail_idx(&mem, TX_AVAIL_BASE, 1000);
let pre_tx_packets = dev.counters().tx_packets();
write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
// Poison detection: counter bumps exactly once, nothing is serviced, and the
// device signals NEEDS_RESET + INT_CONFIG + irqfd.
assert_eq!(
dev.counters().invalid_avail_idx_count(),
1,
"first hostile-idx kick must bump invalid_avail_idx_count exactly once",
);
assert_eq!(
dev.counters().tx_packets(),
pre_tx_packets,
"no TX must be serviced — the poisoned queue is structurally broken",
);
assert_ne!(
read_reg(&dev, VIRTIO_MMIO_STATUS) & VIRTIO_CONFIG_S_NEEDS_RESET,
0,
"queue-poison path must set VIRTIO_CONFIG_S_NEEDS_RESET",
);
assert_ne!(
read_reg(&dev, VIRTIO_MMIO_INTERRUPT_STATUS) & VIRTIO_MMIO_INT_CONFIG,
0,
"queue-poison path must set VIRTIO_MMIO_INT_CONFIG \
alongside NEEDS_RESET (spec-compliant config-interrupt \
pairing)",
);
// Note: this read also drains the eventfd counter, so the later is_err()
// check below observes only whether the re-kick fired a NEW signal.
assert!(
dev.irq_evt().read().is_ok(),
"queue-poison path must signal irq_evt; a missed write would \
prevent the guest's vm_interrupt from running",
);
// --- Phase 3: re-kick the already-poisoned queue — must be a no-op. ---
write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
assert_eq!(
dev.counters().invalid_avail_idx_count(),
1,
"subsequent kicks against a poisoned queue MUST NOT \
re-bump the counter — the queue_poisoned gate \
short-circuits before iter()",
);
assert!(
dev.irq_evt().read().is_err(),
"re-kick of a poisoned queue must NOT re-fire the irqfd \
— the poison gate short-circuits before signal_queue_poisoned",
);
assert_ne!(
read_reg(&dev, VIRTIO_MMIO_STATUS) & VIRTIO_CONFIG_S_NEEDS_RESET,
0,
"NEEDS_RESET stays set across re-kicks until reset",
);
// --- Phase 4: STATUS=0 reset clears NEEDS_RESET and INT_CONFIG but the
// event counter stays cumulative. ---
write_reg(&mut dev, VIRTIO_MMIO_STATUS, 0);
assert_eq!(
read_reg(&dev, VIRTIO_MMIO_STATUS) & VIRTIO_CONFIG_S_NEEDS_RESET,
0,
"STATUS=0 reset must clear NEEDS_RESET",
);
assert_eq!(
read_reg(&dev, VIRTIO_MMIO_INTERRUPT_STATUS) & VIRTIO_MMIO_INT_CONFIG,
0,
"STATUS=0 reset must clear INT_CONFIG (interrupt_status \
zeroed on reset)",
);
assert_eq!(
dev.counters().invalid_avail_idx_count(),
1,
"invalid_avail_idx_count is cumulative across reset; only \
the per-device poison flag clears",
);
// --- Phase 5: re-initialise from scratch, zero all ring indices in guest
// memory (the device re-reads them after reset), re-stage valid chains, and
// confirm the queue services normally again. ---
init_until_features_ok(&mut dev);
program_queues(&mut dev);
write_reg(&mut dev, VIRTIO_MMIO_STATUS, S_OK);
mem.write_obj(0u16, GuestAddress(TX_AVAIL_BASE + 2))
.unwrap();
mem.write_obj(0u16, GuestAddress(TX_USED_BASE + 2)).unwrap();
mem.write_obj(0u16, GuestAddress(RX_AVAIL_BASE + 2))
.unwrap();
mem.write_obj(0u16, GuestAddress(RX_USED_BASE + 2)).unwrap();
place_tx_chain(&mem);
place_rx_chain(&mem);
write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
assert_eq!(
dev.counters().tx_packets(),
pre_tx_packets + 1,
"post-reset chain must service normally — queue_poisoned cleared",
);
}
// RX-side poison: corrupt the RX queue's avail.idx and kick TX. Per the
// assertion messages below, the TX chain completes (add_used) before the RX
// poison is detected, so tx_packets bumps while rx_packets stays zero and the
// device still raises NEEDS_RESET + INT_CONFIG + irqfd.
#[test]
fn rx_hostile_avail_idx_poisons_queue_and_signals() {
let (mut dev, mem) = build_fixture();
place_tx_chain(&mem);
place_rx_chain(&mem);
poison_avail_idx(&mem, RX_AVAIL_BASE, 1000);
let pre_invalid = dev.counters().invalid_avail_idx_count();
// Kick TX — NOTE(review): the TX notify path evidently also drives RX
// processing (loopback-style), which is where the poisoned RX idx is seen.
write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
assert_eq!(
dev.counters().invalid_avail_idx_count(),
pre_invalid + 1,
"RX-side hostile avail.idx must bump invalid_avail_idx_count exactly once",
);
assert_eq!(
dev.counters().tx_packets(),
1,
"TX add_used succeeded before the poison-signal bail; \
tx_packets must bump",
);
assert_eq!(
dev.counters().rx_packets(),
0,
"RX poison prevents delivery; rx_packets must stay zero",
);
assert_ne!(
read_reg(&dev, VIRTIO_MMIO_STATUS) & VIRTIO_CONFIG_S_NEEDS_RESET,
0,
"RX poison must set NEEDS_RESET",
);
assert_ne!(
read_reg(&dev, VIRTIO_MMIO_INTERRUPT_STATUS) & VIRTIO_MMIO_INT_CONFIG,
0,
"RX poison path must set INT_CONFIG alongside NEEDS_RESET",
);
// Draining read: clears the eventfd so the re-kick check below only sees a
// NEW signal, if any.
assert!(
dev.irq_evt().read().is_ok(),
"RX poison must signal irq_evt (signal_used + \
signal_queue_poisoned coalesced)",
);
// Re-kick: the poisoned RX queue must not re-count or re-signal.
write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
assert_eq!(
dev.counters().invalid_avail_idx_count(),
pre_invalid + 1,
"re-kick of a poisoned queue MUST NOT re-bump the counter",
);
assert!(
dev.irq_evt().read().is_err(),
"re-kick of a poisoned queue MUST NOT re-fire the irqfd",
);
}
// Poison isolation: with RX poisoned, the TX queue must keep servicing new
// chains — the poison flag is per-queue, and the counter only counts the
// false→true transition.
#[test]
fn rx_poison_does_not_halt_tx_progress() {
let (mut dev, mem) = build_fixture();
place_tx_chain(&mem);
place_rx_chain(&mem);
poison_avail_idx(&mem, RX_AVAIL_BASE, 1000);
// First kick: TX services its chain; RX poison is detected and counted.
write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
assert_eq!(dev.counters().tx_packets(), 1);
assert_eq!(dev.counters().invalid_avail_idx_count(), 1);
// Drain the eventfd so the read at the end counts only the second kick.
let _ = dev.irq_evt().read();
// Publish a second TX entry: avail.ring[1] = 0 (reusing descriptor 0),
// then avail.idx = 2.
mem.write_slice(&0u16.to_le_bytes(), GuestAddress(TX_AVAIL_BASE + 4 + 2))
.unwrap();
mem.write_slice(&2u16.to_le_bytes(), GuestAddress(TX_AVAIL_BASE + 2))
.unwrap();
let pre_tx = dev.counters().tx_packets();
let pre_invalid = dev.counters().invalid_avail_idx_count();
write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
assert_eq!(
dev.counters().tx_packets(),
pre_tx + 1,
"TX must continue servicing kicks even when RX is poisoned \
— per-queue poison flags isolate the failure",
);
assert_eq!(
dev.counters().invalid_avail_idx_count(),
pre_invalid,
"RxAlreadyPoisoned arm must NOT re-bump invalid_avail_idx_count \
— counter is event-once per false→true transition",
);
// eventfd reads return the accumulated counter; exactly one signal means
// signal_used fired once and signal_queue_poisoned did not fire again.
let kicks = dev.irq_evt().read().unwrap_or(0);
assert_eq!(
kicks, 1,
"TX completion in a kick where RX is already-poisoned must \
fire signal_used exactly once and signal_queue_poisoned \
zero times",
);
}
// Minimal RX-poison signal check plus reset: NEEDS_RESET and INT_CONFIG are
// raised together with an irqfd signal, and a STATUS=0 reset clears both.
#[test]
fn rx_poison_signal_sequence_sets_needs_reset_and_int_config() {
let (mut dev, mem) = build_fixture();
place_tx_chain(&mem);
place_rx_chain(&mem);
poison_avail_idx(&mem, RX_AVAIL_BASE, 1000);
// TX kick drives the RX path, which trips over the hostile avail.idx.
write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, TXQ as u32);
assert_ne!(
read_reg(&dev, VIRTIO_MMIO_STATUS) & VIRTIO_CONFIG_S_NEEDS_RESET,
0,
"NEEDS_RESET must be set",
);
assert_ne!(
read_reg(&dev, VIRTIO_MMIO_INTERRUPT_STATUS) & VIRTIO_MMIO_INT_CONFIG,
0,
"INT_CONFIG must be set alongside NEEDS_RESET",
);
assert!(dev.irq_evt().read().is_ok(), "irq_evt must be signaled",);
// Device reset: both sticky bits must clear.
write_reg(&mut dev, VIRTIO_MMIO_STATUS, 0);
assert_eq!(
read_reg(&dev, VIRTIO_MMIO_STATUS) & VIRTIO_CONFIG_S_NEEDS_RESET,
0,
);
assert_eq!(
read_reg(&dev, VIRTIO_MMIO_INTERRUPT_STATUS) & VIRTIO_MMIO_INT_CONFIG,
0,
);
}