#![cfg(test)]
#![allow(unused_imports)]
use super::testing::*;
use super::*;
use std::io::{Seek, Write};
use std::num::NonZeroU64;
use std::os::unix::fs::FileExt;
use std::sync::atomic::Ordering;
use std::time::Instant;
use tempfile::tempfile;
use virtio_bindings::bindings::virtio_ring::VRING_DESC_F_WRITE;
use virtio_queue::desc::{RawDescriptor, split::Descriptor as SplitDescriptor};
use virtio_queue::mock::MockSplitQueue;
use vm_memory::Address;
/// `VIRTIO_BLK_T_GET_ID` must never be short-circuited by the
/// pre-throttle classifier — on both writable and read-only disks it
/// returns `None` (falls through to the handler) and classification
/// alone never touches the error counters.
#[test]
fn classify_get_id_returns_none_for_both_modes() {
    let counters = VirtioBlkCounters::default();
    let cases = [
        (false, "writable disk: T_GET_ID falls through to handler"),
        (
            true,
            "read-only disk: T_GET_ID is metadata-read-only and \
             still falls through to handler",
        ),
    ];
    for (read_only, why) in cases {
        assert_eq!(
            VirtioBlk::classify_pre_throttle(VIRTIO_BLK_T_GET_ID, read_only, &counters),
            None,
            "{why}",
        );
    }
    assert_eq!(
        counters.io_errors.load(Ordering::Relaxed),
        0,
        "T_GET_ID classification never bumps io_errors",
    );
}
/// Happy path for the T_GET_ID handler: a device-writable data
/// segment of exactly `VIRTIO_BLK_ID_BYTES` receives the serial
/// string, the status byte reports OK, and `used_len` covers the
/// data plus the one status byte.
#[test]
fn handle_get_id_writes_serial_and_returns_ok() {
    let capacity = 4096u64;
    let backing = make_backed_file_with_pattern(capacity, 0x00);
    let dev = VirtioBlk::new(backing, capacity, DiskThrottle::default());
    let mem = make_guest_mem(16384);
    let data_addr = GuestAddress(0x1000);
    let status_addr = GuestAddress(0x2000);
    // Sentinel fill: a handler that failed to write would be caught below.
    let sentinel = [0xCDu8; VIRTIO_BLK_ID_BYTES as usize];
    mem.write_slice(&sentinel, data_addr).unwrap();
    let segs = vec![ChainDescriptor {
        addr: data_addr,
        len: VIRTIO_BLK_ID_BYTES,
        is_write_only: true,
    }];
    let (status, used) = dev.handle_get_id(&mem, &segs, status_addr);
    assert_eq!(status, VIRTIO_BLK_S_OK as u8);
    assert_eq!(
        used,
        VIRTIO_BLK_ID_BYTES + 1,
        "used_len = 20 data bytes + 1 status byte",
    );
    let mut serial = [0u8; VIRTIO_BLK_ID_BYTES as usize];
    mem.read_slice(&mut serial, data_addr).unwrap();
    assert_eq!(
        serial, VIRTIO_BLK_SERIAL,
        "data segment must hold the device serial verbatim",
    );
    let mut status_byte = [0u8; 1];
    mem.read_slice(&mut status_byte, status_addr).unwrap();
    assert_eq!(status_byte[0], VIRTIO_BLK_S_OK as u8);
}
/// A data segment one byte shorter than the serial must be rejected
/// with IOERR rather than a truncated serial: `used_len` is 1 (status
/// byte only) and the rejection is accounted in `io_errors`.
#[test]
fn handle_get_id_rejects_short_buffer() {
    let capacity = 4096u64;
    let dev = VirtioBlk::new(
        make_backed_file_with_pattern(capacity, 0x00),
        capacity,
        DiskThrottle::default(),
    );
    let mem = make_guest_mem(16384);
    let data_addr = GuestAddress(0x1000);
    let status_addr = GuestAddress(0x2000);
    // One byte too small to hold the 20-byte serial.
    let segs = vec![ChainDescriptor {
        addr: data_addr,
        len: VIRTIO_BLK_ID_BYTES - 1,
        is_write_only: true,
    }];
    let (status, used) = dev.handle_get_id(&mem, &segs, status_addr);
    assert_eq!(
        status, VIRTIO_BLK_S_IOERR as u8,
        "sub-20-byte buffer must IOERR, not truncate",
    );
    assert_eq!(used, 1, "IOERR used_len is 1 (status byte only)");
    assert_eq!(
        dev.counters().io_errors.load(Ordering::Relaxed),
        1,
        "short buffer rejection bumps io_errors",
    );
}
/// A data descriptor that is not device-writable violates the GET_ID
/// direction contract; the handler must return IOERR and bump
/// `io_errors`.
#[test]
fn handle_get_id_rejects_readonly_data_segment() {
    let capacity = 4096u64;
    let dev = VirtioBlk::new(
        make_backed_file_with_pattern(capacity, 0x00),
        capacity,
        DiskThrottle::default(),
    );
    let mem = make_guest_mem(16384);
    let data_addr = GuestAddress(0x1000);
    let status_addr = GuestAddress(0x2000);
    let segs = vec![ChainDescriptor {
        addr: data_addr,
        len: VIRTIO_BLK_ID_BYTES,
        // Direction violation: data segment is device-readable only.
        is_write_only: false,
    }];
    let (status, _) = dev.handle_get_id(&mem, &segs, status_addr);
    assert_eq!(status, VIRTIO_BLK_S_IOERR as u8);
    assert_eq!(dev.counters().io_errors.load(Ordering::Relaxed), 1);
}
/// End-to-end T_GET_ID through the virtqueue: a 3-descriptor chain
/// (header / 20-byte writable data / status) is built in a mock split
/// queue and drained via QUEUE_NOTIFY. Verifies serial payload, OK
/// status, used-ring publication, and that no data-path counters move.
#[test]
fn process_requests_full_get_id_chain() {
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0x00);
    let mut dev = VirtioBlk::new(f, cap, DiskThrottle::default());
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    // Sentinel fill: proves the device actually overwrote the data segment.
    mem.write_slice(&[0xCDu8; VIRTIO_BLK_ID_BYTES as usize], data_addr)
        .unwrap();
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_GET_ID, 0);
    let descs = [
        // Device-readable request header (flags = 0).
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        // Device-writable data segment, exactly VIRTIO_BLK_ID_BYTES long.
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            VIRTIO_BLK_ID_BYTES,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        // Device-writable one-byte status.
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    // Kick: this synchronously drains the request queue.
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(s[0], VIRTIO_BLK_S_OK as u8);
    let mut buf = [0u8; VIRTIO_BLK_ID_BYTES as usize];
    mem.read_slice(&mut buf, data_addr).unwrap();
    assert_eq!(
        buf, VIRTIO_BLK_SERIAL,
        "T_GET_ID chain must populate data segment with device serial",
    );
    // used.idx lives 2 bytes into the used ring (right after flags).
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 1);
    let c = dev.counters();
    // GET_ID is neither a read, a write, nor a flush for accounting.
    assert_eq!(c.io_errors.load(Ordering::Relaxed), 0);
    assert_eq!(c.reads_completed.load(Ordering::Relaxed), 0);
    assert_eq!(c.writes_completed.load(Ordering::Relaxed), 0);
    assert_eq!(c.flushes_completed.load(Ordering::Relaxed), 0);
}
/// Full-queue T_GET_ID on a read-only disk (`with_options(.., true)`):
/// serial retrieval is RO-safe metadata and must complete with OK
/// status and zero io_errors.
#[test]
fn process_requests_get_id_succeeds_on_ro_disk() {
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0x00);
    // Last argument `true` marks the disk read-only.
    let mut dev = VirtioBlk::with_options(f, cap, DiskThrottle::default(), true);
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_GET_ID, 0);
    let descs = [
        // Header / writable data / writable status — well-formed GET_ID chain.
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            VIRTIO_BLK_ID_BYTES,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(
        s[0], VIRTIO_BLK_S_OK as u8,
        "RO disk must accept T_GET_ID — serial is RO-safe metadata",
    );
    let mut buf = [0u8; VIRTIO_BLK_ID_BYTES as usize];
    mem.read_slice(&mut buf, data_addr).unwrap();
    assert_eq!(buf, VIRTIO_BLK_SERIAL);
    let c = dev.counters();
    assert_eq!(c.io_errors.load(Ordering::Relaxed), 0);
}
/// Full-queue T_GET_ID with a data descriptor one byte too short:
/// the chain still completes (used.idx advances) but with IOERR status
/// and an io_errors bump.
#[test]
fn process_requests_get_id_short_buffer_returns_ioerr() {
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0x00);
    let mut dev = VirtioBlk::new(f, cap, DiskThrottle::default());
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_GET_ID, 0);
    // One byte short of the required 20-byte serial buffer.
    let short_len: u32 = VIRTIO_BLK_ID_BYTES - 1;
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            short_len,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(s[0], VIRTIO_BLK_S_IOERR as u8);
    // Errored chain is still published to the used ring.
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 1);
    let c = dev.counters();
    assert_eq!(c.io_errors.load(Ordering::Relaxed), 1);
}
/// T_GET_ID chain with NO data descriptor at all (header + status
/// only): must complete with IOERR, bump io_errors, and must not
/// consume a throttle token.
#[test]
fn process_requests_get_id_zero_data_returns_ioerr() {
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0x00);
    let mut dev = VirtioBlk::new(f, cap, DiskThrottle::default());
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    let header_addr = GuestAddress(0x4000);
    let status_addr = GuestAddress(0x5000);
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_GET_ID, 0);
    // Deliberately omit the data descriptor — chain is header + status.
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(s[0], VIRTIO_BLK_S_IOERR as u8);
    let c = dev.counters();
    assert_eq!(c.io_errors.load(Ordering::Relaxed), 1);
    assert_eq!(c.throttled_count.load(Ordering::Relaxed), 0);
}
/// T_GET_ID chain whose data descriptor lacks VRING_DESC_F_WRITE
/// (direction violation): must IOERR, bump io_errors, and — pinned
/// explicitly — not consume any throttle tokens.
#[test]
fn process_requests_get_id_readonly_data_returns_ioerr() {
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0x00);
    let mut dev = VirtioBlk::new(f, cap, DiskThrottle::default());
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_GET_ID, 0);
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        // Data descriptor with flags = 0, i.e. device-readable only.
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            VIRTIO_BLK_ID_BYTES,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(s[0], VIRTIO_BLK_S_IOERR as u8);
    let c = dev.counters();
    assert_eq!(c.io_errors.load(Ordering::Relaxed), 1);
    assert_eq!(
        c.throttled_count.load(Ordering::Relaxed),
        0,
        "direction violation must not consume throttle tokens",
    );
}
/// Pins the shape of the serial constant: exactly
/// `VIRTIO_BLK_ID_BYTES` (20) bytes — a 16-byte ASCII payload
/// followed by four NUL padding bytes.
#[test]
fn serial_constant_is_id_bytes_long() {
    assert_eq!(
        VIRTIO_BLK_SERIAL.len(),
        VIRTIO_BLK_ID_BYTES as usize,
        "serial must be exactly VIRTIO_BLK_ID_BYTES (20) bytes",
    );
    let (payload, padding) = VIRTIO_BLK_SERIAL.split_at(16);
    assert_eq!(payload, b"ktstr-virtio-blk", "serial payload prefix");
    assert_eq!(padding, &[0u8; 4], "trailing 4 bytes are NUL padding");
}
/// EVENT_IDX semantics across two separate drains: with the guest's
/// used_event planted at 2, drain 1 (publishes used.idx=1) must set
/// INTERRUPT_STATUS but suppress the irqfd; drain 2 (publishes up to
/// used.idx=3, crossing the threshold) must fire the irqfd exactly
/// once.
#[test]
fn event_idx_successive_drains_span_threshold() {
    let mem = make_chain_test_mem();
    let (mut dev, mock) = setup_blk(&mem, false, DiskThrottle::default());
    let qsize = 16u16;
    // used_event lives past avail.ring[qsize]; plant threshold = 2.
    let used_event = used_event_addr(mock.avail_addr(), qsize);
    mem.write_obj::<u16>(u16::to_le(2), used_event)
        .expect("plant used_event");
    dev.set_mem(mem.clone());
    // Used ring overridden to 0x10000 so used.idx can be read there.
    wire_device_to_mock_with_event_idx(&mut dev, &mock, qsize, GuestAddress(0x10000));
    {
        // Drain 1: a single T_IN (read) chain.
        let header_addr = GuestAddress(0x4000);
        let data_addr = GuestAddress(0x5000);
        let status_addr = GuestAddress(0x6000);
        write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
        let descs = [
            RawDescriptor::from(SplitDescriptor::new(
                header_addr.0,
                VIRTIO_BLK_OUTHDR_SIZE as u32,
                0,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                data_addr.0,
                512,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                status_addr.0,
                1,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        mock.build_desc_chain(&descs).expect("build chain 1");
    }
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let used_idx_after_d1: u16 = mem
        .read_obj(GuestAddress(0x10000).checked_add(2).unwrap())
        .expect("read used.idx after drain 1");
    assert_eq!(
        used_idx_after_d1, 1,
        "drain 1 must publish exactly one chain",
    );
    assert_ne!(
        dev.interrupt_status.load(Ordering::Acquire) & VIRTIO_MMIO_INT_VRING,
        0,
        "interrupt_status bit must be set after drain 1 \
         (V8 split: bit set independent of irqfd)",
    );
    let status = read_reg(&dev, VIRTIO_MMIO_INTERRUPT_STATUS);
    assert_eq!(status & 1, 1);
    // EVENT_IDX suppression: used.idx=1 has not crossed used_event=2.
    assert!(
        dev.irq_evt.read().is_err(),
        "drain 1 irqfd must be suppressed: next_used=1 < used_event=2",
    );
    // Drain 2: two more read chains at non-overlapping addresses.
    for i in 0..2u64 {
        let header_addr = GuestAddress(0x7000 + i * 0x1000);
        let data_addr = GuestAddress(0x9000 + i * 0x1000);
        let status_addr = GuestAddress(0xB000 + i * 0x100);
        write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
        let descs = [
            RawDescriptor::from(SplitDescriptor::new(
                header_addr.0,
                VIRTIO_BLK_OUTHDR_SIZE as u32,
                0,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                data_addr.0,
                512,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                status_addr.0,
                1,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        mock.build_desc_chain(&descs)
            .expect("build chain in drain 2");
    }
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    assert_eq!(
        dev.counters().reads_completed.load(Ordering::Relaxed),
        3,
        "1 chain in drain 1 + 2 chains in drain 2 = 3 total reads",
    );
    let used_idx_after_d2: u16 = mem
        .read_obj(GuestAddress(0x10000).checked_add(2).unwrap())
        .expect("read used.idx after drain 2");
    assert_eq!(
        used_idx_after_d2, 3,
        "used.idx must advance to 3 after both drains",
    );
    // Threshold (used_event=2) crossed during drain 2 → irqfd fires once.
    let val = dev
        .irq_evt
        .read()
        .expect("irq_evt must be readable after drain 2 crossed threshold");
    assert_eq!(
        val, 1,
        "drain 2 must fire the irqfd exactly once when used_event \
         threshold is crossed across multiple drains",
    );
}
/// Fail-safe behavior when `needs_notification` itself errors: the
/// avail ring is deliberately relocated so that its used_event word
/// falls exactly on an unmapped-GPA hole (0xA000..0xB000). The FLUSH
/// chain must still complete and publish, and the irqfd must fire via
/// the `unwrap_or(true)` fail-safe.
#[test]
fn event_idx_needs_notification_err_fires_irqfd_fail_safe() {
    use virtio_queue::QueueT;
    use vm_memory::Bytes;
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0xAB);
    let mut dev = VirtioBlk::new(f, cap, DiskThrottle::default());
    // Two regions with a hole at [0xA000, 0xB000).
    let mem =
        GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0xA000), (GuestAddress(0xB000), 0x40000)])
            .expect("create multi-region guest mem with avail-event hole");
    let qsize = 16u16;
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), qsize);
    // Place avail so used_event = avail + 4 + qsize*2 lands at 0xA000,
    // the first byte of the hole.
    let custom_avail = GuestAddress(0x9FDC);
    let custom_used_event = custom_avail
        .checked_add(4 + qsize as u64 * 2)
        .expect("custom used_event addr");
    assert_eq!(
        custom_used_event,
        GuestAddress(0xA000),
        "test layout error: custom used_event must land at hole boundary",
    );
    // Sanity-probe that the used_event GPA is really unmapped.
    let mut probe = [0u8; 2];
    assert!(
        mem.read_slice(&mut probe, custom_used_event).is_err(),
        "test layout error: custom used_event GPA must be unmapped",
    );
    let header_addr = GuestAddress(0x4000);
    let status_addr = GuestAddress(0x5000);
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_FLUSH, 0);
    // FLUSH chain: header + status only (no data segment).
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build flush chain");
    // Hand-build the relocated avail ring: flags=0, idx=1, ring[0]=0.
    mem.write_obj::<u16>(u16::to_le(0), custom_avail)
        .expect("plant custom avail.flags");
    mem.write_obj::<u16>(
        u16::to_le(1),
        custom_avail.checked_add(2).expect("idx addr"),
    )
    .expect("plant custom avail.idx");
    mem.write_obj::<u16>(
        u16::to_le(0),
        custom_avail.checked_add(4).expect("ring[0] addr"),
    )
    .expect("plant custom avail.ring[0]");
    dev.set_mem(mem.clone());
    wire_device_to_mock_with_event_idx(&mut dev, &mock, qsize, GuestAddress(0xB000));
    // Override the queue's avail ring to the address straddling the hole.
    dev.worker.queues[REQ_QUEUE].set_avail_ring_address(
        Some(custom_avail.0 as u32),
        Some((custom_avail.0 >> 32) as u32),
    );
    assert_eq!(
        dev.worker.queues[REQ_QUEUE].avail_ring(),
        custom_avail.0,
        "avail ring override did not take effect",
    );
    assert!(
        dev.irq_evt.read().is_err(),
        "irq_evt must not be signalled before notify",
    );
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let c = dev.counters();
    assert_eq!(
        c.flushes_completed.load(Ordering::Relaxed),
        1,
        "FLUSH chain must complete normally — failure is in \
         needs_notification, not request processing",
    );
    let used_idx: u16 = mem
        .read_obj(GuestAddress(0xB000).checked_add(2).unwrap())
        .expect("read used.idx at override addr");
    assert_eq!(
        used_idx, 1,
        "used.idx must advance to 1 — add_used path is independent \
         of needs_notification",
    );
    assert_ne!(
        dev.interrupt_status.load(Ordering::Acquire) & VIRTIO_MMIO_INT_VRING,
        0,
        "interrupt_status bit must be set after publish, even \
         when needs_notification fails",
    );
    let val = dev
        .irq_evt
        .read()
        .expect("irq_evt must fire fail-safe when needs_notification Err");
    assert_eq!(
        val, 1,
        "irq_evt must fire exactly once via unwrap_or(true) \
         when needs_notification returns Err",
    );
}
/// On the legacy (no EVENT_IDX) path, `disable_notification` /
/// `enable_notification` must toggle VRING_USED_F_NO_NOTIFY in
/// used.flags — checked through two full disable/enable cycles.
#[test]
fn legacy_disable_enable_notification_toggles_used_flags() {
    use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;
    let mem = make_chain_test_mem();
    let (mut dev, mock) = setup_blk(&mem, false, DiskThrottle::default());
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    use virtio_queue::QueueT;
    // Premise: this wiring must NOT negotiate EVENT_IDX, otherwise the
    // notification mechanism under test would be a different one.
    assert!(
        !dev.worker.queues[REQ_QUEUE].event_idx_enabled(),
        "wire_device_to_mock must produce a legacy-path queue \
         (no EVENT_IDX); test premise depends on it",
    );
    // used.flags is the first u16 of the used ring.
    let flags0: u16 = mem
        .read_obj(mock.used_addr())
        .expect("read initial used.flags");
    assert_eq!(flags0, 0, "mock initializes used.flags to 0",);
    dev.worker.queues[REQ_QUEUE]
        .disable_notification(&mem)
        .expect("disable_notification on legacy queue");
    let flags1: u16 = mem
        .read_obj(mock.used_addr())
        .expect("read used.flags after disable");
    assert_eq!(
        flags1, VRING_USED_F_NO_NOTIFY as u16,
        "legacy disable_notification must set VRING_USED_F_NO_NOTIFY \
         ({:#x}); got {:#x}",
        VRING_USED_F_NO_NOTIFY, flags1,
    );
    let re_drain = dev.worker.queues[REQ_QUEUE]
        .enable_notification(&mem)
        .expect("enable_notification on legacy queue");
    // enable_notification returns whether new chains appeared meanwhile.
    assert!(
        !re_drain,
        "no chains queued; enable_notification must return Ok(false)",
    );
    let flags2: u16 = mem
        .read_obj(mock.used_addr())
        .expect("read used.flags after enable");
    assert_eq!(
        flags2, 0,
        "legacy enable_notification must clear used.flags; got {:#x}",
        flags2,
    );
    // Second cycle: toggling must be repeatable, not one-shot.
    dev.worker.queues[REQ_QUEUE]
        .disable_notification(&mem)
        .expect("second disable");
    let flags3: u16 = mem
        .read_obj(mock.used_addr())
        .expect("read used.flags after second disable");
    assert_eq!(flags3, VRING_USED_F_NO_NOTIFY as u16);
    dev.worker.queues[REQ_QUEUE]
        .enable_notification(&mem)
        .expect("second enable");
    let flags4: u16 = mem
        .read_obj(mock.used_addr())
        .expect("read used.flags after second enable");
    assert_eq!(flags4, 0);
}
/// When the status byte cannot be written (status descriptor points
/// at an unmapped GPA), the device must NOT publish the chain to the
/// used ring, must NOT set INTERRUPT_STATUS, and must NOT signal the
/// irqfd — the chain is left for the guest's hung-task watchdog.
#[test]
fn status_write_failure_skips_add_used_and_irqfd() {
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0xAB);
    let mut dev = VirtioBlk::new(f, cap, DiskThrottle::default());
    // Hole at [0x20000, 0x30000): the status address is unmapped.
    let mem = GuestMemoryMmap::from_ranges(&[
        (GuestAddress(0), 0x20000),
        (GuestAddress(0x30000), 0x10000),
    ])
    .expect("create multi-region guest mem with status hole");
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    // status_addr is the first byte of the hole.
    let status_addr = GuestAddress(0x20000);
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
    // Sanity-probe both directions: the hole really is unmapped.
    let mut probe = [0u8; 1];
    assert!(
        mem.write_slice(&[0u8], status_addr).is_err(),
        "test layout error: status_addr must be unmapped",
    );
    assert!(
        mem.read_slice(&mut probe, status_addr).is_err(),
        "test layout error: status_addr must be unmapped",
    );
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            512,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        // Status descriptor targets the unmapped GPA.
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    assert!(
        dev.irq_evt.read().is_err(),
        "irq_evt must not be signalled before notify",
    );
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(
        used_idx, 0,
        "status-write failure must skip add_used; used.idx \
         must stay 0 so the chain remains in the avail ring \
         and the guest's hung-task watchdog \
         (kernel.hung_task_timeout_secs, default 120 s) \
         eventually fires — virtio_blk has no mq_ops->timeout",
    );
    let c = dev.counters();
    assert!(
        c.io_errors.load(Ordering::Relaxed) >= 1,
        "io_errors must be bumped on status-write failure; got {}",
        c.io_errors.load(Ordering::Relaxed),
    );
    assert!(
        dev.irq_evt.read().is_err(),
        "irq_evt must be unsignalled when publish_completion fails — \
         a chain the guest can't observe must NOT trigger an IRQ",
    );
    assert_eq!(
        dev.interrupt_status.load(Ordering::Acquire) & VIRTIO_MMIO_INT_VRING,
        0,
        "interrupt_status bit must stay 0 when no chain is \
         published — signal_needed remained false throughout",
    );
    assert_eq!(read_reg(&dev, VIRTIO_MMIO_INTERRUPT_STATUS) & 1, 0);
}
/// Two notifies back to back on the EVENT_IDX path: a chain enqueued
/// after the first drain finishes must be processed by the second
/// notify (no stranded chains), with used.idx advancing and the irqfd
/// firing once per drain as used_event is kept one behind.
#[test]
fn multi_notify_boundary_drains_subsequent_chain() {
    let mem = make_chain_test_mem();
    let (mut dev, mock) = setup_blk(&mem, false, DiskThrottle::default());
    dev.set_mem(mem.clone());
    let qsize = 16u16;
    let used_event = used_event_addr(mock.avail_addr(), qsize);
    // used_event=0 → the first published entry crosses the threshold.
    mem.write_obj::<u16>(u16::to_le(0), used_event)
        .expect("plant used_event=0 for drain 1");
    wire_device_to_mock_with_event_idx(&mut dev, &mock, qsize, GuestAddress(0x10000));
    {
        // Drain 1: one FLUSH chain (header + status, no data).
        let header_addr = GuestAddress(0x4000);
        let status_addr = GuestAddress(0x4100);
        write_blk_header(&mem, header_addr, VIRTIO_BLK_T_FLUSH, 0);
        let descs = [
            RawDescriptor::from(SplitDescriptor::new(
                header_addr.0,
                VIRTIO_BLK_OUTHDR_SIZE as u32,
                0,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                status_addr.0,
                1,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        mock.build_desc_chain(&descs).expect("build chain 1");
    }
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    assert_eq!(
        dev.counters().flushes_completed.load(Ordering::Relaxed),
        1,
        "drain 1 must complete the first FLUSH",
    );
    let used_idx_d1: u16 = mem
        .read_obj(GuestAddress(0x10000).checked_add(2).unwrap())
        .expect("read used.idx after drain 1");
    assert_eq!(used_idx_d1, 1);
    let val1 = dev.irq_evt.read().expect("drain 1 irqfd must fire");
    assert_eq!(val1, 1, "drain 1 fires exactly once");
    // Advance used_event to 1 so drain 2's publish (idx 2) crosses it.
    mem.write_obj::<u16>(u16::to_le(1), used_event)
        .expect("update used_event=1 for drain 2");
    {
        // Drain 2: a second FLUSH chain at fresh addresses.
        let header_addr = GuestAddress(0x5000);
        let status_addr = GuestAddress(0x5100);
        write_blk_header(&mem, header_addr, VIRTIO_BLK_T_FLUSH, 0);
        let descs = [
            RawDescriptor::from(SplitDescriptor::new(
                header_addr.0,
                VIRTIO_BLK_OUTHDR_SIZE as u32,
                0,
                0,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                status_addr.0,
                1,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        mock.build_desc_chain(&descs).expect("build chain 2");
    }
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    assert_eq!(
        dev.counters().flushes_completed.load(Ordering::Relaxed),
        2,
        "drain 2 must process the chain enqueued after drain 1 — \
         a stranded chain would leave flushes_completed at 1",
    );
    let used_idx_d2: u16 = mem
        .read_obj(GuestAddress(0x10000).checked_add(2).unwrap())
        .expect("read used.idx after drain 2");
    assert_eq!(
        used_idx_d2, 2,
        "used.idx must advance to 2 across the two notifies",
    );
    let val2 = dev.irq_evt.read().expect("drain 2 irqfd must fire");
    assert_eq!(
        val2, 1,
        "drain 2 fires the irqfd exactly once for the post-boundary chain",
    );
}
/// On the legacy path, the process_requests bracket (disable
/// notifications → drain → re-enable) must end with used.flags == 0:
/// VRING_USED_F_NO_NOTIFY may be set during the drain but must not
/// leak past it. Also pins that the legacy path fires irq_evt
/// unconditionally.
#[test]
fn legacy_process_requests_clears_used_flags_post_bracket() {
    use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;
    let mem = make_chain_test_mem();
    let (mut dev, mock) = setup_blk(&mem, false, DiskThrottle::default());
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
    // Standard T_IN chain: header / 512-byte writable data / status.
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            512,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    use virtio_queue::QueueT;
    // Premise check: this must be the legacy (no EVENT_IDX) path.
    assert!(
        !dev.worker.queues[REQ_QUEUE].event_idx_enabled(),
        "legacy wiring must not negotiate EVENT_IDX",
    );
    let flags_before: u16 = mem
        .read_obj(mock.used_addr())
        .expect("read used.flags before notify");
    assert_eq!(flags_before, 0, "mock initializes used.flags to 0",);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let flags_after: u16 = mem
        .read_obj(mock.used_addr())
        .expect("read used.flags after notify");
    assert_eq!(
        flags_after, 0,
        "process_requests bracket must end with used.flags=0; \
         VRING_USED_F_NO_NOTIFY ({:#x}) must NOT remain set after \
         enable_notification ran. Got {:#x}",
        VRING_USED_F_NO_NOTIFY, flags_after,
    );
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 1, "chain must complete normally (legacy path)",);
    assert_eq!(dev.counters().reads_completed.load(Ordering::Relaxed), 1,);
    let val = dev.irq_evt.read().expect("legacy path must fire irq_evt");
    assert_eq!(
        val, 1,
        "legacy path fires irq_evt unconditionally — pinned to \
         confirm the bracket didn't suppress on legacy",
    );
}
/// A throttle stall on the EVENT_IDX path must be fully side-effect
/// free toward the guest: no status byte, no used.idx advance, no
/// INT_VRING, no irqfd — and next_avail is rolled back so the chain
/// stays in the avail ring for a later retry.
#[test]
fn throttle_event_idx_stall_leaves_chain_in_avail_ring() {
    // 1 IOPS, no byte limit, no burst: a single token to drain.
    let throttle = DiskThrottle {
        iops: std::num::NonZeroU64::new(1),
        bytes_per_sec: None,
        iops_burst_capacity: None,
        bytes_burst_capacity: None,
    };
    let mem = make_chain_test_mem();
    let qsize = 16u16;
    let (mut dev, mock) = setup_blk(&mem, false, throttle);
    // Pin the bucket clock, drain its only token, then re-pin the clock
    // so no refill can happen before the notify below — guaranteeing a
    // deterministic stall.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(std::time::Instant::now());
    assert!(
        dev.worker.state_mut().ops_bucket.consume(1),
        "drain the 1-token bucket"
    );
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(std::time::Instant::now());
    let used_event = used_event_addr(mock.avail_addr(), qsize);
    mem.write_obj::<u16>(u16::to_le(0), used_event)
        .expect("plant used_event");
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    // Sentinel status byte: any device write here would be detected.
    mem.write_slice(&[0xEEu8], status_addr).unwrap();
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            512,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock_with_event_idx(&mut dev, &mock, qsize, GuestAddress(0x10000));
    // Snapshot next_avail to verify rollback after the stall.
    let next_avail_before = dev.worker.queues[REQ_QUEUE].next_avail();
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(
        s[0], 0xEE,
        "throttle stall must NOT write a status byte even on \
         EVENT_IDX path",
    );
    let c = dev.counters();
    assert_eq!(
        c.throttled_count.load(Ordering::Relaxed),
        1,
        "throttle stall bumps throttled_count exactly once",
    );
    assert_eq!(
        c.io_errors.load(Ordering::Relaxed),
        0,
        "throttle stall is not classified as an I/O error",
    );
    let used_idx: u16 = mem
        .read_obj(GuestAddress(0x10000).checked_add(2).unwrap())
        .expect("read device used.idx at override addr");
    assert_eq!(
        used_idx, 0,
        "throttle stall must NOT advance used.idx even on \
         EVENT_IDX path",
    );
    assert_eq!(
        dev.interrupt_status.load(Ordering::Acquire) & VIRTIO_MMIO_INT_VRING,
        0,
        "throttle stall must NOT set INT_VRING — signal_needed \
         stays false on the stall path",
    );
    let status = read_reg(&dev, VIRTIO_MMIO_INTERRUPT_STATUS);
    assert_eq!(status & 1, 0);
    assert!(
        dev.irq_evt.read().is_err(),
        "throttle stall must NOT signal the irqfd",
    );
    assert_eq!(
        dev.worker.queues[REQ_QUEUE].next_avail(),
        next_avail_before,
        "post-stall next_avail must equal pre-stall value \
         (rollback preserved on EVENT_IDX path)",
    );
}
/// Notifying before `set_mem` must flip the `mem_unset_warned` latch
/// on the first notify and leave it set on the second (warn-once
/// behavior), with no work counters moving either time.
#[test]
fn mem_unset_warned_latch_fires_once() {
    let capacity = 4096u64;
    let mut dev = VirtioBlk::new(
        make_backed_file_with_pattern(capacity, 0xAB),
        capacity,
        DiskThrottle::default(),
    );
    assert!(
        !dev.mem_unset_warned.load(Ordering::Relaxed),
        "fresh device must have mem_unset_warned=false",
    );
    // Two notifies before set_mem: latch flips on the first, stays set.
    let expectations = [
        "first pre-set_mem notify must flip the latch to true",
        "second pre-set_mem notify must leave the latch at true",
    ];
    for why in expectations {
        write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
        assert!(dev.mem_unset_warned.load(Ordering::Relaxed), "{why}");
    }
    let counters = dev.counters();
    assert_eq!(counters.reads_completed.load(Ordering::Relaxed), 0);
    assert_eq!(counters.io_errors.load(Ordering::Relaxed), 0);
}
/// Writing VIRTIO_MMIO_INT_VRING to INTERRUPT_ACK must clear the bit
/// that a drained chain set in INTERRUPT_STATUS.
#[test]
fn interrupt_ack_clears_status_bit() {
    let mem = make_chain_test_mem();
    let (mut dev, mock) = setup_blk(&mem, false, DiskThrottle::default());
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
    // (addr, len, flags) for header / 512-byte writable data / status.
    let layout = [
        (header_addr.0, VIRTIO_BLK_OUTHDR_SIZE as u32, 0u16),
        (data_addr.0, 512u32, VRING_DESC_F_WRITE as u16),
        (status_addr.0, 1u32, VRING_DESC_F_WRITE as u16),
    ];
    let descs: Vec<RawDescriptor> = layout
        .iter()
        .map(|&(addr, len, flags)| {
            RawDescriptor::from(SplitDescriptor::new(addr, len, flags, 0))
        })
        .collect();
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    assert_eq!(
        read_reg(&dev, VIRTIO_MMIO_INTERRUPT_STATUS) & 1,
        1,
        "drained chain must set VIRTIO_MMIO_INT_VRING in INTERRUPT_STATUS",
    );
    write_reg(&mut dev, VIRTIO_MMIO_INTERRUPT_ACK, VIRTIO_MMIO_INT_VRING);
    assert_eq!(
        read_reg(&dev, VIRTIO_MMIO_INTERRUPT_STATUS) & 1,
        0,
        "INTERRUPT_ACK with VIRTIO_MMIO_INT_VRING must clear the bit",
    );
}