#![cfg(test)]
#![allow(unused_imports)]
use super::testing::*;
use super::*;
use std::io::{Seek, Write};
use std::num::NonZeroU64;
use std::os::unix::fs::FileExt;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
use tempfile::tempfile;
use virtio_bindings::bindings::virtio_ring::VRING_DESC_F_WRITE;
use virtio_queue::desc::{RawDescriptor, split::Descriptor as SplitDescriptor};
use virtio_queue::mock::MockSplitQueue;
use vm_memory::Address;
#[test]
fn process_requests_full_read_chain() {
    // Happy path: a header/data/status read chain pulls the backing file's
    // byte pattern into the guest buffer, publishes one used entry, and
    // reports S_OK in the status byte.
    let capacity = 4096u64;
    let backing = make_backed_file_with_pattern(capacity, 0xAB);
    let mut blk = VirtioBlk::new(backing, capacity, DiskThrottle::default());
    let guest_mem = make_chain_test_mem();
    let queue = MockSplitQueue::create(&guest_mem, GuestAddress(0), 16);
    let hdr = GuestAddress(0x4000);
    let data = GuestAddress(0x5000);
    let status = GuestAddress(0x6000);
    write_blk_header(&guest_mem, hdr, VIRTIO_BLK_T_IN, 0);
    let chain = [
        RawDescriptor::from(SplitDescriptor::new(
            hdr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(data.0, 512, VRING_DESC_F_WRITE as u16, 0)),
        RawDescriptor::from(SplitDescriptor::new(status.0, 1, VRING_DESC_F_WRITE as u16, 0)),
    ];
    queue.build_desc_chain(&chain).expect("build chain");
    blk.set_mem(guest_mem.clone());
    wire_device_to_mock(&mut blk, &queue);
    write_reg(&mut blk, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut data_buf = [0u8; 512];
    guest_mem.read_slice(&mut data_buf, data).unwrap();
    assert!(
        data_buf.iter().all(|&b| b == 0xAB),
        "data segment must contain backing file's 0xAB pattern after read",
    );
    let mut status_buf = [0u8; 1];
    guest_mem.read_slice(&mut status_buf, status).unwrap();
    assert_eq!(
        status_buf[0], VIRTIO_BLK_S_OK as u8,
        "status byte must be S_OK after successful read",
    );
    let used_idx: u16 = guest_mem
        .read_obj(queue.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 1, "exactly one used-ring entry expected");
    let counters = blk.counters();
    assert_eq!(counters.reads_completed.load(Ordering::Relaxed), 1);
    assert_eq!(counters.io_errors.load(Ordering::Relaxed), 0);
    assert_eq!(counters.bytes_read.load(Ordering::Relaxed), 512);
}
#[test]
fn process_requests_full_write_chain() {
    // A device-read data descriptor (guest -> disk) must land its payload in
    // the backing file at the requested sector and complete with S_OK.
    let capacity = 4096u64;
    let backing = make_backed_file_with_pattern(capacity, 0x00);
    let verify_handle = backing.try_clone().expect("clone backing for verify");
    let mut blk = VirtioBlk::new(backing, capacity, DiskThrottle::default());
    let guest_mem = make_chain_test_mem();
    let queue = MockSplitQueue::create(&guest_mem, GuestAddress(0), 16);
    let hdr = GuestAddress(0x4000);
    let data = GuestAddress(0x5000);
    let status = GuestAddress(0x6000);
    write_blk_header(&guest_mem, hdr, VIRTIO_BLK_T_OUT, 1);
    guest_mem
        .write_slice(&[0xCDu8; 512], data)
        .expect("plant payload");
    let chain = [
        RawDescriptor::from(SplitDescriptor::new(
            hdr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(data.0, 512, 0, 0)),
        RawDescriptor::from(SplitDescriptor::new(status.0, 1, VRING_DESC_F_WRITE as u16, 0)),
    ];
    queue.build_desc_chain(&chain).expect("build chain");
    blk.set_mem(guest_mem.clone());
    wire_device_to_mock(&mut blk, &queue);
    write_reg(&mut blk, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    // Sector 1 lives at byte offset 512 in the backing file.
    let mut readback = [0u8; 512];
    verify_handle
        .read_at(&mut readback, 512)
        .expect("read backing");
    assert!(
        readback.iter().all(|&b| b == 0xCD),
        "backing file at sector 1 must hold the 0xCD payload after write",
    );
    let mut status_buf = [0u8; 1];
    guest_mem.read_slice(&mut status_buf, status).unwrap();
    assert_eq!(status_buf[0], VIRTIO_BLK_S_OK as u8);
    let used_idx: u16 = guest_mem
        .read_obj(queue.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 1);
    let counters = blk.counters();
    assert_eq!(counters.writes_completed.load(Ordering::Relaxed), 1);
    assert_eq!(counters.bytes_written.load(Ordering::Relaxed), 512);
    assert_eq!(counters.io_errors.load(Ordering::Relaxed), 0);
}
#[test]
fn process_requests_unknown_type_returns_unsupp() {
    // An unrecognized req_type must be answered with S_UNSUPP and still be
    // published on the used ring, without touching any I/O counters.
    let capacity = 4096u64;
    let backing = make_backed_file_with_pattern(capacity, 0x00);
    let mut blk = VirtioBlk::new(backing, capacity, DiskThrottle::default());
    let guest_mem = make_chain_test_mem();
    let queue = MockSplitQueue::create(&guest_mem, GuestAddress(0), 16);
    let hdr = GuestAddress(0x4000);
    let status = GuestAddress(0x5000);
    // 0xBEEF is not any VIRTIO_BLK_T_* request type.
    write_blk_header(&guest_mem, hdr, 0xBEEF, 0);
    let chain = [
        RawDescriptor::from(SplitDescriptor::new(
            hdr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(status.0, 1, VRING_DESC_F_WRITE as u16, 0)),
    ];
    queue.build_desc_chain(&chain).expect("build chain");
    blk.set_mem(guest_mem.clone());
    wire_device_to_mock(&mut blk, &queue);
    write_reg(&mut blk, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut status_buf = [0u8; 1];
    guest_mem.read_slice(&mut status_buf, status).unwrap();
    assert_eq!(
        status_buf[0], VIRTIO_BLK_S_UNSUPP as u8,
        "unknown req_type must produce S_UNSUPP, not S_IOERR or S_OK",
    );
    let used_idx: u16 = guest_mem
        .read_obj(queue.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 1, "UNSUPP completions still update used.idx");
    let counters = blk.counters();
    assert_eq!(counters.io_errors.load(Ordering::Relaxed), 0);
    assert_eq!(counters.reads_completed.load(Ordering::Relaxed), 0);
    assert_eq!(counters.writes_completed.load(Ordering::Relaxed), 0);
    assert_eq!(counters.flushes_completed.load(Ordering::Relaxed), 0);
}
#[test]
fn process_requests_flush_chain() {
    // T_FLUSH carries no data descriptor; it must complete with S_OK and bump
    // only the flush counter.
    let capacity = 4096u64;
    let backing = make_backed_file_with_pattern(capacity, 0x00);
    let mut blk = VirtioBlk::new(backing, capacity, DiskThrottle::default());
    let guest_mem = make_chain_test_mem();
    let queue = MockSplitQueue::create(&guest_mem, GuestAddress(0), 16);
    let hdr = GuestAddress(0x4000);
    let status = GuestAddress(0x5000);
    write_blk_header(&guest_mem, hdr, VIRTIO_BLK_T_FLUSH, 0);
    let chain = [
        RawDescriptor::from(SplitDescriptor::new(
            hdr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(status.0, 1, VRING_DESC_F_WRITE as u16, 0)),
    ];
    queue.build_desc_chain(&chain).expect("build chain");
    blk.set_mem(guest_mem.clone());
    wire_device_to_mock(&mut blk, &queue);
    write_reg(&mut blk, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut status_buf = [0u8; 1];
    guest_mem.read_slice(&mut status_buf, status).unwrap();
    assert_eq!(status_buf[0], VIRTIO_BLK_S_OK as u8);
    let used_idx: u16 = guest_mem
        .read_obj(queue.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 1);
    let counters = blk.counters();
    assert_eq!(counters.flushes_completed.load(Ordering::Relaxed), 1);
    assert_eq!(counters.reads_completed.load(Ordering::Relaxed), 0);
    assert_eq!(counters.writes_completed.load(Ordering::Relaxed), 0);
    assert_eq!(counters.io_errors.load(Ordering::Relaxed), 0);
}
#[test]
fn process_requests_short_header_returns_ioerr() {
    // The header descriptor is only 8 bytes — too small for virtio_blk_outhdr
    // — so the request must fail with S_IOERR and bump io_errors.
    let capacity = 4096u64;
    let backing = make_backed_file_with_pattern(capacity, 0x00);
    let mut blk = VirtioBlk::new(backing, capacity, DiskThrottle::default());
    let guest_mem = make_chain_test_mem();
    let queue = MockSplitQueue::create(&guest_mem, GuestAddress(0), 16);
    let hdr = GuestAddress(0x4000);
    let status = GuestAddress(0x5000);
    // No write_blk_header: the 8-byte descriptor is rejected before the
    // header contents matter.
    let chain = [
        RawDescriptor::from(SplitDescriptor::new(hdr.0, 8, 0, 0)),
        RawDescriptor::from(SplitDescriptor::new(status.0, 1, VRING_DESC_F_WRITE as u16, 0)),
    ];
    queue.build_desc_chain(&chain).expect("build chain");
    blk.set_mem(guest_mem.clone());
    wire_device_to_mock(&mut blk, &queue);
    write_reg(&mut blk, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut status_buf = [0u8; 1];
    guest_mem.read_slice(&mut status_buf, status).unwrap();
    assert_eq!(
        status_buf[0], VIRTIO_BLK_S_IOERR as u8,
        "short header must be rejected with S_IOERR",
    );
    let used_idx: u16 = guest_mem
        .read_obj(queue.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 1);
    let counters = blk.counters();
    assert_eq!(counters.io_errors.load(Ordering::Relaxed), 1);
    assert_eq!(counters.reads_completed.load(Ordering::Relaxed), 0);
    assert_eq!(counters.writes_completed.load(Ordering::Relaxed), 0);
    assert_eq!(counters.flushes_completed.load(Ordering::Relaxed), 0);
}
#[test]
fn process_requests_status_not_writable_drops_chain() {
    // The final descriptor lacks VRING_DESC_F_WRITE, so the device has no
    // writable slot for the status byte: it must drop the chain entirely —
    // no status write, no used.idx advance, no interrupt.
    let capacity = 4096u64;
    let backing = make_backed_file_with_pattern(capacity, 0x00);
    let mut blk = VirtioBlk::new(backing, capacity, DiskThrottle::default());
    let guest_mem = make_chain_test_mem();
    let queue = MockSplitQueue::create(&guest_mem, GuestAddress(0), 16);
    let hdr = GuestAddress(0x4000);
    let status = GuestAddress(0x5000);
    // Sentinel byte proves the device never wrote the status slot.
    guest_mem.write_slice(&[0xEEu8], status).unwrap();
    write_blk_header(&guest_mem, hdr, VIRTIO_BLK_T_IN, 0);
    let chain = [
        RawDescriptor::from(SplitDescriptor::new(
            hdr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(status.0, 1, 0, 0)),
    ];
    queue.build_desc_chain(&chain).expect("build chain");
    blk.set_mem(guest_mem.clone());
    wire_device_to_mock(&mut blk, &queue);
    write_reg(&mut blk, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut status_buf = [0u8; 1];
    guest_mem.read_slice(&mut status_buf, status).unwrap();
    assert_eq!(
        status_buf[0], 0xEE,
        "no status descriptor → device must not write a status byte; \
         sentinel 0xEE survives",
    );
    let used_idx: u16 = guest_mem
        .read_obj(queue.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(
        used_idx, 0,
        "no-status chain must NOT advance used.idx; advancing would \
         let the guest's stale in_hdr.status surface as \
         BLK_STS_OK (silent data corruption)",
    );
    let counters = blk.counters();
    assert_eq!(counters.io_errors.load(Ordering::Relaxed), 1);
    assert_eq!(counters.reads_completed.load(Ordering::Relaxed), 0);
    assert_eq!(
        blk.interrupt_status.load(Ordering::Acquire) & VIRTIO_MMIO_INT_VRING,
        0
    );
}
#[test]
fn process_requests_multiple_chains_drained_in_one_notify() {
    use virtio_bindings::bindings::virtio_ring::VRING_DESC_F_NEXT;
    // Three independent read chains sit on the avail ring before a single
    // notify; the device must drain all of them in one pass and route each
    // sector's distinct pattern into the matching chain's data buffer.
    let cap = 4096u64;
    let mut f = tempfile().unwrap();
    f.set_len(cap).unwrap();
    // Sector n (n = 0..3) holds the repeating byte (n + 1) * 0x11.
    for pattern in [0x11u8, 0x22, 0x33] {
        f.write_all(&[pattern; 512]).unwrap();
    }
    f.rewind().unwrap();
    let mut dev = VirtioBlk::new(f, cap, DiskThrottle::default());
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    // (header, data, status, sector) per chain.
    let chains = [
        (
            GuestAddress(0x4000),
            GuestAddress(0x4400),
            GuestAddress(0x4800),
            0u64,
        ),
        (
            GuestAddress(0x5000),
            GuestAddress(0x5400),
            GuestAddress(0x5800),
            1u64,
        ),
        (
            GuestAddress(0x6000),
            GuestAddress(0x6400),
            GuestAddress(0x6800),
            2u64,
        ),
    ];
    for &(hdr, _, _, sector) in &chains {
        write_blk_header(&mem, hdr, VIRTIO_BLK_T_IN, sector);
    }
    // Descriptor table layout: chain i occupies slots 3i .. 3i + 2, linked
    // explicitly with NEXT flags (add_desc_chains does not auto-link).
    let mut descs = Vec::new();
    for (chain_i, &(hdr, data, status, _)) in chains.iter().enumerate() {
        let base = (chain_i as u16) * 3;
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            hdr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            VRING_DESC_F_NEXT as u16,
            base + 1,
        )));
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            data.0,
            512,
            VRING_DESC_F_WRITE as u16 | VRING_DESC_F_NEXT as u16,
            base + 2,
        )));
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            status.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )));
    }
    mock.add_desc_chains(&descs, 0).expect("add 3 chains");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 3, "single notify must drain 3 chains");
    let c = dev.counters();
    assert_eq!(c.reads_completed.load(Ordering::Relaxed), 3);
    assert_eq!(c.bytes_read.load(Ordering::Relaxed), 3 * 512);
    assert_eq!(c.io_errors.load(Ordering::Relaxed), 0);
    // Each chain's buffer must hold its own sector's pattern — proves the
    // device did not cross-wire sectors between chains.
    for (i, &(_, data, _, _)) in chains.iter().enumerate() {
        let mut buf = [0u8; 512];
        mem.read_slice(&mut buf, data).unwrap();
        let expected = (i as u8 + 1) * 0x11;
        assert!(
            buf.iter().all(|&b| b == expected),
            "chain {i}'s data must hold sector {i}'s pattern (0x{expected:02X})",
        );
    }
}
#[test]
fn process_requests_throttled_rolls_back_chain() {
    // With a 1-IOPS budget already spent, a notify must stall the chain:
    // nothing is written to guest memory, used.idx is untouched, next_avail
    // is rolled back, and no interrupt is raised — the chain stays on the
    // avail ring for a later retry.
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0xAB);
    let throttle = DiskThrottle {
        iops: NonZeroU64::new(1),
        bytes_per_sec: None,
        iops_burst_capacity: None,
        bytes_burst_capacity: None,
    };
    let mut dev = VirtioBlk::new(f, cap, throttle);
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    // Pin the refill clock, drain the single token, then pin the clock again
    // so effectively zero time has elapsed and no token comes back.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now());
    assert!(
        dev.worker.state_mut().ops_bucket.consume(1),
        "drain the 1-token bucket"
    );
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now());
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    // Sentinels detect any stray write to the data/status segments.
    let sentinel_data = vec![0xFFu8; 512];
    mem.write_slice(&sentinel_data, data_addr).unwrap();
    mem.write_slice(&[0xEEu8], status_addr).unwrap();
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            512,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    let next_avail_before = dev.worker.queues[REQ_QUEUE].next_avail();
    assert!(
        dev.irq_evt.read().is_err(),
        "irq_evt must not be signalled before notify",
    );
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut status_buf = [0u8; 1];
    mem.read_slice(&mut status_buf, status_addr).unwrap();
    assert_eq!(
        status_buf[0], 0xEE,
        "throttle stall must NOT write a status byte; the chain \
         stays in the avail ring until the retry timer fires. \
         Sentinel 0xEE must survive.",
    );
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(
        used_idx, 0,
        "throttle stall must NOT advance used.idx; advancing \
         would tell the guest the request completed with whatever \
         stale status byte was at the descriptor, defeating the \
         rollback.",
    );
    assert_eq!(
        dev.worker.queues[REQ_QUEUE].next_avail(),
        next_avail_before,
        "throttle stall must rewind next_avail to its pre-pop \
         value so the next drain re-pops the same head",
    );
    let c = dev.counters();
    assert_eq!(
        c.throttled_count.load(Ordering::Relaxed),
        1,
        "throttle stall must bump throttled_count exactly once",
    );
    assert_eq!(
        c.io_errors.load(Ordering::Relaxed),
        0,
        "throttle stall is NOT classified as an I/O error — the \
         chain is deferred, not failed",
    );
    assert_eq!(c.reads_completed.load(Ordering::Relaxed), 0);
    assert_eq!(c.bytes_read.load(Ordering::Relaxed), 0);
    let mut readback = [0u8; 512];
    mem.read_slice(&mut readback, data_addr).unwrap();
    assert!(
        readback.iter().all(|&b| b == 0xFF),
        "stalled chain must NOT touch the data descriptor; \
         0xFF sentinel must survive",
    );
    assert!(
        dev.irq_evt.read().is_err(),
        "throttle stall must NOT signal the irqfd",
    );
    assert_eq!(
        dev.interrupt_status.load(Ordering::Acquire) & VIRTIO_MMIO_INT_VRING,
        0,
        "throttle stall must NOT set INT_VRING; the bit is only \
         set when a chain is published, and a stalled chain is not",
    );
}
#[test]
fn throttle_stall_then_refill_retry_succeeds() {
    // Stall-then-retry round trip: the first notify stalls on an empty IOPS
    // bucket (rollback, nothing published); after back-dating the refill
    // clock the second notify must complete the SAME chain exactly once.
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0xAB);
    let throttle = DiskThrottle {
        iops: NonZeroU64::new(1),
        bytes_per_sec: None,
        iops_burst_capacity: None,
        bytes_burst_capacity: None,
    };
    let mut dev = VirtioBlk::new(f, cap, throttle);
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    // Pin the clock, drain the single token, pin the clock again so no time
    // appears to have passed and nothing refills before the first notify.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now());
    assert!(dev.worker.state_mut().ops_bucket.consume(1), "drain bucket");
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now());
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    // Sentinels detect any premature write to the data/status segments.
    let sentinel_data = vec![0xFFu8; 512];
    mem.write_slice(&sentinel_data, data_addr).unwrap();
    mem.write_slice(&[0xEEu8], status_addr).unwrap();
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            512,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    let next_avail_before = dev.worker.queues[REQ_QUEUE].next_avail();
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(s[0], 0xEE, "first notify must stall (no status write)",);
    let used_idx_after_stall: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx after stall");
    assert_eq!(used_idx_after_stall, 0, "stall must not advance used.idx");
    assert_eq!(
        dev.counters().throttled_count.load(Ordering::Relaxed),
        1,
        "first notify bumps throttled_count exactly once",
    );
    assert_eq!(
        dev.worker.queues[REQ_QUEUE].next_avail(),
        next_avail_before,
        "post-stall next_avail must equal pre-stall value (rollback)",
    );
    // Back-date the refill clock: 2 s at 1 IOPS restores the single token.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now() - Duration::from_secs(2));
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(
        s[0], VIRTIO_BLK_S_OK as u8,
        "post-refill notify must complete the chain with S_OK",
    );
    let used_idx_after_retry: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx after retry");
    assert_eq!(
        used_idx_after_retry, 1,
        "post-refill notify must advance used.idx by 1; the \
         rolled-back chain is the SAME head, not a duplicate",
    );
    let mut data_buf = [0u8; 512];
    mem.read_slice(&mut data_buf, data_addr).unwrap();
    assert!(
        data_buf.iter().all(|&b| b == 0xAB),
        "data segment must hold backing file's 0xAB pattern \
         after the retry; sentinel 0xFF must be overwritten",
    );
    let c = dev.counters();
    assert_eq!(
        c.reads_completed.load(Ordering::Relaxed),
        1,
        "retry counts as a single read completion (not double)",
    );
    assert_eq!(
        c.bytes_read.load(Ordering::Relaxed),
        512,
        "retry counts the data bytes once",
    );
    assert_eq!(
        c.io_errors.load(Ordering::Relaxed),
        0,
        "no IO error across the stall+retry sequence",
    );
    assert_eq!(
        c.throttled_count.load(Ordering::Relaxed),
        1,
        "retry success must not bump throttled_count again",
    );
    assert_eq!(
        dev.worker.queues[REQ_QUEUE].next_avail(),
        next_avail_before.wrapping_add(1),
        "post-retry next_avail must equal pre-stall + 1 \
         (chain consumed exactly once across the stall+retry)",
    );
}
#[test]
fn throttle_stall_fifo_order() {
    use virtio_bindings::bindings::virtio_ring::VRING_DESC_F_NEXT;
    // Two read chains vs. a 1-IOPS budget: notify #1 completes chain 0 and
    // stalls on chain 1; after a refill, notify #2 completes chain 1 — the
    // avail-ring FIFO order survives the stall+retry.
    let cap = 4096u64;
    let mut f = tempfile().unwrap();
    f.set_len(cap).unwrap();
    // Sector 0 = 0x11, sector 1 = 0x22.
    f.write_all(&[0x11; 512]).unwrap();
    f.write_all(&[0x22; 512]).unwrap();
    f.rewind().unwrap();
    let throttle = DiskThrottle {
        iops: NonZeroU64::new(1),
        bytes_per_sec: None,
        iops_burst_capacity: None,
        bytes_burst_capacity: None,
    };
    let mut dev = VirtioBlk::new(f, cap, throttle);
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    // Pin the refill clock: the bucket holds exactly one token — enough for
    // chain 0 only.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now());
    // (header, data, status, sector) per chain.
    let chains = [
        (
            GuestAddress(0x4000),
            GuestAddress(0x4400),
            GuestAddress(0x4800),
            0u64,
        ),
        (
            GuestAddress(0x5000),
            GuestAddress(0x5400),
            GuestAddress(0x5800),
            1u64,
        ),
    ];
    for &(hdr, _, _, sector) in &chains {
        write_blk_header(&mem, hdr, VIRTIO_BLK_T_IN, sector);
    }
    // Descriptor table layout: chain i occupies slots 3i .. 3i + 2.
    let mut descs = Vec::new();
    for (chain_i, &(hdr, data, status, _)) in chains.iter().enumerate() {
        let base = (chain_i as u16) * 3;
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            hdr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            VRING_DESC_F_NEXT as u16,
            base + 1,
        )));
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            data.0,
            512,
            VRING_DESC_F_WRITE as u16 | VRING_DESC_F_NEXT as u16,
            base + 2,
        )));
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            status.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )));
    }
    mock.add_desc_chains(&descs, 0).expect("add 2 chains");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    let next_avail_before = dev.worker.queues[REQ_QUEUE].next_avail();
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx after first notify");
    assert_eq!(
        used_idx, 1,
        "first notify must complete chain 0 (one used-ring entry)",
    );
    let c = dev.counters();
    assert_eq!(
        c.reads_completed.load(Ordering::Relaxed),
        1,
        "exactly one read completed before the stall",
    );
    assert_eq!(
        c.throttled_count.load(Ordering::Relaxed),
        1,
        "second chain stalled — throttled_count == 1",
    );
    let mut buf0 = [0u8; 512];
    mem.read_slice(&mut buf0, chains[0].1).unwrap();
    assert!(
        buf0.iter().all(|&b| b == 0x11),
        "chain 0's data must hold sector 0's pattern (0x11)",
    );
    // Back-date the refill clock: 2 s at 1 IOPS restores the token for
    // chain 1.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now() - Duration::from_secs(2));
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx after retry");
    assert_eq!(
        used_idx, 2,
        "retry must complete chain 1; used.idx advances to 2",
    );
    let c = dev.counters();
    assert_eq!(
        c.reads_completed.load(Ordering::Relaxed),
        2,
        "both chains completed",
    );
    assert_eq!(
        c.throttled_count.load(Ordering::Relaxed),
        1,
        "no fresh stall on retry — throttled_count stays at 1",
    );
    let mut buf1 = [0u8; 512];
    mem.read_slice(&mut buf1, chains[1].1).unwrap();
    assert!(
        buf1.iter().all(|&b| b == 0x22),
        "chain 1's data must hold sector 1's pattern (0x22) — \
         FIFO order preserved across the stall+retry",
    );
    assert_eq!(
        dev.worker.queues[REQ_QUEUE].next_avail(),
        next_avail_before.wrapping_add(2),
        "post-retry next_avail must equal pre-stall + 2 \
         (both chains consumed exactly once across stall+retry)",
    );
}
#[test]
fn validation_precedes_throttle_on_stall() {
    // A 513-byte (sub-sector) data descriptor must be rejected by the
    // validation gate with S_IOERR even though the IOPS bucket is empty:
    // validation runs BEFORE throttling, so the chain is consumed (used.idx
    // advances) rather than rolled back, and throttled_count stays 0.
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0xAB);
    let throttle = DiskThrottle {
        iops: NonZeroU64::new(1),
        bytes_per_sec: None,
        iops_burst_capacity: None,
        bytes_burst_capacity: None,
    };
    let mut dev = VirtioBlk::new(f, cap, throttle);
    // Drain the 1-token bucket with the clock pinned so a throttle check, if
    // (wrongly) reached first, would stall.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now());
    assert!(dev.worker.state_mut().ops_bucket.consume(1));
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now());
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        // 513 bytes: not a multiple of the 512-byte sector size.
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            513,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    let next_avail_before = dev.worker.queues[REQ_QUEUE].next_avail();
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(
        s[0], VIRTIO_BLK_S_IOERR as u8,
        "sub-sector chain must produce S_IOERR via validation gate",
    );
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(
        used_idx, 1,
        "validation rejection completes the chain (add_used runs)",
    );
    let c = dev.counters();
    assert_eq!(
        c.io_errors.load(Ordering::Relaxed),
        1,
        "validation gate bumps io_errors",
    );
    assert_eq!(
        c.throttled_count.load(Ordering::Relaxed),
        0,
        "validation gate must fire BEFORE throttle; \
         throttled_count must stay 0 even with a drained bucket",
    );
    assert_eq!(c.reads_completed.load(Ordering::Relaxed), 0);
    assert_eq!(
        dev.worker.queues[REQ_QUEUE].next_avail(),
        next_avail_before.wrapping_add(1),
        "validation gate consumes the chain — next_avail must \
         advance by exactly 1, not roll back like throttle stall",
    );
}
#[test]
fn throttle_stall_event_idx_retry_routes_through_gate() {
    // EVENT_IDX variant of stall+retry: the retry completion must publish the
    // chain and set INT_VRING, while the planted used_event of u16::MAX
    // ("never notify") must suppress the irqfd signal.
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0xAB);
    let throttle = DiskThrottle {
        iops: NonZeroU64::new(1),
        bytes_per_sec: None,
        iops_burst_capacity: None,
        bytes_burst_capacity: None,
    };
    let mut dev = VirtioBlk::new(f, cap, throttle);
    let mem = make_chain_test_mem();
    let qsize = 16u16;
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), qsize);
    // Drain the 1-token bucket with the clock pinned so the first notify
    // stalls.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now());
    assert!(dev.worker.state_mut().ops_bucket.consume(1));
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now());
    // used_event = u16::MAX: the guest never asks for a notification.
    let used_event = used_event_addr(mock.avail_addr(), qsize);
    mem.write_obj::<u16>(u16::to_le(u16::MAX), used_event)
        .expect("plant used_event");
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            512,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    // EVENT_IDX wiring relocates the device's used ring to 0x10000.
    wire_device_to_mock_with_event_idx(&mut dev, &mock, qsize, GuestAddress(0x10000));
    let next_avail_before = dev.worker.queues[REQ_QUEUE].next_avail();
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    assert_eq!(
        dev.counters().throttled_count.load(Ordering::Relaxed),
        1,
        "first notify stalls",
    );
    // Back-date the refill clock to restore the token, then retry.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(Instant::now() - Duration::from_secs(2));
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let used_idx: u16 = mem
        .read_obj(GuestAddress(0x10000).checked_add(2).unwrap())
        .expect("read device used.idx at override addr");
    assert_eq!(used_idx, 1, "retry must publish the chain");
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(s[0], VIRTIO_BLK_S_OK as u8);
    let c = dev.counters();
    assert_eq!(c.reads_completed.load(Ordering::Relaxed), 1);
    assert_ne!(
        dev.interrupt_status.load(Ordering::Acquire) & VIRTIO_MMIO_INT_VRING,
        0,
        "retry completion sets INT_VRING (V8 bit/eventfd split)",
    );
    assert!(
        dev.irq_evt.read().is_err(),
        "EVENT_IDX gate must suppress irqfd on retry when \
         used_event threshold is unreached",
    );
    assert_eq!(
        dev.counters().io_errors.load(Ordering::Relaxed),
        0,
        "throttle stall + retry must not bump io_errors",
    );
    assert_eq!(
        dev.worker.queues[REQ_QUEUE].next_avail(),
        next_avail_before.wrapping_add(1),
        "post-retry next_avail must equal pre-stall + 1 \
         (chain consumed exactly once across stall+retry on \
         EVENT_IDX path)",
    );
}
#[test]
fn throttle_bytes_request_exceeds_capacity_stalls() {
    // bytes_per_sec = 512 but the chain transfers 1024 bytes — more than the
    // bucket can ever hold at once. The bucket is additionally driven into
    // debt first, so the notify must stall: no status write, no used.idx
    // advance, and exactly one throttled_count bump.
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0xAB);
    let throttle = DiskThrottle {
        iops: None,
        bytes_per_sec: NonZeroU64::new(512),
        iops_burst_capacity: None,
        bytes_burst_capacity: None,
    };
    let mut dev = VirtioBlk::new(f, cap, throttle);
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    // Drive the bucket deep into debt with the refill clock pinned.
    dev.worker
        .state_mut()
        .bytes_bucket
        .set_last_refill_for_test(Instant::now());
    assert!(
        dev.worker.state_mut().bytes_bucket.consume(4096),
        "priming overconsume must succeed against fresh \
         available=512 — the gate is `available >= 0` for \
         n > capacity",
    );
    dev.worker
        .state_mut()
        .bytes_bucket
        .set_last_refill_for_test(Instant::now());
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    // Sentinel detects any stray status write.
    mem.write_slice(&[0xEEu8], status_addr).unwrap();
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            1024,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(
        s[0], 0xEE,
        "bytes-bucket stall must NOT write a status byte",
    );
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 0, "bytes-bucket stall must NOT advance used.idx",);
    let c = dev.counters();
    assert_eq!(
        c.throttled_count.load(Ordering::Relaxed),
        1,
        "bytes-bucket exhaustion bumps throttled_count",
    );
    assert_eq!(
        c.io_errors.load(Ordering::Relaxed),
        0,
        "bytes-bucket stall is not an IO error",
    );
}
#[test]
fn throttle_both_buckets_max_wait() {
    // Both the IOPS and bytes buckets are configured; the bytes bucket is
    // driven into debt so a 2048-byte chain stalls. The combined stall must
    // bump throttled_count exactly once and must not count as an I/O error.
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0xAB);
    let throttle = DiskThrottle {
        iops: NonZeroU64::new(10),
        bytes_per_sec: NonZeroU64::new(1024),
        iops_burst_capacity: None,
        bytes_burst_capacity: None,
    };
    let mut dev = VirtioBlk::new(f, cap, throttle);
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    // Drive the bytes bucket into debt with the refill clock pinned.
    dev.worker
        .state_mut()
        .bytes_bucket
        .set_last_refill_for_test(Instant::now());
    assert!(
        dev.worker.state_mut().bytes_bucket.consume(2048),
        "priming overconsume must succeed against fresh \
         available=1024 — the gate is `available >= 0` for \
         n > capacity",
    );
    dev.worker
        .state_mut()
        .bytes_bucket
        .set_last_refill_for_test(Instant::now());
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            2048,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let c = dev.counters();
    assert_eq!(
        c.throttled_count.load(Ordering::Relaxed),
        1,
        "two-bucket stall bumps throttled_count exactly once",
    );
    assert_eq!(
        c.io_errors.load(Ordering::Relaxed),
        0,
        "two-bucket stall is not an IO error",
    );
}
#[test]
fn throttle_bytes_bucket_retry_succeeds() {
    // Bytes-bucket stall+retry: priming consume(4096) against capacity 1024
    // leaves -3072 debt, so the first notify stalls. Back-dating the clock
    // 4 s refills 4096 bytes (clamped to capacity 1024) — exactly enough for
    // the 1024-byte chain to complete on retry.
    let cap = 4096u64;
    let f = make_backed_file_with_pattern(cap, 0xAB);
    let throttle = DiskThrottle {
        iops: None,
        bytes_per_sec: NonZeroU64::new(1024),
        iops_burst_capacity: None,
        bytes_burst_capacity: None,
    };
    let mut dev = VirtioBlk::new(f, cap, throttle);
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    // Drive the bucket into debt with the refill clock pinned.
    dev.worker
        .state_mut()
        .bytes_bucket
        .set_last_refill_for_test(Instant::now());
    assert!(
        dev.worker.state_mut().bytes_bucket.consume(4096),
        "priming overconsume must succeed against fresh \
         available=1024 — the gate is `available >= 0` for \
         n > capacity",
    );
    dev.worker
        .state_mut()
        .bytes_bucket
        .set_last_refill_for_test(Instant::now());
    let header_addr = GuestAddress(0x4000);
    let data_addr = GuestAddress(0x5000);
    let status_addr = GuestAddress(0x6000);
    // Sentinels detect any premature write to the data/status segments.
    let sentinel = vec![0xFFu8; 1024];
    mem.write_slice(&sentinel, data_addr).unwrap();
    mem.write_slice(&[0xEEu8], status_addr).unwrap();
    write_blk_header(&mem, header_addr, VIRTIO_BLK_T_IN, 0);
    let descs = [
        RawDescriptor::from(SplitDescriptor::new(
            header_addr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            data_addr.0,
            1024,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(
            status_addr.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )),
    ];
    mock.build_desc_chain(&descs).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    assert_eq!(
        dev.counters().throttled_count.load(Ordering::Relaxed),
        1,
        "first notify must stall on bytes bucket — pre-drained \
         into -3072 debt, normal-path gate `available >= n` \
         fails",
    );
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(s[0], 0xEE, "stall must not write status byte");
    // Back-date the refill clock 4 s: 4096 refilled bytes, clamped to the
    // 1024-byte capacity.
    dev.worker
        .state_mut()
        .bytes_bucket
        .set_last_refill_for_test(Instant::now() - Duration::from_secs(4));
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut s = [0u8; 1];
    mem.read_slice(&mut s, status_addr).unwrap();
    assert_eq!(
        s[0], VIRTIO_BLK_S_OK as u8,
        "post-refill chain must complete with S_OK",
    );
    let c = dev.counters();
    assert_eq!(
        c.reads_completed.load(Ordering::Relaxed),
        1,
        "exactly one read completed (the post-refill retry)",
    );
    assert_eq!(
        c.bytes_read.load(Ordering::Relaxed),
        1024,
        "bytes_read counts the 1024-byte read",
    );
    assert_eq!(c.io_errors.load(Ordering::Relaxed), 0);
    assert_eq!(
        c.throttled_count.load(Ordering::Relaxed),
        1,
        "throttled_count == 1 — only the first notify stalled; \
         the post-refill retry succeeded without re-stalling \
         (refill cleared the debt and raised available to \
         capacity, exactly enough for the 1024-byte chain)",
    );
}
// Two consecutive notifies against an empty ops bucket must each stall on
// the same descriptor head, and the rollback must leave the queue exactly
// where it started both times.
#[test]
fn throttle_multi_stall_same_head() {
    let capacity = 4096u64;
    let backing = make_backed_file_with_pattern(capacity, 0xAB);
    let mut dev = VirtioBlk::new(
        backing,
        capacity,
        DiskThrottle {
            iops: std::num::NonZeroU64::new(1),
            bytes_per_sec: None,
            iops_burst_capacity: None,
            bytes_burst_capacity: None,
        },
    );
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    // Drain the ops bucket with the refill clock pinned on both sides so
    // the first notify below cannot earn a refill.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(std::time::Instant::now());
    assert!(dev.worker.state_mut().ops_bucket.consume(1));
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(std::time::Instant::now());
    let hdr = GuestAddress(0x4000);
    let data = GuestAddress(0x5000);
    let status = GuestAddress(0x6000);
    // Sentinel status byte proves neither stalled attempt wrote a completion.
    mem.write_slice(&[0xEEu8], status).unwrap();
    write_blk_header(&mem, hdr, VIRTIO_BLK_T_IN, 0);
    // Read chain: header (device-read) + 512-byte data + status byte.
    let chain = [
        RawDescriptor::from(SplitDescriptor::new(
            hdr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(data.0, 512, VRING_DESC_F_WRITE as u16, 0)),
        RawDescriptor::from(SplitDescriptor::new(status.0, 1, VRING_DESC_F_WRITE as u16, 0)),
    ];
    mock.build_desc_chain(&chain).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    let avail_before = dev.worker.queues[REQ_QUEUE].next_avail();
    // Stall #1.
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    // Re-pin the clock (no refill earned), then stall #2 on the same head.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(std::time::Instant::now());
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let counters = dev.counters();
    assert_eq!(
        counters.throttled_count.load(Ordering::Relaxed),
        2,
        "two stalls on the same head must bump throttled_count twice — \
         a regression that lost the rolled-back chain would surface as 1",
    );
    let mut status_byte = [0u8; 1];
    mem.read_slice(&mut status_byte, status).unwrap();
    assert_eq!(status_byte[0], 0xEE);
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 0);
    assert_eq!(
        dev.worker.queues[REQ_QUEUE].next_avail(),
        avail_before,
        "next_avail must equal pre-stall value after both \
         stalls (rollback preserved across 2 stalls)",
    );
}
// A batch of three read chains against an iops-throttled device with a
// pre-drained ops bucket: chain 0 completes on the first notify, chain 1
// stalls, and after a back-dated refill chains 1 and 2 complete in
// avail-ring (FIFO) order.
#[test]
fn throttle_stall_mid_batch_three_chains() {
    use virtio_bindings::bindings::virtio_ring::VRING_DESC_F_NEXT;
    let cap = 4096u64;
    let f = tempfile().unwrap();
    f.set_len(cap).unwrap();
    // Give sectors 0..3 distinct fill patterns (0x11 / 0x22 / 0x33) so each
    // chain's data segment identifies which sector it actually read.
    // write_all_at never moves the cursor, so no rewind is needed before
    // handing the file to the device (was: three write_all calls and a
    // rewind jammed onto one line).
    for (sector, pattern) in [0x11u8, 0x22, 0x33].into_iter().enumerate() {
        f.write_all_at(&[pattern; 512], sector as u64 * 512).unwrap();
    }
    let throttle = DiskThrottle {
        iops: std::num::NonZeroU64::new(2),
        bytes_per_sec: None,
        iops_burst_capacity: None,
        bytes_burst_capacity: None,
    };
    let mut dev = VirtioBlk::new(f, cap, throttle);
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 32);
    // Consume one ops token with the refill clock pinned on both sides so
    // the first notify starts with a partially drained bucket and earns no
    // elapsed-time refill.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(std::time::Instant::now());
    assert!(dev.worker.state_mut().ops_bucket.consume(1));
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(std::time::Instant::now());
    // (header, data, status, sector) per chain.
    let chains = [
        (
            GuestAddress(0x4000),
            GuestAddress(0x4400),
            GuestAddress(0x4800),
            0u64,
        ),
        (
            GuestAddress(0x5000),
            GuestAddress(0x5400),
            GuestAddress(0x5800),
            1u64,
        ),
        (
            GuestAddress(0x6000),
            GuestAddress(0x6400),
            GuestAddress(0x6800),
            2u64,
        ),
    ];
    for &(hdr, _, _, sector) in &chains {
        write_blk_header(&mem, hdr, VIRTIO_BLK_T_IN, sector);
    }
    // Build all nine descriptors (three 3-descriptor read chains) up front.
    let mut descs = Vec::new();
    for (chain_i, &(hdr, data, status, _)) in chains.iter().enumerate() {
        let base = (chain_i as u16) * 3;
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            hdr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            VRING_DESC_F_NEXT as u16,
            base + 1,
        )));
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            data.0,
            512,
            VRING_DESC_F_WRITE as u16 | VRING_DESC_F_NEXT as u16,
            base + 2,
        )));
        descs.push(RawDescriptor::from(SplitDescriptor::new(
            status.0,
            1,
            VRING_DESC_F_WRITE as u16,
            0,
        )));
    }
    mock.add_desc_chains(&descs, 0).expect("add 3 chains");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    // First notify: chain 0 completes, chain 1 hits the drained bucket.
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let c = dev.counters();
    assert_eq!(
        c.reads_completed.load(Ordering::Relaxed),
        1,
        "chain 0 completed before stall",
    );
    assert_eq!(
        c.throttled_count.load(Ordering::Relaxed),
        1,
        "chain 1 stalled",
    );
    let used_idx_after_stall: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx after stall");
    assert_eq!(used_idx_after_stall, 1);
    // Back-date the refill clock far enough to refill the bucket, then
    // retry: chains 1 and 2 must now complete.
    dev.worker
        .state_mut()
        .ops_bucket
        .set_last_refill_for_test(std::time::Instant::now() - std::time::Duration::from_secs(5));
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let c = dev.counters();
    assert_eq!(
        c.reads_completed.load(Ordering::Relaxed),
        3,
        "all three chains completed (chain 0 first notify, \
         chains 1+2 second notify)",
    );
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx after retry");
    assert_eq!(used_idx, 3, "used.idx covers all three completions");
    assert_eq!(c.io_errors.load(Ordering::Relaxed), 0);
    assert_eq!(c.throttled_count.load(Ordering::Relaxed), 1);
    let mut buf1 = [0u8; 512];
    mem.read_slice(&mut buf1, chains[1].1).unwrap();
    assert!(
        buf1.iter().all(|&b| b == 0x22),
        "chain 1's data must hold sector 1's pattern (0x22) — \
         FIFO order preserved across stall+retry",
    );
    let mut buf2 = [0u8; 512];
    mem.read_slice(&mut buf2, chains[2].1).unwrap();
    assert!(
        buf2.iter().all(|&b| b == 0x33),
        "chain 2's data must hold sector 2's pattern (0x33) — \
         retry processed chains in avail-ring order",
    );
}
// FLUSH moves zero data bytes, so it must complete even when the bytes
// bucket has just been drained.
#[test]
fn throttle_flush_on_drained_bytes_bucket() {
    let capacity = 4096u64;
    let backing = make_backed_file_with_pattern(capacity, 0x00);
    let mut dev = VirtioBlk::new(
        backing,
        capacity,
        DiskThrottle {
            iops: None,
            bytes_per_sec: std::num::NonZeroU64::new(1),
            iops_burst_capacity: None,
            bytes_burst_capacity: None,
        },
    );
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    // Drain the bytes bucket with the refill clock pinned on both sides so
    // the notify below cannot earn a refill.
    dev.worker
        .state_mut()
        .bytes_bucket
        .set_last_refill_for_test(std::time::Instant::now());
    assert!(dev.worker.state_mut().bytes_bucket.consume(1));
    dev.worker
        .state_mut()
        .bytes_bucket
        .set_last_refill_for_test(std::time::Instant::now());
    let hdr = GuestAddress(0x4000);
    let status = GuestAddress(0x5000);
    write_blk_header(&mem, hdr, VIRTIO_BLK_T_FLUSH, 0);
    // A FLUSH chain carries no data descriptor: just header + status byte.
    let chain = [
        RawDescriptor::from(SplitDescriptor::new(
            hdr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(status.0, 1, VRING_DESC_F_WRITE as u16, 0)),
    ];
    mock.build_desc_chain(&chain).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let counters = dev.counters();
    assert_eq!(
        counters.flushes_completed.load(Ordering::Relaxed),
        1,
        "FLUSH must complete despite drained bytes bucket — \
         FLUSH consumes 0 bytes-tokens",
    );
    assert_eq!(
        counters.throttled_count.load(Ordering::Relaxed),
        0,
        "FLUSH must NOT stall on bytes bucket — data_len=0 \
         means can_consume(0)=true unconditionally",
    );
    assert_eq!(counters.io_errors.load(Ordering::Relaxed), 0);
}
// A T_OUT request against a read-only device must travel the full chain
// pipeline and come back with S_IOERR in the status byte, while the chain
// itself is still consumed (one used-ring entry).
#[test]
fn process_requests_read_only_write_returns_ioerr_chain() {
    let capacity = 4096u64;
    let backing = make_backed_file_with_pattern(capacity, 0x00);
    // Final `true` argument configures the device read-only — TODO confirm
    // against with_options' signature.
    let mut dev = VirtioBlk::with_options(backing, capacity, DiskThrottle::default(), true);
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 16);
    let hdr = GuestAddress(0x4000);
    let data = GuestAddress(0x5000);
    let status = GuestAddress(0x6000);
    // Plant a 512-byte payload the guest is "writing" to sector 1.
    mem.write_slice(&[0xCDu8; 512], data).expect("plant");
    write_blk_header(&mem, hdr, VIRTIO_BLK_T_OUT, 1);
    // Write chain: header + device-read data segment + status byte.
    let chain = [
        RawDescriptor::from(SplitDescriptor::new(
            hdr.0,
            VIRTIO_BLK_OUTHDR_SIZE as u32,
            0,
            0,
        )),
        RawDescriptor::from(SplitDescriptor::new(data.0, 512, 0, 0)),
        RawDescriptor::from(SplitDescriptor::new(status.0, 1, VRING_DESC_F_WRITE as u16, 0)),
    ];
    mock.build_desc_chain(&chain).expect("build chain");
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
    let mut status_byte = [0u8; 1];
    mem.read_slice(&mut status_byte, status).unwrap();
    assert_eq!(
        status_byte[0], VIRTIO_BLK_S_IOERR as u8,
        "RO device must reject T_OUT with S_IOERR through the chain pipeline",
    );
    let used_idx: u16 = mem
        .read_obj(mock.used_addr().checked_add(2).unwrap())
        .expect("read used.idx");
    assert_eq!(used_idx, 1);
    let counters = dev.counters();
    assert_eq!(counters.io_errors.load(Ordering::Relaxed), 1);
    assert_eq!(counters.writes_completed.load(Ordering::Relaxed), 0);
    assert_eq!(counters.bytes_written.load(Ordering::Relaxed), 0);
    assert_eq!(counters.throttled_count.load(Ordering::Relaxed), 0);
}
// Five read chains submitted one notify at a time: used.idx must advance
// by exactly one after each notify.
#[test]
fn process_requests_used_idx_advances_across_repeated_notifies() {
    use virtio_bindings::bindings::virtio_ring::VRING_DESC_F_NEXT;
    let capacity = 4096u64;
    let backing = make_backed_file_with_pattern(capacity, 0xAB);
    let mut dev = VirtioBlk::new(backing, capacity, DiskThrottle::default());
    let mem = make_chain_test_mem();
    let mock = MockSplitQueue::create(&mem, GuestAddress(0), 32);
    dev.set_mem(mem.clone());
    wire_device_to_mock(&mut dev, &mock);
    for round in 0..5u64 {
        // Each round uses its own 4 KiB page: header at +0, data at
        // +0x400, status at +0x800.
        let page = 0x4000 + round * 0x1000;
        let (hdr, data, status) = (
            GuestAddress(page),
            GuestAddress(page + 0x400),
            GuestAddress(page + 0x800),
        );
        write_blk_header(&mem, hdr, VIRTIO_BLK_T_IN, 0);
        // Descriptor-table slot of this round's first descriptor.
        let first = (round as u16) * 3;
        let chain = [
            RawDescriptor::from(SplitDescriptor::new(
                hdr.0,
                VIRTIO_BLK_OUTHDR_SIZE as u32,
                VRING_DESC_F_NEXT as u16,
                first + 1,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                data.0,
                512,
                VRING_DESC_F_WRITE as u16 | VRING_DESC_F_NEXT as u16,
                first + 2,
            )),
            RawDescriptor::from(SplitDescriptor::new(
                status.0,
                1,
                VRING_DESC_F_WRITE as u16,
                0,
            )),
        ];
        mock.add_desc_chains(&chain, first).expect("add chain");
        write_reg(&mut dev, VIRTIO_MMIO_QUEUE_NOTIFY, REQ_QUEUE as u32);
        let used_idx: u16 = mem
            .read_obj(mock.used_addr().checked_add(2).unwrap())
            .expect("read used.idx");
        assert_eq!(
            used_idx,
            (round as u16) + 1,
            "after notify #{} used.idx must equal {}",
            round + 1,
            round + 1,
        );
    }
    let counters = dev.counters();
    assert_eq!(counters.reads_completed.load(Ordering::Relaxed), 5);
    assert_eq!(counters.io_errors.load(Ordering::Relaxed), 0);
}