use std::sync::Arc;
use std::sync::OnceLock;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use virtio_bindings::virtio_config::{
VIRTIO_CONFIG_S_ACKNOWLEDGE, VIRTIO_CONFIG_S_DRIVER, VIRTIO_CONFIG_S_DRIVER_OK,
VIRTIO_CONFIG_S_FAILED, VIRTIO_CONFIG_S_FEATURES_OK, VIRTIO_CONFIG_S_NEEDS_RESET,
VIRTIO_F_VERSION_1,
};
use virtio_bindings::virtio_ids::VIRTIO_ID_NET;
use virtio_bindings::virtio_mmio::{
VIRTIO_MMIO_CONFIG_GENERATION, VIRTIO_MMIO_DEVICE_FEATURES, VIRTIO_MMIO_DEVICE_FEATURES_SEL,
VIRTIO_MMIO_DEVICE_ID, VIRTIO_MMIO_DRIVER_FEATURES, VIRTIO_MMIO_DRIVER_FEATURES_SEL,
VIRTIO_MMIO_INT_CONFIG, VIRTIO_MMIO_INT_VRING, VIRTIO_MMIO_INTERRUPT_ACK,
VIRTIO_MMIO_INTERRUPT_STATUS, VIRTIO_MMIO_MAGIC_VALUE, VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
VIRTIO_MMIO_QUEUE_AVAIL_LOW, VIRTIO_MMIO_QUEUE_DESC_HIGH, VIRTIO_MMIO_QUEUE_DESC_LOW,
VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_QUEUE_NUM, VIRTIO_MMIO_QUEUE_NUM_MAX,
VIRTIO_MMIO_QUEUE_READY, VIRTIO_MMIO_QUEUE_SEL, VIRTIO_MMIO_QUEUE_USED_HIGH,
VIRTIO_MMIO_QUEUE_USED_LOW, VIRTIO_MMIO_STATUS, VIRTIO_MMIO_VENDOR_ID, VIRTIO_MMIO_VERSION,
};
use virtio_bindings::virtio_net::VIRTIO_NET_F_MAC;
use virtio_queue::{Error as VirtioQueueError, Queue, QueueOwnedT, QueueT};
use vm_memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;
use crate::vmm::net_config::NetConfig;
// MMIO_MAGIC is "virt" in little-endian ASCII (read back by the guest from
// VIRTIO_MMIO_MAGIC_VALUE); version 2 selects the modern (non-legacy)
// virtio-mmio register layout; vendor id 0 means no specific vendor.
pub(crate) const MMIO_MAGIC: u32 = 0x7472_6976; pub(crate) const MMIO_VERSION: u32 = 2; pub(crate) const VENDOR_ID: u32 = 0;
/// Size of the virtio-mmio register window exposed to the guest (one 4 KiB page).
pub const VIRTIO_MMIO_SIZE: u64 = 0x1000;
// Two virtqueues: index 0 receives into the guest (RX), index 1 transmits
// out of the guest (TX) — the standard virtio-net queue ordering.
pub(crate) const NUM_QUEUES: usize = 2;
// Maximum queue size advertised via VIRTIO_MMIO_QUEUE_NUM_MAX.
pub(crate) const QUEUE_MAX_SIZE: u16 = 256;
pub(crate) const RXQ: usize = 0;
pub(crate) const TXQ: usize = 1;
/// Length of the virtio_net_hdr that prefixes every frame in a queue buffer.
/// 12 bytes is the VIRTIO_F_VERSION_1 header, including the trailing
/// num_buffers u16 (see the RX_HDR constant in try_loopback_to_rx).
pub const VIRTIO_NET_HDR_LEN: usize = 12;
// Upper bound on the L2 payload captured per TX frame.
pub(crate) const MAX_FRAME_SIZE: usize = 65_536;
// Cap on how much of a single TX descriptor is read, guarding against
// absurd guest-supplied descriptor lengths.
pub(crate) const TX_DESC_MAX: usize = MAX_FRAME_SIZE;
// Cumulative device-status milestones in the canonical driver bring-up
// order (each stage includes all earlier bits); used when validating
// driver writes to VIRTIO_MMIO_STATUS.
pub(crate) const S_ACK: u32 = VIRTIO_CONFIG_S_ACKNOWLEDGE;
pub(crate) const S_DRV: u32 = S_ACK | VIRTIO_CONFIG_S_DRIVER;
pub(crate) const S_FEAT: u32 = S_DRV | VIRTIO_CONFIG_S_FEATURES_OK;
#[cfg(test)]
pub(crate) const S_OK: u32 = S_FEAT | VIRTIO_CONFIG_S_DRIVER_OK;
/// Device-specific configuration space for virtio-net, laid out exactly as
/// the guest reads it starting at MMIO offset 0x100 (served byte-wise by
/// read_config_space). Field offsets are pinned by the compile-time asserts
/// below.
#[repr(C, packed)]
#[derive(Copy, Clone, Default, Debug)]
pub(crate) struct VirtioNetConfig {
    pub(crate) mac: [u8; 6],             // offset 0x00: device MAC address
    pub(crate) status: u16,              // offset 0x06
    pub(crate) max_virtqueue_pairs: u16, // offset 0x08
    pub(crate) mtu: u16,                 // offset 0x0A
}
// SAFETY: VirtioNetConfig is #[repr(C, packed)] and contains only plain
// integer fields, so it has no padding bytes and every bit pattern is a
// valid value — exactly the requirements ByteValued demands.
unsafe impl ByteValued for VirtioNetConfig {}
pub(crate) const VIRTIO_NET_CONFIG_SIZE: usize = std::mem::size_of::<VirtioNetConfig>();
// Compile-time guards: the guest-visible layout must never drift.
const _: () = assert!(std::mem::offset_of!(VirtioNetConfig, mac) == 0x00);
const _: () = assert!(std::mem::offset_of!(VirtioNetConfig, status) == 0x06);
const _: () = assert!(std::mem::offset_of!(VirtioNetConfig, max_virtqueue_pairs) == 0x08);
const _: () = assert!(std::mem::offset_of!(VirtioNetConfig, mtu) == 0x0A);
const _: () = assert!(VIRTIO_NET_CONFIG_SIZE == 12);
/// Monotonic statistics for the virtio-net device. Updated with relaxed
/// atomics (pure accounting, no synchronisation intent) and shared between
/// the device and external observers via `Arc` (see `VirtioNet::counters`).
#[derive(Debug, Default)]
pub struct VirtioNetCounters {
    pub(crate) tx_packets: AtomicU64,               // TX chains completed with a valid frame
    pub(crate) tx_bytes: AtomicU64,                 // L2 bytes of those frames
    pub(crate) rx_packets: AtomicU64,               // frames delivered into RX buffers
    pub(crate) rx_bytes: AtomicU64,                 // L2 bytes written into RX buffers
    pub(crate) tx_dropped_no_rx_buffer: AtomicU64,  // frames dropped: no RX buffer available
    pub(crate) tx_chain_invalid: AtomicU64,         // malformed TX descriptor chains
    pub(crate) rx_chain_invalid: AtomicU64,         // malformed RX descriptor chains
    pub(crate) rx_write_failed: AtomicU64,          // guest-memory write failures on RX
    pub(crate) tx_add_used_failures: AtomicU64,     // TX used-ring publish failures
    pub(crate) rx_add_used_failures: AtomicU64,     // RX used-ring publish failures
    pub(crate) invalid_avail_idx_count: AtomicU64,  // structural queue errors (queue poisoned)
}
impl VirtioNetCounters {
pub(crate) fn record_tx_completed(&self, frame_bytes: u64) {
self.tx_packets.fetch_add(1, Ordering::Relaxed);
self.tx_bytes.fetch_add(frame_bytes, Ordering::Relaxed);
}
pub(crate) fn record_rx_delivered(&self, frame_bytes: u64) {
self.rx_packets.fetch_add(1, Ordering::Relaxed);
self.rx_bytes.fetch_add(frame_bytes, Ordering::Relaxed);
}
pub(crate) fn record_tx_dropped_no_rx_buffer(&self) {
self.tx_dropped_no_rx_buffer.fetch_add(1, Ordering::Relaxed);
}
pub(crate) fn record_tx_chain_invalid(&self) {
self.tx_chain_invalid.fetch_add(1, Ordering::Relaxed);
}
pub(crate) fn record_rx_chain_invalid(&self) {
self.rx_chain_invalid.fetch_add(1, Ordering::Relaxed);
}
pub(crate) fn record_rx_write_failed(&self) {
self.rx_write_failed.fetch_add(1, Ordering::Relaxed);
}
pub(crate) fn record_tx_add_used_failure(&self) {
self.tx_add_used_failures.fetch_add(1, Ordering::Relaxed);
}
pub(crate) fn record_rx_add_used_failure(&self) {
self.rx_add_used_failures.fetch_add(1, Ordering::Relaxed);
}
pub(crate) fn record_invalid_avail_idx(&self) {
self.invalid_avail_idx_count.fetch_add(1, Ordering::Relaxed);
}
pub fn tx_packets(&self) -> u64 {
self.tx_packets.load(Ordering::Relaxed)
}
pub fn tx_bytes(&self) -> u64 {
self.tx_bytes.load(Ordering::Relaxed)
}
pub fn rx_packets(&self) -> u64 {
self.rx_packets.load(Ordering::Relaxed)
}
pub fn rx_bytes(&self) -> u64 {
self.rx_bytes.load(Ordering::Relaxed)
}
pub fn tx_dropped_no_rx_buffer(&self) -> u64 {
self.tx_dropped_no_rx_buffer.load(Ordering::Relaxed)
}
pub fn tx_chain_invalid(&self) -> u64 {
self.tx_chain_invalid.load(Ordering::Relaxed)
}
pub fn rx_chain_invalid(&self) -> u64 {
self.rx_chain_invalid.load(Ordering::Relaxed)
}
pub fn rx_write_failed(&self) -> u64 {
self.rx_write_failed.load(Ordering::Relaxed)
}
pub fn tx_add_used_failures(&self) -> u64 {
self.tx_add_used_failures.load(Ordering::Relaxed)
}
pub fn rx_add_used_failures(&self) -> u64 {
self.rx_add_used_failures.load(Ordering::Relaxed)
}
pub fn invalid_avail_idx_count(&self) -> u64 {
self.invalid_avail_idx_count.load(Ordering::Relaxed)
}
}
/// Loopback virtio-net device exposed over the virtio-mmio transport:
/// every frame the guest transmits on the TX queue is copied straight back
/// into the guest's own RX buffers.
pub struct VirtioNet {
    queues: [Queue; NUM_QUEUES],        // RX at index RXQ (0), TX at index TXQ (1)
    queue_select: u32,                  // last write to VIRTIO_MMIO_QUEUE_SEL
    device_features_sel: u32,           // feature-word page selector (0 = low 32 bits, 1 = high)
    driver_features_sel: u32,           // same, for driver feature writes
    driver_features: u64,               // feature bits the driver has written
    device_status: u32,                 // current VIRTIO_MMIO_STATUS value
    interrupt_status: u32,              // pending VIRTIO_MMIO_INT_* bits until acked
    config_generation: u32,             // served at VIRTIO_MMIO_CONFIG_GENERATION; never modified here
    irq_evt: EventFd,                   // written to inject the device interrupt
    // Guest memory; set once at boot via set_mem and kept across reset().
    mem: Arc<OnceLock<GuestMemoryMmap>>,
    // Latch so the "notify before set_mem" warning is logged only once.
    mem_unset_warned: Arc<AtomicBool>,
    config: VirtioNetConfig,            // config space served at MMIO offset 0x100+
    counters: Arc<VirtioNetCounters>,   // shared statistics
    // Reusable buffer holding the captured TX frame between pop and RX write.
    tx_frame_scratch: Vec<u8>,
    // Set when a queue hits a structural error; cleared only by reset().
    queue_poisoned: [bool; NUM_QUEUES],
}
impl VirtioNet {
    /// Build a loopback virtio-net device with both queues unconfigured and
    /// all status/feature registers zeroed. The MAC from `config` becomes
    /// the guest-visible config-space MAC.
    ///
    /// # Panics
    /// Panics if the irq eventfd cannot be created or `QUEUE_MAX_SIZE` is
    /// rejected by `Queue::new` — both are boot-time invariants, not
    /// runtime conditions.
    pub fn new(config: NetConfig) -> Self {
        let irq_evt =
            EventFd::new(libc::EFD_NONBLOCK).expect("failed to create virtio-net irq eventfd");
        VirtioNet {
            queues: [
                Queue::new(QUEUE_MAX_SIZE).expect("valid queue size"),
                Queue::new(QUEUE_MAX_SIZE).expect("valid queue size"),
            ],
            queue_select: 0,
            device_features_sel: 0,
            driver_features_sel: 0,
            driver_features: 0,
            device_status: 0,
            interrupt_status: 0,
            config_generation: 0,
            irq_evt,
            mem: Arc::new(OnceLock::new()),
            mem_unset_warned: Arc::new(AtomicBool::new(false)),
            config: VirtioNetConfig {
                mac: config.mac,
                status: 0,
                max_virtqueue_pairs: 0,
                mtu: 0,
            },
            counters: Arc::new(VirtioNetCounters::default()),
            tx_frame_scratch: Vec::with_capacity(MAX_FRAME_SIZE),
            queue_poisoned: [false; NUM_QUEUES],
        }
    }

    /// Eventfd that is written whenever the device raises an interrupt.
    pub fn irq_evt(&self) -> &EventFd {
        &self.irq_evt
    }

    /// Bind guest memory. This is a set-once operation: a second call is a
    /// no-op that only logs a warning (the binding survives device reset).
    pub fn set_mem(&mut self, mem: GuestMemoryMmap) {
        if self.mem.set(mem).is_err() {
            tracing::warn!(
                "virtio-net: set_mem called on already-initialised \
                 device; guest memory binding unchanged (mem is set \
                 once at boot and preserved across reset())"
            );
        }
    }

    /// Shared handle to the device statistics.
    pub fn counters(&self) -> Arc<VirtioNetCounters> {
        Arc::clone(&self.counters)
    }

    /// Feature bits this device offers: modern virtio (VERSION_1) plus a
    /// valid MAC in config space. Nothing else is negotiated.
    fn device_features(&self) -> u64 {
        (1u64 << VIRTIO_F_VERSION_1) | (1u64 << VIRTIO_NET_F_MAC)
    }

    /// The queue currently addressed by QUEUE_SEL, or None if the driver
    /// selected an index we don't implement.
    fn selected_queue(&self) -> Option<usize> {
        let idx = self.queue_select as usize;
        if idx < NUM_QUEUES { Some(idx) } else { None }
    }

    /// Raise the used-ring interrupt (INT_VRING) towards the guest.
    fn signal_used(&mut self) {
        self.interrupt_status |= VIRTIO_MMIO_INT_VRING;
        if let Err(e) = self.irq_evt.write(1) {
            tracing::warn!(%e, "virtio-net irq_evt.write failed");
        }
    }

    /// Tell the guest the device needs a reset (a queue was poisoned):
    /// set NEEDS_RESET in the status and raise a config interrupt.
    fn signal_queue_poisoned(&mut self) {
        self.device_status |= VIRTIO_CONFIG_S_NEEDS_RESET;
        self.interrupt_status |= VIRTIO_MMIO_INT_CONFIG;
        if let Err(e) = self.irq_evt.write(1) {
            tracing::warn!(%e, "virtio-net irq_evt.write failed (poison signal)");
        }
    }

    /// Queue registers may only be written after FEATURES_OK and before
    /// DRIVER_OK (i.e. during driver setup).
    fn queue_config_allowed(&self) -> bool {
        self.device_status & S_FEAT == S_FEAT && self.device_status & VIRTIO_CONFIG_S_DRIVER_OK == 0
    }

    /// Driver feature words may only be written after DRIVER and before
    /// FEATURES_OK.
    fn features_write_allowed(&self) -> bool {
        self.device_status & S_DRV == S_DRV && self.device_status & VIRTIO_CONFIG_S_FEATURES_OK == 0
    }

    /// Drain the TX queue, looping each captured frame back into the RX
    /// queue. Called on a guest TX kick; no-ops unless DRIVER_OK is set and
    /// guest memory has been bound via `set_mem`. Raises INT_VRING once at
    /// the end if any used-ring entry was published, and NEEDS_RESET if
    /// either queue was poisoned during this pass.
    fn process_tx_loopback(&mut self) {
        if self.device_status & VIRTIO_CONFIG_S_DRIVER_OK == 0 {
            return;
        }
        // Clone the Arc so the memory borrow does not alias `self` while we
        // mutate queues below.
        let mem_arc = Arc::clone(&self.mem);
        let Some(mem) = mem_arc.get() else {
            // Warn only on the first premature kick.
            if !self.mem_unset_warned.swap(true, Ordering::Relaxed) {
                tracing::warn!(
                    "virtio-net: queue notify before set_mem; \
                     dropping TX kick until guest memory is wired"
                );
            }
            return;
        };
        let mut had_used_ring_publish = false;
        let mut tx_just_poisoned = false;
        let mut rx_just_poisoned = false;
        loop {
            let pop_outcome = self.pop_and_capture_tx(mem);
            let chain_outcome = match pop_outcome {
                TxPopOutcome::Empty => break,
                TxPopOutcome::JustPoisoned => {
                    tx_just_poisoned = true;
                    break;
                }
                TxPopOutcome::Chain(c) => c,
            };
            let TxChainOutcome { head, frame_len } = chain_outcome;
            // Only attempt loopback when the TX chain produced a valid frame.
            if let Some(len) = frame_len {
                match self.try_loopback_to_rx(mem, len) {
                    LoopbackOutcome::Delivered { l2_bytes_written } => {
                        self.counters.record_rx_delivered(l2_bytes_written);
                        had_used_ring_publish = true;
                    }
                    // Frame written but not published; nothing more to do.
                    LoopbackOutcome::DeliveredButAddUsedFailed => {}
                    // RX was poisoned in an earlier pass; drop silently.
                    LoopbackOutcome::RxAlreadyPoisoned => {}
                    LoopbackOutcome::JustRxPoisoned => {
                        rx_just_poisoned = true;
                    }
                    LoopbackOutcome::NoRxBuffer => {
                        self.counters.record_tx_dropped_no_rx_buffer();
                    }
                    LoopbackOutcome::RxChainInvalid { add_used_ok } => {
                        if add_used_ok {
                            had_used_ring_publish = true;
                        }
                    }
                }
            }
            // Always return the TX chain to the guest (even malformed ones),
            // with used length 0 since TX buffers are device-read-only.
            let q = &mut self.queues[TXQ];
            match q.add_used(mem, head, 0) {
                Ok(()) => {
                    if let Some(len) = frame_len {
                        self.counters.record_tx_completed(len as u64);
                    }
                    had_used_ring_publish = true;
                }
                Err(e) => {
                    self.counters.record_tx_add_used_failure();
                    tracing::warn!(
                        head,
                        %e,
                        "virtio-net TX add_used failed (used-ring address \
                         likely unmapped); bumped tx_add_used_failures, \
                         will NOT bump tx_packets"
                    );
                }
            }
            if rx_just_poisoned {
                break;
            }
        }
        if had_used_ring_publish {
            self.signal_used();
        }
        if tx_just_poisoned || rx_just_poisoned {
            self.signal_queue_poisoned();
        }
    }

    /// Pop one chain from the TX queue and copy its frame payload (after
    /// skipping the leading virtio-net header) into `tx_frame_scratch`.
    /// Structural queue errors poison the TX queue until reset; malformed
    /// chains are reported with `frame_len: None` so the caller can still
    /// return them to the guest.
    fn pop_and_capture_tx(&mut self, mem: &GuestMemoryMmap) -> TxPopOutcome {
        if self.queue_poisoned[TXQ] {
            return TxPopOutcome::Empty;
        }
        // Local three-way result so the mutable queue borrow ends before we
        // touch `self.counters` / `self.queue_poisoned` below.
        enum IterStep<C> {
            Chain(C),
            Empty,
            Poisoned(VirtioQueueError),
        }
        let step: IterStep<_> = {
            let q = &mut self.queues[TXQ];
            match q.iter(mem) {
                Ok(mut iter) => match iter.next() {
                    Some(c) => IterStep::Chain(c),
                    None => IterStep::Empty,
                },
                Err(e) => IterStep::Poisoned(e),
            }
        };
        let (chain, head) = match step {
            IterStep::Empty => return TxPopOutcome::Empty,
            IterStep::Chain(c) => {
                let h = c.head_index();
                (c, h)
            }
            IterStep::Poisoned(err) => {
                self.queue_poisoned[TXQ] = true;
                self.counters.record_invalid_avail_idx();
                tracing::warn!(
                    err = %err,
                    "virtio-net TX iter() failed; poisoning TX queue until \
                     guest reset (any structural queue error is \
                     non-recoverable; cloud-hypervisor convergence)"
                );
                return TxPopOutcome::JustPoisoned;
            }
        };
        self.tx_frame_scratch.clear();
        // The first VIRTIO_NET_HDR_LEN bytes of the chain are the virtio-net
        // header, which the loopback path ignores.
        let mut hdr_remaining: usize = VIRTIO_NET_HDR_LEN;
        let mut total_data_bytes: usize = 0;
        let mut chain_invalid = false;
        for desc in chain {
            // TX descriptors must be device-readable.
            if desc.is_write_only() {
                chain_invalid = true;
                break;
            }
            // Clamp guest-supplied descriptor length defensively.
            let mut desc_len = (desc.len() as usize).min(TX_DESC_MAX);
            let mut desc_addr = desc.addr();
            if hdr_remaining > 0 {
                // Skip (the rest of) the header inside this descriptor.
                let skip = hdr_remaining.min(desc_len);
                let Some(new_addr) = desc_addr.checked_add(skip as u64) else {
                    chain_invalid = true;
                    break;
                };
                hdr_remaining -= skip;
                desc_len -= skip;
                desc_addr = new_addr;
            }
            if desc_len == 0 {
                continue;
            }
            // Cap the total captured payload at MAX_FRAME_SIZE.
            let remaining = MAX_FRAME_SIZE.saturating_sub(total_data_bytes);
            let take = desc_len.min(remaining);
            if take == 0 {
                break;
            }
            let start = self.tx_frame_scratch.len();
            self.tx_frame_scratch.resize(start + take, 0);
            if mem
                .read_slice(&mut self.tx_frame_scratch[start..start + take], desc_addr)
                .is_err()
            {
                // Roll back the placeholder bytes we just reserved.
                self.tx_frame_scratch.truncate(start);
                chain_invalid = true;
                break;
            }
            total_data_bytes += take;
        }
        // A chain shorter than the header is malformed too.
        if chain_invalid || hdr_remaining != 0 {
            self.counters.record_tx_chain_invalid();
            return TxPopOutcome::Chain(TxChainOutcome {
                head,
                frame_len: None,
            });
        }
        TxPopOutcome::Chain(TxChainOutcome {
            head,
            frame_len: Some(total_data_bytes),
        })
    }

    /// Write the captured frame (`tx_frame_scratch[..frame_len]`), prefixed
    /// by a fresh virtio-net header, into the next available RX chain.
    /// Structural queue errors poison the RX queue; malformed chains are
    /// rejected with any partially-written header bytes zeroed back out.
    fn try_loopback_to_rx(&mut self, mem: &GuestMemoryMmap, frame_len: usize) -> LoopbackOutcome {
        if self.queue_poisoned[RXQ] {
            return LoopbackOutcome::RxAlreadyPoisoned;
        }
        // Same borrow-scoping trick as in pop_and_capture_tx.
        enum IterStep<C> {
            Chain(C),
            NoBuffer,
            Poisoned(VirtioQueueError),
        }
        let step: IterStep<_> = {
            let q = &mut self.queues[RXQ];
            if !q.ready() {
                return LoopbackOutcome::NoRxBuffer;
            }
            match q.iter(mem) {
                Ok(mut iter) => match iter.next() {
                    Some(c) => IterStep::Chain(c),
                    None => IterStep::NoBuffer,
                },
                Err(e) => IterStep::Poisoned(e),
            }
        };
        let (chain, head) = match step {
            IterStep::NoBuffer => return LoopbackOutcome::NoRxBuffer,
            IterStep::Chain(c) => {
                let h = c.head_index();
                (c, h)
            }
            IterStep::Poisoned(err) => {
                self.queue_poisoned[RXQ] = true;
                self.counters.record_invalid_avail_idx();
                tracing::warn!(
                    err = %err,
                    "virtio-net RX iter() failed; poisoning RX queue until \
                     guest reset (any structural queue error is \
                     non-recoverable; cloud-hypervisor convergence)"
                );
                return LoopbackOutcome::JustRxPoisoned;
            }
        };
        let mut bytes_written: u32 = 0;
        let mut hdr_remaining: usize = VIRTIO_NET_HDR_LEN;
        let mut frame_pos: usize = 0;
        // Record where each piece of the header landed so a later rejection
        // can zero it back out. Bounded by VIRTIO_NET_HDR_LEN entries because
        // every recorded slot consumes at least one of the 12 header bytes
        // (zero-length descriptors are skipped below and never recorded).
        let mut hdr_write_slots: [(GuestAddress, usize); VIRTIO_NET_HDR_LEN] =
            [(GuestAddress(0), 0); VIRTIO_NET_HDR_LEN];
        let mut hdr_write_count: usize = 0;
        enum InvalidReason {
            Shape,
            WriteFailed,
        }
        let mut chain_invalid: Option<InvalidReason> = None;
        for desc in chain {
            // RX descriptors must be device-writable.
            if !desc.is_write_only() {
                chain_invalid = Some(InvalidReason::Shape);
                break;
            }
            let mut desc_addr = desc.addr();
            let mut desc_len = desc.len() as usize;
            // BUGFIX: skip zero-length descriptors outright. Previously a
            // descriptor with len == 0 while hdr_remaining > 0 still recorded
            // a (zero-byte) entry in hdr_write_slots without shrinking
            // hdr_remaining, so a guest supplying more than VIRTIO_NET_HDR_LEN
            // zero-length write-only descriptors drove hdr_write_count past
            // the array bound and panicked the VMM on guest-controlled input.
            if desc_len == 0 {
                continue;
            }
            if hdr_remaining > 0 {
                let take = hdr_remaining.min(desc_len);
                // The header we present: all offloads zeroed, num_buffers = 1
                // (bytes 10..12 little-endian), as required when
                // VIRTIO_NET_F_MRG_RXBUF is not negotiated.
                const RX_HDR: [u8; VIRTIO_NET_HDR_LEN] = {
                    let mut h = [0u8; VIRTIO_NET_HDR_LEN];
                    h[10] = 1;
                    h[11] = 0;
                    h
                };
                let hdr_start = VIRTIO_NET_HDR_LEN - hdr_remaining;
                let hdr_slice = &RX_HDR[hdr_start..hdr_start + take];
                if mem.write_slice(hdr_slice, desc_addr).is_err() {
                    chain_invalid = Some(InvalidReason::WriteFailed);
                    break;
                }
                hdr_write_slots[hdr_write_count] = (desc_addr, take);
                hdr_write_count += 1;
                let Some(new_addr) = desc_addr.checked_add(take as u64) else {
                    chain_invalid = Some(InvalidReason::Shape);
                    break;
                };
                bytes_written = bytes_written
                    .checked_add(take as u32)
                    .expect("bytes_written cannot overflow u32 — capped by MAX_FRAME_SIZE+12");
                hdr_remaining -= take;
                desc_len -= take;
                desc_addr = new_addr;
            }
            if desc_len == 0 || frame_pos == frame_len {
                continue;
            }
            let take = desc_len.min(frame_len - frame_pos);
            if mem
                .write_slice(
                    &self.tx_frame_scratch[frame_pos..frame_pos + take],
                    desc_addr,
                )
                .is_err()
            {
                chain_invalid = Some(InvalidReason::WriteFailed);
                break;
            }
            bytes_written = bytes_written
                .checked_add(take as u32)
                .expect("bytes_written cannot overflow u32 — capped by MAX_FRAME_SIZE+12");
            frame_pos += take;
            if frame_pos == frame_len && hdr_remaining == 0 {
                break;
            }
        }
        if let Some(reason) = chain_invalid {
            match reason {
                InvalidReason::Shape => self.counters.record_rx_chain_invalid(),
                InvalidReason::WriteFailed => self.counters.record_rx_write_failed(),
            }
            // Undo any header bytes already written so the guest never sees
            // a half-initialised header in a rejected buffer.
            const ZEROS: [u8; VIRTIO_NET_HDR_LEN] = [0u8; VIRTIO_NET_HDR_LEN];
            for &(addr, len) in &hdr_write_slots[..hdr_write_count] {
                let _ = mem.write_slice(&ZEROS[..len], addr);
            }
            // Return the rejected chain with used length 0.
            let add_used_ok = match self.queues[RXQ].add_used(mem, head, 0) {
                Ok(()) => true,
                Err(e) => {
                    self.counters.record_rx_add_used_failure();
                    tracing::warn!(
                        head,
                        %e,
                        "virtio-net RX add_used failed after malformed-chain \
                         reject (used-ring address likely unmapped); bumped \
                         rx_add_used_failures"
                    );
                    false
                }
            };
            return LoopbackOutcome::RxChainInvalid { add_used_ok };
        }
        if frame_pos < frame_len || hdr_remaining != 0 {
            tracing::debug!(
                frame_len,
                bytes_written,
                hdr_remaining,
                "virtio-net RX buffer too small for full frame; truncating"
            );
        }
        // Report L2 bytes (excluding header) to the caller for accounting;
        // the used-ring entry carries the full written length.
        let hdr_taken = (VIRTIO_NET_HDR_LEN - hdr_remaining) as u32;
        let l2_bytes = bytes_written.saturating_sub(hdr_taken) as u64;
        match self.queues[RXQ].add_used(mem, head, bytes_written) {
            Ok(()) => LoopbackOutcome::Delivered {
                l2_bytes_written: l2_bytes,
            },
            Err(e) => {
                self.counters.record_rx_add_used_failure();
                tracing::warn!(
                    head,
                    %e,
                    "virtio-net RX add_used failed after successful frame \
                     write (used-ring address likely unmapped); bumped \
                     rx_add_used_failures, will NOT bump rx_packets"
                );
                LoopbackOutcome::DeliveredButAddUsedFailed
            }
        }
    }
}
/// Result of attempting to copy one captured TX frame into the RX queue
/// (see `VirtioNet::try_loopback_to_rx`).
enum LoopbackOutcome {
    /// Header + payload written and the used entry published;
    /// `l2_bytes_written` excludes the virtio-net header.
    Delivered { l2_bytes_written: u64 },
    /// Frame written but publishing the used entry failed; not counted as RX.
    DeliveredButAddUsedFailed,
    /// RX queue not ready or no available buffer; the frame is dropped.
    NoRxBuffer,
    /// RX chain malformed or a guest-memory write failed; `add_used_ok`
    /// records whether the rejected chain was still returned to the guest.
    RxChainInvalid { add_used_ok: bool },
    /// The RX queue hit a structural error during this call and was poisoned.
    JustRxPoisoned,
    /// The RX queue was already poisoned by an earlier call.
    RxAlreadyPoisoned,
}
/// Result of popping one chain from the TX queue
/// (see `VirtioNet::pop_and_capture_tx`).
enum TxPopOutcome {
    /// No available TX chain (or the TX queue was already poisoned).
    Empty,
    /// The TX queue hit a structural error during this call and was poisoned.
    JustPoisoned,
    /// A chain was popped; details in `TxChainOutcome`.
    Chain(TxChainOutcome),
}
/// A popped TX chain: the head descriptor index to return to the guest,
/// plus the captured payload length in `tx_frame_scratch` (`None` when the
/// chain was malformed and nothing usable was captured).
struct TxChainOutcome {
    head: u16,
    frame_len: Option<usize>,
}
impl VirtioNet {
    /// Handle a guest MMIO read at `offset` within the device window.
    ///
    /// Offsets >= 0x100 fall into device config space and honour arbitrary
    /// access widths; register reads below 0x100 must be exactly 4 bytes,
    /// otherwise the buffer is filled with 0xff.
    pub fn mmio_read(&self, offset: u64, data: &mut [u8]) {
        if offset >= 0x100 {
            self.read_config_space(offset - 0x100, data);
            return;
        }
        if data.len() != 4 {
            // Malformed register access width: answer all-ones.
            for b in data.iter_mut() {
                *b = 0xff;
            }
            return;
        }
        let val: u32 = match offset as u32 {
            VIRTIO_MMIO_MAGIC_VALUE => MMIO_MAGIC,
            VIRTIO_MMIO_VERSION => MMIO_VERSION,
            VIRTIO_MMIO_DEVICE_ID => VIRTIO_ID_NET,
            VIRTIO_MMIO_VENDOR_ID => VENDOR_ID,
            VIRTIO_MMIO_DEVICE_FEATURES => {
                // 64-bit features read 32 bits at a time, selected by the
                // previously-written DEVICE_FEATURES_SEL page.
                let page = self.device_features_sel;
                if page == 0 {
                    self.device_features() as u32
                } else if page == 1 {
                    (self.device_features() >> 32) as u32
                } else {
                    0
                }
            }
            // Queue registers read 0 when QUEUE_SEL is out of range.
            VIRTIO_MMIO_QUEUE_NUM_MAX => self
                .selected_queue()
                .map(|i| self.queues[i].max_size() as u32)
                .unwrap_or(0),
            VIRTIO_MMIO_QUEUE_READY => self
                .selected_queue()
                .map(|i| self.queues[i].ready() as u32)
                .unwrap_or(0),
            VIRTIO_MMIO_INTERRUPT_STATUS => self.interrupt_status,
            VIRTIO_MMIO_STATUS => self.device_status,
            VIRTIO_MMIO_CONFIG_GENERATION => self.config_generation,
            // Unimplemented registers read as zero.
            _ => 0,
        };
        tracing::debug!(offset, val, "virtio-net mmio_read");
        data.copy_from_slice(&val.to_le_bytes());
    }

    /// Serve a config-space read at `offset` (relative to 0x100), byte by
    /// byte; bytes past the end of VirtioNetConfig read as zero.
    fn read_config_space(&self, offset: u64, data: &mut [u8]) {
        let config_bytes = self.config.as_slice();
        let start = offset as usize;
        for (i, byte) in data.iter_mut().enumerate() {
            let cfg_idx = start + i;
            *byte = config_bytes.get(cfg_idx).copied().unwrap_or(0);
        }
    }

    /// Handle a guest MMIO write at `offset` within the device window.
    ///
    /// Config-space writes (offset >= 0x100) are ignored; register writes
    /// must be exactly 4 bytes. Queue-configuration registers only take
    /// effect between FEATURES_OK and DRIVER_OK (`queue_config_allowed`).
    pub fn mmio_write(&mut self, offset: u64, data: &[u8]) {
        if offset >= 0x100 {
            tracing::debug!(
                offset,
                len = data.len(),
                "virtio-net config-space write ignored"
            );
            return;
        }
        if data.len() != 4 {
            return;
        }
        let val = u32::from_le_bytes([data[0], data[1], data[2], data[3]]);
        tracing::debug!(offset, val, "virtio-net mmio_write");
        match offset as u32 {
            VIRTIO_MMIO_DEVICE_FEATURES_SEL => self.device_features_sel = val,
            VIRTIO_MMIO_DRIVER_FEATURES_SEL => self.driver_features_sel = val,
            VIRTIO_MMIO_DRIVER_FEATURES => {
                // Only accepted between DRIVER and FEATURES_OK.
                if !self.features_write_allowed() {
                    return;
                }
                // Merge the 32-bit page into the 64-bit driver feature word.
                let page = self.driver_features_sel;
                if page == 0 {
                    self.driver_features =
                        (self.driver_features & 0xFFFF_FFFF_0000_0000) | val as u64;
                } else if page == 1 {
                    self.driver_features =
                        (self.driver_features & 0x0000_0000_FFFF_FFFF) | ((val as u64) << 32);
                }
            }
            VIRTIO_MMIO_QUEUE_SEL => self.queue_select = val,
            VIRTIO_MMIO_QUEUE_NUM if self.queue_config_allowed() => {
                if let Some(i) = self.selected_queue() {
                    self.queues[i].set_size(val as u16);
                }
            }
            VIRTIO_MMIO_QUEUE_READY if self.queue_config_allowed() => {
                if let Some(i) = self.selected_queue() {
                    self.queues[i].set_ready(val == 1);
                }
            }
            VIRTIO_MMIO_QUEUE_NOTIFY => {
                // Only TX kicks do work; RX is filled lazily by the TX path.
                let idx = val as usize;
                if idx == TXQ {
                    self.process_tx_loopback();
                }
            }
            VIRTIO_MMIO_INTERRUPT_ACK => {
                // Driver acks interrupt bits by writing them back.
                self.interrupt_status &= !val;
            }
            VIRTIO_MMIO_STATUS => {
                // Writing zero is a full device reset.
                if val == 0 {
                    self.reset();
                } else {
                    self.set_status(val);
                }
            }
            // Queue address registers: low/high 32-bit halves of the
            // descriptor table, available ring and used ring.
            VIRTIO_MMIO_QUEUE_DESC_LOW if self.queue_config_allowed() => {
                if let Some(i) = self.selected_queue() {
                    self.queues[i].set_desc_table_address(Some(val), None);
                }
            }
            VIRTIO_MMIO_QUEUE_DESC_HIGH if self.queue_config_allowed() => {
                if let Some(i) = self.selected_queue() {
                    self.queues[i].set_desc_table_address(None, Some(val));
                }
            }
            VIRTIO_MMIO_QUEUE_AVAIL_LOW if self.queue_config_allowed() => {
                if let Some(i) = self.selected_queue() {
                    self.queues[i].set_avail_ring_address(Some(val), None);
                }
            }
            VIRTIO_MMIO_QUEUE_AVAIL_HIGH if self.queue_config_allowed() => {
                if let Some(i) = self.selected_queue() {
                    self.queues[i].set_avail_ring_address(None, Some(val));
                }
            }
            VIRTIO_MMIO_QUEUE_USED_LOW if self.queue_config_allowed() => {
                if let Some(i) = self.selected_queue() {
                    self.queues[i].set_used_ring_address(Some(val), None);
                }
            }
            VIRTIO_MMIO_QUEUE_USED_HIGH if self.queue_config_allowed() => {
                if let Some(i) = self.selected_queue() {
                    self.queues[i].set_used_ring_address(None, Some(val));
                }
            }
            // All other registers are write-ignored.
            _ => {}
        }
    }

    /// Apply a non-zero driver write to VIRTIO_MMIO_STATUS (zero is handled
    /// as reset by the caller).
    ///
    /// Only transitions that add exactly one expected bit in the canonical
    /// ACKNOWLEDGE -> DRIVER -> FEATURES_OK -> DRIVER_OK order are accepted;
    /// writes that would clear bits or skip a stage are logged and ignored.
    /// FEATURES_OK is additionally gated on the negotiated feature set.
    /// NOTE(review): a driver write that sets only FAILED also lands in the
    /// `_ => false` arm and is rejected — confirm that is intentional, since
    /// the virtio spec lets the driver set FAILED at any time.
    fn set_status(&mut self, val: u32) {
        let old = self.device_status;
        // Status bits may only be added, never cleared (except via reset).
        if val & self.device_status != self.device_status {
            tracing::debug!(old, val, "virtio-net set_status: rejected (clears bits)");
            return;
        }
        let new_bits = val & !self.device_status;
        let valid = match new_bits {
            VIRTIO_CONFIG_S_ACKNOWLEDGE => self.device_status == 0,
            VIRTIO_CONFIG_S_DRIVER => self.device_status == S_ACK,
            VIRTIO_CONFIG_S_FEATURES_OK => self.device_status == S_DRV,
            VIRTIO_CONFIG_S_DRIVER_OK => self.device_status == S_FEAT,
            _ => false,
        };
        if !valid {
            tracing::debug!(
                old,
                val,
                "virtio-net set_status: rejected (invalid transition)"
            );
            return;
        }
        if new_bits == VIRTIO_CONFIG_S_FEATURES_OK {
            // Refuse FEATURES_OK if the driver accepted bits we never
            // offered, or failed to negotiate VERSION_1; set FAILED so the
            // driver can observe the refusal.
            let device_features = self.device_features();
            let unoffered = self.driver_features & !device_features;
            if unoffered != 0 {
                self.device_status |= VIRTIO_CONFIG_S_FAILED;
                tracing::warn!(
                    old,
                    attempted = val,
                    driver_features = self.driver_features,
                    device_features,
                    unoffered,
                    "virtio-net set_status: driver accepted features not \
                     offered by device; rejecting FEATURES_OK and setting \
                     FAILED bit"
                );
                return;
            }
            if (self.driver_features & (1u64 << VIRTIO_F_VERSION_1)) == 0 {
                self.device_status |= VIRTIO_CONFIG_S_FAILED;
                tracing::warn!(
                    old,
                    attempted = val,
                    "virtio-net set_status: VIRTIO_F_VERSION_1 not \
                     negotiated; rejecting FEATURES_OK and setting FAILED bit"
                );
                return;
            }
        }
        self.device_status = val;
        tracing::debug!(old, new = val, "virtio-net set_status: accepted");
    }

    /// Full device reset (driver wrote 0 to STATUS): clear negotiation
    /// state, pending interrupts, the TX scratch buffer and queue poison
    /// flags, and reset both queues. The guest memory binding, config-space
    /// contents, counters and config_generation are left untouched.
    fn reset(&mut self) {
        self.device_status = 0;
        self.interrupt_status = 0;
        self.queue_select = 0;
        self.device_features_sel = 0;
        self.driver_features_sel = 0;
        self.driver_features = 0;
        self.tx_frame_scratch.clear();
        self.queue_poisoned = [false; NUM_QUEUES];
        for q in &mut self.queues {
            q.reset();
        }
    }
}