use crate::arch::x86_64::kernel::apic;
use crate::arch::x86_64::kernel::irq::*;
use crate::arch::x86_64::kernel::pci::{
self, get_network_driver, PciAdapter, PciClassCode, PciDriver, PciNetworkControllerSubclass,
};
use crate::arch::x86_64::kernel::percore::core_scheduler;
use crate::arch::x86_64::kernel::virtio_fs;
use crate::arch::x86_64::kernel::virtio_net;
use crate::arch::x86_64::mm::paging;
use crate::config::VIRTIO_MAX_QUEUE_SIZE;
use alloc::boxed::Box;
use alloc::rc::Rc;
use alloc::vec::Vec;
use core::cell::RefCell;
use core::convert::TryInto;
use core::sync::atomic::spin_loop_hint;
use core::sync::atomic::{fence, Ordering};
use self::consts::*;
pub mod consts {
    // `cfg_type` values of virtio vendor-specific PCI capabilities
    // (identify which configuration structure a capability points to).
    pub const VIRTIO_PCI_CAP_COMMON_CFG: u32 = 1;
    pub const VIRTIO_PCI_CAP_NOTIFY_CFG: u32 = 2;
    pub const VIRTIO_PCI_CAP_ISR_CFG: u32 = 3;
    pub const VIRTIO_PCI_CAP_DEVICE_CFG: u32 = 4;
    pub const VIRTIO_PCI_CAP_PCI_CFG: u32 = 5;
    // Device-independent virtio feature bits (negotiated during init).
    pub const VIRTIO_F_RING_INDIRECT_DESC: u64 = 1 << 28;
    pub const VIRTIO_F_RING_EVENT_IDX: u64 = 1 << 29;
    // Set by every non-legacy (virtio >= 1.0) device.
    pub const VIRTIO_F_VERSION_1: u64 = 1 << 32;
    pub const VIRTIO_F_ACCESS_PLATFORM: u64 = 1 << 33;
    pub const VIRTIO_F_RING_PACKED: u64 = 1 << 34;
    pub const VIRTIO_F_IN_ORDER: u64 = 1 << 35;
    pub const VIRTIO_F_ORDER_PLATFORM: u64 = 1 << 36;
    pub const VIRTIO_F_SR_IOV: u64 = 1 << 37;
    pub const VIRTIO_F_NOTIFICATION_DATA: u64 = 1 << 38;
    // Flags of a split-virtqueue descriptor (`virtq_desc_raw::flags`).
    pub const VIRTQ_DESC_F_NEXT: u16 = 1; // chain continues via `next`
    pub const VIRTQ_DESC_F_WRITE: u16 = 2; // buffer is device-writable
    pub const VIRTQ_DESC_F_INDIRECT: u16 = 4; // buffer holds an indirect table
    // Available-ring flag: ask the device not to send interrupts.
    pub const VRING_AVAIL_F_NO_INTERRUPT: u16 = 1;
}
/// Driver-side state of one split virtqueue.
///
/// The rings themselves live in leaked heap memory whose physical addresses
/// have been handed to the device (see `new_from_common`), so the references
/// stored here alias device-visible memory.
pub struct Virtq<'a> {
    // Queue index as selected via `queue_select` in the common config.
    index: u16,
    // Number of entries in descriptor table / avail ring / used ring.
    vqsize: u16,
    // Allocator handing out descriptor chains from the shared table.
    virtq_desc: VirtqDescriptors,
    // Driver-written available ring.
    avail: Rc<RefCell<VirtqAvail<'a>>>,
    // Device-written used ring.
    used: Rc<RefCell<VirtqUsed<'a>>>,
    // MMIO doorbell: writing the queue index here notifies the device.
    queue_notify_address: &'a mut u16,
}
impl<'a> Virtq<'a> {
    /// Wraps already-allocated ring parts into a `Virtq`.
    ///
    /// `virtq_desc` owns one `Box` per slot of the device-visible descriptor
    /// table; `queue_notify_address` is the per-queue notify doorbell.
    fn new(
        index: u16,
        vqsize: u16,
        virtq_desc: Vec<Box<virtq_desc_raw>>,
        avail: VirtqAvail<'a>,
        used: VirtqUsed<'a>,
        queue_notify_address: &'a mut u16,
    ) -> Self {
        Virtq {
            index,
            vqsize,
            virtq_desc: VirtqDescriptors::new(virtq_desc),
            avail: Rc::new(RefCell::new(avail)),
            used: Rc::new(RefCell::new(used)),
            queue_notify_address,
        }
    }

    /// Allocates, configures and enables virtqueue `index` through the
    /// device's common configuration structure.
    ///
    /// Returns `None` if the device reports queue size 0 (queue unavailable).
    /// The reported size is clamped to `VIRTIO_MAX_QUEUE_SIZE`. All ring
    /// memory is deliberately leaked: the device keeps physical pointers to
    /// it, so it must never move or be freed.
    pub fn new_from_common(
        index: u16,
        common_cfg: &mut virtio_pci_common_cfg,
        notify_cfg: &mut VirtioNotification,
    ) -> Option<Self> {
        // Select the queue whose registers we are about to read/write.
        common_cfg.queue_select = index;
        if common_cfg.queue_size == 0 {
            return None;
        } else if common_cfg.queue_size > VIRTIO_MAX_QUEUE_SIZE {
            // Writing a smaller size back is allowed and limits ring memory.
            common_cfg.queue_size = VIRTIO_MAX_QUEUE_SIZE;
        }
        let vqsize = common_cfg.queue_size as usize;
        info!("Initializing virtqueue {}, of size {}", index, vqsize);
        // Zero-initialized descriptor table (device-readable).
        let desc_table = vec![
            virtq_desc_raw {
                addr: 0,
                len: 0,
                flags: 0,
                next: 0
            };
            vqsize
        ];
        let desc_table = desc_table.into_boxed_slice();
        // Ring sizes in bytes: 6 bytes of header/trailer (flags, idx, event
        // field) plus 2 bytes per avail entry resp. 8 bytes per used entry;
        // `>> 1` converts bytes to the number of u16 elements allocated.
        let avail_mem_box = vec![0 as u16; (6 + 2 * vqsize) >> 1].into_boxed_slice();
        let used_mem_box = vec![0 as u16; (6 + 8 * vqsize) >> 1].into_boxed_slice();
        // Leak everything the device will DMA into/out of.
        let desc_table = alloc::boxed::Box::leak(desc_table);
        let avail_mem = alloc::boxed::Box::leak(avail_mem_box);
        let used_mem = alloc::boxed::Box::leak(used_mem_box);
        // Wrap each table slot in a Box so descriptors can be handed out and
        // reclaimed individually. NOTE(review): these boxes alias the leaked
        // table and must never actually be dropped — see the warning in
        // `Drop for virtq_desc_raw`.
        let mut desc_raw_wrappers: Vec<Box<virtq_desc_raw>> = Vec::new();
        for i in 0..vqsize {
            let drw = unsafe { Box::from_raw(&mut desc_table[i] as *mut _) };
            desc_raw_wrappers.push(drw);
        }
        // Split the flat u16 slabs into flags / idx / ring views.
        let (avail_flags, avail_mem) = avail_mem.split_first_mut().unwrap();
        let (avail_idx, avail_mem) = avail_mem.split_first_mut().unwrap();
        let (used_flags, used_mem) = used_mem.split_first_mut().unwrap();
        let (used_idx, used_mem) = used_mem.split_first_mut().unwrap();
        // Publish physical addresses of the rings and enable the queue.
        common_cfg.queue_select = index;
        common_cfg.queue_desc = paging::virt_to_phys(desc_table.as_ptr() as usize) as u64;
        common_cfg.queue_avail = paging::virt_to_phys(avail_flags as *mut _ as usize) as u64;
        common_cfg.queue_used = paging::virt_to_phys(used_flags as *const _ as usize) as u64;
        common_cfg.queue_enable = 1;
        debug!(
            "desc 0x{:x}, avail 0x{:x}, used 0x{:x}",
            common_cfg.queue_desc, common_cfg.queue_avail, common_cfg.queue_used
        );
        let avail = VirtqAvail {
            flags: avail_flags,
            idx: avail_idx,
            ring: avail_mem,
        };
        let used = VirtqUsed {
            flags: used_flags,
            idx: used_idx,
            // Reinterpret the remaining u16 slab as `virtq_used_elem`s.
            ring: unsafe { core::slice::from_raw_parts(used_mem.as_ptr() as *const _, vqsize) },
            last_idx: 0,
        };
        let vq = Virtq::new(
            index,
            vqsize as u16,
            desc_raw_wrappers,
            avail,
            used,
            notify_cfg.get_notify_addr(common_cfg.queue_notify_off as u32),
        );
        Some(vq)
    }

    /// Rings the queue's doorbell by writing its index to the notify address.
    fn notify_device(&mut self) {
        trace!("Notifying device of updated virtqueue ({})...!", self.index);
        *self.queue_notify_address = self.index;
    }

    /// Publishes the already-staged buffer at `index` to the device without
    /// waiting for completion.
    ///
    /// Presumably paired with `add_buffer` (flags == 0 path), which wrote the
    /// avail-ring slot but did not bump `idx` — TODO confirm with callers.
    /// Only the length of the chain's last descriptor is patched here.
    pub fn send_non_blocking(&mut self, index: usize, len: usize) -> Result<(), ()> {
        let chainrc = self.virtq_desc.get_chain_by_index(index);
        let mut chain = chainrc.borrow_mut();
        let mut vqavail = self.avail.borrow_mut();
        let aind = (*vqavail.idx % self.vqsize) as usize;
        if aind != index {
            warn!(
                "Available index {} is different from buffer index {}",
                aind, index
            );
        }
        let req = &mut chain.0.last_mut().unwrap().raw;
        req.len = len.try_into().unwrap();
        req.flags = 0;
        // Descriptor contents must be visible before the idx update below.
        fence(Ordering::SeqCst);
        // Suppress device interrupts for this submission (polled mode).
        *vqavail.flags = VRING_AVAIL_F_NO_INTERRUPT;
        *vqavail.idx = vqavail.idx.wrapping_add(1);
        if *vqavail.idx == 0 {
            trace!("VirtQ index wrapped!");
        }
        // idx update must be visible before reading the device's flags.
        fence(Ordering::SeqCst);
        let vqused = self.used.borrow();
        // Device sets a non-zero flag to suppress notifications.
        let should_notify = *vqused.flags == 0;
        drop(vqavail);
        drop(vqused);
        if should_notify {
            self.notify_device();
        }
        Ok(())
    }

    /// Builds a descriptor chain from driver-readable buffers `dat` followed
    /// by optional device-writable response buffers, submits it, and
    /// busy-waits until the device marks the chain as used.
    pub fn send_blocking(&mut self, dat: &[&[u8]], rsp_buf: Option<&[&mut [u8]]>) {
        let chainrc = self.virtq_desc.get_empty_chain();
        let mut chain = chainrc.borrow_mut();
        // Device-readable part of the chain.
        for dat in dat {
            self.virtq_desc.extend(&mut chain);
            let req = &mut chain.0.last_mut().unwrap().raw;
            req.addr = paging::virt_to_phys(dat.as_ptr() as usize) as u64;
            req.len = dat.len() as u32;
            req.flags = 0;
            trace!("written out descriptor: {:?} @ {:p}", req, req);
        }
        // Device-writable part (responses), marked with VIRTQ_DESC_F_WRITE.
        if let Some(rsp_buf) = rsp_buf {
            for dat in rsp_buf {
                self.virtq_desc.extend(&mut chain);
                let rsp = &mut chain.0.last_mut().unwrap().raw;
                rsp.addr = paging::virt_to_phys(dat.as_ptr() as usize) as u64;
                rsp.len = dat.len() as u32;
                rsp.flags = VIRTQ_DESC_F_WRITE;
                trace!("written in descriptor: {:?} @ {:p}", rsp, rsp);
            }
        }
        trace!("Sending Descriptor chain {:?}", chain);
        let mut vqavail = self.avail.borrow_mut();
        let aind = (*vqavail.idx % self.vqsize) as usize;
        // Publish the chain's head descriptor in the avail ring.
        vqavail.ring[aind] = chain.0.first().unwrap().index;
        // Ring entry must be visible before the idx update.
        fence(Ordering::SeqCst);
        *vqavail.idx = vqavail.idx.wrapping_add(1);
        if *vqavail.idx == 0 {
            trace!("VirtQ index wrapped!");
        }
        fence(Ordering::SeqCst);
        let vqused = self.used.borrow();
        let should_notify = *vqused.flags == 0;
        drop(vqavail);
        drop(vqused);
        if should_notify {
            self.notify_device();
        }
        // Spin until the device reports the chain as used, then recycle it.
        let mut vqused = self.used.borrow_mut();
        vqused.wait_until_done(&chain);
        drop(chain);
        self.virtq_desc.recycle_chain(chainrc)
    }

    /// Non-blocking poll of the used ring; returns the id of the next
    /// completed element, if any.
    pub fn check_used_elements(&mut self) -> Option<u32> {
        let mut vqused = self.used.borrow_mut();
        vqused.check_elements()
    }

    /// Stages a single-descriptor buffer in the queue.
    ///
    /// If `flags` is non-zero (e.g. VIRTQ_DESC_F_WRITE for receive buffers)
    /// the buffer is published immediately by bumping `avail.idx`. If `flags`
    /// is zero, only the ring slot at `index` is written; presumably
    /// `send_non_blocking` later publishes it — TODO confirm with callers.
    /// NOTE(review): `addr` is passed through `virt_to_phys`, so callers
    /// supply a virtual address despite the u64 type.
    pub fn add_buffer(&mut self, index: usize, addr: u64, len: usize, flags: u16) {
        let chainrc = self.virtq_desc.get_empty_chain();
        let mut chain = chainrc.borrow_mut();
        self.virtq_desc.extend(&mut chain);
        let rsp = &mut chain.0.last_mut().unwrap().raw;
        rsp.addr = paging::virt_to_phys(addr as usize) as u64;
        rsp.len = len.try_into().unwrap();
        rsp.flags = flags;
        let mut vqavail = self.avail.borrow_mut();
        if flags != 0 {
            let aind = (*vqavail.idx % self.vqsize) as usize;
            vqavail.ring[aind] = chain.0.first().unwrap().index;
            fence(Ordering::SeqCst);
            *vqavail.idx = vqavail.idx.wrapping_add(1);
            fence(Ordering::SeqCst);
            if *vqavail.idx == 0 {
                warn!("VirtQ index wrapped!");
            }
        } else {
            // Stage only: write the slot for `index` without advancing idx.
            let aind = index % self.vqsize as usize;
            vqavail.ring[aind] = chain.0.first().unwrap().index;
        }
    }

    /// True if the device has published used elements we have not consumed.
    pub fn has_packet(&self) -> bool {
        let vqused = self.used.borrow();
        vqused.last_idx != *vqused.idx
    }

    /// Returns the avail-ring slot that the next submission would occupy.
    pub fn get_available_buffer(&self) -> Result<u32, ()> {
        let vqavail = self.avail.borrow();
        let index = *vqavail.idx % self.vqsize;
        Ok(index as u32)
    }

    /// Peeks at the next unconsumed used element without advancing; returns
    /// (descriptor id, written length) or Err if nothing is pending.
    /// Pair with `buffer_consumed` to advance past it.
    pub fn get_used_buffer(&self) -> Result<(u32, u32), ()> {
        let vqused = self.used.borrow();
        if vqused.last_idx != *vqused.idx {
            let used_index = vqused.last_idx as usize;
            let usedelem = vqused.ring[used_index % vqused.ring.len()];
            Ok((usedelem.id, usedelem.len))
        } else {
            Err(())
        }
    }

    /// Consumes the next used element and immediately re-posts its
    /// descriptor in the avail ring (typical receive-buffer recycling).
    pub fn buffer_consumed(&mut self) {
        let mut vqused = self.used.borrow_mut();
        if vqused.last_idx != *vqused.idx {
            let usedelem = vqused.ring[vqused.last_idx as usize % vqused.ring.len()];
            vqused.last_idx = vqused.last_idx.wrapping_add(1);
            let mut vqavail = self.avail.borrow_mut();
            let aind = (*vqavail.idx % self.vqsize) as usize;
            // Hand the same descriptor id straight back to the device.
            vqavail.ring[aind] = usedelem.id.try_into().unwrap();
            fence(Ordering::SeqCst);
            *vqavail.idx = vqavail.idx.wrapping_add(1);
            fence(Ordering::SeqCst);
            let should_notify = *vqused.flags == 0;
            drop(vqavail);
            drop(vqused);
            if should_notify {
                self.notify_device();
            }
        }
    }
}
/// One entry of the device-visible split-virtqueue descriptor table.
/// Layout is fixed by the virtio specification (hence `#[repr(C)]`).
#[repr(C)]
#[derive(Clone, Debug)]
pub struct virtq_desc_raw {
    // Physical address of the buffer.
    pub addr: u64,
    // Buffer length in bytes.
    pub len: u32,
    // VIRTQ_DESC_F_* flags.
    pub flags: u16,
    // Index of the next descriptor when VIRTQ_DESC_F_NEXT is set.
    pub next: u16,
}
impl Drop for virtq_desc_raw {
    // Descriptors are Boxes aliasing the leaked, device-visible table
    // (see `Virtq::new_from_common`), so dropping one is almost certainly a
    // logic error; warn loudly instead of freeing anything.
    fn drop(&mut self) {
        warn!("Dropping virtq_desc_raw, this is likely an error as of now! No memory will be deallocated!");
    }
}
/// A descriptor-table slot together with its table index.
#[derive(Debug)]
struct VirtqDescriptor {
    // Position of this entry in the descriptor table.
    index: u16,
    // Aliases the leaked device-visible table entry; never really freed.
    raw: Box<virtq_desc_raw>,
}
/// An ordered list of descriptors forming one request chain
/// (linked in hardware via the `next`/`VIRTQ_DESC_F_NEXT` fields).
#[derive(Debug)]
struct VirtqDescriptorChain(Vec<VirtqDescriptor>);
impl PartialEq for VirtqDescriptorChain {
    /// Identity comparison: two chains are equal only if they are the very
    /// same chain in memory, not if their contents happen to match.
    fn eq(&self, other: &Self) -> bool {
        core::ptr::eq(&self.0, &other.0)
    }
}
/// Book-keeping for descriptor-table slots: a free stack plus the set of
/// chains currently handed out to the device.
struct VirtqDescriptors {
    // Free descriptors, used as a stack (pop from the end).
    free: RefCell<VirtqDescriptorChain>,
    // Chains currently in flight; looked up by tail-descriptor index.
    used_chains: RefCell<Vec<Rc<RefCell<VirtqDescriptorChain>>>>,
}
impl VirtqDescriptors {
fn new(descr_raw: Vec<Box<virtq_desc_raw>>) -> Self {
VirtqDescriptors {
free: RefCell::new(VirtqDescriptorChain(
descr_raw
.into_iter()
.enumerate()
.map(|(i, braw)| VirtqDescriptor {
index: i as u16,
raw: braw,
})
.rev()
.collect(),
)),
used_chains: RefCell::new(Vec::new()),
}
}
fn get_chain_by_index(&self, index: usize) -> Rc<RefCell<VirtqDescriptorChain>> {
let idx = self
.used_chains
.borrow()
.iter()
.position(|c| c.borrow().0.last().unwrap().index == index.try_into().unwrap())
.unwrap();
self.used_chains.borrow()[idx].clone()
}
fn get_empty_chain(&self) -> Rc<RefCell<VirtqDescriptorChain>> {
let mut used = self.used_chains.borrow_mut();
let newchain = VirtqDescriptorChain(Vec::new() );
let cell = Rc::new(RefCell::new(newchain));
used.push(cell.clone());
cell
}
fn recycle_chain(&self, chain: Rc<RefCell<VirtqDescriptorChain>>) {
let mut free = self.free.borrow_mut();
let mut used = self.used_chains.borrow_mut();
let index = used.iter().position(|c| *c == chain);
if let Some(index) = index {
used.remove(index);
} else {
warn!("Trying to remove chain from virtq which does not exist!");
return;
}
free.0.append(&mut chain.borrow_mut().0);
}
fn extend(&self, chain: &mut VirtqDescriptorChain) {
let mut free = self.free.borrow_mut();
let mut next = free.0.pop().unwrap();
if !chain.0.is_empty() {
let last = chain.0.last_mut().unwrap();
last.raw.next = next.index;
last.raw.flags |= VIRTQ_DESC_F_NEXT;
}
next.raw.next = 0;
next.raw.flags = 0;
next.raw.len = 0;
next.raw.addr = 0;
chain.0.push(next);
}
}
/// Driver-written available ring, viewed through references into the leaked
/// ring memory (see `Virtq::new_from_common`).
#[allow(dead_code)]
struct VirtqAvail<'a> {
    // VRING_AVAIL_F_* flags (e.g. interrupt suppression).
    flags: &'a mut u16,
    // Free-running index of the next slot the driver will fill.
    idx: &'a mut u16,
    // Ring of head-descriptor indices.
    ring: &'a mut [u16],
}
/// Device-written used ring; the driver only reads the shared fields.
#[allow(dead_code)]
struct VirtqUsed<'a> {
    // Flags written by the device (non-zero suppresses notifications).
    flags: &'a u16,
    // Free-running index of the next slot the device will fill.
    idx: &'a u16,
    // Ring of completed elements.
    ring: &'a [virtq_used_elem],
    // Driver-local: index of the next element we have not yet consumed.
    last_idx: u16,
}
impl<'a> VirtqUsed<'a> {
    /// Polls the used ring once.
    ///
    /// Returns the id of the next completed element and advances `last_idx`,
    /// or `None` if the device has published nothing new. `idx` is read
    /// volatile because the device updates it behind the compiler's back.
    fn check_elements(&mut self) -> Option<u32> {
        if unsafe { core::ptr::read_volatile(self.idx) } == self.last_idx {
            None
        } else {
            let usedelem = self.ring[(self.last_idx as usize) % self.ring.len()];
            self.last_idx = self.last_idx.wrapping_add(1);
            fence(Ordering::SeqCst);
            Some(usedelem.id)
        }
    }

    /// Busy-waits until the device publishes at least one new used element,
    /// then asserts that the most recent element completes `chain`.
    ///
    /// NOTE(review): `last_idx` is set to the device's current `idx`, so any
    /// intermediate completions are skipped — this assumes only one request
    /// is in flight (the `send_blocking` pattern).
    fn wait_until_done(&mut self, chain: &VirtqDescriptorChain) -> bool {
        while unsafe { core::ptr::read_volatile(self.idx) } == self.last_idx {
            spin_loop_hint();
        }
        self.last_idx = *self.idx;
        // The element just written by the device sits at last_idx - 1.
        let usedelem = self.ring[(self.last_idx.wrapping_sub(1) as usize) % self.ring.len()];
        fence(Ordering::SeqCst);
        // The used element must reference the chain's head descriptor.
        assert_eq!(usedelem.id, chain.0.first().unwrap().index as u32);
        true
    }
}
/// One entry of the device-written used ring (layout fixed by the virtio
/// specification).
#[repr(C)]
#[derive(Clone, Copy, Debug)]
struct virtq_used_elem {
    // Index of the head descriptor of the completed chain.
    id: u32,
    // Total bytes written by the device into the chain's buffers.
    len: u32,
}
/// Virtio vendor-specific PCI capability as it appears in config space.
#[repr(C)]
#[derive(Debug)]
struct virtio_pci_cap {
    // Capability vendor id (PCI_CAP_ID_VNDR for virtio).
    cap_vndr: u8,
    // Config-space offset of the next capability, 0 if last.
    cap_next: u8,
    // Length of this capability structure.
    cap_len: u8,
    // Which structure this points to (VIRTIO_PCI_CAP_*).
    cfg_type: u8,
    // BAR index holding the structure.
    bar: u8,
    padding: [u8; 3],
    // Byte offset of the structure within the BAR.
    offset: u32,
    // Length of the structure in bytes.
    length: u32,
}
/// Notify capability: the base `virtio_pci_cap` plus the multiplier used to
/// compute each queue's doorbell offset (see `VirtioNotification`).
#[repr(C)]
#[derive(Debug)]
pub struct virtio_pci_notify_cap {
    cap: virtio_pci_cap,
    // Byte stride between queue notify addresses: offset =
    // queue_notify_off * notify_off_multiplier.
    notify_off_multiplier: u32,
}
/// Memory-mapped common configuration structure of a virtio PCI device.
/// Field order and widths mirror the virtio specification's
/// `virtio_pci_common_cfg`; instances alias device MMIO, so reads and writes
/// here are device register accesses.
#[repr(C)]
#[derive(Debug)]
pub struct virtio_pci_common_cfg {
    // Feature negotiation: select a 32-bit window, then read/write it.
    pub device_feature_select: u32,
    pub device_feature: u32,
    pub driver_feature_select: u32,
    pub driver_feature: u32,
    pub msix_config: u16,
    // Number of queues the device offers.
    pub num_queues: u16,
    // Device status bits (ACKNOWLEDGE, DRIVER, FEATURES_OK, DRIVER_OK, ...).
    pub device_status: u8,
    pub config_generation: u8,
    // Per-queue registers below are banked by queue_select.
    pub queue_select: u16,
    pub queue_size: u16,
    pub queue_msix_vector: u16,
    pub queue_enable: u16,
    // Queue's offset factor for the notify doorbell.
    pub queue_notify_off: u16,
    // Physical addresses of the queue's three rings.
    pub queue_desc: u64,
    pub queue_avail: u64,
    pub queue_used: u64,
}
/// Base of the mapped notification structure plus the per-queue stride,
/// from which each queue's doorbell address is derived.
#[derive(Debug)]
pub struct VirtioNotification {
    // Virtual address of the start of the notification region.
    pub notification_ptr: *mut u16,
    // Byte stride per queue_notify_off unit (from the notify capability).
    pub notify_off_multiplier: u32,
}
impl VirtioNotification {
    /// Computes the doorbell address for a queue with the given
    /// `queue_notify_off`.
    ///
    /// The byte offset is `queue_notify_off * notify_off_multiplier`; since
    /// `notification_ptr` is a `*mut u16`, `offset` counts u16 elements,
    /// hence the division by 2. The `'static` lifetime is asserted because
    /// the mapping is never unmapped. Assumes the resulting byte offset is
    /// 2-byte aligned — TODO confirm against the device's notify capability.
    pub fn get_notify_addr(&self, queue_notify_off: u32) -> &'static mut u16 {
        let addr = unsafe {
            &mut *self
                .notification_ptr
                .offset((queue_notify_off * self.notify_off_multiplier) as isize / 2)
        };
        debug!(
            "Queue notify address parts: {:p} {} {} {:p}",
            self.notification_ptr, queue_notify_off, self.notify_off_multiplier, addr
        );
        addr
    }
}
/// Walks the PCI capability list of `bus:device` looking for the virtio
/// vendor capability of type `virtiocaptype`, maps the BAR it points into,
/// and returns the virtual address of the structure.
///
/// Returns `(virtual_address, notify_off_multiplier)`; the multiplier is
/// only meaningful for `VIRTIO_PCI_CAP_NOTIFY_CFG` and 0 otherwise.
/// Returns `None` if the capability is missing, malformed, or the BAR
/// cannot be mapped.
pub fn map_virtiocap(
    bus: u8,
    device: u8,
    adapter: &PciAdapter,
    caplist: u32,
    virtiocaptype: u32,
) -> Option<(usize, u32)> {
    let mut nextcaplist = caplist;
    // Capabilities must live past the standard 0x40-byte PCI header.
    if nextcaplist < 0x40 {
        error!(
            "Caplist inside header! Offset: 0x{:x}, Aborting",
            nextcaplist
        );
        return None;
    }
    // Follow the capability chain until the wanted virtio cap is found.
    let virtiocapoffset = loop {
        if nextcaplist == 0 || nextcaplist < 0x40 {
            error!("Next caplist invalid, and still not found the wanted virtio cap, aborting!");
            return None;
        }
        let captypeword = pci::read_config(bus, device, nextcaplist);
        debug!(
            "Read cap at offset 0x{:x}: captype 0x{:x}",
            nextcaplist, captypeword
        );
        // Byte 0: capability id; byte 3: virtio cfg_type.
        let captype = captypeword & 0xFF;
        if captype == pci::PCI_CAP_ID_VNDR {
            debug!("found vendor, virtio type: {}", (captypeword >> 24) & 0xFF);
            if (captypeword >> 24) & 0xFF == virtiocaptype {
                break nextcaplist;
            }
        }
        // Byte 1: offset of the next capability.
        nextcaplist = (captypeword >> 8) & 0xFF;
    };
    // Read bar index, offset and length from the virtio_pci_cap layout.
    let baridx: u8 = (pci::read_config(bus, device, virtiocapoffset + 4) & 0xFF) as u8;
    let offset: usize = pci::read_config(bus, device, virtiocapoffset + 8) as usize;
    let length: usize = pci::read_config(bus, device, virtiocapoffset + 12) as usize;
    debug!(
        "Found virtio config bar as 0x{:x}, offset 0x{:x}, length 0x{:x}",
        baridx, offset, length
    );
    if let Some((virtualbaraddr, size)) = adapter.memory_map_bar(baridx, true) {
        let virtualcapaddr = virtualbaraddr + offset;
        // The advertised structure must fit inside the mapped BAR.
        if size < offset + length {
            error!(
                "virtio config struct does not fit in bar! Aborting! 0x{:x} < 0x{:x}",
                size,
                offset + length
            );
            return None;
        }
        if virtiocaptype == VIRTIO_PCI_CAP_NOTIFY_CFG {
            // The notify cap carries an extra field right after the base cap.
            let notify_off_multiplier: u32 = pci::read_config(bus, device, virtiocapoffset + 16);
            Some((virtualcapaddr, notify_off_multiplier))
        } else {
            Some((virtualcapaddr, 0))
        }
    } else {
        warn!("Could not map virtio-cap-bar!");
        None
    }
}
/// Probes a virtio PCI adapter and registers the matching driver.
///
/// Supported modern device ids: 0x1041 (virtio-net, if the PCI class marks
/// it as an Ethernet controller) and 0x105a (virtio-fs). Legacy/transitional
/// ids (0x1000..=0x103F) and everything else are skipped with a warning.
/// For a supported device the shared virtio interrupt handler is installed.
///
/// Fix: the original called `num::FromPrimitive::from_u8(..).unwrap()` on
/// `class_id`/`subclass_id`, panicking the kernel on any class code not in
/// our enums; unknown codes now fall through to the warn-and-skip path like
/// every other unsupported device.
pub fn init_virtio_device(adapter: &pci::PciAdapter) {
    match adapter.device_id {
        0x1000..=0x103F => {
            // Transitional/legacy virtio device ids are not handled.
            warn!("Legacy Virtio devices are not supported, skipping!");
            return;
        }
        0x1041 => {
            // Modern virtio-net: verify PCI class/subclass before binding.
            match num::FromPrimitive::from_u8(adapter.class_id) {
                Some(PciClassCode::NetworkController) => {
                    match num::FromPrimitive::from_u8(adapter.subclass_id) {
                        Some(PciNetworkControllerSubclass::EthernetController) => {
                            let drv = virtio_net::create_virtionet_driver(adapter).unwrap();
                            pci::register_driver(PciDriver::VirtioNet(drv));
                        }
                        _ => {
                            warn!("Virtio device is NOT supported, skipping!");
                            return;
                        }
                    }
                }
                _ => {
                    warn!("Virtio device is NOT supported, skipping!");
                    return;
                }
            }
        }
        0x105a => {
            info!("Found Virtio-FS device!");
            virtio_fs::create_virtiofs_driver(adapter).unwrap();
        }
        _ => {
            warn!("Virtio device is NOT supported, skipping!");
            return;
        }
    };
    // All paths that did not return above registered a driver; hook up the
    // shared virtio interrupt handler for this adapter's IRQ line.
    irq_install_handler(adapter.irq as u32, virtio_irqhandler as usize);
}
/// Shared interrupt handler for all virtio devices registered by
/// `init_virtio_device`.
///
/// Acknowledges the interrupt at the APIC, lets the network driver (if any)
/// handle it, and invokes the scheduler when the driver signals that a task
/// may have become runnable.
#[cfg(target_arch = "x86_64")]
extern "x86-interrupt" fn virtio_irqhandler(_stack_frame: &mut ExceptionStackFrame) {
    debug!("Receive virtio interrupt");
    // Signal end-of-interrupt before doing any real work.
    apic::eoi();
    let check_scheduler = match get_network_driver() {
        Some(driver) => driver.borrow_mut().handle_interrupt(),
        _ => false,
    };
    if check_scheduler {
        core_scheduler().scheduler();
    }
}