use crate::addr::HostVirtAddr;
use crate::print::PrintBarrier;
use crate::spin::RwLocked;
use core::alloc::Layout;
use core::mem::{align_of, size_of};
use linked_list_allocator::Heap;
use lset::{Line, Span};
use primordial::{Address, Page as Page4KiB};
use spinning::Lazy;
use x86_64::{PhysAddr, VirtAddr};
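/// The global map of shim physical memory to host virtual memory.
///
/// A minimal usage sketch (illustrative only: the addresses and the unit
/// type are made up, and the map must first be populated via
/// `first_entry`/`new_entry`):
///
/// ```ignore
/// HOSTMAP.first_entry(PhysAddr::new(0x1000), VirtAddr::new(0x7f00_0000_0000), 0x4000);
/// let host_virt: HostVirtAddr<u8> = HOSTMAP.shim_phys_to_host_virt(PhysAddr::new(0x2000));
/// ```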
pub static HOSTMAP: Lazy<RwLocked<HostMap>> =
Lazy::new(|| RwLocked::<HostMap>::new(HostMap::new()));
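/// The header of a page of the host memory mapping list, pointing to the
/// next list page, if any.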
struct HostMemListPageHeader {
next: Option<&'static mut HostMemListPage>,
}
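/// A single mapping of a physical address span in the shim to the start
/// of the host virtual address region backing it.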
#[derive(Clone, Copy)]
struct HostMemEntry {
shim: Span<PhysAddr, usize>,
virt_start: VirtAddr,
}
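/// The number of entries fitting in one `HostMemListPage` next to its
/// header.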
pub const HOST_MEM_LIST_NUM_ENTRIES: usize = (Page4KiB::SIZE
- core::mem::size_of::<HostMemListPageHeader>())
/ core::mem::size_of::<HostMemEntry>();
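/// A page-sized node of the host memory mapping list.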
struct HostMemListPage {
header: HostMemListPageHeader,
ent: [HostMemEntry; HOST_MEM_LIST_NUM_ENTRIES],
}
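/// A map of shim physical memory spans to host virtual addresses.
///
/// The first list page is embedded in the struct; additional pages are
/// allocated from the heap and chained via `HostMemListPageHeader::next`.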
pub struct HostMap {
    /// The number of extra list pages allocated from the heap.
    num_pages: usize,
    /// The highest physical address covered by any entry.
    end_of_mem: PhysAddr,
    /// The first list page, embedded in the struct itself.
    host_mem: HostMemListPage,
}
impl HostMap {
fn new() -> Self {
HostMap {
num_pages: 0,
end_of_mem: PhysAddr::new(0),
host_mem: HostMemListPage {
header: HostMemListPageHeader { next: None },
ent: [HostMemEntry {
shim: Span {
start: PhysAddr::new(0),
count: 0,
},
virt_start: VirtAddr::new(0),
}; HOST_MEM_LIST_NUM_ENTRIES],
},
}
}
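    /// Look up the host virtual address backing the shim physical
    /// address `addr`, or `None` if the address is not mapped.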
fn get_virt_addr(&self, addr: PhysAddr) -> Option<VirtAddr> {
let mut free = &self.host_mem;
loop {
for i in free.ent.iter() {
                // Entries are filled in order; a zero-length span marks
                // the end of the used entries.
                if i.shim.count == 0 {
                    return None;
                }
                let line = Line::from(i.shim);
                // Check containment rather than only the upper bound, so
                // an address falling in a gap between entries yields
                // `None` instead of panicking on the subtraction below.
                if line.start <= addr && addr < line.end {
                    let offset = addr.as_u64().checked_sub(line.start.as_u64()).unwrap();
                    return Some(i.virt_start + offset);
                }
}
match free.header.next {
None => return None,
Some(ref f) => free = *f,
}
}
}
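    /// Grow the mapping list until it can hold at least `mem_slots`
    /// entries, allocating additional list pages from `allocator`.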
fn do_extend_slots(&mut self, mem_slots: usize, allocator: &mut Heap) {
        // One extra list page is needed per `HOST_MEM_LIST_NUM_ENTRIES`
        // slots. Flooring division suffices here, because the first list
        // page is embedded in `HostMap` and already provides
        // `HOST_MEM_LIST_NUM_ENTRIES` entries.
        let num_pages = mem_slots.checked_div(HOST_MEM_LIST_NUM_ENTRIES).unwrap();
        if self.num_pages >= num_pages {
            return;
        }
let mut last_page = &mut self.host_mem as *mut HostMemListPage;
for _i in 0..num_pages {
unsafe {
last_page = match (*last_page).header.next {
None => {
let new_page = {
let page_res = allocator.allocate_first_fit(
Layout::from_size_align(
size_of::<HostMemListPage>(),
align_of::<HostMemListPage>(),
)
.unwrap(),
);
                            // If the heap cannot satisfy the allocation,
                            // give up extending; the list keeps its
                            // current capacity.
                            if page_res.is_err() {
                                return;
                            }
                            let page: *mut HostMemListPage = page_res.unwrap().as_ptr() as _;
                            // Zero the new page, so every entry starts
                            // out as a free slot (`count == 0`) and
                            // `header.next` is `None`.
                            page.write_bytes(0, 1);
page
};
(*last_page).header.next = Some(&mut *new_page);
self.num_pages = self.num_pages.checked_add(1).unwrap();
new_page
}
Some(ref mut p) => *p as *mut _,
};
}
}
}
}
impl RwLocked<HostMap> {
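    /// Extend the map to hold at least `mem_slots` entries.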
pub fn extend_slots(&self, mem_slots: usize, allocator: &mut Heap) {
let _barrier = PrintBarrier::default();
self.write().do_extend_slots(mem_slots, allocator);
}
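    /// Translate a shim physical address to a host virtual address.
    ///
    /// # Panics
    ///
    /// Panics if `shim_phys` is not contained in any mapped span.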
pub fn shim_phys_to_host_virt<U>(&self, shim_phys: PhysAddr) -> HostVirtAddr<U> {
let this = self.read();
let virt_addr = this.get_virt_addr(shim_phys).unwrap_or_else(|| {
panic!(
"Trying to get virtual offset from unmmapped location {:#?}",
shim_phys
)
});
unsafe { HostVirtAddr::new(Address::<u64, U>::unchecked(virt_addr.as_u64() as _)) }
}
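    /// Initialize the map with its first entry and set `end_of_mem`
    /// accordingly.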
pub fn first_entry(&self, vm_phys: PhysAddr, host_virt: VirtAddr, size: usize) {
let mut this = self.write();
this.host_mem.ent[0].shim.start = vm_phys;
this.host_mem.ent[0].shim.count = size;
this.host_mem.ent[0].virt_start = host_virt;
this.end_of_mem = vm_phys + size;
}
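    /// Add a new entry to the map, extending `end_of_mem` if needed.
    ///
    /// Returns the recorded span, or `None` if all slots are in use, in
    /// which case `end_of_mem` is left unchanged.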
pub fn new_entry(
&self,
vm_phys: PhysAddr,
host_virt: VirtAddr,
size: usize,
) -> Option<Span<PhysAddr, usize>> {
let mut this = self.write();
        let vm_line = Line::from(Span::new(vm_phys, size));
        // Remember the old end of memory, so it can be restored if no
        // free slot is found below.
        let old_max = this.end_of_mem;
        this.end_of_mem = PhysAddr::new(u64::max(this.end_of_mem.as_u64(), vm_line.end.as_u64()));
let mut free = &mut this.host_mem;
loop {
for i in free.ent.iter_mut() {
                // A zero-length span marks a free slot.
                if i.shim.count == 0 {
i.virt_start = host_virt;
i.shim.start = vm_phys;
i.shim.count = size;
return Some(i.shim);
}
}
if let Some(f) = free.header.next.as_mut() {
free = f;
            } else {
                // All slots are in use: roll back `end_of_mem` and
                // report failure.
                this.end_of_mem = old_max;
return None;
}
}
}
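    /// Return the highest physical address covered by the map.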
pub fn end_of_mem(&self) -> PhysAddr {
self.read().end_of_mem
}
}