use crate::descriptor::{PagingAttributes, PhysicalAddress};
use crate::paging::{PageTable, Translation, deallocate};
use alloc::{vec, vec::Vec};
use core::{mem::size_of, ptr::NonNull};
/// Allocates page tables for a translation regime whose tables will live in a
/// contiguous physical region starting at `base_address`, and can serialise
/// all of them into one byte buffer (see `as_bytes`).
#[derive(Debug)]
pub struct TargetAllocator<A: PagingAttributes> {
    // Physical address corresponding to allocation index 0; table at index `i`
    // is reported at `base_address + i * size_of::<PageTable<A>>()`.
    base_address: u64,
    // One slot per table-sized chunk of the target region. `None` marks a
    // freed slot that `add_allocation` may reuse before growing the Vec.
    allocations: Vec<Option<NonNull<PageTable<A>>>>,
}
impl<A: PagingAttributes> TargetAllocator<A> {
    /// Creates an allocator for page tables that will ultimately be placed at
    /// physical address `base_address`.
    pub fn new(base_address: u64) -> Self {
        Self {
            base_address,
            allocations: Vec::new(),
        }
    }

    /// Records `page_table` in the first free slot (reusing a freed one if
    /// available, otherwise appending) and returns its slot index.
    fn add_allocation(&mut self, page_table: NonNull<PageTable<A>>) -> usize {
        match self.allocations.iter().position(Option::is_none) {
            Some(index) => {
                self.allocations[index] = Some(page_table);
                index
            }
            None => {
                self.allocations.push(Some(page_table));
                self.allocations.len() - 1
            }
        }
    }

    /// Clears the slot holding `page_table`. Returns `false` if it was not
    /// found in `allocations`.
    fn remove_allocation(&mut self, page_table: NonNull<PageTable<A>>) -> bool {
        match self
            .allocations
            .iter()
            .position(|slot| *slot == Some(page_table))
        {
            Some(index) => {
                self.allocations[index] = None;
                false == false // placeholder removed below
            }
            None => false,
        }
    }

    /// Serialises every allocated table into one contiguous buffer, laid out
    /// at the same offsets the tables will occupy relative to `base_address`.
    /// Freed slots are left as zeroes.
    pub fn as_bytes(&self) -> Vec<u8> {
        let table_size = size_of::<PageTable<A>>();
        let mut bytes = vec![0; self.allocations.len() * table_size];
        for (slot, chunk) in self
            .allocations
            .iter()
            .zip(bytes.chunks_exact_mut(table_size))
        {
            if let Some(table) = slot {
                // SAFETY: pointers stored in `allocations` come from
                // `allocate_table` and remain valid until removed by
                // `deallocate_table`, so dereferencing here is sound.
                let table = unsafe { table.as_ref() };
                table.write_to(chunk).unwrap();
            }
        }
        bytes
    }
}
impl<A: PagingAttributes> Translation<A> for TargetAllocator<A> {
    /// Allocates a fresh table, records it, and reports the physical address
    /// it will occupy in the target region.
    fn allocate_table(&mut self) -> (NonNull<PageTable<A>>, PhysicalAddress) {
        let page_table = PageTable::new();
        let index = self.add_allocation(page_table);
        let base = usize::try_from(self.base_address).unwrap();
        let address = PhysicalAddress(base + index * size_of::<PageTable<A>>());
        (page_table, address)
    }

    /// Frees `page_table`, which must previously have been returned by
    /// `allocate_table` and not yet deallocated.
    unsafe fn deallocate_table(&mut self, page_table: NonNull<PageTable<A>>) {
        // `assert!` with this format string panics with exactly the same
        // message as the original `if !… { panic!(…) }`.
        assert!(
            self.remove_allocation(page_table),
            "dealloc_table called for page table {:?} which isn't in allocations.",
            page_table
        );
        // SAFETY: the pointer was found in `allocations`, so it came from
        // `allocate_table`; the caller guarantees it is no longer in use.
        unsafe {
            deallocate(page_table);
        }
    }

    /// Maps a physical address in the target region back to the live table.
    /// Panics if `pa` is below `base_address` or its slot is unallocated.
    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable<A>> {
        let offset = pa.0 - usize::try_from(self.base_address).unwrap();
        let index = offset / size_of::<PageTable<A>>();
        self.allocations[index].unwrap()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::descriptor::El1Attributes;
    use crate::paging::{Constraints, El1And0, MemoryRegion, RootTable, VaRange};

    const ROOT_LEVEL: usize = 1;

    #[test]
    fn map_one_page() {
        let mut map = RootTable::with_va_range(
            TargetAllocator::new(0x1_0000),
            ROOT_LEVEL,
            El1And0,
            VaRange::Lower,
        );
        map.map_range(
            &MemoryRegion::new(0x0, 0x1000),
            PhysicalAddress(0x4_2000),
            El1Attributes::VALID
                | El1Attributes::ATTRIBUTE_INDEX_0
                | El1Attributes::INNER_SHAREABLE
                | El1Attributes::UXN,
            Constraints::empty(),
        )
        .unwrap();

        let table_size = size_of::<PageTable<El1Attributes>>();
        let bytes = map.translation().as_bytes();
        // Three tables: one per level from the root down to the leaf.
        assert_eq!(bytes.len(), 3 * table_size);

        // Each table holds a single descriptor in its first entry; every
        // remaining byte of the table must be zero.
        let expected_first_entries: [[u8; 8]; 3] = [
            [0x03, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00],
            [0x03, 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00],
            [0x03, 0x23, 0x04, 0x00, 0x00, 0x00, 0x40, 0x00],
        ];
        for (table, expected) in bytes.chunks_exact(table_size).zip(&expected_first_entries) {
            assert_eq!(&table[..8], expected);
            assert!(table[8..].iter().all(|&byte| byte == 0));
        }
    }
}