//! axaddrspace/address_space/backend/alloc.rs
//!
//! Allocation mapping backend: backs guest-physical mappings with physical
//! frames obtained from the host frame allocator (`PagingHandler`).
use memory_addr::{PageIter4K, PhysAddr};
use page_table_multiarch::{MappingFlags, PageSize, PagingHandler};

use super::Backend;
use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable};
7impl<H: PagingHandler> Backend<H> {
8    /// Creates a new allocation mapping backend.
9    pub const fn new_alloc(populate: bool) -> Self {
10        Self::Alloc {
11            populate,
12            _phantom: core::marker::PhantomData,
13        }
14    }
15
16    pub(crate) fn map_alloc(
17        &self,
18        start: GuestPhysAddr,
19        size: usize,
20        flags: MappingFlags,
21        pt: &mut PageTable<H>,
22        populate: bool,
23    ) -> bool {
24        debug!(
25            "map_alloc: [{:#x}, {:#x}) {:?} (populate={})",
26            start,
27            start + size,
28            flags,
29            populate
30        );
31        if populate {
32            // allocate all possible physical frames for populated mapping.
33            for addr in PageIter4K::new(start, start + size).unwrap() {
34                if H::alloc_frame()
35                    .and_then(|frame| pt.cursor().map(addr, frame, PageSize::Size4K, flags).ok())
36                    .is_none()
37                {
38                    return false;
39                }
40            }
41            true
42        } else {
43            // Map to a empty entry for on-demand mapping.
44            pt.cursor()
45                .map_region(
46                    start,
47                    |_va| PhysAddr::from(0),
48                    size,
49                    MappingFlags::empty(),
50                    false,
51                )
52                .is_ok()
53        }
54    }
55
56    pub(crate) fn unmap_alloc(
57        &self,
58        start: GuestPhysAddr,
59        size: usize,
60        pt: &mut PageTable<H>,
61        _populate: bool,
62    ) -> bool {
63        debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size);
64        for addr in PageIter4K::new(start, start + size).unwrap() {
65            if let Ok((frame, _, page_size)) = pt.cursor().unmap(addr) {
66                // Deallocate the physical frame if there is a mapping in the
67                // page table.
68                if page_size.is_huge() {
69                    return false;
70                }
71                H::dealloc_frame(frame);
72            } else {
73                // It's fine if the page is not mapped.
74            }
75        }
76        true
77    }
78
79    pub(crate) fn handle_page_fault_alloc(
80        &self,
81        vaddr: GuestPhysAddr,
82        orig_flags: MappingFlags,
83        pt: &mut PageTable<H>,
84        populate: bool,
85    ) -> bool {
86        if populate {
87            false // Populated mappings should not trigger page faults.
88        } else {
89            // Allocate a physical frame lazily and map it to the fault address.
90            // `vaddr` does not need to be aligned. It will be automatically
91            // aligned during `pt.cursor().remap` regardless of the page size.
92            H::alloc_frame()
93                .and_then(|frame| pt.cursor().remap(vaddr, frame, orig_flags).ok())
94                .is_some()
95        }
96    }
97}