// page_walker/address_space.rs

//! This module provides the [`AddressSpace`] struct, an abstraction over a virtual address
//! space with methods to introspect and manage that address space.
3
4use core::marker::PhantomData;
5use core::ops::Range;
6use crate::PageFormat;
7use crate::walkers::*;
8use num_traits::{FromPrimitive, PrimInt, Unsigned};
9
/// The [`AddressSpace`] struct expects a type implementing this trait in order to map the page
/// tables while performing the various page table operations.
///
/// Only [`read_pte`](Self::read_pte) and [`write_pte`](Self::write_pte) are required; the
/// remaining methods have default implementations that report
/// [`NOT_IMPLEMENTED`](Self::NOT_IMPLEMENTED) (or do nothing, for
/// [`free_page`](Self::free_page)), so implementors opt into physical memory access and page
/// allocation as needed.
pub trait PageTableMapper<PTE, Error>
where
    PTE: FromPrimitive + PrimInt + Unsigned,
{
    /// An `Error` constant indicating that the PTE was not found.
    const PTE_NOT_FOUND: Error;

    /// An `Error` constant indicating that a page was not present.
    const PAGE_NOT_PRESENT: Error;

    /// An `Error` constant indicating that a function has not been implemented.
    const NOT_IMPLEMENTED: Error;

    /// Reads the PTE at the given physical address.
    fn read_pte(&self, phys_addr: PTE) -> Result<PTE, Error>;

    /// Writes the PTE to the given physical address.
    fn write_pte(&mut self, phys_addr: PTE, value: PTE) -> Result<(), Error>;

    /// Reads bytes from the given physical address into `bytes`, returning the number of
    /// bytes read.
    ///
    /// The default implementation returns [`NOT_IMPLEMENTED`](Self::NOT_IMPLEMENTED);
    /// override it to support reading physical memory.
    fn read_bytes(&self, _bytes: &mut [u8], _phys_addr: PTE) -> Result<usize, Error> {
        Err(Self::NOT_IMPLEMENTED)
    }

    /// Writes the given bytes to the given physical address, returning the number of bytes
    /// written.
    ///
    /// The default implementation returns [`NOT_IMPLEMENTED`](Self::NOT_IMPLEMENTED);
    /// override it to support writing physical memory.
    fn write_bytes(&mut self, _phys_addr: PTE, _bytes: &[u8]) -> Result<usize, Error> {
        Err(Self::NOT_IMPLEMENTED)
    }

    /// Allocates a physical page and returns its physical address.
    ///
    /// The default implementation returns [`NOT_IMPLEMENTED`](Self::NOT_IMPLEMENTED);
    /// override it to support page allocation (required by the allocating walkers).
    fn alloc_page(&mut self) -> Result<PTE, Error> {
        Err(Self::NOT_IMPLEMENTED)
    }

    /// Frees a physical page.
    ///
    /// The default implementation is a deliberate no-op, so mappers without an allocator
    /// can silently ignore frees.
    fn free_page(&mut self, _pte: PTE) {
    }
}
50
51/// Abstracts a virtual address space.
52pub struct AddressSpace<'a, PTE, Mapper, Error>
53where
54    PTE: FromPrimitive + PrimInt + Unsigned,
55    Mapper: PageTableMapper<PTE, Error>,
56{
57    /// The page table format describing the page table hierarchy for this virtual address space.
58    format: PageFormat<'a, PTE>,
59
60    /// The root address of the page table hierarchy.
61    root: PTE,
62
63    /// The type implementing PageTableMapper.
64    mapper: &'a mut Mapper,
65
66    /// A marker for Error.
67    error: core::marker::PhantomData<Error>,
68}
69
impl<'a, PTE, Mapper, Error> AddressSpace<'a, PTE, Mapper, Error>
where
    PTE: FromPrimitive + PrimInt + Unsigned,
    Mapper: PageTableMapper<PTE, Error>,
{
    /// Creates a new address space for the given page table format describing the page table
    /// hierarchy, the page table mapper and the pointer to the root of the page table
    /// hierarchy.
    pub fn new(format: PageFormat<'a, PTE>, mapper: &'a mut Mapper, root: PTE) -> Self {
        Self {
            format,
            mapper,
            root,
            error: PhantomData,
        }
    }

    /// Reads the PTE for the given virtual address if the virtual address is valid.
    ///
    /// # Errors
    ///
    /// Returns [`PageTableMapper::PTE_NOT_FOUND`] if the walk completed without the walker
    /// capturing a PTE, or whatever error the walk itself produced.
    pub fn read_pte(&self, virt_addr: usize) -> Result<PTE, Error> {
        // The PteReader walker records the PTE it visits in `pte` (starts as None).
        let mut walker = PteReader {
            mapper: self.mapper,
            pte: None,
            error: PhantomData,
        };

        // Walk a one-byte range so that exactly the entry covering `virt_addr` is visited.
        // NOTE(review): `virt_addr + 1` overflows when `virt_addr == usize::MAX` — confirm
        // callers never probe the last byte of the address space.
        self.format.walk(self.root, virt_addr..virt_addr + 1, &mut walker)?;

        match walker.pte {
            Some(pte) => Ok(pte),
            _ => Err(Mapper::PTE_NOT_FOUND),
        }
    }

    /// Writes the PTE for the given virtual address if the virtual address is valid.
    pub fn write_pte(&mut self, virt_addr: usize, pte: PTE) -> Result<(), Error> {
        // The PteWriter walker stores `pte` at the entry it visits.
        let mut walker = PteWriter {
            mapper: self.mapper,
            pte,
            error: PhantomData,
        };

        // One-byte range: only the entry covering `virt_addr` is written.
        // NOTE(review): same `virt_addr + 1` overflow edge as `read_pte`.
        self.format.walk_mut(self.root, virt_addr..virt_addr + 1, &mut walker)?;

        Ok(())
    }

    /// Allocates pages and the underlying page tables for a given range in the virtual address
    /// space. The pages are protected using the given mask.
    pub fn allocate_range(&mut self, range: Range<usize>, mask: PTE) -> Result<(), Error> {
        let mut walker = PteAllocator {
            mapper: self.mapper,
            // Presumably `Some(mask)` instructs the walker to allocate leaf pages with these
            // protection bits (vs. page tables only) — confirm against `PteAllocator`.
            mask: Some(mask),
            format: &self.format,
            error: PhantomData,
        };

        self.format.walk_mut(self.root, range, &mut walker)?;

        Ok(())
    }

    /// Maps the given range in the virtual address space range to the given physical address
    /// offset and mask. Allocates the underlying page tables if they are missing. This is useful
    /// for memory-mapped I/O.
    ///
    /// NOTE(review): the signature only takes `mask`; the physical address offset mentioned
    /// above is presumably encoded within `mask` — confirm against `PteMapper`.
    pub fn map_range(&mut self, range: Range<usize>, mask: PTE) -> Result<(), Error> {
        let mut walker = PteMapper {
            mapper: self.mapper,
            mask,
            format: &self.format,
            error: PhantomData,
        };

        self.format.walk_mut(self.root, range, &mut walker)?;

        Ok(())
    }

    /// Changes the protection flags of the given range in the virtual address space. The first
    /// mask specifies the full mask to clear the bits. The second mask specifies the bits that
    /// should be set.
    pub fn protect_range(&mut self, range: Range<usize>, mask: (PTE, PTE)) -> Result<(), Error> {
        // `mask` is a (clear, set) pair applied by the PteProtector walker.
        let mut walker = PteProtector {
            mapper: self.mapper,
            mask,
            format: &self.format,
            error: PhantomData,
        };

        self.format.walk_mut(self.root, range, &mut walker)?;

        Ok(())
    }

    /// Frees the pages for the given range in the virtual address space. If the underlying page
    /// tables have been cleared, then this function also frees the underlying page tables.
    pub fn free_range(&mut self, range: Range<usize>) -> Result<(), Error> {
        // All removal flags set: unlike `unmap_range`, the backing pages are freed too.
        let flags = PteRemovalFlags::all();

        let mut walker = PteRemover {
            mapper: self.mapper,
            flags,
            format: &self.format,
            error: PhantomData,
        };

        self.format.walk_mut(self.root, range, &mut walker)?;

        Ok(())
    }

    /// Unmaps the pages for the given range in the virtual address space without freeing the
    /// underlying pages. This is useful for memory-mapped I/O.
    pub fn unmap_range(&mut self, range: Range<usize>) -> Result<(), Error> {
        // Empty flags: entries are cleared but the backing pages are left alone
        // (contrast with `free_range`, which uses `PteRemovalFlags::all()`).
        let flags = PteRemovalFlags::empty();

        let mut walker = PteRemover {
            mapper: self.mapper,
            flags,
            format: &self.format,
            error: PhantomData,
        };

        self.format.walk_mut(self.root, range, &mut walker)?;

        Ok(())
    }

    /// Copies bytes starting at the given address into the given buffer.
    ///
    /// The amount copied is bounded by `data.len()`.
    pub fn copy_from(&mut self, data: &mut [u8], address: usize) -> Result<(), Error> {
        // NOTE(review): `address + data.len()` can overflow near the top of the address
        // space — confirm callers keep ranges in bounds.
        let range = address..address + data.len();

        // `offset` tracks how far into `data` the walker has copied so far.
        let mut walker = CopyFromWalker {
            mapper: self.mapper,
            offset: 0,
            data,
            format: &self.format,
            error: PhantomData,
        };

        self.format.walk(self.root, range, &mut walker)?;

        Ok(())
    }

    /// Copies bytes from the given buffer to the given address.
    pub fn copy_to(&mut self, address: usize, data: &[u8]) -> Result<(), Error> {
        // NOTE(review): same `address + data.len()` overflow edge as `copy_from`.
        let range = address..address + data.len();

        // `offset` tracks how far into `data` the walker has copied so far.
        let mut walker = CopyToWalker {
            mapper: self.mapper,
            offset: 0,
            data,
            format: &self.format,
            error: PhantomData,
        };

        self.format.walk(self.root, range, &mut walker)?;

        Ok(())
    }
}
230}