page_table_generic/
map.rs

use crate::{
    FrameAllocator, PageTableEntry, PagingError, PagingResult, PhysAddr, PteConfig, TableMeta,
    VirtAddr, frame::Frame,
};

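/// A single mapping request: map `size` bytes of the virtual range starting at
/// `vaddr` to the physical range starting at `paddr`, using the entry
/// attributes in `pte`, optionally allowing huge (block) entries and flushing
/// after each update.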
#[repr(C)]
#[derive(Clone, Copy)]
pub struct MapConfig {
    pub vaddr: VirtAddr,
    pub paddr: PhysAddr,
    pub size: usize,
    pub pte: PteConfig,
    pub allow_huge: bool,
    pub flush: bool,
}

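/// Per-level state for the recursive mapping walk: the virtual/physical window
/// still to be mapped, the current table level, and the PTE template copied
/// into every entry that gets written.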
#[derive(Clone, Copy)]
pub struct MapRecursiveConfig {
    pub start_vaddr: VirtAddr,
    pub start_paddr: PhysAddr,
    pub end_vaddr: VirtAddr,
    pub level: usize,
    pub allow_huge: bool,
    pub flush: bool,
    pub pte_template: PteConfig,
}

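/// A single unmapping request: remove `size` bytes of mappings starting at
/// `start_vaddr`, optionally flushing after each update.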
#[derive(Clone, Copy)]
pub struct UnmapConfig {
    pub start_vaddr: VirtAddr,
    pub size: usize,
    pub flush: bool,
}

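/// Per-level state for the recursive unmapping walk.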
#[derive(Clone, Copy)]
pub struct UnmapRecursiveConfig {
    pub start_vaddr: VirtAddr,
    pub end_vaddr: VirtAddr,
    pub level: usize,
    pub flush: bool,
}

impl core::fmt::Debug for MapConfig {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("MapConfig")
            .field("vaddr", &format_args!("{:#x}", self.vaddr.raw()))
            .field("paddr", &format_args!("{:#x}", self.paddr.raw()))
            .field("size", &format_args!("{:#x}", self.size))
            .field("allow_huge", &self.allow_huge)
            .field("flush", &self.flush)
            .finish()
    }
}

impl<T, A> Frame<T, A>
where
    T: TableMeta,
    A: FrameAllocator,
{
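    /// Maps `[start_vaddr, end_vaddr)` to physical memory starting at
    /// `start_paddr`, treating `self` as the table at `config.level`.
    /// Intermediate tables are allocated on demand; huge (block) entries are
    /// used where `allow_huge` is set and alignment and remaining size permit.
    /// Fails with a mapping-conflict error if a covered entry is already valid.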
    pub fn map_range_recursive(&mut self, config: MapRecursiveConfig) -> PagingResult<()> {
        let mut vaddr = config.start_vaddr;
        let mut paddr = config.start_paddr;

        while vaddr < config.end_vaddr {
            let index = Self::virt_to_index(vaddr, config.level);
            let level_size = Self::level_size(config.level);
            let remaining_size = config.end_vaddr - vaddr;

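            // Fast path: install a huge (block) entry when the caller allows
            // it, this level supports blocks, and the addresses and remaining
            // size are all aligned to this level's size.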
            if config.allow_huge
                && config.level > 1
                && config.level <= T::MAX_BLOCK_LEVEL
                && level_size <= remaining_size
                && vaddr.raw().is_multiple_of(level_size)
                && paddr.raw().is_multiple_of(level_size)
            {
                let entries = self.as_slice_mut();
                let pte_ref = &mut entries[index];
                if pte_ref.valid() {
                    return Err(PagingError::mapping_conflict(vaddr, paddr));
                }
                let mut pte_config = config.pte_template;
                pte_config.paddr = paddr;
                pte_config.valid = true;
                pte_config.huge = true;
                pte_config.is_dir = true;

                *pte_ref = T::P::from_config(pte_config);

                if config.flush {
                    T::flush(Some(vaddr));
                }

                vaddr += level_size;
                paddr += level_size;
                continue;
            }

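            // At the last level, install a normal page-sized leaf entry.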
            if config.level == 1 {
                let entries = self.as_slice_mut();
                let pte_ref = &mut entries[index];
                if pte_ref.valid() {
                    return Err(PagingError::mapping_conflict(vaddr, paddr));
                }

                let mut pte_config = config.pte_template;
                pte_config.paddr = paddr;
                pte_config.valid = true;
                pte_config.huge = false;
                pte_config.is_dir = false;

                *pte_ref = T::P::from_config(pte_config);

                if config.flush {
                    T::flush(Some(vaddr));
                }

                vaddr += T::PAGE_SIZE;
                paddr += T::PAGE_SIZE;
                continue;
            }

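            // Otherwise descend: reuse the existing next-level table, or
            // allocate a new frame for one and point this entry at it. An
            // existing huge entry here cannot be split, so that is an error.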
            let allocator = self.allocator.clone();
            let current_pte = self.as_slice()[index];
            let current_config = current_pte.to_config(true);

            let mut child_frame = if current_config.valid {
                if current_config.huge {
                    return Err(PagingError::hierarchy_error(
                        "Cannot create page table under huge page",
                    ));
                }

                Frame::from_paddr(current_config.paddr, allocator)
            } else {
                let new_frame = Frame::<T, A>::new(allocator)?;
                let new_frame_paddr = new_frame.paddr;

                let entries = self.as_slice_mut();
                let pte_ref = &mut entries[index];
                let pte_config = PteConfig {
                    paddr: new_frame_paddr,
                    valid: true,
                    huge: false,
                    is_dir: true,
                    ..config.pte_template
                };
                *pte_ref = T::P::from_config(pte_config);

                new_frame
            };

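            // Recurse one level down for just the part of the range that falls
            // inside this entry.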
            let current_entry_end = (vaddr.raw() / level_size)
                .saturating_add(1)
                .saturating_mul(level_size)
                .min(usize::MAX);
            let next_level_vaddr = VirtAddr::new(current_entry_end.min(config.end_vaddr.raw()));
            let child_config = MapRecursiveConfig {
                start_vaddr: vaddr,
                start_paddr: paddr,
                end_vaddr: next_level_vaddr,
                level: config.level - 1,
                allow_huge: config.allow_huge,
                flush: config.flush,
                pte_template: config.pte_template,
            };
            child_frame.map_range_recursive(child_config)?;

            let mapped_size = next_level_vaddr - vaddr;
            vaddr = next_level_vaddr;
            paddr += mapped_size;
        }

        Ok(())
    }

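    /// Unmaps `[start_vaddr, end_vaddr)`, treating `self` as the table at
    /// `config.level`. Child tables are recursed into and handed back to the
    /// allocator once they become empty. Returns `true` if this frame itself
    /// no longer contains any valid entry and may be reclaimed by the caller.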
    pub fn unmap_range_recursive(&mut self, config: UnmapRecursiveConfig) -> PagingResult<bool> {
        let mut vaddr = config.start_vaddr;
        let mut can_reclaim = true;
        let allocator = self.allocator.clone();

        while vaddr < config.end_vaddr {
            let index = Self::virt_to_index(vaddr, config.level);
            let level_size = Self::level_size(config.level);
            let remaining_size = config.end_vaddr - vaddr;

            let entries = self.as_slice_mut();
            let pte_ref = &mut entries[index];

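            // Decode this entry; if nothing is mapped here, just skip ahead.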
            let pte_config = pte_ref.to_config(config.level > 1);
            if !pte_config.valid {
                vaddr += level_size.min(remaining_size);
                continue;
            }

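            // Leaf entry (last level or a huge/block mapping): clear it in place.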
            if config.level == 1 || pte_config.huge {
                let invalid_config = PteConfig {
                    valid: false,
                    ..Default::default()
                };
                *pte_ref = T::P::from_config(invalid_config);

                if config.flush {
                    T::flush(Some(vaddr));
                }

                vaddr += if pte_config.huge {
                    level_size
                } else {
                    T::PAGE_SIZE
                };
                continue;
            }

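            // Intermediate entry: recurse into the child table for the part of
            // the range that falls inside this entry.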
            let child_paddr = pte_config.paddr;

            let current_entry_end = (vaddr.raw() / level_size)
                .saturating_add(1)
                .saturating_mul(level_size);
            let next_level_vaddr = VirtAddr::new(current_entry_end.min(config.end_vaddr.raw()));

            {
                let mut child_frame: Frame<T, A> =
                    Frame::from_paddr(child_paddr, allocator.clone());
                let child_config = UnmapRecursiveConfig {
                    start_vaddr: vaddr,
                    end_vaddr: next_level_vaddr,
                    level: config.level - 1,
                    flush: config.flush,
                };

                let child_can_reclaim = child_frame.unmap_range_recursive(child_config)?;

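                // If the child table ended up empty, clear this entry and
                // return the child frame to the allocator; otherwise this
                // frame cannot be reclaimed either.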
                if child_can_reclaim {
                    let invalid_config = PteConfig {
                        valid: false,
                        ..Default::default()
                    };
                    *pte_ref = T::P::from_config(invalid_config);
                    allocator.dealloc_frame(child_paddr);
                } else {
                    can_reclaim = false;
                }
            }

            vaddr = next_level_vaddr;
        }

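        // Only report this frame as reclaimable if every entry is now invalid.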
        if can_reclaim {
            can_reclaim = self.is_frame_empty();
        }

        Ok(can_reclaim)
    }

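    /// Returns `true` when no entry in this table frame is valid.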
    fn is_frame_empty(&self) -> bool {
        let entries = self.as_slice();
        for pte in entries {
            if pte.to_config(false).valid {
                return false;
            }
        }
        true
    }
}