page_table_generic/
map.rs1use crate::{
2 FrameAllocator, PageTableEntry, PagingError, PagingResult, PhysAddr, PteConfig, TableMeta,
3 VirtAddr, frame::Frame,
4};
5
/// Parameters for a single contiguous virtual-to-physical mapping request.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct MapConfig {
    /// First virtual address of the range to map.
    pub vaddr: VirtAddr,
    /// First physical address the range maps to.
    pub paddr: PhysAddr,
    /// Length of the range in bytes.
    pub size: usize,
    /// Template for the page-table entries created for this range
    /// (permissions/attributes; address and level bits are filled in per entry).
    pub pte: PteConfig,
    /// Permit huge/block mappings when alignment and size allow it.
    pub allow_huge: bool,
    /// Flush the TLB for each address as it is (re)mapped.
    pub flush: bool,
}
20
/// Per-level state threaded through [`Frame::map_range_recursive`].
#[derive(Clone, Copy)]
pub struct MapRecursiveConfig {
    /// First virtual address still to be mapped at this level.
    pub start_vaddr: VirtAddr,
    /// Physical address corresponding to `start_vaddr`.
    pub start_paddr: PhysAddr,
    /// Exclusive end of the virtual range handled by this call.
    pub end_vaddr: VirtAddr,
    /// Page-table level this call operates on (1 = leaf level).
    pub level: usize,
    /// Permit huge/block mappings at eligible levels.
    pub allow_huge: bool,
    /// Flush the TLB for each address as it is mapped.
    pub flush: bool,
    /// Entry template copied into every PTE created for this range.
    pub pte_template: PteConfig,
}
32
/// Parameters for unmapping a contiguous virtual range.
#[derive(Clone, Copy)]
pub struct UnmapConfig {
    /// First virtual address of the range to unmap.
    pub start_vaddr: VirtAddr,
    /// Length of the range in bytes.
    pub size: usize,
    /// Flush the TLB for each address as it is unmapped.
    pub flush: bool,
}
40
/// Per-level state threaded through [`Frame::unmap_range_recursive`].
#[derive(Clone, Copy)]
pub struct UnmapRecursiveConfig {
    /// First virtual address still to be unmapped at this level.
    pub start_vaddr: VirtAddr,
    /// Exclusive end of the virtual range handled by this call.
    pub end_vaddr: VirtAddr,
    /// Page-table level this call operates on (1 = leaf level).
    pub level: usize,
    /// Flush the TLB for each address as it is unmapped.
    pub flush: bool,
}
49
50impl core::fmt::Debug for MapConfig {
51 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
52 f.debug_struct("MapConfig")
53 .field("vaddr", &format_args!("{:#x}", self.vaddr.raw()))
54 .field("paddr", &format_args!("{:#x}", self.paddr.raw()))
55 .field("size", &format_args!("{:#x}", self.size))
56 .field("allow_huge", &self.allow_huge)
57 .field("flush", &self.flush)
58 .finish()
59 }
60}
61
impl<T, A> Frame<T, A>
where
    T: TableMeta,
    A: FrameAllocator,
{
    /// Maps `[start_vaddr, end_vaddr)` to physical memory starting at
    /// `start_paddr`, descending the page-table hierarchy recursively.
    ///
    /// `self` is the table frame for `config.level`. Returns
    /// `PagingError::mapping_conflict` if any covered entry is already
    /// valid, and `PagingError::hierarchy_error` if the walk would have
    /// to descend through an existing huge-page entry.
    pub fn map_range_recursive(&mut self, config: MapRecursiveConfig) -> PagingResult<()> {
        let mut vaddr = config.start_vaddr;
        let mut paddr = config.start_paddr;

        while vaddr < config.end_vaddr {
            let index = Self::virt_to_index(vaddr, config.level);
            let level_size = Self::level_size(config.level);
            let remaining_size = config.end_vaddr - vaddr;

            // Huge/block mapping: allowed only above the leaf level, at or
            // below the architecture's maximum block level, and when both
            // addresses are block-aligned and the remainder covers a full
            // block. This short-circuits the descent for this entry.
            if config.allow_huge
                && config.level > 1
                && config.level <= T::MAX_BLOCK_LEVEL
                && level_size <= remaining_size
                && vaddr.raw().is_multiple_of(level_size)
                && paddr.raw().is_multiple_of(level_size)
            {
                let entries = self.as_slice_mut();
                let pte_ref = &mut entries[index];
                if pte_ref.valid() {
                    return Err(PagingError::mapping_conflict(vaddr, paddr));
                }
                let mut pte_config = config.pte_template;
                pte_config.paddr = paddr;
                pte_config.valid = true;
                pte_config.huge = true;
                // NOTE(review): `is_dir = true` on a huge-page *leaf* looks
                // inconsistent with the level-1 leaf below (`is_dir = false`).
                // PteConfig's bit semantics are not visible here — confirm
                // this is intentional for block descriptors.
                pte_config.is_dir = true;

                *pte_ref = T::P::from_config(pte_config);

                if config.flush {
                    T::flush(Some(vaddr));
                }

                vaddr += level_size;
                paddr += level_size;
                continue;
            }

            // Leaf level: install a regular page entry.
            if config.level == 1 {
                let entries = self.as_slice_mut();
                let pte_ref = &mut entries[index];
                if pte_ref.valid() {
                    return Err(PagingError::mapping_conflict(vaddr, paddr));
                }

                let mut pte_config = config.pte_template;
                pte_config.paddr = paddr;
                pte_config.valid = true;
                pte_config.huge = false;
                pte_config.is_dir = false;

                *pte_ref = T::P::from_config(pte_config);

                if config.flush {
                    T::flush(Some(vaddr));
                }

                vaddr += T::PAGE_SIZE;
                paddr += T::PAGE_SIZE;
                continue;
            }

            // Intermediate level: find or create the next-level table, then
            // recurse into it for the portion of the range covered by this
            // entry.
            let allocator = self.allocator.clone();
            let current_pte = self.as_slice()[index];
            let current_config = current_pte.to_config(true);

            let child_frame = if current_config.valid {
                // An existing huge mapping cannot be split into a table here.
                if current_config.huge {
                    return Err(PagingError::hierarchy_error(
                        "Cannot create page table under huge page",
                    ));
                }

                Frame::from_paddr(current_config.paddr, allocator)
            } else {
                // Allocate a fresh table frame and hook it into this entry
                // before recursing, so the child is reachable from the root.
                let new_frame = Frame::<T, A>::new(allocator)?;
                let new_frame_paddr = new_frame.paddr;

                let entries = self.as_slice_mut();
                let pte_ref = &mut entries[index];
                let pte_config = PteConfig {
                    paddr: new_frame_paddr,
                    valid: true,
                    huge: false,
                    is_dir: true,
                    ..config.pte_template
                };
                *pte_ref = T::P::from_config(pte_config);

                new_frame
            };

            // End of the region covered by this entry, clamped to the
            // requested range; saturating ops guard against address-space
            // top-edge overflow.
            let current_entry_end = (vaddr.raw() / level_size)
                .saturating_add(1)
                .saturating_mul(level_size);
            let next_level_vaddr = VirtAddr::new(current_entry_end.min(config.end_vaddr.raw()));
            let mut child_frame = child_frame;
            let child_config = MapRecursiveConfig {
                start_vaddr: vaddr,
                start_paddr: paddr,
                end_vaddr: next_level_vaddr,
                level: config.level - 1,
                allow_huge: config.allow_huge,
                flush: config.flush,
                pte_template: config.pte_template,
            };
            child_frame.map_range_recursive(child_config)?;

            // Advance both cursors by however much the child consumed.
            let mapped_size = next_level_vaddr - vaddr;
            vaddr = next_level_vaddr;
            paddr += mapped_size;
        }

        Ok(())
    }

    /// Unmaps `[start_vaddr, end_vaddr)` beneath this table frame.
    ///
    /// Returns `Ok(true)` when this frame ended up with no valid entries,
    /// meaning the *caller* may clear the entry pointing at it and free it.
    /// Child tables that become empty are invalidated and freed here.
    /// Already-invalid entries are skipped silently.
    pub fn unmap_range_recursive(&mut self, config: UnmapRecursiveConfig) -> PagingResult<bool> {
        let mut vaddr = config.start_vaddr;
        let mut can_reclaim = true;
        let allocator = self.allocator.clone();

        while vaddr < config.end_vaddr {
            let index = Self::virt_to_index(vaddr, config.level);
            let level_size = Self::level_size(config.level);
            let remaining_size = config.end_vaddr - vaddr;

            let entries = self.as_slice_mut();
            let pte_ref = &mut entries[index];

            // `to_config(level > 1)` — the flag presumably tells the decoder
            // whether this is a non-leaf level; confirm against PteConfig.
            let pte_config = pte_ref.to_config(config.level > 1);
            if !pte_config.valid {
                // Nothing mapped here; skip this entry (clamped to range end).
                vaddr += level_size.min(remaining_size);
                continue;
            }

            // Leaf page or huge page: clear the entry in place.
            if config.level == 1 || pte_config.huge {
                let invalid_config = PteConfig {
                    valid: false,
                    ..Default::default()
                };
                *pte_ref = T::P::from_config(invalid_config);

                if config.flush {
                    T::flush(Some(vaddr));
                }

                vaddr += if pte_config.huge {
                    level_size
                } else {
                    T::PAGE_SIZE
                };
                continue;
            }

            // Intermediate entry: recurse into the child table for the
            // portion of the range covered by this entry.
            let child_paddr = pte_config.paddr;

            let current_entry_end = ((vaddr.raw() / level_size) + 1) * level_size;
            let next_level_vaddr = VirtAddr::new(current_entry_end.min(config.end_vaddr.raw()));

            {
                let mut child_frame: Frame<T, A> =
                    Frame::from_paddr(child_paddr, allocator.clone());
                let child_config = UnmapRecursiveConfig {
                    start_vaddr: vaddr,
                    end_vaddr: next_level_vaddr,
                    level: config.level - 1,
                    flush: config.flush,
                };

                let child_can_reclaim = child_frame.unmap_range_recursive(child_config)?;

                if child_can_reclaim {
                    // Child table is now empty: unlink it and return the
                    // frame to the allocator.
                    let invalid_config = PteConfig {
                        valid: false,
                        ..Default::default()
                    };
                    *pte_ref = T::P::from_config(invalid_config);
                    allocator.dealloc_frame(child_paddr);
                } else {
                    // Some mappings remain below; this frame stays in use.
                    can_reclaim = false;
                }
            }

            vaddr = next_level_vaddr;
        }

        // Only report reclaimable if every entry in this frame is invalid
        // (entries outside the requested range may still be live).
        if can_reclaim {
            can_reclaim = self.is_frame_empty();
        }

        Ok(can_reclaim)
    }

    /// Returns `true` if no entry in this table frame is valid.
    fn is_frame_empty(&self) -> bool {
        let entries = self.as_slice();
        for pte in entries {
            if pte.to_config(false).valid {
                return false;
            }
        }
        true
    }
}