aarch64_paging/linearmap.rs
1// Copyright 2022 The aarch64-paging Authors.
2// This project is dual-licensed under Apache 2.0 and MIT terms.
3// See LICENSE-APACHE and LICENSE-MIT for details.
4
5//! Functionality for managing page tables with linear mapping.
6//!
7//! See [`LinearMap`] for details on how to use it.
8
9use crate::{
10 MapError, Mapping,
11 descriptor::{Attributes, Descriptor, PhysicalAddress, UpdatableDescriptor, VirtualAddress},
12 paging::{
13 Constraints, MemoryRegion, PAGE_SIZE, PageTable, Translation, TranslationRegime, VaRange,
14 deallocate, is_aligned,
15 },
16};
17use core::ptr::NonNull;
18
/// Linear mapping, where every virtual address is either unmapped or mapped to an IPA with a fixed
/// offset.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct LinearTranslation {
    /// The offset from a virtual address to the corresponding (intermediate) physical address.
    // Invariant: always a multiple of `PAGE_SIZE` (enforced by `LinearTranslation::new`); may be
    // negative, in which case virtual addresses map to lower physical addresses.
    offset: isize,
}
26
27impl LinearTranslation {
28 /// Constructs a new linear translation, which will map a virtual address `va` to the
29 /// (intermediate) physical address `va + offset`.
30 ///
31 /// The `offset` must be a multiple of [`PAGE_SIZE`]; if not this will panic.
32 pub fn new(offset: isize) -> Self {
33 if !is_aligned(offset.unsigned_abs(), PAGE_SIZE) {
34 panic!(
35 "Invalid offset {}, must be a multiple of page size {}.",
36 offset, PAGE_SIZE,
37 );
38 }
39 Self { offset }
40 }
41
42 fn virtual_to_physical(&self, va: VirtualAddress) -> Result<PhysicalAddress, MapError> {
43 if let Some(pa) = checked_add_to_unsigned(va.0 as isize, self.offset) {
44 Ok(PhysicalAddress(pa))
45 } else {
46 Err(MapError::InvalidVirtualAddress(va))
47 }
48 }
49}
50
51impl Translation for LinearTranslation {
52 fn allocate_table(&mut self) -> (NonNull<PageTable>, PhysicalAddress) {
53 let table = PageTable::new();
54 // Assume that the same linear mapping is used everywhere.
55 let va = VirtualAddress(table.as_ptr() as usize);
56
57 let pa = self.virtual_to_physical(va).expect(
58 "Allocated subtable with virtual address which doesn't correspond to any physical address."
59 );
60 (table, pa)
61 }
62
63 unsafe fn deallocate_table(&mut self, page_table: NonNull<PageTable>) {
64 // SAFETY: Our caller promises that the memory was allocated by `allocate_table` on this
65 // `LinearTranslation` and not yet deallocated. `allocate_table` used the global allocator
66 // and appropriate layout by calling `PageTable::new()`.
67 unsafe {
68 deallocate(page_table);
69 }
70 }
71
72 fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable> {
73 let signed_pa = pa.0 as isize;
74 if signed_pa < 0 {
75 panic!("Invalid physical address {} for pagetable", pa);
76 }
77 if let Some(va) = signed_pa.checked_sub(self.offset) {
78 if let Some(ptr) = NonNull::new(va as *mut PageTable) {
79 ptr
80 } else {
81 panic!(
82 "Invalid physical address {} for pagetable (translated to virtual address 0)",
83 pa
84 )
85 }
86 } else {
87 panic!("Invalid physical address {} for pagetable", pa);
88 }
89 }
90}
91
/// Adds two signed values, returning an unsigned value or `None` if it would overflow.
fn checked_add_to_unsigned(a: isize, b: isize) -> Option<usize> {
    // Fail on signed overflow first, then on a negative sum (which has no unsigned form).
    let sum = a.checked_add(b)?;
    usize::try_from(sum).ok()
}
96
/// Manages a level 1 page table using linear mapping, where every virtual address is either
/// unmapped or mapped to an IPA with a fixed offset.
///
/// This assumes that the same linear mapping is used both for the page table being managed, and for
/// code that is managing it.
#[derive(Debug)]
pub struct LinearMap {
    /// The underlying page table mapping, parameterised with the linear translation.
    mapping: Mapping<LinearTranslation>,
}
106
impl LinearMap {
    /// Creates a new linear-mapping page table with the given ASID, root level and offset, for
    /// use in the given TTBR.
    ///
    /// This will map any virtual address `va` which is added to the table to the physical address
    /// `va + offset`.
    ///
    /// The `offset` must be a multiple of [`PAGE_SIZE`]; if not this will panic.
    pub fn new(
        asid: usize,
        rootlevel: usize,
        offset: isize,
        translation_regime: TranslationRegime,
        va_range: VaRange,
    ) -> Self {
        Self {
            mapping: Mapping::new(
                LinearTranslation::new(offset),
                asid,
                rootlevel,
                translation_regime,
                va_range,
            ),
        }
    }

    /// Returns the size in bytes of the virtual address space which can be mapped in this page
    /// table.
    ///
    /// This is a function of the chosen root level.
    pub fn size(&self) -> usize {
        self.mapping.size()
    }

    /// Activates the page table by programming the physical address of the root page table into
    /// `TTBRn_ELx`, along with the provided ASID. The previous value of `TTBRn_ELx` is returned so
    /// that it may later be restored by passing it to [`deactivate`](Self::deactivate).
    ///
    /// In test builds or builds that do not target aarch64, the `TTBR0_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the page table doesn't unmap any memory which the program is
    /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
    /// dropped as long as its mappings are required, as it will automatically be deactivated when
    /// it is dropped.
    pub unsafe fn activate(&mut self) -> usize {
        // SAFETY: We delegate the safety requirements to our caller.
        unsafe { self.mapping.activate() }
    }

    /// Deactivates the page table, by setting `TTBRn_ELx` to the provided value, and invalidating
    /// the TLB for this page table's configured ASID. The provided TTBR value should be the value
    /// returned by the preceding [`activate`](Self::activate) call.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBR0_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the previous page table which this is switching back to doesn't
    /// unmap any memory which the program is using.
    pub unsafe fn deactivate(&mut self, previous_ttbr: usize) {
        // SAFETY: We delegate the safety requirements to our caller.
        unsafe {
            self.mapping.deactivate(previous_ttbr);
        }
    }

    /// Maps the given range of virtual addresses to the corresponding physical addresses with the
    /// given flags.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains `Attributes::VALID`, otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
    /// address within the `range` would result in overflow.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
    pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
        self.map_range_with_constraints(range, flags, Constraints::empty())
    }

    /// Maps the given range of virtual addresses to the corresponding physical addresses with the
    /// given flags, taking the given constraints into account.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains `Attributes::VALID`, otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
    /// address within the `range` would result in overflow.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
    pub fn map_range_with_constraints(
        &mut self,
        range: &MemoryRegion,
        flags: Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        // The physical base is derived from the start of the range, so the whole range is mapped
        // with the single fixed offset.
        let pa = self
            .mapping
            .translation()
            .virtual_to_physical(range.start())?;
        self.mapping.map_range(range, pa, flags, constraints)
    }

    /// Applies the provided updater function to the page table descriptors covering a given
    /// memory range.
    ///
    /// This may involve splitting block entries if the provided range is not currently mapped
    /// down to its precise boundaries. For visiting all the descriptors covering a memory range
    /// without potential splitting (and no descriptor updates), use
    /// [`walk_range`](Self::walk_range) instead.
    ///
    /// The updater function receives the following arguments:
    ///
    /// - The virtual address range mapped by each page table descriptor. A new descriptor will
    ///   have been allocated before the invocation of the updater function if a page table split
    ///   was needed.
    /// - A mutable reference to the page table descriptor that permits modifications. The level
    ///   of the translation table the descriptor belongs to can be queried from the descriptor
    ///   itself.
    ///
    /// The updater function should return:
    ///
    /// - `Ok` to continue updating the remaining entries.
    /// - `Err` to signal an error and stop updating the remaining entries.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut UpdatableDescriptor) -> Result<(), ()> + ?Sized,
    {
        self.mapping.modify_range(range, f)
    }

    /// Applies the provided callback function to the page table descriptors covering a given
    /// memory range.
    ///
    /// The callback function receives the following arguments:
    ///
    /// - The range covered by the current step in the walk. This is always a subrange of `range`
    ///   even when the descriptor covers a region that exceeds it.
    /// - The page table descriptor itself.
    /// - The level of a translation table the descriptor belongs to.
    ///
    /// The callback function should return:
    ///
    /// - `Ok` to continue visiting the remaining entries.
    /// - `Err` to signal an error and stop visiting the remaining entries.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
    {
        self.mapping.walk_range(range, f)
    }

    /// Looks for subtables whose entries are all empty and replaces them with a single empty entry,
    /// freeing the subtable.
    ///
    /// This requires walking the whole hierarchy of pagetables, so you may not want to call it
    /// every time a region is unmapped. You could instead call it when the system is under memory
    /// pressure.
    pub fn compact_subtables(&mut self) {
        self.mapping.compact_subtables();
    }

    /// Returns the physical address of the root table.
    ///
    /// This may be used to activate the page table by setting the appropriate TTBRn_ELx if you wish
    /// to do so yourself rather than by calling [`activate`](Self::activate). Make sure to call
    /// [`mark_active`](Self::mark_active) after doing so.
    pub fn root_address(&self) -> PhysicalAddress {
        self.mapping.root_address()
    }

    /// Marks the page table as active.
    ///
    /// This should be called if the page table is manually activated by calling
    /// [`root_address`](Self::root_address) and setting some TTBR with it. This will cause
    /// [`map_range`](Self::map_range) and [`modify_range`](Self::modify_range) to perform extra
    /// checks to avoid violating break-before-make requirements.
    ///
    /// It is called automatically by [`activate`](Self::activate).
    pub fn mark_active(&mut self) {
        self.mapping.mark_active();
    }

    /// Marks the page table as inactive.
    ///
    /// This may be called after manually disabling the use of the page table, such as by setting
    /// the relevant TTBR to a different address.
    ///
    /// It is called automatically by [`deactivate`](Self::deactivate).
    pub fn mark_inactive(&mut self) {
        self.mapping.mark_inactive();
    }
}
353
354#[cfg(test)]
355mod tests {
356 use super::*;
357 use crate::{
358 MapError,
359 descriptor::Attributes,
360 paging::{BITS_PER_LEVEL, MemoryRegion, PAGE_SIZE},
361 };
362
    // Exclusive upper bound on virtual addresses covered by a level-1 root table: 2^39 = 512 GiB.
    const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39;
    // 512 GiB as a signed value, for use as a translation offset.
    const GIB_512_S: isize = 512 * 1024 * 1024 * 1024;
    // 512 GiB as an unsigned value, for use as an address.
    const GIB_512: usize = 512 * 1024 * 1024 * 1024;
    // Memory attributes used by most tests below.
    const NORMAL_CACHEABLE: Attributes =
        Attributes::ATTRIBUTE_INDEX_1.union(Attributes::INNER_SHAREABLE);
368
    // Checks that map_range succeeds for in-range regions with a positive page-aligned offset.
    #[test]
    fn map_valid() {
        // A single byte at the start of the address space.
        let mut pagetable = LinearMap::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, 1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // Two pages at the start of the address space.
        let mut pagetable = LinearMap::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, PAGE_SIZE * 2),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // A single byte at the end of the address space.
        let mut pagetable = LinearMap::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
                ),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // The entire valid address space. Use an offset that is a multiple of the level 2 block
        // size to avoid mapping everything as pages as that is really slow.
        const LEVEL_2_BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
        let mut pagetable = LinearMap::new(
            1,
            1,
            LEVEL_2_BLOCK_SIZE as isize,
            TranslationRegime::El1And0,
            VaRange::Lower,
        );
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );
    }
422
    // Checks that map_range succeeds with a negative offset, as long as no VA in the range would
    // translate to a negative IPA.
    #[test]
    fn map_valid_negative_offset() {
        // A single byte which maps to IPA 0.
        let mut pagetable = LinearMap::new(
            1,
            1,
            -(PAGE_SIZE as isize),
            TranslationRegime::El1And0,
            VaRange::Lower,
        );
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE + 1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // Two pages at the start of the address space.
        let mut pagetable = LinearMap::new(
            1,
            1,
            -(PAGE_SIZE as isize),
            TranslationRegime::El1And0,
            VaRange::Lower,
        );
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE * 3),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // A single byte at the end of the address space.
        let mut pagetable = LinearMap::new(
            1,
            1,
            -(PAGE_SIZE as isize),
            TranslationRegime::El1And0,
            VaRange::Lower,
        );
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
                ),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // The entire valid address space. Use an offset that is a multiple of the level 2 block
        // size to avoid mapping everything as pages as that is really slow.
        const LEVEL_2_BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
        let mut pagetable = LinearMap::new(
            1,
            1,
            -(LEVEL_2_BLOCK_SIZE as isize),
            TranslationRegime::El1And0,
            VaRange::Lower,
        );
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(LEVEL_2_BLOCK_SIZE, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );
    }
494
    // Checks that map_range reports AddressRange when any part of the range exceeds the virtual
    // address space covered by a level-1 root table.
    #[test]
    fn map_out_of_range() {
        let mut pagetable = LinearMap::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower);

        // One byte, just past the edge of the valid range.
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
                ),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );

        // From 0 to just past the valid range.
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );
    }
524
    // Checks that map_range reports InvalidVirtualAddress when the offset would translate a VA in
    // the range to a negative IPA.
    #[test]
    fn map_invalid_offset() {
        let mut pagetable = LinearMap::new(1, 1, -4096, TranslationRegime::El1And0, VaRange::Lower);

        // One byte, with an offset which would map it to a negative IPA.
        assert_eq!(
            pagetable.map_range(&MemoryRegion::new(0, 1), NORMAL_CACHEABLE),
            Err(MapError::InvalidVirtualAddress(VirtualAddress(0)))
        );
    }
535
    // Checks that physical_to_virtual subtracts the offset for in-range lower-half addresses.
    #[test]
    fn physical_address_in_range_ttbr0() {
        let translation = LinearTranslation::new(4096);
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(8192)),
            NonNull::new(4096 as *mut PageTable).unwrap(),
        );
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(GIB_512 + 4096)),
            NonNull::new(GIB_512 as *mut PageTable).unwrap(),
        );
    }
548
    // Checks that physical_to_virtual panics when the PA would translate to virtual address 0,
    // which cannot be represented as a NonNull pointer.
    #[test]
    #[should_panic]
    fn physical_address_to_zero_ttbr0() {
        let translation = LinearTranslation::new(4096);
        translation.physical_to_virtual(PhysicalAddress(4096));
    }
555
    // Checks that physical_to_virtual panics for a PA whose top bit is set (negative as isize).
    #[test]
    #[should_panic]
    fn physical_address_out_of_range_ttbr0() {
        let translation = LinearTranslation::new(4096);
        translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize));
    }
562
    // Checks physical_to_virtual for an offset that places VAs in the upper (TTBR1) range, where
    // the resulting pointers have the high bits set (negative as isize).
    #[test]
    fn physical_address_in_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to one page above the bottom
        // of physical address space.
        let translation = LinearTranslation::new(GIB_512_S + 4096);
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(8192)),
            NonNull::new((4096 - GIB_512_S) as *mut PageTable).unwrap(),
        );
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(GIB_512)),
            NonNull::new(-4096_isize as *mut PageTable).unwrap(),
        );
    }
577
    // Checks that physical_to_virtual panics when a TTBR1-style offset translates the PA to
    // virtual address 0.
    #[test]
    #[should_panic]
    fn physical_address_to_zero_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation = LinearTranslation::new(GIB_512_S);
        translation.physical_to_virtual(PhysicalAddress(GIB_512));
    }
586
    // Checks that physical_to_virtual panics for a top-bit-set PA with a TTBR1-style offset.
    #[test]
    #[should_panic]
    fn physical_address_out_of_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation = LinearTranslation::new(GIB_512_S);
        translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize));
    }
595
    // Checks that virtual_to_physical rejects a VA whose translation would be negative.
    #[test]
    fn virtual_address_out_of_range() {
        let translation = LinearTranslation::new(-4096);
        let va = VirtualAddress(1024);
        assert_eq!(
            translation.virtual_to_physical(va),
            Err(MapError::InvalidVirtualAddress(va))
        )
    }
605
    // Checks virtual_to_physical for upper-range (TTBR1) virtual addresses, which are negative
    // when reinterpreted as isize.
    #[test]
    fn virtual_address_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation = LinearTranslation::new(GIB_512_S);

        // The first page in the region covered by TTBR1.
        assert_eq!(
            translation.virtual_to_physical(VirtualAddress(0xffff_ff80_0000_0000)),
            Ok(PhysicalAddress(0))
        );
        // The last page in the region covered by TTBR1.
        assert_eq!(
            translation.virtual_to_physical(VirtualAddress(0xffff_ffff_ffff_f000)),
            Ok(PhysicalAddress(0x7f_ffff_f000))
        );
    }
623
    // Checks that a level-1 block entry is used only when the PA is aligned to the block size;
    // otherwise the region is mapped at a deeper level.
    #[test]
    fn block_mapping() {
        // Test that block mapping is used when the PA is appropriately aligned...
        let mut pagetable =
            LinearMap::new(1, 1, 1 << 30, TranslationRegime::El1And0, VaRange::Lower);
        pagetable
            .map_range(
                &MemoryRegion::new(0, 1 << 30),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            )
            .unwrap();
        assert_eq!(
            pagetable.mapping.root.mapping_level(VirtualAddress(0)),
            Some(1)
        );

        // ...but not when it is not.
        let mut pagetable =
            LinearMap::new(1, 1, 1 << 29, TranslationRegime::El1And0, VaRange::Lower);
        pagetable
            .map_range(
                &MemoryRegion::new(0, 1 << 30),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            )
            .unwrap();
        assert_eq!(
            pagetable.mapping.root.mapping_level(VirtualAddress(0)),
            Some(2)
        );
    }
654
    // Helper: builds a small LinearMap with two (invalid, as VALID is not set) pages mapped.
    fn make_map() -> LinearMap {
        let mut lmap = LinearMap::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower);
        // Mapping VA range 0x0 - 0x2000 to PA range 0x1000 - 0x3000
        lmap.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), NORMAL_CACHEABLE)
            .unwrap();
        lmap
    }
662
    // Checks that modify_range fails for a backwards region (end before start).
    #[test]
    fn update_backwards_range() {
        let mut lmap = make_map();
        assert!(
            lmap.modify_range(&MemoryRegion::new(PAGE_SIZE * 2, 1), &|_range, entry| {
                entry.modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap())
            },)
            .is_err()
        );
    }
673
    // Checks that modify_range can set a software flag on leaf entries and that a second pass
    // observes the flag and the page-granularity ranges.
    #[test]
    fn update_range() {
        let mut lmap = make_map();
        // Set SWFLAG_0 on every non-table entry covering the first page.
        lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry| {
            if !entry.is_table() {
                entry.modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap())?;
            }
            Ok(())
        })
        .unwrap();
        // Verify the flag stuck and that each visited leaf covers exactly one page.
        lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry| {
            if !entry.is_table() {
                assert!(entry.flags().contains(Attributes::SWFLAG_0));
                assert_eq!(range.end() - range.start(), PAGE_SIZE);
            }
            Ok(())
        })
        .unwrap();
    }
693
    // Checks that mapping a single page inside an invalid block splits the block into pages,
    // preserving the original attributes on all pages except the remapped one.
    #[test]
    fn breakup_invalid_block() {
        const BLOCK_RANGE: usize = 0x200000;

        let mut lmap = LinearMap::new(1, 1, 0x1000, TranslationRegime::El1And0, VaRange::Lower);
        // Map a whole (invalid) block carrying SWFLAG_0.
        lmap.map_range(
            &MemoryRegion::new(0, BLOCK_RANGE),
            NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::SWFLAG_0,
        )
        .unwrap();
        // Remap just the first page as valid, without SWFLAG_0; this forces a block split.
        lmap.map_range(
            &MemoryRegion::new(0, PAGE_SIZE),
            NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::VALID | Attributes::ACCESSED,
        )
        .unwrap();
        // Every level-3 entry except the first page should still carry SWFLAG_0.
        lmap.modify_range(&MemoryRegion::new(0, BLOCK_RANGE), &|range, entry| {
            if entry.level() == 3 {
                let has_swflag = entry.flags().contains(Attributes::SWFLAG_0);
                let is_first_page = range.start().0 == 0usize;
                assert!(has_swflag != is_first_page);
            }
            Ok(())
        })
        .unwrap();
    }
719
720 #[test]
721 #[should_panic]
722 fn split_live_block_mapping() -> () {
723 const BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
724 let mut lmap = LinearMap::new(
725 1,
726 1,
727 BLOCK_SIZE as isize,
728 TranslationRegime::El1And0,
729 VaRange::Lower,
730 );
731 lmap.map_range(
732 &MemoryRegion::new(0, BLOCK_SIZE),
733 NORMAL_CACHEABLE
734 | Attributes::NON_GLOBAL
735 | Attributes::READ_ONLY
736 | Attributes::VALID
737 | Attributes::ACCESSED,
738 )
739 .unwrap();
740 // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
741 // active for the sake of BBM rules.
742 let ttbr = unsafe { lmap.activate() };
743 lmap.map_range(
744 &MemoryRegion::new(0, PAGE_SIZE),
745 NORMAL_CACHEABLE
746 | Attributes::NON_GLOBAL
747 | Attributes::READ_ONLY
748 | Attributes::VALID
749 | Attributes::ACCESSED,
750 )
751 .unwrap();
752 lmap.map_range(
753 &MemoryRegion::new(PAGE_SIZE, 2 * PAGE_SIZE),
754 NORMAL_CACHEABLE
755 | Attributes::NON_GLOBAL
756 | Attributes::READ_ONLY
757 | Attributes::VALID
758 | Attributes::ACCESSED,
759 )
760 .unwrap();
761 let r = lmap.map_range(
762 &MemoryRegion::new(PAGE_SIZE, 2 * PAGE_SIZE),
763 NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::VALID | Attributes::ACCESSED,
764 );
765 unsafe { lmap.deactivate(ttbr) };
766 r.unwrap();
767 }
768}