aarch64_paging/linearmap.rs
// Copyright 2022 The aarch64-paging Authors.
// This project is dual-licensed under Apache 2.0 and MIT terms.
// See LICENSE-APACHE and LICENSE-MIT for details.

//! Functionality for managing page tables with linear mapping.
//!
//! See [`LinearMap`] for details on how to use it.

use crate::{
    MapError, Mapping,
    descriptor::{
        Descriptor, PagingAttributes, PhysicalAddress, UpdatableDescriptor, VirtualAddress,
    },
    paging::{
        Constraints, MemoryRegion, PAGE_SIZE, PageTable, Translation, TranslationRegime, VaRange,
        deallocate, is_aligned,
    },
};
use core::marker::PhantomData;
use core::ptr::NonNull;

/// Linear mapping, where every virtual address is either unmapped or mapped to an IPA with a fixed
/// offset.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct LinearTranslation<A: PagingAttributes> {
    /// The offset from a virtual address to the corresponding (intermediate) physical address.
    offset: isize,
    _phantom: PhantomData<A>,
}

impl<A: PagingAttributes> LinearTranslation<A> {
    /// Constructs a new linear translation, which will map a virtual address `va` to the
    /// (intermediate) physical address `va + offset`.
    ///
    /// The `offset` must be a multiple of [`PAGE_SIZE`]; if not this will panic.
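    ///
    /// # Example
    ///
    /// A minimal sketch, added here for illustration; the `El1Attributes` type and import paths
    /// mirror this crate's tests and are assumptions:
    ///
    /// ```ignore
    /// use aarch64_paging::{descriptor::El1Attributes, linearmap::LinearTranslation};
    ///
    /// // Maps a virtual address `va` to the (intermediate) physical address `va + 0x1000`.
    /// let translation = LinearTranslation::<El1Attributes>::new(0x1000);
    ///
    /// // An offset which is not a multiple of PAGE_SIZE would panic:
    /// // let bad = LinearTranslation::<El1Attributes>::new(0x123);
    /// ```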
    pub fn new(offset: isize) -> Self {
        if !is_aligned(offset.unsigned_abs(), PAGE_SIZE) {
            panic!(
                "Invalid offset {}, must be a multiple of page size {}.",
                offset, PAGE_SIZE,
            );
        }
        Self {
            offset,
            _phantom: PhantomData,
        }
    }

    fn virtual_to_physical(&self, va: VirtualAddress) -> Result<PhysicalAddress, MapError> {
        if let Some(pa) = checked_add_to_unsigned(va.0 as isize, self.offset) {
            Ok(PhysicalAddress(pa))
        } else {
            Err(MapError::InvalidVirtualAddress(va))
        }
    }
}

impl<A: PagingAttributes> Translation<A> for LinearTranslation<A> {
    fn allocate_table(&mut self) -> (NonNull<PageTable<A>>, PhysicalAddress) {
        let table = PageTable::new();
        // Assume that the same linear mapping is used everywhere.
        let va = VirtualAddress(table.as_ptr() as usize);

        let pa = self.virtual_to_physical(va).expect(
            "Allocated subtable with virtual address which doesn't correspond to any physical address."
        );
        (table, pa)
    }

    unsafe fn deallocate_table(&mut self, page_table: NonNull<PageTable<A>>) {
        // SAFETY: Our caller promises that the memory was allocated by `allocate_table` on this
        // `LinearTranslation` and not yet deallocated. `allocate_table` used the global allocator
        // and appropriate layout by calling `PageTable::new()`.
        unsafe {
            deallocate(page_table);
        }
    }

    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable<A>> {
        let signed_pa = pa.0 as isize;
        if signed_pa < 0 {
            panic!("Invalid physical address {} for pagetable", pa);
        }
        if let Some(va) = signed_pa.checked_sub(self.offset) {
            if let Some(ptr) = NonNull::new(va as *mut PageTable<A>) {
                ptr
            } else {
                panic!(
                    "Invalid physical address {} for pagetable (translated to virtual address 0)",
                    pa
                )
            }
        } else {
            panic!("Invalid physical address {} for pagetable", pa);
        }
    }
}

/// Adds two signed values, returning an unsigned value, or `None` if the result would be negative
/// or would overflow.
fn checked_add_to_unsigned(a: isize, b: isize) -> Option<usize> {
    a.checked_add(b)?.try_into().ok()
}

/// Manages a page table using linear mapping, where every virtual address is either unmapped or
/// mapped to an IPA with a fixed offset.
///
/// This assumes that the same linear mapping is used both for the page table being managed, and
/// for the code that is managing it.
#[derive(Debug)]
pub struct LinearMap<R: TranslationRegime> {
    mapping: Mapping<LinearTranslation<R::Attributes>, R>,
}

impl<R: TranslationRegime<Asid = (), VaRange = ()>> LinearMap<R> {
    /// Creates a new page table with linear mapping, with the given root level and offset, for
    /// use in the given TTBR.
    ///
    /// This will map any virtual address `va` which is added to the table to the physical address
    /// `va + offset`.
    ///
    /// The `offset` must be a multiple of [`PAGE_SIZE`]; if not this will panic.
    pub fn new(rootlevel: usize, offset: isize, regime: R) -> Self {
        Self {
            mapping: Mapping::new(LinearTranslation::new(offset), rootlevel, regime),
        }
    }
}

impl<R: TranslationRegime<Asid = usize, VaRange = VaRange>> LinearMap<R> {
    /// Creates a new page table with linear mapping, with the given ASID, root level and offset,
    /// for use in the given TTBR.
    ///
    /// This will map any virtual address `va` which is added to the table to the physical address
    /// `va + offset`.
    ///
    /// The `offset` must be a multiple of [`PAGE_SIZE`]; if not this will panic.
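    ///
    /// # Example
    ///
    /// A minimal usage sketch, added here for illustration and mirroring this crate's own tests;
    /// the import paths and the choice of `El1And0`/`El1Attributes` are assumptions and may need
    /// adjusting:
    ///
    /// ```ignore
    /// use aarch64_paging::{
    ///     descriptor::El1Attributes,
    ///     linearmap::LinearMap,
    ///     paging::{El1And0, MemoryRegion, PAGE_SIZE, VaRange},
    /// };
    ///
    /// // Virtual addresses are mapped to physical addresses one page higher.
    /// let mut map = LinearMap::with_asid(1, 1, PAGE_SIZE as isize, El1And0, VaRange::Lower);
    /// map.map_range(
    ///     &MemoryRegion::new(0, 2 * PAGE_SIZE),
    ///     El1Attributes::ATTRIBUTE_INDEX_1
    ///         | El1Attributes::INNER_SHAREABLE
    ///         | El1Attributes::VALID
    ///         | El1Attributes::ACCESSED,
    /// )
    /// .unwrap();
    /// ```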
    pub fn with_asid(
        asid: usize,
        rootlevel: usize,
        offset: isize,
        regime: R,
        va_range: VaRange,
    ) -> Self {
        Self {
            mapping: Mapping::with_asid_and_va_range(
                LinearTranslation::new(offset),
                asid,
                rootlevel,
                regime,
                va_range,
            ),
        }
    }
}

impl<R: TranslationRegime> LinearMap<R> {
    /// Returns the size in bytes of the virtual address space which can be mapped in this page
    /// table.
    ///
    /// This is a function of the chosen root level.
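    ///
    /// # Example
    ///
    /// A sketch, added for illustration: with 4 KiB pages a root level of 1 covers a 512 GiB
    /// virtual address space (the paths and types are assumptions mirroring this crate's tests):
    ///
    /// ```ignore
    /// use aarch64_paging::{linearmap::LinearMap, paging::{El1And0, VaRange}};
    ///
    /// let map = LinearMap::with_asid(1, 1, 0x1000, El1And0, VaRange::Lower);
    /// assert_eq!(map.size(), 1 << 39);
    /// ```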
    pub fn size(&self) -> usize {
        self.mapping.size()
    }

    /// Activates the page table by programming the physical address of the root page table into
    /// `TTBRn_ELx`, along with the configured ASID. The previous value of `TTBRn_ELx` is returned
    /// so that it may later be restored by passing it to [`deactivate`](Self::deactivate).
    ///
    /// In test builds or builds that do not target aarch64, the `TTBRn_ELx` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the page table doesn't unmap any memory which the program is
    /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
    /// dropped as long as its mappings are required, as it will automatically be deactivated when
    /// it is dropped.
    pub unsafe fn activate(&mut self) -> usize {
        // SAFETY: We delegate the safety requirements to our caller.
        unsafe { self.mapping.activate() }
    }

    /// Deactivates the page table, by setting `TTBRn_ELx` to the provided value, and invalidating
    /// the TLB for this page table's configured ASID. The provided TTBR value should be the value
    /// returned by the preceding [`activate`](Self::activate) call.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBRn_ELx` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the previous page table which this is switching back to doesn't
    /// unmap any memory which the program is using.
    pub unsafe fn deactivate(&mut self, previous_ttbr: usize) {
        // SAFETY: We delegate the safety requirements to our caller.
        unsafe {
            self.mapping.deactivate(previous_ttbr);
        }
    }

    /// Maps the given range of virtual addresses to the corresponding physical addresses with the
    /// given flags.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains [`PagingAttributes::VALID`], otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
    /// address within the `range` would result in overflow.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
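    ///
    /// # Example
    ///
    /// A sketch, added for illustration (the `El1And0` regime, `El1Attributes` flags and import
    /// paths are assumptions borrowed from this crate's tests), showing that entries are only
    /// made valid when `flags` contains the `VALID` attribute:
    ///
    /// ```ignore
    /// use aarch64_paging::{
    ///     descriptor::El1Attributes,
    ///     linearmap::LinearMap,
    ///     paging::{El1And0, MemoryRegion, PAGE_SIZE, VaRange},
    /// };
    ///
    /// let mut map = LinearMap::with_asid(1, 1, PAGE_SIZE as isize, El1And0, VaRange::Lower);
    ///
    /// // Without `VALID` the descriptors are written but remain invalid, so the range can be
    /// // prepared now and enabled later (e.g. with `modify_range`).
    /// map.map_range(
    ///     &MemoryRegion::new(0, PAGE_SIZE),
    ///     El1Attributes::ATTRIBUTE_INDEX_1 | El1Attributes::INNER_SHAREABLE,
    /// )
    /// .unwrap();
    /// ```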
    pub fn map_range(
        &mut self,
        range: &MemoryRegion,
        flags: R::Attributes,
    ) -> Result<(), MapError> {
        self.map_range_with_constraints(range, flags, Constraints::empty())
    }

    /// Maps the given range of virtual addresses to the corresponding physical addresses with the
    /// given flags, taking the given constraints into account.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains [`PagingAttributes::VALID`], otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
    /// address within the `range` would result in overflow.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
    pub fn map_range_with_constraints(
        &mut self,
        range: &MemoryRegion,
        flags: R::Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        let pa = self
            .mapping
            .translation()
            .virtual_to_physical(range.start())?;
        self.mapping.map_range(range, pa, flags, constraints)
    }

    /// Applies the provided updater function to the page table descriptors covering a given
    /// memory range.
    ///
    /// This may involve splitting block entries if the provided range is not currently mapped
    /// down to its precise boundaries. For visiting all the descriptors covering a memory range
    /// without potential splitting (and no descriptor updates), use
    /// [`walk_range`](Self::walk_range) instead.
    ///
    /// The updater function receives the following arguments:
    ///
    /// - The virtual address range mapped by each page table descriptor. A new descriptor will
    ///   have been allocated before the invocation of the updater function if a page table split
    ///   was needed.
    /// - A mutable reference to the page table descriptor that permits modifications. The level
    ///   of the translation table which the descriptor belongs to can be queried from the
    ///   descriptor itself.
    ///
    /// The updater function should return:
    ///
    /// - `Ok` to continue updating the remaining entries.
    /// - `Err` to signal an error and stop updating the remaining entries.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
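    ///
    /// # Example
    ///
    /// A sketch, added for illustration and based on this crate's tests (the `El1Attributes`
    /// flags, the `SWFLAG_0` software flag and the import paths are assumptions), that sets a
    /// software flag on every block and page descriptor in a range:
    ///
    /// ```ignore
    /// use aarch64_paging::{
    ///     descriptor::El1Attributes,
    ///     linearmap::LinearMap,
    ///     paging::{El1And0, MemoryRegion, PAGE_SIZE, VaRange},
    /// };
    ///
    /// let mut map = LinearMap::with_asid(1, 1, PAGE_SIZE as isize, El1And0, VaRange::Lower);
    /// map.map_range(
    ///     &MemoryRegion::new(0, 2 * PAGE_SIZE),
    ///     El1Attributes::ATTRIBUTE_INDEX_1 | El1Attributes::INNER_SHAREABLE,
    /// )
    /// .unwrap();
    ///
    /// // Set SWFLAG_0 on the leaf descriptors, leaving table descriptors untouched.
    /// map.modify_range(&MemoryRegion::new(0, 2 * PAGE_SIZE), &|_range, entry| {
    ///     if !entry.is_table() {
    ///         entry.modify_flags(
    ///             El1Attributes::SWFLAG_0,
    ///             El1Attributes::from_bits(0usize).unwrap(),
    ///         )?;
    ///     }
    ///     Ok(())
    /// })
    /// .unwrap();
    /// ```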
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut UpdatableDescriptor<R::Attributes>) -> Result<(), ()> + ?Sized,
    {
        self.mapping.modify_range(range, f)
    }

    /// Applies the provided callback function to the page table descriptors covering a given
    /// memory range.
    ///
    /// The callback function receives the following arguments:
    ///
    /// - The range covered by the current step in the walk. This is always a subrange of `range`
    ///   even when the descriptor covers a region that exceeds it.
    /// - The page table descriptor itself.
    /// - The level of the translation table the descriptor belongs to.
    ///
    /// The callback function should return:
    ///
    /// - `Ok` to continue visiting the remaining entries.
    /// - `Err` to signal an error and stop visiting the remaining entries.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
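    ///
    /// # Example
    ///
    /// A sketch, added for illustration (it assumes a `map` built as in the examples above and
    /// that `Descriptor::is_valid` is available), that counts the valid descriptors covering a
    /// range without modifying anything:
    ///
    /// ```ignore
    /// use aarch64_paging::paging::{MemoryRegion, PAGE_SIZE};
    ///
    /// let mut valid_descriptors = 0;
    /// map.walk_range(
    ///     &MemoryRegion::new(0, 2 * PAGE_SIZE),
    ///     &mut |_range, descriptor, _level| {
    ///         if descriptor.is_valid() {
    ///             valid_descriptors += 1;
    ///         }
    ///         Ok(())
    ///     },
    /// )
    /// .unwrap();
    /// ```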
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor<R::Attributes>, usize) -> Result<(), ()>,
    {
        self.mapping.walk_range(range, f)
    }

    /// Looks for subtables whose entries are all empty and replaces them with a single empty entry,
    /// freeing the subtable.
    ///
    /// This requires walking the whole hierarchy of pagetables, so you may not want to call it
    /// every time a region is unmapped. You could instead call it when the system is under memory
    /// pressure.
    pub fn compact_subtables(&mut self) {
        self.mapping.compact_subtables();
    }

    /// Returns the physical address of the root table.
    ///
    /// This may be used to activate the page table by setting the appropriate TTBRn_ELx if you wish
    /// to do so yourself rather than by calling [`activate`](Self::activate). Make sure to call
    /// [`mark_active`](Self::mark_active) after doing so.
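    ///
    /// # Example
    ///
    /// A sketch of the manual-activation pattern, added for illustration (how the TTBR is
    /// actually programmed is platform-specific and left as a comment):
    ///
    /// ```ignore
    /// let root = map.root_address();
    /// // Program the appropriate TTBRn_ELx with `root` yourself, e.g. via inline assembly,
    /// // then tell the map that it is live so that break-before-make checks are enforced:
    /// map.mark_active();
    /// ```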
    pub fn root_address(&self) -> PhysicalAddress {
        self.mapping.root_address()
    }

    /// Marks the page table as active.
    ///
    /// This should be called if the page table is manually activated by calling
    /// [`root_address`](Self::root_address) and setting some TTBR with it. This will cause
    /// [`map_range`](Self::map_range) and [`modify_range`](Self::modify_range) to perform extra
    /// checks to avoid violating break-before-make requirements.
    ///
    /// It is called automatically by [`activate`](Self::activate).
    pub fn mark_active(&mut self) {
        self.mapping.mark_active();
    }

    /// Marks the page table as inactive.
    ///
    /// This may be called after manually disabling the use of the page table, such as by setting
    /// the relevant TTBR to a different address.
    ///
    /// It is called automatically by [`deactivate`](Self::deactivate).
    pub fn mark_inactive(&mut self) {
        self.mapping.mark_inactive();
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::descriptor::El1Attributes;
    use crate::paging::El1And0;
    use crate::{
        MapError,
        paging::{BITS_PER_LEVEL, MemoryRegion, PAGE_SIZE},
    };

    const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39;
    const GIB_512_S: isize = 512 * 1024 * 1024 * 1024;
    const GIB_512: usize = 512 * 1024 * 1024 * 1024;
    const NORMAL_CACHEABLE: El1Attributes =
        El1Attributes::ATTRIBUTE_INDEX_1.union(El1Attributes::INNER_SHAREABLE);

    #[test]
    fn map_valid() {
        // A single byte at the start of the address space.
        let mut pagetable = LinearMap::with_asid(1, 1, 4096, El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, 1),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED
            ),
            Ok(())
        );

        // Two pages at the start of the address space.
        let mut pagetable = LinearMap::with_asid(1, 1, 4096, El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, PAGE_SIZE * 2),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED
            ),
            Ok(())
        );

        // A single byte at the end of the address space.
        let mut pagetable = LinearMap::with_asid(1, 1, 4096, El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
                ),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED
            ),
            Ok(())
        );

        // The entire valid address space. Use an offset that is a multiple of the level 2 block
        // size to avoid mapping everything as pages as that is really slow.
        const LEVEL_2_BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
        let mut pagetable =
            LinearMap::with_asid(1, 1, LEVEL_2_BLOCK_SIZE as isize, El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED
            ),
            Ok(())
        );
    }

    #[test]
    fn map_valid_negative_offset() {
        // A single byte which maps to IPA 0.
        let mut pagetable =
            LinearMap::with_asid(1, 1, -(PAGE_SIZE as isize), El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE + 1),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED
            ),
            Ok(())
        );

        // Two pages at the start of the address space.
        let mut pagetable =
            LinearMap::with_asid(1, 1, -(PAGE_SIZE as isize), El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE * 3),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED
            ),
            Ok(())
        );

        // A single byte at the end of the address space.
        let mut pagetable =
            LinearMap::with_asid(1, 1, -(PAGE_SIZE as isize), El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
                ),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED
            ),
            Ok(())
        );

        // The entire valid address space. Use an offset that is a multiple of the level 2 block
        // size to avoid mapping everything as pages as that is really slow.
        const LEVEL_2_BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
        let mut pagetable = LinearMap::with_asid(
            1,
            1,
            -(LEVEL_2_BLOCK_SIZE as isize),
            El1And0,
            VaRange::Lower,
        );
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(LEVEL_2_BLOCK_SIZE, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED
            ),
            Ok(())
        );
    }

    #[test]
    fn map_out_of_range() {
        let mut pagetable = LinearMap::with_asid(1, 1, 4096, El1And0, VaRange::Lower);

        // One byte, just past the edge of the valid range.
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
                ),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );

        // From 0 to just past the valid range.
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );
    }

    #[test]
    fn map_invalid_offset() {
        let mut pagetable = LinearMap::with_asid(1, 1, -4096, El1And0, VaRange::Lower);

        // One byte, with an offset which would map it to a negative IPA.
        assert_eq!(
            pagetable.map_range(&MemoryRegion::new(0, 1), NORMAL_CACHEABLE),
            Err(MapError::InvalidVirtualAddress(VirtualAddress(0)))
        );
    }

    #[test]
    fn physical_address_in_range_ttbr0() {
        let translation = LinearTranslation::<El1Attributes>::new(4096);
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(8192)),
            NonNull::new(4096 as *mut PageTable<El1Attributes>).unwrap(),
        );
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(GIB_512 + 4096)),
            NonNull::new(GIB_512 as *mut PageTable<El1Attributes>).unwrap(),
        );
    }

    #[test]
    #[should_panic]
    fn physical_address_to_zero_ttbr0() {
        let translation: LinearTranslation<El1Attributes> = LinearTranslation::new(4096);
        translation.physical_to_virtual(PhysicalAddress(4096));
    }

    #[test]
    #[should_panic]
    fn physical_address_out_of_range_ttbr0() {
        let translation: LinearTranslation<El1Attributes> = LinearTranslation::new(4096);
        translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize));
    }

    #[test]
    fn physical_address_in_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to one page above the bottom
        // of physical address space.
        let translation = LinearTranslation::new(GIB_512_S + 4096);
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(8192)),
            NonNull::new((4096 - GIB_512_S) as *mut PageTable<El1Attributes>).unwrap(),
        );
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(GIB_512)),
            NonNull::new(-4096_isize as *mut PageTable<El1Attributes>).unwrap(),
        );
    }

    #[test]
    #[should_panic]
    fn physical_address_to_zero_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation: LinearTranslation<El1Attributes> = LinearTranslation::new(GIB_512_S);
        translation.physical_to_virtual(PhysicalAddress(GIB_512));
    }

    #[test]
    #[should_panic]
    fn physical_address_out_of_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation: LinearTranslation<El1Attributes> = LinearTranslation::new(GIB_512_S);
        translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize));
    }

    #[test]
    fn virtual_address_out_of_range() {
        let translation: LinearTranslation<El1Attributes> = LinearTranslation::new(-4096);
        let va = VirtualAddress(1024);
        assert_eq!(
            translation.virtual_to_physical(va),
            Err(MapError::InvalidVirtualAddress(va))
        )
    }

    #[test]
    fn virtual_address_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation: LinearTranslation<El1Attributes> = LinearTranslation::new(GIB_512_S);

        // The first page in the region covered by TTBR1.
        assert_eq!(
            translation.virtual_to_physical(VirtualAddress(0xffff_ff80_0000_0000)),
            Ok(PhysicalAddress(0))
        );
        // The last page in the region covered by TTBR1.
        assert_eq!(
            translation.virtual_to_physical(VirtualAddress(0xffff_ffff_ffff_f000)),
            Ok(PhysicalAddress(0x7f_ffff_f000))
        );
    }

    #[test]
    fn block_mapping() {
        // Test that block mapping is used when the PA is appropriately aligned...
        let mut pagetable = LinearMap::with_asid(1, 1, 1 << 30, El1And0, VaRange::Lower);
        pagetable
            .map_range(
                &MemoryRegion::new(0, 1 << 30),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED,
            )
            .unwrap();
        assert_eq!(
            pagetable.mapping.root.mapping_level(VirtualAddress(0)),
            Some(1)
        );

        // ...but not when it is not.
        let mut pagetable = LinearMap::with_asid(1, 1, 1 << 29, El1And0, VaRange::Lower);
        pagetable
            .map_range(
                &MemoryRegion::new(0, 1 << 30),
                NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED,
            )
            .unwrap();
        assert_eq!(
            pagetable.mapping.root.mapping_level(VirtualAddress(0)),
            Some(2)
        );
    }

    fn make_map() -> LinearMap<El1And0> {
        let mut lmap = LinearMap::with_asid(1, 1, 4096, El1And0, VaRange::Lower);
        // Mapping VA range 0x0 - 0x2000 to PA range 0x1000 - 0x3000
        lmap.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), NORMAL_CACHEABLE)
            .unwrap();
        lmap
    }

    #[test]
    fn update_backwards_range() {
        let mut lmap = make_map();
        assert!(
            lmap.modify_range(&MemoryRegion::new(PAGE_SIZE * 2, 1), &|_range, entry| {
                entry.modify_flags(
                    El1Attributes::SWFLAG_0,
                    El1Attributes::from_bits(0usize).unwrap(),
                )
            })
            .is_err()
        );
    }

    #[test]
    fn update_range() {
        let mut lmap = make_map();
        lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry| {
            if !entry.is_table() {
                entry.modify_flags(
                    El1Attributes::SWFLAG_0,
                    El1Attributes::from_bits(0usize).unwrap(),
                )?;
            }
            Ok(())
        })
        .unwrap();
        lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry| {
            if !entry.is_table() {
                assert!(entry.flags().contains(El1Attributes::SWFLAG_0));
                assert_eq!(range.end() - range.start(), PAGE_SIZE);
            }
            Ok(())
        })
        .unwrap();
    }

    #[test]
    fn breakup_invalid_block() {
        const BLOCK_RANGE: usize = 0x200000;

        let mut lmap = LinearMap::with_asid(1, 1, 0x1000, El1And0, VaRange::Lower);
        lmap.map_range(
            &MemoryRegion::new(0, BLOCK_RANGE),
            NORMAL_CACHEABLE | El1Attributes::NON_GLOBAL | El1Attributes::SWFLAG_0,
        )
        .unwrap();
        lmap.map_range(
            &MemoryRegion::new(0, PAGE_SIZE),
            NORMAL_CACHEABLE
                | El1Attributes::NON_GLOBAL
                | El1Attributes::VALID
                | El1Attributes::ACCESSED,
        )
        .unwrap();
        lmap.modify_range(&MemoryRegion::new(0, BLOCK_RANGE), &|range, entry| {
            if entry.level() == 3 {
                let has_swflag = entry.flags().contains(El1Attributes::SWFLAG_0);
                let is_first_page = range.start().0 == 0usize;
                assert!(has_swflag != is_first_page);
            }
            Ok(())
        })
        .unwrap();
    }

    #[test]
    #[should_panic]
    fn split_live_block_mapping() {
        const BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
        let mut lmap = LinearMap::with_asid(1, 1, BLOCK_SIZE as isize, El1And0, VaRange::Lower);
        lmap.map_range(
            &MemoryRegion::new(0, BLOCK_SIZE),
            NORMAL_CACHEABLE
                | El1Attributes::NON_GLOBAL
                | El1Attributes::READ_ONLY
                | El1Attributes::VALID
                | El1Attributes::ACCESSED,
        )
        .unwrap();
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        let ttbr = unsafe { lmap.activate() };
        lmap.map_range(
            &MemoryRegion::new(0, PAGE_SIZE),
            NORMAL_CACHEABLE
                | El1Attributes::NON_GLOBAL
                | El1Attributes::READ_ONLY
                | El1Attributes::VALID
                | El1Attributes::ACCESSED,
        )
        .unwrap();
        lmap.map_range(
            &MemoryRegion::new(PAGE_SIZE, 2 * PAGE_SIZE),
            NORMAL_CACHEABLE
                | El1Attributes::NON_GLOBAL
                | El1Attributes::READ_ONLY
                | El1Attributes::VALID
                | El1Attributes::ACCESSED,
        )
        .unwrap();
        let r = lmap.map_range(
            &MemoryRegion::new(PAGE_SIZE, 2 * PAGE_SIZE),
            NORMAL_CACHEABLE
                | El1Attributes::NON_GLOBAL
                | El1Attributes::VALID
                | El1Attributes::ACCESSED,
        );
        unsafe { lmap.deactivate(ttbr) };
        r.unwrap();
    }
}