use crate::MapError;
#[cfg(feature = "alloc")]
use alloc::alloc::{Layout, alloc_zeroed, dealloc, handle_alloc_error};
use bitflags::bitflags;
use core::fmt::{self, Debug, Display, Formatter};
use core::marker::PhantomData;
use core::ops::{Add, Range, Sub};
use core::ptr::NonNull;
use core::sync::atomic::{AtomicUsize, Ordering};

const PAGE_SHIFT: usize = 12;

const LEAF_LEVEL: usize = 3;

pub const PAGE_SIZE: usize = 1 << PAGE_SHIFT;

pub const BITS_PER_LEVEL: usize = PAGE_SHIFT - 3;

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum VaRange {
    Lower,
    Upper,
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum TranslationRegime {
    El3,
    El2,
    El2And0,
    El1And0,
}

impl TranslationRegime {
    pub(crate) fn supports_asid(self) -> bool {
        matches!(self, Self::El2And0 | Self::El1And0)
    }
}

#[derive(Copy, Clone, Default, Eq, Ord, PartialEq, PartialOrd)]
#[repr(transparent)]
pub struct VirtualAddress(pub usize);

impl Display for VirtualAddress {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "{:#018x}", self.0)
    }
}

impl Debug for VirtualAddress {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "VirtualAddress({})", self)
    }
}

impl Sub for VirtualAddress {
    type Output = usize;

    fn sub(self, other: Self) -> Self::Output {
        self.0 - other.0
    }
}

impl Add<usize> for VirtualAddress {
    type Output = Self;

    fn add(self, other: usize) -> Self {
        Self(self.0 + other)
    }
}

impl Sub<usize> for VirtualAddress {
    type Output = Self;

    fn sub(self, other: usize) -> Self {
        Self(self.0 - other)
    }
}

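/// An aligned region of virtual address space, stored as a `Range<VirtualAddress>` whose bounds
/// are rounded outwards to page boundaries on construction.
///
/// A minimal sketch of that rounding behaviour, mirroring the `display_memory_region` test below
/// (illustrative only):
///
/// ```ignore
/// let region = MemoryRegion::new(0x1234, 0x56789);
/// assert_eq!(region.start(), VirtualAddress(0x1000));
/// assert_eq!(region.end(), VirtualAddress(0x57000));
/// ```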
#[derive(Clone, Eq, PartialEq)]
pub struct MemoryRegion(Range<VirtualAddress>);

#[derive(Copy, Clone, Default, Eq, Ord, PartialEq, PartialOrd)]
#[repr(transparent)]
pub struct PhysicalAddress(pub usize);

impl Display for PhysicalAddress {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "{:#018x}", self.0)
    }
}

impl Debug for PhysicalAddress {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "PhysicalAddress({})", self)
    }
}

impl Sub for PhysicalAddress {
    type Output = usize;

    fn sub(self, other: Self) -> Self::Output {
        self.0 - other.0
    }
}

impl Add<usize> for PhysicalAddress {
    type Output = Self;

    fn add(self, other: usize) -> Self {
        Self(self.0 + other)
    }
}

impl Sub<usize> for PhysicalAddress {
    type Output = Self;

    fn sub(self, other: usize) -> Self {
        Self(self.0 - other)
    }
}

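/// Returns the size in bytes of the address space covered by a single entry in the page table at
/// the given level.
///
/// A quick sanity check of the arithmetic for the 4 KiB granule used here (illustrative only; the
/// function is crate-internal, so this is not a runnable doctest):
///
/// ```ignore
/// assert_eq!(granularity_at_level(3), 1 << 12); // 4 KiB page
/// assert_eq!(granularity_at_level(2), 1 << 21); // 2 MiB block
/// assert_eq!(granularity_at_level(1), 1 << 30); // 1 GiB block
/// assert_eq!(granularity_at_level(0), 1 << 39); // 512 GiB per level-0 entry
/// ```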
pub(crate) fn granularity_at_level(level: usize) -> usize {
    PAGE_SIZE << ((LEAF_LEVEL - level) * BITS_PER_LEVEL)
}

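/// Provides the mapping code with page-table allocation and physical-to-virtual address
/// conversion, as implied by its three methods.
///
/// A rough identity-mapping sketch, assuming the `alloc` feature (the crate's own `IdTranslation`
/// plays this role; this is an illustration, not the canonical implementation):
///
/// ```ignore
/// struct Identity;
///
/// impl Translation for Identity {
///     fn allocate_table(&mut self) -> (NonNull<PageTable>, PhysicalAddress) {
///         let table = PageTable::new();
///         // Identity mapping: the table's virtual address doubles as its physical address.
///         (table, PhysicalAddress(table.as_ptr() as usize))
///     }
///
///     unsafe fn deallocate_table(&mut self, page_table: NonNull<PageTable>) {
///         unsafe { deallocate(page_table) };
///     }
///
///     fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable> {
///         NonNull::new(pa.0 as *mut PageTable).expect("non-null physical address")
///     }
/// }
/// ```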
pub trait Translation {
    fn allocate_table(&mut self) -> (NonNull<PageTable>, PhysicalAddress);

    unsafe fn deallocate_table(&mut self, page_table: NonNull<PageTable>);

    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable>;
}

impl MemoryRegion {
    pub const fn new(start: usize, end: usize) -> MemoryRegion {
        MemoryRegion(
            VirtualAddress(align_down(start, PAGE_SIZE))..VirtualAddress(align_up(end, PAGE_SIZE)),
        )
    }

    pub const fn start(&self) -> VirtualAddress {
        self.0.start
    }

    pub const fn end(&self) -> VirtualAddress {
        self.0.end
    }

    pub const fn len(&self) -> usize {
        self.0.end.0 - self.0.start.0
    }

    pub const fn is_empty(&self) -> bool {
        self.0.start.0 == self.0.end.0
    }

    fn split(&self, level: usize) -> ChunkedIterator {
        ChunkedIterator {
            range: self,
            granularity: granularity_at_level(level),
            start: self.0.start.0,
        }
    }

    pub(crate) fn is_block(&self, level: usize) -> bool {
        let gran = granularity_at_level(level);
        (self.0.start.0 | self.0.end.0) & (gran - 1) == 0
    }
}

impl From<Range<VirtualAddress>> for MemoryRegion {
    fn from(range: Range<VirtualAddress>) -> Self {
        Self::new(range.start.0, range.end.0)
    }
}

impl Display for MemoryRegion {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "{}..{}", self.0.start, self.0.end)
    }
}

impl Debug for MemoryRegion {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        Display::fmt(self, f)
    }
}

bitflags! {
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    pub struct Constraints: usize {
        const NO_BLOCK_MAPPINGS = 1 << 0;
        const NO_CONTIGUOUS_HINT = 1 << 1;
    }
}

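/// Owns a root page table plus all subtables reachable from it, for a particular translation
/// regime and virtual address range.
///
/// A minimal usage sketch, assuming the `alloc` feature and the crate's `IdTranslation` identity
/// mapping; the attribute flags here are illustrative rather than a recommended configuration:
///
/// ```ignore
/// let mut root = RootTable::<IdTranslation>::new(
///     IdTranslation,
///     1,
///     TranslationRegime::El1And0,
///     VaRange::Lower,
/// );
/// root.map_range(
///     &MemoryRegion::new(0x8000_0000, 0x8020_0000),
///     PhysicalAddress(0x8000_0000),
///     Attributes::VALID | Attributes::ACCESSED | Attributes::ATTRIBUTE_INDEX_1,
///     Constraints::empty(),
/// )
/// .unwrap();
/// ```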
pub struct RootTable<T: Translation> {
    table: PageTableWithLevel<T>,
    translation: T,
    pa: PhysicalAddress,
    translation_regime: TranslationRegime,
    va_range: VaRange,
}

impl<T: Translation> RootTable<T> {
    pub fn new(
        mut translation: T,
        level: usize,
        translation_regime: TranslationRegime,
        va_range: VaRange,
    ) -> Self {
        if level > LEAF_LEVEL {
            panic!("Invalid root table level {}.", level);
        }
        if !translation_regime.supports_asid() && va_range != VaRange::Lower {
            panic!(
                "{:?} doesn't have an upper virtual address range.",
                translation_regime
            );
        }
        let (table, pa) = PageTableWithLevel::new(&mut translation, level);
        RootTable {
            table,
            translation,
            pa,
            translation_regime,
            va_range,
        }
    }

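    /// Returns the size in bytes of the virtual address space which can be mapped in this page
    /// table.
    ///
    /// For example (illustrative), a root table at level 1 covers
    /// `granularity_at_level(1) << BITS_PER_LEVEL`, i.e. 1 GiB × 512 = 512 GiB.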
    pub fn size(&self) -> usize {
        granularity_at_level(self.table.level) << BITS_PER_LEVEL
    }

    pub fn map_range(
        &mut self,
        range: &MemoryRegion,
        pa: PhysicalAddress,
        flags: Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        if flags.contains(Attributes::TABLE_OR_PAGE) {
            return Err(MapError::InvalidFlags(Attributes::TABLE_OR_PAGE));
        }
        self.verify_region(range)?;
        self.table
            .map_range(&mut self.translation, range, pa, flags, constraints);
        Ok(())
    }

    pub fn to_physical(&self) -> PhysicalAddress {
        self.pa
    }

    pub fn va_range(&self) -> VaRange {
        self.va_range
    }

    pub fn translation_regime(&self) -> TranslationRegime {
        self.translation_regime
    }

    pub fn translation(&self) -> &T {
        &self.translation
    }

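    /// Applies the given updater function to the descriptors covering the given memory range,
    /// splitting block mappings where necessary.
    ///
    /// A hedged sketch of a typical update (illustrative; the chosen flags are an example, not a
    /// recommendation):
    ///
    /// ```ignore
    /// // Mark an already-mapped range as read-only.
    /// root.modify_range(&MemoryRegion::new(0x8000_0000, 0x8020_0000), &|_chunk, desc, _level| {
    ///     desc.modify_flags(Attributes::READ_ONLY, Attributes::empty());
    ///     Ok(())
    /// })
    /// .unwrap();
    /// ```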
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
    {
        self.verify_region(range)?;
        self.table.modify_range(&mut self.translation, range, f)
    }

    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
    {
        self.visit_range(range, &mut |mr, desc, level| {
            f(mr, desc, level).map_err(|_| MapError::PteUpdateFault(desc.bits()))
        })
    }

    pub(crate) fn visit_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), MapError>,
    {
        self.verify_region(range)?;
        self.table.visit_range(&self.translation, range, f)
    }

    #[cfg(test)]
    pub(crate) fn mapping_level(&self, va: VirtualAddress) -> Option<usize> {
        self.table.mapping_level(&self.translation, va)
    }

    fn verify_region(&self, region: &MemoryRegion) -> Result<(), MapError> {
        if region.end() < region.start() {
            return Err(MapError::RegionBackwards(region.clone()));
        }
        match self.va_range {
            VaRange::Lower => {
                if (region.start().0 as isize) < 0 {
                    return Err(MapError::AddressRange(region.start()));
                } else if region.end().0 > self.size() {
                    return Err(MapError::AddressRange(region.end()));
                }
            }
            VaRange::Upper => {
                if region.start().0 as isize >= 0
                    || (region.start().0 as isize).unsigned_abs() > self.size()
                {
                    return Err(MapError::AddressRange(region.start()));
                }
            }
        }
        Ok(())
    }
}

impl<T: Translation> Debug for RootTable<T> {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        writeln!(
            f,
            "RootTable {{ pa: {}, level: {}, table:",
            self.pa, self.table.level
        )?;
        self.table.fmt_indented(f, &self.translation, 0)?;
        write!(f, "}}")
    }
}

impl<T: Translation> Drop for RootTable<T> {
    fn drop(&mut self) {
        unsafe { self.table.free(&mut self.translation) }
    }
}

struct ChunkedIterator<'a> {
    range: &'a MemoryRegion,
    granularity: usize,
    start: usize,
}

impl Iterator for ChunkedIterator<'_> {
    type Item = MemoryRegion;

    fn next(&mut self) -> Option<MemoryRegion> {
        if !self.range.0.contains(&VirtualAddress(self.start)) {
            return None;
        }
        let end = self
            .range
            .0
            .end
            .0
            .min((self.start | (self.granularity - 1)) + 1);
        let c = MemoryRegion::new(self.start, end);
        self.start = end;
        Some(c)
    }
}

bitflags! {
    #[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
    pub struct Attributes: usize {
        const VALID = 1 << 0;
        const TABLE_OR_PAGE = 1 << 1;

        const ATTRIBUTE_INDEX_0 = 0 << 2;
        const ATTRIBUTE_INDEX_1 = 1 << 2;
        const ATTRIBUTE_INDEX_2 = 2 << 2;
        const ATTRIBUTE_INDEX_3 = 3 << 2;
        const ATTRIBUTE_INDEX_4 = 4 << 2;
        const ATTRIBUTE_INDEX_5 = 5 << 2;
        const ATTRIBUTE_INDEX_6 = 6 << 2;
        const ATTRIBUTE_INDEX_7 = 7 << 2;

        const OUTER_SHAREABLE = 2 << 8;
        const INNER_SHAREABLE = 3 << 8;

        const NS = 1 << 5;
        const USER = 1 << 6;
        const READ_ONLY = 1 << 7;
        const ACCESSED = 1 << 10;
        const NON_GLOBAL = 1 << 11;
        const GP = 1 << 50;
        const DBM = 1 << 51;
        const PXN = 1 << 53;
        const UXN = 1 << 54;

        const SWFLAG_0 = 1 << 55;
        const SWFLAG_1 = 1 << 56;
        const SWFLAG_2 = 1 << 57;
        const SWFLAG_3 = 1 << 58;

        const PXN_TABLE = 1 << 59;
        const XN_TABLE = 1 << 60;
        const AP_TABLE_NO_EL0 = 1 << 61;
        const AP_TABLE_NO_WRITE = 1 << 62;
        const NS_TABLE = 1 << 63;
    }
}

impl Attributes {
    pub const SHAREABILITY_MASK: Self = Self::INNER_SHAREABLE;

    pub const ATTRIBUTE_INDEX_MASK: Self = Self::ATTRIBUTE_INDEX_7;
}

#[derive(Debug)]
struct PageTableWithLevel<T: Translation> {
    table: NonNull<PageTable>,
    level: usize,
    _translation: PhantomData<T>,
}

unsafe impl<T: Translation + Send> Send for PageTableWithLevel<T> {}

unsafe impl<T: Translation + Sync> Sync for PageTableWithLevel<T> {}

impl<T: Translation> PageTableWithLevel<T> {
    fn new(translation: &mut T, level: usize) -> (Self, PhysicalAddress) {
        assert!(level <= LEAF_LEVEL);
        let (table, pa) = translation.allocate_table();
        (Self::from_pointer(table, level), pa)
    }

    fn from_pointer(table: NonNull<PageTable>, level: usize) -> Self {
        Self {
            table,
            level,
            _translation: PhantomData,
        }
    }

    fn get_entry(&self, va: VirtualAddress) -> &Descriptor {
        let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL;
        let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL);
        let table = unsafe { self.table.as_ref() };
        &table.entries[index]
    }

    fn get_entry_mut(&mut self, va: VirtualAddress) -> &mut Descriptor {
        let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL;
        let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL);
        let table = unsafe { self.table.as_mut() };
        &mut table.entries[index]
    }

    fn split_entry(
        translation: &mut T,
        chunk: &MemoryRegion,
        entry: &mut Descriptor,
        level: usize,
    ) -> Self {
        let granularity = granularity_at_level(level);
        let (mut subtable, subtable_pa) = Self::new(translation, level + 1);
        let old_flags = entry.flags();
        let old_pa = entry.output_address();
        if !old_flags.contains(Attributes::TABLE_OR_PAGE)
            && (!old_flags.is_empty() || old_pa.0 != 0)
        {
            let a = align_down(chunk.0.start.0, granularity);
            let b = align_up(chunk.0.end.0, granularity);
            subtable.map_range(
                translation,
                &MemoryRegion::new(a, b),
                old_pa,
                old_flags,
                Constraints::empty(),
            );
        }
        entry.set(subtable_pa, Attributes::TABLE_OR_PAGE | Attributes::VALID);
        subtable
    }

    fn map_range(
        &mut self,
        translation: &mut T,
        range: &MemoryRegion,
        mut pa: PhysicalAddress,
        flags: Attributes,
        constraints: Constraints,
    ) {
        let level = self.level;
        let granularity = granularity_at_level(level);

        for chunk in range.split(level) {
            let entry = self.get_entry_mut(chunk.0.start);

            if level == LEAF_LEVEL {
                entry.set(pa, flags | Attributes::TABLE_OR_PAGE);
            } else if chunk.is_block(level)
                && !entry.is_table_or_page()
                && is_aligned(pa.0, granularity)
                && !constraints.contains(Constraints::NO_BLOCK_MAPPINGS)
            {
                entry.set(pa, flags);
            } else {
                let mut subtable = entry
                    .subtable(translation, level)
                    .unwrap_or_else(|| Self::split_entry(translation, &chunk, entry, level));
                subtable.map_range(translation, &chunk, pa, flags, constraints);
            }
            pa.0 += chunk.len();
        }
    }

    fn fmt_indented(
        &self,
        f: &mut Formatter,
        translation: &T,
        indentation: usize,
    ) -> Result<(), fmt::Error> {
        const WIDTH: usize = 3;
        let table = unsafe { self.table.as_ref() };

        let mut i = 0;
        while i < table.entries.len() {
            if table.entries[i].bits() == 0 {
                let first_zero = i;
                while i < table.entries.len() && table.entries[i].bits() == 0 {
                    i += 1;
                }
                if i - 1 == first_zero {
                    writeln!(f, "{:indentation$}{: <WIDTH$}: 0", "", first_zero)?;
                } else {
                    writeln!(f, "{:indentation$}{: <WIDTH$}-{}: 0", "", first_zero, i - 1)?;
                }
            } else {
                writeln!(
                    f,
                    "{:indentation$}{: <WIDTH$}: {:?}",
                    "", i, table.entries[i],
                )?;
                if let Some(subtable) = table.entries[i].subtable(translation, self.level) {
                    subtable.fmt_indented(f, translation, indentation + 2)?;
                }
                i += 1;
            }
        }
        Ok(())
    }

    unsafe fn free(&mut self, translation: &mut T) {
        let table = unsafe { self.table.as_ref() };
        for entry in &table.entries {
            if let Some(mut subtable) = entry.subtable(translation, self.level) {
                unsafe {
                    subtable.free(translation);
                }
            }
        }
        unsafe {
            translation.deallocate_table(self.table);
        }
    }

    fn modify_range<F>(
        &mut self,
        translation: &mut T,
        range: &MemoryRegion,
        f: &F,
    ) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
    {
        let level = self.level;
        for chunk in range.split(level) {
            let entry = self.get_entry_mut(chunk.0.start);
            if let Some(mut subtable) = entry.subtable(translation, level).or_else(|| {
                if !chunk.is_block(level) {
                    Some(Self::split_entry(translation, &chunk, entry, level))
                } else {
                    None
                }
            }) {
                subtable.modify_range(translation, &chunk, f)?;
            } else {
                f(&chunk, entry, level).map_err(|_| MapError::PteUpdateFault(entry.bits()))?;
            }
        }
        Ok(())
    }

    fn visit_range<F, E>(&self, translation: &T, range: &MemoryRegion, f: &mut F) -> Result<(), E>
    where
        F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), E>,
    {
        let level = self.level;
        for chunk in range.split(level) {
            let entry = self.get_entry(chunk.0.start);
            if let Some(subtable) = entry.subtable(translation, level) {
                subtable.visit_range(translation, &chunk, f)?;
            } else {
                f(&chunk, entry, level)?;
            }
        }
        Ok(())
    }

    #[cfg(test)]
    fn mapping_level(&self, translation: &T, va: VirtualAddress) -> Option<usize> {
        let entry = self.get_entry(va);
        if let Some(subtable) = entry.subtable(translation, self.level) {
            subtable.mapping_level(translation, va)
        } else if entry.is_valid() {
            Some(self.level)
        } else {
            None
        }
    }
}

#[repr(C, align(4096))]
pub struct PageTable {
    entries: [Descriptor; 1 << BITS_PER_LEVEL],
}

impl PageTable {
    pub const EMPTY: Self = Self {
        entries: [Descriptor::EMPTY; 1 << BITS_PER_LEVEL],
    };

    #[cfg(feature = "alloc")]
    pub fn new() -> NonNull<Self> {
        unsafe { allocate_zeroed() }
    }

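    /// Copies the table's descriptors, little-endian, into the given buffer, which must be
    /// exactly one page long; otherwise `Err(())` is returned.
    ///
    /// A minimal sketch (illustrative only):
    ///
    /// ```ignore
    /// let table = PageTable::EMPTY;
    /// let mut page = [0u8; PAGE_SIZE];
    /// table.write_to(&mut page).unwrap();
    /// ```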
    pub fn write_to(&self, page: &mut [u8]) -> Result<(), ()> {
        if page.len() != self.entries.len() * size_of::<Descriptor>() {
            return Err(());
        }
        for (chunk, desc) in page
            .chunks_exact_mut(size_of::<Descriptor>())
            .zip(self.entries.iter())
        {
            chunk.copy_from_slice(&desc.bits().to_le_bytes());
        }
        Ok(())
    }
}

impl Default for PageTable {
    fn default() -> Self {
        Self::EMPTY
    }
}

pub(crate) type DescriptorBits = usize;

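/// A single entry in a page table: either invalid, a block or page mapping, or a pointer to a
/// lower-level table. The output address and the attribute flags share the same descriptor word.
///
/// A rough sketch of how an entry is written and inspected, mirroring the unit tests below
/// (`set` is crate-internal, so this is orientation rather than a runnable doctest):
///
/// ```ignore
/// let mut desc = Descriptor::EMPTY;
/// desc.set(
///     PhysicalAddress(0x12340000),
///     Attributes::VALID | Attributes::TABLE_OR_PAGE,
/// );
/// assert!(desc.is_valid());
/// assert_eq!(desc.output_address(), PhysicalAddress(0x12340000));
/// ```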
#[repr(C)]
pub struct Descriptor(AtomicUsize);

impl Descriptor {
    pub const EMPTY: Self = Self(AtomicUsize::new(0));

    const PHYSICAL_ADDRESS_BITMASK: usize = !(PAGE_SIZE - 1) & !(0xffff << 48);

    pub(crate) fn bits(&self) -> DescriptorBits {
        self.0.load(Ordering::Acquire)
    }

    pub fn output_address(&self) -> PhysicalAddress {
        PhysicalAddress(self.bits() & Self::PHYSICAL_ADDRESS_BITMASK)
    }

    pub fn flags(&self) -> Attributes {
        Attributes::from_bits_retain(self.bits() & !Self::PHYSICAL_ADDRESS_BITMASK)
    }

    pub fn modify_flags(&mut self, set: Attributes, clear: Attributes) {
        let oldval = self.bits();
        let flags = (oldval | set.bits()) & !clear.bits();

        if (oldval ^ flags) & Attributes::TABLE_OR_PAGE.bits() != 0 {
            panic!("Cannot convert between table and block/page descriptors\n");
        }

        self.0.store(flags, Ordering::Release);
    }

    pub fn is_valid(&self) -> bool {
        (self.bits() & Attributes::VALID.bits()) != 0
    }

    pub fn is_table_or_page(&self) -> bool {
        self.flags()
            .contains(Attributes::TABLE_OR_PAGE | Attributes::VALID)
    }

    pub(crate) fn set(&mut self, pa: PhysicalAddress, flags: Attributes) {
        self.0.store(
            (pa.0 & Self::PHYSICAL_ADDRESS_BITMASK) | flags.bits(),
            Ordering::Release,
        );
    }

    fn subtable<T: Translation>(
        &self,
        translation: &T,
        level: usize,
    ) -> Option<PageTableWithLevel<T>> {
        if level < LEAF_LEVEL && self.is_table_or_page() {
            let output_address = self.output_address();
            let table = translation.physical_to_virtual(output_address);
            return Some(PageTableWithLevel::from_pointer(table, level + 1));
        }
        None
    }

    pub(crate) fn clone(&self) -> Self {
        Descriptor(AtomicUsize::new(self.bits()))
    }
}

impl Debug for Descriptor {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "{:#016x}", self.bits())?;
        if self.is_valid() {
            write!(f, " ({}, {:?})", self.output_address(), self.flags())?;
        }
        Ok(())
    }
}

#[cfg(feature = "alloc")]
unsafe fn allocate_zeroed<T>() -> NonNull<T> {
    let layout = Layout::new::<T>();
    assert_ne!(layout.size(), 0);
    let pointer = unsafe { alloc_zeroed(layout) };
    if pointer.is_null() {
        handle_alloc_error(layout);
    }
    unsafe { NonNull::new_unchecked(pointer as *mut T) }
}

#[cfg(feature = "alloc")]
pub(crate) unsafe fn deallocate<T>(ptr: NonNull<T>) {
    let layout = Layout::new::<T>();
    unsafe {
        dealloc(ptr.as_ptr() as *mut u8, layout);
    }
}

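/// Aligns `value` down to the nearest multiple of `alignment`, which must be a power of two.
/// For example (illustrative), `align_down(0x1234, 0x1000) == 0x1000`.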
const fn align_down(value: usize, alignment: usize) -> usize {
    value & !(alignment - 1)
}

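/// Aligns `value` up to the nearest multiple of `alignment`, which must be a power of two.
/// For example (illustrative), `align_up(0x1234, 0x1000) == 0x2000`.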
const fn align_up(value: usize, alignment: usize) -> usize {
    ((value - 1) | (alignment - 1)) + 1
}

pub(crate) const fn is_aligned(value: usize, alignment: usize) -> bool {
    value & (alignment - 1) == 0
}

#[cfg(test)]
mod tests {
    use super::*;
    #[cfg(feature = "alloc")]
    use crate::idmap::IdTranslation;
    #[cfg(feature = "alloc")]
    use alloc::{format, string::ToString, vec, vec::Vec};

    #[cfg(feature = "alloc")]
    #[test]
    fn display_memory_region() {
        let region = MemoryRegion::new(0x1234, 0x56789);
        assert_eq!(
            &region.to_string(),
            "0x0000000000001000..0x0000000000057000"
        );
        assert_eq!(
            &format!("{:?}", region),
            "0x0000000000001000..0x0000000000057000"
        );
    }

    #[test]
    fn subtract_virtual_address() {
        let low = VirtualAddress(0x12);
        let high = VirtualAddress(0x1234);
        assert_eq!(high - low, 0x1222);
    }

    #[cfg(debug_assertions)]
    #[test]
    #[should_panic]
    fn subtract_virtual_address_overflow() {
        let low = VirtualAddress(0x12);
        let high = VirtualAddress(0x1234);

        let _ = low - high;
    }

    #[test]
    fn add_virtual_address() {
        assert_eq!(VirtualAddress(0x1234) + 0x42, VirtualAddress(0x1276));
    }

    #[test]
    fn subtract_physical_address() {
        let low = PhysicalAddress(0x12);
        let high = PhysicalAddress(0x1234);
        assert_eq!(high - low, 0x1222);
    }

    #[cfg(debug_assertions)]
    #[test]
    #[should_panic]
    fn subtract_physical_address_overflow() {
        let low = PhysicalAddress(0x12);
        let high = PhysicalAddress(0x1234);

        let _ = low - high;
    }

    #[test]
    fn add_physical_address() {
        assert_eq!(PhysicalAddress(0x1234) + 0x42, PhysicalAddress(0x1276));
    }

    #[test]
    fn invalid_descriptor() {
        let desc = Descriptor(AtomicUsize::new(0usize));
        assert!(!desc.is_valid());
        assert!(!desc.flags().contains(Attributes::VALID));
    }

    #[test]
    fn set_descriptor() {
        const PHYSICAL_ADDRESS: usize = 0x12340000;
        let mut desc = Descriptor(AtomicUsize::new(0usize));
        assert!(!desc.is_valid());
        desc.set(
            PhysicalAddress(PHYSICAL_ADDRESS),
            Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1 | Attributes::VALID,
        );
        assert!(desc.is_valid());
        assert_eq!(
            desc.flags(),
            Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1 | Attributes::VALID
        );
        assert_eq!(desc.output_address(), PhysicalAddress(PHYSICAL_ADDRESS));
    }

    #[test]
    fn modify_descriptor_flags() {
        let mut desc = Descriptor(AtomicUsize::new(0usize));
        assert!(!desc.is_valid());
        desc.set(
            PhysicalAddress(0x12340000),
            Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1,
        );
        desc.modify_flags(
            Attributes::DBM | Attributes::SWFLAG_3,
            Attributes::VALID | Attributes::SWFLAG_1,
        );
        assert!(!desc.is_valid());
        assert_eq!(
            desc.flags(),
            Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_3 | Attributes::DBM
        );
    }

    #[test]
    #[should_panic]
    fn modify_descriptor_table_or_page_flag() {
        let mut desc = Descriptor(AtomicUsize::new(0usize));
        assert!(!desc.is_valid());
        desc.set(
            PhysicalAddress(0x12340000),
            Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1,
        );
        desc.modify_flags(Attributes::VALID, Attributes::TABLE_OR_PAGE);
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn unaligned_chunks() {
        let region = MemoryRegion::new(0x0000_2000, 0x0020_5000);
        let chunks = region.split(LEAF_LEVEL - 1).collect::<Vec<_>>();
        assert_eq!(
            chunks,
            vec![
                MemoryRegion::new(0x0000_2000, 0x0020_0000),
                MemoryRegion::new(0x0020_0000, 0x0020_5000),
            ]
        );
    }

    #[cfg(feature = "alloc")]
    #[test]
    #[should_panic]
    fn no_el2_ttbr1() {
        RootTable::<IdTranslation>::new(IdTranslation, 1, TranslationRegime::El2, VaRange::Upper);
    }

    #[cfg(feature = "alloc")]
    #[test]
    #[should_panic]
    fn no_el3_ttbr1() {
        RootTable::<IdTranslation>::new(IdTranslation, 1, TranslationRegime::El3, VaRange::Upper);
    }

    #[test]
    fn table_or_page() {
        assert!(!Descriptor(AtomicUsize::new(0b00)).is_table_or_page());
        assert!(!Descriptor(AtomicUsize::new(0b10)).is_table_or_page());

        assert!(!Descriptor(AtomicUsize::new(0b01)).is_table_or_page());

        assert!(Descriptor(AtomicUsize::new(0b11)).is_table_or_page());
    }

    #[test]
    fn table_or_page_unknown_bits() {
        const UNKNOWN: usize = 1 << 50 | 1 << 52;

        assert!(!Descriptor(AtomicUsize::new(UNKNOWN | 0b00)).is_table_or_page());
        assert!(!Descriptor(AtomicUsize::new(UNKNOWN | 0b10)).is_table_or_page());

        assert!(!Descriptor(AtomicUsize::new(UNKNOWN | 0b01)).is_table_or_page());

        assert!(Descriptor(AtomicUsize::new(UNKNOWN | 0b11)).is_table_or_page());
    }
}