use core::marker::PhantomData;

use tock_registers::{LocalRegisterCopy, register_bitfields};

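/// Translation granule selector. `M` is the log2 of the granule size in bytes,
/// so `SIZE` is the granule size and `MASK` covers the in-granule offset bits.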
pub trait Granule: Clone + Copy {
    const M: u32;
    const SIZE: usize = 2usize.pow(Self::M);
    const MASK: u64 = (1u64 << Self::M) - 1;
}

#[derive(Clone, Copy)]
pub struct Granule4KB {}

impl Granule for Granule4KB {
    const M: u32 = 12;
}

#[derive(Clone, Copy)]
pub struct Granule16KB {}

impl Granule for Granule16KB {
    const M: u32 = 14;
}

#[derive(Clone, Copy)]
pub struct Granule64KB {}

impl Granule for Granule64KB {
    const M: u32 = 16;
}

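/// Output address (OA) width selector: 48-bit or 52-bit physical addresses.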
pub trait OA: Clone + Copy {
    const BITS: usize;
}

#[derive(Clone, Copy)]
pub struct OA48 {}

impl OA for OA48 {
    const BITS: usize = 48;
}

#[derive(Clone, Copy)]
pub struct OA52 {}

impl OA for OA52 {
    const BITS: usize = 52;
}

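/// Data access permissions as encoded in the descriptor AP bits.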
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AccessPermission {
    /// Read/write for privileged accesses only.
    PrivilegedReadWrite = 0b00,

    /// Read/write for both privileged and unprivileged accesses.
    ReadWrite = 0b01,

    /// Read-only for privileged accesses only.
    PrivilegedReadOnly = 0b10,

    /// Read-only for both privileged and unprivileged accesses.
    ReadOnly = 0b11,
}

impl AccessPermission {
    /// The two-bit AP field encoding of this permission.
    pub const fn as_bits(self) -> u8 {
        self as u8
    }

    /// Decode a two-bit AP field value; only the low two bits are examined.
    pub const fn from_bits(bits: u8) -> Option<Self> {
        match bits & 0b11 {
            0b00 => Some(Self::PrivilegedReadWrite),
            0b01 => Some(Self::ReadWrite),
            0b10 => Some(Self::PrivilegedReadOnly),
            0b11 => Some(Self::ReadOnly),
            // Unreachable: the value was masked to two bits above.
            _ => None,
        }
    }

    /// True if unprivileged accesses are permitted at all.
    pub const fn allows_unprivileged(self) -> bool {
        matches!(self, Self::ReadWrite | Self::ReadOnly)
    }

    /// True if privileged writes are permitted.
    pub const fn allows_privileged_write(self) -> bool {
        matches!(self, Self::PrivilegedReadWrite | Self::ReadWrite)
    }

    /// True if unprivileged writes are permitted.
    pub const fn allows_unprivileged_write(self) -> bool {
        matches!(self, Self::ReadWrite)
    }
}

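/// Shareability domain of a mapping, as encoded in the descriptor SH bits.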
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Shareability {
    NonShareable,
    OuterShareable,
    InnerShareable,
}

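// VMSAv8-64 stage 1 descriptor layout used by `TTE64`: lower attributes in
// bits [11:2], the output address in bits [49:12], upper attributes in
// bits [58:51].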
register_bitfields![u64,
    TTE64_REG [
        VALID OFFSET(0) NUMBITS(1) [
            Invalid = 0,
            Valid = 1
        ],

        TYPE OFFSET(1) NUMBITS(1) [
            Block = 0,
            Table = 1
        ],

        ATTR_INDX OFFSET(2) NUMBITS(3) [],

        NS OFFSET(5) NUMBITS(1) [
            Secure = 0,
            NonSecure = 1
        ],

        AP OFFSET(6) NUMBITS(2) [
            PrivilegedReadWrite = 0b00,
            ReadWrite = 0b01,
            PrivilegedReadOnly = 0b10,
            ReadOnly = 0b11
        ],

        SH OFFSET(8) NUMBITS(2) [
            NonShareable = 0b00,
            OuterShareable = 0b10,
            InnerShareable = 0b11
        ],

        AF OFFSET(10) NUMBITS(1) [
            NotAccessed = 0,
            Accessed = 1
        ],

        NG OFFSET(11) NUMBITS(1) [
            Global = 0,
            NotGlobal = 1
        ],

        ADDR OFFSET(12) NUMBITS(38) [],

        DBM OFFSET(51) NUMBITS(1) [
            ReadOnly = 0,
            Writable = 1
        ],

        CONTIG OFFSET(52) NUMBITS(1) [
            NotContiguous = 0,
            Contiguous = 1
        ],

        PXN OFFSET(53) NUMBITS(1) [
            ExecuteAllowed = 0,
            ExecuteNever = 1
        ],

        XN_UXN OFFSET(54) NUMBITS(1) [
            ExecuteAllowed = 0,
            ExecuteNever = 1
        ],

        SW_RESERVED OFFSET(55) NUMBITS(4) []
    ]
];

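/// A single 64-bit translation table entry, parameterized over the translation
/// granule `G` and the output address width `O`.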
#[derive(Clone, Copy)]
pub struct TTE64<G: Granule, O: OA> {
    reg: LocalRegisterCopy<u64, TTE64_REG::Register>,
    _marker: PhantomData<(G, O)>,
}

impl<G: Granule, O: OA> TTE64<G, O> {
    /// Wrap a raw descriptor value.
    pub const fn new(value: u64) -> Self {
        Self {
            reg: LocalRegisterCopy::new(value),
            _marker: PhantomData,
        }
    }

    /// An all-zero, invalid descriptor.
    pub const fn invalid() -> Self {
        Self::new(0)
    }

    /// Build a valid table descriptor pointing at the next-level table.
    pub fn new_table(table_addr: u64) -> Self {
        let mut tte = Self::new(0);

        tte.reg
            .modify(TTE64_REG::VALID::Valid + TTE64_REG::TYPE::Table + TTE64_REG::AF::Accessed);
        tte.set_address(table_addr);
        tte
    }

    /// Build a valid block descriptor mapping `block_addr`.
    pub fn new_block(block_addr: u64) -> Self {
        let mut tte = Self::new(0);

        tte.reg
            .modify(TTE64_REG::VALID::Valid + TTE64_REG::TYPE::Block + TTE64_REG::AF::Accessed);
        tte.set_address(block_addr);
        tte
    }

    pub fn get(&self) -> u64 {
        self.reg.get()
    }

    pub fn is_valid(&self) -> bool {
        self.reg.is_set(TTE64_REG::VALID)
    }

    pub fn set_is_valid(&mut self, val: bool) {
        if val {
            self.reg.modify(TTE64_REG::VALID::Valid);
        } else {
            self.reg.modify(TTE64_REG::VALID::Invalid);
        }
    }

    pub fn is_table(&self) -> bool {
        self.is_valid() && self.reg.is_set(TTE64_REG::TYPE)
    }

    pub fn is_block(&self) -> bool {
        self.is_valid() && !self.reg.is_set(TTE64_REG::TYPE)
    }

    pub fn set_is_table(&mut self) {
        self.reg.modify(TTE64_REG::TYPE::Table);
    }

    pub fn set_is_block(&mut self) {
        self.reg.modify(TTE64_REG::TYPE::Block);
    }

    /// Set the output address field. `addr` must be aligned to the granule
    /// size and fit within the configured output address width.
    pub fn set_address(&mut self, addr: u64) {
        assert!(
            addr & G::MASK == 0,
            "Address must be aligned to granule size"
        );
        assert!(
            addr < (1u64 << O::BITS),
            "Address exceeds output address width"
        );
        let val = addr >> TTE64_REG::ADDR.shift;
        self.reg.modify(TTE64_REG::ADDR.val(val));
    }

    /// The output address bits of the descriptor, masked from bit `G::M` up to
    /// the configured output address width. Returns 0 for an invalid entry.
    pub fn address(&self) -> u64 {
        if !self.is_valid() {
            return 0;
        }

        let raw_value = self.reg.get();
        let bit_start = G::M;
        let bit_end = if O::BITS == 52 && (G::M == 12 || G::M == 14) {
            50
        } else {
            48
        };
        let mask = ((1u64 << (bit_end - bit_start + 1)) - 1) << bit_start;
        raw_value & mask
    }

    /// The output address of a block descriptor at translation table `level`,
    /// with the low bits that belong to attributes at that level masked off.
    /// Table descriptors fall back to [`Self::address`].
    pub fn address_with_page_level(&self, level: usize) -> u64 {
        if self.is_table() {
            return self.address();
        }
        let raw_addr = self.reg.get();
        // Lowest output-address bit of a block at this (granule, level) pair.
        let n = match (G::M, level) {
            (12, 0) => 39,
            (12, 1) => 30,
            (12, 2) => 21,
            (14, 1) => 36,
            (14, 2) => 25,
            (16, 1) => 42,
            (16, 2) => 29,
            _ => panic!("Invalid granule size or level combination"),
        };

        let bit_start = n;
        let bit_end = if O::BITS == 52 && (G::M == 12 || G::M == 14) {
            50
        } else {
            48
        };
        let mask = ((1u64 << (bit_end - bit_start + 1)) - 1) << bit_start;
        raw_addr & mask
    }

    pub fn is_accessed(&self) -> bool {
        self.reg.is_set(TTE64_REG::AF)
    }

    /// The MAIR attribute index (`ATTR_INDX`) selecting the memory type.
    pub fn attr_index(&self) -> u64 {
        self.reg.read(TTE64_REG::ATTR_INDX)
    }

    pub fn set_attr_index(&mut self, index: u64) {
        assert!(index < 8, "Attribute index must be less than 8");
        self.reg.modify(TTE64_REG::ATTR_INDX.val(index));
    }

    /// True if unprivileged execution is permitted (UXN/XN clear).
    pub fn is_executable(&self) -> bool {
        !self.reg.is_set(TTE64_REG::XN_UXN)
    }

    pub fn set_executable(&mut self, val: bool) {
        if val {
            self.reg.modify(TTE64_REG::XN_UXN::ExecuteAllowed);
        } else {
            self.reg.modify(TTE64_REG::XN_UXN::ExecuteNever);
        }
    }

    /// True if privileged execution is permitted (PXN clear).
    pub fn is_privileged_executable(&self) -> bool {
        !self.reg.is_set(TTE64_REG::PXN)
    }

    pub fn set_privileged_executable(&mut self, val: bool) {
        if val {
            self.reg.modify(TTE64_REG::PXN::ExecuteAllowed);
        } else {
            self.reg.modify(TTE64_REG::PXN::ExecuteNever);
        }
    }

    pub fn access_permission(&self) -> AccessPermission {
        AccessPermission::from_bits(self.reg.read(TTE64_REG::AP) as _).unwrap()
    }

    pub fn set_access_permission(&mut self, permission: AccessPermission) {
        self.reg
            .modify(TTE64_REG::AP.val(permission.as_bits() as u64));
    }

    pub fn shareability(&self) -> Shareability {
        match self.reg.read_as_enum(TTE64_REG::SH) {
            Some(TTE64_REG::SH::Value::NonShareable) => Shareability::NonShareable,
            Some(TTE64_REG::SH::Value::OuterShareable) => Shareability::OuterShareable,
            Some(TTE64_REG::SH::Value::InnerShareable) => Shareability::InnerShareable,
            // 0b01 is a reserved SH encoding and is never written by this type.
            None => unreachable!("reserved SH encoding"),
        }
    }

    pub fn set_shareability(&mut self, shareability: Shareability) {
        self.reg.modify(match shareability {
            Shareability::NonShareable => TTE64_REG::SH::NonShareable,
            Shareability::OuterShareable => TTE64_REG::SH::OuterShareable,
            Shareability::InnerShareable => TTE64_REG::SH::InnerShareable,
        });
    }

    pub fn set_access(&mut self) {
        self.reg.modify(TTE64_REG::AF::Accessed);
    }

    pub fn clear_access(&mut self) {
        self.reg.modify(TTE64_REG::AF::NotAccessed);
    }

    pub fn is_contiguous(&self) -> bool {
        self.reg.is_set(TTE64_REG::CONTIG)
    }

    pub fn set_contiguous(&mut self) {
        self.reg.modify(TTE64_REG::CONTIG::Contiguous);
    }

    pub fn is_global(&self) -> bool {
        !self.reg.is_set(TTE64_REG::NG)
    }

    pub fn set_not_global(&mut self) {
        self.reg.modify(TTE64_REG::NG::NotGlobal);
    }

    /// True if the dirty bit modifier (DBM) marks this entry as writable.
    pub fn is_dirty_writable(&self) -> bool {
        self.reg.is_set(TTE64_REG::DBM)
    }

    /// The software-reserved bits [58:55].
    pub fn sw_reserved(&self) -> u64 {
        self.reg.read(TTE64_REG::SW_RESERVED)
    }

    /// Store `value` in the software-reserved bits; only the low four bits are kept.
    pub fn set_sw_reserved(&mut self, value: u64) {
        self.reg.modify(TTE64_REG::SW_RESERVED.val(value & 0xF));
    }
}

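// Convenience aliases for the supported granule and output-address combinations.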
pub type TTE4K48 = TTE64<Granule4KB, OA48>;

pub type TTE4K52 = TTE64<Granule4KB, OA52>;

pub type TTE16K48 = TTE64<Granule16KB, OA48>;

pub type TTE16K52 = TTE64<Granule16KB, OA52>;

pub type TTE64K48 = TTE64<Granule64KB, OA48>;

pub type TTE64K52 = TTE64<Granule64KB, OA52>;

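/// Block and page sizes for each granule at the levels used by this module.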
pub mod block_sizes {
    pub mod granule_4k {
        pub const LEVEL1_BLOCK_SIZE: usize = 1024 * 1024 * 1024;
        pub const LEVEL2_BLOCK_SIZE: usize = 2 * 1024 * 1024;
        pub const LEVEL3_PAGE_SIZE: usize = 4 * 1024;
    }

    pub mod granule_16k {
        pub const LEVEL1_BLOCK_SIZE: usize = 64 * 1024 * 1024 * 1024;
        pub const LEVEL2_BLOCK_SIZE: usize = 32 * 1024 * 1024;
        pub const LEVEL3_PAGE_SIZE: usize = 16 * 1024;
    }

    pub mod granule_64k {
        // A level 1 entry with a 64 KiB granule spans 4 TiB (bit 42 and up),
        // matching `address_with_page_level` and `calculate_index`.
        pub const LEVEL1_BLOCK_SIZE: usize = 4 * 1024 * 1024 * 1024 * 1024;
        pub const LEVEL2_BLOCK_SIZE: usize = 512 * 1024 * 1024;
        pub const LEVEL3_PAGE_SIZE: usize = 64 * 1024;
    }
}

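/// Address arithmetic helpers that depend only on the type parameters.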
impl<G: Granule, O: OA> TTE64<G, O> {
    /// The index into the translation table at `level` for virtual address `va`.
    pub fn calculate_index(va: u64, level: usize) -> usize {
        match (G::M, level) {
            (12, 0) => ((va >> 39) & 0x1FF) as usize,
            (12, 1) => ((va >> 30) & 0x1FF) as usize,
            (12, 2) => ((va >> 21) & 0x1FF) as usize,
            (12, 3) => ((va >> 12) & 0x1FF) as usize,
            (14, 0) => ((va >> 47) & 0x1) as usize,
            (14, 1) => ((va >> 36) & 0x7FF) as usize,
            (14, 2) => ((va >> 25) & 0x7FF) as usize,
            (14, 3) => ((va >> 14) & 0x7FF) as usize,
            (16, 1) => ((va >> 42) & 0x3F) as usize,
            (16, 2) => ((va >> 29) & 0x1FFF) as usize,
            (16, 3) => ((va >> 16) & 0x1FFF) as usize,
            _ => panic!("Invalid granule size or level combination"),
        }
    }

    /// True if `addr` is aligned to the granule size.
    pub fn is_aligned(addr: u64) -> bool {
        (addr & G::MASK) == 0
    }

    /// Round `addr` down to the nearest granule boundary.
    pub fn align_down(addr: u64) -> u64 {
        addr & !G::MASK
    }

    /// Round `addr` up to the nearest granule boundary.
    pub fn align_up(addr: u64) -> u64 {
        (addr + G::MASK) & !G::MASK
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_address_extraction_4k_48bit() {
        type TTE = TTE64<Granule4KB, OA48>;

        let table_addr = 0x1000_0000_1000;
        let tte_table = TTE::new_table(table_addr);
        assert_eq!(tte_table.address(), table_addr);

        let block = 2 * 1024 * 1024;
        let block_addr = 0x2000_0000_1000 + block;
        let tte_block = TTE::new_block(block_addr);
        assert_eq!(
            tte_block.address_with_page_level(2),
            0x2000_0000_0000 + block
        );
    }

    #[test]
    fn test_address_extraction_4k_52bit() {
        type TTE = TTE64<Granule4KB, OA52>;

        let table_addr = (1 << 50) - 0x1000;
        let tte_table = TTE::new_table(table_addr);
        let read_addr = tte_table.address();
        assert_eq!(
            read_addr, table_addr,
            "want {:#x} != read {:#x} address mismatch",
            table_addr, read_addr
        );
    }

    #[test]
    fn test_address_extraction_16k_48bit() {
        type TTE = TTE64<Granule16KB, OA48>;

        let table_addr = (1 << 47) + 16 * 1024;
        let tte_table = TTE::new_table(table_addr);
        let read = tte_table.address();
        assert_eq!(
            table_addr, read,
            "want {:#x} != read {:#x} address mismatch",
            table_addr, read
        );

        let block_addr = 0x2000_0000_0000;
        let tte_block = TTE::new_block(block_addr);
        assert_eq!(tte_block.address(), block_addr);
    }

    #[test]
    fn test_address_extraction_16k_52bit() {
        type TTE = TTE64<Granule16KB, OA52>;

        let table_addr = (1 << 50) - 0x4000;
        let tte_table = TTE::new_table(table_addr);
        assert_eq!(tte_table.address(), table_addr);
    }

    #[test]
    fn test_address_extraction_64k_48bit() {
        type TTE = TTE64<Granule64KB, OA48>;

        let table_addr = 0x1000_0001_0000;
        let tte_table = TTE::new_table(table_addr);
        assert_eq!(tte_table.address(), table_addr);

        let block_addr = 0x2000_0002_0000;
        let tte_block = TTE::new_block(block_addr);
        assert_eq!(tte_block.address(), block_addr);
    }

    #[test]
    fn test_address_extraction_64k_52bit() {
        type TTE = TTE64<Granule64KB, OA52>;

        let table_addr = 0xf00_1001_0000u64;
        let tte_table = TTE::new_table(table_addr);
        assert_eq!(
            table_addr,
            tte_table.address(),
            "want {:#x} != read {:#x} address mismatch",
            table_addr,
            tte_table.address()
        );
    }

    #[test]
    fn test_invalid_tte_address() {
        type TTE = TTE64<Granule4KB, OA48>;

        let tte_invalid = TTE::invalid();
        assert_eq!(tte_invalid.address(), 0);
        assert!(!tte_invalid.is_valid());
    }

    #[test]
    fn test_granule_constants() {
        assert_eq!(Granule4KB::M, 12);
        assert_eq!(Granule16KB::M, 14);
        assert_eq!(Granule64KB::M, 16);

        assert_eq!(Granule4KB::SIZE, 4096);
        assert_eq!(Granule16KB::SIZE, 16384);
        assert_eq!(Granule64KB::SIZE, 65536);

        assert_eq!(Granule4KB::MASK, 0xFFF);
        assert_eq!(Granule16KB::MASK, 0x3FFF);
        assert_eq!(Granule64KB::MASK, 0xFFFF);
    }
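
    // A small attribute round-trip sketch: exercise the permission, shareability,
    // execute-never, and attribute-index accessors on a block descriptor and read
    // the values back. The address and attribute values are illustrative only.
    #[test]
    fn test_block_attribute_roundtrip() {
        type TTE = TTE64<Granule4KB, OA48>;

        let mut tte = TTE::new_block(0x4020_0000);
        tte.set_access_permission(AccessPermission::ReadOnly);
        tte.set_shareability(Shareability::InnerShareable);
        tte.set_executable(false);
        tte.set_attr_index(1);

        assert!(tte.is_block());
        assert_eq!(tte.access_permission(), AccessPermission::ReadOnly);
        assert_eq!(tte.shareability(), Shareability::InnerShareable);
        assert!(!tte.is_executable());
        assert_eq!(tte.attr_index(), 1);
        assert!(tte.is_accessed());
    }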
}