1#![cfg_attr(target_os = "none", no_std)]
2
3use core::{fmt::Display, ops::Deref, ptr::NonNull, sync::atomic::Ordering};
4
5pub use anyhow::Error;
6
/// Backend that performs the actual MMIO map/unmap operations.
///
/// Implemented by the platform/kernel layer and installed once via [`init`];
/// the module-level [`ioremap`]/[`iounmap`] functions forward to it.
pub trait MmioOp: Sync + Send + 'static {
    /// Maps `size` bytes of physical memory starting at `addr`, returning a
    /// handle describing the mapping.
    fn ioremap(&self, addr: PhysAddr, size: usize) -> Result<Mmio, Error>;
    /// Releases a mapping previously produced by [`MmioOp::ioremap`].
    fn iounmap(&self, mmio: &Mmio);
}
11
12static mut MMIO_OP: Option<&'static dyn MmioOp> = None;
13static INIT: core::sync::atomic::AtomicBool = core::sync::atomic::AtomicBool::new(false);
14
15pub fn init(mmio_op: &'static dyn MmioOp) {
16 if INIT
17 .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
18 .is_err()
19 {
20 return;
21 }
22
23 unsafe {
24 MMIO_OP = Some(mmio_op);
25 }
26}
27
28pub unsafe fn ioremap(addr: PhysAddr, size: usize) -> Result<Mmio, Error> {
32 let mmio_op = unsafe { MMIO_OP.expect("MmioOp is not initialized") };
33 mmio_op.ioremap(addr, size)
34}
35
36pub unsafe fn iounmap(mmio: &Mmio) {
40 let mmio_op = unsafe { MMIO_OP.expect("MmioOp is not initialized") };
41 mmio_op.iounmap(mmio);
42}
43
44pub fn ioremap_guard(addr: PhysAddr, size: usize) -> Result<MmioGuard, Error> {
45 let mmio = unsafe { ioremap(addr, size)? };
46 Ok(MmioGuard(mmio))
47}
48
/// A physical memory address.
///
/// Newtype over `usize`; `Debug` renders as `PhysAddr(0x...)` and `Display`
/// as bare hex, via the `derive_more` format attributes below.
#[derive(
    Default,
    derive_more::From,
    derive_more::Into,
    Clone,
    Copy,
    derive_more::Debug,
    derive_more::Display,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
)]
#[repr(transparent)]
#[debug("PhysAddr({_0:#x})")]
#[display("{_0:#x}")]
pub struct PhysAddr(usize);
67
68impl PhysAddr {
69 pub fn as_usize(&self) -> usize {
70 self.0
71 }
72}
73
74impl From<u64> for PhysAddr {
75 fn from(value: u64) -> Self {
76 PhysAddr(value as usize)
77 }
78}
79
/// A mapped MMIO region: physical base, mapped virtual address, and length.
///
/// `Mmio` itself does not unmap on drop — see [`MmioGuard`] for the RAII
/// wrapper — so `Clone` produces a cheap aliasing handle to the same region.
#[derive(Debug, Clone)]
pub struct Mmio {
    /// Physical base address of the region.
    phys: PhysAddr,
    /// Virtual address the region is mapped at.
    virt: NonNull<u8>,
    /// Length of the region in bytes.
    size: usize,
}
86
87impl Mmio {
88 pub unsafe fn new(phys: PhysAddr, virt: NonNull<u8>, size: usize) -> Self {
92 Mmio { phys, virt, size }
93 }
94
95 pub fn phys_addr(&self) -> PhysAddr {
96 self.phys
97 }
98
99 pub fn as_slice(&self) -> &[u8] {
100 unsafe { core::slice::from_raw_parts(self.virt.as_ptr(), self.size) }
101 }
102
103 pub fn as_ptr(&self) -> *mut u8 {
104 self.virt.as_ptr()
105 }
106
107 pub fn size(&self) -> usize {
108 self.size
109 }
110
111 pub fn read<T>(&self, offset: usize) -> T {
112 assert!(offset < self.size);
113 unsafe { self.virt.add(offset).cast::<T>().read_volatile() }
114 }
115
116 pub fn write<T>(&self, offset: usize, value: T) {
117 assert!(offset < self.size);
118 unsafe { self.virt.add(offset).cast::<T>().write_volatile(value) }
119 }
120}
121
/// RAII wrapper around [`Mmio`] that unmaps the region when dropped.
pub struct MmioGuard(Mmio);
123
124impl Deref for MmioGuard {
125 type Target = Mmio;
126
127 fn deref(&self) -> &Self::Target {
128 &self.0
129 }
130}
131
132impl Drop for MmioGuard {
133 fn drop(&mut self) {
134 let mmio_op = unsafe { MMIO_OP.expect("MmioOp is not initialized") };
135 mmio_op.iounmap(self);
136 }
137}
138
// SAFETY: NOTE(review): `Mmio` holds a raw `NonNull<u8>` into a device
// mapping, which suppresses the auto traits. These impls claim the mapping
// is usable from any thread — confirm the backend's mappings are visible
// from all CPUs, and note that concurrent `read`/`write` calls to the same
// register are not serialized by this type.
unsafe impl Send for Mmio {}
unsafe impl Sync for Mmio {}
141
142impl Display for Mmio {
143 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
144 write!(
145 f,
146 "Mmio [{}, {:#x}) -> virt: {:#p}",
147 self.phys,
148 self.phys.0 + self.size,
149 self.virt
150 )
151 }
152}
153
#[cfg(all(test, not(target_os = "none")))]
mod tests {
    use super::Mmio;

    /// Backend stub that hands out dangling (never-dereferenced) mappings.
    struct DummyMmioOp;
    impl super::MmioOp for DummyMmioOp {
        // The trait requires `Result<Mmio, Error>`; the previous
        // `Option<Mmio>` signature did not compile.
        fn ioremap(&self, addr: super::PhysAddr, size: usize) -> Result<Mmio, super::Error> {
            Ok(Mmio {
                phys: addr,
                virt: core::ptr::NonNull::dangling(),
                size,
            })
        }

        fn iounmap(&self, _mmio: &Mmio) {}
    }

    #[test]
    fn test_mmio_new() {
        // `&DummyMmioOp` is const-promoted to a `'static` reference.
        super::init(&DummyMmioOp);

        let addr = Mmio {
            phys: super::PhysAddr(0x1000),
            virt: core::ptr::NonNull::dangling(),
            size: 0x100,
        };
        // Pin the accessors, not just the (previously print-only) output.
        assert_eq!(addr.phys_addr().as_usize(), 0x1000);
        assert_eq!(addr.size(), 0x100);
        println!("Mmio address: {:?}", addr);
        println!("Mmio address display: {}", addr);
    }
}
183}