1#![cfg_attr(target_os = "none", no_std)]
2
3use core::{fmt::Display, ops::Deref, ptr::NonNull, sync::atomic::Ordering};
4
5pub use anyhow::Error;
6
/// Platform hook that performs the actual MMIO map/unmap operations.
///
/// A single `&'static` implementation is registered once via [`init`] and
/// used by the free functions [`ioremap`]/[`iounmap`].
pub trait MmioOp: Sync + Send + 'static {
    /// Maps `size` bytes of physical address space starting at `addr`.
    fn ioremap(&self, addr: MmioAddr, size: usize) -> Result<Mmio, Error>;
    /// Releases a mapping previously produced by [`Self::ioremap`].
    fn iounmap(&self, mmio: &Mmio);
}
11
// Registered backend. This `static mut` is written at most once, by the
// winner of the `INIT` compare-exchange in `init`; everything else only reads.
// NOTE(review): readers (`ioremap`/`iounmap`, `MmioGuard::drop`) do not
// synchronize with that write — see the note on `init`.
static mut MMIO_OP: Option<&'static dyn MmioOp> = None;
// One-shot claim flag guarding the single write to `MMIO_OP` above.
static INIT: core::sync::atomic::AtomicBool = core::sync::atomic::AtomicBool::new(false);
14
/// Installs the global [`MmioOp`] backend. Only the first call takes effect;
/// subsequent calls return silently.
///
/// NOTE(review): `INIT` is flipped to `true` *before* `MMIO_OP` is written,
/// and the readers (`ioremap`/`iounmap`) access `MMIO_OP` without consulting
/// `INIT` at all — so this function must happen-before any mapping call
/// (e.g. be run during single-threaded boot). Confirm callers guarantee
/// this, or introduce a separate ready flag stored with `Release` after the
/// write and loaded with `Acquire` by readers.
pub fn init(mmio_op: &'static dyn MmioOp) {
    // Claim the one-shot slot; losers (and repeat callers) bail out here, so
    // at most one thread ever writes the `static mut` below.
    if INIT
        .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_err()
    {
        return;
    }

    unsafe {
        MMIO_OP = Some(mmio_op);
    }
}
27
/// Maps `size` bytes of physical MMIO starting at `addr` through the
/// registered [`MmioOp`] backend.
///
/// # Panics
///
/// Panics if [`init`] has not been called.
///
/// # Safety
///
/// The caller must ensure [`init`] happened-before this call (the read of
/// `MMIO_OP` is not synchronized with a concurrent `init`), and that
/// `addr`/`size` describe a device region that is sound to map.
pub unsafe fn ioremap(addr: MmioAddr, size: usize) -> Result<Mmio, Error> {
    let mmio_op = unsafe { MMIO_OP.expect("MmioOp is not initialized") };
    mmio_op.ioremap(addr, size)
}
35
/// Releases a mapping previously produced by [`ioremap`].
///
/// # Panics
///
/// Panics if [`init`] has not been called.
///
/// # Safety
///
/// The caller must ensure [`init`] happened-before this call, and that no
/// outstanding references into the mapping are used afterwards.
pub unsafe fn iounmap(mmio: &Mmio) {
    let mmio_op = unsafe { MMIO_OP.expect("MmioOp is not initialized") };
    mmio_op.iounmap(mmio);
}
43
44pub fn ioremap_guard(addr: MmioAddr, size: usize) -> Result<MmioGuard, Error> {
45 let mmio = unsafe { ioremap(addr, size)? };
46 Ok(MmioGuard(mmio))
47}
48
49#[derive(
51 Default,
52 derive_more::From,
53 derive_more::Into,
54 Clone,
55 Copy,
56 derive_more::Debug,
57 derive_more::Display,
58 PartialEq,
59 Eq,
60 PartialOrd,
61 Ord,
62 Hash,
63)]
64#[repr(transparent)]
65#[debug("PhysAddr({_0:#x})")]
66#[display("{_0:#x}")]
67pub struct MmioAddr(usize);
68
69impl MmioAddr {
70 pub fn as_usize(&self) -> usize {
71 self.0
72 }
73}
74
impl From<u64> for MmioAddr {
    // NOTE(review): `as usize` silently truncates on 32-bit targets if the
    // address does not fit in `usize` — confirm MMIO addresses used with
    // this crate always fit, or switch to `TryFrom`.
    fn from(value: u64) -> Self {
        MmioAddr(value as usize)
    }
}
80
/// A live MMIO mapping: physical base, mapped virtual pointer, and length.
#[derive(Debug, Clone)]
pub struct Mmio {
    /// Physical base address of the mapped region.
    phys: MmioAddr,
    /// Virtual address the region is mapped at.
    virt: NonNull<u8>,
    /// Length of the mapping in bytes.
    size: usize,
}
87
88impl Mmio {
89 pub unsafe fn new(phys: MmioAddr, virt: NonNull<u8>, size: usize) -> Self {
93 Mmio { phys, virt, size }
94 }
95
96 pub fn phys_addr(&self) -> MmioAddr {
97 self.phys
98 }
99
100 pub fn as_slice(&self) -> &[u8] {
101 unsafe { core::slice::from_raw_parts(self.virt.as_ptr(), self.size) }
102 }
103
104 pub fn as_ptr(&self) -> *mut u8 {
105 self.virt.as_ptr()
106 }
107
108 pub fn size(&self) -> usize {
109 self.size
110 }
111
112 pub fn read<T>(&self, offset: usize) -> T {
113 assert!(offset < self.size);
114 unsafe { self.virt.add(offset).cast::<T>().read_volatile() }
115 }
116
117 pub fn write<T>(&self, offset: usize, value: T) {
118 assert!(offset < self.size);
119 unsafe { self.virt.add(offset).cast::<T>().write_volatile(value) }
120 }
121}
122
123pub struct MmioGuard(Mmio);
124
125impl Deref for MmioGuard {
126 type Target = Mmio;
127
128 fn deref(&self) -> &Self::Target {
129 &self.0
130 }
131}
132
133impl Drop for MmioGuard {
134 fn drop(&mut self) {
135 let mmio_op = unsafe { MMIO_OP.expect("MmioOp is not initialized") };
136 mmio_op.iounmap(self);
137 }
138}
139
// SAFETY(review): `Mmio` is a plain (phys, virt, size) descriptor; only the
// raw `NonNull<u8>` blocks the auto-impls. Sharing across threads is
// presumed sound because register access goes through volatile ops on `&self`
// — confirm the backend tolerates concurrent access to the same mapping.
unsafe impl Send for Mmio {}
unsafe impl Sync for Mmio {}
142
143impl Display for Mmio {
144 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
145 write!(
146 f,
147 "Mmio [{}, {:#x}) -> virt: {:#p}",
148 self.phys,
149 self.phys.0 + self.size,
150 self.virt
151 )
152 }
153}
154
155#[cfg(all(test, not(target_os = "none")))]
156mod tests {
157 use super::Mmio;
158
159 struct DummyMmioOp;
160 impl super::MmioOp for DummyMmioOp {
161 fn ioremap(&self, addr: super::PhysAddr, size: usize) -> Option<Mmio> {
162 Some(Mmio {
163 phys: addr,
164 virt: core::ptr::NonNull::dangling(),
165 size,
166 })
167 }
168
169 fn iounmap(&self, _mmio: &Mmio) {}
170 }
171
172 #[test]
173 fn test_mmio_new() {
174 super::init(&DummyMmioOp);
175
176 let addr = Mmio {
177 phys: super::PhysAddr(0x1000),
178 virt: core::ptr::NonNull::dangling(),
179 size: 0x100,
180 };
181 println!("Mmio address: {:?}", addr);
182 println!("Mmio address display: {}", addr);
183 }
184}