1use super::capability::RuntimeRegisterSpaceOffset;
4use accessor::marker::AccessorTypeSpecifier;
5use accessor::marker::ReadOnly;
6use accessor::marker::ReadWrite;
7use accessor::marker::Readable;
8use accessor::single;
9use accessor::Mapper;
10use core::convert::TryFrom;
11use core::convert::TryInto;
12use core::marker::PhantomData;
13
/// Runtime Registers (excluding the Interrupter Register Sets).
///
/// Use [`InterrupterRegisterSet`] to access the Interrupter Register Sets.
#[derive(Debug)]
pub struct Runtime<M>
where
    M: Mapper,
{
    /// Microframe Index Register (MFINDEX).
    pub mfindex: single::ReadWrite<MicroframeIndexRegister, M>,
}
26impl<M> Runtime<M>
27where
28 M: Mapper,
29{
30 pub unsafe fn new(mmio_base: usize, rtoff: RuntimeRegisterSpaceOffset, mapper: M) -> Self {
41 let base = mmio_base + usize::try_from(rtoff.get()).unwrap();
42
43 Self {
44 mfindex: single::ReadWrite::new(base, mapper),
45 }
46 }
47}
48
/// Microframe Index Register (MFINDEX).
#[repr(transparent)]
#[derive(Copy, Clone)]
pub struct MicroframeIndexRegister(u32);
impl MicroframeIndexRegister {
    // Bits 13:0 hold the index; the field is read-only.
    ro_field!(0..=13, microframe_index, "Microframe Index", u16);
}
// Derive `Debug` output from the getter generated above.
impl_debug_from_methods! {
    MicroframeIndexRegister {
        microframe_index,
    }
}
61
/// Accessor to the array of the Interrupter Register Sets.
#[repr(C)]
#[derive(Debug)]
pub struct InterrupterRegisterSet<M>
where
    M: Mapper + Clone,
{
    // Base address of the first Interrupter Register Set
    // (asserted 32-byte aligned in `new`).
    base: usize,
    // Cloned into every `Interrupter` accessor handed out.
    mapper: M,
}
72
73impl<M> InterrupterRegisterSet<M>
74where
75 M: Mapper + Clone,
76{
77 pub unsafe fn new(mmio_base: usize, rtoff: RuntimeRegisterSpaceOffset, mapper: M) -> Self {
89 let base = mmio_base + usize::try_from(rtoff.get()).unwrap() + 0x20;
90 assert!(base % 0x20 == 0, "base is not aligned");
91
92 Self { base, mapper }
93 }
94
95 pub fn interrupter(&self, index: usize) -> Interrupter<'_, M, ReadOnly> {
101 unsafe { Interrupter::new(self.base, index, self.mapper.clone()) }
102 }
103
104 pub fn interrupter_mut(&mut self, index: usize) -> Interrupter<'_, M, ReadWrite> {
110 unsafe { Interrupter::new(self.base, index, self.mapper.clone()) }
111 }
112}
113
/// A single Interrupter Register Set.
///
/// The lifetime parameter borrows the parent [`InterrupterRegisterSet`], so
/// the accessor cannot outlive it.
#[derive(Debug)]
pub struct Interrupter<'a, M, A>
where
    M: Mapper + Clone,
    A: AccessorTypeSpecifier + Readable,
{
    /// Interrupter Management Register (IMAN).
    pub iman: single::Generic<InterrupterManagementRegister, M, A>,
    /// Interrupter Moderation Register (IMOD).
    pub imod: single::Generic<InterrupterModerationRegister, M, A>,
    /// Event Ring Segment Table Size Register (ERSTSZ).
    pub erstsz: single::Generic<EventRingSegmentTableSizeRegister, M, A>,
    /// Event Ring Segment Table Base Address Register (ERSTBA).
    pub erstba: single::Generic<EventRingSegmentTableBaseAddressRegister, M, A>,
    /// Event Ring Dequeue Pointer Register (ERDP).
    pub erdp: single::Generic<EventRingDequeuePointerRegister, M, A>,
    // Zero-sized borrow of the parent set; `interrupter` (&self) hands out
    // `ReadOnly` accessors while `interrupter_mut` (&mut self) hands out
    // `ReadWrite` ones, so the borrow enforces read/write exclusivity.
    _marker: PhantomData<&'a InterrupterRegisterSet<M>>,
}
135
136impl<M, A> Interrupter<'_, M, A>
137where
138 M: Mapper + Clone,
139 A: AccessorTypeSpecifier + Readable,
140{
141 unsafe fn new(interrupter_register_set_base: usize, index: usize, mapper: M) -> Self {
151 assert!(index < 1024, "index out of range");
152 let base = interrupter_register_set_base + index * 0x20;
153 Self {
154 iman: single::Generic::new(base, mapper.clone()),
155 imod: single::Generic::new(base + 0x4, mapper.clone()),
156 erstsz: single::Generic::new(base + 0x8, mapper.clone()),
157 erstba: single::Generic::new(base + 0x10, mapper.clone()),
158 erdp: single::Generic::new(base + 0x18, mapper),
159 _marker: PhantomData,
160 }
161 }
162}
163
/// Interrupter Management Register (IMAN).
#[repr(transparent)]
#[derive(Copy, Clone)]
pub struct InterrupterManagementRegister(u32);
impl InterrupterManagementRegister {
    // Bit 0 is RW1C: software writes `1` to clear a pending interrupt.
    rw1c_bit!(0, interrupt_pending, "Interrupt Pending");
    rw_bit!(1, interrupt_enable, "Interrupt Enable");
}
// Derive `Debug` output from the getters generated above.
impl_debug_from_methods! {
    InterrupterManagementRegister {
        interrupt_pending,
        interrupt_enable,
    }
}
178
/// Interrupter Moderation Register (IMOD).
#[repr(transparent)]
#[derive(Copy, Clone, Default)]
pub struct InterrupterModerationRegister(u32);
impl InterrupterModerationRegister {
    rw_field!(
        0..=15,
        interrupt_moderation_interval,
        "Interrupt Moderation Interval",
        u16
    );
    rw_field!(
        16..=31,
        interrupt_moderation_counter,
        "Interrupt Moderation Counter",
        u16
    );
}
// Derive `Debug` output from the getters generated above.
impl_debug_from_methods! {
    InterrupterModerationRegister{
        interrupt_moderation_interval,
        interrupt_moderation_counter,
    }
}
203
/// Event Ring Segment Table Size Register (ERSTSZ).
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub struct EventRingSegmentTableSizeRegister(u32);
impl EventRingSegmentTableSizeRegister {
    /// Returns the number of segments the Event Ring Segment Table supports.
    #[must_use]
    pub fn get(self) -> u16 {
        // Only bits 15:0 hold the table size; bits 31:16 are reserved.
        // Mask instead of `try_into().unwrap()` so a read does not panic
        // if the hardware reports the reserved bits as non-zero.
        (self.0 & 0xffff) as u16
    }

    /// Sets the number of segments the Event Ring Segment Table supports.
    pub fn set(&mut self, s: u16) {
        // Read-modify-write: preserve the reserved upper 16 bits (RsvdP)
        // instead of clobbering them with zeros.
        self.0 = (self.0 & 0xffff_0000) | u32::from(s);
    }
}
220
/// Event Ring Segment Table Base Address Register (ERSTBA).
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub struct EventRingSegmentTableBaseAddressRegister(u64);
impl EventRingSegmentTableBaseAddressRegister {
    /// Returns the base address of the Event Ring Segment Table.
    #[must_use]
    pub fn get(self) -> u64 {
        self.0
    }

    /// Sets the base address of the Event Ring Segment Table.
    ///
    /// # Panics
    ///
    /// This method panics if the address is not 64-byte aligned.
    pub fn set(&mut self, a: u64) {
        // The low 6 bits of ERSTBA are reserved, so the address must be
        // a multiple of 64.
        assert!(
            a % 64 == 0,
            "The Event Ring Segment Table Base Address must be 64-byte aligned."
        );
        self.0 = a;
    }
}
245
/// Event Ring Dequeue Pointer Register (ERDP).
#[repr(transparent)]
#[derive(Copy, Clone, Default)]
pub struct EventRingDequeuePointerRegister(u64);
impl EventRingDequeuePointerRegister {
    rw_field!(
        0..=2,
        dequeue_erst_segment_index,
        "Dequeue ERST Segment Index",
        u8
    );
    // Bit 3 is RW1C: software writes `1` to clear the busy flag.
    rw1c_bit!(3, event_handler_busy, "Event Handler Busy");

    /// Returns the address of the current Event Ring Dequeue Pointer.
    #[must_use]
    pub fn event_ring_dequeue_pointer(self) -> u64 {
        // Bits 3:0 carry the DESI field and the EHB bit, not address
        // bits; mask them off.
        self.0 & !0b1111
    }

    /// Sets the address of the Event Ring Dequeue Pointer.
    ///
    /// # Panics
    ///
    /// This method panics if the address is not 16-byte aligned.
    pub fn set_event_ring_dequeue_pointer(&mut self, p: u64) {
        assert!(
            p.trailing_zeros() >= 4,
            "The Event Ring Dequeue Pointer must be 16-byte aligned."
        );
        // NOTE(review): this overwrites the whole register, zeroing the
        // DESI field (bits 2:0); callers that need a non-zero segment
        // index must set it after this call — confirm intended.
        self.0 = p;
    }
}
// Derive `Debug` output from the getters above.
impl_debug_from_methods! {
    EventRingDequeuePointerRegister{
        dequeue_erst_segment_index,
        event_handler_busy,
        event_ring_dequeue_pointer
    }
}