embedded_shadow/view/kernel.rs

use crate::table::ShadowTable;

/// Hardware/kernel-side view of the shadow table.
///
/// Provides read/write access without marking blocks dirty, plus
/// methods to query and clear dirty state. Used by hardware drivers
/// to sync shadow data to/from actual hardware registers.
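///
/// # Examples
///
/// A sketch of a driver-side sync pass, assuming a mutable `KernelView`
/// has already been obtained as `view`; `write_hw` and `read_hw` are
/// hypothetical register-access helpers, so the example is not compiled:
///
/// ```ignore
/// // Flush dirty shadow blocks out to the hardware registers.
/// view.for_each_dirty_block(|addr, data| write_hw(addr, data))?;
/// view.clear_dirty();
///
/// // Refresh part of the shadow from hardware without re-marking it dirty.
/// let mut buf = [0u8; 16];
/// read_hw(0x00, &mut buf)?;
/// view.write_range(0x00, &buf)?;
/// ```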
pub struct KernelView<'a, const TS: usize, const BS: usize, const BC: usize>
where
    bitmaps::BitsImpl<BC>: bitmaps::Bits,
{
    table: &'a mut ShadowTable<TS, BS, BC>,
}

impl<'a, const TS: usize, const BS: usize, const BC: usize> KernelView<'a, TS, BS, BC>
where
    bitmaps::BitsImpl<BC>: bitmaps::Bits,
{
    pub(crate) fn new(table: &'a mut ShadowTable<TS, BS, BC>) -> Self {
        Self { table }
    }
}

impl<'a, const TS: usize, const BS: usize, const BC: usize> KernelView<'a, TS, BS, BC>
where
    bitmaps::BitsImpl<BC>: bitmaps::Bits,
{
    /// Reads data from the shadow table without marking dirty.
    pub fn read_range(&self, addr: u16, out: &mut [u8]) -> Result<(), crate::ShadowError> {
        self.table.read_range(addr, out)
    }

    /// Writes data to the shadow table without marking dirty.
    ///
    /// Use this to update the shadow after reading from hardware.
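    ///
    /// A sketch of refreshing one block from hardware; `read_hw_block` is a
    /// hypothetical register-read helper, so the snippet is not compiled:
    ///
    /// ```ignore
    /// let mut buf = [0u8; 16];
    /// read_hw_block(0x20, &mut buf);
    /// // The shadow now mirrors the hardware, and nothing is marked dirty.
    /// view.write_range(0x20, &buf)?;
    /// ```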
    pub fn write_range(&mut self, addr: u16, data: &[u8]) -> Result<(), crate::ShadowError> {
        self.table.write_range(addr, data)
    }

    /// Iterates over each dirty block, providing its address and data.
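    ///
    /// A sketch of flushing the shadow to hardware; `write_hw_block` is a
    /// hypothetical register-write helper, so the snippet is not compiled:
    ///
    /// ```ignore
    /// view.for_each_dirty_block(|addr, data| write_hw_block(addr, data))?;
    /// // Once every dirty block has been flushed, drop the dirty flags.
    /// view.clear_dirty();
    /// ```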
    pub fn for_each_dirty_block<F>(&self, mut f: F) -> Result<(), crate::ShadowError>
    where
        F: FnMut(u16, &[u8]) -> Result<(), crate::ShadowError>,
    {
        self.table.for_each_dirty_block(|addr, data| f(addr, data))
    }

    /// Returns true if any block overlapping the given range is dirty.
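    ///
    /// Dirty state is tracked per block, so a range that only partially
    /// overlaps a dirty block still reports `true`. A sketch, assuming
    /// 16-byte blocks with the first block dirty:
    ///
    /// ```ignore
    /// assert!(view.is_dirty(8, 8).unwrap());   // overlaps the dirty block
    /// assert!(!view.is_dirty(16, 8).unwrap()); // entirely within a clean block
    /// ```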
    pub fn is_dirty(&self, addr: u16, len: usize) -> Result<bool, crate::ShadowError> {
        self.table.is_dirty(addr, len)
    }

    /// Returns true if any block in the table is dirty.
    pub fn any_dirty(&self) -> bool {
        self.table.any_dirty()
    }

    /// Clears all dirty flags in the table.
    pub fn clear_dirty(&mut self) {
        self.table.clear_dirty()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_support::TestTable;

    #[test]
    fn kernel_read_does_not_mark_dirty() {
        let mut table = TestTable::new();
        let view = KernelView::new(&mut table);

        let mut buf = [0u8; 4];
        view.read_range(0, &mut buf).unwrap();

        assert!(!view.any_dirty());
    }

    #[test]
    fn kernel_write_does_not_mark_dirty() {
        let mut table = TestTable::new();
        let mut view = KernelView::new(&mut table);

        view.write_range(0, &[0xFF; 4]).unwrap();

        assert!(!view.any_dirty());

        // Verify data was actually written
        let mut buf = [0u8; 4];
        view.read_range(0, &mut buf).unwrap();
        assert_eq!(buf, [0xFF; 4]);
    }

    #[test]
    fn kernel_clear_dirty_clears_all_blocks() {
        let mut table = TestTable::new();
        // Manually mark some blocks dirty
        table.mark_dirty(0, 16).unwrap();
        table.mark_dirty(32, 16).unwrap();

        let mut view = KernelView::new(&mut table);
        assert!(view.any_dirty());

        view.clear_dirty();

        assert!(!view.any_dirty());
    }

    #[test]
    fn for_each_dirty_block_iterates_only_dirty() {
        let mut table = TestTable::new();
        // Mark only block 0 (bytes 0-15) and block 2 (bytes 32-47) dirty
        table.mark_dirty(0, 16).unwrap();
        table.mark_dirty(32, 16).unwrap();

        let view = KernelView::new(&mut table);

        let mut count = 0;
        let mut addrs = [0u16; 4];
        view.for_each_dirty_block(|addr, _data| {
            addrs[count] = addr;
            count += 1;
            Ok(())
        })
        .unwrap();

        assert_eq!(count, 2);
        assert_eq!(addrs[0], 0);
        assert_eq!(addrs[1], 32);
    }

    #[test]
    fn for_each_dirty_block_provides_correct_data() {
        let mut table = TestTable::new();
        table.write_range(0, &[0xAA; 16]).unwrap();
        table.mark_dirty(0, 16).unwrap();

        let view = KernelView::new(&mut table);

        view.for_each_dirty_block(|addr, data| {
            assert_eq!(addr, 0);
            assert_eq!(data.len(), 16);
            assert!(data.iter().all(|&b| b == 0xAA));
            Ok(())
        })
        .unwrap();
    }

    #[test]
    fn is_dirty_partial_overlap_returns_true() {
        let mut table = TestTable::new();
        // Mark block 0 (bytes 0-15) dirty
        table.mark_dirty(0, 16).unwrap();

        let view = KernelView::new(&mut table);

        // Query overlaps with dirty block
        assert!(view.is_dirty(8, 8).unwrap());
        // Query is entirely within dirty block
        assert!(view.is_dirty(0, 4).unwrap());
        // Query spans dirty and clean blocks
        assert!(view.is_dirty(8, 16).unwrap());
        // Query is entirely in clean block
        assert!(!view.is_dirty(16, 8).unwrap());
    }

    #[test]
    fn any_dirty_returns_false_after_clear() {
        let mut table = TestTable::new();
        table.mark_dirty(0, 64).unwrap(); // Mark all blocks

        let mut view = KernelView::new(&mut table);
        assert!(view.any_dirty());

        view.clear_dirty();
        assert!(!view.any_dirty());
    }
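
    // A sketch of a full kernel-side sync cycle, combining only the helpers
    // exercised above; the `hw` buffer stands in for hypothetical hardware
    // registers.
    #[test]
    fn kernel_sync_round_trip_sketch() {
        let mut table = TestTable::new();
        // Pretend the bus side dirtied the first block.
        table.write_range(0, &[0x55; 16]).unwrap();
        table.mark_dirty(0, 16).unwrap();

        let mut view = KernelView::new(&mut table);
        let mut hw = [0u8; 64];

        // Flush dirty blocks into the simulated hardware.
        view.for_each_dirty_block(|addr, data| {
            hw[addr as usize..addr as usize + data.len()].copy_from_slice(data);
            Ok(())
        })
        .unwrap();
        view.clear_dirty();
        assert!(!view.any_dirty());
        assert!(hw[..16].iter().all(|&b| b == 0x55));

        // Refresh the shadow from the simulated hardware; this must not
        // mark anything dirty.
        view.write_range(16, &hw[16..32]).unwrap();
        assert!(!view.any_dirty());
    }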
}