// embedded_shadow/table.rs

use bitmaps::{Bitmap, Bits, BitsImpl};

use crate::error::ShadowError;
use crate::helpers::{block_span, range_span};

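/// Fixed-size shadow buffer of `TS` bytes, split into `BC` blocks of `BS`
/// bytes each, with one dirty bit tracked per block.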
pub(crate) struct ShadowTable<const TS: usize, const BS: usize, const BC: usize>
where
    BitsImpl<BC>: Bits,
{
    bytes: [u8; TS],
    dirty: Bitmap<BC>,
}

impl<const TS: usize, const BS: usize, const BC: usize> ShadowTable<TS, BS, BC>
where
    BitsImpl<BC>: Bits,
{
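    /// Creates a zero-filled table with no dirty blocks.
    ///
    /// Debug builds assert that `TS == BS * BC`.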
    pub(crate) fn new() -> Self {
        debug_assert!(
            TS == BS * BC,
            "Total size must match block size x block count",
        );

        Self {
            bytes: [0; TS],
            dirty: Bitmap::new(),
        }
    }

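    /// Sets or clears the dirty bit of every block touched by the byte range.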
    fn apply_dirty_range(&mut self, addr: u16, len: usize, dirty: bool) -> Result<(), ShadowError> {
        let (sb, eb) = block_span::<TS, BS, BC>(addr, len)?;
        for block in sb..=eb {
            self.dirty.set(block, dirty);
        }
        Ok(())
    }

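    /// Copies `out.len()` shadow bytes starting at `addr` into `out`.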
    pub(crate) fn read_range(&self, addr: u16, out: &mut [u8]) -> Result<(), ShadowError> {
        let (offset, end) = range_span::<TS>(addr, out.len())?;
        out.copy_from_slice(&self.bytes[offset..end]);
        Ok(())
    }

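    /// Copies `data` into the shadow starting at `addr`.
    ///
    /// The dirty bitmap is not updated here; use [`Self::mark_dirty`] to flag
    /// the written range.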
    pub(crate) fn write_range(&mut self, addr: u16, data: &[u8]) -> Result<(), ShadowError> {
        let (offset, end) = range_span::<TS>(addr, data.len())?;
        self.bytes[offset..end].copy_from_slice(data);

        Ok(())
    }

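    /// Calls `f` once per dirty block with the block's start address and a
    /// slice of its `BS` bytes, stopping at the first error.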
    pub(crate) fn for_each_dirty_block<F>(&self, mut f: F) -> Result<(), ShadowError>
    where
        F: FnMut(u16, &[u8]) -> Result<(), ShadowError>,
    {
        let mut idx = self.dirty.first_index();
        while let Some(block) = idx {
            let off = block * BS;
            let buf = &self.bytes[off..(off + BS)];
            f(off as u16, buf)?;
            idx = self.dirty.next_index(block);
        }
        Ok(())
    }

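    /// Returns `true` if any block overlapping the byte range is dirty.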
    pub(crate) fn is_dirty(&self, addr: u16, len: usize) -> Result<bool, ShadowError> {
        let (sb, eb) = block_span::<TS, BS, BC>(addr, len)?;
        for block in sb..=eb {
            if self.dirty.get(block) {
                return Ok(true);
            }
        }
        Ok(false)
    }

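    /// Returns `true` if at least one block is dirty.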
    pub(crate) fn any_dirty(&self) -> bool {
        !self.dirty.is_empty()
    }

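    /// Marks every block overlapping the byte range as dirty.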
    pub(crate) fn mark_dirty(&mut self, addr: u16, len: usize) -> Result<(), ShadowError> {
        self.apply_dirty_range(addr, len, true)
    }

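    /// Clears the dirty bit of every block.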
    pub(crate) fn clear_dirty(&mut self) {
        self.dirty = Bitmap::new();
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // 16-byte table, 4-byte blocks, 4 dirty blocks
    type TestTable = ShadowTable<16, 4, 4>;

    #[test]
    fn new_table_has_no_dirty_blocks() {
        let table: TestTable = ShadowTable::new();
        assert!(!table.is_dirty(0, 16).unwrap());
    }

    #[test]
    fn mark_dirty_single_block() {
        let mut table: TestTable = ShadowTable::new();
        table.mark_dirty(0, 1).unwrap();
        assert!(table.is_dirty(0, 1).unwrap());
        assert!(table.is_dirty(0, 4).unwrap()); // whole block 0
        assert!(!table.is_dirty(4, 4).unwrap()); // block 1
    }

    #[test]
    fn mark_dirty_spanning_blocks() {
        let mut table: TestTable = ShadowTable::new();
        // Mark bytes 2-5 dirty (spans blocks 0 and 1)
        table.mark_dirty(2, 4).unwrap();
        assert!(table.is_dirty(0, 4).unwrap()); // block 0
        assert!(table.is_dirty(4, 4).unwrap()); // block 1
        assert!(!table.is_dirty(8, 4).unwrap()); // block 2
    }

    #[test]
    fn is_dirty_zero_len_returns_error() {
        let mut table: TestTable = ShadowTable::new();
        table.mark_dirty(0, 16).unwrap();
        assert_eq!(table.is_dirty(0, 0), Err(ShadowError::ZeroLength));
    }

    #[test]
    fn is_dirty_out_of_bounds_returns_error() {
        let table: TestTable = ShadowTable::new();
        assert_eq!(table.is_dirty(15, 2), Err(ShadowError::OutOfBounds));
        assert_eq!(table.is_dirty(20, 1), Err(ShadowError::OutOfBounds));
    }

    #[test]
    fn any_dirty_returns_correct_value() {
        let mut table: TestTable = ShadowTable::new();
        assert!(!table.any_dirty());

        table.mark_dirty(0, 1).unwrap();
        assert!(table.any_dirty());
    }

    #[test]
    fn read_write_range() {
        let mut table: TestTable = ShadowTable::new();
        let data = [1, 2, 3, 4];
        table.write_range(4, &data).unwrap();

        let mut out = [0u8; 4];
        table.read_range(4, &mut out).unwrap();
        assert_eq!(out, data);
    }

    #[test]
    fn read_write_range_errors() {
        let mut table: TestTable = ShadowTable::new();

        // Zero length
        assert_eq!(table.read_range(0, &mut []), Err(ShadowError::ZeroLength));
        assert_eq!(table.write_range(0, &[]), Err(ShadowError::ZeroLength));

        // Out of bounds
        let mut out = [0u8; 4];
        assert_eq!(
            table.read_range(14, &mut out),
            Err(ShadowError::OutOfBounds)
        );
        assert_eq!(
            table.write_range(14, &[1, 2, 3, 4]),
            Err(ShadowError::OutOfBounds)
        );
    }

    #[test]
    fn partial_block_queries() {
        let mut table: TestTable = ShadowTable::new();
        table.mark_dirty(4, 4).unwrap(); // only block 1

        // Queries that include block 1 should return dirty
        assert!(table.is_dirty(3, 2).unwrap()); // spans blocks 0-1
        assert!(table.is_dirty(4, 1).unwrap()); // just block 1
        assert!(table.is_dirty(6, 3).unwrap()); // spans blocks 1-2

        // Queries that don't include block 1
        assert!(!table.is_dirty(0, 4).unwrap()); // block 0 only
        assert!(!table.is_dirty(8, 8).unwrap()); // blocks 2-3
    }
}