embedded_shadow/staged/patch.rs

use heapless::Vec;

use crate::{ShadowError, types::StagingBuffer};

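/// A single staged write: a target address plus the location of its bytes in
/// the shared `data` buffer.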
#[derive(Clone, Copy)]
struct StagedWrite {
    addr: u16,
    len: u16,
    off: u16, // offset into data vec
}

/// Fixed-capacity staging buffer for transactional writes.
///
/// `DC` is the data capacity in bytes, `EC` is the max number of entries.
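///
/// # Example
///
/// A minimal usage sketch (the `<64, 8>` capacities are illustrative, and the
/// `StagingBuffer` trait must be in scope for the methods below):
///
/// ```ignore
/// let mut stage: PatchStagingBuffer<64, 8> = PatchStagingBuffer::new();
///
/// // Stage a write, then overlay it onto a read of the same range.
/// stage.write_staged(0x10, &[0xAA, 0xBB]).unwrap();
///
/// let mut out = [0u8; 4];
/// stage.apply_overlay(0x10, &mut out).unwrap();
/// assert_eq!(out, [0xAA, 0xBB, 0x00, 0x00]);
///
/// // Discard the staged writes once the transaction completes.
/// stage.clear_staged().unwrap();
/// ```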
pub struct PatchStagingBuffer<const DC: usize, const EC: usize> {
    data: Vec<u8, DC>,
    entries: Vec<StagedWrite, EC>,
}

impl<const DC: usize, const EC: usize> PatchStagingBuffer<DC, EC> {
    pub const fn new() -> Self {
        Self {
            data: Vec::new(),
            entries: Vec::new(),
        }
    }

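    /// Append `bytes` to the backing data buffer, returning the offset at
    /// which they were stored.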
    fn push_bytes(&mut self, bytes: &[u8]) -> Result<u16, ShadowError> {
        let off = self.data.len();
        if off + bytes.len() > DC {
            return Err(ShadowError::StageFull);
        }

        self.data
            .extend_from_slice(bytes)
            .map_err(|_| ShadowError::StageFull)?;

        Ok(off as u16)
    }
}

impl<const DC: usize, const EC: usize> Default for PatchStagingBuffer<DC, EC> {
    fn default() -> Self {
        Self::new()
    }
}

impl<const DC: usize, const EC: usize> StagingBuffer for PatchStagingBuffer<DC, EC> {
    fn any_staged(&self) -> bool {
        !self.entries.is_empty()
    }

    fn for_each_staged<F>(&self, mut f: F) -> Result<(), ShadowError>
    where
        F: FnMut(u16, &[u8]) -> Result<(), ShadowError>,
    {
        for e in self.entries.iter() {
            let buf = &self.data[e.off as usize..(e.off + e.len) as usize];
            f(e.addr, buf)?;
        }
        Ok(())
    }

    fn write_staged(&mut self, addr: u16, data: &[u8]) -> Result<(), ShadowError> {
        let off = self.push_bytes(data)?;

        let entry = StagedWrite {
            addr,
            len: data.len() as u16,
            off,
        };

        self.entries
            .push(entry)
            .map_err(|_| ShadowError::StageFull)?;

        Ok(())
    }

    fn apply_overlay(&self, addr: u16, out: &mut [u8]) -> Result<(), ShadowError> {
        if !self.any_staged() {
            return Ok(());
        }

        // overlay staged writes onto out
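        // Entries are visited in insertion order, so where staged writes
        // overlap, the most recently staged bytes win.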
        for e in self.entries.iter() {
            let start = e.addr as usize;
            let end = start + e.len as usize;
            let out_start = addr as usize;
            let out_end = out_start + out.len();

            // Check for overlap
            if end <= out_start || start >= out_end {
                continue; // No overlap
            }

            // Calculate overlapping range
            let overlap_start = start.max(out_start);
            let overlap_end = end.min(out_end);

            let data_i = overlap_start - start + e.off as usize;
            let out_i = overlap_start - out_start;
            let n = overlap_end - overlap_start;
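            // Illustrative example: a staged write at addr 4 with len 4 and a
            // read of 8 bytes starting at addr 0 give overlap [4, 8), so
            // n = 4, out_i = 4, and data_i = e.off.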

            // Write staged data into the output buffer
            out[out_i..out_i + n].copy_from_slice(&self.data[data_i..data_i + n]);
        }

        Ok(())
    }

    fn clear_staged(&mut self) -> Result<(), ShadowError> {
        self.data.clear();
        self.entries.clear();
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_support::TestStage;

    #[test]
    fn write_staged_accumulates_entries() {
        let mut stage = TestStage::new();

        assert!(!stage.any_staged());

        stage.write_staged(0, &[0x01, 0x02]).unwrap();
        assert!(stage.any_staged());

        stage.write_staged(10, &[0x03, 0x04]).unwrap();

        let mut count = 0;
        stage
            .for_each_staged(|_, _| {
                count += 1;
                Ok(())
            })
            .unwrap();
        assert_eq!(count, 2);
    }

    #[test]
    fn stage_full_on_data_overflow() {
        let mut stage = TestStage::new();

        // Fill most of the data buffer (64 bytes capacity)
        stage.write_staged(0, &[0xFF; 60]).unwrap();

        // This should fail - only 4 bytes left but trying to write 8
        assert_eq!(
            stage.write_staged(100, &[0xAA; 8]),
            Err(ShadowError::StageFull)
        );
    }

    #[test]
    fn stage_full_on_entry_overflow() {
        let mut stage = TestStage::new();

        // Fill all entry slots (8 max)
        for i in 0..8 {
            stage.write_staged(i * 2, &[0x01]).unwrap();
        }

        // 9th entry should fail
        assert_eq!(
            stage.write_staged(100, &[0x01]),
            Err(ShadowError::StageFull)
        );
    }

    #[test]
    fn clear_staged_empties_buffer() {
        let mut stage = TestStage::new();
        stage.write_staged(0, &[0x01, 0x02, 0x03]).unwrap();
        stage.write_staged(10, &[0x04, 0x05]).unwrap();

        assert!(stage.any_staged());

        stage.clear_staged().unwrap();

        assert!(!stage.any_staged());
    }

    #[test]
    fn apply_overlay_no_overlap_unchanged() {
        let mut stage = TestStage::new();

        // Stage write at address 20-23
        stage.write_staged(20, &[0xAA, 0xBB, 0xCC, 0xDD]).unwrap();

        // Read range 0-3 (no overlap with staged)
        let mut out = [0x11, 0x22, 0x33, 0x44];
        stage.apply_overlay(0, &mut out).unwrap();

        // Output unchanged
        assert_eq!(out, [0x11, 0x22, 0x33, 0x44]);
    }

    #[test]
    fn apply_overlay_full_overlap() {
        let mut stage = TestStage::new();

        // Stage write at address 0-3
        stage.write_staged(0, &[0xAA, 0xBB, 0xCC, 0xDD]).unwrap();

        // Read range 0-3 (full overlap)
        let mut out = [0x00; 4];
        stage.apply_overlay(0, &mut out).unwrap();

        assert_eq!(out, [0xAA, 0xBB, 0xCC, 0xDD]);
    }

    #[test]
    fn apply_overlay_partial_overlap_start() {
        let mut stage = TestStage::new();

        // Stage write at address 4-7
        stage.write_staged(4, &[0xAA, 0xBB, 0xCC, 0xDD]).unwrap();

        // Read range 0-7 (overlaps staged at 4-7)
        let mut out = [0x00; 8];
        stage.apply_overlay(0, &mut out).unwrap();

        // First 4 bytes unchanged, last 4 have staged data
        assert_eq!(out, [0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD]);
    }

    #[test]
    fn apply_overlay_multiple_overlapping_writes() {
        let mut stage = TestStage::new();

        // Stage two overlapping writes at same address
        stage.write_staged(0, &[0x11, 0x22, 0x33, 0x44]).unwrap();
        stage.write_staged(2, &[0xAA, 0xBB]).unwrap(); // Overwrites bytes 2-3

        let mut out = [0x00; 4];
        stage.apply_overlay(0, &mut out).unwrap();

        // Later write wins for overlapping region
        assert_eq!(out, [0x11, 0x22, 0xAA, 0xBB]);
    }
}