//! cu_sdlogger/logger.rs — linear, append-only bincode logger over a raw block device (SD/eMMC).

1use alloc::sync::Arc;
2use bincode::config::standard;
3use bincode::enc::write::Writer as BincodeWriter;
4use bincode::error::EncodeError;
5use bincode::{encode_into_slice, encode_into_writer, Encode};
6use core::cell::UnsafeCell;
7use cu29::prelude::*;
8use embedded_sdmmc::{Block, BlockCount, BlockDevice, BlockIdx};
9
10const BLK: usize = 512;
11
/// Wrapper that force-implements `Send`/`Sync` for a value that is not
/// thread-safe, so a `BlockDevice` can be shared through `Arc`.
///
/// This deliberately bypasses the compiler's thread-safety checking. It is
/// only sound if the surrounding system guarantees the inner value is never
/// accessed from two contexts at once — NOTE(review): confirm that invariant
/// holds wherever this wrapper is shared.
pub struct ForceSyncSend<T>(UnsafeCell<T>);
impl<T> ForceSyncSend<T> {
    /// Wraps `inner`, taking ownership of it.
    pub const fn new(inner: T) -> Self {
        Self(UnsafeCell::new(inner))
    }
    #[inline]
    fn inner(&self) -> &T {
        // SAFETY: relies on the external guarantee (see type docs) that no
        // mutable access is in flight while this shared borrow exists.
        unsafe { &*self.0.get() }
    }
    #[inline]
    #[allow(clippy::mut_from_ref)]
    fn inner_mut(&self) -> &mut T {
        // SAFETY: hands out `&mut T` from `&self`; this is UB if two such
        // borrows ever coexist — the external single-context guarantee (see
        // type docs) is what makes it sound.
        unsafe { &mut *self.0.get() }
    }
}
// SAFETY: these impls do NOT make `T` thread-safe; they only assert that the
// surrounding system confines access to one context at a time (see type docs).
unsafe impl<T> Send for ForceSyncSend<T> {}
unsafe impl<T> Sync for ForceSyncSend<T> {}
29
/// Delegates `BlockDevice` straight through to the wrapped device.
impl<B: BlockDevice> BlockDevice for ForceSyncSend<B> {
    type Error = B::Error;
    fn read(&self, blocks: &mut [Block], start: BlockIdx) -> Result<(), Self::Error> {
        // Goes through `inner_mut` — presumably the underlying driver mutates
        // internal state during I/O; TODO confirm a shared borrow wouldn't do.
        self.inner_mut().read(blocks, start)
    }
    fn write(&self, blocks: &[Block], start: BlockIdx) -> Result<(), Self::Error> {
        self.inner_mut().write(blocks, start)
    }
    fn num_blocks(&self) -> Result<BlockCount, Self::Error> {
        // Read-only query: the shared borrow is sufficient here.
        self.inner().num_blocks()
    }
}
42
/// Implements a bincode `Writer` for linear logging over a block device.
///
/// Writes are streamed block-by-block: a single read-modify-write buffer
/// holds the current partially-filled block, while whole 512-byte chunks are
/// written straight to the device.
pub struct SdBlockWriter<BD: BlockDevice> {
    bd: Arc<ForceSyncSend<BD>>,
    current_blk: BlockIdx, // absolute block number for payload
    position_blk: usize,   // byte offset inside the current block, 0..512
    capacity_bytes: usize, // payload capacity for this section
    written: usize,        // payload bytes written so far (excludes padding)
    buffer: Block,         // RMW buffer for current block
}
52
53impl<BD: BlockDevice> SdBlockWriter<BD> {
54    pub fn new(bd: Arc<ForceSyncSend<BD>>, start_block: BlockIdx, capacity_bytes: usize) -> Self {
55        Self {
56            bd,
57            current_blk: start_block,
58            position_blk: 0,
59            capacity_bytes,
60            written: 0,
61            buffer: Block::new(),
62        }
63    }
64
65    #[inline]
66    fn flush_full(&mut self) -> Result<(), EncodeError> {
67        self.bd
68            .write(core::slice::from_ref(&self.buffer), self.current_blk)
69            .expect("write failed on full block");
70        self.current_blk += BlockCount(1);
71        self.position_blk = 0;
72        self.buffer = Block::new();
73        Ok(())
74    }
75
76    /// Force-flush the current tail block if partially filled.
77    pub fn flush_tail(&mut self) -> Result<(), EncodeError> {
78        if self.position_blk != 0 {
79            self.bd
80                .write(core::slice::from_ref(&self.buffer), self.current_blk)
81                .expect("write failed on flush");
82            // Advance to the next block, start fresh at the boundary.
83            self.current_blk += BlockCount(1);
84            self.position_blk = 0;
85            self.buffer = Block::new();
86        }
87        Ok(())
88    }
89}
90
91impl<BD: BlockDevice> BincodeWriter for SdBlockWriter<BD> {
92    fn write(&mut self, mut bytes: &[u8]) -> Result<(), EncodeError> {
93        if self
94            .written
95            .checked_add(bytes.len())
96            .is_none_or(|w| w > self.capacity_bytes)
97        {
98            return Err(EncodeError::UnexpectedEnd);
99        }
100
101        if self.position_blk != 0 {
102            let take = core::cmp::min(BLK - self.position_blk, bytes.len());
103            self.buffer.as_mut()[self.position_blk..self.position_blk + take]
104                .copy_from_slice(&bytes[..take]);
105            self.position_blk += take;
106            self.written += take;
107            bytes = &bytes[take..];
108            if self.position_blk == BLK {
109                self.flush_full()?;
110            }
111        }
112
113        while bytes.len() >= BLK {
114            let mut blk = Block::new();
115            blk.as_mut().copy_from_slice(&bytes[..BLK]);
116            self.bd
117                .write(core::slice::from_ref(&blk), self.current_blk)
118                .expect("write failed");
119            self.current_blk += BlockCount(1);
120            self.written += BLK;
121            bytes = &bytes[BLK..];
122        }
123
124        if !bytes.is_empty() {
125            let n = bytes.len();
126            self.buffer.as_mut()[self.position_blk..self.position_blk + n].copy_from_slice(bytes);
127            self.position_blk += n;
128            self.written += n;
129            if self.position_blk == BLK {
130                self.flush_full()?;
131            }
132        }
133
134        Ok(())
135    }
136}
137
/// One log section on the device: a header block at `start_block` followed by
/// a linear payload stream written through `content_writer`.
pub struct EMMCSectionStorage<BD: BlockDevice> {
    bd: Arc<ForceSyncSend<BD>>,   // used to (re)write the header block
    start_block: BlockIdx,        // block holding this section's header
    content_writer: SdBlockWriter<BD>, // payload stream, starts one block later
}
143
144impl<BD: BlockDevice> EMMCSectionStorage<BD> {
145    fn new(bd: Arc<ForceSyncSend<BD>>, start_block: BlockIdx, data_capacity: usize) -> Self {
146        // data_capacity is the space left in that section minus the header.
147        let content_writer =
148            SdBlockWriter::new(bd.clone(), start_block + BlockCount(1), data_capacity); // +1 to skip the header
149        Self {
150            bd,
151            start_block,
152            content_writer,
153        }
154    }
155}
156
157impl<BD: BlockDevice> SectionStorage for EMMCSectionStorage<BD> {
158    fn initialize<E: Encode>(&mut self, header: &E) -> Result<usize, EncodeError> {
159        self.post_update_header(header)?;
160        Ok(SECTION_HEADER_COMPACT_SIZE as usize)
161    }
162
163    fn post_update_header<E: Encode>(&mut self, header: &E) -> Result<usize, EncodeError> {
164        // Re-encode header and write again to header blocks.
165        let mut block = Block::new();
166        let wrote = encode_into_slice(header, block.as_mut(), standard())?;
167        self.bd
168            .write(&[block], self.start_block)
169            .map_err(|_| EncodeError::UnexpectedEnd)?;
170        Ok(wrote)
171    }
172
173    fn append<E: Encode>(&mut self, entry: &E) -> Result<usize, EncodeError> {
174        let bf = self.content_writer.written;
175        encode_into_writer(entry, &mut self.content_writer, standard())
176            .map_err(|_| EncodeError::UnexpectedEnd)?;
177        Ok(self.content_writer.written - bf)
178    }
179
180    fn flush(&mut self) -> CuResult<usize> {
181        let bf = self.content_writer.written;
182        self.content_writer
183            .flush_tail()
184            .map_err(|_| CuError::from("flush failed"))?;
185        Ok(self.content_writer.written - bf)
186    }
187}
188
/// Bump-allocating logger over a contiguous block-device region: a main
/// header block followed by consecutively allocated sections.
pub struct EMMCLogger<BD: BlockDevice> {
    bd: Arc<ForceSyncSend<BD>>,
    next_block: BlockIdx, // next free block for section allocation
    last_block: BlockIdx, // first block past the managed region (exclusive end)
}
194
195impl<BD: BlockDevice> EMMCLogger<BD> {
196    pub fn new(bd: BD, start: BlockIdx, size: BlockCount) -> CuResult<Self> {
197        let main_header = MainHeader {
198            magic: MAIN_MAGIC,
199            first_section_offset: BLK as u16,
200            page_size: BLK as u16,
201        };
202        let mut block: Block = Block::new();
203
204        encode_into_slice(&main_header, block.as_mut(), standard())
205            .map_err(|_| CuError::from("Could not encode the main header"))?;
206
207        bd.write(&[block], start)
208            .map_err(|_| CuError::from("Could not write main header"))?;
209
210        let next_block = start + BlockCount(1); // +1 to skip the main header
211        let last_block = start + size;
212
213        Ok(Self {
214            bd: Arc::new(ForceSyncSend::new(bd)),
215            next_block,
216            last_block,
217        })
218    }
219
220    // Allocate a section in this logger and return the start block index.
221    fn alloc_section(&mut self, size: BlockCount) -> CuResult<BlockIdx> {
222        let start = self.next_block;
223        self.next_block += size;
224        if self.next_block > self.last_block {
225            return Err(CuError::from("out of space"));
226        }
227        Ok(start)
228    }
229}
230
231impl<BD> UnifiedLogWrite<EMMCSectionStorage<BD>> for EMMCLogger<BD>
232where
233    BD: BlockDevice + Send + Sync + 'static,
234{
235    fn add_section(
236        &mut self,
237        entry_type: UnifiedLogType,
238        requested_section_size: usize,
239    ) -> CuResult<SectionHandle<EMMCSectionStorage<BD>>> {
240        let block_size = SECTION_HEADER_COMPACT_SIZE; // 512
241        if block_size != 512 {
242            panic!("EMMC: only 512 byte blocks supported");
243        }
244
245        let section_header = SectionHeader {
246            magic: SECTION_MAGIC,
247            block_size,
248            entry_type,
249            offset_to_next_section: requested_section_size as u32,
250            used: 0,
251        };
252
253        let section_size_in_blks: u32 = (requested_section_size / block_size as usize) as u32 + 1; // always round up
254        let start_block = self.alloc_section(BlockCount(section_size_in_blks))?;
255
256        let storage = EMMCSectionStorage::new(
257            Arc::clone(&self.bd),
258            start_block,
259            ((section_size_in_blks - 1) * block_size as u32) as usize,
260        );
261
262        // Create handle (this will call `storage.initialize(header)`).
263        SectionHandle::create(section_header, storage)
264    }
265
266    fn flush_section(&mut self, section: &mut SectionHandle<EMMCSectionStorage<BD>>) {
267        // and the end of the stream is ok.
268        section
269            .get_storage_mut()
270            .flush()
271            .expect("EMMC: flush failed");
272        // and be sure the header is up-to-date
273        section
274            .post_update_header()
275            .expect("EMMC: post update header failed");
276    }
277
278    fn status(&self) -> UnifiedLogStatus {
279        UnifiedLogStatus {
280            total_used_space: (self.next_block.0 as usize) * BLK,
281            total_allocated_space: (self.next_block.0 as usize) * BLK,
282        }
283    }
284}