//! cu_sdlogger/logger.rs — linear unified logger writing straight to an
//! SD/eMMC block device.

use alloc::sync::Arc;
use core::cell::UnsafeCell;

use bincode::config::standard;
use bincode::enc::write::Writer as BincodeWriter;
use bincode::error::EncodeError;
use bincode::{Encode, encode_into_slice, encode_into_writer};
use cu29::prelude::*;

use crate::sdmmc::{Block, BlockCount, BlockDevice, BlockIdx};

10const BLK: usize = 512;
11
12/// Wrapper to share a block device behind `Arc` without adding locking.
13///
14/// # Safety
15/// This type provides unsynchronized interior mutability. Only use it when
16/// all access is externally serialized (single-threaded execution or a
17/// higher-level mutex).
18pub struct ForceSyncSend<T>(UnsafeCell<T>);
19impl<T> ForceSyncSend<T> {
20    pub const fn new(inner: T) -> Self {
21        Self(UnsafeCell::new(inner))
22    }
23    #[inline]
24    fn inner(&self) -> &T {
25        // SAFETY: Callers must serialize access to avoid mutable aliasing.
26        unsafe { &*self.0.get() }
27    }
28    #[inline]
29    #[allow(clippy::mut_from_ref)]
30    fn inner_mut(&self) -> &mut T {
31        // SAFETY: Callers must ensure exclusive access (no concurrent use).
32        unsafe { &mut *self.0.get() }
33    }
34}
35// SAFETY: This wrapper does not synchronize access; callers must serialize use.
36unsafe impl<T: Send> Send for ForceSyncSend<T> {}
37// SAFETY: Sharing across threads is only safe if access is externally serialized.
38unsafe impl<T: Send> Sync for ForceSyncSend<T> {}
39
40impl<B: BlockDevice> BlockDevice for ForceSyncSend<B> {
41    type Error = B::Error;
42
43    #[cfg(all(feature = "eh02", not(feature = "eh1")))]
44    fn read(&self, blocks: &mut [Block], start: BlockIdx, reason: &str) -> Result<(), Self::Error> {
45        self.inner_mut().read(blocks, start, reason)
46    }
47
48    #[cfg(feature = "eh1")]
49    fn read(&self, blocks: &mut [Block], start: BlockIdx) -> Result<(), Self::Error> {
50        self.inner_mut().read(blocks, start)
51    }
52
53    fn write(&self, blocks: &[Block], start: BlockIdx) -> Result<(), Self::Error> {
54        self.inner_mut().write(blocks, start)
55    }
56    fn num_blocks(&self) -> Result<BlockCount, Self::Error> {
57        self.inner().num_blocks()
58    }
59}
60
61/// Implements a bincode `Writer` for linear logging over a block device.
62pub struct SdBlockWriter<BD: BlockDevice> {
63    bd: Arc<ForceSyncSend<BD>>,
64    current_blk: BlockIdx, // absolute block number for payload
65    position_blk: usize,   // 0..512
66    capacity_bytes: usize, // payload capacity for this section
67    written: usize,        // payload bytes written so far
68    buffer: Block,         // RMW buffer for current block
69}
70
71impl<BD: BlockDevice> SdBlockWriter<BD> {
72    pub fn new(bd: Arc<ForceSyncSend<BD>>, start_block: BlockIdx, capacity_bytes: usize) -> Self {
73        Self {
74            bd,
75            current_blk: start_block,
76            position_blk: 0,
77            capacity_bytes,
78            written: 0,
79            buffer: Block::new(),
80        }
81    }
82
83    #[inline]
84    fn flush_full(&mut self) -> Result<(), EncodeError> {
85        self.bd
86            .write(core::slice::from_ref(&self.buffer), self.current_blk)
87            .expect("write failed on full block");
88        self.current_blk += BlockCount(1);
89        self.position_blk = 0;
90        self.buffer = Block::new();
91        Ok(())
92    }
93
94    /// Force-flush the current tail block if partially filled.
95    pub fn flush_tail(&mut self) -> Result<(), EncodeError> {
96        if self.position_blk != 0 {
97            self.bd
98                .write(core::slice::from_ref(&self.buffer), self.current_blk)
99                .expect("write failed on flush");
100            // Advance to the next block, start fresh at the boundary.
101            self.current_blk += BlockCount(1);
102            self.position_blk = 0;
103            self.buffer = Block::new();
104        }
105        Ok(())
106    }
107}
108
109impl<BD: BlockDevice> BincodeWriter for SdBlockWriter<BD> {
110    fn write(&mut self, mut bytes: &[u8]) -> Result<(), EncodeError> {
111        if self
112            .written
113            .checked_add(bytes.len())
114            .is_none_or(|w| w > self.capacity_bytes)
115        {
116            return Err(EncodeError::UnexpectedEnd);
117        }
118
119        if self.position_blk != 0 {
120            let take = core::cmp::min(BLK - self.position_blk, bytes.len());
121            self.buffer.as_mut()[self.position_blk..self.position_blk + take]
122                .copy_from_slice(&bytes[..take]);
123            self.position_blk += take;
124            self.written += take;
125            bytes = &bytes[take..];
126            if self.position_blk == BLK {
127                self.flush_full()?;
128            }
129        }
130
131        while bytes.len() >= BLK {
132            let mut blk = Block::new();
133            blk.as_mut().copy_from_slice(&bytes[..BLK]);
134            self.bd
135                .write(core::slice::from_ref(&blk), self.current_blk)
136                .expect("write failed");
137            self.current_blk += BlockCount(1);
138            self.written += BLK;
139            bytes = &bytes[BLK..];
140        }
141
142        if !bytes.is_empty() {
143            let n = bytes.len();
144            self.buffer.as_mut()[self.position_blk..self.position_blk + n].copy_from_slice(bytes);
145            self.position_blk += n;
146            self.written += n;
147            if self.position_blk == BLK {
148                self.flush_full()?;
149            }
150        }
151
152        Ok(())
153    }
154}
155
156pub struct EMMCSectionStorage<BD: BlockDevice> {
157    bd: Arc<ForceSyncSend<BD>>,
158    start_block: BlockIdx,
159    content_writer: SdBlockWriter<BD>,
160}
161
162impl<BD: BlockDevice> EMMCSectionStorage<BD> {
163    fn new(bd: Arc<ForceSyncSend<BD>>, start_block: BlockIdx, data_capacity: usize) -> Self {
164        // data_capacity is the space left in that section minus the header.
165        let content_writer =
166            SdBlockWriter::new(bd.clone(), start_block + BlockCount(1), data_capacity); // +1 to skip the header
167        Self {
168            bd,
169            start_block,
170            content_writer,
171        }
172    }
173}
174
175impl<BD: BlockDevice + Send> SectionStorage for EMMCSectionStorage<BD> {
176    fn initialize<E: Encode>(&mut self, header: &E) -> Result<usize, EncodeError> {
177        self.post_update_header(header)?;
178        Ok(SECTION_HEADER_COMPACT_SIZE as usize)
179    }
180
181    fn post_update_header<E: Encode>(&mut self, header: &E) -> Result<usize, EncodeError> {
182        // Re-encode header and write again to header blocks.
183        let mut block = Block::new();
184        let wrote = encode_into_slice(header, block.as_mut(), standard())?;
185        self.bd
186            .write(&[block], self.start_block)
187            .map_err(|_| EncodeError::UnexpectedEnd)?;
188        Ok(wrote)
189    }
190
191    fn append<E: Encode>(&mut self, entry: &E) -> Result<usize, EncodeError> {
192        let bf = self.content_writer.written;
193        encode_into_writer(entry, &mut self.content_writer, standard())
194            .map_err(|_| EncodeError::UnexpectedEnd)?;
195        Ok(self.content_writer.written - bf)
196    }
197
198    fn flush(&mut self) -> CuResult<usize> {
199        let bf = self.content_writer.written;
200        self.content_writer
201            .flush_tail()
202            .map_err(|_| CuError::from("flush failed"))?;
203        Ok(self.content_writer.written - bf)
204    }
205}
206
207pub struct EMMCLogger<BD: BlockDevice> {
208    bd: Arc<ForceSyncSend<BD>>,
209    next_block: BlockIdx,
210    last_block: BlockIdx,
211    temporary_end_marker: Option<BlockIdx>,
212}
213
214impl<BD: BlockDevice> EMMCLogger<BD> {
215    pub fn new(bd: BD, start: BlockIdx, size: BlockCount) -> CuResult<Self> {
216        let main_header = MainHeader {
217            magic: MAIN_MAGIC,
218            first_section_offset: BLK as u16,
219            page_size: BLK as u16,
220        };
221        let mut block: Block = Block::new();
222
223        encode_into_slice(&main_header, block.as_mut(), standard())
224            .map_err(|_| CuError::from("Could not encode the main header"))?;
225
226        bd.write(&[block], start)
227            .map_err(|_| CuError::from("Could not write main header"))?;
228
229        let next_block = start + BlockCount(1); // +1 to skip the main header
230        let last_block = start + size;
231
232        Ok(Self {
233            bd: Arc::new(ForceSyncSend::new(bd)),
234            next_block,
235            last_block,
236            temporary_end_marker: None,
237        })
238    }
239
240    // Allocate a section in this logger and return the start block index.
241    fn alloc_section(&mut self, size: BlockCount) -> CuResult<BlockIdx> {
242        let start = self.next_block;
243        self.next_block += size;
244        if self.next_block > self.last_block {
245            return Err(CuError::from("out of space"));
246        }
247        Ok(start)
248    }
249
250    fn clear_temporary_end_marker(&mut self) {
251        if let Some(marker) = self.temporary_end_marker.take() {
252            self.next_block = marker;
253        }
254    }
255
256    fn write_end_marker(&mut self, temporary: bool) -> CuResult<()> {
257        let block_size = SECTION_HEADER_COMPACT_SIZE as usize;
258        let blocks_needed = 1; // header only
259        let start_block = self.next_block;
260        let end_block = start_block + BlockCount(blocks_needed as u32);
261        if end_block > self.last_block {
262            return Err(CuError::from("out of space"));
263        }
264
265        let header = SectionHeader {
266            magic: SECTION_MAGIC,
267            block_size: SECTION_HEADER_COMPACT_SIZE,
268            entry_type: UnifiedLogType::LastEntry,
269            offset_to_next_section: (blocks_needed * block_size) as u32,
270            used: 0,
271            is_open: temporary,
272        };
273
274        let mut header_block = Block::new();
275        encode_into_slice(&header, header_block.as_mut(), standard())
276            .map_err(|_| CuError::from("Could not encode end-of-log header"))?;
277        self.bd
278            .write(&[header_block], start_block)
279            .map_err(|_| CuError::from("Could not write end-of-log header"))?;
280
281        self.temporary_end_marker = Some(start_block);
282        self.next_block = end_block;
283        Ok(())
284    }
285}
286
287impl<BD> UnifiedLogWrite<EMMCSectionStorage<BD>> for EMMCLogger<BD>
288where
289    BD: BlockDevice + Send + Sync + 'static,
290{
291    fn add_section(
292        &mut self,
293        entry_type: UnifiedLogType,
294        requested_section_size: usize,
295    ) -> CuResult<SectionHandle<EMMCSectionStorage<BD>>> {
296        self.clear_temporary_end_marker();
297        let block_size = SECTION_HEADER_COMPACT_SIZE; // 512
298        if block_size != 512 {
299            panic!("EMMC: only 512 byte blocks supported");
300        }
301
302        let section_header = SectionHeader {
303            magic: SECTION_MAGIC,
304            block_size,
305            entry_type,
306            offset_to_next_section: requested_section_size as u32,
307            used: 0,
308            is_open: true,
309        };
310
311        let section_size_in_blks: u32 = (requested_section_size / block_size as usize) as u32 + 1; // always round up
312        let start_block = self.alloc_section(BlockCount(section_size_in_blks))?;
313
314        let storage = EMMCSectionStorage::new(
315            Arc::clone(&self.bd),
316            start_block,
317            ((section_size_in_blks - 1) * block_size as u32) as usize,
318        );
319
320        // Create handle (this will call `storage.initialize(header)`).
321        let handle = SectionHandle::create(section_header, storage)?;
322        self.write_end_marker(true)?;
323        Ok(handle)
324    }
325
326    fn flush_section(&mut self, section: &mut SectionHandle<EMMCSectionStorage<BD>>) {
327        section.mark_closed();
328        // and the end of the stream is ok.
329        section
330            .get_storage_mut()
331            .flush()
332            .expect("EMMC: flush failed");
333        // and be sure the header is up-to-date
334        section
335            .post_update_header()
336            .expect("EMMC: post update header failed");
337    }
338
339    fn status(&self) -> UnifiedLogStatus {
340        UnifiedLogStatus {
341            total_used_space: (self.next_block.0 as usize) * BLK,
342            total_allocated_space: (self.next_block.0 as usize) * BLK,
343        }
344    }
345}
346
347impl<BD: BlockDevice> Drop for EMMCLogger<BD> {
348    fn drop(&mut self) {
349        self.clear_temporary_end_marker();
350        if let Err(e) = self.write_end_marker(false) {
351            panic!("Failed to flush the unified logger: {}", e);
352        }
353    }
354}