memvid_core/io/wal.rs

use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};

use crate::{
    constants::{WAL_CHECKPOINT_PERIOD, WAL_CHECKPOINT_THRESHOLD},
    error::{MemvidError, Result},
    types::Header,
};

// Each WAL record header: [seq: u64][len: u32][reserved: 4 bytes][checksum: 32 bytes]
const ENTRY_HEADER_SIZE: usize = 48;
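
// Compile-time sanity check (an illustrative addition): the field widths in
// the layout comment above must sum to ENTRY_HEADER_SIZE, so a layout change
// cannot silently desync the two.
const _: () = assert!(ENTRY_HEADER_SIZE == 8 + 4 + 4 + 32);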

#[derive(Debug, Clone, Copy, PartialEq)]
pub struct WalStats {
    pub region_size: u64,
    pub pending_bytes: u64,
    pub appends_since_checkpoint: u64,
    pub sequence: u64,
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WalRecord {
    pub sequence: u64,
    pub payload: Vec<u8>,
}

#[derive(Debug)]
pub struct EmbeddedWal {
    file: File,
    region_offset: u64,
    region_size: u64,
    write_head: u64,
    checkpoint_head: u64,
    pending_bytes: u64,
    sequence: u64,
    checkpoint_sequence: u64,
    appends_since_checkpoint: u64,
    read_only: bool,
}

impl EmbeddedWal {
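    /// Opens the embedded WAL region described by `header` with write access.
    ///
    /// A minimal usage sketch (assumes a `File` and `Header` prepared the way
    /// the tests at the bottom of this module prepare them):
    ///
    /// ```ignore
    /// let mut wal = EmbeddedWal::open(&file, &header)?;
    /// let seq = wal.append_entry(b"payload")?;
    /// assert_eq!(seq, 1); // sequences are 1-based on a fresh region
    /// ```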
    pub fn open(file: &File, header: &Header) -> Result<Self> {
        Self::open_internal(file, header, false)
    }

    pub fn open_read_only(file: &File, header: &Header) -> Result<Self> {
        Self::open_internal(file, header, true)
    }

    fn open_internal(file: &File, header: &Header, read_only: bool) -> Result<Self> {
        if header.wal_size == 0 {
            return Err(MemvidError::InvalidHeader {
                reason: "wal_size must be non-zero".into(),
            });
        }
        let mut clone = file.try_clone()?;
        let region_offset = header.wal_offset;
        let region_size = header.wal_size;
        let checkpoint_sequence = header.wal_sequence;

        let (entries, next_head) = Self::scan_records(&mut clone, region_offset, region_size)?;

        let pending_bytes = entries
            .iter()
            .filter(|entry| entry.sequence > checkpoint_sequence)
            .map(|entry| entry.total_size)
            .sum();
        let sequence = entries
            .last()
            .map(|entry| entry.sequence)
            .unwrap_or(checkpoint_sequence);

        let mut wal = Self {
            file: clone,
            region_offset,
            region_size,
            write_head: next_head % region_size,
            checkpoint_head: header.wal_checkpoint_pos % region_size,
            pending_bytes,
            sequence,
            checkpoint_sequence,
            appends_since_checkpoint: 0,
            read_only,
        };

        if !wal.read_only {
            wal.initialise_sentinel()?;
        }
        Ok(wal)
    }

    fn assert_writable(&self) -> Result<()> {
        if self.read_only {
            return Err(MemvidError::Lock(
                "wal is read-only; reopen memory with write access".into(),
            ));
        }
        Ok(())
    }

    pub fn append_entry(&mut self, payload: &[u8]) -> Result<u64> {
        self.assert_writable()?;
        let payload_len = payload.len();
        if payload_len > u32::MAX as usize {
            return Err(MemvidError::CheckpointFailed {
                reason: "WAL payload too large".into(),
            });
        }

        let entry_size = ENTRY_HEADER_SIZE as u64 + payload_len as u64;
        if entry_size > self.region_size {
            return Err(MemvidError::CheckpointFailed {
                reason: "embedded WAL region too small for entry".into(),
            });
        }
        if self.pending_bytes + entry_size > self.region_size {
            return Err(MemvidError::CheckpointFailed {
                reason: "embedded WAL region full".into(),
            });
        }

        // Check whether this entry would run past the end of the region.
        let wrapping = self.write_head + entry_size > self.region_size;
        if wrapping {
            // Wrapping while any bytes are pending (appended since the last
            // checkpoint) would overwrite data that has not been checkpointed
            // yet. Return a "WAL full" error instead of silently overwriting;
            // the caller treats this as the signal to grow the WAL.
            if self.pending_bytes > 0 {
                return Err(MemvidError::CheckpointFailed {
                    reason: "embedded WAL region full".into(),
                });
            }
            self.write_head = 0;
        }

        // Use wrapping arithmetic consistently for the sequence counter.
        let next_sequence = self.sequence.wrapping_add(1);
        tracing::debug!(
            wal.write_head = self.write_head,
            wal.sequence = next_sequence,
            wal.payload_len = payload_len,
            "wal append entry"
        );
        self.write_record(self.write_head, next_sequence, payload)?;

        self.write_head = (self.write_head + entry_size) % self.region_size;
        self.pending_bytes += entry_size;
        self.sequence = next_sequence;
        self.appends_since_checkpoint = self.appends_since_checkpoint.saturating_add(1);

        self.maybe_write_sentinel()?;

        Ok(self.sequence)
    }

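    /// Decides when the caller should checkpoint. Illustrative numbers only
    /// (the real values live in `constants`): if WAL_CHECKPOINT_THRESHOLD were
    /// 0.5 and the region 1 KiB, 512 pending bytes would trigger a checkpoint,
    /// as would reaching WAL_CHECKPOINT_PERIOD appends at any occupancy.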
    pub fn should_checkpoint(&self) -> bool {
        if self.read_only || self.region_size == 0 {
            return false;
        }
        let occupancy = self.pending_bytes as f64 / self.region_size as f64;
        occupancy >= WAL_CHECKPOINT_THRESHOLD
            || self.appends_since_checkpoint >= WAL_CHECKPOINT_PERIOD
    }

    pub fn record_checkpoint(&mut self, header: &mut Header) -> Result<()> {
        self.assert_writable()?;
        self.checkpoint_head = self.write_head;
        self.pending_bytes = 0;
        self.appends_since_checkpoint = 0;
        self.checkpoint_sequence = self.sequence;
        header.wal_checkpoint_pos = self.checkpoint_head;
        header.wal_sequence = self.checkpoint_sequence;
        self.maybe_write_sentinel()
    }

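    /// Returns the records appended since the last checkpoint, leaving them in
    /// place. A recovery sketch (`replay` is a hypothetical caller-side
    /// function, not part of this module):
    ///
    /// ```ignore
    /// for record in wal.pending_records()? {
    ///     replay(record.sequence, &record.payload)?;
    /// }
    /// wal.record_checkpoint(&mut header)?;
    /// ```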
    pub fn pending_records(&mut self) -> Result<Vec<WalRecord>> {
        self.records_after(self.checkpoint_sequence)
    }

    pub fn records_after(&mut self, sequence: u64) -> Result<Vec<WalRecord>> {
        let (entries, next_head) =
            Self::scan_records(&mut self.file, self.region_offset, self.region_size)?;

        self.sequence = entries
            .last()
            .map(|entry| entry.sequence)
            .unwrap_or(self.sequence);
        self.pending_bytes = entries
            .iter()
            .filter(|entry| entry.sequence > self.checkpoint_sequence)
            .map(|entry| entry.total_size)
            .sum();
        self.write_head = next_head % self.region_size;
        if !self.read_only {
            self.initialise_sentinel()?;
        }

        Ok(entries
            .into_iter()
            .filter(|entry| entry.sequence > sequence)
            .map(|entry| WalRecord {
                sequence: entry.sequence,
                payload: entry.payload,
            })
            .collect())
    }

    pub fn stats(&self) -> WalStats {
        WalStats {
            region_size: self.region_size,
            pending_bytes: self.pending_bytes,
            appends_since_checkpoint: self.appends_since_checkpoint,
            sequence: self.sequence,
        }
    }

    pub fn region_offset(&self) -> u64 {
        self.region_offset
    }

    pub fn file(&self) -> &File {
        &self.file
    }

    fn initialise_sentinel(&mut self) -> Result<()> {
        self.maybe_write_sentinel()
    }

    fn write_record(&mut self, position: u64, sequence: u64, payload: &[u8]) -> Result<()> {
        self.assert_writable()?;
        let digest = blake3::hash(payload);
        let mut header = [0u8; ENTRY_HEADER_SIZE];
        header[..8].copy_from_slice(&sequence.to_le_bytes());
        header[8..12].copy_from_slice(&(payload.len() as u32).to_le_bytes());
        // Bytes 12..16 remain zero (reserved).
        header[16..48].copy_from_slice(digest.as_bytes());

        // Combine header and payload into one buffer so a single write_all
        // covers the whole record. This narrows (but does not eliminate) the
        // window in which a crash could leave a header without its payload.
        let mut combined = Vec::with_capacity(ENTRY_HEADER_SIZE + payload.len());
        combined.extend_from_slice(&header);
        combined.extend_from_slice(payload);

        self.seek_and_write(position, &combined)?;
        if tracing::enabled!(tracing::Level::DEBUG) {
            if let Err(err) = self.debug_verify_header(position, sequence, payload.len()) {
                tracing::warn!(error = %err, "wal header verify failed");
            }
        }

        // fsync so the record is durable before append_entry returns.
        self.file.sync_all()?;

        Ok(())
    }

    fn write_zero_header(&mut self, position: u64) -> Result<u64> {
        self.assert_writable()?;
        if self.region_size == 0 {
            return Ok(0);
        }
        let mut pos = position % self.region_size;
        let remaining = self.region_size - pos;
        if remaining < ENTRY_HEADER_SIZE as u64 {
            if remaining > 0 {
                let zero_tail = vec![0u8; remaining as usize];
                self.seek_and_write(pos, &zero_tail)?;
            }
            pos = 0;
        }
        let zero = [0u8; ENTRY_HEADER_SIZE];
        self.seek_and_write(pos, &zero)?;
        Ok(pos)
    }

    fn seek_and_write(&mut self, position: u64, bytes: &[u8]) -> Result<()> {
        self.assert_writable()?;
        let pos = position % self.region_size;
        let absolute = self.region_offset + pos;
        self.file.seek(SeekFrom::Start(absolute))?;
        self.file.write_all(bytes)?;
        Ok(())
    }

    fn maybe_write_sentinel(&mut self) -> Result<()> {
        if self.read_only || self.region_size == 0 {
            return Ok(());
        }
        if self.pending_bytes >= self.region_size {
            return Ok(());
        }
        // The sentinel (an all-zero header) marks the end of valid entries;
        // keep write_head pointing at it so the next append lands there.
        let next = self.write_zero_header(self.write_head)?;
        self.write_head = next;
        Ok(())
    }

    fn scan_records(file: &mut File, offset: u64, size: u64) -> Result<(Vec<ScannedRecord>, u64)> {
        let mut records = Vec::new();
        let mut cursor = 0u64;
        while cursor + ENTRY_HEADER_SIZE as u64 <= size {
            file.seek(SeekFrom::Start(offset + cursor))?;
            let mut header = [0u8; ENTRY_HEADER_SIZE];
            file.read_exact(&mut header)?;

            let sequence = u64::from_le_bytes(header[..8].try_into().map_err(|_| {
                MemvidError::WalCorruption {
                    offset: cursor,
                    reason: "invalid wal sequence header".into(),
                }
            })?);
            let length = u32::from_le_bytes(header[8..12].try_into().map_err(|_| {
                MemvidError::WalCorruption {
                    offset: cursor,
                    reason: "invalid wal length header".into(),
                }
            })?) as u64;
            let checksum = &header[16..48];

            // An all-zero header is the sentinel: no more entries follow.
            if sequence == 0 && length == 0 {
                break;
            }
            if length == 0 || cursor + ENTRY_HEADER_SIZE as u64 + length > size {
                tracing::error!(
                    wal.scan_offset = cursor,
                    wal.sequence = sequence,
                    wal.length = length,
                    wal.region_size = size,
                    "wal record length invalid"
                );
                return Err(MemvidError::WalCorruption {
                    offset: cursor,
                    reason: "wal record length invalid".into(),
                });
            }

            let mut payload = vec![0u8; length as usize];
            file.read_exact(&mut payload)?;
            let expected = blake3::hash(&payload);
            if expected.as_bytes() != checksum {
                return Err(MemvidError::WalCorruption {
                    offset: cursor,
                    reason: "wal record checksum mismatch".into(),
                });
            }

            records.push(ScannedRecord {
                sequence,
                payload,
                total_size: ENTRY_HEADER_SIZE as u64 + length,
            });

            cursor += ENTRY_HEADER_SIZE as u64 + length;
        }

        Ok((records, cursor))
    }
}

#[derive(Debug)]
struct ScannedRecord {
    sequence: u64,
    payload: Vec<u8>,
    total_size: u64,
}

impl EmbeddedWal {
    fn debug_verify_header(
        &mut self,
        position: u64,
        expected_sequence: u64,
        expected_len: usize,
    ) -> Result<()> {
        if self.region_size == 0 {
            return Ok(());
        }
        let pos = position % self.region_size;
        let absolute = self.region_offset + pos;
        let mut buf = [0u8; ENTRY_HEADER_SIZE];
        self.file.seek(SeekFrom::Start(absolute))?;
        self.file.read_exact(&mut buf)?;
        let seq = u64::from_le_bytes(buf[..8].try_into().unwrap());
        let len = u32::from_le_bytes(buf[8..12].try_into().unwrap());
        tracing::debug!(
            wal.verify_position = pos,
            wal.verify_sequence = seq,
            wal.expected_sequence = expected_sequence,
            wal.verify_length = len,
            wal.expected_length = expected_len,
            "wal header verify"
        );
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::constants::WAL_OFFSET;
    use std::io::{Seek, SeekFrom, Write};
    use tempfile::tempfile;

    fn header_for(size: u64) -> Header {
        Header {
            magic: *b"MV2\0",
            version: 0x0201,
            footer_offset: 0,
            wal_offset: WAL_OFFSET,
            wal_size: size,
            wal_checkpoint_pos: 0,
            wal_sequence: 0,
            toc_checksum: [0u8; 32],
        }
    }

    fn prepare_wal(size: u64) -> (File, Header) {
        let file = tempfile().expect("temp file");
        file.set_len(WAL_OFFSET + size).expect("set_len");
        let header = header_for(size);
        (file, header)
    }

    #[test]
    fn append_and_recover() {
        let (file, header) = prepare_wal(1024);
        let mut wal = EmbeddedWal::open(&file, &header).expect("open wal");

        wal.append_entry(b"first").expect("append first");
        wal.append_entry(b"second").expect("append second");

        let records = wal.records_after(0).expect("records");
        assert_eq!(records.len(), 2);
        assert_eq!(records[0].payload, b"first");
        assert_eq!(records[0].sequence, 1);
        assert_eq!(records[1].payload, b"second");
        assert_eq!(records[1].sequence, 2);
    }

    #[test]
    fn wrap_and_checkpoint() {
        let size = (ENTRY_HEADER_SIZE as u64 * 2) + 64;
        let (file, mut header) = prepare_wal(size);
        let mut wal = EmbeddedWal::open(&file, &header).expect("open wal");

        wal.append_entry(&vec![0xAA; 32]).expect("append a");
        wal.append_entry(&vec![0xBB; 32]).expect("append b");
        wal.record_checkpoint(&mut header).expect("checkpoint");

        assert!(wal.pending_records().expect("pending").is_empty());

        wal.append_entry(&vec![0xCC; 32]).expect("append c");
        let records = wal.pending_records().expect("after append");
        assert_eq!(records.len(), 1);
        assert_eq!(records[0].payload, vec![0xCC; 32]);
    }

    #[test]
    fn corrupted_record_reports_offset() {
        let (mut file, header) = prepare_wal(64);
        // Write a record header that claims an impossible length so scan_records trips.
        file.seek(SeekFrom::Start(header.wal_offset)).expect("seek");
        let mut record = [0u8; ENTRY_HEADER_SIZE];
        record[..8].copy_from_slice(&1u64.to_le_bytes()); // sequence
        record[8..12].copy_from_slice(&(u32::MAX).to_le_bytes()); // absurd length
        file.write_all(&record).expect("write corrupt header");
        file.sync_all().expect("sync");

        let err = EmbeddedWal::open(&file, &header).expect_err("open should fail");
        match err {
            MemvidError::WalCorruption { offset, reason } => {
                assert_eq!(offset, 0);
                assert!(reason.contains("length"), "reason should mention length");
            }
            other => panic!("unexpected error: {other:?}"),
        }
    }
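
    // Illustrative extra tests: minimal sketches that exercise the failure
    // paths described in the comments above.
    #[test]
    fn read_only_rejects_appends() {
        let (file, header) = prepare_wal(1024);
        let mut wal = EmbeddedWal::open_read_only(&file, &header).expect("open read-only");
        assert!(wal.append_entry(b"nope").is_err());
    }

    #[test]
    fn oversized_entry_is_rejected() {
        let (file, header) = prepare_wal(64);
        let mut wal = EmbeddedWal::open(&file, &header).expect("open wal");
        // A 48-byte header plus a 32-byte payload is 80 bytes, larger than the
        // whole 64-byte region, so the append must fail up front.
        assert!(wal.append_entry(&[0u8; 32]).is_err());
    }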
}