sqlrite/sql/pager/pager.rs
//! Long-lived page cache + WAL-backed commits.
//!
//! A `Pager` wraps an open `.sqlrite` file plus its `-wal` sidecar. It owns
//! three maps of page bytes:
//!
//! - `on_disk`: snapshot of the main file as last checkpointed. Frozen
//!   across regular commits — the main file is only rewritten
//!   when the checkpointer (Phase 4d) runs.
//! - `wal_cache`: latest committed body for each page that has been
//!   appended to the WAL since the last checkpoint. Populated
//!   at open by replaying the WAL, and kept in lockstep with
//!   each successful `commit`.
//! - `staged`: pages queued for the next commit, not yet in the WAL.
//!
//! **Read precedence.** `read_page` consults `staged → wal_cache → on_disk`,
//! so both uncommitted writes and WAL-resident committed writes shadow the
//! frozen main file. A bounds check against `current_header.page_count`
//! hides pages that have been logically truncated by a shrink-commit even
//! though their bytes are still present in `on_disk` (the real truncation
//! waits for the checkpointer).
//!
//! **Commit flow.** `commit` compares each staged page against the
//! effective committed state (wal_cache layered on on_disk) and appends a
//! WAL frame only for pages whose bytes actually differ. A final "commit"
//! frame for page 0 carries the new encoded header and the post-commit
//! page count in its `commit_page_count` field. That frame is fsync'd.
//! The main file is not touched.
//!
//! **Checkpoint flow (Phase 4d).** When the WAL accumulates past
//! `AUTO_CHECKPOINT_THRESHOLD_FRAMES` frames (tracked on `Wal`), `commit`
//! opportunistically folds them back into the main file: write every
//! WAL-resident page at its proper offset, overwrite the main-file
//! header, truncate the file to `page_count * PAGE_SIZE` bytes, `fsync`,
//! then `Wal::truncate` the sidecar (which rolls the salt so any stale
//! tail bytes from the old generation can't be misread as valid). Reads
//! stay consistent if a crash hits mid-checkpoint — the WAL still holds
//! the authoritative bytes until its header is rewritten, and the
//! checkpointer is idempotent, so rerunning is safe.
//!
//! **Why the commit diff matters.** Higher layers re-serialize the entire
//! database on every auto-save. Without the per-page diff in `commit`, even a
//! one-row UPDATE would append a frame for every page of every table. With
//! the diff, unchanged tables — whose encoded pages come out byte-identical
//! across saves — simply stay out of the WAL.
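//!
//! A minimal sketch of that diff in action (illustrative only; it mirrors
//! the `two_commits_only_stage_the_delta` test at the bottom of this file,
//! and `page_bytes` / `new_header` are placeholders):
//!
//! ```ignore
//! let mut pager = Pager::create(Path::new("demo.sqlrite"))?;
//! pager.stage_page(2, page_bytes);          // new content for page 2
//! assert_eq!(pager.commit(new_header)?, 1); // one dirty data frame appended
//! pager.stage_page(2, page_bytes);          // identical bytes staged again
//! assert_eq!(pager.commit(new_header)?, 0); // diffed away, no data frame
//! ```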
//!
//! **Locking (Phase 4a → 4e).** Every `Pager` takes an advisory lock on
//! its main file and on its WAL sidecar. The mode is driven by
//! [`AccessMode`]:
//!
//! - `ReadWrite` → `flock(LOCK_EX)` — one writer, no other openers.
//! - `ReadOnly` → `flock(LOCK_SH)` — multiple readers coexist; any writer
//!   is excluded.
//!
//! Both locks are tied to their file descriptors and release
//! automatically when the `Pager` drops. On collision the opener gets
//! a clean typed error rather than racing silently. POSIX flock is
//! "multiple readers OR one writer", not both — true concurrent
//! reader-and-writer access would need a shared-memory coordination
//! file and read marks, which is not on the roadmap.
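//!
//! A small usage sketch of the lock modes (not compiled; `path` is a
//! placeholder, and the behavior matches the Phase 4e lock tests below):
//!
//! ```ignore
//! let writer = Pager::open(path)?;               // flock(LOCK_EX) on file + WAL
//! assert!(Pager::open_read_only(path).is_err()); // shared lock is blocked
//! drop(writer);                                  // locks release with the Pager
//! let _r1 = Pager::open_read_only(path)?;        // flock(LOCK_SH)
//! let _r2 = Pager::open_read_only(path)?;        // readers coexist
//! ```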

use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::path::{Path, PathBuf};

use crate::error::{Result, SQLRiteError};
use crate::sql::pager::file::FileStorage;
use crate::sql::pager::header::{DbHeader, decode_header, encode_header};
use crate::sql::pager::page::PAGE_SIZE;
use crate::sql::pager::wal::Wal;

/// Returns the WAL sidecar path for a main `.sqlrite` file: appends
/// the `-wal` suffix to the full path (so `foo.sqlrite` pairs with
/// `foo.sqlrite-wal`). Matches SQLite's convention.
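///
/// A doc sketch of the mapping (not compiled):
///
/// ```ignore
/// assert_eq!(
///     wal_path_for(Path::new("foo.sqlrite")),
///     PathBuf::from("foo.sqlrite-wal"),
/// );
/// ```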
pub(crate) fn wal_path_for(main: &Path) -> PathBuf {
    let mut os = main.as_os_str().to_owned();
    os.push("-wal");
    PathBuf::from(os)
}

/// How a `Pager` (or `Wal`) intends to use the file: mutating writes vs.
/// consistent-snapshot reads. Drives the OS-level lock mode, and the
/// Pager uses it to reject mutation attempts on read-only openers.
///
/// - `ReadWrite` takes `flock(LOCK_EX)` — one writer, no other openers.
/// - `ReadOnly` takes `flock(LOCK_SH)` — multiple readers can coexist;
///   a writer is excluded.
///
/// This is POSIX-flock semantics, so "multiple readers AND one writer"
/// isn't supported yet. True concurrent reader-writer access would need
/// a shared-memory coordination file and read marks — that's deferred.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccessMode {
    ReadWrite,
    ReadOnly,
}

/// Acquires an advisory lock on `file`, mapping the OS-level "lock
/// held" error to a clean SQLRite error. `Exclusive` on Unix is
/// `flock(LOCK_EX | LOCK_NB)`; `Shared` is `flock(LOCK_SH | LOCK_NB)`.
/// On Windows, `LockFileEx` with the corresponding flags.
///
/// We call fs2's trait methods fully qualified because `std::fs::File`
/// gained its own `try_lock_*` inherent methods in Rust 1.84 with a
/// different error type — qualifying nails down which one we mean.
#[cfg(feature = "file-locks")]
pub(crate) fn acquire_lock(file: &File, path: &Path, mode: AccessMode) -> Result<()> {
    let res = match mode {
        AccessMode::ReadWrite => fs2::FileExt::try_lock_exclusive(file),
        AccessMode::ReadOnly => fs2::FileExt::try_lock_shared(file),
    };
    res.map_err(|e| {
        let how = match mode {
            AccessMode::ReadWrite => {
                "is in use (another process has it open; readers and writers are exclusive)"
            }
            AccessMode::ReadOnly => {
                "is locked for writing by another process (read-only open blocked until the writer closes)"
            }
        };
        SQLRiteError::General(format!(
            "database '{}' {how} ({e})",
            path.display()
        ))
    })
}

/// No-op variant for builds without the `file-locks` feature (most
/// notably the WASM SDK, where `fs2` doesn't compile against
/// wasm32-unknown-unknown). The Pager still rejects mutations on a
/// read-only open via `AccessMode`, but there's no OS-level
/// multi-process coordination — the caller is trusted to avoid
/// conflicting opens. Fine for WASM, where file-backed opens
/// aren't exposed in the MVP anyway.
#[cfg(not(feature = "file-locks"))]
pub(crate) fn acquire_lock(_file: &File, _path: &Path, _mode: AccessMode) -> Result<()> {
    Ok(())
}

/// How many WAL frames may accumulate between auto-checkpoints before
/// `commit` opportunistically folds them back into the main file. Kept
/// low enough that the WAL stays bounded on write-heavy workloads;
/// high enough that small bursts don't thrash the main file. SQLite
/// defaults to 1000; our target DBs are smaller so 100 is plenty.
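///
/// For scale (an illustrative figure that matches the auto-checkpoint tests
/// below): a workload of single-page commits appends two frames per commit,
/// one data frame plus the page-0 commit frame, so the auto-checkpoint fires
/// after roughly 50 commits.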
const AUTO_CHECKPOINT_THRESHOLD_FRAMES: usize = 100;

pub struct Pager {
    /// Main-file I/O handle. Regular commits leave it alone; the
    /// checkpointer writes accumulated WAL pages back here.
    storage: FileStorage,
    current_header: DbHeader,
    /// Byte snapshot of the main file as last checkpointed. The
    /// checkpointer is the only thing that mutates it.
    on_disk: HashMap<u32, Box<[u8; PAGE_SIZE]>>,
    /// Pages queued for the next commit. `commit` drains this.
    staged: HashMap<u32, Box<[u8; PAGE_SIZE]>>,
    /// The committed WAL's view of each page. Populated at open by
    /// replaying the log, and kept in sync with each successful commit.
    /// Layered on top of `on_disk` for read resolution.
    wal_cache: HashMap<u32, Box<[u8; PAGE_SIZE]>>,
    /// Write-ahead log sidecar. Present on a read-write Pager; `None`
    /// on a read-only Pager that either found no WAL on disk or doesn't
    /// retain the handle after initial replay. Reads consult
    /// `wal_cache` (already populated at open) either way.
    wal: Option<Wal>,
    /// `ReadWrite` allows `commit` / `checkpoint`; `ReadOnly` rejects
    /// them with a typed error. `stage_page` is available in both modes
    /// (it only touches the in-memory `staged` map) — on a read-only
    /// Pager any staged bytes simply never reach disk, because `commit`
    /// is the gate.
    access_mode: AccessMode,
}

impl Pager {
    /// Opens an existing database file for read-write access. Shorthand
    /// for [`Pager::open_with_mode`] with [`AccessMode::ReadWrite`].
    pub fn open(path: &Path) -> Result<Self> {
        Self::open_with_mode(path, AccessMode::ReadWrite)
    }

    /// Opens an existing database file for read-only access — takes
    /// a shared advisory lock that coexists with other readers but is
    /// excluded by any writer. `commit` and `checkpoint` return a clean
    /// error rather than panic; `stage_page` still works but never
    /// reaches disk (the bytes sit in the in-memory `staged` map that
    /// `commit` would have drained).
    ///
    /// If the WAL sidecar doesn't exist, the open succeeds with an
    /// empty `wal_cache` — a read-only caller can't materialize a
    /// sidecar on its own, and a DB that never had WAL writes is fine
    /// to read straight from the main file.
    pub fn open_read_only(path: &Path) -> Result<Self> {
        Self::open_with_mode(path, AccessMode::ReadOnly)
    }

    /// Opens an existing database file with the given access mode.
    /// Loads every main-file page into `on_disk`, then opens the WAL
    /// sidecar (read-only mode uses a shared lock and skips sidecar
    /// creation; read-write creates the sidecar if missing) and layers
    /// committed frames into `wal_cache`.
    pub fn open_with_mode(path: &Path, mode: AccessMode) -> Result<Self> {
        let file = match mode {
            AccessMode::ReadWrite => OpenOptions::new().read(true).write(true).open(path)?,
            AccessMode::ReadOnly => OpenOptions::new().read(true).open(path)?,
        };
        acquire_lock(&file, path, mode)?;
        let mut storage = FileStorage::new(file);
        let mut header = storage.read_header()?;

        let mut on_disk = HashMap::with_capacity(header.page_count.saturating_sub(1) as usize);
        // Page 0 is the header itself; regular pages live at 1..page_count.
        for page_num in 1..header.page_count {
            let buf = read_raw_page(&mut storage, page_num)?;
            on_disk.insert(page_num, buf);
        }

        let wal_path = wal_path_for(path);
        let (wal_handle, wal_cache) = match mode {
            AccessMode::ReadWrite => {
                // Create the sidecar if it's missing — a pre-Phase-4c
                // file or a DB that was hand-deleted down to just the
                // main file both need a fresh empty WAL to be writable.
                let mut wal = if wal_path.exists() {
                    Wal::open_with_mode(&wal_path, mode)?
                } else {
                    Wal::create(&wal_path)?
                };
                let mut cache: HashMap<u32, Box<[u8; PAGE_SIZE]>> = HashMap::new();
                wal.load_committed_into(&mut cache)?;
                (Some(wal), cache)
            }
            AccessMode::ReadOnly => {
                // Read-only mustn't create files. If the sidecar is
                // absent, treat the WAL as empty and serve reads from
                // the main file alone.
                if wal_path.exists() {
                    let mut wal = Wal::open_with_mode(&wal_path, mode)?;
                    let mut cache: HashMap<u32, Box<[u8; PAGE_SIZE]>> = HashMap::new();
                    wal.load_committed_into(&mut cache)?;
                    // Strictly the WAL handle isn't needed after this
                    // point in read-only mode — the cache is all that
                    // reads consult, and dropping the handle would
                    // release the shared lock on the sidecar early.
                    // Keep it anyway so the lock spans the whole Pager
                    // lifetime: a checkpointer process grabbing LOCK_EX
                    // on the WAL while our reader still has wal_cache
                    // loaded would be correct for reads but surprising
                    // semantically.
                    (Some(wal), cache)
                } else {
                    (None, HashMap::new())
                }
            }
        };

        // If the WAL committed a new page 0, that frame's body is the
        // up-to-date header — decode it and let it override what the
        // main file's stale header says.
        if let Some(page0) = wal_cache.get(&0) {
            header = decode_header(page0.as_ref())?;
        } else if let Some(w) = wal_handle.as_ref()
            && let Some(committed_pc) = w.last_commit_page_count()
        {
            // Belt-and-suspenders: even if the latest commit frame didn't
            // land on page 0 (shouldn't happen under the current commit
            // layout, but keeps us correct if that ever changes), trust
            // its page count.
            header.page_count = committed_pc;
        }

        Ok(Self {
            storage,
            current_header: header,
            on_disk,
            staged: HashMap::new(),
            wal_cache,
            wal: wal_handle,
            access_mode: mode,
        })
    }

    /// Creates a fresh database file. Page 0 is the header; page 1 is an
    /// empty `TableLeaf` that serves as the initial `sqlrite_master` root
    /// (zero rows, no user tables yet). A matching empty WAL sidecar is
    /// created alongside it — any pre-existing WAL at the target path is
    /// truncated.
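    ///
    /// A doc sketch of the resulting shape (not compiled; mirrors the
    /// `create_then_open_round_trips` test):
    ///
    /// ```ignore
    /// // A fresh file is two pages: the header (page 0) plus the empty
    /// // `sqlrite_master` root (page 1).
    /// let p = Pager::create(Path::new("new.sqlrite"))?;
    /// assert_eq!(p.header().page_count, 2);
    /// assert_eq!(p.header().schema_root_page, 1);
    /// ```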
    pub fn create(path: &Path) -> Result<Self> {
        use crate::sql::pager::page::{PAGE_HEADER_SIZE, PageType};
        use crate::sql::pager::table_page::TablePage;

        let file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .truncate(true)
            .open(path)?;
        acquire_lock(&file, path, AccessMode::ReadWrite)?;
        let mut storage = FileStorage::new(file);

        let empty_master = TablePage::empty();
        let mut page1 = Box::new([0u8; PAGE_SIZE]);
        page1[0] = PageType::TableLeaf as u8;
        page1[1..5].copy_from_slice(&0u32.to_le_bytes());
        page1[5..7].copy_from_slice(&0u16.to_le_bytes());
        page1[PAGE_HEADER_SIZE..].copy_from_slice(empty_master.as_bytes());

        let header = DbHeader {
            page_count: 2,
            schema_root_page: 1,
        };

        // Write the file synchronously so the initial create is durable and
        // subsequent `Pager::open` calls see a valid header + page 1.
        storage.seek_to(0)?;
        storage.write_all(&encode_header(&header))?;
        storage.write_all(page1.as_ref())?;
        storage.flush()?;

        // Sidecar WAL — fresh, no frames yet.
        let wal = Wal::create(&wal_path_for(path))?;

        let mut on_disk = HashMap::new();
        on_disk.insert(1, page1);

        Ok(Self {
            storage,
            current_header: header,
            on_disk,
            staged: HashMap::new(),
            wal_cache: HashMap::new(),
            wal: Some(wal),
            access_mode: AccessMode::ReadWrite,
        })
    }

    pub fn header(&self) -> DbHeader {
        self.current_header
    }

    /// Returns the mode this Pager was opened in. Callers can use this
    /// to bail out of a write path earlier than the Pager itself would.
    pub fn access_mode(&self) -> AccessMode {
        self.access_mode
    }

    fn require_writable(&self, op: &'static str) -> Result<()> {
        if self.access_mode == AccessMode::ReadOnly {
            return Err(SQLRiteError::General(format!(
                "cannot {op}: database is opened read-only"
            )));
        }
        Ok(())
    }

    /// Reads a page, preferring staged content, then the WAL-committed
    /// overlay, then the frozen main-file snapshot. Returns `None` for
    /// pages beyond the current page count (pages that have been logically
    /// truncated by a shrink-commit stay in `on_disk` until checkpoint,
    /// but a bounds check hides them from readers).
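    ///
    /// A rough illustration of the precedence (not compiled; `fresh` and
    /// `header` are placeholders):
    ///
    /// ```ignore
    /// pager.stage_page(2, fresh);                   // staged, not committed yet
    /// assert_eq!(pager.read_page(2), Some(&fresh)); // staged shadows everything
    /// pager.commit(header)?;                        // now served from wal_cache
    /// assert!(pager.read_page(header.page_count).is_none()); // out of bounds
    /// ```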
    pub fn read_page(&self, page_num: u32) -> Option<&[u8; PAGE_SIZE]> {
        // Staged pages are "the future" and should always shadow everything
        // else, even pages we're about to extend beyond the old page count.
        if let Some(b) = self.staged.get(&page_num) {
            return Some(b);
        }
        // A page that's been logically dropped shouldn't be readable even
        // if its bytes linger in on_disk until the next checkpoint.
        if page_num >= self.current_header.page_count {
            return None;
        }
        if let Some(b) = self.wal_cache.get(&page_num) {
            return Some(b.as_ref());
        }
        self.on_disk.get(&page_num).map(|b| b.as_ref())
    }

    /// Queues `bytes` as the new content of page `page_num`. The write only
    /// reaches disk when `commit` is called.
    pub fn stage_page(&mut self, page_num: u32, bytes: [u8; PAGE_SIZE]) {
        self.staged.insert(page_num, Box::new(bytes));
    }

    /// Discards all staged pages. Useful when beginning a new full re-save
    /// from scratch; the higher layer can also just overwrite pages without
    /// clearing since `stage_page` replaces.
    pub fn clear_staged(&mut self) {
        self.staged.clear();
    }

    /// Commits all staged pages into the WAL. Only pages whose bytes differ
    /// from the effective committed state (wal_cache layered on on_disk)
    /// produce frames. A final commit frame carries the new page 0 (encoded
    /// header) and is fsync'd; that seals the transaction. The main file is
    /// left untouched — it only changes when the checkpointer (Phase 4d)
    /// runs.
    ///
    /// Returns the number of dirty *data* frames appended (excluding the
    /// implicit page-0 commit frame that's always written).
    pub fn commit(&mut self, new_header: DbHeader) -> Result<usize> {
        self.require_writable("commit")?;
        let wal = self
            .wal
            .as_mut()
            .expect("read-write Pager must carry a WAL handle");

        // Decide which staged pages carry bytes that aren't already live.
        // Effective committed state = wal_cache overlaid on on_disk.
        let staged = std::mem::take(&mut self.staged);
        let mut dirty: Vec<(u32, Box<[u8; PAGE_SIZE]>)> = staged
            .into_iter()
            .filter(|(n, bytes)| {
                let existing = self.wal_cache.get(n).or_else(|| self.on_disk.get(n));
                match existing {
                    Some(e) => e.as_ref() != bytes.as_ref(),
                    None => true,
                }
            })
            .collect();
        // Append in ascending page order so the log replays deterministically
        // and sequential reads during checkpoint stay sequential.
        dirty.sort_by_key(|(n, _)| *n);
        let writes = dirty.len();

        for (n, bytes) in &dirty {
            wal.append_frame(*n, bytes.as_ref(), None)?;
        }

        // Seal the transaction. The commit frame carries the new page 0
        // (encoded header) in its body and the new page count in its
        // commit_page_count field — together they're the single atomic
        // record that says "this is the new committed state".
        let page0 = encode_header(&new_header);
        wal.append_frame(0, &page0, Some(new_header.page_count))?;
        let frame_count_after_commit = wal.frame_count();

        // Promote every frame we just wrote into wal_cache so subsequent
        // reads see the latest committed bytes without touching the WAL.
        for (n, bytes) in dirty {
            self.wal_cache.insert(n, bytes);
        }
        self.wal_cache.insert(0, Box::new(page0));

        self.current_header = new_header;

        // Keep the WAL bounded. Under write-heavy load, un-flushed frames
        // accumulate; past the threshold we fold them back into the main
        // file opportunistically so open doesn't have to replay an
        // arbitrarily long log on the next start.
        if frame_count_after_commit >= AUTO_CHECKPOINT_THRESHOLD_FRAMES {
            self.checkpoint()?;
        }

        Ok(writes)
    }

    /// Folds all WAL-resident pages back into the main file and truncates
    /// the WAL. Returns the number of data pages written to the main
    /// file (excludes the header).
    ///
    /// **Crash safety — two fsync barriers.** The main-file writes happen
    /// in two phases separated by a barrier, matching SQLite's checkpoint
    /// ordering:
    ///
    /// 1. Write every `wal_cache` data page at its `page_num * PAGE_SIZE`
    ///    offset in the main file.
    /// 2. **`fsync`** — force those data pages to stable storage *before*
    ///    the header publishes the new state. Without this barrier, a
    ///    filesystem or disk-cache reordering could land the header first,
    ///    leaving a main file that claims "N pages" over stale data.
    /// 3. Rewrite the main-file header at offset 0. This is the
    ///    checkpoint's "commit point" — after it hits disk the main file
    ///    alone tells the truth.
    /// 4. `set_len` shrinks the tail if `page_count` dropped.
    /// 5. **`fsync`** — force the header + set_len durable.
    /// 6. `Wal::truncate` resets the sidecar (rolls salt, writes new
    ///    header, fsync). Running this *after* the main file is fully
    ///    durable means a crash between 5 and 6 leaves a stale WAL over a
    ///    current main file; readers still see the right bytes because
    ///    wal_cache (replayed from the stale WAL on next open) would be
    ///    byte-identical to what's in the main file. A retry of
    ///    `checkpoint` then truncates cleanly.
    ///
    /// A crash between 1 and 2 can leave partial data-page writes, but
    /// since the header hasn't moved yet, the main file still reads as
    /// its pre-checkpoint self — the WAL is intact and authoritative,
    /// and a retry rewrites the same bytes.
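    ///
    /// A quick sketch of the visible effect (not compiled; `header` is a
    /// placeholder, and the behavior matches the Phase 4d tests below):
    ///
    /// ```ignore
    /// pager.stage_page(2, page_a);
    /// pager.stage_page(3, page_b);
    /// pager.commit(header)?;              // frames land in the -wal sidecar
    /// assert_eq!(pager.checkpoint()?, 2); // both data pages fold into the main file
    /// assert_eq!(pager.checkpoint()?, 0); // idempotent: nothing left to flush
    /// ```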
    pub fn checkpoint(&mut self) -> Result<usize> {
        self.require_writable("checkpoint")?;
        // `require_writable` already guaranteed we're ReadWrite; in
        // ReadWrite mode `wal` is always `Some` (it's only `None` for
        // ReadOnly opens of a DB that had no sidecar on disk).
        let wal_frame_count = self.wal.as_ref().map(|w| w.frame_count()).unwrap_or(0);

        // Nothing to flush? Skip the fsyncs and get out.
        if wal_frame_count == 0 && self.wal_cache.is_empty() {
            return Ok(0);
        }

        // Step 1 — write every WAL-resident data page to the main file.
        // Page 0 (header) is handled separately via write_header, and any
        // pages past the new page count are skipped here (set_len will
        // drop them when the file shrinks).
        let page_count = self.current_header.page_count;
        let mut pages: Vec<u32> = self
            .wal_cache
            .keys()
            .copied()
            .filter(|&n| n != 0 && n < page_count)
            .collect();
        pages.sort_unstable();
        let written = pages.len();
        for page_num in &pages {
            let bytes = self
                .wal_cache
                .get(page_num)
                .expect("iterated key must resolve");
            self.storage
                .seek_to((*page_num as u64) * (PAGE_SIZE as u64))?;
            self.storage.write_all(bytes.as_ref())?;
        }

        // Step 2 — first durability barrier. Data pages must hit stable
        // storage before the header publishes the new page count /
        // schema root, or a reordered writeback could expose a
        // half-migrated file on crash.
        if written > 0 {
            self.storage.flush()?;
        }

        // Step 3 — rewrite the main-file header. This is the checkpoint's
        // atomic record.
        self.storage.write_header(&self.current_header)?;

        // Step 4 — shrink the main file if the committed page count is
        // smaller than what the file physically holds.
        self.storage.truncate_to_pages(page_count)?;

        // Step 5 — second durability barrier. Makes header + set_len
        // durable together before we touch the WAL.
        self.storage.flush()?;

        // Step 6 — reset the WAL sidecar. Runs before the in-memory
        // cache swap so that if `wal.truncate` fails (disk full, EIO)
        // we leave the in-memory state untouched rather than having
        // wal_cache empty + on_disk updated + WAL un-truncated, which
        // the Pager can't easily recover from on its own. Here a
        // failure means the main file is already consistent on disk
        // (steps 2 + 5 fsynced it); we just leave the stale WAL in
        // place for the next checkpoint attempt.
        self.wal
            .as_mut()
            .expect("read-write Pager must carry a WAL handle")
            .truncate()?;

        // Promote wal_cache into on_disk and drop everything that's no
        // longer live. Page 0 is special — it's never materialized in
        // on_disk (we read it lazily via storage.read_header on open).
        for (n, bytes) in self.wal_cache.drain().filter(|(n, _)| *n != 0) {
            if n < page_count {
                self.on_disk.insert(n, bytes);
            }
        }
        self.on_disk.retain(|&n, _| n < page_count);

        Ok(written)
    }
}

fn read_raw_page(storage: &mut FileStorage, page_num: u32) -> Result<Box<[u8; PAGE_SIZE]>> {
    storage.seek_to((page_num as u64) * (PAGE_SIZE as u64))?;
    let mut buf = Box::new([0u8; PAGE_SIZE]);
    storage.read_exact(buf.as_mut())?;
    Ok(buf)
}

impl std::fmt::Debug for Pager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Pager")
            .field("access_mode", &self.access_mode)
            .field("page_count", &self.current_header.page_count)
            .field("schema_root_page", &self.current_header.schema_root_page)
            .field("cached_pages", &self.on_disk.len())
            .field("staged_pages", &self.staged.len())
            .field("wal_pages", &self.wal_cache.len())
            .field(
                "wal_frames",
                &self.wal.as_ref().map(|w| w.frame_count()).unwrap_or(0),
            )
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn tmp_path(name: &str) -> std::path::PathBuf {
        let mut p = std::env::temp_dir();
        let pid = std::process::id();
        let nanos = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_nanos())
            .unwrap_or(0);
        p.push(format!("sqlrite-pager-{pid}-{nanos}-{name}.sqlrite"));
        p
    }

    /// Remove both the main file and its `-wal` sidecar — leaving either
    /// behind can destabilize later test runs on the same tmp dir.
    fn cleanup(path: &Path) {
        let _ = std::fs::remove_file(path);
        let _ = std::fs::remove_file(wal_path_for(path));
    }

    fn make_page(first_byte: u8) -> [u8; PAGE_SIZE] {
        let mut buf = [0u8; PAGE_SIZE];
        buf[0] = first_byte;
        buf
    }

    #[test]
    fn create_then_open_round_trips() {
        let path = tmp_path("create_open");
        {
            let p = Pager::create(&path).unwrap();
            assert_eq!(p.header().page_count, 2);
            assert_eq!(p.header().schema_root_page, 1);
        }
        let p2 = Pager::open(&path).unwrap();
        assert_eq!(p2.header().page_count, 2);
        cleanup(&path);
    }

    #[test]
    fn create_spawns_wal_sidecar() {
        // Phase 4c: `Pager::create` must produce an empty WAL sidecar
        // alongside the main file so the first commit has somewhere to
        // append frames.
        use crate::sql::pager::wal::WAL_HEADER_SIZE;
        let path = tmp_path("wal_sidecar");
        let _p = Pager::create(&path).unwrap();
        let wal = wal_path_for(&path);
        assert!(wal.exists(), "WAL sidecar should exist after create");
        // An empty WAL is just its header.
        let len = std::fs::metadata(&wal).unwrap().len();
        assert_eq!(
            len, WAL_HEADER_SIZE as u64,
            "fresh WAL should be header-only"
        );
        cleanup(&path);
    }

    #[test]
    fn commit_writes_only_dirty_pages() {
        let path = tmp_path("diff");
        let mut p = Pager::create(&path).unwrap();

        // Initial state: page 1 is the empty-catalog schema page.
        // Stage three "table-data" pages.
        p.stage_page(2, make_page(0xAA));
        p.stage_page(3, make_page(0xBB));
        p.stage_page(4, make_page(0xCC));
        let writes = p
            .commit(DbHeader {
                page_count: 5,
                schema_root_page: 1,
            })
            .unwrap();
        // 3 dirty data pages (pages 2, 3, 4). The page-0 commit frame is
        // implicit and not counted.
        assert_eq!(writes, 3);

        // Re-stage the same bytes for pages 2 and 3, and changed bytes for 4.
        p.stage_page(2, make_page(0xAA));
        p.stage_page(3, make_page(0xBB));
        p.stage_page(4, make_page(0xDD));
        let writes = p
            .commit(DbHeader {
                page_count: 5,
                schema_root_page: 1,
            })
            .unwrap();
        assert_eq!(writes, 1, "only the changed page should have been written");

        // Reopen and confirm the content is as expected. The bytes live in
        // the WAL — the main file still has the empty init state — so this
        // also verifies the WAL-replay path.
        drop(p);
        let p2 = Pager::open(&path).unwrap();
        assert_eq!(p2.read_page(2).unwrap()[0], 0xAA);
        assert_eq!(p2.read_page(3).unwrap()[0], 0xBB);
        assert_eq!(p2.read_page(4).unwrap()[0], 0xDD);

        cleanup(&path);
    }

    #[test]
    fn second_pager_on_same_file_is_rejected() {
        // Phase 4a regression: two simultaneous read-write Pagers against
        // the same file used to silently race. Now the second one must
        // error out. Phase 4e reworded the lock-contention message; the
        // stable substring we assert on is "in use".
        let path = tmp_path("lock_contention");
        let _first = Pager::create(&path).unwrap();

        let second = Pager::open(&path);
        assert!(second.is_err(), "expected lock-contention error, got Ok");
        let msg = format!("{}", second.unwrap_err());
        assert!(
            msg.contains("in use"),
            "error message should signal lock contention; got: {msg}"
        );

        // After the first Pager drops, both the main-file and WAL locks
        // release and a fresh open succeeds — confirming the locks are
        // tied to Pager lifetime, not leaked across instances.
        drop(_first);
        let third = Pager::open(&path);
        assert!(third.is_ok(), "reopen after drop should succeed: {third:?}");

        cleanup(&path);
    }

    #[test]
    fn commit_leaves_main_file_untouched_and_shrink_hides_dropped_pages() {
        // Phase 4c: commits now go to the WAL; the main file stays frozen
        // until the checkpointer runs. Page-count shrinks still hide the
        // logically-dropped pages from readers (via a bounds check in
        // read_page) even though their bytes linger in the main file.
        let path = tmp_path("shrink");
        let mut p = Pager::create(&path).unwrap();
        let main_size_after_create = std::fs::metadata(&path).unwrap().len();

        p.stage_page(2, make_page(1));
        p.stage_page(3, make_page(2));
        p.stage_page(4, make_page(3));
        p.commit(DbHeader {
            page_count: 5,
            schema_root_page: 1,
        })
        .unwrap();

        // Main file unchanged: the page-2..4 bytes went into the WAL.
        assert_eq!(
            std::fs::metadata(&path).unwrap().len(),
            main_size_after_create,
            "main file must stay frozen across commits"
        );
        // WAL, however, has grown: 3 dirty frames + 1 commit frame.
        let wal_size = std::fs::metadata(wal_path_for(&path)).unwrap().len();
        assert!(
            wal_size > 32,
            "WAL should contain frames after a commit, got size {wal_size}"
        );

        // Shrink to 3 pages.
        p.commit(DbHeader {
            page_count: 3,
            schema_root_page: 1,
        })
        .unwrap();

        // Page 4 is now logically dropped — read_page hides it.
        assert!(p.read_page(4).is_none());
        // And page 2 is still visible under the new count.
        assert_eq!(p.read_page(2).unwrap()[0], 1);

        // Reopen confirms the committed page count survives.
        drop(p);
        let p2 = Pager::open(&path).unwrap();
        assert_eq!(p2.header().page_count, 3);
        assert!(p2.read_page(4).is_none());

        cleanup(&path);
    }

    #[test]
    fn wal_replay_on_reopen_restores_committed_state() {
        // End-to-end: do a commit, close, reopen, and verify every staged
        // page is visible. This is the core Phase 4c promise — committed
        // writes survive a close/reopen via the WAL even though the main
        // file wasn't touched.
        let path = tmp_path("wal_replay");
        {
            let mut p = Pager::create(&path).unwrap();
            p.stage_page(2, make_page(0x11));
            p.stage_page(3, make_page(0x22));
            p.commit(DbHeader {
                page_count: 4,
                schema_root_page: 1,
            })
            .unwrap();
        }

        let p2 = Pager::open(&path).unwrap();
        assert_eq!(p2.header().page_count, 4);
        assert_eq!(p2.read_page(2).unwrap()[0], 0x11);
        assert_eq!(p2.read_page(3).unwrap()[0], 0x22);
        cleanup(&path);
    }

    #[test]
    fn orphan_dirty_frame_in_wal_is_invisible_on_reopen() {
        // Simulates a crash between a dirty frame being written and the
        // commit frame being appended. The Pager's open-time WAL replay
        // should not surface the dirty bytes — reads must still return
        // the previous-committed content.
        let path = tmp_path("orphan_dirty");
        {
            let mut p = Pager::create(&path).unwrap();
            p.stage_page(2, make_page(0xCC));
            p.commit(DbHeader {
                page_count: 3,
                schema_root_page: 1,
            })
            .unwrap();
        }

        // Open the WAL directly and append a dirty frame for page 2 with
        // *different* bytes — no commit frame follows. A later
        // `Pager::open` must ignore this orphan frame.
        {
            let mut w = crate::sql::pager::wal::Wal::open(&wal_path_for(&path)).unwrap();
            let mut other = Box::new([0u8; PAGE_SIZE]);
            other[0] = 0x99;
            w.append_frame(2, &other, None).unwrap();
        }

        let p = Pager::open(&path).unwrap();
        assert_eq!(
            p.read_page(2).unwrap()[0],
            0xCC,
            "orphan dirty frame must not shadow the last committed page"
        );
        cleanup(&path);
    }

    #[test]
    fn two_commits_only_stage_the_delta() {
        // Diffing vs. the effective state (wal_cache + on_disk) means a
        // repeated identical commit writes zero dirty data frames. A commit
        // frame is still appended, but that's implicit.
        let path = tmp_path("diff_delta");
        let mut p = Pager::create(&path).unwrap();
        p.stage_page(2, make_page(0x77));
        let first = p
            .commit(DbHeader {
                page_count: 3,
                schema_root_page: 1,
            })
            .unwrap();
        assert_eq!(first, 1);

        // Stage the same byte again.
        p.stage_page(2, make_page(0x77));
        let second = p
            .commit(DbHeader {
                page_count: 3,
                schema_root_page: 1,
            })
            .unwrap();
        assert_eq!(second, 0, "no data frames should be re-appended");

        cleanup(&path);
    }

    // -------------------------------------------------------------------
    // Phase 4d — Checkpointer
    // -------------------------------------------------------------------

    #[test]
    fn explicit_checkpoint_folds_wal_into_main_file_and_truncates_wal() {
        use crate::sql::pager::wal::WAL_HEADER_SIZE;
        let path = tmp_path("ckpt_explicit");
        let mut p = Pager::create(&path).unwrap();

        p.stage_page(2, make_page(0xA1));
        p.stage_page(3, make_page(0xB2));
        p.commit(DbHeader {
            page_count: 4,
            schema_root_page: 1,
        })
        .unwrap();

        // Pre-checkpoint: WAL has frames, main file is still the initial size.
        let wal = wal_path_for(&path);
        assert!(std::fs::metadata(&wal).unwrap().len() > WAL_HEADER_SIZE as u64);

        let written = p.checkpoint().unwrap();
        assert_eq!(written, 2, "both data pages should flush to main file");

        // WAL is now empty (just the header) with a rolled salt + bumped seq.
        let wal_len = std::fs::metadata(&wal).unwrap().len();
        assert_eq!(wal_len, WAL_HEADER_SIZE as u64);

        // Main file is exactly page_count pages long.
        let main_len = std::fs::metadata(&path).unwrap().len();
        assert_eq!(main_len, 4 * PAGE_SIZE as u64);

        // Drop + reopen: main file alone must carry the latest content.
        // (The WAL is empty, so any surviving correctness is on the main file.)
        drop(p);
        let p2 = Pager::open(&path).unwrap();
        assert_eq!(p2.header().page_count, 4);
        assert_eq!(p2.read_page(2).unwrap()[0], 0xA1);
        assert_eq!(p2.read_page(3).unwrap()[0], 0xB2);

        cleanup(&path);
    }

    #[test]
    fn checkpoint_is_idempotent() {
        // Two back-to-back checkpoints: the second must be a no-op and
        // must not error. (The first drains wal_cache; the second sees
        // nothing to do.)
        let path = tmp_path("ckpt_idempotent");
        let mut p = Pager::create(&path).unwrap();
        p.stage_page(2, make_page(0x42));
        p.commit(DbHeader {
            page_count: 3,
            schema_root_page: 1,
        })
        .unwrap();

        let first = p.checkpoint().unwrap();
        assert_eq!(first, 1);
        let second = p.checkpoint().unwrap();
        assert_eq!(second, 0, "second checkpoint should be a no-op");

        cleanup(&path);
    }

    #[test]
    fn checkpoint_with_shrink_truncates_main_file() {
        // Grow to 5 pages, checkpoint; shrink to 3 pages, checkpoint.
        // After the second checkpoint the main file must physically
        // be 3 * PAGE_SIZE bytes — previous-tail pages are gone.
        let path = tmp_path("ckpt_shrink");
        let mut p = Pager::create(&path).unwrap();
        p.stage_page(2, make_page(1));
        p.stage_page(3, make_page(2));
        p.stage_page(4, make_page(3));
        p.commit(DbHeader {
            page_count: 5,
            schema_root_page: 1,
        })
        .unwrap();
        p.checkpoint().unwrap();
        assert_eq!(
            std::fs::metadata(&path).unwrap().len(),
            5 * PAGE_SIZE as u64
        );

        // Shrink.
        p.commit(DbHeader {
            page_count: 3,
            schema_root_page: 1,
        })
        .unwrap();
        p.checkpoint().unwrap();
        assert_eq!(
            std::fs::metadata(&path).unwrap().len(),
            3 * PAGE_SIZE as u64,
            "main file should shrink to new page_count after checkpoint"
        );
        // Page 4 is gone both physically and logically.
        assert!(p.read_page(4).is_none());

        cleanup(&path);
    }

    #[test]
    fn auto_checkpoint_fires_past_frame_threshold() {
        // Do just enough commits to push the WAL past
        // AUTO_CHECKPOINT_THRESHOLD_FRAMES. After the crossing commit,
        // the WAL should be back to header-only (auto-checkpoint ran)
        // while the main file carries every committed byte.
        use crate::sql::pager::wal::WAL_HEADER_SIZE;
        let path = tmp_path("ckpt_auto");
        let mut p = Pager::create(&path).unwrap();

        // Each commit appends 1 dirty data frame + 1 commit frame for
        // page 0 = 2 frames, so ceil(THRESHOLD / 2) commits get us past
        // the trigger.
        let commits_needed = AUTO_CHECKPOINT_THRESHOLD_FRAMES.div_ceil(2);
        for i in 0..commits_needed {
            p.stage_page(2, make_page((i & 0xff) as u8));
            p.commit(DbHeader {
                page_count: 3,
                schema_root_page: 1,
            })
            .unwrap();
        }

        // Auto-checkpoint must have fired at least once during that loop.
        let wal_len = std::fs::metadata(wal_path_for(&path)).unwrap().len();
        assert_eq!(
            wal_len, WAL_HEADER_SIZE as u64,
            "auto-checkpoint should have truncated the WAL"
        );

        // The last committed byte for page 2 comes from the final loop
        // iteration, i.e. (commits_needed - 1) & 0xff.
        let expected = ((commits_needed - 1) & 0xff) as u8;
        assert_eq!(p.read_page(2).unwrap()[0], expected);

        cleanup(&path);
    }

    // -------------------------------------------------------------------
    // Phase 4e — shared/exclusive lock modes
    // -------------------------------------------------------------------

    #[test]
    fn two_read_only_openers_coexist() {
        // Phase 4e: multiple read-only openers take shared locks and
        // must not exclude each other.
        let path = tmp_path("ro_coexist");
        {
            let mut p = Pager::create(&path).unwrap();
            p.stage_page(2, make_page(0x55));
            p.commit(DbHeader {
                page_count: 3,
                schema_root_page: 1,
            })
            .unwrap();
        }

        let reader1 = Pager::open_read_only(&path).unwrap();
        let reader2 = Pager::open_read_only(&path).unwrap();
        // Both see the committed content.
        assert_eq!(reader1.read_page(2).unwrap()[0], 0x55);
        assert_eq!(reader2.read_page(2).unwrap()[0], 0x55);
        assert_eq!(reader1.access_mode(), AccessMode::ReadOnly);

        cleanup(&path);
    }

    #[test]
    fn read_write_blocks_read_only_and_vice_versa() {
        // A live exclusive lock blocks a shared-lock open, and a live
        // shared lock blocks an exclusive-lock open. Each direction has
        // its own contention message: the blocked reader sees "locked
        // for writing", the blocked writer sees "in use".
        let path = tmp_path("rw_vs_ro");
        let _writer = Pager::create(&path).unwrap();

        // Writer holds LOCK_EX — reader can't take LOCK_SH.
        let reader_attempt = Pager::open_read_only(&path);
        assert!(reader_attempt.is_err());
        let msg = format!("{}", reader_attempt.unwrap_err());
        assert!(
            msg.contains("locked for writing"),
            "read-only open while writer holds lock should mention writer; got: {msg}"
        );

        drop(_writer);

        // Now a reader comes in; a second read-write must be rejected.
        let _reader = Pager::open_read_only(&path).unwrap();
        let writer_attempt = Pager::open(&path);
        assert!(writer_attempt.is_err());
        let msg = format!("{}", writer_attempt.unwrap_err());
        assert!(
            msg.contains("in use"),
            "read-write open while reader holds lock should mention contention; got: {msg}"
        );

        cleanup(&path);
    }

    #[test]
    fn read_only_pager_rejects_mutations() {
        let path = tmp_path("ro_rejects");
        {
            // Seed with some content so an RO open has something to read.
            let mut p = Pager::create(&path).unwrap();
            p.stage_page(2, make_page(0x33));
            p.commit(DbHeader {
                page_count: 3,
                schema_root_page: 1,
            })
            .unwrap();
        }

        let mut ro = Pager::open_read_only(&path).unwrap();
        let commit_err = ro
            .commit(DbHeader {
                page_count: 3,
                schema_root_page: 1,
            })
            .unwrap_err();
        assert!(
            format!("{commit_err}").contains("read-only"),
            "commit on RO pager should surface 'read-only'; got: {commit_err}"
        );
        let ckpt_err = ro.checkpoint().unwrap_err();
        assert!(
            format!("{ckpt_err}").contains("read-only"),
            "checkpoint on RO pager should surface 'read-only'; got: {ckpt_err}"
        );

        // Reads still work.
        assert_eq!(ro.read_page(2).unwrap()[0], 0x33);

        cleanup(&path);
    }

    #[test]
    fn read_only_open_without_wal_sidecar_succeeds() {
        // A file-backed DB whose -wal sidecar was deleted (or a Phase-
        // 4a-vintage file predating Phase 4c) must still be openable
        // read-only. The Pager serves reads straight from on_disk with
        // an empty wal_cache.
        let path = tmp_path("ro_no_wal");
        {
            let mut p = Pager::create(&path).unwrap();
            p.stage_page(2, make_page(0x44));
            p.commit(DbHeader {
                page_count: 3,
                schema_root_page: 1,
            })
            .unwrap();
            // Force the WAL into the main file before we nuke it.
            p.checkpoint().unwrap();
        }
        // Nuke the sidecar.
        std::fs::remove_file(wal_path_for(&path)).unwrap();

        let ro = Pager::open_read_only(&path).unwrap();
        assert_eq!(ro.read_page(2).unwrap()[0], 0x44);
        // No WAL materialized by a read-only open.
        assert!(!wal_path_for(&path).exists());
        cleanup(&path);
    }

    #[test]
    fn reopen_after_crash_between_data_write_and_header_write_recovers_via_wal() {
        // Simulates a crash between step 2 (data-page fsync) and step 3
        // (header write) of `checkpoint`: the main file has new data
        // pages but still carries the old header, AND the WAL still
        // holds every committed frame. Next open must reconstruct the
        // post-commit view via the WAL (wal_cache[0] overrides the stale
        // main-file header).
        use std::io::{Seek, SeekFrom, Write};

        let path = tmp_path("ckpt_crash_mid_flush");
        {
            let mut p = Pager::create(&path).unwrap();
            p.stage_page(2, make_page(0xEE));
            p.commit(DbHeader {
                page_count: 3,
                schema_root_page: 1,
            })
            .unwrap();
            // Below we manually write the committed page 2 into the main
            // file at offset 2 * PAGE_SIZE, simulating the first half of a
            // checkpoint that only got as far as step 2. The header stays
            // at the pre-commit state (page_count=2 from create). Drop the
            // pager first so its exclusive lock releases.
        }
        {
            let mut f = std::fs::OpenOptions::new().write(true).open(&path).unwrap();
            f.seek(SeekFrom::Start(2 * PAGE_SIZE as u64)).unwrap();
            f.write_all(&make_page(0xEE)).unwrap();
            f.sync_all().unwrap();
            // NB: we never call set_len here; the write_all at offset
            // 2 * PAGE_SIZE grows the file implicitly (after create the
            // main file was only 2 pages long). The header at offset 0 is
            // still the original "page_count=2".
        }

        // Reopen. Main-file header says 2 pages; WAL replay should
        // override that to 3, and wal_cache[2] should shadow whatever
        // the main file now holds for page 2 (which happens to be the
        // same byte here — the point is the Pager doesn't depend on
        // that coincidence).
        let p2 = Pager::open(&path).unwrap();
        assert_eq!(p2.header().page_count, 3);
        assert_eq!(p2.read_page(2).unwrap()[0], 0xEE);
        cleanup(&path);
    }

    #[test]
    fn auto_checkpoint_crosses_threshold_mid_loop() {
        // Pins the exact-threshold semantics: `commit` must trigger a
        // checkpoint as soon as the WAL's frame count reaches the
        // threshold, not later. Catches a regression where someone
        // accidentally loosens the `>=` comparison to `>` or moves the
        // count into a different accounting.
        let path = tmp_path("ckpt_threshold_crossing");
        let mut p = Pager::create(&path).unwrap();
        let commits_to_cross = AUTO_CHECKPOINT_THRESHOLD_FRAMES.div_ceil(2);
        for i in 0..commits_to_cross - 1 {
            p.stage_page(2, make_page((i & 0xff) as u8));
            p.commit(DbHeader {
                page_count: 3,
                schema_root_page: 1,
            })
            .unwrap();
        }
        // One short of the threshold — WAL must not yet have been flushed.
        let pre = std::fs::metadata(wal_path_for(&path)).unwrap().len();
        assert!(
            pre > crate::sql::pager::wal::WAL_HEADER_SIZE as u64,
            "WAL should still carry frames right before the crossing commit"
        );

        // The crossing commit: this one's the trigger.
        p.stage_page(2, make_page(0xff));
        p.commit(DbHeader {
            page_count: 3,
            schema_root_page: 1,
        })
        .unwrap();
        let post = std::fs::metadata(wal_path_for(&path)).unwrap().len();
        assert_eq!(
            post,
            crate::sql::pager::wal::WAL_HEADER_SIZE as u64,
            "WAL must be header-only right after the threshold-crossing commit"
        );

        cleanup(&path);
    }
}
1219}