Skip to main content

sqlrite/sql/pager/
mod.rs

1//! On-disk persistence for a `Database`, using fixed-size paged files.
2//!
3//! The file is a sequence of 4 KiB pages. Page 0 holds the header
4//! (magic, version, page count, schema-root pointer). Every other page carries
5//! a small per-page header (type tag + next-page pointer + payload length)
6//! followed by a payload of up to 4089 bytes.
7//!
8//! **Storage strategy (format version 2, Phase 3c.5).**
9//!
10//! - Each `Table`'s rows live as **cells** in a chain of `TableLeaf` pages.
11//!   Cell layout and slot directory are in `cell.rs` / `table_page.rs`;
12//!   cells that exceed the inline threshold spill into an overflow chain
13//!   via `overflow.rs`.
14//! - The schema catalog is itself a regular table named `sqlrite_master`,
15//!   with one row per user table:
16//!       `(name TEXT PRIMARY KEY, sql TEXT NOT NULL,
17//!         rootpage INTEGER NOT NULL, last_rowid INTEGER NOT NULL)`
18//!   This is the SQLite-style approach: the schema of `sqlrite_master`
19//!   itself is hardcoded into the engine so the open path can bootstrap.
20//! - Page 0's `schema_root_page` field points at the first leaf of
21//!   `sqlrite_master`.
22//!
23//! **Format version.** Version 2 is not compatible with files produced by
24//! earlier commits. Opening a v1 file returns a clean error — users on
25//! old files have to regenerate them from CREATE/INSERT, as there's no
26//! production data to migrate yet.
27
28// Data-layer modules. Not every helper in these modules is used by save/open
29// yet — some exist for tests, some for future maintenance operations.
30// Module-level #[allow(dead_code)] keeps the build quiet without dotting
31// the modules with per-item attributes.
32#[allow(dead_code)]
33pub mod cell;
34pub mod file;
35pub mod header;
36#[allow(dead_code)]
37pub mod index_cell;
38#[allow(dead_code)]
39pub mod interior_page;
40pub mod overflow;
41pub mod page;
42pub mod pager;
43#[allow(dead_code)]
44pub mod table_page;
45#[allow(dead_code)]
46pub mod varint;
47#[allow(dead_code)]
48pub mod wal;
49
50use std::collections::{BTreeMap, HashMap};
51use std::path::Path;
52use std::sync::{Arc, Mutex};
53
54use sqlparser::dialect::SQLiteDialect;
55use sqlparser::parser::Parser;
56
57use crate::error::{Result, SQLRiteError};
58use crate::sql::db::database::Database;
59use crate::sql::db::secondary_index::{IndexOrigin, SecondaryIndex};
60use crate::sql::db::table::{Column, DataType, Row, Table, Value};
61use crate::sql::pager::cell::Cell;
62use crate::sql::pager::header::DbHeader;
63use crate::sql::pager::index_cell::IndexCell;
64use crate::sql::pager::interior_page::{InteriorCell, InteriorPage};
65use crate::sql::pager::overflow::{
66    OVERFLOW_THRESHOLD, OverflowRef, PagedEntry, read_overflow_chain, write_overflow_chain,
67};
68use crate::sql::pager::page::{PAGE_HEADER_SIZE, PAGE_SIZE, PAYLOAD_PER_PAGE, PageType};
69use crate::sql::pager::pager::Pager;
70use crate::sql::pager::table_page::TablePage;
71use crate::sql::parser::create::CreateQuery;
72
73// Re-export so callers can spell `sql::pager::AccessMode` without
74// reaching into the `pager::pager::pager` submodule path.
75pub use crate::sql::pager::pager::AccessMode;
76
/// Name of the internal catalog table. Reserved — user CREATEs of this
/// name must be rejected upstream; `save_database` also rejects it as a
/// defensive backstop before writing the catalog.
pub const MASTER_TABLE_NAME: &str = "sqlrite_master";
80
/// Opens a database file in read-write mode. Shorthand for
/// [`open_database_with_mode`] with [`AccessMode::ReadWrite`].
///
/// Returns the reconstructed in-memory `Database` with its `Pager`
/// attached for subsequent saves.
pub fn open_database(path: &Path, db_name: String) -> Result<Database> {
    open_database_with_mode(path, db_name, AccessMode::ReadWrite)
}
86
/// Opens a database file in read-only mode. Shorthand for
/// [`open_database_with_mode`] with [`AccessMode::ReadOnly`].
///
/// Acquires a shared OS-level advisory lock, so other read-only openers
/// coexist but any writer is excluded. Attempts to mutate the returned
/// `Database` (e.g. an `INSERT`, or a `save_database` call against it)
/// bottom out in a `cannot commit: database is opened read-only` error
/// from the Pager.
pub fn open_database_read_only(path: &Path, db_name: String) -> Result<Database> {
    open_database_with_mode(path, db_name, AccessMode::ReadOnly)
}
95
96/// Opens a database file and reconstructs the in-memory `Database`,
97/// leaving the long-lived `Pager` attached for subsequent auto-save
98/// (read-write) or consistent-snapshot reads (read-only).
99pub fn open_database_with_mode(path: &Path, db_name: String, mode: AccessMode) -> Result<Database> {
100    let pager = Pager::open_with_mode(path, mode)?;
101
102    // 1. Load sqlrite_master from the tree at header.schema_root_page.
103    let mut master = build_empty_master_table();
104    load_table_rows(&pager, &mut master, pager.header().schema_root_page)?;
105
106    // 2. Two passes over master rows: first build every user table, then
107    //    attach secondary indexes. Indexes need their base table to exist
108    //    before we can populate them. Auto-indexes are created at table
109    //    build time so we only have to load explicit indexes from disk
110    //    (but we also reload the auto-index CONTENT because Table::new
111    //    built it empty).
112    let mut db = Database::new(db_name);
113    let mut index_rows: Vec<IndexCatalogRow> = Vec::new();
114
115    for rowid in master.rowids() {
116        let ty = take_text(&master, "type", rowid)?;
117        let name = take_text(&master, "name", rowid)?;
118        let sql = take_text(&master, "sql", rowid)?;
119        let rootpage = take_integer(&master, "rootpage", rowid)? as u32;
120        let last_rowid = take_integer(&master, "last_rowid", rowid)?;
121
122        match ty.as_str() {
123            "table" => {
124                let (parsed_name, columns) = parse_create_sql(&sql)?;
125                if parsed_name != name {
126                    return Err(SQLRiteError::Internal(format!(
127                        "sqlrite_master row '{name}' carries SQL for '{parsed_name}' — corrupt catalog?"
128                    )));
129                }
130                let mut table = build_empty_table(&name, columns, last_rowid);
131                if rootpage != 0 {
132                    load_table_rows(&pager, &mut table, rootpage)?;
133                }
134                if last_rowid > table.last_rowid {
135                    table.last_rowid = last_rowid;
136                }
137                db.tables.insert(name, table);
138            }
139            "index" => {
140                index_rows.push(IndexCatalogRow {
141                    name,
142                    sql,
143                    rootpage,
144                });
145            }
146            other => {
147                return Err(SQLRiteError::Internal(format!(
148                    "sqlrite_master row '{name}' has unknown type '{other}'"
149                )));
150            }
151        }
152    }
153
154    // Second pass: attach each index to its table.
155    for row in index_rows {
156        attach_index(&mut db, &pager, row)?;
157    }
158
159    db.source_path = Some(path.to_path_buf());
160    db.pager = Some(pager);
161    Ok(db)
162}
163
/// Catalog row for a secondary index — deferred until after every table is
/// loaded so the index's base table exists by the time we populate it.
struct IndexCatalogRow {
    // Index name (sqlrite_master `name` column).
    name: String,
    // The stored `CREATE INDEX` SQL, re-parsed on open.
    sql: String,
    // Root page of the persisted index tree (0 means the index is empty).
    rootpage: u32,
}
171
172/// Persists `db` to disk. Same diff-commit behavior as before: only pages
173/// whose bytes actually changed get written.
174pub fn save_database(db: &mut Database, path: &Path) -> Result<()> {
175    let same_path = db.source_path.as_deref() == Some(path);
176    let mut pager = if same_path {
177        match db.pager.take() {
178            Some(p) => p,
179            None if path.exists() => Pager::open(path)?,
180            None => Pager::create(path)?,
181        }
182    } else if path.exists() {
183        Pager::open(path)?
184    } else {
185        Pager::create(path)?
186    };
187
188    pager.clear_staged();
189
190    // Page 0 is the header; payload pages start at 1.
191    let mut next_free_page: u32 = 1;
192
193    // 1. Stage each user table's B-Tree, collecting master-row info.
194    //    `kind` is "table" or "index" — master has one row per each.
195    let mut master_rows: Vec<CatalogEntry> = Vec::new();
196
197    let mut table_names: Vec<&String> = db.tables.keys().collect();
198    table_names.sort();
199    for name in table_names {
200        if name == MASTER_TABLE_NAME {
201            return Err(SQLRiteError::Internal(format!(
202                "user table cannot be named '{MASTER_TABLE_NAME}' (reserved)"
203            )));
204        }
205        let table = &db.tables[name];
206        let (rootpage, new_next) = stage_table_btree(&mut pager, table, next_free_page)?;
207        next_free_page = new_next;
208        master_rows.push(CatalogEntry {
209            kind: "table".into(),
210            name: name.clone(),
211            sql: table_to_create_sql(table),
212            rootpage,
213            last_rowid: table.last_rowid,
214        });
215    }
216
217    // 2. Stage each secondary index's B-Tree. Indexes persist in a
218    //    deterministic order: sorted by (owning_table, index_name).
219    let mut index_entries: Vec<(&Table, &SecondaryIndex)> = Vec::new();
220    for table in db.tables.values() {
221        for idx in &table.secondary_indexes {
222            index_entries.push((table, idx));
223        }
224    }
225    index_entries
226        .sort_by(|(ta, ia), (tb, ib)| ta.tb_name.cmp(&tb.tb_name).then(ia.name.cmp(&ib.name)));
227    for (_table, idx) in index_entries {
228        let (rootpage, new_next) = stage_index_btree(&mut pager, idx, next_free_page)?;
229        next_free_page = new_next;
230        master_rows.push(CatalogEntry {
231            kind: "index".into(),
232            name: idx.name.clone(),
233            sql: idx.synthesized_sql(),
234            rootpage,
235            last_rowid: 0,
236        });
237    }
238
239    // 3. Build an in-memory sqlrite_master with one row per table or index,
240    //    then stage it via the same tree-build path.
241    let mut master = build_empty_master_table();
242    for (i, entry) in master_rows.into_iter().enumerate() {
243        let rowid = (i as i64) + 1;
244        master.restore_row(
245            rowid,
246            vec![
247                Some(Value::Text(entry.kind)),
248                Some(Value::Text(entry.name)),
249                Some(Value::Text(entry.sql)),
250                Some(Value::Integer(entry.rootpage as i64)),
251                Some(Value::Integer(entry.last_rowid)),
252            ],
253        )?;
254    }
255    let (master_root, master_next) = stage_table_btree(&mut pager, &master, next_free_page)?;
256    next_free_page = master_next;
257
258    pager.commit(DbHeader {
259        page_count: next_free_page,
260        schema_root_page: master_root,
261    })?;
262
263    if same_path {
264        db.pager = Some(pager);
265    }
266    Ok(())
267}
268
/// Build material for a single row in sqlrite_master.
struct CatalogEntry {
    // Row kind: "table" or "index".
    kind: String,
    // Object name (table name or index name).
    name: String,
    // Synthesized CREATE SQL that recreates the object on open.
    sql: String,
    // Root page of the object's staged B-Tree.
    rootpage: u32,
    // Highest rowid ever allocated (tables); always 0 for index rows.
    last_rowid: i64,
}
277
278// -------------------------------------------------------------------------
279// sqlrite_master — hardcoded catalog table schema
280
281fn build_empty_master_table() -> Table {
282    // Phase 3e: `type` is the first column, matching SQLite's convention.
283    // It distinguishes `'table'` rows from `'index'` rows.
284    let columns = vec![
285        Column::new("type".into(), "text".into(), false, true, false),
286        Column::new("name".into(), "text".into(), true, true, true),
287        Column::new("sql".into(), "text".into(), false, true, false),
288        Column::new("rootpage".into(), "integer".into(), false, true, false),
289        Column::new("last_rowid".into(), "integer".into(), false, true, false),
290    ];
291    build_empty_table(MASTER_TABLE_NAME, columns, 0)
292}
293
294/// Reads a required Text column from a known-good catalog row.
295fn take_text(table: &Table, col: &str, rowid: i64) -> Result<String> {
296    match table.get_value(col, rowid) {
297        Some(Value::Text(s)) => Ok(s),
298        other => Err(SQLRiteError::Internal(format!(
299            "sqlrite_master column '{col}' at rowid {rowid}: expected Text, got {other:?}"
300        ))),
301    }
302}
303
304/// Reads a required Integer column from a known-good catalog row.
305fn take_integer(table: &Table, col: &str, rowid: i64) -> Result<i64> {
306    match table.get_value(col, rowid) {
307        Some(Value::Integer(v)) => Ok(v),
308        other => Err(SQLRiteError::Internal(format!(
309            "sqlrite_master column '{col}' at rowid {rowid}: expected Integer, got {other:?}"
310        ))),
311    }
312}
313
314// -------------------------------------------------------------------------
315// CREATE-TABLE SQL synthesis and re-parsing
316
317/// Synthesizes a CREATE TABLE SQL string that recreates the table's schema.
318/// Deterministic: same schema → same SQL, so diffing commits stay stable.
319fn table_to_create_sql(table: &Table) -> String {
320    let mut parts = Vec::with_capacity(table.columns.len());
321    for c in &table.columns {
322        let ty = match c.datatype {
323            DataType::Integer => "INTEGER",
324            DataType::Text => "TEXT",
325            DataType::Real => "REAL",
326            DataType::Bool => "BOOLEAN",
327            DataType::None | DataType::Invalid => "TEXT",
328        };
329        let mut piece = format!("{} {}", c.column_name, ty);
330        if c.is_pk {
331            piece.push_str(" PRIMARY KEY");
332        } else {
333            if c.is_unique {
334                piece.push_str(" UNIQUE");
335            }
336            if c.not_null {
337                piece.push_str(" NOT NULL");
338            }
339        }
340        parts.push(piece);
341    }
342    format!("CREATE TABLE {} ({});", table.tb_name, parts.join(", "))
343}
344
345/// Reverses `table_to_create_sql`: feeds the SQL back through `sqlparser`
346/// and produces our internal column list. Returns `(table_name, columns)`.
347fn parse_create_sql(sql: &str) -> Result<(String, Vec<Column>)> {
348    let dialect = SQLiteDialect {};
349    let mut ast = Parser::parse_sql(&dialect, sql).map_err(SQLRiteError::from)?;
350    let stmt = ast.pop().ok_or_else(|| {
351        SQLRiteError::Internal("sqlrite_master row held an empty SQL string".to_string())
352    })?;
353    let create = CreateQuery::new(&stmt)?;
354    let columns = create
355        .columns
356        .into_iter()
357        .map(|pc| Column::new(pc.name, pc.datatype, pc.is_pk, pc.not_null, pc.is_unique))
358        .collect();
359    Ok((create.table_name, columns))
360}
361
362// -------------------------------------------------------------------------
363// In-memory table (re)construction
364
365/// Builds an empty in-memory `Table` given the declared columns.
366fn build_empty_table(name: &str, columns: Vec<Column>, last_rowid: i64) -> Table {
367    let rows: Arc<Mutex<HashMap<String, Row>>> = Arc::new(Mutex::new(HashMap::new()));
368    let mut secondary_indexes: Vec<SecondaryIndex> = Vec::new();
369    {
370        let mut map = rows.lock().expect("rows mutex poisoned");
371        for col in &columns {
372            let row = match col.datatype {
373                DataType::Integer => Row::Integer(BTreeMap::new()),
374                DataType::Text => Row::Text(BTreeMap::new()),
375                DataType::Real => Row::Real(BTreeMap::new()),
376                DataType::Bool => Row::Bool(BTreeMap::new()),
377                _ => Row::None,
378            };
379            map.insert(col.column_name.clone(), row);
380
381            // Auto-create UNIQUE/PK indexes so the restored table has the
382            // same shape Table::new would have built from fresh SQL.
383            if (col.is_pk || col.is_unique)
384                && matches!(col.datatype, DataType::Integer | DataType::Text)
385            {
386                if let Ok(idx) = SecondaryIndex::new(
387                    SecondaryIndex::auto_name(name, &col.column_name),
388                    name.to_string(),
389                    col.column_name.clone(),
390                    &col.datatype,
391                    true,
392                    IndexOrigin::Auto,
393                ) {
394                    secondary_indexes.push(idx);
395                }
396            }
397        }
398    }
399
400    let primary_key = columns
401        .iter()
402        .find(|c| c.is_pk)
403        .map(|c| c.column_name.clone())
404        .unwrap_or_else(|| "-1".to_string());
405
406    Table {
407        tb_name: name.to_string(),
408        columns,
409        rows,
410        secondary_indexes,
411        last_rowid,
412        primary_key,
413    }
414}
415
416// -------------------------------------------------------------------------
417// Leaf-chain read / write
418
/// Re-parses `CREATE INDEX` SQL from sqlrite_master and restores the
/// index on its base table by walking the tree of index cells at
/// `rootpage`. The base table is expected to already be in `db.tables`.
///
/// Returns an Internal error when the SQL references a table or column
/// that isn't present in `db` (catalog out of sync with the data pages).
fn attach_index(db: &mut Database, pager: &Pager, row: IndexCatalogRow) -> Result<()> {
    let (table_name, column_name, is_unique) = parse_create_index_sql(&row.sql)?;

    let table = db.get_table_mut(table_name.clone()).map_err(|_| {
        SQLRiteError::Internal(format!(
            "index '{}' references unknown table '{table_name}' (sqlrite_master out of sync?)",
            row.name
        ))
    })?;
    // Resolve the indexed column's datatype — SecondaryIndex::new needs it.
    let datatype = table
        .columns
        .iter()
        .find(|c| c.column_name == column_name)
        .map(|c| clone_datatype(&c.datatype))
        .ok_or_else(|| {
            SQLRiteError::Internal(format!(
                "index '{}' references unknown column '{column_name}' on '{table_name}'",
                row.name
            ))
        })?;

    // An auto-index on this column may already exist (built by
    // build_empty_table for UNIQUE/PK columns). If the names match, reuse
    // the slot instead of adding a duplicate entry.
    let existing_slot = table
        .secondary_indexes
        .iter()
        .position(|i| i.name == row.name);
    let idx = match existing_slot {
        Some(i) => {
            // Drain any entries that may have been populated during table
            // restore_row calls — we're about to repopulate from the
            // persisted tree.
            table.secondary_indexes.remove(i)
        }
        None => SecondaryIndex::new(
            row.name.clone(),
            table_name.clone(),
            column_name.clone(),
            &datatype,
            is_unique,
            IndexOrigin::Explicit,
        )?,
    };
    let mut idx = idx;
    // Wipe any stale entries from the auto path so the load is idempotent:
    // rebuild an empty index carrying over name/table/column and the
    // pre-existing uniqueness flag and origin, then fill it from disk.
    let is_unique_flag = idx.is_unique;
    let origin = idx.origin;
    idx = SecondaryIndex::new(
        idx.name,
        idx.table_name,
        idx.column_name,
        &datatype,
        is_unique_flag,
        origin,
    )?;

    // Populate from the index tree's cells.
    load_index_rows(pager, &mut idx, row.rootpage)?;

    table.secondary_indexes.push(idx);
    Ok(())
}
493
494/// Walks the leaves of an index B-Tree rooted at `root_page` and inserts
495/// every `(value, rowid)` pair into `idx`.
496fn load_index_rows(pager: &Pager, idx: &mut SecondaryIndex, root_page: u32) -> Result<()> {
497    if root_page == 0 {
498        return Ok(());
499    }
500    let first_leaf = find_leftmost_leaf(pager, root_page)?;
501    let mut current = first_leaf;
502    while current != 0 {
503        let page_buf = pager
504            .read_page(current)
505            .ok_or_else(|| SQLRiteError::Internal(format!("missing index leaf page {current}")))?;
506        if page_buf[0] != PageType::TableLeaf as u8 {
507            return Err(SQLRiteError::Internal(format!(
508                "page {current} tagged {} but expected TableLeaf (index)",
509                page_buf[0]
510            )));
511        }
512        let next_leaf = u32::from_le_bytes(page_buf[1..5].try_into().unwrap());
513        let payload: &[u8; PAYLOAD_PER_PAGE] = (&page_buf[PAGE_HEADER_SIZE..])
514            .try_into()
515            .map_err(|_| SQLRiteError::Internal("index leaf payload size".to_string()))?;
516        let leaf = TablePage::from_bytes(payload);
517
518        for slot in 0..leaf.slot_count() {
519            // Slots on an index page hold KIND_INDEX cells; decode directly.
520            let offset = leaf.slot_offset_raw(slot)?;
521            let (ic, _) = IndexCell::decode(leaf.as_bytes(), offset)?;
522            idx.insert(&ic.value, ic.rowid)?;
523        }
524        current = next_leaf;
525    }
526    Ok(())
527}
528
529/// Minimal recognizer for the synthesized-or-user `CREATE INDEX` SQL we
530/// store in sqlrite_master. Returns `(table_name, column_name, is_unique)`.
531///
532/// Uses sqlparser so user-supplied SQL with extra whitespace, case, etc.
533/// still works; the only shape we accept is single-column indexes.
534fn parse_create_index_sql(sql: &str) -> Result<(String, String, bool)> {
535    use sqlparser::ast::{CreateIndex, Expr, Statement};
536
537    let dialect = SQLiteDialect {};
538    let mut ast = Parser::parse_sql(&dialect, sql).map_err(SQLRiteError::from)?;
539    let Some(Statement::CreateIndex(CreateIndex {
540        table_name,
541        columns,
542        unique,
543        ..
544    })) = ast.pop()
545    else {
546        return Err(SQLRiteError::Internal(format!(
547            "sqlrite_master index row's SQL isn't a CREATE INDEX: {sql}"
548        )));
549    };
550    if columns.len() != 1 {
551        return Err(SQLRiteError::NotImplemented(
552            "multi-column indexes aren't supported yet".to_string(),
553        ));
554    }
555    let col = match &columns[0].column.expr {
556        Expr::Identifier(ident) => ident.value.clone(),
557        Expr::CompoundIdentifier(parts) => {
558            parts.last().map(|p| p.value.clone()).unwrap_or_default()
559        }
560        other => {
561            return Err(SQLRiteError::Internal(format!(
562                "unsupported indexed column expression: {other:?}"
563            )));
564        }
565    };
566    Ok((table_name.to_string(), col, unique))
567}
568
569/// Cheap clone helper — `DataType` doesn't derive `Clone` elsewhere.
570fn clone_datatype(dt: &DataType) -> DataType {
571    match dt {
572        DataType::Integer => DataType::Integer,
573        DataType::Text => DataType::Text,
574        DataType::Real => DataType::Real,
575        DataType::Bool => DataType::Bool,
576        DataType::None => DataType::None,
577        DataType::Invalid => DataType::Invalid,
578    }
579}
580
581/// Stages an index's B-Tree at `start_page`. Each leaf cell is a
582/// `KIND_INDEX` entry carrying `(original_rowid, value)`. Returns
583/// `(root_page, next_free_page)`.
584///
585/// The tree's shape matches a regular table's — leaves chained via
586/// `next_page`, optional interior layer above. `Cell::peek_rowid` works
587/// uniformly for index cells (same prefix as local cells), so the
588/// existing slot directory and binary search carry over.
589fn stage_index_btree(
590    pager: &mut Pager,
591    idx: &SecondaryIndex,
592    start_page: u32,
593) -> Result<(u32, u32)> {
594    // Build the leaves.
595    let (leaves, mut next_free_page) = stage_index_leaves(pager, idx, start_page)?;
596    if leaves.len() == 1 {
597        return Ok((leaves[0].0, next_free_page));
598    }
599    let mut level: Vec<(u32, i64)> = leaves;
600    while level.len() > 1 {
601        let (next_level, new_next_free) = stage_interior_level(pager, &level, next_free_page)?;
602        next_free_page = new_next_free;
603        level = next_level;
604    }
605    Ok((level[0].0, next_free_page))
606}
607
608/// Packs the index's (value, rowid) entries into a sibling-chained run
609/// of `TableLeaf` pages. Iteration order matches `SecondaryIndex::iter_entries`
610/// (ascending value; rowids in insertion order within a value), which is
611/// also ascending by the "cell rowid" carried in each IndexCell (the
612/// original row's rowid) — so Cell::peek_rowid + the slot directory's
613/// rowid ordering stays consistent.
614fn stage_index_leaves(
615    pager: &mut Pager,
616    idx: &SecondaryIndex,
617    start_page: u32,
618) -> Result<(Vec<(u32, i64)>, u32)> {
619    let mut leaves: Vec<(u32, i64)> = Vec::new();
620    let mut current_leaf = TablePage::empty();
621    let mut current_leaf_page = start_page;
622    let mut current_max_rowid: Option<i64> = None;
623    let mut next_free_page = start_page + 1;
624
625    // Sort the entries by original rowid so the in-page slot directory,
626    // which binary-searches by rowid, stays valid. (iter_entries orders by
627    // value; we reorder here for B-Tree correctness.)
628    let mut entries: Vec<(Value, i64)> = idx.iter_entries().collect();
629    entries.sort_by_key(|(_, r)| *r);
630
631    for (value, rowid) in entries {
632        let cell = IndexCell::new(rowid, value);
633        let entry_bytes = cell.encode()?;
634
635        if !current_leaf.would_fit(entry_bytes.len()) {
636            let next_leaf_page_num = next_free_page;
637            emit_leaf(pager, current_leaf_page, &current_leaf, next_leaf_page_num);
638            leaves.push((current_leaf_page, current_max_rowid.unwrap_or(i64::MIN)));
639            current_leaf = TablePage::empty();
640            current_leaf_page = next_leaf_page_num;
641            next_free_page += 1;
642
643            if !current_leaf.would_fit(entry_bytes.len()) {
644                return Err(SQLRiteError::Internal(format!(
645                    "index entry of {} bytes exceeds empty-page capacity {}",
646                    entry_bytes.len(),
647                    current_leaf.free_space()
648                )));
649            }
650        }
651        current_leaf.insert_entry(rowid, &entry_bytes)?;
652        current_max_rowid = Some(rowid);
653    }
654
655    emit_leaf(pager, current_leaf_page, &current_leaf, 0);
656    leaves.push((current_leaf_page, current_max_rowid.unwrap_or(i64::MIN)));
657    Ok((leaves, next_free_page))
658}
659
/// Walks a table's B-Tree from `root_page`, following the leftmost-child
/// chain down to the first leaf, then iterating leaves via their sibling
/// `next_page` pointers. Every cell is decoded and replayed into `table`
/// via `restore_row`.
///
/// Note: this eagerly materializes the entire table into `Table`'s
/// in-memory maps on the open path — a cursor-based, on-demand scan
/// would avoid the full upfront load.
fn load_table_rows(pager: &Pager, table: &mut Table, root_page: u32) -> Result<()> {
    let first_leaf = find_leftmost_leaf(pager, root_page)?;
    let mut current = first_leaf;
    while current != 0 {
        let page_buf = pager
            .read_page(current)
            .ok_or_else(|| SQLRiteError::Internal(format!("missing leaf page {current}")))?;
        if page_buf[0] != PageType::TableLeaf as u8 {
            return Err(SQLRiteError::Internal(format!(
                "page {current} tagged {} but expected TableLeaf",
                page_buf[0]
            )));
        }
        // Byte 0 is the page-type tag; bytes 1..5 hold the little-endian
        // next-leaf pointer (0 terminates the chain).
        let next_leaf = u32::from_le_bytes(page_buf[1..5].try_into().unwrap());
        let payload: &[u8; PAYLOAD_PER_PAGE] = (&page_buf[PAGE_HEADER_SIZE..])
            .try_into()
            .map_err(|_| SQLRiteError::Internal("leaf payload slice size".to_string()))?;
        let leaf = TablePage::from_bytes(payload);

        for slot in 0..leaf.slot_count() {
            let entry = leaf.entry_at(slot)?;
            let cell = match entry {
                // Cell stored inline on the leaf.
                PagedEntry::Local(c) => c,
                // Oversized cell: body lives in an overflow chain.
                PagedEntry::Overflow(r) => {
                    let body_bytes =
                        read_overflow_chain(pager, r.first_overflow_page, r.total_body_len)?;
                    let (c, _) = Cell::decode(&body_bytes, 0)?;
                    c
                }
            };
            table.restore_row(cell.rowid, cell.values)?;
        }
        current = next_leaf;
    }
    Ok(())
}
696
697/// Descends from `root_page` through `InteriorNode` pages, always taking
698/// the leftmost child, until a `TableLeaf` is reached. Returns that leaf's
699/// page number. A root that's already a leaf is returned as-is.
700fn find_leftmost_leaf(pager: &Pager, root_page: u32) -> Result<u32> {
701    let mut current = root_page;
702    loop {
703        let page_buf = pager.read_page(current).ok_or_else(|| {
704            SQLRiteError::Internal(format!("missing page {current} during tree descent"))
705        })?;
706        match page_buf[0] {
707            t if t == PageType::TableLeaf as u8 => return Ok(current),
708            t if t == PageType::InteriorNode as u8 => {
709                let payload: &[u8; PAYLOAD_PER_PAGE] =
710                    (&page_buf[PAGE_HEADER_SIZE..]).try_into().map_err(|_| {
711                        SQLRiteError::Internal("interior payload slice size".to_string())
712                    })?;
713                let interior = InteriorPage::from_bytes(payload);
714                current = interior.leftmost_child()?;
715            }
716            other => {
717                return Err(SQLRiteError::Internal(format!(
718                    "unexpected page type {other} during tree descent at page {current}"
719                )));
720            }
721        }
722    }
723}
724
725/// Stages a table's B-Tree starting at `start_page`. Returns
726/// `(root_page, next_free_page)`. Builds bottom-up:
727///
728/// 1. Pack all row cells into `TableLeaf` pages, chaining them via each
729///    leaf's `next_page` sibling pointer (for fast sequential scans).
730/// 2. If the table fits in a single leaf, that leaf is the root.
731/// 3. Otherwise, group leaves into `InteriorNode` pages; recurse up the
732///    tree until one root remains.
733///
734/// Deterministic: same in-memory rows → same pages at same offsets, so
735/// the Pager's diff commit still skips unchanged tables.
736fn stage_table_btree(pager: &mut Pager, table: &Table, start_page: u32) -> Result<(u32, u32)> {
737    let (leaves, mut next_free_page) = stage_leaves(pager, table, start_page)?;
738    if leaves.len() == 1 {
739        return Ok((leaves[0].0, next_free_page));
740    }
741    let mut level: Vec<(u32, i64)> = leaves;
742    while level.len() > 1 {
743        let (next_level, new_next_free) = stage_interior_level(pager, &level, next_free_page)?;
744        next_free_page = new_next_free;
745        level = next_level;
746    }
747    Ok((level[0].0, next_free_page))
748}
749
/// Packs the table's rows into a sibling-linked chain of `TableLeaf` pages.
/// Returns each leaf's `(page_number, max_rowid)` (used by the next level
/// up to build divider cells) and the first free page after the chain
/// including any overflow pages allocated for oversized cells.
///
/// Page allocation is order-sensitive: `build_row_entry` may itself bump
/// `next_free_page` (overflow chains), so a sealed leaf's sibling pointer
/// must be captured at the moment of sealing — see inline comments.
fn stage_leaves(
    pager: &mut Pager,
    table: &Table,
    start_page: u32,
) -> Result<(Vec<(u32, i64)>, u32)> {
    let mut leaves: Vec<(u32, i64)> = Vec::new();
    let mut current_leaf = TablePage::empty();
    let mut current_leaf_page = start_page;
    // Highest rowid inserted into the current leaf; None only while the
    // very first leaf is still empty.
    let mut current_max_rowid: Option<i64> = None;
    let mut next_free_page = start_page + 1;

    for rowid in table.rowids() {
        // May allocate overflow pages, advancing next_free_page.
        let entry_bytes = build_row_entry(pager, table, rowid, &mut next_free_page)?;

        if !current_leaf.would_fit(entry_bytes.len()) {
            // Commit the current leaf. Its sibling next_page is the page
            // number where the new leaf will go — which is next_free_page
            // right now (no overflow pages have been allocated between
            // this decision and the new leaf's allocation below).
            let next_leaf_page_num = next_free_page;
            emit_leaf(pager, current_leaf_page, &current_leaf, next_leaf_page_num);
            leaves.push((current_leaf_page, current_max_rowid.unwrap_or(i64::MIN)));
            current_leaf = TablePage::empty();
            current_leaf_page = next_leaf_page_num;
            next_free_page += 1;
            // current_max_rowid is reassigned by the insert below; no need
            // to zero it out here.

            // An entry too large for even a brand-new empty page should
            // have spilled to overflow — treat as an internal error.
            if !current_leaf.would_fit(entry_bytes.len()) {
                return Err(SQLRiteError::Internal(format!(
                    "entry of {} bytes exceeds empty-page capacity {}",
                    entry_bytes.len(),
                    current_leaf.free_space()
                )));
            }
        }
        current_leaf.insert_entry(rowid, &entry_bytes)?;
        current_max_rowid = Some(rowid);
    }

    // Final leaf: sibling next_page = 0 (end of chain).
    emit_leaf(pager, current_leaf_page, &current_leaf, 0);
    leaves.push((current_leaf_page, current_max_rowid.unwrap_or(i64::MIN)));
    Ok((leaves, next_free_page))
}
799
800/// Encodes a single row's on-leaf entry — either the local cell bytes, or
801/// an `OverflowRef` pointing at a freshly-allocated overflow chain if the
802/// encoded cell exceeded the inline threshold. Advances `next_free_page`
803/// past any overflow pages used.
804fn build_row_entry(
805    pager: &mut Pager,
806    table: &Table,
807    rowid: i64,
808    next_free_page: &mut u32,
809) -> Result<Vec<u8>> {
810    let values = table.extract_row(rowid);
811    let local_cell = Cell::new(rowid, values);
812    let local_bytes = local_cell.encode()?;
813    if local_bytes.len() > OVERFLOW_THRESHOLD {
814        let overflow_start = *next_free_page;
815        *next_free_page = write_overflow_chain(pager, &local_bytes, overflow_start)?;
816        Ok(OverflowRef {
817            rowid,
818            total_body_len: local_bytes.len() as u64,
819            first_overflow_page: overflow_start,
820        }
821        .encode())
822    } else {
823        Ok(local_bytes)
824    }
825}
826
/// Builds one level of `InteriorNode` pages above the given children.
/// Each interior packs as many dividers as will fit; the last child
/// assigned to an interior becomes its `rightmost_child`. Returns the
/// emitted interior pages as `(page_number, max_rowid_in_subtree)` so the
/// next level can build on top of them.
///
/// `children` pairs are `(page_number, max_rowid)` in ascending rowid
/// order (they come from `stage_leaves` / a previous call of this fn),
/// so the last child's max is also the interior's subtree max.
fn stage_interior_level(
    pager: &mut Pager,
    children: &[(u32, i64)],
    start_page: u32,
) -> Result<(Vec<(u32, i64)>, u32)> {
    let mut next_level: Vec<(u32, i64)> = Vec::new();
    let mut next_free_page = start_page;
    // Index of the next child not yet assigned to an interior.
    let mut idx = 0usize;

    while idx < children.len() {
        // Allocate the interior's page number up front; its content is
        // staged only after all its children are assigned.
        let interior_page_num = next_free_page;
        next_free_page += 1;

        // Seed the interior with the first unassigned child as its
        // rightmost. As we add more children, the previous rightmost
        // graduates to being a divider and the new arrival takes over
        // as rightmost.
        let (mut rightmost_child_page, mut rightmost_child_max) = children[idx];
        idx += 1;
        let mut interior = InteriorPage::empty(rightmost_child_page);

        while idx < children.len() {
            // Dry-run encode of the divider the current rightmost would
            // become, so we can probe capacity before mutating the page.
            let new_divider_cell = InteriorCell {
                divider_rowid: rightmost_child_max,
                child_page: rightmost_child_page,
            };
            let new_divider_bytes = new_divider_cell.encode();
            if !interior.would_fit(new_divider_bytes.len()) {
                // This interior is full; remaining children start a new one.
                break;
            }
            // Promote the current rightmost to a divider, then rotate the
            // next child in as the new rightmost.
            interior.insert_divider(rightmost_child_max, rightmost_child_page)?;
            let (next_child_page, next_child_max) = children[idx];
            interior.set_rightmost_child(next_child_page);
            rightmost_child_page = next_child_page;
            rightmost_child_max = next_child_max;
            idx += 1;
        }

        // On loop exit, rightmost_child_max is the max rowid of the last
        // child taken — i.e. of this interior's whole subtree.
        emit_interior(pager, interior_page_num, &interior);
        next_level.push((interior_page_num, rightmost_child_max));
    }

    Ok((next_level, next_free_page))
}
876
877/// Wraps a `TablePage` in the 7-byte page header and hands it to the pager.
878fn emit_leaf(pager: &mut Pager, page_num: u32, leaf: &TablePage, next_leaf: u32) {
879    let mut buf = [0u8; PAGE_SIZE];
880    buf[0] = PageType::TableLeaf as u8;
881    buf[1..5].copy_from_slice(&next_leaf.to_le_bytes());
882    // For leaf pages the legacy `payload_len` field isn't used — the slot
883    // directory self-describes. Zero it by convention.
884    buf[5..7].copy_from_slice(&0u16.to_le_bytes());
885    buf[PAGE_HEADER_SIZE..].copy_from_slice(leaf.as_bytes());
886    pager.stage_page(page_num, buf);
887}
888
889/// Wraps an `InteriorPage` in the 7-byte page header. Interior pages
890/// don't use `next_page` (there's no sibling chain between interiors);
891/// `payload_len` is also unused (the slot directory self-describes).
892fn emit_interior(pager: &mut Pager, page_num: u32, interior: &InteriorPage) {
893    let mut buf = [0u8; PAGE_SIZE];
894    buf[0] = PageType::InteriorNode as u8;
895    buf[1..5].copy_from_slice(&0u32.to_le_bytes());
896    buf[5..7].copy_from_slice(&0u16.to_le_bytes());
897    buf[PAGE_HEADER_SIZE..].copy_from_slice(interior.as_bytes());
898    pager.stage_page(page_num, buf);
899}
900
#[cfg(test)]
mod tests {
    use super::*;
    use crate::sql::process_command;

    /// Builds a small two-table in-memory DB: `users` (2 rows) and
    /// `notes` (1 row). Shared fixture for the round-trip tests below.
    fn seed_db() -> Database {
        let mut db = Database::new("test".to_string());
        process_command(
            "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT NOT NULL UNIQUE, age INTEGER);",
            &mut db,
        )
        .unwrap();
        process_command(
            "INSERT INTO users (name, age) VALUES ('alice', 30);",
            &mut db,
        )
        .unwrap();
        process_command("INSERT INTO users (name, age) VALUES ('bob', 25);", &mut db).unwrap();
        process_command(
            "CREATE TABLE notes (id INTEGER PRIMARY KEY, body TEXT);",
            &mut db,
        )
        .unwrap();
        process_command("INSERT INTO notes (body) VALUES ('hello');", &mut db).unwrap();
        db
    }

    /// Temp-file path unique per (pid, timestamp, test name) so parallel
    /// test runs and repeated runs never collide on the same file.
    fn tmp_path(name: &str) -> std::path::PathBuf {
        let mut p = std::env::temp_dir();
        let pid = std::process::id();
        let nanos = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_nanos())
            .unwrap_or(0);
        p.push(format!("sqlrite-{pid}-{nanos}-{name}.sqlrite"));
        p
    }

    /// Phase 4c: every .sqlrite has a `-wal` sidecar now. Delete both so
    /// `/tmp` doesn't accumulate orphan WALs across test runs.
    fn cleanup(path: &std::path::Path) {
        let _ = std::fs::remove_file(path);
        let mut wal = path.as_os_str().to_owned();
        wal.push("-wal");
        let _ = std::fs::remove_file(std::path::PathBuf::from(wal));
    }

    // Basic round-trip: save a seeded DB, reopen it, and verify that both
    // the schema (table/column counts) and the row data survived.
    #[test]
    fn round_trip_preserves_schema_and_data() {
        let path = tmp_path("roundtrip");
        let mut db = seed_db();
        save_database(&mut db, &path).expect("save");

        let loaded = open_database(&path, "test".to_string()).expect("open");
        assert_eq!(loaded.tables.len(), 2);

        let users = loaded.get_table("users".to_string()).expect("users table");
        assert_eq!(users.columns.len(), 3);
        let rowids = users.rowids();
        assert_eq!(rowids.len(), 2);
        let names: Vec<String> = rowids
            .iter()
            .filter_map(|r| match users.get_value("name", *r) {
                Some(Value::Text(s)) => Some(s),
                _ => None,
            })
            .collect();
        assert!(names.contains(&"alice".to_string()));
        assert!(names.contains(&"bob".to_string()));

        let notes = loaded.get_table("notes".to_string()).expect("notes table");
        assert_eq!(notes.rowids().len(), 1);

        cleanup(&path);
    }

    // A loaded DB must accept further INSERTs and a second save; the
    // second open should then see the extra row.
    #[test]
    fn round_trip_survives_writes_after_load() {
        let path = tmp_path("after_load");
        save_database(&mut seed_db(), &path).unwrap();

        {
            let mut db = open_database(&path, "test".to_string()).unwrap();
            process_command(
                "INSERT INTO users (name, age) VALUES ('carol', 40);",
                &mut db,
            )
            .unwrap();
            save_database(&mut db, &path).unwrap();
        } // db drops → its exclusive lock releases before we reopen below.

        let db2 = open_database(&path, "test".to_string()).unwrap();
        let users = db2.get_table("users".to_string()).unwrap();
        assert_eq!(users.rowids().len(), 3);

        cleanup(&path);
    }

    // Opening a file that isn't a sqlrite database must fail cleanly
    // (an Err, not a panic).
    #[test]
    fn open_rejects_garbage_file() {
        let path = tmp_path("bad");
        std::fs::write(&path, b"not a sqlrite database, just bytes").unwrap();
        let result = open_database(&path, "x".to_string());
        assert!(result.is_err());
        cleanup(&path);
    }

    // 200 rows of ~200 bytes each won't fit in one 4 KiB leaf, so this
    // exercises the multi-leaf chain path of stage_leaves end-to-end.
    #[test]
    fn many_small_rows_spread_across_leaves() {
        let path = tmp_path("many_rows");
        let mut db = Database::new("big".to_string());
        process_command(
            "CREATE TABLE things (id INTEGER PRIMARY KEY, data TEXT);",
            &mut db,
        )
        .unwrap();
        for i in 0..200 {
            let body = "x".repeat(200);
            let q = format!("INSERT INTO things (data) VALUES ('row-{i}-{body}');");
            process_command(&q, &mut db).unwrap();
        }
        save_database(&mut db, &path).unwrap();
        let loaded = open_database(&path, "big".to_string()).unwrap();
        let things = loaded.get_table("things".to_string()).unwrap();
        assert_eq!(things.rowids().len(), 200);
        cleanup(&path);
    }

    // A 10 KB body exceeds the inline threshold, forcing the row through
    // the overflow-chain path of build_row_entry; the reload must recover
    // the full text.
    #[test]
    fn huge_row_goes_through_overflow() {
        let path = tmp_path("overflow_row");
        let mut db = Database::new("big".to_string());
        process_command(
            "CREATE TABLE docs (id INTEGER PRIMARY KEY, body TEXT);",
            &mut db,
        )
        .unwrap();
        let body = "A".repeat(10_000);
        process_command(
            &format!("INSERT INTO docs (body) VALUES ('{body}');"),
            &mut db,
        )
        .unwrap();
        save_database(&mut db, &path).unwrap();

        let loaded = open_database(&path, "big".to_string()).unwrap();
        let docs = loaded.get_table("docs".to_string()).unwrap();
        let rowids = docs.rowids();
        assert_eq!(rowids.len(), 1);
        let stored = docs.get_value("body", rowids[0]);
        match stored {
            Some(Value::Text(s)) => assert_eq!(s.len(), 10_000),
            other => panic!("expected Text, got {other:?}"),
        }
        cleanup(&path);
    }

    #[test]
    fn create_sql_synthesis_round_trips() {
        // Build a table via CREATE, then verify table_to_create_sql +
        // parse_create_sql reproduce an equivalent column list.
        let mut db = Database::new("x".to_string());
        process_command(
            "CREATE TABLE t (id INTEGER PRIMARY KEY, tag TEXT UNIQUE, note TEXT NOT NULL);",
            &mut db,
        )
        .unwrap();
        let t = db.get_table("t".to_string()).unwrap();
        let sql = table_to_create_sql(t);
        let (name, cols) = parse_create_sql(&sql).unwrap();
        assert_eq!(name, "t");
        assert_eq!(cols.len(), 3);
        assert!(cols[0].is_pk);
        assert!(cols[1].is_unique);
        assert!(cols[2].not_null);
    }

    #[test]
    fn sqlrite_master_is_not_exposed_as_a_user_table() {
        // After open, the public db.tables map should not list the master.
        let path = tmp_path("no_master");
        save_database(&mut seed_db(), &path).unwrap();
        let loaded = open_database(&path, "x".to_string()).unwrap();
        assert!(!loaded.tables.contains_key(MASTER_TABLE_NAME));
        cleanup(&path);
    }

    #[test]
    fn multi_leaf_table_produces_an_interior_root() {
        // 200 fat rows force the table into multiple leaves, which means
        // save_database must build at least one InteriorNode above them.
        // The test verifies the round-trip works and confirms the root is
        // indeed an interior page (not a leaf) by reading the page type
        // directly out of the open pager.
        let path = tmp_path("multi_leaf_interior");
        let mut db = Database::new("big".to_string());
        process_command(
            "CREATE TABLE things (id INTEGER PRIMARY KEY, data TEXT);",
            &mut db,
        )
        .unwrap();
        for i in 0..200 {
            let body = "x".repeat(200);
            let q = format!("INSERT INTO things (data) VALUES ('row-{i}-{body}');");
            process_command(&q, &mut db).unwrap();
        }
        save_database(&mut db, &path).unwrap();

        // Confirm the round-trip preserved all 200 rows.
        let loaded = open_database(&path, "big".to_string()).unwrap();
        let things = loaded.get_table("things".to_string()).unwrap();
        assert_eq!(things.rowids().len(), 200);

        // Peek at `things`'s root page via the pager attached to the
        // loaded DB and check it's an InteriorNode, not a leaf.
        let pager = loaded
            .pager
            .as_ref()
            .expect("loaded DB should have a pager");
        // sqlrite_master's row for `things` holds its root page. Easiest
        // way to find it: walk the leaf chain by using find_leftmost_leaf
        // and then hop one level up. Simpler: read the master, scan for
        // the "things" row, look up rootpage.
        let mut master = build_empty_master_table();
        load_table_rows(pager, &mut master, pager.header().schema_root_page).unwrap();
        let things_root = master
            .rowids()
            .into_iter()
            .find_map(|r| match master.get_value("name", r) {
                Some(Value::Text(s)) if s == "things" => match master.get_value("rootpage", r) {
                    Some(Value::Integer(p)) => Some(p as u32),
                    _ => None,
                },
                _ => None,
            })
            .expect("things should appear in sqlrite_master");
        // Byte 0 of a page is its type tag (see emit_leaf/emit_interior).
        let root_buf = pager.read_page(things_root).unwrap();
        assert_eq!(
            root_buf[0],
            PageType::InteriorNode as u8,
            "expected a multi-leaf table to have an interior root, got tag {}",
            root_buf[0]
        );

        cleanup(&path);
    }

    // A user-created secondary index must be rebuilt/restored by the
    // open path with the same name, column, uniqueness, and entries.
    #[test]
    fn explicit_index_persists_across_save_and_open() {
        let path = tmp_path("idx_persist");
        let mut db = Database::new("idx".to_string());
        process_command(
            "CREATE TABLE users (id INTEGER PRIMARY KEY, tag TEXT);",
            &mut db,
        )
        .unwrap();
        for i in 1..=5 {
            let tag = if i % 2 == 0 { "odd" } else { "even" };
            process_command(
                &format!("INSERT INTO users (tag) VALUES ('{tag}');"),
                &mut db,
            )
            .unwrap();
        }
        process_command("CREATE INDEX users_tag_idx ON users (tag);", &mut db).unwrap();
        save_database(&mut db, &path).unwrap();

        let loaded = open_database(&path, "idx".to_string()).unwrap();
        let users = loaded.get_table("users".to_string()).unwrap();
        let idx = users
            .index_by_name("users_tag_idx")
            .expect("explicit index should survive save/open");
        assert_eq!(idx.column_name, "tag");
        assert!(!idx.is_unique);
        // 5 rows: rowids 2, 4 are "odd" (i % 2 == 0 when i is 2 or 4) — 2 entries;
        // rowids 1, 3, 5 are "even" (i % 2 != 0) — 3 entries.
        let even_rowids = idx.lookup(&Value::Text("even".into()));
        let odd_rowids = idx.lookup(&Value::Text("odd".into()));
        assert_eq!(even_rowids.len(), 3);
        assert_eq!(odd_rowids.len(), 2);

        cleanup(&path);
    }

    // Indexes created implicitly for UNIQUE columns must also survive a
    // save/open cycle, not just CREATE INDEX ones.
    #[test]
    fn auto_indexes_for_unique_columns_survive_save_open() {
        let path = tmp_path("auto_idx_persist");
        let mut db = Database::new("a".to_string());
        process_command(
            "CREATE TABLE users (id INTEGER PRIMARY KEY, email TEXT NOT NULL UNIQUE);",
            &mut db,
        )
        .unwrap();
        process_command("INSERT INTO users (email) VALUES ('a@x');", &mut db).unwrap();
        process_command("INSERT INTO users (email) VALUES ('b@x');", &mut db).unwrap();
        save_database(&mut db, &path).unwrap();

        let loaded = open_database(&path, "a".to_string()).unwrap();
        let users = loaded.get_table("users".to_string()).unwrap();
        // Every UNIQUE column auto-creates an index; the load path populated
        // it from the persisted entries.
        let auto_name = SecondaryIndex::auto_name("users", "email");
        let idx = users
            .index_by_name(&auto_name)
            .expect("auto index should be restored");
        assert!(idx.is_unique);
        assert_eq!(idx.lookup(&Value::Text("a@x".into())).len(), 1);
        assert_eq!(idx.lookup(&Value::Text("b@x".into())).len(), 1);

        cleanup(&path);
    }

    #[test]
    fn deep_tree_round_trips() {
        // Force a 3-level tree by bypassing process_command (which prints
        // the full table on every INSERT, making large bulk loads O(N^2)
        // in I/O). We build the Table directly via restore_row.
        use crate::sql::db::table::Column as TableColumn;

        let path = tmp_path("deep_tree");
        let mut db = Database::new("deep".to_string());
        let columns = vec![
            TableColumn::new("id".into(), "integer".into(), true, true, true),
            TableColumn::new("s".into(), "text".into(), false, true, false),
        ];
        let mut table = build_empty_table("t", columns, 0);
        // ~900-byte rows → ~4 rows per leaf. 6000 rows → ~1500 leaves,
        // which with interior fanout ~400 needs 2 interior levels (3-level
        // tree total, counting leaves).
        for i in 1..=6_000i64 {
            let body = "q".repeat(900);
            table
                .restore_row(
                    i,
                    vec![
                        Some(Value::Integer(i)),
                        Some(Value::Text(format!("r-{i}-{body}"))),
                    ],
                )
                .unwrap();
        }
        db.tables.insert("t".to_string(), table);
        save_database(&mut db, &path).unwrap();

        let loaded = open_database(&path, "deep".to_string()).unwrap();
        let t = loaded.get_table("t".to_string()).unwrap();
        assert_eq!(t.rowids().len(), 6_000);

        // Confirm the tree actually grew past 2 levels — i.e., the root's
        // leftmost child is itself an interior page, not a leaf.
        let pager = loaded.pager.as_ref().unwrap();
        let mut master = build_empty_master_table();
        load_table_rows(pager, &mut master, pager.header().schema_root_page).unwrap();
        let t_root = master
            .rowids()
            .into_iter()
            .find_map(|r| match master.get_value("name", r) {
                Some(Value::Text(s)) if s == "t" => match master.get_value("rootpage", r) {
                    Some(Value::Integer(p)) => Some(p as u32),
                    _ => None,
                },
                _ => None,
            })
            .expect("t in sqlrite_master");
        let root_buf = pager.read_page(t_root).unwrap();
        assert_eq!(root_buf[0], PageType::InteriorNode as u8);
        // Re-decode the root's payload as an InteriorPage to reach its
        // leftmost child, then check that child's type tag too.
        let root_payload: &[u8; PAYLOAD_PER_PAGE] =
            (&root_buf[PAGE_HEADER_SIZE..]).try_into().unwrap();
        let root_interior = InteriorPage::from_bytes(root_payload);
        let child = root_interior.leftmost_child().unwrap();
        let child_buf = pager.read_page(child).unwrap();
        assert_eq!(
            child_buf[0],
            PageType::InteriorNode as u8,
            "expected 3-level tree: root's leftmost child should also be InteriorNode",
        );

        cleanup(&path);
    }
}