use libsql::Connection;
use crate::errors::{TokenSaveError, Result};
// Current schema version, stored in SQLite's `PRAGMA user_version` header
// field. Bump this and add a matching `migrate_vN` function (wired into
// `run_migration`) whenever the schema changes; `create_schema` must also be
// updated to produce the same final shape.
const LATEST_VERSION: u32 = 6;
/// Read the schema version from SQLite's `PRAGMA user_version`.
///
/// Returns 0 for a brand-new database (SQLite initializes the field to 0;
/// a missing result row is treated the same way).
///
/// # Errors
/// Returns `TokenSaveError::Database` if the pragma cannot be queried, or if
/// the stored value is negative — `user_version` is a signed 32-bit header
/// field, and the previous `v as u32` cast silently wrapped a negative value
/// into a huge version number, which would make `migrate` think the database
/// is ahead of the code.
async fn get_version(conn: &Connection) -> Result<u32> {
    let mut rows = conn.query("PRAGMA user_version", ()).await.map_err(|e| {
        TokenSaveError::Database {
            message: format!("failed to read user_version: {e}"),
            operation: "get_version".to_string(),
        }
    })?;
    let row = rows.next().await.map_err(|e| TokenSaveError::Database {
        message: format!("failed to read user_version row: {e}"),
        operation: "get_version".to_string(),
    })?;
    let v: i64 = match row {
        Some(r) => r.get(0).map_err(|e| TokenSaveError::Database {
            message: format!("failed to read user_version value: {e}"),
            operation: "get_version".to_string(),
        })?,
        // No row: treat as an uninitialized database.
        None => return Ok(0),
    };
    // Checked conversion instead of `as`: reject negative values written by
    // foreign tooling or corruption rather than wrapping them.
    u32::try_from(v).map_err(|_| TokenSaveError::Database {
        message: format!("invalid user_version value: {v}"),
        operation: "get_version".to_string(),
    })
}
/// Persist `version` into SQLite's `PRAGMA user_version` header field.
///
/// PRAGMA statements cannot take bound parameters, so the value is formatted
/// directly into the SQL text; `version` is a `u32`, so no injection is
/// possible.
async fn set_version(conn: &Connection, version: u32) -> Result<()> {
    let sql = format!("PRAGMA user_version = {version}");
    match conn.execute(&sql, ()).await {
        Ok(_) => Ok(()),
        Err(e) => Err(TokenSaveError::Database {
            message: format!("failed to set user_version: {e}"),
            operation: "set_version".to_string(),
        }),
    }
}
/// Create the complete, current (v6) database schema in one shot.
///
/// Intended for brand-new databases: instead of replaying the incremental
/// migration chain, this creates the final shape directly and then stamps
/// `user_version = LATEST_VERSION` so `migrate` treats the database as up to
/// date. The batch below must always match the net result of `migrate_v1`
/// through `migrate_v6`.
///
/// Every statement uses IF NOT EXISTS, so re-running against an existing
/// schema is harmless (apart from re-stamping the version).
///
/// # Errors
/// Returns `TokenSaveError::Database` if any DDL statement fails or if the
/// version stamp cannot be written.
pub async fn create_schema(conn: &Connection) -> Result<()> {
// One batch creates: the core tables (nodes, edges, files, unresolved_refs,
// vectors, metadata), an external-content FTS5 index over `nodes` with the
// three triggers that keep it synchronized on INSERT/DELETE/UPDATE, and all
// secondary indexes — including the unique edge index (COALESCE(line, -1)
// lets NULL-line edges participate in uniqueness) and the expression index
// on lower(name) for case-insensitive name lookups.
conn.execute_batch(
"CREATE TABLE IF NOT EXISTS nodes (
id TEXT PRIMARY KEY,
kind TEXT NOT NULL,
name TEXT NOT NULL,
qualified_name TEXT NOT NULL,
file_path TEXT NOT NULL,
start_line INTEGER NOT NULL,
end_line INTEGER NOT NULL,
start_column INTEGER NOT NULL,
end_column INTEGER NOT NULL,
docstring TEXT,
signature TEXT,
visibility TEXT NOT NULL DEFAULT 'private',
is_async INTEGER NOT NULL DEFAULT 0,
branches INTEGER NOT NULL DEFAULT 0,
loops INTEGER NOT NULL DEFAULT 0,
returns INTEGER NOT NULL DEFAULT 0,
max_nesting INTEGER NOT NULL DEFAULT 0,
unsafe_blocks INTEGER NOT NULL DEFAULT 0,
unchecked_calls INTEGER NOT NULL DEFAULT 0,
assertions INTEGER NOT NULL DEFAULT 0,
updated_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS edges (
id INTEGER PRIMARY KEY AUTOINCREMENT,
source TEXT NOT NULL,
target TEXT NOT NULL,
kind TEXT NOT NULL,
line INTEGER,
FOREIGN KEY (source) REFERENCES nodes(id) ON DELETE CASCADE,
FOREIGN KEY (target) REFERENCES nodes(id) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS files (
path TEXT PRIMARY KEY,
content_hash TEXT NOT NULL,
size INTEGER NOT NULL,
modified_at INTEGER NOT NULL,
indexed_at INTEGER NOT NULL,
node_count INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS unresolved_refs (
id INTEGER PRIMARY KEY AUTOINCREMENT,
from_node_id TEXT NOT NULL,
reference_name TEXT NOT NULL,
reference_kind TEXT NOT NULL,
line INTEGER NOT NULL,
col INTEGER NOT NULL,
file_path TEXT NOT NULL,
FOREIGN KEY (from_node_id) REFERENCES nodes(id) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS vectors (
node_id TEXT PRIMARY KEY,
embedding BLOB NOT NULL,
model TEXT NOT NULL,
created_at INTEGER NOT NULL,
FOREIGN KEY (node_id) REFERENCES nodes(id) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS metadata (
key TEXT PRIMARY KEY,
value TEXT NOT NULL
);
CREATE VIRTUAL TABLE IF NOT EXISTS nodes_fts USING fts5(
name, qualified_name, docstring, signature,
content='nodes', content_rowid='rowid'
);
CREATE TRIGGER IF NOT EXISTS nodes_fts_insert AFTER INSERT ON nodes BEGIN
INSERT INTO nodes_fts(rowid, name, qualified_name, docstring, signature)
VALUES (NEW.rowid, NEW.name, NEW.qualified_name, NEW.docstring, NEW.signature);
END;
CREATE TRIGGER IF NOT EXISTS nodes_fts_delete AFTER DELETE ON nodes BEGIN
INSERT INTO nodes_fts(nodes_fts, rowid, name, qualified_name, docstring, signature)
VALUES ('delete', OLD.rowid, OLD.name, OLD.qualified_name, OLD.docstring, OLD.signature);
END;
CREATE TRIGGER IF NOT EXISTS nodes_fts_update AFTER UPDATE ON nodes BEGIN
INSERT INTO nodes_fts(nodes_fts, rowid, name, qualified_name, docstring, signature)
VALUES ('delete', OLD.rowid, OLD.name, OLD.qualified_name, OLD.docstring, OLD.signature);
INSERT INTO nodes_fts(rowid, name, qualified_name, docstring, signature)
VALUES (NEW.rowid, NEW.name, NEW.qualified_name, NEW.docstring, NEW.signature);
END;
CREATE INDEX IF NOT EXISTS idx_nodes_kind ON nodes(kind);
CREATE INDEX IF NOT EXISTS idx_nodes_name ON nodes(name);
CREATE INDEX IF NOT EXISTS idx_nodes_qualified_name ON nodes(qualified_name);
CREATE INDEX IF NOT EXISTS idx_nodes_file_path ON nodes(file_path);
CREATE INDEX IF NOT EXISTS idx_nodes_file_path_start_line ON nodes(file_path, start_line);
CREATE INDEX IF NOT EXISTS idx_edges_source_kind ON edges(source, kind);
CREATE INDEX IF NOT EXISTS idx_edges_target_kind ON edges(target, kind);
CREATE INDEX IF NOT EXISTS idx_edges_kind ON edges(kind);
CREATE UNIQUE INDEX IF NOT EXISTS idx_edges_unique
ON edges(source, target, kind, COALESCE(line, -1));
CREATE INDEX IF NOT EXISTS idx_unresolved_refs_from_node_id ON unresolved_refs(from_node_id);
CREATE INDEX IF NOT EXISTS idx_unresolved_refs_reference_name ON unresolved_refs(reference_name);
CREATE INDEX IF NOT EXISTS idx_unresolved_refs_file_path ON unresolved_refs(file_path);
CREATE INDEX IF NOT EXISTS idx_nodes_lower_name ON nodes(lower(name));",
)
.await
.map_err(|e| TokenSaveError::Database {
message: format!("failed to create schema: {e}"),
operation: "create_schema".to_string(),
})?;
// Stamp the schema version so `migrate` sees nothing left to do.
set_version(conn, LATEST_VERSION).await?;
Ok(())
}
/// Bring the database schema up to [`LATEST_VERSION`].
///
/// Returns `Ok(true)` if any migrations ran, `Ok(false)` if the schema was
/// already current (including a schema newer than this binary, which is left
/// untouched). All pending migrations run inside a single EXCLUSIVE
/// transaction, so a crash mid-migration leaves the version unchanged.
///
/// # Errors
/// Returns `TokenSaveError::Database` if the lock cannot be acquired, a
/// migration step fails (the transaction is rolled back), or the commit
/// fails.
pub async fn migrate(conn: &Connection) -> Result<bool> {
    // Cheap unlocked pre-check: most startups are already current and should
    // not pay for an exclusive lock.
    let current = get_version(conn).await?;
    if current >= LATEST_VERSION {
        return Ok(false);
    }
    eprintln!(
        "[tokensave] migrating database schema v{current} → v{LATEST_VERSION}…"
    );
    // EXCLUSIVE keeps concurrent writers (including other processes racing
    // to migrate) out while we rebuild the schema.
    conn.execute("BEGIN EXCLUSIVE", ())
        .await
        .map_err(|e| TokenSaveError::Database {
            message: format!("failed to acquire exclusive lock: {e}"),
            operation: "migrate".to_string(),
        })?;
    // Everything after BEGIN runs inside this block so ANY failure — including
    // the version re-read, which previously used `?` and left the transaction
    // open on error — takes the rollback path below.
    let result = async {
        // Re-check under the lock: another process may have completed the
        // migration between the pre-check and BEGIN. In that case there is
        // nothing to do, and we must not claim a migration ran.
        let current = get_version(conn).await?;
        if current >= LATEST_VERSION {
            return Ok(false);
        }
        run_migrations(conn, current).await?;
        Ok(true)
    }
    .await;
    match result {
        Ok(migrated) => {
            conn.execute("COMMIT", ())
                .await
                .map_err(|e| TokenSaveError::Database {
                    message: format!("failed to commit migrations: {e}"),
                    operation: "migrate".to_string(),
                })?;
            Ok(migrated)
        }
        Err(e) => {
            // Best-effort rollback; the original error is the one worth
            // surfacing.
            let _ = conn.execute("ROLLBACK", ()).await;
            Err(e)
        }
    }
}
/// Apply every migration from `current + 1` through [`LATEST_VERSION`],
/// stamping `user_version` after each completed step.
///
/// If `current` is already at or past [`LATEST_VERSION`] the range is empty
/// and this is a harmless no-op. The previous `debug_assert!(current <
/// LATEST_VERSION)` here was wrong: it could abort debug builds when a
/// concurrent process finished migrating between the caller's unlocked
/// version pre-check and its `BEGIN EXCLUSIVE`.
///
/// # Errors
/// Propagates the first failing migration step or version stamp; the caller
/// (`migrate`) rolls the enclosing transaction back in that case.
async fn run_migrations(conn: &Connection, current: u32) -> Result<()> {
    for version in (current + 1)..=LATEST_VERSION {
        run_migration(conn, version).await?;
        set_version(conn, version).await?;
    }
    Ok(())
}
/// Dispatch a single schema migration step by target `version`.
///
/// Keep this table in sync with `LATEST_VERSION`: every version from 1 to
/// `LATEST_VERSION` needs exactly one arm, and forgetting one surfaces as the
/// "unknown migration version" error at runtime rather than a compile error.
async fn run_migration(conn: &Connection, version: u32) -> Result<()> {
match version {
1 => migrate_v1(conn).await,
2 => migrate_v2(conn).await,
3 => migrate_v3(conn).await,
4 => migrate_v4(conn).await,
5 => migrate_v5(conn).await,
6 => migrate_v6(conn).await,
// Reached only if LATEST_VERSION is bumped without adding an arm above.
_ => Err(TokenSaveError::Database {
message: format!("unknown migration version: {version}"),
operation: "run_migration".to_string(),
}),
}
}
/// v1: bootstrap the original schema — core tables, FTS5 search index, and
/// the initial secondary indexes.
///
/// Split into three batches so a failure message identifies which phase
/// (tables, FTS, indexes) broke. Note the v1 `nodes` table lacks the
/// complexity/safety-metric columns; those arrive in v3 and v4.
///
/// # Errors
/// Returns `TokenSaveError::Database` if any batch fails.
async fn migrate_v1(conn: &Connection) -> Result<()> {
// Phase 1: core tables. Edge/ref/vector rows cascade-delete with their
// owning node.
conn.execute_batch(
"CREATE TABLE IF NOT EXISTS nodes (
id TEXT PRIMARY KEY,
kind TEXT NOT NULL,
name TEXT NOT NULL,
qualified_name TEXT NOT NULL,
file_path TEXT NOT NULL,
start_line INTEGER NOT NULL,
end_line INTEGER NOT NULL,
start_column INTEGER NOT NULL,
end_column INTEGER NOT NULL,
docstring TEXT,
signature TEXT,
visibility TEXT NOT NULL DEFAULT 'private',
is_async INTEGER NOT NULL DEFAULT 0,
updated_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS edges (
id INTEGER PRIMARY KEY AUTOINCREMENT,
source TEXT NOT NULL,
target TEXT NOT NULL,
kind TEXT NOT NULL,
line INTEGER,
FOREIGN KEY (source) REFERENCES nodes(id) ON DELETE CASCADE,
FOREIGN KEY (target) REFERENCES nodes(id) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS files (
path TEXT PRIMARY KEY,
content_hash TEXT NOT NULL,
size INTEGER NOT NULL,
modified_at INTEGER NOT NULL,
indexed_at INTEGER NOT NULL,
node_count INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS unresolved_refs (
id INTEGER PRIMARY KEY AUTOINCREMENT,
from_node_id TEXT NOT NULL,
reference_name TEXT NOT NULL,
reference_kind TEXT NOT NULL,
line INTEGER NOT NULL,
col INTEGER NOT NULL,
file_path TEXT NOT NULL,
FOREIGN KEY (from_node_id) REFERENCES nodes(id) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS vectors (
node_id TEXT PRIMARY KEY,
embedding BLOB NOT NULL,
model TEXT NOT NULL,
created_at INTEGER NOT NULL,
FOREIGN KEY (node_id) REFERENCES nodes(id) ON DELETE CASCADE
);",
)
.await
.map_err(|e| TokenSaveError::Database {
message: format!("v1: failed to create tables: {e}"),
operation: "migrate_v1".to_string(),
})?;
// Phase 2: external-content FTS5 index over `nodes`, plus the three
// triggers that keep it in sync (FTS5 external-content tables are not
// updated automatically).
conn.execute_batch(
"CREATE VIRTUAL TABLE IF NOT EXISTS nodes_fts USING fts5(
name,
qualified_name,
docstring,
signature,
content='nodes',
content_rowid='rowid'
);
CREATE TRIGGER IF NOT EXISTS nodes_fts_insert AFTER INSERT ON nodes BEGIN
INSERT INTO nodes_fts(rowid, name, qualified_name, docstring, signature)
VALUES (NEW.rowid, NEW.name, NEW.qualified_name, NEW.docstring, NEW.signature);
END;
CREATE TRIGGER IF NOT EXISTS nodes_fts_delete AFTER DELETE ON nodes BEGIN
INSERT INTO nodes_fts(nodes_fts, rowid, name, qualified_name, docstring, signature)
VALUES ('delete', OLD.rowid, OLD.name, OLD.qualified_name, OLD.docstring, OLD.signature);
END;
CREATE TRIGGER IF NOT EXISTS nodes_fts_update AFTER UPDATE ON nodes BEGIN
INSERT INTO nodes_fts(nodes_fts, rowid, name, qualified_name, docstring, signature)
VALUES ('delete', OLD.rowid, OLD.name, OLD.qualified_name, OLD.docstring, OLD.signature);
INSERT INTO nodes_fts(rowid, name, qualified_name, docstring, signature)
VALUES (NEW.rowid, NEW.name, NEW.qualified_name, NEW.docstring, NEW.signature);
END;",
)
.await
.map_err(|e| TokenSaveError::Database {
message: format!("v1: failed to create FTS: {e}"),
operation: "migrate_v1".to_string(),
})?;
// Phase 3: secondary indexes. The plain idx_edges_source/idx_edges_target
// indexes created here are dropped by the v5 edge-table rebuild, which
// recreates its own index set.
conn.execute_batch(
"CREATE INDEX IF NOT EXISTS idx_nodes_kind ON nodes(kind);
CREATE INDEX IF NOT EXISTS idx_nodes_name ON nodes(name);
CREATE INDEX IF NOT EXISTS idx_nodes_qualified_name ON nodes(qualified_name);
CREATE INDEX IF NOT EXISTS idx_nodes_file_path ON nodes(file_path);
CREATE INDEX IF NOT EXISTS idx_nodes_file_path_start_line ON nodes(file_path, start_line);
CREATE INDEX IF NOT EXISTS idx_edges_source ON edges(source);
CREATE INDEX IF NOT EXISTS idx_edges_target ON edges(target);
CREATE INDEX IF NOT EXISTS idx_edges_kind ON edges(kind);
CREATE INDEX IF NOT EXISTS idx_edges_source_kind ON edges(source, kind);
CREATE INDEX IF NOT EXISTS idx_edges_target_kind ON edges(target, kind);
CREATE INDEX IF NOT EXISTS idx_unresolved_refs_from_node_id ON unresolved_refs(from_node_id);
CREATE INDEX IF NOT EXISTS idx_unresolved_refs_reference_name ON unresolved_refs(reference_name);
CREATE INDEX IF NOT EXISTS idx_unresolved_refs_file_path ON unresolved_refs(file_path);",
)
.await
.map_err(|e| TokenSaveError::Database {
message: format!("v1: failed to create indexes: {e}"),
operation: "migrate_v1".to_string(),
})?;
Ok(())
}
/// v2: introduce the key/value `metadata` table and retire the legacy
/// `schema_versions` table (versioning now lives in `PRAGMA user_version`).
///
/// # Errors
/// Returns `TokenSaveError::Database` if either statement fails.
async fn migrate_v2(conn: &Connection) -> Result<()> {
    // Each step is (SQL, action-for-error-message); run them in order.
    let steps = [
        (
            "CREATE TABLE IF NOT EXISTS metadata (
key TEXT PRIMARY KEY,
value TEXT NOT NULL
)",
            "create metadata table",
        ),
        ("DROP TABLE IF EXISTS schema_versions", "drop schema_versions"),
    ];
    for (sql, action) in steps {
        conn.execute(sql, ())
            .await
            .map_err(|e| TokenSaveError::Database {
                message: format!("v2: failed to {action}: {e}"),
                operation: "migrate_v2".to_string(),
            })?;
    }
    Ok(())
}
/// v3: add per-node complexity metric columns (branches, loops, returns,
/// max nesting depth) to `nodes`, all defaulting to 0 for existing rows.
///
/// # Errors
/// Returns `TokenSaveError::Database` if the ALTER batch fails.
async fn migrate_v3(conn: &Connection) -> Result<()> {
    const SQL: &str = "ALTER TABLE nodes ADD COLUMN branches INTEGER NOT NULL DEFAULT 0;
ALTER TABLE nodes ADD COLUMN loops INTEGER NOT NULL DEFAULT 0;
ALTER TABLE nodes ADD COLUMN returns INTEGER NOT NULL DEFAULT 0;
ALTER TABLE nodes ADD COLUMN max_nesting INTEGER NOT NULL DEFAULT 0;";
    if let Err(e) = conn.execute_batch(SQL).await {
        return Err(TokenSaveError::Database {
            message: format!("v3: failed to add complexity columns: {e}"),
            operation: "migrate_v3".to_string(),
        });
    }
    Ok(())
}
/// v4: add per-node safety metric columns (unsafe blocks, unchecked calls,
/// assertions) to `nodes`, all defaulting to 0 for existing rows.
///
/// # Errors
/// Returns `TokenSaveError::Database` if the ALTER batch fails.
async fn migrate_v4(conn: &Connection) -> Result<()> {
    const SQL: &str = "ALTER TABLE nodes ADD COLUMN unsafe_blocks INTEGER NOT NULL DEFAULT 0;
ALTER TABLE nodes ADD COLUMN unchecked_calls INTEGER NOT NULL DEFAULT 0;
ALTER TABLE nodes ADD COLUMN assertions INTEGER NOT NULL DEFAULT 0;";
    if let Err(e) = conn.execute_batch(SQL).await {
        return Err(TokenSaveError::Database {
            message: format!("v4: failed to add safety metric columns: {e}"),
            operation: "migrate_v4".to_string(),
        });
    }
    Ok(())
}
/// v5: deduplicate the `edges` table and enforce uniqueness going forward.
///
/// SQLite cannot add a UNIQUE constraint to an existing table, so this does
/// the standard rebuild dance: copy DISTINCT rows into a fresh table, drop
/// the original, rename, and recreate all indexes — including the new unique
/// index. `COALESCE(line, -1)` lets edges with a NULL line participate in
/// uniqueness (NULLs are otherwise never equal in a UNIQUE index).
/// Dropping `edges` also drops its old indexes, which is why the full index
/// set is recreated here. The statement order is load-bearing; do not
/// reorder.
///
/// # Errors
/// Returns `TokenSaveError::Database` if any step of the rebuild fails.
async fn migrate_v5(conn: &Connection) -> Result<()> {
conn.execute_batch(
"CREATE TABLE edges_dedup (
id INTEGER PRIMARY KEY AUTOINCREMENT,
source TEXT NOT NULL,
target TEXT NOT NULL,
kind TEXT NOT NULL,
line INTEGER,
FOREIGN KEY (source) REFERENCES nodes(id) ON DELETE CASCADE,
FOREIGN KEY (target) REFERENCES nodes(id) ON DELETE CASCADE
);
INSERT INTO edges_dedup (source, target, kind, line)
SELECT DISTINCT source, target, kind, line FROM edges;
DROP TABLE edges;
ALTER TABLE edges_dedup RENAME TO edges;
CREATE INDEX idx_edges_source ON edges(source);
CREATE INDEX idx_edges_target ON edges(target);
CREATE INDEX idx_edges_kind ON edges(kind);
CREATE INDEX idx_edges_source_kind ON edges(source, kind);
CREATE INDEX idx_edges_target_kind ON edges(target, kind);
CREATE UNIQUE INDEX idx_edges_unique
ON edges(source, target, kind, COALESCE(line, -1));",
)
.await
.map_err(|e| TokenSaveError::Database {
message: format!("v5: failed to deduplicate edges: {e}"),
operation: "migrate_v5".to_string(),
})?;
Ok(())
}
/// v6: add an expression index on `lower(name)` so case-insensitive node
/// lookups can use an index instead of scanning.
///
/// # Errors
/// Returns `TokenSaveError::Database` if index creation fails.
async fn migrate_v6(conn: &Connection) -> Result<()> {
    let created = conn
        .execute(
            "CREATE INDEX IF NOT EXISTS idx_nodes_lower_name ON nodes(lower(name))",
            (),
        )
        .await;
    created.map(|_| ()).map_err(|e| TokenSaveError::Database {
        message: format!("v6: failed to create lower(name) index: {e}"),
        operation: "migrate_v6".to_string(),
    })
}