use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;
use rayon::prelude::*;
use walkdir::WalkDir;
use crate::branch;
use crate::branch_meta::{self, BranchMeta};
use crate::config::{get_tokensave_dir, is_excluded, load_config, save_config, TokenSaveConfig};
use crate::context::ContextBuilder;
use crate::db::Database;
use crate::errors::{TokenSaveError, Result};
use crate::extraction::LanguageRegistry;
use crate::graph::{GraphQueryManager, GraphTraverser};
use crate::resolution::ReferenceResolver;
use crate::sync;
use crate::types::*;
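/// Central handle over a project's index: the database, configuration,
/// language extractors, and git-branch bookkeeping. Constructed via `init`,
/// `open`, or `open_branch`.
///
/// A minimal usage sketch (how the crate re-exports these types is an
/// assumption here, hence `ignore`):
///
/// ```ignore
/// let ts = TokenSave::init(Path::new("/path/to/project")).await?;
/// let result = ts.index_all().await?;
/// eprintln!("indexed {} files, {} nodes", result.file_count, result.node_count);
/// ```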
pub struct TokenSave {
db: Database,
config: TokenSaveConfig,
project_root: PathBuf,
registry: LanguageRegistry,
active_branch: Option<String>,
serving_branch: Option<String>,
fallback_warning: Option<String>,
}
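/// Summary of a full index run (see [`TokenSave::index_all`]).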
pub struct IndexResult {
pub file_count: usize,
pub node_count: usize,
pub edge_count: usize,
pub duration_ms: u64,
}
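/// Summary of an incremental sync: counts plus the concrete paths in each
/// category. `skipped_paths` pairs each unreadable path with the reason it
/// was skipped.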
#[derive(Debug)]
pub struct SyncResult {
pub files_added: usize,
pub files_modified: usize,
pub files_removed: usize,
pub duration_ms: u64,
pub added_paths: Vec<String>,
pub modified_paths: Vec<String>,
pub removed_paths: Vec<String>,
pub skipped_paths: Vec<(String, String)>,
}
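/// Seconds since the Unix epoch, saturating to 0 if the system clock reads
/// earlier than the epoch.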
pub fn current_timestamp() -> i64 {
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs() as i64
}
impl TokenSave {
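    /// Creates a fresh index layout for `project_root`: saves the config,
    /// initializes the database, and records the default git branch (falling
    /// back to the checked-out branch) in the branch metadata. No files are
    /// indexed yet; follow up with [`Self::index_all`].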
pub async fn init(project_root: &Path) -> Result<Self> {
let config = TokenSaveConfig {
root_dir: project_root.to_string_lossy().to_string(),
..TokenSaveConfig::default()
};
save_config(project_root, &config)?;
let db_path = get_tokensave_dir(project_root).join("tokensave.db");
let (db, _migrated) = Database::initialize(&db_path).await?;
let active_branch = branch::current_branch(project_root);
let default_branch = branch::detect_default_branch(project_root)
.or_else(|| active_branch.clone());
if let Some(ref default) = default_branch {
let meta = BranchMeta::new(default);
let _ = branch_meta::save_branch_meta(&get_tokensave_dir(project_root), &meta);
}
Ok(Self {
db,
config,
project_root: project_root.to_path_buf(),
registry: LanguageRegistry::new(),
active_branch,
serving_branch: None,
fallback_warning: None,
})
}
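    /// Opens an existing index, picking the database that best matches the
    /// checked-out branch (see `resolve_db_for_branch`). Recovers from two
    /// failure modes: a `dirty` sentinel left by an interrupted run triggers
    /// an integrity check, and a corrupt database is deleted and rebuilt via
    /// a full re-index. A schema migration also forces a full re-index.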
pub async fn open(project_root: &Path) -> Result<Self> {
let config = load_config(project_root)?;
let tokensave_dir = get_tokensave_dir(project_root);
let active_branch = branch::current_branch(project_root);
let (db_path, serving_branch, fallback_warning) =
Self::resolve_db_for_branch(project_root, &tokensave_dir, active_branch.as_deref());
if !db_path.exists() {
return Err(TokenSaveError::Config {
message: format!(
"no TokenSave database found at '{}'; run 'tokensave sync' first",
db_path.display()
),
});
}
let crashed = has_dirty_sentinel(project_root);
if crashed {
eprintln!(
"[tokensave] previous operation was interrupted — checking database integrity…"
);
}
let open_result = Database::open(&db_path).await;
let (db, migrated) = match open_result {
Ok(pair) => pair,
Err(ref e) if Database::is_corruption_error(e) || crashed => {
print_corruption_warning();
delete_db_files(&db_path);
clear_dirty_sentinel(project_root);
let (db, _) = Database::initialize(&db_path).await?;
let ts = Self {
db,
config,
project_root: project_root.to_path_buf(),
registry: LanguageRegistry::new(),
active_branch: active_branch.clone(),
serving_branch: serving_branch.clone(),
fallback_warning: fallback_warning.clone(),
};
ts.index_all_with_progress(|c, t, f| {
eprintln!("[tokensave] re-indexing [{c}/{t}] {f}");
})
.await?;
eprintln!("[tokensave] re-index complete.");
return Ok(ts);
}
Err(e) => return Err(e),
};
if crashed {
let intact = db.quick_check().await.unwrap_or(false);
if !intact {
print_corruption_warning();
drop(db);
delete_db_files(&db_path);
clear_dirty_sentinel(project_root);
let (new_db, _) = Database::initialize(&db_path).await?;
let ts = Self {
db: new_db,
config,
project_root: project_root.to_path_buf(),
registry: LanguageRegistry::new(),
active_branch: active_branch.clone(),
serving_branch: serving_branch.clone(),
fallback_warning: fallback_warning.clone(),
};
ts.index_all_with_progress(|c, t, f| {
eprintln!("[tokensave] re-indexing [{c}/{t}] {f}");
})
.await?;
eprintln!("[tokensave] re-index complete.");
return Ok(ts);
}
clear_dirty_sentinel(project_root);
}
let ts = Self {
db,
config,
project_root: project_root.to_path_buf(),
registry: LanguageRegistry::new(),
active_branch,
serving_branch,
fallback_warning,
};
if migrated {
eprintln!("[tokensave] schema changed — performing full re-index…");
            ts.index_all_with_progress(|current, total, file| {
                eprintln!("[tokensave] re-indexing [{current}/{total}] {file}");
            })
            .await?;
eprintln!("[tokensave] re-index complete.");
}
Ok(ts)
}
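    /// Chooses which database file to serve for the current git state, in
    /// order of preference: the tracked DB for `branch` itself, then the DB
    /// of the nearest tracked ancestor branch, then the default branch's DB.
    /// Returns the chosen path, the branch actually served, and a user-facing
    /// warning whenever a fallback was taken.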
fn resolve_db_for_branch(
project_root: &Path,
tokensave_dir: &Path,
branch: Option<&str>,
) -> (PathBuf, Option<String>, Option<String>) {
let default_db = tokensave_dir.join("tokensave.db");
let Some(meta) = branch_meta::load_branch_meta(tokensave_dir) else {
return (default_db, None, None);
};
let Some(branch) = branch else {
        return (
            default_db,
            Some(meta.default_branch.clone()),
            Some("detached HEAD — using default branch index".to_string()),
        );
};
if let Some(path) = branch::resolve_branch_db_path(tokensave_dir, branch, &meta) {
if path.exists() {
return (path, Some(branch.to_string()), None);
}
}
if let Some(ancestor) = branch::find_nearest_tracked_ancestor(project_root, branch, &meta)
{
if let Some(path) = branch::resolve_branch_db_path(tokensave_dir, &ancestor, &meta) {
if path.exists() {
return (
path,
Some(ancestor.clone()),
Some(format!(
"branch '{branch}' is not tracked — serving from '{ancestor}'. \
Run `tokensave branch add {branch}` to track it."
)),
);
}
}
}
let serving = meta.default_branch.clone();
(
default_db,
Some(serving),
Some(format!(
"branch '{branch}' is not tracked — serving from '{}'. \
Run `tokensave branch add {branch}` to track it.",
meta.default_branch
)),
)
}
pub async fn open_branch(project_root: &Path, branch_name: &str) -> Result<Self> {
let config = load_config(project_root)?;
let tokensave_dir = get_tokensave_dir(project_root);
let meta = branch_meta::load_branch_meta(&tokensave_dir).ok_or_else(|| {
TokenSaveError::Config {
message: "no branch tracking configured — run `tokensave branch add` first"
.to_string(),
}
})?;
let db_path =
branch::resolve_branch_db_path(&tokensave_dir, branch_name, &meta).ok_or_else(
|| TokenSaveError::Config {
message: format!("branch '{branch_name}' is not tracked"),
},
)?;
if !db_path.exists() {
return Err(TokenSaveError::Config {
message: format!(
"DB for branch '{branch_name}' not found at '{}'",
db_path.display()
),
});
}
let (db, _) = Database::open(&db_path).await?;
Ok(Self {
db,
config,
project_root: project_root.to_path_buf(),
registry: LanguageRegistry::new(),
active_branch: Some(branch_name.to_string()),
serving_branch: Some(branch_name.to_string()),
fallback_warning: None,
})
}
pub fn list_tracked_branches(project_root: &Path) -> Option<Vec<String>> {
let tokensave_dir = get_tokensave_dir(project_root);
let meta = branch_meta::load_branch_meta(&tokensave_dir)?;
Some(meta.branches.keys().cloned().collect())
}
pub fn is_initialized(project_root: &Path) -> bool {
get_tokensave_dir(project_root)
.join("tokensave.db")
.exists()
}
}
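// Crash-detection sentinel: mutating operations write a `dirty` file in the
// tokensave directory before touching the database and remove it on success.
// Finding it on the next open means the previous run was interrupted, so the
// database gets an integrity check (see `TokenSave::open`). A sketch of the
// protocol (`mutate_database` is a placeholder, not a real function here):
//
//     write_dirty_sentinel(root);
//     mutate_database()?;
//     clear_dirty_sentinel(root);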
fn write_dirty_sentinel(project_root: &Path) {
let path = get_tokensave_dir(project_root).join("dirty");
let _ = std::fs::write(
&path,
format!(
"pid={}\ntime={}\nversion={}",
std::process::id(),
current_timestamp(),
env!("CARGO_PKG_VERSION"),
),
);
}
fn clear_dirty_sentinel(project_root: &Path) {
let path = get_tokensave_dir(project_root).join("dirty");
let _ = std::fs::remove_file(path);
}
fn has_dirty_sentinel(project_root: &Path) -> bool {
get_tokensave_dir(project_root).join("dirty").exists()
}
fn delete_db_files(db_path: &std::path::Path) {
let _ = std::fs::remove_file(db_path);
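    // SQLite names its sidecar files by appending to the full file name
    // ("tokensave.db-wal", "tokensave.db-shm"); since this path's extension
    // is "db", replacing it with "db-wal"/"db-shm" yields exactly those names.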
let mut wal = db_path.to_path_buf();
wal.set_extension("db-wal");
let _ = std::fs::remove_file(&wal);
wal.set_extension("db-shm");
let _ = std::fs::remove_file(&wal);
}
fn print_corruption_warning() {
let version = env!("CARGO_PKG_VERSION");
eprintln!("[tokensave] \x1b[33m⚠database corruption detected — rebuilding index\x1b[0m");
eprintln!("[tokensave]");
eprintln!("[tokensave] This was likely caused by a crash or kill during indexing.");
eprintln!("[tokensave] Please report this at:");
eprintln!("[tokensave] https://github.com/aovestdipaperino/tokensave/issues");
eprintln!(
"[tokensave] Include: tokensave version (v{version}), OS, and what happened before the crash."
);
eprintln!("[tokensave]");
}
struct SyncLockGuard {
path: PathBuf,
}
impl Drop for SyncLockGuard {
fn drop(&mut self) {
let _ = std::fs::remove_file(&self.path);
}
}
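/// Acquires the single-writer lock for sync/index operations. Creation is
/// atomic (`create_new`); if the lock file already exists, the PID recorded
/// inside is probed and a stale lock (dead owner) is reclaimed. The returned
/// guard deletes the lock file on drop.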
fn try_acquire_sync_lock(project_root: &Path) -> Result<SyncLockGuard> {
let lock_path = get_tokensave_dir(project_root).join("sync.lock");
let pid = std::process::id();
match std::fs::OpenOptions::new()
.write(true)
.create_new(true)
.open(&lock_path)
{
Ok(mut f) => {
use std::io::Write;
let _ = write!(f, "{pid}");
return Ok(SyncLockGuard { path: lock_path });
}
Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
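            // Lock file already exists; fall through to the stale-owner check below.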
}
Err(e) => {
return Err(TokenSaveError::SyncLock {
message: format!("could not create lockfile: {e}"),
});
}
}
let contents = std::fs::read_to_string(&lock_path).unwrap_or_default();
if let Ok(existing_pid) = contents.trim().parse::<u32>() {
if is_pid_alive(existing_pid) {
return Err(TokenSaveError::SyncLock {
message: format!(
"another sync is already in progress (PID {existing_pid}). \
If this is stale, remove {}",
lock_path.display()
),
});
}
}
let _ = std::fs::remove_file(&lock_path);
let mut f = std::fs::OpenOptions::new()
.write(true)
.create_new(true)
.open(&lock_path)
.map_err(|e| TokenSaveError::SyncLock {
message: format!("could not reclaim lockfile: {e}"),
})?;
use std::io::Write;
let _ = write!(f, "{pid}");
Ok(SyncLockGuard { path: lock_path })
}
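/// Probes whether `pid` is alive by sending signal 0 through the `kill`
/// binary. This is Unix-specific; where `kill` is unavailable the probe
/// fails and the process is treated as dead, allowing the lock to be
/// reclaimed.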
fn is_pid_alive(pid: u32) -> bool {
std::process::Command::new("kill")
.args(["-0", &pid.to_string()])
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status()
.map(|s| s.success())
.unwrap_or(false)
}
impl TokenSave {
pub fn add_skip_folders(&mut self, folders: &[String]) {
for folder in folders {
self.config.exclude.push(format!("{folder}/**"));
}
}
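    /// Full re-index: clears the database and rebuilds it from every
    /// supported source file under the project root.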
pub async fn index_all(&self) -> Result<IndexResult> {
self.index_all_with_progress(|_, _, _| {}).await
}
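    /// Like [`Self::index_all`], invoking `on_file` with
    /// `(current, total, file)` as each file's extraction is recorded.
    /// A minimal sketch (the `ts` binding and crate paths are assumed,
    /// hence `ignore`):
    ///
    /// ```ignore
    /// let result = ts
    ///     .index_all_with_progress(|current, total, file| {
    ///         eprintln!("[{current}/{total}] {file}");
    ///     })
    ///     .await?;
    /// ```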
pub async fn index_all_with_progress<F>(&self, on_file: F) -> Result<IndexResult>
where
F: Fn(usize, usize, &str),
{
debug_assert!(self.project_root.exists(), "project root does not exist");
debug_assert!(self.project_root.is_dir(), "project root is not a directory");
let _lock = try_acquire_sync_lock(&self.project_root)?;
write_dirty_sentinel(&self.project_root);
let start = Instant::now();
self.db.clear().await?;
self.db.begin_bulk_load().await?;
let files = self.scan_files()?;
let total = files.len();
let project_root = &self.project_root;
let registry = &self.registry;
let extractions: Vec<_> = files
.par_iter()
.filter_map(|file_path| {
let abs_path = project_root.join(file_path);
let source = std::fs::read_to_string(&abs_path).ok()?;
let extractor = registry.extractor_for_file(file_path)?;
let mut result = extractor.extract(file_path, &source);
result.sanitize();
let hash = sync::content_hash(&source);
let size = source.len() as u64;
let mtime = sync::file_stat(&abs_path)
.map_or_else(current_timestamp, |(m, _)| m);
Some((file_path.clone(), result, hash, size, mtime))
})
.collect();
let mut all_nodes = Vec::new();
let mut all_edges = Vec::new();
let mut all_unresolved = Vec::new();
let mut file_records = Vec::new();
let mut total_nodes = 0;
for (idx, (file_path, result, hash, size, mtime)) in extractions.iter().enumerate() {
on_file(idx + 1, total, file_path);
total_nodes += result.nodes.len();
all_nodes.extend_from_slice(&result.nodes);
all_edges.extend_from_slice(&result.edges);
all_unresolved.extend_from_slice(&result.unresolved_refs);
file_records.push(FileRecord {
path: file_path.clone(),
content_hash: hash.clone(),
size: *size,
modified_at: *mtime,
indexed_at: current_timestamp(),
node_count: result.nodes.len() as u32,
});
}
if !all_unresolved.is_empty() {
let resolver = ReferenceResolver::from_nodes(&self.db, &all_nodes);
let resolution = resolver.resolve_all(&all_unresolved);
all_edges.extend(resolver.create_edges(&resolution.resolved));
}
all_nodes.sort_unstable_by(|a, b| a.id.cmp(&b.id));
all_edges.sort_unstable_by(|a, b| {
(&a.source, &a.target, a.kind.as_str(), &a.line)
.cmp(&(&b.source, &b.target, b.kind.as_str(), &b.line))
});
all_edges.dedup_by(|a, b| {
a.source == b.source && a.target == b.target && a.kind == b.kind && a.line == b.line
});
file_records.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        let total_edges = all_edges.len();
self.db.insert_nodes(&all_nodes).await?;
self.db.insert_edges(&all_edges).await?;
self.db.upsert_files(&file_records).await?;
self.db.end_bulk_load().await?;
let now_str = current_timestamp().to_string();
self.db.set_metadata("last_full_sync_at", &now_str).await?;
self.db.set_metadata("last_sync_at", &now_str).await?;
let result = IndexResult {
file_count: files.len(),
node_count: total_nodes,
edge_count: total_edges,
duration_ms: start.elapsed().as_millis() as u64,
};
clear_dirty_sentinel(&self.project_root);
Ok(result)
}
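    /// Incremental sync: compares on-disk files against indexed records by
    /// mtime/size first, content-hashes only the candidates, then re-extracts
    /// new and genuinely changed files and drops removed ones. A sketch
    /// (the `ts` binding is assumed):
    ///
    /// ```ignore
    /// let result = ts.sync().await?;
    /// println!(
    ///     "+{} ~{} -{} ({}ms)",
    ///     result.files_added, result.files_modified,
    ///     result.files_removed, result.duration_ms,
    /// );
    /// ```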
pub async fn sync(&self) -> Result<SyncResult> {
self.sync_with_progress(|_, _, _| {}).await
}
pub async fn sync_with_progress<F>(&self, on_progress: F) -> Result<SyncResult>
where
F: Fn(usize, usize, &str),
{
debug_assert!(self.project_root.exists(), "sync: project root does not exist");
debug_assert!(self.project_root.is_dir(), "sync: project root is not a directory");
let _lock = try_acquire_sync_lock(&self.project_root)?;
write_dirty_sentinel(&self.project_root);
let start = Instant::now();
on_progress(0, 0, "scanning files");
let current_files = self.scan_files()?;
on_progress(0, 0, "checking file timestamps");
let project_root = &self.project_root;
let file_stats: Vec<(String, i64, u64)> = current_files
.par_iter()
.filter_map(|path| {
let abs_path = project_root.join(path);
let (mtime, size) = sync::file_stat(&abs_path)?;
Some((path.clone(), mtime, size))
})
.collect();
let db_files = self.db.get_all_files().await?;
let db_map: HashMap<String, FileRecord> = db_files
.into_iter()
.map(|f| (f.path.clone(), f))
.collect();
let mut new_files: Vec<String> = Vec::new();
let mut stat_changed: Vec<String> = Vec::new();
let mut current_set: std::collections::HashSet<&str> =
std::collections::HashSet::with_capacity(file_stats.len());
let mut stat_map: HashMap<String, (i64, u64)> =
HashMap::with_capacity(file_stats.len());
for (path, mtime, size) in &file_stats {
current_set.insert(path.as_str());
stat_map.insert(path.clone(), (*mtime, *size));
match db_map.get(path) {
None => new_files.push(path.clone()),
Some(record) => {
                    if record.modified_at != *mtime || record.size != *size {
stat_changed.push(path.clone());
}
}
}
}
let removed: Vec<String> = db_map
.keys()
.filter(|path| !current_set.contains(path.as_str()))
.cloned()
.collect();
on_progress(0, 0, "hashing changed files");
let needs_read: Vec<&String> = new_files.iter().chain(stat_changed.iter()).collect();
let hash_results: Vec<_> = needs_read
.par_iter()
.map(|path| {
let abs_path = project_root.join(path.as_str());
match sync::read_source_file(&abs_path) {
Ok(source) => Ok(((*path).clone(), sync::content_hash(&source))),
Err(e) => Err(((*path).clone(), e.to_string())),
}
})
.collect();
let mut skipped: Vec<(String, String)> = Vec::new();
let mut hash_map: HashMap<String, String> = HashMap::new();
for result in hash_results {
match result {
Ok((path, hash)) => {
hash_map.insert(path, hash);
}
Err((path, reason)) => {
skipped.push((path, reason));
}
}
}
on_progress(0, 0, "detecting changes");
let mut stale: Vec<String> = Vec::new();
let mut mtime_only_changed: Vec<String> = Vec::new();
for path in &stat_changed {
if let Some(new_hash) = hash_map.get(path) {
if let Some(record) = db_map.get(path) {
if record.content_hash != *new_hash {
stale.push(path.clone());
} else {
mtime_only_changed.push(path.clone());
}
}
}
}
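        // Hash unchanged: the content is identical, so just refresh the
        // recorded mtime/size instead of re-extracting.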
for path in &mtime_only_changed {
if let (Some(record), Some(&(mtime, size))) =
(db_map.get(path), stat_map.get(path))
{
let updated = FileRecord {
modified_at: mtime,
size,
..record.clone()
};
self.db.upsert_file(&updated).await?;
}
}
for path in &removed {
on_progress(0, 0, &format!("removing {path}"));
self.db.delete_file(path).await?;
}
let to_index: Vec<String> = stale.iter().chain(new_files.iter()).cloned().collect();
let registry = &self.registry;
let sync_extractions: Vec<_> = to_index
.par_iter()
.filter_map(|file_path| {
let abs_path = project_root.join(file_path);
let source = sync::read_source_file(&abs_path).ok()?;
let extractor = registry.extractor_for_file(file_path)?;
let mut result = extractor.extract(file_path, &source);
result.sanitize();
let hash = sync::content_hash(&source);
let size = source.len() as u64;
                let mtime = stat_map
                    .get(file_path)
                    .map_or_else(current_timestamp, |&(m, _)| m);
Some((file_path.clone(), result, hash, size, mtime))
})
.collect();
let total = sync_extractions.len();
for (idx, (file_path, result, hash, size, mtime)) in
sync_extractions.iter().enumerate()
{
on_progress(idx + 1, total, file_path);
self.db.delete_nodes_by_file(file_path).await?;
self.db.insert_nodes(&result.nodes).await?;
self.db.insert_edges(&result.edges).await?;
if !result.unresolved_refs.is_empty() {
self.db.insert_unresolved_refs(&result.unresolved_refs).await?;
}
let file_record = FileRecord {
path: file_path.clone(),
content_hash: hash.clone(),
size: *size,
modified_at: *mtime,
indexed_at: current_timestamp(),
node_count: result.nodes.len() as u32,
};
self.db.upsert_file(&file_record).await?;
}
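        // Re-run resolution over everything still unresolved so references
        // into newly indexed symbols become edges.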
if !to_index.is_empty() {
on_progress(0, 0, "resolving references");
let unresolved = self.db.get_unresolved_refs().await?;
if !unresolved.is_empty() {
let resolver = ReferenceResolver::new(&self.db).await;
let resolution = resolver.resolve_all(&unresolved);
let edges = resolver.create_edges(&resolution.resolved);
if !edges.is_empty() {
self.db.insert_edges(&edges).await?;
}
}
}
        self.db
            .set_metadata("last_sync_at", &current_timestamp().to_string())
            .await?;
clear_dirty_sentinel(&self.project_root);
Ok(SyncResult {
files_added: new_files.len(),
files_modified: stale.len(),
files_removed: removed.len(),
duration_ms: start.elapsed().as_millis() as u64,
added_paths: new_files,
modified_paths: stale,
skipped_paths: skipped,
removed_paths: removed,
})
}
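    /// Lists indexable files (relative paths, `/`-separated). With
    /// `git_ignore` enabled this walk respects `.gitignore`; if it finds
    /// nothing while supported files clearly exist (e.g. the project sits
    /// inside an ignored directory of a parent repo), it falls back to a
    /// plain walk.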
fn scan_files(&self) -> Result<Vec<String>> {
debug_assert!(self.project_root.is_dir(), "scan_files: project_root is not a directory");
let supported_exts = self.registry.supported_extensions();
debug_assert!(!supported_exts.is_empty(), "scan_files: no supported extensions registered");
if self.config.git_ignore {
let files = self.scan_files_with_gitignore(&supported_exts)?;
if files.is_empty() {
let has_source = WalkDir::new(&self.project_root)
.max_depth(2)
.into_iter()
.filter_map(|e| e.ok())
.any(|e| {
e.file_type().is_file()
&& e.path()
.extension()
.and_then(|ext| ext.to_str())
.is_some_and(|ext| supported_exts.contains(&ext))
});
if has_source {
eprintln!("warning: gitignore-aware scan found no files; falling back to plain walk (project may be gitignored by parent repo)");
return self.scan_files_walkdir(&supported_exts);
}
}
Ok(files)
} else {
self.scan_files_walkdir(&supported_exts)
}
}
fn scan_files_walkdir(
&self,
supported_exts: &[&str],
) -> Result<Vec<String>> {
let mut files = Vec::new();
for entry in WalkDir::new(&self.project_root)
.follow_links(false)
.into_iter()
.filter_entry(|e| {
if e.depth() == 0 {
return true;
}
let name = e.file_name().to_string_lossy();
!name.starts_with('.') && name != "target"
})
{
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};
if !entry.file_type().is_file() {
continue;
}
if let Some(rel_str) = self.accept_file(entry.path(), supported_exts) {
files.push(rel_str);
}
}
Ok(files)
}
fn scan_files_with_gitignore(
&self,
supported_exts: &[&str],
) -> Result<Vec<String>> {
let mut files = Vec::new();
let walker = ignore::WalkBuilder::new(&self.project_root)
.follow_links(false)
            .hidden(true)
            .git_ignore(true)
            .git_global(true)
            .git_exclude(true)
.build();
for entry in walker {
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};
let Some(ft) = entry.file_type() else {
continue;
};
if !ft.is_file() {
continue;
}
if let Some(rel_str) = self.accept_file(entry.path(), supported_exts) {
files.push(rel_str);
}
}
Ok(files)
}
fn accept_file(
&self,
path: &Path,
supported_exts: &[&str],
) -> Option<String> {
let ext = path.extension().and_then(|e| e.to_str()).unwrap_or("");
if !supported_exts.contains(&ext) {
return None;
}
let relative = path.strip_prefix(&self.project_root).ok()?;
let rel_str = relative.to_string_lossy().replace('\\', "/");
if is_excluded(&rel_str, &self.config) {
return None;
}
let metadata = std::fs::metadata(path).ok()?;
if metadata.len() > self.config.max_file_size {
return None;
}
Some(rel_str)
}
}
impl TokenSave {
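    /// Searches indexed nodes for `query`, returning at most `limit` hits.
    /// A minimal sketch (the `ts` binding is assumed; `SearchResult` fields
    /// live in `crate::types`):
    ///
    /// ```ignore
    /// for hit in ts.search("parse_config", 10).await? {
    ///     // inspect each hit
    /// }
    /// ```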
pub async fn search(&self, query: &str, limit: usize) -> Result<Vec<SearchResult>> {
self.db.search_nodes(query, limit).await
}
pub async fn get_stats(&self) -> Result<GraphStats> {
self.db.get_stats().await
}
pub async fn get_node(&self, id: &str) -> Result<Option<Node>> {
self.db.get_node_by_id(id).await
}
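    /// Transitive callers of `node_id`, up to `max_depth` hops, each paired
    /// with the connecting edge.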
pub async fn get_callers(&self, node_id: &str, max_depth: usize) -> Result<Vec<(Node, Edge)>> {
let traverser = GraphTraverser::new(&self.db);
traverser.get_callers(node_id, max_depth).await
}
pub async fn get_callees(&self, node_id: &str, max_depth: usize) -> Result<Vec<(Node, Edge)>> {
let traverser = GraphTraverser::new(&self.db);
traverser.get_callees(node_id, max_depth).await
}
pub async fn get_impact_radius(&self, node_id: &str, max_depth: usize) -> Result<Subgraph> {
let traverser = GraphTraverser::new(&self.db);
traverser.get_impact_radius(node_id, max_depth).await
}
pub async fn get_call_graph(&self, node_id: &str, depth: usize) -> Result<Subgraph> {
let traverser = GraphTraverser::new(&self.db);
traverser.get_call_graph(node_id, depth).await
}
pub async fn find_dead_code(&self, kinds: &[NodeKind]) -> Result<Vec<Node>> {
let qm = GraphQueryManager::new(&self.db);
qm.find_dead_code(kinds).await
}
pub async fn get_nodes_by_file(&self, file_path: &str) -> Result<Vec<Node>> {
self.db.get_nodes_by_file(file_path).await
}
pub async fn get_all_nodes(&self) -> Result<Vec<Node>> {
self.db.get_all_nodes().await
}
pub async fn get_incoming_edges(&self, node_id: &str) -> Result<Vec<Edge>> {
self.db.get_incoming_edges(node_id, &[]).await
}
pub async fn get_outgoing_edges(&self, node_id: &str) -> Result<Vec<Edge>> {
self.db.get_outgoing_edges(node_id, &[]).await
}
pub async fn get_all_edges(&self) -> Result<Vec<Edge>> {
self.db.get_all_edges().await
}
pub async fn get_ranked_nodes_by_edge_kind(
&self,
edge_kind: &EdgeKind,
node_kind: Option<&NodeKind>,
incoming: bool,
path_prefix: Option<&str>,
limit: usize,
) -> Result<Vec<(Node, u64)>> {
self.db
.get_ranked_nodes_by_edge_kind(edge_kind, node_kind, incoming, path_prefix, limit)
.await
}
pub async fn get_largest_nodes(
&self,
node_kind: Option<&NodeKind>,
path_prefix: Option<&str>,
limit: usize,
) -> Result<Vec<(Node, u32)>> {
self.db.get_largest_nodes(node_kind, path_prefix, limit).await
}
pub async fn get_file_coupling(
&self,
fan_in: bool,
path_prefix: Option<&str>,
limit: usize,
) -> Result<Vec<(String, u64)>> {
self.db.get_file_coupling(fan_in, path_prefix, limit).await
}
pub async fn get_inheritance_depth(
&self,
path_prefix: Option<&str>,
limit: usize,
) -> Result<Vec<(Node, u64)>> {
self.db.get_inheritance_depth(path_prefix, limit).await
}
pub async fn get_node_distribution(
&self,
path_prefix: Option<&str>,
) -> Result<Vec<(String, String, u64)>> {
self.db.get_node_distribution(path_prefix).await
}
pub async fn get_call_edges(&self, path_prefix: Option<&str>) -> Result<Vec<(String, String)>> {
self.db.get_call_edges(path_prefix).await
}
pub async fn get_complexity_ranked(
&self,
node_kind: Option<&NodeKind>,
path_prefix: Option<&str>,
limit: usize,
) -> Result<Vec<(Node, u32, u64, u64, u64)>> {
self.db.get_complexity_ranked(node_kind, path_prefix, limit).await
}
pub async fn get_undocumented_public_symbols(
&self,
path_prefix: Option<&str>,
limit: usize,
) -> Result<Vec<Node>> {
self.db
.get_undocumented_public_symbols(path_prefix, limit)
.await
}
pub async fn get_god_classes(
&self,
path_prefix: Option<&str>,
limit: usize,
) -> Result<Vec<(Node, u64, u64, u64)>> {
self.db.get_god_classes(path_prefix, limit).await
}
pub async fn find_circular_dependencies(&self) -> Result<Vec<Vec<String>>> {
let qm = GraphQueryManager::new(&self.db);
qm.find_circular_dependencies().await
}
pub async fn build_context(&self, task: &str, options: &BuildContextOptions) -> Result<TaskContext> {
let builder = ContextBuilder::new(&self.db, &self.project_root);
builder.build_context(task, options).await
}
pub async fn get_all_files(&self) -> Result<Vec<FileRecord>> {
self.db.get_all_files().await
}
pub async fn get_file_dependents(&self, file_path: &str) -> Result<Vec<String>> {
let qm = GraphQueryManager::new(&self.db);
qm.get_file_dependents(file_path).await
}
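    /// Rough per-file token estimates derived from file size alone, using
    /// the common ≈4-bytes-per-token heuristic (no tokenizer runs).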
pub async fn get_file_token_map(&self) -> Result<HashMap<String, u64>> {
let files = self.db.get_all_files().await?;
Ok(files.into_iter().map(|f| (f.path, f.size / 4)).collect())
}
pub async fn get_tokens_saved(&self) -> Result<u64> {
match self.db.get_metadata("tokens_saved").await? {
Some(v) => Ok(v.parse::<u64>().unwrap_or(0)),
None => Ok(0),
}
}
pub async fn set_tokens_saved(&self, value: u64) -> Result<()> {
self.db
.set_metadata("tokens_saved", &value.to_string())
.await
}
pub async fn get_local_counter(&self) -> Result<u64> {
match self.db.get_metadata("local_counter").await? {
Some(v) => Ok(v.parse::<u64>().unwrap_or(0)),
None => Ok(0),
}
}
pub async fn reset_local_counter(&self) -> Result<()> {
self.db.set_metadata("local_counter", "0").await
}
pub async fn add_local_counter(&self, delta: u64) -> Result<()> {
let current = self.get_local_counter().await?;
self.db
.set_metadata("local_counter", &(current + delta).to_string())
.await
}
pub async fn get_nodes_by_dir(&self, dir: &str, kinds: &[NodeKind]) -> Result<Vec<Node>> {
self.db.get_nodes_by_dir(dir, kinds).await
}
pub async fn get_internal_edges(&self, node_ids: &[String]) -> Result<Vec<Edge>> {
self.db.get_internal_edges(node_ids).await
}
pub async fn checkpoint(&self) -> Result<()> {
self.db.checkpoint().await
}
pub async fn optimize(&self) -> Result<()> {
self.db.optimize().await
}
pub fn get_config(&self) -> &TokenSaveConfig {
&self.config
}
pub fn project_root(&self) -> &Path {
&self.project_root
}
pub fn active_branch(&self) -> Option<&str> {
self.active_branch.as_deref()
}
pub fn serving_branch(&self) -> Option<&str> {
self.serving_branch.as_deref()
}
pub fn fallback_warning(&self) -> Option<&str> {
self.fallback_warning.as_deref()
}
pub fn is_fallback(&self) -> bool {
self.fallback_warning.is_some()
}
}
impl TokenSave {
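    /// Returns the subset of `file_paths` whose on-disk mtime is newer than
    /// their recorded `indexed_at`. Paths missing from the index or from
    /// disk are silently skipped.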
pub async fn check_file_staleness(&self, file_paths: &[String]) -> Vec<String> {
let mut stale = Vec::new();
for path in file_paths {
if let Ok(Some(record)) = self.db.get_file(path).await {
let abs_path = self.project_root.join(path);
if let Ok(metadata) = std::fs::metadata(&abs_path) {
if let Ok(mtime) = metadata.modified() {
let mtime_secs = mtime
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs() as i64;
if mtime_secs > record.indexed_at {
stale.push(path.clone());
}
}
}
}
}
stale
}
pub async fn last_index_time(&self) -> Result<i64> {
self.db.last_index_time().await
}
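    /// Counts commits reachable from HEAD newer than `since_timestamp`,
    /// returning 0 when the project is not a git repository or the walk
    /// fails.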
pub fn git_commits_since(&self, since_timestamp: i64) -> usize {
let repo = match gix::open(&self.project_root) {
Ok(r) => r,
Err(_) => return 0,
};
let head = match repo.head_commit() {
Ok(h) => h,
Err(_) => return 0,
};
let sorting = gix::revision::walk::Sorting::ByCommitTimeCutoff {
order: gix::traverse::commit::simple::CommitTimeOrder::NewestFirst,
seconds: since_timestamp,
};
let walk = match head.ancestors().sorting(sorting).all() {
Ok(w) => w,
Err(_) => return 0,
};
walk.filter_map(|r| r.ok()).count()
}
}
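/// Heuristic test-file detection: case-insensitive match against common test
/// directory segments and filename infixes. Prone to the occasional false
/// positive (any path containing e.g. `test/` matches).
///
/// ```ignore
/// assert!(is_test_file("src/__tests__/App.tsx"));
/// assert!(is_test_file("pkg/parser_test.go"));
/// assert!(!is_test_file("src/main.rs"));
/// ```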
pub fn is_test_file(path: &str) -> bool {
let test_segments = [
"test/", "tests/", "__tests__/", "spec/", "e2e/",
".test.", ".spec.", "_test.", "_spec.",
];
let lower = path.to_ascii_lowercase();
test_segments.iter().any(|s| lower.contains(s))
}