use std::cell::{Ref, RefCell, RefMut};
use std::io::{self, BufWriter};
use std::path::{Component, Path, PathBuf};
use std::sync::{mpsc, Arc, Mutex};
use std::time::{Duration, Instant};
use lsp_types::FileChangeType;
use notify::RecommendedWatcher;
use crate::backup::BackupStore;
use crate::bash_background::{BgCompletion, BgTaskRegistry};
use crate::callgraph::CallGraph;
use crate::checkpoint::CheckpointStore;
use crate::config::Config;
use crate::language::LanguageProvider;
use crate::lsp::manager::LspManager;
use crate::lsp::registry::is_config_file_path_with_custom;
use crate::parser::{SharedSymbolCache, SymbolCache};
use crate::protocol::{ProgressFrame, PushFrame, StatusChangedFrame, StatusPayload};
/// Callback through which push frames (progress/status) reach the connected client.
pub type ProgressSender = Arc<Box<dyn Fn(PushFrame) + Send + Sync>>;
/// Slot holding the currently installed progress sender; `None` when no client is attached.
pub type SharedProgressSender = Arc<Mutex<Option<ProgressSender>>>;
/// Process stdout wrapped for buffered, lock-protected protocol writes.
pub type SharedStdoutWriter = Arc<Mutex<BufWriter<io::Stdout>>>;
/// Window over which `StatusEmitter` coalesces bursts of status signals into one push.
const STATUS_DEBOUNCE_MS: u64 = 1_000;
/// Debounced forwarder of status snapshots to the active progress sender.
///
/// `signal` overwrites the pending snapshot and pokes a background thread,
/// which coalesces bursts into at most one push per `STATUS_DEBOUNCE_MS` window.
pub struct StatusEmitter {
    // Newest snapshot; overwritten by each `signal`, drained by the debounce thread.
    latest: Arc<Mutex<Option<StatusPayload>>>,
    // Payload-free wake-up pings for the debounce thread.
    notify: mpsc::Sender<()>,
}
impl StatusEmitter {
fn new(progress_sender: SharedProgressSender) -> Self {
let (notify, rx) = mpsc::channel();
let latest = Arc::new(Mutex::new(None));
let latest_for_thread = Arc::clone(&latest);
std::thread::spawn(move || {
status_debounce_loop(rx, latest_for_thread, progress_sender);
});
Self { latest, notify }
}
pub fn signal(&self, snapshot: StatusPayload) {
if let Ok(mut latest) = self.latest.lock() {
*latest = Some(snapshot);
}
let _ = self.notify.send(());
}
}
/// Background loop for `StatusEmitter`: after any signal, waits out a full
/// `STATUS_DEBOUNCE_MS` window (absorbing further signals), then pushes at most
/// one `StatusChanged` frame carrying the newest snapshot.
///
/// Exits when the notify channel disconnects, i.e. when the emitter is dropped.
fn status_debounce_loop(
    rx: mpsc::Receiver<()>,
    latest: Arc<Mutex<Option<StatusPayload>>>,
    progress_sender: SharedProgressSender,
) {
    while rx.recv().is_ok() {
        // A signal arrived; absorb any further signals until the window elapses.
        let deadline = Instant::now() + Duration::from_millis(STATUS_DEBOUNCE_MS);
        while let Some(remaining) = deadline.checked_duration_since(Instant::now()) {
            match rx.recv_timeout(remaining) {
                Ok(()) => continue,
                Err(mpsc::RecvTimeoutError::Timeout) => break,
                Err(mpsc::RecvTimeoutError::Disconnected) => return,
            }
        }
        // Take (and clear) the newest snapshot; nothing pending means no push.
        let snapshot = latest.lock().ok().and_then(|mut latest| latest.take());
        let Some(snapshot) = snapshot else { continue };
        // Clone the sender out of the mutex so the callback runs unlocked.
        let sender = progress_sender
            .lock()
            .ok()
            .and_then(|sender| sender.clone());
        if let Some(sender) = sender {
            sender(PushFrame::StatusChanged(StatusChangedFrame::new(
                None, snapshot,
            )));
        }
    }
}
use crate::search_index::SearchIndex;
use crate::semantic_index::SemanticIndex;
/// Lifecycle of the semantic index, surfaced via status reporting.
#[derive(Debug, Clone)]
pub enum SemanticIndexStatus {
    /// Semantic indexing is turned off.
    Disabled,
    /// A build is in progress; counters are optional depending on the stage.
    Building {
        stage: String,
        files: Option<usize>,
        entries_done: Option<usize>,
        entries_total: Option<usize>,
    },
    /// The index finished building.
    Ready,
    /// The build failed; the payload is the error description.
    Failed(String),
}
/// Events describing the outcome of a semantic-index build, delivered through
/// the `semantic_index_rx` channel on `AppContext`.
pub enum SemanticIndexEvent {
    /// Incremental build progress (mirrors `SemanticIndexStatus::Building`).
    Progress {
        stage: String,
        files: Option<usize>,
        entries_done: Option<usize>,
        entries_total: Option<usize>,
    },
    /// Build finished; carries the completed index.
    Ready(SemanticIndex),
    /// Build failed with the given error message.
    Failed(String),
}
/// Lexically normalizes `path`: drops `.` components and resolves `..` against
/// the components collected so far (a `..` that cannot pop anything is kept).
/// Purely textual — never touches the filesystem.
fn normalize_path(path: &Path) -> PathBuf {
    path.components().fold(PathBuf::new(), |mut acc, part| {
        match part {
            Component::CurDir => {}
            Component::ParentDir => {
                // Keep a leading `..` when there is nothing left to pop.
                if !acc.pop() {
                    acc.push(part);
                }
            }
            other => acc.push(other),
        }
        acc
    })
}
/// Canonicalizes the deepest existing ancestor of `path`, then re-appends the
/// components that do not exist yet. When canonicalization of the ancestor
/// fails, the ancestor is used as-is.
fn resolve_with_existing_ancestors(path: &Path) -> PathBuf {
    let mut base = path.to_path_buf();
    let mut tail: Vec<std::ffi::OsString> = Vec::new();
    // Peel off trailing components until something on disk exists.
    while !base.exists() {
        match base.file_name() {
            Some(name) => tail.push(name.to_owned()),
            None => break,
        }
        base = match base.parent() {
            Some(parent) => parent.to_path_buf(),
            None => break,
        };
    }
    let mut resolved = std::fs::canonicalize(&base).unwrap_or(base);
    // Re-attach the missing tail in original (outermost-first) order.
    resolved.extend(tail.iter().rev());
    resolved
}
fn path_error_response(
req_id: &str,
path: &Path,
resolved_root: &Path,
) -> crate::protocol::Response {
crate::protocol::Response::error(
req_id,
"path_outside_root",
format!(
"path '{}' is outside the project root '{}'",
path.display(),
resolved_root.display()
),
)
}
/// Walks every ancestor prefix of `candidate`; for each prefix that is a
/// symlink living inside the project root (either the resolved or the raw
/// form), follows its chain and rejects the request if the chain escapes
/// `resolved_root`.
///
/// Fix: the original contained `¤t` (U+00A4) where `&current` belonged —
/// `&curren` had been swallowed as the HTML entity `&curren;` during an
/// encoding round-trip, making the function fail to compile.
///
/// # Errors
/// Propagates the `path_outside_root` response from `iterative_follow_chain`
/// when an in-root symlink resolves outside the root.
fn reject_escaping_symlink(
    req_id: &str,
    original_path: &Path,
    candidate: &Path,
    resolved_root: &Path,
    raw_root: &Path,
) -> Result<(), crate::protocol::Response> {
    let mut current = PathBuf::new();
    for component in candidate.components() {
        current.push(component);
        // Components that do not exist yet cannot be symlinks; skip them.
        let Ok(metadata) = std::fs::symlink_metadata(&current) else {
            continue;
        };
        if !metadata.file_type().is_symlink() {
            continue;
        }
        // Only symlinks inside the root are policed here; anything else is
        // caught by the caller's final prefix check on the resolved path.
        let inside_root = current.starts_with(resolved_root) || current.starts_with(raw_root);
        if !inside_root {
            continue;
        }
        iterative_follow_chain(req_id, original_path, &current, resolved_root)?;
    }
    Ok(())
}
/// Follows a symlink chain starting at `start`, rejecting it when any hop
/// resolves outside `resolved_root`, a link cannot be read, or the chain is
/// deeper than 40 hops (cycle guard).
///
/// # Errors
/// Returns the `path_outside_root` response for `original_path` on any of the
/// rejection conditions above.
fn iterative_follow_chain(
    req_id: &str,
    original_path: &Path,
    start: &Path,
    resolved_root: &Path,
) -> Result<(), crate::protocol::Response> {
    let mut link = start.to_path_buf();
    let mut depth = 0usize;
    loop {
        // Depth cap guards against symlink cycles.
        if depth > 40 {
            return Err(path_error_response(req_id, original_path, resolved_root));
        }
        let target = match std::fs::read_link(&link) {
            Ok(t) => t,
            Err(_) => {
                // Unreadable link: treated as a violation rather than ignored.
                return Err(path_error_response(req_id, original_path, resolved_root));
            }
        };
        // Relative targets resolve against the link's parent directory.
        let resolved_target = if target.is_absolute() {
            normalize_path(&target)
        } else {
            let parent = link.parent().unwrap_or_else(|| Path::new(""));
            normalize_path(&parent.join(&target))
        };
        let canonical_target =
            std::fs::canonicalize(&resolved_target).unwrap_or_else(|_| resolved_target.clone());
        // Accept when either the canonical or the lexical form stays in-root.
        if !canonical_target.starts_with(resolved_root)
            && !resolved_target.starts_with(resolved_root)
        {
            return Err(path_error_response(req_id, original_path, resolved_root));
        }
        match std::fs::symlink_metadata(&resolved_target) {
            // The target is itself a symlink: keep walking the chain.
            Ok(meta) if meta.file_type().is_symlink() => {
                link = resolved_target;
                depth += 1;
            }
            // Non-link (or unreadable) target: chain ends here, in-root.
            _ => break, }
    }
    Ok(())
}
/// Central application state: language provider, stores, indexes, LSP manager,
/// watcher handles, and the shared channels used to push frames to the client.
/// Single-threaded interior mutability throughout (`RefCell`), with `Arc`-based
/// handles for the pieces shared with background threads.
pub struct AppContext {
    provider: Box<dyn LanguageProvider>,
    backup: RefCell<BackupStore>,
    checkpoint: RefCell<CheckpointStore>,
    config: RefCell<Config>,
    // Absolute, canonicalized cache root; set during configuration.
    canonical_cache_root: RefCell<Option<PathBuf>>,
    is_worktree_bridge: RefCell<bool>,
    git_common_dir: RefCell<Option<PathBuf>>,
    callgraph: RefCell<Option<CallGraph>>,
    search_index: RefCell<Option<SearchIndex>>,
    // Receives a search index built on a background thread.
    search_index_rx: RefCell<Option<crossbeam_channel::Receiver<SearchIndex>>>,
    symbol_cache: SharedSymbolCache,
    semantic_index: RefCell<Option<SemanticIndex>>,
    semantic_index_rx: RefCell<Option<crossbeam_channel::Receiver<SemanticIndexEvent>>>,
    semantic_index_status: RefCell<SemanticIndexStatus>,
    semantic_embedding_model: RefCell<Option<crate::semantic_index::EmbeddingModel>>,
    watcher: RefCell<Option<RecommendedWatcher>>,
    watcher_rx: RefCell<Option<mpsc::Receiver<notify::Result<notify::Event>>>>,
    lsp_manager: RefCell<LspManager>,
    lsp_child_registry: crate::lsp::child_registry::LspChildRegistry,
    stdout_writer: SharedStdoutWriter,
    progress_sender: SharedProgressSender,
    status_emitter: StatusEmitter,
    bash_background: BgTaskRegistry,
    filter_registry: crate::compress::SharedFilterRegistry,
    // Lazily-built flag for `filter_registry` (see ensure_filter_registry_loaded).
    filter_registry_loaded: std::sync::atomic::AtomicBool,
    // Atomic mirror of config.experimental_bash_compress for lock-free reads.
    bash_compress_flag: Arc<std::sync::atomic::AtomicBool>,
    gitignore: RefCell<Option<Arc<ignore::gitignore::Gitignore>>>,
}
impl AppContext {
    /// Builds the context around `provider` and `config`, wiring the shared
    /// progress sender into both the status emitter and the background-task
    /// registry, and sharing the LSP child registry with the LSP manager.
    pub fn new(provider: Box<dyn LanguageProvider>, config: Config) -> Self {
        let bash_compress_enabled = config.experimental_bash_compress;
        let progress_sender = Arc::new(Mutex::new(None));
        let stdout_writer = Arc::new(Mutex::new(BufWriter::new(io::stdout())));
        let status_emitter = StatusEmitter::new(Arc::clone(&progress_sender));
        // Reuse the tree-sitter provider's symbol cache when available so both
        // sides share entries; otherwise start a fresh empty cache.
        let symbol_cache = provider
            .as_any()
            .downcast_ref::<crate::parser::TreeSitterProvider>()
            .map(|provider| provider.symbol_cache())
            .unwrap_or_else(|| Arc::new(std::sync::RwLock::new(SymbolCache::new())));
        let lsp_child_registry = crate::lsp::child_registry::LspChildRegistry::new();
        let mut lsp_manager = LspManager::new();
        lsp_manager.set_child_registry(lsp_child_registry.clone());
        AppContext {
            provider,
            backup: RefCell::new(BackupStore::new()),
            checkpoint: RefCell::new(CheckpointStore::new()),
            config: RefCell::new(config),
            canonical_cache_root: RefCell::new(None),
            is_worktree_bridge: RefCell::new(false),
            git_common_dir: RefCell::new(None),
            callgraph: RefCell::new(None),
            search_index: RefCell::new(None),
            search_index_rx: RefCell::new(None),
            symbol_cache,
            semantic_index: RefCell::new(None),
            semantic_index_rx: RefCell::new(None),
            semantic_index_status: RefCell::new(SemanticIndexStatus::Disabled),
            semantic_embedding_model: RefCell::new(None),
            watcher: RefCell::new(None),
            watcher_rx: RefCell::new(None),
            lsp_manager: RefCell::new(lsp_manager),
            lsp_child_registry,
            stdout_writer,
            progress_sender: Arc::clone(&progress_sender),
            status_emitter,
            bash_background: BgTaskRegistry::new(progress_sender),
            filter_registry: Arc::new(std::sync::RwLock::new(
                crate::compress::toml_filter::FilterRegistry::default(),
            )),
            filter_registry_loaded: std::sync::atomic::AtomicBool::new(false),
            bash_compress_flag: Arc::new(std::sync::atomic::AtomicBool::new(bash_compress_enabled)),
            gitignore: RefCell::new(None),
        }
    }
pub fn gitignore(&self) -> Option<Arc<ignore::gitignore::Gitignore>> {
self.gitignore.borrow().clone()
}
    /// Rebuilds the cached gitignore matcher from the project root's
    /// `.gitignore`, `.git/info/exclude`, and nested `.gitignore` files
    /// (scanned up to 8 levels deep). Clears the matcher when there is no
    /// project root, no patterns, or the build fails.
    pub fn rebuild_gitignore(&self) {
        use ignore::gitignore::GitignoreBuilder;
        use std::path::Path;
        let root_raw = match self.config().project_root.clone() {
            Some(r) => r,
            None => {
                *self.gitignore.borrow_mut() = None;
                return;
            }
        };
        // Canonicalize so matcher paths agree with canonicalized lookup paths.
        let root = std::fs::canonicalize(&root_raw).unwrap_or(root_raw);
        let mut builder = GitignoreBuilder::new(&root);
        let root_ignore = Path::new(&root).join(".gitignore");
        if root_ignore.exists() {
            // `GitignoreBuilder::add` returns Some(err) on parse failure.
            if let Some(err) = builder.add(&root_ignore) {
                crate::slog_warn!(
                    "gitignore parse error in {}: {}",
                    root_ignore.display(),
                    err
                );
            }
        }
        let info_exclude = Path::new(&root).join(".git").join("info").join("exclude");
        if info_exclude.exists() {
            if let Some(err) = builder.add(&info_exclude) {
                crate::slog_warn!(
                    "gitignore parse error in {}: {}",
                    info_exclude.display(),
                    err
                );
            }
        }
        // Walk for nested .gitignore files, skipping known-heavy directories.
        let walker = ignore::WalkBuilder::new(&root)
            .standard_filters(true)
            .hidden(false)
            .max_depth(Some(8))
            .filter_entry(|entry| {
                let name = entry.file_name().to_string_lossy();
                !matches!(
                    name.as_ref(),
                    "node_modules" | "target" | ".git" | ".opencode" | ".alfonso"
                )
            })
            .build();
        for entry in walker.flatten() {
            // The root .gitignore was already added above; skip it here.
            if entry.file_name() == ".gitignore" && entry.path() != root_ignore {
                if let Some(err) = builder.add(entry.path()) {
                    crate::slog_warn!(
                        "nested gitignore parse error in {}: {}",
                        entry.path().display(),
                        err
                    );
                }
            }
        }
        match builder.build() {
            Ok(gi) => {
                let count = gi.num_ignores();
                if count > 0 {
                    crate::slog_info!("gitignore matcher built: {} pattern(s)", count);
                    *self.gitignore.borrow_mut() = Some(Arc::new(gi));
                } else {
                    // No patterns at all: store None so lookups can skip matching.
                    *self.gitignore.borrow_mut() = None;
                }
            }
            Err(err) => {
                crate::slog_warn!("gitignore matcher build failed: {}", err);
                *self.gitignore.borrow_mut() = None;
            }
        }
    }
    /// Shared atomic mirror of `config.experimental_bash_compress`, handed to
    /// background tasks for lock-free reads.
    pub fn bash_compress_flag(&self) -> Arc<std::sync::atomic::AtomicBool> {
        Arc::clone(&self.bash_compress_flag)
    }
    /// Re-syncs the atomic flag from the current config value.
    pub fn sync_bash_compress_flag(&self) {
        let value = self.config().experimental_bash_compress;
        self.bash_compress_flag
            .store(value, std::sync::atomic::Ordering::Relaxed);
    }
    /// Updates both the config field and its atomic mirror.
    pub fn set_bash_compress_enabled(&self, enabled: bool) {
        self.config_mut().experimental_bash_compress = enabled;
        self.bash_compress_flag
            .store(enabled, std::sync::atomic::Ordering::Relaxed);
    }
    /// Read guard over the filter registry, lazily built on first use.
    /// A poisoned lock is recovered by taking the inner guard.
    pub fn filter_registry(
        &self,
    ) -> std::sync::RwLockReadGuard<'_, crate::compress::toml_filter::FilterRegistry> {
        self.ensure_filter_registry_loaded();
        match self.filter_registry.read() {
            Ok(g) => g,
            Err(poisoned) => poisoned.into_inner(),
        }
    }
    /// Shared handle to the (lazily built) filter registry.
    pub fn shared_filter_registry(&self) -> crate::compress::SharedFilterRegistry {
        self.ensure_filter_registry_loaded();
        Arc::clone(&self.filter_registry)
    }
    /// Rebuilds the registry from the current context and marks it loaded,
    /// recovering from a poisoned write lock.
    pub fn reset_filter_registry(&self) {
        let new_registry = crate::compress::build_registry_for_context(self);
        match self.filter_registry.write() {
            Ok(mut slot) => *slot = new_registry,
            Err(poisoned) => *poisoned.into_inner() = new_registry,
        }
        self.filter_registry_loaded
            .store(true, std::sync::atomic::Ordering::Release);
    }
fn ensure_filter_registry_loaded(&self) {
use std::sync::atomic::Ordering;
if self.filter_registry_loaded.load(Ordering::Acquire) {
return;
}
let new_registry = crate::compress::build_registry_for_context(self);
if let Ok(mut slot) = self.filter_registry.write() {
*slot = new_registry;
self.filter_registry_loaded.store(true, Ordering::Release);
}
}
    /// Clone of the shared LSP child-process registry.
    pub fn lsp_child_registry(&self) -> crate::lsp::child_registry::LspChildRegistry {
        self.lsp_child_registry.clone()
    }
    /// Shared buffered stdout handle used for protocol output.
    pub fn stdout_writer(&self) -> SharedStdoutWriter {
        Arc::clone(&self.stdout_writer)
    }
    /// Installs (or clears, with `None`) the push-frame callback.
    pub fn set_progress_sender(&self, sender: Option<ProgressSender>) {
        if let Ok(mut progress_sender) = self.progress_sender.lock() {
            *progress_sender = sender;
        }
    }
    /// Emits a progress frame if a sender is installed (best effort).
    pub fn emit_progress(&self, frame: ProgressFrame) {
        // Clone the sender out of the mutex so the callback runs unlocked.
        let Ok(progress_sender) = self.progress_sender.lock().map(|sender| sender.clone()) else {
            return;
        };
        if let Some(sender) = progress_sender.as_ref() {
            sender(PushFrame::Progress(frame));
        }
    }
    /// Debounced status-changed push channel.
    pub fn status_emitter(&self) -> &StatusEmitter {
        &self.status_emitter
    }
    /// Currently installed progress sender, if any.
    pub fn progress_sender_handle(&self) -> Option<ProgressSender> {
        self.progress_sender
            .lock()
            .ok()
            .and_then(|sender| sender.clone())
    }
    /// Registry of background bash tasks.
    pub fn bash_background(&self) -> &BgTaskRegistry {
        &self.bash_background
    }
    /// Drains completed background bash tasks from the registry.
    pub fn drain_bg_completions(&self) -> Vec<BgCompletion> {
        self.bash_background.drain_completions()
    }
    /// Language provider backing parsing and symbol extraction.
    pub fn provider(&self) -> &dyn LanguageProvider {
        self.provider.as_ref()
    }
    pub fn backup(&self) -> &RefCell<BackupStore> {
        &self.backup
    }
    pub fn checkpoint(&self) -> &RefCell<CheckpointStore> {
        &self.checkpoint
    }
    /// Immutable borrow of the config; panics if a mutable borrow is live.
    pub fn config(&self) -> Ref<'_, Config> {
        self.config.borrow()
    }
    /// Mutable borrow of the config; panics if any borrow is live.
    pub fn config_mut(&self) -> RefMut<'_, Config> {
        self.config.borrow_mut()
    }
    /// Records the canonical cache root; expected to be absolute.
    pub fn set_canonical_cache_root(&self, root: PathBuf) {
        debug_assert!(root.is_absolute());
        *self.canonical_cache_root.borrow_mut() = Some(root);
    }
    /// Canonical cache root; panics if read before configuration set it.
    pub fn canonical_cache_root(&self) -> PathBuf {
        self.canonical_cache_root
            .borrow()
            .clone()
            .expect("canonical_cache_root accessed before handle_configure")
    }
    /// Non-panicking variant of `canonical_cache_root`.
    pub fn canonical_cache_root_opt(&self) -> Option<PathBuf> {
        self.canonical_cache_root.borrow().clone()
    }
    /// Sets the worktree-bridge flag and the common git dir in one call.
    pub fn set_cache_role(&self, is_worktree_bridge: bool, git_common_dir: Option<PathBuf>) {
        *self.is_worktree_bridge.borrow_mut() = is_worktree_bridge;
        *self.git_common_dir.borrow_mut() = git_common_dir;
    }
    pub fn is_worktree_bridge(&self) -> bool {
        *self.is_worktree_bridge.borrow()
    }
    /// Human-readable cache role for diagnostics.
    pub fn cache_role(&self) -> &'static str {
        if self.canonical_cache_root.borrow().is_none() {
            "not_initialized"
        } else if self.is_worktree_bridge() {
            "worktree"
        } else {
            "main"
        }
    }
    pub fn callgraph(&self) -> &RefCell<Option<CallGraph>> {
        &self.callgraph
    }
    pub fn search_index(&self) -> &RefCell<Option<SearchIndex>> {
        &self.search_index
    }
    /// Channel delivering a search index built on a background thread.
    pub fn search_index_rx(&self) -> &RefCell<Option<crossbeam_channel::Receiver<SearchIndex>>> {
        &self.search_index_rx
    }
    pub fn symbol_cache(&self) -> SharedSymbolCache {
        Arc::clone(&self.symbol_cache)
    }
    /// Clears the symbol cache; returns the count reported by `reset`
    /// (0 when the lock is poisoned).
    pub fn reset_symbol_cache(&self) -> u64 {
        self.symbol_cache
            .write()
            .map(|mut cache| cache.reset())
            .unwrap_or(0)
    }
    pub fn semantic_index(&self) -> &RefCell<Option<SemanticIndex>> {
        &self.semantic_index
    }
    /// Channel delivering semantic-index build events.
    pub fn semantic_index_rx(
        &self,
    ) -> &RefCell<Option<crossbeam_channel::Receiver<SemanticIndexEvent>>> {
        &self.semantic_index_rx
    }
    pub fn semantic_index_status(&self) -> &RefCell<SemanticIndexStatus> {
        &self.semantic_index_status
    }
    pub fn semantic_embedding_model(
        &self,
    ) -> &RefCell<Option<crate::semantic_index::EmbeddingModel>> {
        &self.semantic_embedding_model
    }
    /// Filesystem watcher handle (`None` until watching starts).
    pub fn watcher(&self) -> &RefCell<Option<RecommendedWatcher>> {
        &self.watcher
    }
    pub fn watcher_rx(&self) -> &RefCell<Option<mpsc::Receiver<notify::Result<notify::Event>>>> {
        &self.watcher_rx
    }
    /// Mutable access to the LSP manager; panics if already borrowed.
    pub fn lsp(&self) -> RefMut<'_, LspManager> {
        self.lsp_manager.borrow_mut()
    }
    /// Best-effort LSP sync after an edit; silently skipped when the manager
    /// is currently borrowed elsewhere.
    pub fn lsp_notify_file_changed(&self, file_path: &Path, content: &str) {
        if let Ok(mut lsp) = self.lsp_manager.try_borrow_mut() {
            let config = self.config();
            if let Err(e) = lsp.notify_file_changed(file_path, content, &config) {
                crate::slog_warn!("sync error for {}: {}", file_path.display(), e);
            }
        }
    }
    /// Syncs an edit to the LSP servers and waits (bounded by `timeout`) for
    /// post-edit diagnostics matching the document versions this sync produced.
    /// Returns a default outcome when the manager is busy, sync fails, or no
    /// server acknowledged a version.
    pub fn lsp_notify_and_collect_diagnostics(
        &self,
        file_path: &Path,
        content: &str,
        timeout: std::time::Duration,
    ) -> crate::lsp::manager::PostEditWaitOutcome {
        let Ok(mut lsp) = self.lsp_manager.try_borrow_mut() else {
            return crate::lsp::manager::PostEditWaitOutcome::default();
        };
        // Flush stale events so the pre-edit snapshot reflects current state.
        lsp.drain_events();
        let pre_snapshot = lsp.snapshot_pre_edit_state(file_path);
        let config = self.config();
        let expected_versions = match lsp.notify_file_changed_versioned(file_path, content, &config)
        {
            Ok(v) => v,
            Err(e) => {
                crate::slog_warn!("sync error for {}: {}", file_path.display(), e);
                return crate::lsp::manager::PostEditWaitOutcome::default();
            }
        };
        // Nothing to wait for if no server produced a version.
        if expected_versions.is_empty() {
            return crate::lsp::manager::PostEditWaitOutcome::default();
        }
        lsp.wait_for_post_edit_diagnostics(
            file_path,
            &config,
            &expected_versions,
            &pre_snapshot,
            timeout,
        )
    }
    /// Root markers declared by user-configured LSP servers, flattened.
    fn custom_lsp_root_markers(&self) -> Vec<String> {
        self.config()
            .lsp_servers
            .iter()
            .flat_map(|s| s.root_markers.iter().cloned())
            .collect()
    }
    /// Filters `file_paths` down to config files and notifies LSP servers,
    /// deriving each change type from whether the file currently exists.
    fn notify_watched_config_files(&self, file_paths: &[PathBuf]) {
        let custom_markers = self.custom_lsp_root_markers();
        let config_paths: Vec<(PathBuf, FileChangeType)> = file_paths
            .iter()
            .filter(|path| is_config_file_path_with_custom(path, &custom_markers))
            .cloned()
            .map(|path| {
                let change_type = if path.exists() {
                    FileChangeType::CHANGED
                } else {
                    FileChangeType::DELETED
                };
                (path, change_type)
            })
            .collect();
        self.notify_watched_config_events(&config_paths);
    }
    /// Extracts `multi_file_write_paths` (array of strings) from request
    /// params; `None` when the key is absent or yields no paths.
    fn multi_file_write_paths(params: &serde_json::Value) -> Option<Vec<PathBuf>> {
        let paths = params
            .get("multi_file_write_paths")
            .and_then(|value| value.as_array())?
            .iter()
            .filter_map(|value| value.as_str())
            .map(PathBuf::from)
            .collect::<Vec<_>>();
        (!paths.is_empty()).then_some(paths)
    }
    /// Parses `multi_file_write_paths` as objects (`{path, type}`), keeping
    /// only config files; the change type falls back to the file's current
    /// on-disk state when absent or unrecognized. `None` when nothing matches.
    fn watched_file_events_from_params(
        params: &serde_json::Value,
        extra_markers: &[String],
    ) -> Option<Vec<(PathBuf, FileChangeType)>> {
        let events = params
            .get("multi_file_write_paths")
            .and_then(|value| value.as_array())?
            .iter()
            .filter_map(|entry| {
                let path = entry
                    .get("path")
                    .and_then(|value| value.as_str())
                    .map(PathBuf::from)?;
                if !is_config_file_path_with_custom(&path, extra_markers) {
                    return None;
                }
                let change_type = entry
                    .get("type")
                    .and_then(|value| value.as_str())
                    .and_then(Self::parse_file_change_type)
                    .unwrap_or_else(|| Self::change_type_from_current_state(&path));
                Some((path, change_type))
            })
            .collect::<Vec<_>>();
        (!events.is_empty()).then_some(events)
    }
    /// Maps a textual change type (exactly three accepted casings per word)
    /// to the LSP enum; anything else yields `None`.
    fn parse_file_change_type(value: &str) -> Option<FileChangeType> {
        match value {
            "created" | "CREATED" | "Created" => Some(FileChangeType::CREATED),
            "changed" | "CHANGED" | "Changed" => Some(FileChangeType::CHANGED),
            "deleted" | "DELETED" | "Deleted" => Some(FileChangeType::DELETED),
            _ => None,
        }
    }
    /// CHANGED when the file exists on disk, DELETED otherwise.
    fn change_type_from_current_state(path: &Path) -> FileChangeType {
        if path.exists() {
            FileChangeType::CHANGED
        } else {
            FileChangeType::DELETED
        }
    }
    /// Sends watched-file change notifications to the LSP servers (best
    /// effort: skipped when the manager is borrowed, warning on error).
    fn notify_watched_config_events(&self, config_paths: &[(PathBuf, FileChangeType)]) {
        if config_paths.is_empty() {
            return;
        }
        if let Ok(mut lsp) = self.lsp_manager.try_borrow_mut() {
            let config = self.config();
            if let Err(e) = lsp.notify_files_watched_changed(config_paths, &config) {
                crate::slog_warn!("watched-file sync error: {}", e);
            }
        }
    }
    /// Public single-file variant: a no-op unless `file_path` is recognized
    /// as a config file (including custom LSP root markers).
    pub fn lsp_notify_watched_config_file(&self, file_path: &Path, change_type: FileChangeType) {
        let custom_markers = self.custom_lsp_root_markers();
        if !is_config_file_path_with_custom(file_path, &custom_markers) {
            return;
        }
        self.notify_watched_config_events(&[(file_path.to_path_buf(), change_type)]);
    }
    /// Post-write LSP hook for multi-file writes: always notifies watched
    /// config files, then either fire-and-forget syncs `file_path` (when no
    /// diagnostics were requested) or syncs and waits for diagnostics.
    pub fn lsp_post_multi_file_write(
        &self,
        file_path: &Path,
        content: &str,
        file_paths: &[PathBuf],
        params: &serde_json::Value,
    ) -> Option<crate::lsp::manager::PostEditWaitOutcome> {
        self.notify_watched_config_files(file_paths);
        let wants_diagnostics = params
            .get("diagnostics")
            .and_then(|v| v.as_bool())
            .unwrap_or(false);
        if !wants_diagnostics {
            self.lsp_notify_file_changed(file_path, content);
            return None;
        }
        // Client-requested wait: default 3s, clamped to 10s.
        let wait_ms = params
            .get("wait_ms")
            .and_then(|v| v.as_u64())
            .unwrap_or(3000)
            .min(10_000);
        Some(self.lsp_notify_and_collect_diagnostics(
            file_path,
            content,
            std::time::Duration::from_millis(wait_ms),
        ))
    }
pub fn lsp_post_write(
&self,
file_path: &Path,
content: &str,
params: &serde_json::Value,
) -> Option<crate::lsp::manager::PostEditWaitOutcome> {
let wants_diagnostics = params
.get("diagnostics")
.and_then(|v| v.as_bool())
.unwrap_or(false);
let custom_markers = self.custom_lsp_root_markers();
if !wants_diagnostics {
if let Some(file_paths) = Self::multi_file_write_paths(params) {
self.notify_watched_config_files(&file_paths);
} else if let Some(config_events) =
Self::watched_file_events_from_params(params, &custom_markers)
{
self.notify_watched_config_events(&config_events);
}
self.lsp_notify_file_changed(file_path, content);
return None;
}
let wait_ms = params
.get("wait_ms")
.and_then(|v| v.as_u64())
.unwrap_or(3000)
.min(10_000);
if let Some(file_paths) = Self::multi_file_write_paths(params) {
return self.lsp_post_multi_file_write(file_path, content, &file_paths, params);
}
if let Some(config_events) = Self::watched_file_events_from_params(params, &custom_markers)
{
self.notify_watched_config_events(&config_events);
}
Some(self.lsp_notify_and_collect_diagnostics(
file_path,
content,
std::time::Duration::from_millis(wait_ms),
))
}
    /// Validates `path` against the configured project root.
    ///
    /// When `restrict_to_project_root` is off, or no root is configured, the
    /// path passes through untouched. Otherwise the path is canonicalized —
    /// or, when it does not exist yet, lexically normalized, checked for
    /// escaping in-root symlinks, and resolved against its deepest existing
    /// ancestor — and the result must stay under the canonicalized root.
    ///
    /// # Errors
    /// Returns a `path_outside_root` protocol response when the resolved path
    /// (or an in-root symlink along the way) escapes the root.
    pub fn validate_path(
        &self,
        req_id: &str,
        path: &Path,
    ) -> Result<std::path::PathBuf, crate::protocol::Response> {
        let config = self.config();
        if !config.restrict_to_project_root {
            return Ok(path.to_path_buf());
        }
        let root = match &config.project_root {
            Some(r) => r.clone(),
            None => return Ok(path.to_path_buf()), };
        // Release the config borrow before doing filesystem work.
        drop(config);
        let raw_root = root.clone();
        let resolved_root = std::fs::canonicalize(&root).unwrap_or(root);
        let resolved = match std::fs::canonicalize(path) {
            Ok(resolved) => resolved,
            Err(_) => {
                // Path does not exist yet: normalize lexically and make sure
                // no existing in-root symlink prefix escapes the root.
                let normalized = normalize_path(path);
                reject_escaping_symlink(req_id, path, &normalized, &resolved_root, &raw_root)?;
                resolve_with_existing_ancestors(&normalized)
            }
        };
        if !resolved.starts_with(&resolved_root) {
            return Err(path_error_response(req_id, path, &resolved_root));
        }
        Ok(resolved)
    }
    /// Number of running LSP servers; 0 when the manager is currently borrowed.
    pub fn lsp_server_count(&self) -> usize {
        self.lsp_manager
            .try_borrow()
            .map(|lsp| lsp.server_count())
            .unwrap_or(0)
    }
    /// Symbol-cache statistics as JSON (`local_entries`, `warm_entries`).
    pub fn symbol_cache_stats(&self) -> serde_json::Value {
        let entries = self
            .symbol_cache
            .read()
            .map(|cache| cache.len())
            .unwrap_or(0);
        // NOTE(review): no warm cache is wired up here; warm_entries is always 0.
        serde_json::json!({
            "local_entries": entries,
            "warm_entries": 0,
        })
    }
}
#[cfg(test)]
mod status_emitter_tests {
    use super::*;
    use crate::parser::TreeSitterProvider;
    // Context whose progress sender forwards every push frame into a channel.
    fn ctx_with_frame_rx() -> (AppContext, mpsc::Receiver<PushFrame>) {
        let ctx = AppContext::new(Box::new(TreeSitterProvider::new()), Config::default());
        let (tx, rx) = mpsc::channel();
        ctx.set_progress_sender(Some(Arc::new(Box::new(move |frame| {
            let _ = tx.send(frame);
        }))));
        (ctx, rx)
    }
    #[test]
    fn status_emitter_signal_triggers_push() {
        let (ctx, rx) = ctx_with_frame_rx();
        ctx.status_emitter().signal(ctx.build_status_snapshot());
        // Allow the full debounce window plus slack before giving up.
        let frame = rx
            .recv_timeout(Duration::from_millis(STATUS_DEBOUNCE_MS + 500))
            .expect("status_changed push");
        assert!(matches!(frame, PushFrame::StatusChanged(_)));
    }
    #[test]
    fn status_emitter_debounces_burst() {
        let (ctx, rx) = ctx_with_frame_rx();
        for _ in 0..10 {
            ctx.status_emitter().signal(ctx.build_status_snapshot());
        }
        let frame = rx
            .recv_timeout(Duration::from_millis(STATUS_DEBOUNCE_MS + 500))
            .expect("status_changed push");
        assert!(matches!(frame, PushFrame::StatusChanged(_)));
        // All 10 signals must have been coalesced into a single push.
        assert!(rx.try_recv().is_err());
    }
    #[test]
    fn status_emitter_separate_windows_separate_pushes() {
        let (ctx, rx) = ctx_with_frame_rx();
        ctx.status_emitter().signal(ctx.build_status_snapshot());
        rx.recv_timeout(Duration::from_millis(STATUS_DEBOUNCE_MS + 500))
            .expect("first push");
        // A signal after the first window closed starts a new window.
        ctx.status_emitter().signal(ctx.build_status_snapshot());
        rx.recv_timeout(Duration::from_millis(STATUS_DEBOUNCE_MS + 500))
            .expect("second push");
    }
    #[test]
    fn status_emitter_no_signal_no_push() {
        let (_ctx, rx) = ctx_with_frame_rx();
        assert!(rx
            .recv_timeout(Duration::from_millis(STATUS_DEBOUNCE_MS + 100))
            .is_err());
    }
    #[test]
    fn status_emitter_shutdown_cleanly_exits_debounce_thread() {
        let (ctx, rx) = ctx_with_frame_rx();
        // Dropping the context disconnects the notify channel; the debounce
        // thread exits and no frames arrive.
        drop(ctx);
        assert!(rx.recv_timeout(Duration::from_millis(50)).is_err());
    }
}
#[cfg(test)]
mod gitignore_tests {
    use super::*;
    use std::fs;
    use std::path::Path;
    use tempfile::TempDir;
    fn make_ctx_with_root(root: &Path) -> AppContext {
        let provider = Box::new(crate::parser::TreeSitterProvider::new());
        let config = Config {
            project_root: Some(root.to_path_buf()),
            ..Config::default()
        };
        AppContext::new(provider, config)
    }
    // Mirrors how callers are expected to consult the matcher: canonicalize,
    // confirm the path is under the matcher root, then match with parents.
    fn is_ignored(ctx: &AppContext, path: &Path) -> bool {
        let Some(matcher) = ctx.gitignore() else {
            return false;
        };
        let canonical = std::fs::canonicalize(path).unwrap_or_else(|_| path.to_path_buf());
        if !canonical.starts_with(matcher.path()) {
            return false;
        }
        let is_dir = canonical.is_dir();
        matcher
            .matched_path_or_any_parents(&canonical, is_dir)
            .is_ignore()
    }
    #[test]
    fn rebuild_gitignore_returns_none_without_project_root() {
        let provider = Box::new(crate::parser::TreeSitterProvider::new());
        let ctx = AppContext::new(provider, Config::default());
        ctx.rebuild_gitignore();
        assert!(ctx.gitignore().is_none());
    }
    #[test]
    fn rebuild_gitignore_returns_none_for_project_with_no_gitignore() {
        let tmp = TempDir::new().unwrap();
        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();
        assert!(ctx.gitignore().is_none());
    }
    #[test]
    fn matcher_filters_files_in_ignored_dist_dir() {
        let tmp = TempDir::new().unwrap();
        fs::write(tmp.path().join(".gitignore"), "dist/\nbuild/\n").unwrap();
        fs::create_dir_all(tmp.path().join("dist")).unwrap();
        fs::create_dir_all(tmp.path().join("src")).unwrap();
        let dist_file = tmp.path().join("dist").join("bundle.js");
        let src_file = tmp.path().join("src").join("app.ts");
        fs::write(&dist_file, "x").unwrap();
        fs::write(&src_file, "y").unwrap();
        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();
        assert!(ctx.gitignore().is_some());
        assert!(
            is_ignored(&ctx, &dist_file),
            "dist/bundle.js should be ignored"
        );
        assert!(
            !is_ignored(&ctx, &src_file),
            "src/app.ts should NOT be ignored"
        );
    }
    #[test]
    fn matcher_handles_node_modules_and_target() {
        let tmp = TempDir::new().unwrap();
        fs::write(tmp.path().join(".gitignore"), "node_modules/\ntarget/\n").unwrap();
        fs::create_dir_all(tmp.path().join("node_modules/foo")).unwrap();
        fs::create_dir_all(tmp.path().join("target/debug")).unwrap();
        let nm_file = tmp.path().join("node_modules/foo/index.js");
        let target_file = tmp.path().join("target/debug/aft");
        fs::write(&nm_file, "x").unwrap();
        fs::write(&target_file, "x").unwrap();
        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();
        assert!(is_ignored(&ctx, &nm_file));
        assert!(is_ignored(&ctx, &target_file));
    }
    #[test]
    fn matcher_honors_negation_pattern() {
        let tmp = TempDir::new().unwrap();
        fs::write(tmp.path().join(".gitignore"), "*.log\n!important.log\n").unwrap();
        let random_log = tmp.path().join("random.log");
        let important_log = tmp.path().join("important.log");
        fs::write(&random_log, "x").unwrap();
        fs::write(&important_log, "y").unwrap();
        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();
        assert!(is_ignored(&ctx, &random_log));
        assert!(
            !is_ignored(&ctx, &important_log),
            "negation pattern should un-ignore important.log"
        );
    }
    #[test]
    fn rebuild_picks_up_gitignore_changes() {
        let tmp = TempDir::new().unwrap();
        let ignore_path = tmp.path().join(".gitignore");
        fs::write(&ignore_path, "foo.txt\n").unwrap();
        let foo = tmp.path().join("foo.txt");
        let bar = tmp.path().join("bar.txt");
        fs::write(&foo, "").unwrap();
        fs::write(&bar, "").unwrap();
        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();
        assert!(is_ignored(&ctx, &foo));
        assert!(!is_ignored(&ctx, &bar));
        // Rewriting the file and rebuilding must swap which path is ignored.
        fs::write(&ignore_path, "bar.txt\n").unwrap();
        ctx.rebuild_gitignore();
        assert!(!is_ignored(&ctx, &foo));
        assert!(is_ignored(&ctx, &bar));
    }
    #[test]
    fn gitignore_loads_info_exclude_when_present() {
        let tmp = TempDir::new().unwrap();
        let info_dir = tmp.path().join(".git/info");
        fs::create_dir_all(&info_dir).unwrap();
        fs::write(info_dir.join("exclude"), "secrets.txt\n").unwrap();
        let secrets = tmp.path().join("secrets.txt");
        let public = tmp.path().join("public.txt");
        fs::write(&secrets, "token").unwrap();
        fs::write(&public, "ok").unwrap();
        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();
        assert!(is_ignored(&ctx, &secrets));
        assert!(!is_ignored(&ctx, &public));
    }
    #[test]
    fn matcher_picks_up_nested_gitignore() {
        let tmp = TempDir::new().unwrap();
        // Empty root .gitignore; patterns come only from the nested file.
        fs::write(tmp.path().join(".gitignore"), "").unwrap();
        let sub = tmp.path().join("packages/foo");
        fs::create_dir_all(&sub).unwrap();
        fs::write(sub.join(".gitignore"), "generated/\n").unwrap();
        let generated_file = sub.join("generated").join("out.js");
        fs::create_dir_all(generated_file.parent().unwrap()).unwrap();
        fs::write(&generated_file, "x").unwrap();
        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();
        assert!(
            is_ignored(&ctx, &generated_file),
            "nested gitignore in packages/foo/.gitignore should ignore generated/"
        );
    }
}