//! aft/context.rs — shared application context, status debouncing, and
//! path-normalization helpers threaded through all command handlers.
1use std::cell::{Ref, RefCell, RefMut};
2use std::io::{self, BufWriter};
3use std::path::{Component, Path, PathBuf};
4use std::sync::{mpsc, Arc, Mutex};
5use std::time::{Duration, Instant};
6
7use lsp_types::FileChangeType;
8use notify::RecommendedWatcher;
9
10use crate::backup::BackupStore;
11use crate::bash_background::{BgCompletion, BgTaskRegistry};
12use crate::callgraph::CallGraph;
13use crate::checkpoint::CheckpointStore;
14use crate::config::Config;
15use crate::language::LanguageProvider;
16use crate::lsp::manager::LspManager;
17use crate::lsp::registry::is_config_file_path_with_custom;
18use crate::parser::{SharedSymbolCache, SymbolCache};
19use crate::protocol::{ProgressFrame, PushFrame, StatusChangedFrame, StatusPayload};
20
/// Callback used to push an out-of-band frame (progress, status, …) to the client.
pub type ProgressSender = Arc<Box<dyn Fn(PushFrame) + Send + Sync>>;
/// Slot holding the currently-installed progress sender; `None` until the main loop installs one.
pub type SharedProgressSender = Arc<Mutex<Option<ProgressSender>>>;
/// Buffered, mutex-guarded handle to process stdout, shared across threads.
pub type SharedStdoutWriter = Arc<Mutex<BufWriter<io::Stdout>>>;
/// Debounce window for StatusChanged pushes; see `status_debounce_loop`.
const STATUS_DEBOUNCE_MS: u64 = 1_000;
25
/// Debounced status broadcaster: `signal` stores the newest snapshot and
/// wakes a background thread that emits at most one StatusChanged push per
/// `STATUS_DEBOUNCE_MS` window (see `status_debounce_loop`).
pub struct StatusEmitter {
    // Most recent snapshot; the debounce thread `take`s it after the window.
    latest: Arc<Mutex<Option<StatusPayload>>>,
    // Wake-up channel into the debounce thread.
    notify: mpsc::Sender<()>,
}
30
31impl StatusEmitter {
32    fn new(progress_sender: SharedProgressSender) -> Self {
33        let (notify, rx) = mpsc::channel();
34        let latest = Arc::new(Mutex::new(None));
35        let latest_for_thread = Arc::clone(&latest);
36        std::thread::spawn(move || {
37            status_debounce_loop(rx, latest_for_thread, progress_sender);
38        });
39        Self { latest, notify }
40    }
41
42    pub fn signal(&self, snapshot: StatusPayload) {
43        if let Ok(mut latest) = self.latest.lock() {
44            *latest = Some(snapshot);
45        }
46        let _ = self.notify.send(());
47    }
48}
49
/// Worker loop behind `StatusEmitter`: coalesces bursts of `signal` calls
/// into at most one StatusChanged push per `STATUS_DEBOUNCE_MS` window.
///
/// Blocks on `rx` for the first wake-up, absorbs further wake-ups until the
/// window elapses, then emits whatever snapshot is newest. Returns (ending
/// the thread) when all senders are dropped.
fn status_debounce_loop(
    rx: mpsc::Receiver<()>,
    latest: Arc<Mutex<Option<StatusPayload>>>,
    progress_sender: SharedProgressSender,
) {
    while rx.recv().is_ok() {
        // A wake-up arrived: open a debounce window and swallow any further
        // wake-ups that land before the deadline.
        let deadline = Instant::now() + Duration::from_millis(STATUS_DEBOUNCE_MS);
        while let Some(remaining) = deadline.checked_duration_since(Instant::now()) {
            match rx.recv_timeout(remaining) {
                Ok(()) => continue,
                Err(mpsc::RecvTimeoutError::Timeout) => break,
                Err(mpsc::RecvTimeoutError::Disconnected) => return,
            }
        }

        // `take` (not clone) the snapshot so a later window doesn't re-emit
        // stale data. A poisoned lock yields None and skips the emit.
        let snapshot = latest.lock().ok().and_then(|mut latest| latest.take());
        let Some(snapshot) = snapshot else { continue };
        // Re-read the sender each window: it may be installed/cleared at any
        // time via `set_progress_sender`.
        let sender = progress_sender
            .lock()
            .ok()
            .and_then(|sender| sender.clone());
        if let Some(sender) = sender {
            sender(PushFrame::StatusChanged(StatusChangedFrame::new(
                None, snapshot,
            )));
        }
    }
}
78use crate::search_index::SearchIndex;
79use crate::semantic_index::SemanticIndex;
80
/// Lifecycle of the background semantic-index build, surfaced to status pushes.
#[derive(Debug, Clone)]
pub enum SemanticIndexStatus {
    /// Semantic indexing is turned off.
    Disabled,
    /// Build in progress; counters are `None` until the stage reports them.
    Building {
        stage: String,
        files: Option<usize>,
        entries_done: Option<usize>,
        entries_total: Option<usize>,
    },
    /// Index built and installed.
    Ready,
    /// Build aborted; payload is the error message.
    Failed(String),
}
93
/// Messages sent from the semantic-index build thread back to the main loop.
pub enum SemanticIndexEvent {
    /// Periodic progress report, mirroring `SemanticIndexStatus::Building`.
    Progress {
        stage: String,
        files: Option<usize>,
        entries_done: Option<usize>,
        entries_total: Option<usize>,
    },
    /// Build finished; carries the completed index to install.
    Ready(SemanticIndex),
    /// Build failed; carries the error message.
    Failed(String),
}
104
/// Normalize a path by resolving `.` and `..` components lexically,
/// without touching the filesystem. This prevents path traversal
/// attacks when `fs::canonicalize` fails (e.g. for non-existent paths).
///
/// Rules (matching conventional lexical normalization):
/// - `.` components are dropped;
/// - `..` cancels the preceding normal component;
/// - `..` at an absolute root is a no-op (`/..` == `/`);
/// - leading `..`s on a relative path are preserved (`../../x` stays `../../x`).
fn normalize_path(path: &Path) -> PathBuf {
    let mut result = PathBuf::new();
    for component in path.components() {
        match component {
            Component::ParentDir => match result.components().next_back() {
                // `..` can never climb above the filesystem root.
                Some(Component::RootDir) | Some(Component::Prefix(_)) => {}
                // Nothing accumulated yet, or only `..`s so far: keep the
                // `..` so relative traversal stays visible. (The previous
                // version popped a trailing `..`, silently collapsing
                // `../..` to an empty path, and pushed a literal `..` after
                // a root, leaving `/..` unnormalized.)
                Some(Component::ParentDir) | None => result.push(component),
                // A normal component: cancel it out.
                _ => {
                    result.pop();
                }
            },
            Component::CurDir => {} // Skip `.`
            _ => result.push(component),
        }
    }
    result
}
124
/// Canonicalize the deepest existing ancestor of `path`, then re-append the
/// missing tail components, yielding a stable absolute form even when the
/// path itself does not (yet) exist on disk.
fn resolve_with_existing_ancestors(path: &Path) -> PathBuf {
    let mut base = path.to_path_buf();
    let mut missing_tail = Vec::new();

    // Peel components off the end until we reach something on disk (or run
    // out of poppable components).
    while !base.exists() {
        let Some(name) = base.file_name() else { break };
        missing_tail.push(name.to_owned());
        let Some(parent) = base.parent() else { break };
        base = parent.to_path_buf();
    }

    // Canonicalize what exists (falling back to the lexical form on error),
    // then rebuild the missing tail in original order.
    let resolved = std::fs::canonicalize(&base).unwrap_or(base);
    missing_tail
        .into_iter()
        .rev()
        .fold(resolved, |mut acc, segment| {
            acc.push(segment);
            acc
        })
}
149
150fn path_error_response(
151    req_id: &str,
152    path: &Path,
153    resolved_root: &Path,
154) -> crate::protocol::Response {
155    crate::protocol::Response::error(
156        req_id,
157        "path_outside_root",
158        format!(
159            "path '{}' is outside the project root '{}'",
160            path.display(),
161            resolved_root.display()
162        ),
163    )
164}
165
166/// Walk `candidate` component-by-component. For any component that is a
167/// symlink on disk, iteratively follow the full chain (up to 40 hops) and
168/// reject if any hop's resolved target lies outside `resolved_root`.
169///
170/// This is the fallback path used when `fs::canonicalize` fails (e.g. on
171/// Linux with broken symlink chains pointing to non-existent destinations).
172/// On macOS `canonicalize` also fails for broken symlinks but the returned
173/// `/var/...` tempdir paths diverge from `resolved_root`'s `/private/var/...`
174/// form, so we must accept either form when deciding which symlinks to check.
175fn reject_escaping_symlink(
176    req_id: &str,
177    original_path: &Path,
178    candidate: &Path,
179    resolved_root: &Path,
180    raw_root: &Path,
181) -> Result<(), crate::protocol::Response> {
182    let mut current = PathBuf::new();
183
184    for component in candidate.components() {
185        current.push(component);
186
187        let Ok(metadata) = std::fs::symlink_metadata(&current) else {
188            continue;
189        };
190
191        if !metadata.file_type().is_symlink() {
192            continue;
193        }
194
195        // Only check symlinks that live inside the project root. This skips
196        // OS-level prefix symlinks (macOS /var → /private/var) that are not
197        // inside our project directory and whose "escaping" is harmless.
198        //
199        // We compare against BOTH the canonicalized root (resolved_root, e.g.
200        // /private/var/.../project) AND the raw root (e.g. /var/.../project)
201        // because tempdir() returns raw paths while fs::canonicalize returns
202        // the resolved form — and our `current` may be in either form.
203        let inside_root = current.starts_with(resolved_root) || current.starts_with(raw_root);
204        if !inside_root {
205            continue;
206        }
207
208        iterative_follow_chain(req_id, original_path, &current, resolved_root)?;
209    }
210
211    Ok(())
212}
213
/// Iteratively follow a symlink chain from `link` and reject if any hop's
/// resolved target is outside `resolved_root`. Depth-capped at 40 hops.
///
/// Returns `Ok(())` when the whole chain stays inside the root, and the
/// standard `path_outside_root` error response otherwise (including on an
/// unreadable link or a chain deeper than the cap).
fn iterative_follow_chain(
    req_id: &str,
    original_path: &Path,
    start: &Path,
    resolved_root: &Path,
) -> Result<(), crate::protocol::Response> {
    let mut link = start.to_path_buf();
    let mut depth = 0usize;

    loop {
        // Depth cap guards against symlink cycles (`a -> b -> a`), which
        // would otherwise loop forever.
        if depth > 40 {
            return Err(path_error_response(req_id, original_path, resolved_root));
        }

        let target = match std::fs::read_link(&link) {
            Ok(t) => t,
            Err(_) => {
                // Can't read the link — treat as escaping to be safe.
                return Err(path_error_response(req_id, original_path, resolved_root));
            }
        };

        // Relative targets are resolved against the link's own directory,
        // then normalized lexically (the target may not exist on disk).
        let resolved_target = if target.is_absolute() {
            normalize_path(&target)
        } else {
            let parent = link.parent().unwrap_or_else(|| Path::new(""));
            normalize_path(&parent.join(&target))
        };

        // Check boundary: use canonicalized target when available (handles
        // macOS /var → /private/var aliasing), fall back to the normalized
        // path when canonicalize fails (e.g. broken symlink on Linux).
        let canonical_target =
            std::fs::canonicalize(&resolved_target).unwrap_or_else(|_| resolved_target.clone());

        // Accept if EITHER form is inside the root; reject only when both
        // forms fall outside it.
        if !canonical_target.starts_with(resolved_root)
            && !resolved_target.starts_with(resolved_root)
        {
            return Err(path_error_response(req_id, original_path, resolved_root));
        }

        // If the target is itself a symlink, follow the next hop.
        match std::fs::symlink_metadata(&resolved_target) {
            Ok(meta) if meta.file_type().is_symlink() => {
                link = resolved_target;
                depth += 1;
            }
            _ => break, // Non-symlink or non-existent target — chain ends here.
        }
    }

    Ok(())
}
269
/// Shared application context threaded through all command handlers.
///
/// Holds the language provider, backup/checkpoint stores, configuration,
/// and call graph engine. Constructed once at startup and passed by
/// reference to `dispatch`.
///
/// Stores use `RefCell` for interior mutability — the binary is single-threaded
/// (one request at a time on the stdin read loop) so runtime borrow checking
/// is safe and never contended.
pub struct AppContext {
    /// Language analysis backend (set once in `new`, read-only afterwards).
    provider: Box<dyn LanguageProvider>,
    /// File backup store (see `BackupStore`).
    backup: RefCell<BackupStore>,
    /// Checkpoint store (see `CheckpointStore`).
    checkpoint: RefCell<CheckpointStore>,
    /// Live configuration; mutated by the configure handler.
    config: RefCell<Config>,
    /// Canonical cache root; `None` until `handle_configure` sets it.
    canonical_cache_root: RefCell<Option<PathBuf>>,
    /// True when serving a git worktree rather than the main checkout.
    is_worktree_bridge: RefCell<bool>,
    /// Shared `.git` common dir when in worktree mode; otherwise `None`.
    git_common_dir: RefCell<Option<PathBuf>>,
    /// Call graph engine; `None` until built.
    callgraph: RefCell<Option<CallGraph>>,
    /// Text search index; `None` until built.
    search_index: RefCell<Option<SearchIndex>>,
    /// Receiver for a background search-index build, if one is in flight.
    search_index_rx: RefCell<Option<crossbeam_channel::Receiver<SearchIndex>>>,
    /// Parsed-symbol cache, shared with the provider when it is tree-sitter.
    symbol_cache: SharedSymbolCache,
    /// Semantic search index; `None` until built.
    semantic_index: RefCell<Option<SemanticIndex>>,
    /// Receiver for background semantic-index build events.
    semantic_index_rx: RefCell<Option<crossbeam_channel::Receiver<SemanticIndexEvent>>>,
    /// Current semantic-index lifecycle state (starts `Disabled`).
    semantic_index_status: RefCell<SemanticIndexStatus>,
    /// Cached embedding model so index rebuilds can skip the model load.
    semantic_embedding_model: RefCell<Option<crate::semantic_index::EmbeddingModel>>,
    /// File watcher handle, kept alive so watching continues.
    watcher: RefCell<Option<RecommendedWatcher>>,
    /// Receiver side of watcher events.
    watcher_rx: RefCell<Option<mpsc::Receiver<notify::Result<notify::Event>>>>,
    /// LSP server manager (see `LspManager`).
    lsp_manager: RefCell<LspManager>,
    /// Shared registry of LSP child PIDs. Cloned and passed to the signal
    /// handler so it can SIGKILL all children before aft exits, preventing
    /// orphaned LSP processes when bridge.shutdown() SIGTERMs aft.
    lsp_child_registry: crate::lsp::child_registry::LspChildRegistry,
    /// Buffered stdout handle shared with push-frame writers.
    stdout_writer: SharedStdoutWriter,
    /// Installed push-frame sender (`None` in tests / CLI runs).
    progress_sender: SharedProgressSender,
    /// Debounced status broadcaster built over `progress_sender`.
    status_emitter: StatusEmitter,
    /// Background bash task registry (see `BgTaskRegistry`).
    bash_background: BgTaskRegistry,
    /// Thread-safe registry of TOML output filters. Lazy-built on first
    /// access; populated atomically via `RwLock`. Shared between command
    /// handlers (which use it through `filter_registry()` -> read guard) and
    /// the `BgTaskRegistry` watchdog thread (which uses it through
    /// `compress::compress_with_registry`). Reloaded when configure changes
    /// the project root or storage_dir; see [`AppContext::reset_filter_registry`].
    filter_registry: crate::compress::SharedFilterRegistry,
    /// Set to true once the filter_registry has been populated. Avoids
    /// double-loading on hot paths without holding a write lock.
    filter_registry_loaded: std::sync::atomic::AtomicBool,
    /// Live `experimental.bash.compress` flag, kept in sync with `config`
    /// from the configure handler. Exposed via [`AppContext::bash_compress_flag`]
    /// so the BgTaskRegistry's watchdog-thread compressor can read it without
    /// holding the config refcell.
    bash_compress_flag: Arc<std::sync::atomic::AtomicBool>,
    /// Project gitignore matcher, rebuilt by [`AppContext::rebuild_gitignore`]
    /// whenever `project_root` changes or a watcher event reports a
    /// `.gitignore` write. Used by the watcher event filter to decide which
    /// path-changes are interesting to AFT's caches. `None` when no project
    /// root is configured or when the project has no gitignore files; in that
    /// case the watcher falls back to a small hardcoded infra-directory skip.
    gitignore: RefCell<Option<Arc<ignore::gitignore::Gitignore>>>,
}
329
330impl AppContext {
    /// Build the context with empty stores, no indices, and no watcher.
    /// Everything else (cache root, indices, watcher, gitignore) is filled
    /// in later by the configure handler.
    pub fn new(provider: Box<dyn LanguageProvider>, config: Config) -> Self {
        // Mirror the flag into a local before `config` moves into its RefCell.
        let bash_compress_enabled = config.experimental_bash_compress;
        let progress_sender = Arc::new(Mutex::new(None));
        let stdout_writer = Arc::new(Mutex::new(BufWriter::new(io::stdout())));
        let status_emitter = StatusEmitter::new(Arc::clone(&progress_sender));
        // Share the TreeSitter provider's symbol cache when available, so the
        // provider and context observe the same parsed symbols; otherwise
        // start a fresh empty cache.
        let symbol_cache = provider
            .as_any()
            .downcast_ref::<crate::parser::TreeSitterProvider>()
            .map(|provider| provider.symbol_cache())
            .unwrap_or_else(|| Arc::new(std::sync::RwLock::new(SymbolCache::new())));
        // The child registry is shared: one clone lives in the manager, the
        // struct keeps another for the shutdown signal handler.
        let lsp_child_registry = crate::lsp::child_registry::LspChildRegistry::new();
        let mut lsp_manager = LspManager::new();
        lsp_manager.set_child_registry(lsp_child_registry.clone());
        AppContext {
            provider,
            backup: RefCell::new(BackupStore::new()),
            checkpoint: RefCell::new(CheckpointStore::new()),
            config: RefCell::new(config),
            canonical_cache_root: RefCell::new(None),
            is_worktree_bridge: RefCell::new(false),
            git_common_dir: RefCell::new(None),
            callgraph: RefCell::new(None),
            search_index: RefCell::new(None),
            search_index_rx: RefCell::new(None),
            symbol_cache,
            semantic_index: RefCell::new(None),
            semantic_index_rx: RefCell::new(None),
            semantic_index_status: RefCell::new(SemanticIndexStatus::Disabled),
            semantic_embedding_model: RefCell::new(None),
            watcher: RefCell::new(None),
            watcher_rx: RefCell::new(None),
            lsp_manager: RefCell::new(lsp_manager),
            lsp_child_registry,
            stdout_writer,
            progress_sender: Arc::clone(&progress_sender),
            status_emitter,
            bash_background: BgTaskRegistry::new(progress_sender),
            filter_registry: Arc::new(std::sync::RwLock::new(
                crate::compress::toml_filter::FilterRegistry::default(),
            )),
            filter_registry_loaded: std::sync::atomic::AtomicBool::new(false),
            bash_compress_flag: Arc::new(std::sync::atomic::AtomicBool::new(bash_compress_enabled)),
            gitignore: RefCell::new(None),
        }
    }
376
377    /// Borrow the cached project gitignore matcher. Returns `None` when no
378    /// project_root is configured or when the project has no gitignore files.
379    pub fn gitignore(&self) -> Option<Arc<ignore::gitignore::Gitignore>> {
380        self.gitignore.borrow().clone()
381    }
382
383    /// Rebuild the gitignore matcher from the current `project_root` and
384    /// cache it. Called by the configure handler whenever the project root
385    /// changes, and by the watcher event drain when a `.gitignore` file
386    /// itself is modified.
387    ///
388    /// The builder honors:
389    /// - `<project_root>/.gitignore`
390    /// - `<project_root>/.git/info/exclude` (loaded explicitly because
391    ///   `GitignoreBuilder::new` does not auto-discover it)
392    /// - nested `.gitignore` files (each `.gitignore` discovered during
393    ///   the recursive walk)
394    ///
395    /// Stores `None` if there's no project_root or no matchable gitignore
396    /// files. Logs build errors but never fails configure.
397    pub fn rebuild_gitignore(&self) {
398        use ignore::gitignore::GitignoreBuilder;
399        use std::path::Path;
400        let root_raw = match self.config().project_root.clone() {
401            Some(r) => r,
402            None => {
403                *self.gitignore.borrow_mut() = None;
404                return;
405            }
406        };
407        // Canonicalize the root so symlink-prefix mismatches don't cause
408        // `Gitignore::matched_path_or_any_parents` to panic on watcher event
409        // paths. macOS routinely surfaces `/private/var/...` while `project_root`
410        // arrives as `/var/...` (a symlink to `/private/var`); the `ignore`
411        // crate's matcher panics when a query path isn't lexically under the
412        // matcher's root. Canonicalizing both ends (here for root, naturally
413        // for watcher events on macOS) keeps them in the same prefix space.
414        let root = std::fs::canonicalize(&root_raw).unwrap_or(root_raw);
415        let mut builder = GitignoreBuilder::new(&root);
416        // Add root .gitignore (the most common case)
417        let root_ignore = Path::new(&root).join(".gitignore");
418        if root_ignore.exists() {
419            if let Some(err) = builder.add(&root_ignore) {
420                crate::slog_warn!(
421                    "gitignore parse error in {}: {}",
422                    root_ignore.display(),
423                    err
424                );
425            }
426        }
427        // .git/info/exclude — manually added because GitignoreBuilder::new()
428        // does not auto-discover it (verified against ignore-0.4.25 source).
429        let info_exclude = Path::new(&root).join(".git").join("info").join("exclude");
430        if info_exclude.exists() {
431            if let Some(err) = builder.add(&info_exclude) {
432                crate::slog_warn!(
433                    "gitignore parse error in {}: {}",
434                    info_exclude.display(),
435                    err
436                );
437            }
438        }
439        // Walk the project to pick up nested .gitignore files. Cap the walk
440        // at the same SOURCE_WALK_LIMIT used by other configure-time walks
441        // (currently 20000 files); gitignore lookup-cost stays bounded for
442        // huge monorepos. Skip the obvious infra dirs so we don't accidentally
443        // load a vendored repo's .gitignore that doesn't apply to ours.
444        let walker = ignore::WalkBuilder::new(&root)
445            .standard_filters(true)
446            // Hidden files are filtered by default, but `.gitignore` starts with
447            // `.` so we need to traverse "hidden" entries to find nested ones.
448            .hidden(false)
449            .max_depth(Some(8))
450            .filter_entry(|entry| {
451                let name = entry.file_name().to_string_lossy();
452                !matches!(
453                    name.as_ref(),
454                    "node_modules" | "target" | ".git" | ".opencode" | ".alfonso"
455                )
456            })
457            .build();
458        for entry in walker.flatten() {
459            if entry.file_name() == ".gitignore" && entry.path() != root_ignore {
460                if let Some(err) = builder.add(entry.path()) {
461                    crate::slog_warn!(
462                        "nested gitignore parse error in {}: {}",
463                        entry.path().display(),
464                        err
465                    );
466                }
467            }
468        }
469        match builder.build() {
470            Ok(gi) => {
471                let count = gi.num_ignores();
472                if count > 0 {
473                    crate::slog_info!("gitignore matcher built: {} pattern(s)", count);
474                    *self.gitignore.borrow_mut() = Some(Arc::new(gi));
475                } else {
476                    *self.gitignore.borrow_mut() = None;
477                }
478            }
479            Err(err) => {
480                crate::slog_warn!("gitignore matcher build failed: {}", err);
481                *self.gitignore.borrow_mut() = None;
482            }
483        }
484    }
485
    /// Shared atomic mirror of `experimental.bash.compress`. Updated by the
    /// configure handler. Read by the BgTaskRegistry compressor closure.
    /// Cloning the `Arc` is cheap (refcount bump only).
    pub fn bash_compress_flag(&self) -> Arc<std::sync::atomic::AtomicBool> {
        Arc::clone(&self.bash_compress_flag)
    }
491
492    /// Update the shared `bash_compress_flag` mirror. Call this from the
493    /// configure handler whenever `experimental.bash.compress` changes so the
494    /// BgTaskRegistry watchdog sees the new value on the next completion.
495    pub fn sync_bash_compress_flag(&self) {
496        let value = self.config().experimental_bash_compress;
497        self.bash_compress_flag
498            .store(value, std::sync::atomic::Ordering::Relaxed);
499    }
500
501    pub fn set_bash_compress_enabled(&self, enabled: bool) {
502        self.config_mut().experimental_bash_compress = enabled;
503        self.bash_compress_flag
504            .store(enabled, std::sync::atomic::Ordering::Relaxed);
505    }
506
507    /// Read-only access to the TOML filter registry, building it lazily on
508    /// first use. Returns an `RwLockReadGuard` that callers can `lookup`
509    /// against directly.
510    pub fn filter_registry(
511        &self,
512    ) -> std::sync::RwLockReadGuard<'_, crate::compress::toml_filter::FilterRegistry> {
513        self.ensure_filter_registry_loaded();
514        match self.filter_registry.read() {
515            Ok(g) => g,
516            Err(poisoned) => poisoned.into_inner(),
517        }
518    }
519
520    /// Returns the shared `Arc<RwLock<FilterRegistry>>` handle so threads
521    /// outside `AppContext` (notably the bash watchdog) can read it without
522    /// touching the rest of the context.
523    pub fn shared_filter_registry(&self) -> crate::compress::SharedFilterRegistry {
524        self.ensure_filter_registry_loaded();
525        Arc::clone(&self.filter_registry)
526    }
527
528    /// Force a fresh load of the TOML filter registry. Called when configure
529    /// changes the project root, storage_dir, or trust state so subsequent
530    /// `compress::compress` calls pick up new filters.
531    pub fn reset_filter_registry(&self) {
532        let new_registry = crate::compress::build_registry_for_context(self);
533        match self.filter_registry.write() {
534            Ok(mut slot) => *slot = new_registry,
535            Err(poisoned) => *poisoned.into_inner() = new_registry,
536        }
537        self.filter_registry_loaded
538            .store(true, std::sync::atomic::Ordering::Release);
539    }
540
541    fn ensure_filter_registry_loaded(&self) {
542        use std::sync::atomic::Ordering;
543        if self.filter_registry_loaded.load(Ordering::Acquire) {
544            return;
545        }
546        // Build outside the lock to avoid blocking other readers during a
547        // multi-file TOML parse.
548        let new_registry = crate::compress::build_registry_for_context(self);
549        if let Ok(mut slot) = self.filter_registry.write() {
550            *slot = new_registry;
551            self.filter_registry_loaded.store(true, Ordering::Release);
552        }
553    }
554
    /// Clone the LSP child registry handle. Used by main.rs to give the
    /// signal handler thread a way to SIGKILL LSP children on shutdown.
    /// The clone shares state with the copy held by the LSP manager.
    pub fn lsp_child_registry(&self) -> crate::lsp::child_registry::LspChildRegistry {
        self.lsp_child_registry.clone()
    }
560
    /// Shared handle to the buffered stdout writer (cheap `Arc` clone).
    pub fn stdout_writer(&self) -> SharedStdoutWriter {
        Arc::clone(&self.stdout_writer)
    }
564
565    pub fn set_progress_sender(&self, sender: Option<ProgressSender>) {
566        if let Ok(mut progress_sender) = self.progress_sender.lock() {
567            *progress_sender = sender;
568        }
569    }
570
571    pub fn emit_progress(&self, frame: ProgressFrame) {
572        let Ok(progress_sender) = self.progress_sender.lock().map(|sender| sender.clone()) else {
573            return;
574        };
575        if let Some(sender) = progress_sender.as_ref() {
576            sender(PushFrame::Progress(frame));
577        }
578    }
579
    /// Handle to the debounced status broadcaster (see `StatusEmitter`).
    pub fn status_emitter(&self) -> &StatusEmitter {
        &self.status_emitter
    }
583
584    /// Get a clone of the current progress sender for use from background
585    /// threads. Returns `None` when the main loop hasn't installed one (tests,
586    /// CLI without push frames).
587    ///
588    /// Used by `configure`'s deferred file-walk thread to push warnings after
589    /// configure has already returned, so configure latency stays sub-100 ms
590    /// even on huge directories.
591    pub fn progress_sender_handle(&self) -> Option<ProgressSender> {
592        self.progress_sender
593            .lock()
594            .ok()
595            .and_then(|sender| sender.clone())
596    }
597
    /// Registry of background bash tasks (see `BgTaskRegistry`).
    pub fn bash_background(&self) -> &BgTaskRegistry {
        &self.bash_background
    }
601
    /// Drain and return completions of finished background bash tasks.
    pub fn drain_bg_completions(&self) -> Vec<BgCompletion> {
        self.bash_background.drain_completions()
    }
605
    /// Access the language provider (set once in `new`; read-only).
    pub fn provider(&self) -> &dyn LanguageProvider {
        self.provider.as_ref()
    }
610
    /// Access the backup store (interior-mutable; borrow as needed).
    pub fn backup(&self) -> &RefCell<BackupStore> {
        &self.backup
    }
615
    /// Access the checkpoint store (interior-mutable; borrow as needed).
    pub fn checkpoint(&self) -> &RefCell<CheckpointStore> {
        &self.checkpoint
    }
620
    /// Access the configuration (shared borrow). Panics if a mutable borrow
    /// via `config_mut` is still live — drop guards before re-borrowing.
    pub fn config(&self) -> Ref<'_, Config> {
        self.config.borrow()
    }
625
    /// Access the configuration (mutable borrow). Panics if any other borrow
    /// is live; keep the guard's scope tight.
    pub fn config_mut(&self) -> RefMut<'_, Config> {
        self.config.borrow_mut()
    }
630
631    pub fn set_canonical_cache_root(&self, root: PathBuf) {
632        debug_assert!(root.is_absolute());
633        *self.canonical_cache_root.borrow_mut() = Some(root);
634    }
635
636    pub fn canonical_cache_root(&self) -> PathBuf {
637        self.canonical_cache_root
638            .borrow()
639            .clone()
640            .expect("canonical_cache_root accessed before handle_configure")
641    }
642
    /// Non-panicking variant of `canonical_cache_root`: `None` before configure.
    pub fn canonical_cache_root_opt(&self) -> Option<PathBuf> {
        self.canonical_cache_root.borrow().clone()
    }
646
647    pub fn set_cache_role(&self, is_worktree_bridge: bool, git_common_dir: Option<PathBuf>) {
648        *self.is_worktree_bridge.borrow_mut() = is_worktree_bridge;
649        *self.git_common_dir.borrow_mut() = git_common_dir;
650    }
651
    /// True when this process serves a git worktree (see `set_cache_role`).
    pub fn is_worktree_bridge(&self) -> bool {
        *self.is_worktree_bridge.borrow()
    }
655
656    pub fn cache_role(&self) -> &'static str {
657        if self.canonical_cache_root.borrow().is_none() {
658            "not_initialized"
659        } else if self.is_worktree_bridge() {
660            "worktree"
661        } else {
662            "main"
663        }
664    }
665
    /// Access the call graph engine (`None` until built).
    pub fn callgraph(&self) -> &RefCell<Option<CallGraph>> {
        &self.callgraph
    }
670
    /// Access the search index (`None` until built).
    pub fn search_index(&self) -> &RefCell<Option<SearchIndex>> {
        &self.search_index
    }
675
    /// Access the search-index build receiver (`None` when no build in flight).
    pub fn search_index_rx(&self) -> &RefCell<Option<crossbeam_channel::Receiver<SearchIndex>>> {
        &self.search_index_rx
    }
680
    /// Access the shared symbol cache (cheap `Arc` clone).
    pub fn symbol_cache(&self) -> SharedSymbolCache {
        Arc::clone(&self.symbol_cache)
    }
685
686    /// Clear the shared symbol cache and return the new active generation.
687    pub fn reset_symbol_cache(&self) -> u64 {
688        self.symbol_cache
689            .write()
690            .map(|mut cache| cache.reset())
691            .unwrap_or(0)
692    }
693
    /// Access the semantic search index (`None` until built).
    pub fn semantic_index(&self) -> &RefCell<Option<SemanticIndex>> {
        &self.semantic_index
    }
698
    /// Access the semantic-index build receiver (`None` when no build in flight).
    pub fn semantic_index_rx(
        &self,
    ) -> &RefCell<Option<crossbeam_channel::Receiver<SemanticIndexEvent>>> {
        &self.semantic_index_rx
    }
705
    /// Access the semantic-index lifecycle state (see `SemanticIndexStatus`).
    pub fn semantic_index_status(&self) -> &RefCell<SemanticIndexStatus> {
        &self.semantic_index_status
    }
709
    /// Access the cached semantic embedding model (`None` until first load).
    pub fn semantic_embedding_model(
        &self,
    ) -> &RefCell<Option<crate::semantic_index::EmbeddingModel>> {
        &self.semantic_embedding_model
    }
716
    /// Access the file watcher handle (kept alive to continue watching).
    pub fn watcher(&self) -> &RefCell<Option<RecommendedWatcher>> {
        &self.watcher
    }
721
    /// Access the watcher event receiver (`None` until a watcher is installed).
    pub fn watcher_rx(&self) -> &RefCell<Option<mpsc::Receiver<notify::Result<notify::Event>>>> {
        &self.watcher_rx
    }
726
    /// Access the LSP manager (mutable borrow; panics on re-entrant borrow —
    /// use `try_borrow_mut` paths like `lsp_notify_file_changed` otherwise).
    pub fn lsp(&self) -> RefMut<'_, LspManager> {
        self.lsp_manager.borrow_mut()
    }
731
732    /// Notify LSP servers that a file was written.
733    /// Call this after write_format_validate in command handlers.
734    pub fn lsp_notify_file_changed(&self, file_path: &Path, content: &str) {
735        if let Ok(mut lsp) = self.lsp_manager.try_borrow_mut() {
736            let config = self.config();
737            if let Err(e) = lsp.notify_file_changed(file_path, content, &config) {
738                crate::slog_warn!("sync error for {}: {}", file_path.display(), e);
739            }
740        }
741    }
742
    /// Notify LSP and optionally wait for diagnostics.
    ///
    /// Call this after `write_format_validate` when the request has `"diagnostics": true`.
    /// Sends didChange to the server, waits briefly for publishDiagnostics, and returns
    /// any diagnostics for the file. If no server is running, returns empty immediately.
    ///
    /// v0.17.3: this is the version-aware path. Pre-edit cached diagnostics
    /// are NEVER returned — only entries whose `version` matches the
    /// post-edit document version (or, for unversioned servers, whose
    /// `epoch` advanced past the pre-edit snapshot).
    ///
    /// NOTE: the drain → snapshot → notify → wait ordering below is load-
    /// bearing; reordering it re-opens the stale-diagnostics race.
    pub fn lsp_notify_and_collect_diagnostics(
        &self,
        file_path: &Path,
        content: &str,
        timeout: std::time::Duration,
    ) -> crate::lsp::manager::PostEditWaitOutcome {
        // Re-entrant borrow (e.g. called while `lsp()` is held) degrades to
        // an empty outcome instead of panicking.
        let Ok(mut lsp) = self.lsp_manager.try_borrow_mut() else {
            return crate::lsp::manager::PostEditWaitOutcome::default();
        };

        // Clear any queued notifications before this write so the wait loop only
        // observes diagnostics triggered by the current change.
        lsp.drain_events();

        // Snapshot per-server epochs and document versions BEFORE sending
        // didChange so the wait loop can prove freshness without accepting
        // stale pre-edit publishes that arrived late.
        let pre_snapshot = lsp.snapshot_pre_edit_state(file_path);

        // Send didChange/didOpen and capture per-server target version.
        let config = self.config();
        let expected_versions = match lsp.notify_file_changed_versioned(file_path, content, &config)
        {
            Ok(v) => v,
            Err(e) => {
                crate::slog_warn!("sync error for {}: {}", file_path.display(), e);
                return crate::lsp::manager::PostEditWaitOutcome::default();
            }
        };

        // No server matched this file — return an empty outcome that's
        // honestly `complete: true` (nothing to wait for).
        if expected_versions.is_empty() {
            return crate::lsp::manager::PostEditWaitOutcome::default();
        }

        lsp.wait_for_post_edit_diagnostics(
            file_path,
            &config,
            &expected_versions,
            &pre_snapshot,
            timeout,
        )
    }
797
798    /// Collect custom server root_markers from user config for use in
799    /// `is_config_file_path_with_custom` checks (#25).
800    fn custom_lsp_root_markers(&self) -> Vec<String> {
801        self.config()
802            .lsp_servers
803            .iter()
804            .flat_map(|s| s.root_markers.iter().cloned())
805            .collect()
806    }
807
808    fn notify_watched_config_files(&self, file_paths: &[PathBuf]) {
809        let custom_markers = self.custom_lsp_root_markers();
810        let config_paths: Vec<(PathBuf, FileChangeType)> = file_paths
811            .iter()
812            .filter(|path| is_config_file_path_with_custom(path, &custom_markers))
813            .cloned()
814            .map(|path| {
815                let change_type = if path.exists() {
816                    FileChangeType::CHANGED
817                } else {
818                    FileChangeType::DELETED
819                };
820                (path, change_type)
821            })
822            .collect();
823
824        self.notify_watched_config_events(&config_paths);
825    }
826
827    fn multi_file_write_paths(params: &serde_json::Value) -> Option<Vec<PathBuf>> {
828        let paths = params
829            .get("multi_file_write_paths")
830            .and_then(|value| value.as_array())?
831            .iter()
832            .filter_map(|value| value.as_str())
833            .map(PathBuf::from)
834            .collect::<Vec<_>>();
835
836        (!paths.is_empty()).then_some(paths)
837    }
838
839    /// Parse config-file watched events from `multi_file_write_paths` when the
840    /// array contains object entries `{ "path": "...", "type": "created|changed|deleted" }`.
841    ///
842    /// This handles the OBJECT variant of `multi_file_write_paths`. The STRING
843    /// variant (bare path strings) is handled by `multi_file_write_paths()` and
844    /// `notify_watched_config_files()`. Both variants read the same JSON key but
845    /// with different per-entry schemas — they are NOT redundant.
846    ///
847    /// #18 note: in older code this function also existed alongside `multi_file_write_paths()`
848    /// and was reachable via the `else if` branch when all entries were objects.
849    /// Restoring both is correct.
850    fn watched_file_events_from_params(
851        params: &serde_json::Value,
852        extra_markers: &[String],
853    ) -> Option<Vec<(PathBuf, FileChangeType)>> {
854        let events = params
855            .get("multi_file_write_paths")
856            .and_then(|value| value.as_array())?
857            .iter()
858            .filter_map(|entry| {
859                // Only handle object entries — string entries go through multi_file_write_paths()
860                let path = entry
861                    .get("path")
862                    .and_then(|value| value.as_str())
863                    .map(PathBuf::from)?;
864
865                if !is_config_file_path_with_custom(&path, extra_markers) {
866                    return None;
867                }
868
869                let change_type = entry
870                    .get("type")
871                    .and_then(|value| value.as_str())
872                    .and_then(Self::parse_file_change_type)
873                    .unwrap_or_else(|| Self::change_type_from_current_state(&path));
874
875                Some((path, change_type))
876            })
877            .collect::<Vec<_>>();
878
879        (!events.is_empty()).then_some(events)
880    }
881
882    fn parse_file_change_type(value: &str) -> Option<FileChangeType> {
883        match value {
884            "created" | "CREATED" | "Created" => Some(FileChangeType::CREATED),
885            "changed" | "CHANGED" | "Changed" => Some(FileChangeType::CHANGED),
886            "deleted" | "DELETED" | "Deleted" => Some(FileChangeType::DELETED),
887            _ => None,
888        }
889    }
890
891    fn change_type_from_current_state(path: &Path) -> FileChangeType {
892        if path.exists() {
893            FileChangeType::CHANGED
894        } else {
895            FileChangeType::DELETED
896        }
897    }
898
899    fn notify_watched_config_events(&self, config_paths: &[(PathBuf, FileChangeType)]) {
900        if config_paths.is_empty() {
901            return;
902        }
903
904        if let Ok(mut lsp) = self.lsp_manager.try_borrow_mut() {
905            let config = self.config();
906            if let Err(e) = lsp.notify_files_watched_changed(config_paths, &config) {
907                crate::slog_warn!("watched-file sync error: {}", e);
908            }
909        }
910    }
911
912    pub fn lsp_notify_watched_config_file(&self, file_path: &Path, change_type: FileChangeType) {
913        let custom_markers = self.custom_lsp_root_markers();
914        if !is_config_file_path_with_custom(file_path, &custom_markers) {
915            return;
916        }
917
918        self.notify_watched_config_events(&[(file_path.to_path_buf(), change_type)]);
919    }
920
921    /// Post-write LSP hook for multi-file edits. When the patch includes
922    /// config-file edits, notify active workspace servers via
923    /// `workspace/didChangeWatchedFiles` before sending the per-document
924    /// didOpen/didChange for the current file.
925    pub fn lsp_post_multi_file_write(
926        &self,
927        file_path: &Path,
928        content: &str,
929        file_paths: &[PathBuf],
930        params: &serde_json::Value,
931    ) -> Option<crate::lsp::manager::PostEditWaitOutcome> {
932        self.notify_watched_config_files(file_paths);
933
934        let wants_diagnostics = params
935            .get("diagnostics")
936            .and_then(|v| v.as_bool())
937            .unwrap_or(false);
938
939        if !wants_diagnostics {
940            self.lsp_notify_file_changed(file_path, content);
941            return None;
942        }
943
944        let wait_ms = params
945            .get("wait_ms")
946            .and_then(|v| v.as_u64())
947            .unwrap_or(3000)
948            .min(10_000);
949
950        Some(self.lsp_notify_and_collect_diagnostics(
951            file_path,
952            content,
953            std::time::Duration::from_millis(wait_ms),
954        ))
955    }
956
957    /// Post-write LSP hook: notify server and optionally collect diagnostics.
958    ///
959    /// This is the single call site for all command handlers after `write_format_validate`.
960    /// Behavior:
961    /// - When `diagnostics: true` is in `params`, notifies the server, waits
962    ///   until matching diagnostics arrive or the timeout expires, and returns
963    ///   `Some(outcome)` with the verified-fresh diagnostics + per-server
964    ///   status.
965    /// - When `diagnostics: false` (or absent), just notifies (fire-and-forget)
966    ///   and returns `None`. Callers must NOT wrap this in `Some(...)`; the
967    ///   `None` is what tells the response builder to omit the LSP fields
968    ///   entirely (preserves the no-diagnostics-requested response shape).
969    ///
970    /// v0.17.3: default `wait_ms` raised from 1500 to 3000 because real-world
971    /// tsserver re-analysis on monorepo files routinely takes 2-5s. Still
972    /// capped at 10000ms.
973    pub fn lsp_post_write(
974        &self,
975        file_path: &Path,
976        content: &str,
977        params: &serde_json::Value,
978    ) -> Option<crate::lsp::manager::PostEditWaitOutcome> {
979        let wants_diagnostics = params
980            .get("diagnostics")
981            .and_then(|v| v.as_bool())
982            .unwrap_or(false);
983
984        let custom_markers = self.custom_lsp_root_markers();
985
986        if !wants_diagnostics {
987            if let Some(file_paths) = Self::multi_file_write_paths(params) {
988                self.notify_watched_config_files(&file_paths);
989            } else if let Some(config_events) =
990                Self::watched_file_events_from_params(params, &custom_markers)
991            {
992                self.notify_watched_config_events(&config_events);
993            }
994            self.lsp_notify_file_changed(file_path, content);
995            return None;
996        }
997
998        let wait_ms = params
999            .get("wait_ms")
1000            .and_then(|v| v.as_u64())
1001            .unwrap_or(3000)
1002            .min(10_000); // Cap at 10 seconds to prevent hangs from adversarial input
1003
1004        if let Some(file_paths) = Self::multi_file_write_paths(params) {
1005            return self.lsp_post_multi_file_write(file_path, content, &file_paths, params);
1006        }
1007
1008        if let Some(config_events) = Self::watched_file_events_from_params(params, &custom_markers)
1009        {
1010            self.notify_watched_config_events(&config_events);
1011        }
1012
1013        Some(self.lsp_notify_and_collect_diagnostics(
1014            file_path,
1015            content,
1016            std::time::Duration::from_millis(wait_ms),
1017        ))
1018    }
1019
1020    /// Validate that a file path falls within the configured project root.
1021    ///
1022    /// When `project_root` is configured (normal plugin usage), this resolves the
1023    /// path and checks it starts with the root. Returns the canonicalized path on
1024    /// success, or an error response on violation.
1025    ///
1026    /// When no `project_root` is configured (direct CLI usage), all paths pass
1027    /// through unrestricted for backward compatibility.
1028    pub fn validate_path(
1029        &self,
1030        req_id: &str,
1031        path: &Path,
1032    ) -> Result<std::path::PathBuf, crate::protocol::Response> {
1033        let config = self.config();
1034        // When restrict_to_project_root is false (default), allow all paths
1035        if !config.restrict_to_project_root {
1036            return Ok(path.to_path_buf());
1037        }
1038        let root = match &config.project_root {
1039            Some(r) => r.clone(),
1040            None => return Ok(path.to_path_buf()), // No root configured, allow all
1041        };
1042        drop(config);
1043
1044        // Keep the raw root for symlink-guard comparisons. On macOS, tempdir()
1045        // returns /var/... paths while canonicalize gives /private/var/...; we
1046        // need both forms so reject_escaping_symlink can recognise in-root
1047        // symlinks regardless of which prefix form `current` happens to have.
1048        let raw_root = root.clone();
1049        let resolved_root = std::fs::canonicalize(&root).unwrap_or(root);
1050
1051        // Resolve the path (follow symlinks, normalize ..). If canonicalization
1052        // fails (e.g. path does not exist or traverses a broken symlink), inspect
1053        // every existing component with lstat before falling back lexically so a
1054        // broken in-root symlink cannot be used to write outside project_root.
1055        let resolved = match std::fs::canonicalize(path) {
1056            Ok(resolved) => resolved,
1057            Err(_) => {
1058                let normalized = normalize_path(path);
1059                reject_escaping_symlink(req_id, path, &normalized, &resolved_root, &raw_root)?;
1060                resolve_with_existing_ancestors(&normalized)
1061            }
1062        };
1063
1064        if !resolved.starts_with(&resolved_root) {
1065            return Err(path_error_response(req_id, path, &resolved_root));
1066        }
1067
1068        Ok(resolved)
1069    }
1070
1071    /// Count active LSP server instances.
1072    pub fn lsp_server_count(&self) -> usize {
1073        self.lsp_manager
1074            .try_borrow()
1075            .map(|lsp| lsp.server_count())
1076            .unwrap_or(0)
1077    }
1078
1079    /// Symbol cache statistics from the language provider.
1080    pub fn symbol_cache_stats(&self) -> serde_json::Value {
1081        let entries = self
1082            .symbol_cache
1083            .read()
1084            .map(|cache| cache.len())
1085            .unwrap_or(0);
1086        serde_json::json!({
1087            "local_entries": entries,
1088            "warm_entries": 0,
1089        })
1090    }
1091}
1092
#[cfg(test)]
mod status_emitter_tests {
    use super::*;
    use crate::parser::TreeSitterProvider;

    // Builds an AppContext whose progress sender forwards every PushFrame
    // into an mpsc channel so tests can observe debounced status pushes.
    fn ctx_with_frame_rx() -> (AppContext, mpsc::Receiver<PushFrame>) {
        let ctx = AppContext::new(Box::new(TreeSitterProvider::new()), Config::default());
        let (tx, rx) = mpsc::channel();
        ctx.set_progress_sender(Some(Arc::new(Box::new(move |frame| {
            let _ = tx.send(frame);
        }))));
        (ctx, rx)
    }

    // A single signal must produce a StatusChanged push within the debounce
    // window (plus 500ms of scheduling slack).
    #[test]
    fn status_emitter_signal_triggers_push() {
        let (ctx, rx) = ctx_with_frame_rx();
        ctx.status_emitter().signal(ctx.build_status_snapshot());
        let frame = rx
            .recv_timeout(Duration::from_millis(STATUS_DEBOUNCE_MS + 500))
            .expect("status_changed push");
        assert!(matches!(frame, PushFrame::StatusChanged(_)));
    }

    // A burst of signals inside one debounce window collapses to exactly one
    // push; the trailing try_recv proves no extras were queued.
    #[test]
    fn status_emitter_debounces_burst() {
        let (ctx, rx) = ctx_with_frame_rx();
        for _ in 0..10 {
            ctx.status_emitter().signal(ctx.build_status_snapshot());
        }
        let frame = rx
            .recv_timeout(Duration::from_millis(STATUS_DEBOUNCE_MS + 500))
            .expect("status_changed push");
        assert!(matches!(frame, PushFrame::StatusChanged(_)));
        assert!(rx.try_recv().is_err());
    }

    // Signals in distinct debounce windows each produce their own push.
    #[test]
    fn status_emitter_separate_windows_separate_pushes() {
        let (ctx, rx) = ctx_with_frame_rx();
        ctx.status_emitter().signal(ctx.build_status_snapshot());
        rx.recv_timeout(Duration::from_millis(STATUS_DEBOUNCE_MS + 500))
            .expect("first push");
        ctx.status_emitter().signal(ctx.build_status_snapshot());
        rx.recv_timeout(Duration::from_millis(STATUS_DEBOUNCE_MS + 500))
            .expect("second push");
    }

    // Without a signal, the debounce thread stays quiet — no spurious pushes.
    #[test]
    fn status_emitter_no_signal_no_push() {
        let (_ctx, rx) = ctx_with_frame_rx();
        assert!(rx
            .recv_timeout(Duration::from_millis(STATUS_DEBOUNCE_MS + 100))
            .is_err());
    }

    // Dropping the context must not leave the debounce thread emitting frames.
    #[test]
    fn status_emitter_shutdown_cleanly_exits_debounce_thread() {
        let (ctx, rx) = ctx_with_frame_rx();
        drop(ctx);
        assert!(rx.recv_timeout(Duration::from_millis(50)).is_err());
    }
}
1156
#[cfg(test)]
mod gitignore_tests {
    use super::*;
    use std::fs;
    use std::path::Path;
    use tempfile::TempDir;

    // Builds an AppContext whose config points project_root at `root` so
    // rebuild_gitignore() can discover ignore files under it.
    fn make_ctx_with_root(root: &Path) -> AppContext {
        let provider = Box::new(crate::parser::TreeSitterProvider::new());
        let config = Config {
            project_root: Some(root.to_path_buf()),
            ..Config::default()
        };
        AppContext::new(provider, config)
    }

    /// Helper: returns true when the matcher would skip `path` (as if it
    /// arrived via a watcher event for this project root). Canonicalizes
    /// the query path so symlink prefixes (e.g. macOS `/var` → `/private/var`)
    /// don't trip the `ignore` crate's "path is expected to be under the
    /// root" panic — production code does the same guard via
    /// `path.starts_with(matcher.path())` in `drain_watcher_events`.
    fn is_ignored(ctx: &AppContext, path: &Path) -> bool {
        let Some(matcher) = ctx.gitignore() else {
            return false;
        };
        let canonical = std::fs::canonicalize(path).unwrap_or_else(|_| path.to_path_buf());
        if !canonical.starts_with(matcher.path()) {
            return false;
        }
        let is_dir = canonical.is_dir();
        matcher
            .matched_path_or_any_parents(&canonical, is_dir)
            .is_ignore()
    }

    // No project_root configured → rebuild produces no matcher at all.
    #[test]
    fn rebuild_gitignore_returns_none_without_project_root() {
        let provider = Box::new(crate::parser::TreeSitterProvider::new());
        let ctx = AppContext::new(provider, Config::default());
        ctx.rebuild_gitignore();
        assert!(ctx.gitignore().is_none());
    }

    // A root with no .gitignore file also yields no matcher.
    #[test]
    fn rebuild_gitignore_returns_none_for_project_with_no_gitignore() {
        let tmp = TempDir::new().unwrap();
        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();
        assert!(ctx.gitignore().is_none());
    }

    // Directory patterns (dist/, build/) ignore files beneath them while
    // leaving sibling source files alone.
    #[test]
    fn matcher_filters_files_in_ignored_dist_dir() {
        let tmp = TempDir::new().unwrap();
        fs::write(tmp.path().join(".gitignore"), "dist/\nbuild/\n").unwrap();
        fs::create_dir_all(tmp.path().join("dist")).unwrap();
        fs::create_dir_all(tmp.path().join("src")).unwrap();
        let dist_file = tmp.path().join("dist").join("bundle.js");
        let src_file = tmp.path().join("src").join("app.ts");
        fs::write(&dist_file, "x").unwrap();
        fs::write(&src_file, "y").unwrap();

        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();

        assert!(ctx.gitignore().is_some());
        assert!(
            is_ignored(&ctx, &dist_file),
            "dist/bundle.js should be ignored"
        );
        assert!(
            !is_ignored(&ctx, &src_file),
            "src/app.ts should NOT be ignored"
        );
    }

    // The two most common real-world ignore dirs: node_modules/ and target/.
    #[test]
    fn matcher_handles_node_modules_and_target() {
        let tmp = TempDir::new().unwrap();
        fs::write(tmp.path().join(".gitignore"), "node_modules/\ntarget/\n").unwrap();
        fs::create_dir_all(tmp.path().join("node_modules/foo")).unwrap();
        fs::create_dir_all(tmp.path().join("target/debug")).unwrap();
        let nm_file = tmp.path().join("node_modules/foo/index.js");
        let target_file = tmp.path().join("target/debug/aft");
        fs::write(&nm_file, "x").unwrap();
        fs::write(&target_file, "x").unwrap();

        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();

        assert!(is_ignored(&ctx, &nm_file));
        assert!(is_ignored(&ctx, &target_file));
    }

    #[test]
    fn matcher_honors_negation_pattern() {
        // .gitignore: ignore all *.log files EXCEPT important.log
        let tmp = TempDir::new().unwrap();
        fs::write(tmp.path().join(".gitignore"), "*.log\n!important.log\n").unwrap();
        let random_log = tmp.path().join("random.log");
        let important_log = tmp.path().join("important.log");
        fs::write(&random_log, "x").unwrap();
        fs::write(&important_log, "y").unwrap();

        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();

        assert!(is_ignored(&ctx, &random_log));
        assert!(
            !is_ignored(&ctx, &important_log),
            "negation pattern should un-ignore important.log"
        );
    }

    // rebuild_gitignore() must re-read the .gitignore file: edits to the
    // rules take effect on the next rebuild, not just at startup.
    #[test]
    fn rebuild_picks_up_gitignore_changes() {
        let tmp = TempDir::new().unwrap();
        let ignore_path = tmp.path().join(".gitignore");
        fs::write(&ignore_path, "foo.txt\n").unwrap();
        let foo = tmp.path().join("foo.txt");
        let bar = tmp.path().join("bar.txt");
        fs::write(&foo, "").unwrap();
        fs::write(&bar, "").unwrap();

        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();
        assert!(is_ignored(&ctx, &foo));
        assert!(!is_ignored(&ctx, &bar));

        // Now flip the rules: ignore bar.txt instead of foo.txt
        fs::write(&ignore_path, "bar.txt\n").unwrap();
        ctx.rebuild_gitignore();
        assert!(!is_ignored(&ctx, &foo));
        assert!(is_ignored(&ctx, &bar));
    }

    // .git/info/exclude is git's repo-local, uncommitted ignore file — the
    // matcher must honor it alongside .gitignore.
    #[test]
    fn gitignore_loads_info_exclude_when_present() {
        let tmp = TempDir::new().unwrap();
        let info_dir = tmp.path().join(".git/info");
        fs::create_dir_all(&info_dir).unwrap();
        fs::write(info_dir.join("exclude"), "secrets.txt\n").unwrap();
        let secrets = tmp.path().join("secrets.txt");
        let public = tmp.path().join("public.txt");
        fs::write(&secrets, "token").unwrap();
        fs::write(&public, "ok").unwrap();

        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();

        assert!(is_ignored(&ctx, &secrets));
        assert!(!is_ignored(&ctx, &public));
    }

    #[test]
    fn matcher_picks_up_nested_gitignore() {
        let tmp = TempDir::new().unwrap();
        // Root .gitignore is intentionally empty — only the nested one ignores
        fs::write(tmp.path().join(".gitignore"), "").unwrap();
        let sub = tmp.path().join("packages/foo");
        fs::create_dir_all(&sub).unwrap();
        fs::write(sub.join(".gitignore"), "generated/\n").unwrap();
        let generated_file = sub.join("generated").join("out.js");
        fs::create_dir_all(generated_file.parent().unwrap()).unwrap();
        fs::write(&generated_file, "x").unwrap();

        let ctx = make_ctx_with_root(tmp.path());
        ctx.rebuild_gitignore();

        assert!(
            is_ignored(&ctx, &generated_file),
            "nested gitignore in packages/foo/.gitignore should ignore generated/"
        );
    }
}