1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
use crate::error::Error;
use crate::file_picker::{FFFMode, FilePicker};
use crate::git::GitStatusCache;
use crate::shared::{SharedFrecency, SharedPicker};
use crate::sort_buffer::sort_with_buffer;
use git2::Repository;
use notify::event::{AccessKind, AccessMode};
use notify::{Config, EventKind, EventKindMask, RecursiveMode};
use notify_debouncer_full::{DebounceEventResult, DebouncedEvent, NoCache, new_debouncer_opt};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc;
use std::time::Duration;
use tracing::{Level, debug, error, info, warn};
/// Concrete debouncer type used throughout this module: the platform's
/// recommended watcher backend with no file-ID cache (see the `NoCache`
/// rationale in `create_debouncer`).
type Debouncer = notify_debouncer_full::Debouncer<notify::RecommendedWatcher, NoCache>;
/// Owns the file-system watcher and guarantees that all background threads
/// are fully joined before `stop()` / `Drop` returns.
///
/// Architecture:
/// - The debouncer (and its internal watcher) live inside an **owner thread**
///   that we spawn and hold the `JoinHandle` for.
/// - `stop()` sets a flag, unparks the owner thread, and **joins** it.
/// - Inside the owner thread, `Debouncer::stop()` is called which joins the
///   debouncer's event-processing thread.
/// - On Windows an additional short sleep is added after `Debouncer::stop()`
///   because `notify`'s `ReadDirectoryChangesWatcher` discards its thread
///   `JoinHandle`, so we cannot join it directly. The watcher's `Drop` does
///   signal the thread via semaphore so it exits almost immediately, but we
///   need to give the OS a moment to reclaim it.
pub struct BackgroundWatcher {
    // Set by `stop()`; polled by the owner thread's loop.
    stop_signal: Arc<AtomicBool>,
    // `Some` until `stop()` takes and joins the handle; `None` afterwards so
    // a second `stop()` (e.g. from `Drop`) does not try to join twice.
    owner_thread: Option<std::thread::JoinHandle<()>>,
}
/// How long the debouncer coalesces a burst of FS events before delivering
/// the batch to the event handler.
const DEBOUNCE_TIMEOUT: Duration = Duration::from_millis(250);
/// If a single debounced batch touches more paths than this, incremental
/// index updates are abandoned in favour of one full rescan.
const MAX_PATHS_THRESHOLD: usize = 1024;
/// On macOS, each `watch()` call creates a separate FSEventStream. When the
/// number of directories exceeds this threshold we fall back to a single
/// recursive watch to avoid exhausting the per-process stream limit.
const MAX_MACOS_NONRECURSIVE_WATCHES: usize = 4096;
/// Minimum seconds between frecency tracks of the same file in AI mode.
/// Prevents score inflation from rapid burst edits by AI agents.
const AI_MODE_COOLDOWN_SECS: u64 = 5 * 60;
impl BackgroundWatcher {
    /// Build the debouncer, register all initial watches, and spawn the
    /// owner thread that keeps the debouncer alive and services dynamic
    /// watch requests sent by the event handler.
    ///
    /// `watch_dirs` is the set of directories discovered by the initial scan;
    /// `git_workdir` (when present) enables gitignore filtering and
    /// git-status observation.
    pub fn new(
        base_path: PathBuf,
        git_workdir: Option<PathBuf>,
        shared_picker: SharedPicker,
        shared_frecency: SharedFrecency,
        mode: FFFMode,
        watch_dirs: Vec<PathBuf>,
    ) -> Result<Self, Error> {
        info!(
            "Initializing background watcher for path: {}, mode: {:?}",
            base_path.display(),
            mode,
        );
        let (watch_tx, watch_rx) = mpsc::channel::<PathBuf>();
        // Clone shared state for the owner thread (needed for injecting
        // files that existed before a watch was registered on their directory).
        let owner_picker = shared_picker.clone();
        let owner_git_workdir = git_workdir.clone();
        let debouncer = Self::create_debouncer(
            base_path,
            git_workdir,
            shared_picker,
            shared_frecency,
            mode,
            watch_dirs,
            watch_tx,
        )?;
        info!("Background file watcher initialized successfully");
        let stop_signal = Arc::new(AtomicBool::new(false));
        let stop_clone = Arc::clone(&stop_signal);
        // The owner thread keeps the debouncer alive and ensures proper
        // cleanup: `Debouncer::stop()` joins its internal thread, then the
        // watcher `Drop` signals its I/O thread to exit.
        let owner_thread = std::thread::Builder::new()
            .name("fff-watcher-owner".into())
            .spawn(move || {
                let mut debouncer = debouncer;
                while !stop_clone.load(Ordering::Acquire) {
                    // Process pending watch requests from the event handler
                    // (new directories that need to be watched).
                    while let Ok(dir) = watch_rx.try_recv() {
                        match debouncer.watch(dir.as_path(), RecursiveMode::NonRecursive) {
                            Ok(()) => {
                                debug!("Added watch for new directory: {}", dir.display());
                            }
                            Err(e) => {
                                warn!("Failed to watch new directory {}: {}", dir.display(), e);
                            }
                        }
                        // Files created before the watch was registered don't
                        // generate events. Do a flat (non-recursive) read_dir
                        // to inject any files that already exist. Subdirectories
                        // are not descended — they get their own watches via
                        // future Create events from this directory's watch.
                        inject_existing_files(&dir, &owner_picker, &owner_git_workdir);
                    }
                    // Sleep until the next poll; `stop()` unparks us early.
                    // An unpark issued between the flag check and this call is
                    // not lost — `park_timeout` consumes a pending permit.
                    std::thread::park_timeout(Duration::from_secs(1));
                }
                // Debouncer::stop() joins the debouncer's event thread, then
                // drops the watcher (whose Drop signals the I/O thread).
                debouncer.stop();
                // On Windows the notify crate discards the ReadDirectoryChangesW
                // thread's JoinHandle — we cannot join it. Its Drop signals the
                // thread via semaphore so it exits almost immediately; give the
                // OS a moment to fully reclaim it.
                #[cfg(windows)]
                std::thread::sleep(Duration::from_millis(250));
            })
            .expect("failed to spawn fff-watcher-owner thread");
        Ok(Self {
            stop_signal,
            owner_thread: Some(owner_thread),
        })
    }

    /// Construct the debounced watcher, choose the watching strategy
    /// (per-directory NonRecursive vs. a single Recursive watch on macOS for
    /// huge repos), register the initial watches, and add the targeted
    /// `.git` watches needed for git-status tracking.
    fn create_debouncer(
        base_path: PathBuf,
        git_workdir: Option<PathBuf>,
        shared_picker: SharedPicker,
        shared_frecency: SharedFrecency,
        mode: FFFMode,
        watch_dirs: Vec<PathBuf>,
        watch_tx: mpsc::Sender<PathBuf>,
    ) -> Result<Debouncer, Error> {
        let config = Config::default()
            // Do not follow symlinks: notify would otherwise emit a bunch of
            // events for symlinked files that could be gitignored, and we
            // would have to properly differentiate those.
            // NOTE(review): the original comment trailed off mid-sentence
            // here ("...if the file was edited through a") — intent unclear.
            .with_follow_symlinks(false)
            // Only the actual modification events; ignore the open syscalls
            // that we generate ourselves via grep calls and preview-window
            // rendering.
            .with_event_kinds(EventKindMask::CORE);
        // Decide the watching strategy up-front so the event handler closure
        // knows whether it needs to request dynamic directory watches.
        let use_recursive =
            cfg!(target_os = "macos") && watch_dirs.len() > MAX_MACOS_NONRECURSIVE_WATCHES;
        let git_workdir_for_handler = git_workdir.clone();
        let mut debouncer = new_debouncer_opt(
            DEBOUNCE_TIMEOUT,
            Some(DEBOUNCE_TIMEOUT / 2), // tick rate for the event span
            {
                move |result: DebounceEventResult| match result {
                    Ok(events) => {
                        let new_dirs = handle_debounced_events(
                            events,
                            &git_workdir_for_handler,
                            &shared_picker,
                            &shared_frecency,
                            mode,
                        );
                        // In NonRecursive mode, register watches for newly
                        // discovered directories so future file events in them
                        // are captured. In Recursive mode the single stream
                        // already covers new subdirectories.
                        if !use_recursive {
                            for dir in new_dirs {
                                let _ = watch_tx.send(dir);
                            }
                        }
                    }
                    Err(errors) => {
                        error!("File watcher errors: {:?}", errors);
                    }
                }
            },
            // There is an issue with the recommended cache implementation on
            // macOS: it keeps track of all the files added to the watcher.
            // That tracking buys us nothing, because any rename already
            // requires removing the entry from the ordered index and adding
            // it back under the new name.
            NoCache::new(),
            config,
        )?;
        // Watching strategy:
        //
        // For small-to-medium repos we watch each indexed directory individually
        // (NonRecursive). This avoids receiving events for gitignored paths like
        // node_modules/ and keeps the event volume low.
        //
        // On macOS, each `watch()` call creates a separate FSEventStream. Large
        // repos (e.g. Chromium with 487K+ files) can have tens of thousands of
        // directories, which exhausts the per-process FSEvents stream limit and
        // causes "unable to start FSEvent stream" errors. When the directory
        // count exceeds the threshold we fall back to a single Recursive watch
        // on the base path. FSEvents handles this efficiently with one kernel
        // stream for the entire subtree. Gitignored paths are already filtered
        // in the event handler via `should_include_file()`.
        //
        // On Linux (inotify), RecursiveMode::Recursive creates one kernel watch
        // per subdirectory *including* gitignored ones, wasting file descriptors.
        // The per-directory NonRecursive approach is always used on Linux.
        //
        // New directories created at runtime are detected via Create events on
        // the parent and dynamically added by the owner thread via watch_tx.
        if use_recursive {
            debouncer.watch(base_path.as_path(), RecursiveMode::Recursive)?;
            info!(
                "File watcher initialized with single recursive watch on {} \
                 ({} directories exceeded threshold of {})",
                base_path.display(),
                watch_dirs.len(),
                MAX_MACOS_NONRECURSIVE_WATCHES,
            );
        } else {
            debouncer.watch(base_path.as_path(), RecursiveMode::NonRecursive)?;
            for dir in &watch_dirs {
                match debouncer.watch(dir.as_path(), RecursiveMode::NonRecursive) {
                    Ok(()) => {}
                    Err(e) => {
                        // Non-fatal: directory may have been removed between discovery and watch
                        warn!("Failed to watch directory {}: {}", dir.display(), e);
                    }
                }
            }
            info!(
                "File watcher initialized for {} directories (NonRecursive) under {}",
                watch_dirs.len(),
                base_path.display()
            );
        }
        // The .git directory is excluded from the file list but we still need
        // to observe changes that affect git status (staging, unstaging,
        // committing, branch switches, merges, etc.).
        // When using recursive mode the base watch already covers .git/,
        // but these targeted watches are cheap (at most 3 extra streams)
        // and ensure we catch status changes even if the recursive backend
        // coalesces or delays .git events.
        watch_git_status_paths(&mut debouncer, git_workdir.as_ref());
        Ok(debouncer)
    }

    /// Signal the owner thread to shut down and block until it has exited
    /// (which in turn joins the debouncer's worker thread). Safe to call
    /// more than once; a repeated call finds `owner_thread` already `None`
    /// and only logs.
    pub fn stop(&mut self) {
        self.stop_signal.store(true, Ordering::Release);
        if let Some(handle) = self.owner_thread.take() {
            handle.thread().unpark();
            if let Err(e) = handle.join() {
                error!("Watcher owner thread panicked: {:?}", e);
            }
        }
        info!("Background file watcher stopped successfully");
    }
}
impl Drop for BackgroundWatcher {
    /// Dropping the watcher performs the same full synchronous shutdown as
    /// an explicit `stop()` call (which is idempotent, so an earlier manual
    /// `stop()` is harmless).
    fn drop(&mut self) {
        self.stop();
    }
}
/// Process one debounced batch of filesystem events.
///
/// Classifies each path as removal / addition-or-modification / new
/// directory, applies the resulting index changes to the picker, tracks
/// frecency in AI mode, and refreshes git status for the touched files.
/// Returns newly discovered directories the caller should start watching.
/// Falls back to a full rescan (and returns an empty `Vec`) on kernel
/// rescan/overflow events, ignore-definition edits, or oversized batches.
#[tracing::instrument(name = "fs_events", skip(events, shared_picker, shared_frecency), level = Level::DEBUG)]
fn handle_debounced_events(
    events: Vec<DebouncedEvent>,
    git_workdir: &Option<PathBuf>,
    shared_picker: &SharedPicker,
    shared_frecency: &SharedFrecency,
    mode: FFFMode,
) -> Vec<PathBuf> {
    // This is called very often; we have to minimize the time the file
    // picker lock is held, so all classification happens before any lock.
    let repo = git_workdir.as_ref().and_then(|p| Repository::open(p).ok());
    let mut need_full_rescan = false;
    let mut need_full_git_rescan = false;
    let mut paths_to_remove = Vec::new();
    let mut paths_to_add_or_modify = Vec::new();
    let mut new_dirs_to_watch = Vec::new();
    let mut affected_paths_count = 0usize;
    for debounced_event in &events {
        // It is very important not to react to access events, because we
        // would inevitably trigger a sync from our own preview reads and
        // other unnecessary noise.
        if matches!(
            debounced_event.event.kind,
            EventKind::Access(
                AccessKind::Read
                    | AccessKind::Open(_)
                    | AccessKind::Close(AccessMode::Read | AccessMode::Execute)
            )
        ) {
            continue;
        }
        // When macOS FSEvents (or other backends) overflow their event buffer, the kernel
        // drops individual events and emits a Rescan flag telling us to re-scan the subtree.
        // Without handling this, modified source files can be silently missed.
        if debounced_event.event.need_rescan() {
            warn!(
                "Received rescan event for paths {:?}, triggering full rescan",
                debounced_event.event.paths
            );
            need_full_rescan = true;
            break;
        }
        tracing::debug!(event = ?debounced_event.event, "Processing FS event");
        for path in &debounced_event.event.paths {
            // An edit to an ignore definition can change the visibility of
            // arbitrarily many files — only a full rescan is correct.
            if is_ignore_definition_path(path) {
                info!(
                    "Detected change in ignore definition file: {}",
                    path.display()
                );
                need_full_rescan = true;
                break;
            }
            if is_dotgit_change_affecting_status(path, &repo) {
                need_full_git_rescan = true;
            }
            // Paths inside .git/ never enter the file index.
            if is_git_file(path) {
                continue;
            }
            // Use a combination of event kind and filesystem state to decide
            // whether a path is an addition/modification or a removal.
            //
            // We cannot rely on `path.exists()` alone because:
            // - A freshly created file might not be visible yet (race).
            // - macOS FSEvents uses Modify(Name(Any)) for both rename-in
            //   and rename-out, so we must stat the path to disambiguate.
            //
            // We cannot rely on event kind alone because:
            // - Remove events are not always emitted (macOS often sends
            //   Modify(Name(Any)) instead of Remove).
            let is_removal = matches!(debounced_event.event.kind, EventKind::Remove(_));
            if is_removal || !path.exists() {
                paths_to_remove.push(path.as_path());
            } else if path.is_dir() {
                // New directory — collect it so the caller can register a
                // watcher. No filesystem scanning: files that arrive later
                // will be handled by the newly registered watch.
                if !is_path_ignored(path, &repo) {
                    new_dirs_to_watch.push(path.to_path_buf());
                }
            } else {
                // For additions/modifications, still filter gitignored files.
                if should_include_file(path, &repo) {
                    paths_to_add_or_modify.push(path.as_path());
                }
            }
        }
        affected_paths_count += debounced_event.event.paths.len();
        if affected_paths_count > MAX_PATHS_THRESHOLD {
            warn!(
                "Too many affected paths ({}) in a single batch, triggering full rescan",
                affected_paths_count
            );
            need_full_rescan = true;
            break;
        }
        // Propagate a break requested inside the per-path loop above.
        if need_full_rescan {
            break;
        }
    }
    if need_full_rescan {
        info!(?affected_paths_count, "Triggering full rescan");
        trigger_full_rescan(shared_picker, shared_frecency);
        // A full rescan supersedes incremental updates and dynamic watches.
        return Vec::new();
    }
    // Sort + dedup so a path that appears in several events of this batch is
    // applied only once. `sort_with_buffer` presumably reuses a scratch
    // allocation (see crate::sort_buffer) — TODO confirm.
    sort_with_buffer(paths_to_add_or_modify.as_mut_slice(), |a, b| {
        a.as_os_str().cmp(b.as_os_str())
    });
    paths_to_add_or_modify.dedup_by(|a, b| a.as_os_str().eq(b.as_os_str()));
    info!(
        "Event processing summary: {} to remove, {} to add/modify, {} new dirs",
        paths_to_remove.len(),
        paths_to_add_or_modify.len(),
        new_dirs_to_watch.len()
    );
    // Apply file index updates (add/remove) unconditionally — these must
    // happen even when there is no git repository.
    let files_to_update_git_status =
        if !paths_to_remove.is_empty() || !paths_to_add_or_modify.is_empty() {
            debug!(
                "Applying file index changes: {} to remove, {} to add/modify",
                paths_to_remove.len(),
                paths_to_add_or_modify.len(),
            );
            // Runs under the picker write lock; returns the successfully
            // added/modified paths whose git status needs refreshing.
            let apply_changes = |picker: &mut FilePicker| -> Vec<PathBuf> {
                for path in &paths_to_remove {
                    let removed = picker.remove_file_by_path(path);
                    debug!("remove_file_by_path({:?}) -> {}", path, removed);
                }
                let mut files_to_update = Vec::with_capacity(paths_to_add_or_modify.len());
                for path in &paths_to_add_or_modify {
                    let added = picker.on_create_or_modify(path).is_some();
                    if added {
                        debug!("on_create_or_modify({:?}) -> Some", path);
                        files_to_update.push(path.to_path_buf());
                    } else {
                        error!("on_create_or_modify({:?}) -> None (file not added!)", path);
                    }
                }
                info!(
                    "apply_changes complete: {} files to update git status",
                    files_to_update.len()
                );
                files_to_update
            };
            let Ok(mut guard) = shared_picker.write() else {
                error!("Failed to acquire file picker write lock");
                return new_dirs_to_watch;
            };
            let Some(ref mut picker) = *guard else {
                error!("File picker not initialized");
                return new_dirs_to_watch;
            };
            apply_changes(picker)
        } else {
            debug!("No file index changes to apply");
            Vec::new()
        };
    // AI mode: auto-track frecency for all modified/created files.
    // Uses a 5-minute cooldown per file to prevent score inflation from rapid
    // burst edits (AI agents often edit the same file many times in minutes).
    // This runs after apply_changes so the picker write lock is released.
    if mode.is_ai() && !paths_to_add_or_modify.is_empty() {
        let mut tracked_count = 0usize;
        if let Ok(frecency_guard) = shared_frecency.read()
            && let Some(ref frecency) = *frecency_guard
        {
            for path in &paths_to_add_or_modify {
                // Skip if this file was tracked less than 5 minutes ago
                let should_track = match frecency.seconds_since_last_access(path) {
                    Ok(Some(secs)) => secs >= AI_MODE_COOLDOWN_SECS,
                    Ok(None) => true, // Never tracked before
                    Err(_) => true,   // DB error, track anyway
                };
                if !should_track {
                    continue;
                }
                if let Err(e) = frecency.track_access(path) {
                    error!("Failed to track frecency for {:?}: {:?}", path, e);
                } else {
                    tracked_count += 1;
                }
            }
            if tracked_count > 0 {
                info!("AI mode: tracked frecency for {} files", tracked_count);
            }
        }
        // Update in-memory frecency scores for tracked files. The frecency
        // read guard above was dropped at the end of its block, so the read
        // lock here is re-acquired while holding the picker write lock.
        if tracked_count > 0
            && let Ok(mut picker_guard) = shared_picker.write()
            && let Some(ref mut picker) = *picker_guard
            && let Ok(frecency_guard) = shared_frecency.read()
            && let Some(ref frecency) = *frecency_guard
        {
            for path in &paths_to_add_or_modify {
                let _ = picker.update_single_file_frecency(path, frecency);
            }
        }
    }
    // Git status updates require a repository.
    let Some(repo) = repo.as_ref() else {
        debug!("No git repo available, skipping git status updates");
        return new_dirs_to_watch;
    };
    if need_full_git_rescan {
        info!("Triggering full git rescan");
        let result = shared_picker.refresh_git_status(shared_frecency);
        if let Err(e) = result {
            error!("Failed to refresh git status: {:?}", e);
        }
        return new_dirs_to_watch;
    }
    if !files_to_update_git_status.is_empty() {
        info!(
            "Fetching git status for {} files",
            files_to_update_git_status.len()
        );
        let status = match GitStatusCache::git_status_for_paths(repo, &files_to_update_git_status) {
            Ok(status) => status,
            Err(e) => {
                tracing::error!(?e, "Failed to query git status");
                return new_dirs_to_watch;
            }
        };
        if let Ok(mut guard) = shared_picker.write()
            && let Some(ref mut picker) = *guard
        {
            if let Err(e) = picker.update_git_statuses(status, shared_frecency) {
                error!("Failed to update git statuses: {:?}", e);
            } else {
                info!("Successfully updated git statuses in picker");
            }
        } else {
            error!("Failed to acquire picker lock for git status update");
        }
    }
    new_dirs_to_watch
}
fn trigger_full_rescan(shared_picker: &SharedPicker, shared_frecency: &SharedFrecency) {
info!("Triggering full filesystem rescan");
// Note: no need to clear mmaps — they are backed by the kernel page cache
// and automatically reflect file changes. Old FileItems (and their mmaps)
// are dropped when the picker rebuilds its file list.
let Ok(mut guard) = shared_picker.write() else {
error!("Failed to acquire file picker write lock for full rescan");
return;
};
let Some(ref mut picker) = *guard else {
error!("File picker not initialized, cannot trigger rescan");
return;
};
if let Err(e) = picker.trigger_rescan(shared_frecency) {
error!("Failed to trigger full rescan: {:?}", e);
return;
}
info!("Full filesystem rescan completed successfully");
// Spawn background warmup + bigram rebuild (mirrors the initial scan's
// post-scan phase). The write lock is still held here but the spawned
// thread re-acquires it later — safe because the guard drops at function end.
if shared_picker.need_complex_rebuild() {
picker.spawn_post_rescan_rebuild(shared_picker.clone());
}
}
/// After registering a watch on a newly created directory, list its
/// immediate children and add any files to the picker.
fn inject_existing_files(dir: &Path, shared_picker: &SharedPicker, git_workdir: &Option<PathBuf>) {
    let Ok(entries) = std::fs::read_dir(dir) else {
        return;
    };
    let repo = git_workdir.as_ref().and_then(|p| Repository::open(p).ok());
    // Only regular files that pass the gitignore / non-code filters qualify;
    // subdirectories are deliberately not descended into.
    let files_to_add: Vec<PathBuf> = entries
        .flatten()
        .filter(|entry| entry.file_type().is_ok_and(|ft| ft.is_file()))
        .map(|entry| entry.path())
        .filter(|path| should_include_file(path, &repo))
        .collect();
    if files_to_add.is_empty() {
        return;
    }
    let Ok(mut guard) = shared_picker.write() else {
        return;
    };
    let Some(ref mut picker) = *guard else {
        return;
    };
    for path in &files_to_add {
        picker.on_create_or_modify(path);
    }
    debug!(
        "Injected {} existing files from new directory {}",
        files_to_add.len(),
        dir.display(),
    );
}
/// Whether `path` belongs in the file index.
///
/// Directories are never indexed. With a git repository available the
/// gitignore rules decide; without one, a basic non-code-directory filter
/// applies.
fn should_include_file(path: &Path, repo: &Option<Repository>) -> bool {
    // Only regular files (and symlinks to files) are indexed.
    if path.is_dir() {
        return false;
    }
    if let Some(repo) = repo.as_ref() {
        // Include unless git positively reports the path as ignored; a
        // lookup error errs on the side of inclusion.
        repo.is_path_ignored(path) != Ok(true)
    } else {
        // No git repo — apply basic sanity filters. Hidden directories are
        // skipped by the watcher setup (hidden(true)), but events can still
        // arrive for files in known non-code directories.
        !is_non_code_directory(path)
    }
}
/// Local alias for the crate-wide non-code directory check so call sites in
/// this module stay short. See `crate::ignore` for the actual rules.
fn is_non_code_directory(path: &Path) -> bool {
    crate::ignore::is_non_code_directory(path)
}
/// Ignore check used for directories: gitignore rules when a repo is
/// present, otherwise the built-in non-code directory list.
#[inline]
fn is_path_ignored(path: &Path, repo: &Option<Repository>) -> bool {
    repo.as_ref()
        .map(|repo| repo.is_path_ignored(path) == Ok(true))
        .unwrap_or_else(|| is_non_code_directory(path))
}
/// True when any component of `path` is the literal `.git` directory.
/// (A file merely *named* like `.gitignore` does not match.)
#[inline]
fn is_git_file(path: &Path) -> bool {
    for component in path.components() {
        if component.as_os_str() == ".git" {
            return true;
        }
    }
    false
}
fn is_dotgit_change_affecting_status(changed: &Path, repo: &Option<Repository>) -> bool {
let Some(repo) = repo.as_ref() else {
return false;
};
let git_dir = repo.path();
if let Ok(rel) = changed.strip_prefix(git_dir) {
if rel.starts_with("objects") || rel.starts_with("logs") || rel.starts_with("hooks") {
return false;
}
if rel == Path::new("index") || rel == Path::new("index.lock") {
return true;
}
if rel == Path::new("HEAD") {
return true;
}
if rel.starts_with("refs") || rel == Path::new("packed-refs") {
return true;
}
if rel == Path::new("info/exclude") || rel == Path::new("info/sparse-checkout") {
return true;
}
if let Some(fname) = rel.file_name().and_then(|f| f.to_str())
&& matches!(fname, "MERGE_HEAD" | "CHERRY_PICK_HEAD" | "REVERT_HEAD")
{
return true;
}
}
false
}
/// True for files whose edits can change which paths are ignored
/// (`.gitignore` / `.ignore`) and therefore require a full rescan.
fn is_ignore_definition_path(path: &Path) -> bool {
    path.file_name()
        .and_then(|name| name.to_str())
        .is_some_and(|name| name == ".ignore" || name == ".gitignore")
}
/// Register the handful of targeted watches needed to observe git-status
/// relevant changes inside `.git/`, which is otherwise excluded from the
/// file index. A failure on `.git/` itself aborts; failures on the
/// sub-watches are logged and tolerated.
fn watch_git_status_paths(debouncer: &mut Debouncer, git_workdir: Option<&PathBuf>) {
    let dot_git = match git_workdir {
        Some(workdir) => workdir.join(".git"),
        None => return,
    };
    if !dot_git.is_dir() {
        return;
    }
    // Watch .git/ non-recursively to catch top-level files:
    // index, index.lock, HEAD, packed-refs, MERGE_HEAD, CHERRY_PICK_HEAD, REVERT_HEAD
    if let Err(e) = debouncer.watch(&dot_git, RecursiveMode::NonRecursive) {
        warn!("Failed to watch .git directory: {}", e);
        return;
    }
    // Watch refs/ recursively to catch branch/tag changes
    let refs_dir = dot_git.join("refs");
    if refs_dir.is_dir() {
        if let Err(e) = debouncer.watch(&refs_dir, RecursiveMode::Recursive) {
            warn!("Failed to watch .git/refs: {}", e);
        }
    }
    // Watch info/ non-recursively for exclude and sparse-checkout
    let info_dir = dot_git.join("info");
    if info_dir.is_dir() {
        if let Err(e) = debouncer.watch(&info_dir, RecursiveMode::NonRecursive) {
            warn!("Failed to watch .git/info: {}", e);
        }
    }
}