use anyhow::{Context, Result};
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};
use std::time::Duration;
use notify::RecursiveMode;
use notify_debouncer_full::{DebounceEventResult, Debouncer, RecommendedCache};
#[cfg(not(target_os = "macos"))]
use notify_debouncer_full::{new_debouncer, notify::RecommendedWatcher};
#[cfg(target_os = "macos")]
use notify_debouncer_full::{
new_debouncer_opt,
notify::{Config as NotifyConfig, PollWatcher},
};
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel};
/// Debounce window for worktree file events; coalesces bursts of writes
/// (editor saves, build tools) into a single `Worktree` notification.
const WORKTREE_DEBOUNCE: Duration = Duration::from_millis(300);
/// Shorter debounce for git HEAD/ref changes so branch switches and commits
/// surface quickly.
const HEAD_DEBOUNCE: Duration = Duration::from_millis(100);
/// Top-level directory names that are never watched recursively and whose
/// events never count as worktree changes: VCS metadata, build outputs,
/// dependency caches, and editor/IDE state.
const WORKTREE_EXCLUDED_DIR_NAMES: &[&str] = &[
    ".git",
    "target",
    "node_modules",
    ".direnv",
    ".venv",
    "dist",
    "build",
    ".next",
    ".turbo",
    ".cache",
    ".gradle",
    ".mvn",
    ".idea",
    ".vscode",
    "__pycache__",
];
// Backend selection: macOS uses the polling watcher, everything else uses the
// platform-recommended native backend.
// NOTE(review): the reason for avoiding FSEvents on macOS is not visible in
// this file (presumably reliability/latency of the native backend) — confirm
// before changing.
#[cfg(target_os = "macos")]
type KizuWatcher = PollWatcher;
#[cfg(not(target_os = "macos"))]
type KizuWatcher = RecommendedWatcher;
/// Debouncer specialized to the platform-selected watcher backend.
type KizuDebouncer = Debouncer<KizuWatcher, RecommendedCache>;
/// Identifies which watcher a `WatchEvent` or error originated from.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum WatchSource {
    Worktree,
    GitPerWorktreeHead,
    GitRefs,
    GitCommonRoot,
}

impl WatchSource {
    /// Short, stable identifier for this source, used in error messages.
    pub fn label(self) -> &'static str {
        match self {
            Self::Worktree => "worktree",
            Self::GitPerWorktreeHead => "git.head",
            Self::GitRefs => "git.refs",
            Self::GitCommonRoot => "git.root",
        }
    }
}
/// One path the git-state watchers monitor, plus how to watch it.
#[derive(Debug, Clone, PartialEq, Eq)]
struct WatchRoot {
    path: PathBuf,
    recursive_mode: RecursiveMode,
    // Ask the backend to diff file contents; only honored by the polling
    // backend (macOS) — see `new_kizu_debouncer`.
    compare_contents: bool,
    source: WatchSource,
}
/// Events delivered to the consumer of a `WatchHandle`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum WatchEvent {
    /// Something changed in the worktree outside git metadata and excluded
    /// directories. Batched: one event per debounce window.
    Worktree,
    /// HEAD, the current branch ref, or packed-refs changed; the payload
    /// says which watcher observed it.
    GitHead(WatchSource),
    /// A file in the kizu events directory changed (path of that file).
    EventLog(PathBuf),
    /// A watcher backend reported errors; watching may be degraded.
    Error {
        source: WatchSource,
        message: String,
    },
}
/// Owns the live watchers and the receiving end of the event channel.
/// Dropping the handle tears all watchers down.
pub struct WatchHandle {
    /// Consumer side of the watcher event stream.
    pub events: UnboundedReceiver<WatchEvent>,
    // Shared with the git-state callbacks; replaced on branch change.
    matcher: SharedMatcher,
    git_dir: PathBuf,
    common_git_dir: PathBuf,
    worktree_root: PathBuf,
    // Top-level worktree directories currently under a recursive watch.
    watched_children: std::collections::HashSet<PathBuf>,
    worktree_debouncer: KizuDebouncer,
    // Kept only so the watchers stay alive; torn down on drop.
    _git_state: Vec<KizuDebouncer>,
    _events_debouncer: Option<KizuDebouncer>,
}
impl WatchHandle {
pub fn update_current_branch_ref(&self, current_branch_ref: Option<&str>) {
let new_inner =
BaselineMatcherInner::new(&self.git_dir, &self.common_git_dir, current_branch_ref);
if let Ok(mut guard) = self.matcher.write() {
*guard = new_inner;
}
}
pub fn refresh_worktree_watches(&mut self) {
let children = match recursive_worktree_children(&self.worktree_root) {
Ok(c) => c,
Err(_) => return,
};
for child in children {
if self.watched_children.contains(&child) {
continue;
}
if self
.worktree_debouncer
.watch(&child, RecursiveMode::Recursive)
.is_ok()
{
self.watched_children.insert(child);
}
}
}
}
/// Starts all watchers for one repository worktree and returns the handle
/// that owns them.
///
/// * `root` — worktree root directory.
/// * `git_dir` — per-worktree git dir (`.git` or `.git/worktrees/<name>`).
/// * `common_git_dir` — shared git dir; equals `git_dir` for a main worktree.
/// * `current_branch_ref` — e.g. `refs/heads/main`; `None` for detached HEAD.
pub fn start(
    root: &Path,
    git_dir: &Path,
    common_git_dir: &Path,
    current_branch_ref: Option<&str>,
) -> Result<WatchHandle> {
    let (tx, rx) = unbounded_channel::<WatchEvent>();
    let worktree_root = root.to_path_buf();
    let git_dir_owned = git_dir.to_path_buf();
    let common_git_dir_owned = common_git_dir.to_path_buf();
    let matcher: SharedMatcher = Arc::new(RwLock::new(BaselineMatcherInner::new(
        &git_dir_owned,
        &common_git_dir_owned,
        current_branch_ref,
    )));
    let (worktree_debouncer, initial_children) =
        spawn_worktree_debouncer(&worktree_root, &git_dir_owned, tx.clone())?;
    let mut git_state = Vec::new();
    for watch_root in git_state_watch_roots(&git_dir_owned, &common_git_dir_owned) {
        git_state.push(spawn_git_state_debouncer(
            &watch_root,
            Arc::clone(&matcher),
            tx.clone(),
        )?);
    }
    // Event-log watching is best effort; `None` simply disables EventLog events.
    let events_debouncer = spawn_events_dir_debouncer(&worktree_root, tx.clone());
    Ok(WatchHandle {
        events: rx,
        matcher,
        git_dir: git_dir_owned,
        common_git_dir: common_git_dir_owned,
        worktree_root,
        watched_children: initial_children,
        worktree_debouncer,
        _git_state: git_state,
        _events_debouncer: events_debouncer,
    })
}
/// Matcher shared between the handle (writer, on branch change) and the
/// git-state debouncer callbacks (readers).
pub(crate) type SharedMatcher = Arc<RwLock<BaselineMatcherInner>>;
/// The canonicalized paths whose modification means "the baseline moved":
/// the per-worktree HEAD file, the current branch's loose ref (if a branch
/// is checked out), and the common packed-refs file.
#[derive(Debug, Clone)]
pub(crate) struct BaselineMatcherInner {
    head_file: PathBuf,
    branch_ref: Option<PathBuf>,
    packed_refs: PathBuf,
}
impl BaselineMatcherInner {
    /// Resolves the three baseline paths up front so incoming event paths
    /// can be compared against canonical forms later.
    pub(crate) fn new(
        git_dir: &Path,
        common_git_dir: &Path,
        current_branch_ref: Option<&str>,
    ) -> Self {
        let head_file = canonicalize_or_self(&git_dir.join("HEAD"));
        // Append the ref segments (e.g. "refs/heads/main") under the common
        // git dir, then canonicalize the result.
        let branch_ref = current_branch_ref.map(|r| {
            let joined = r
                .split('/')
                .fold(common_git_dir.to_path_buf(), |mut acc, segment| {
                    acc.push(segment);
                    acc
                });
            canonicalize_or_self(&joined)
        });
        let packed_refs = canonicalize_or_self(&common_git_dir.join("packed-refs"));
        Self {
            head_file,
            branch_ref,
            packed_refs,
        }
    }

    /// True when `path` (canonicalized) is HEAD, the tracked branch ref, or
    /// packed-refs.
    pub(crate) fn matches(&self, path: &Path) -> bool {
        let candidate = canonicalize_or_self(path);
        if candidate == self.head_file || candidate == self.packed_refs {
            return true;
        }
        match self.branch_ref.as_deref() {
            Some(branch) => candidate == branch,
            None => false,
        }
    }
}
/// Creates the debounced watcher covering the worktree.
///
/// The root itself is watched non-recursively and each non-excluded
/// top-level directory gets its own recursive watch, so excluded trees
/// (target/, node_modules/, ...) never register backend watches at all.
/// Returns the debouncer plus the set of recursively watched children so the
/// caller can reconcile it later (see `WatchHandle::refresh_worktree_watches`).
fn spawn_worktree_debouncer(
    root: &Path,
    git_dir: &Path,
    tx: UnboundedSender<WatchEvent>,
) -> Result<(KizuDebouncer, std::collections::HashSet<PathBuf>)> {
    let git_dir = git_dir.to_path_buf();
    let excluded_dirs: Vec<PathBuf> = WORKTREE_EXCLUDED_DIR_NAMES
        .iter()
        .map(|name| root.join(name))
        .collect();
    let callback_tx = tx.clone();
    let mut debouncer = new_kizu_debouncer(
        WORKTREE_DEBOUNCE,
        true,
        move |result: DebounceEventResult| {
            let events = match result {
                Ok(events) => events,
                Err(errors) => {
                    let message = format_notify_errors(WatchSource::Worktree, &errors);
                    let _ = callback_tx.send(WatchEvent::Error {
                        source: WatchSource::Worktree,
                        message,
                    });
                    return;
                }
            };
            // A path is "dominated" when it lives under git_dir or under an
            // excluded top-level directory; such paths never count as
            // worktree changes.
            let dominated = |p: &Path| {
                is_inside(p, &git_dir) || excluded_dirs.iter().any(|excl| is_inside(p, excl))
            };
            // Collapse the whole debounced batch into at most one Worktree
            // signal per window.
            let touches_worktree = events
                .iter()
                .any(|ev| ev.event.paths.iter().any(|p| !dominated(p)));
            if touches_worktree {
                let _ = callback_tx.send(WatchEvent::Worktree);
            }
        },
    )
    .context("failed to create worktree debouncer")?;
    debouncer
        .watch(root, RecursiveMode::NonRecursive)
        .with_context(|| format!("failed to watch worktree at {}", root.display()))?;
    let recursive_children = match recursive_worktree_children(root) {
        Ok(children) => children,
        Err(err) => {
            // Enumeration failure degrades to a root-only watch; report it
            // through the channel instead of failing startup.
            let _ = tx.send(WatchEvent::Error {
                source: WatchSource::Worktree,
                message: format!("watcher [{}]: {err:#}", WatchSource::Worktree.label()),
            });
            return Ok((debouncer, std::collections::HashSet::new()));
        }
    };
    let mut watched = std::collections::HashSet::with_capacity(recursive_children.len());
    for child in recursive_children {
        debouncer
            .watch(&child, RecursiveMode::Recursive)
            .with_context(|| format!("failed to watch worktree at {}", child.display()))?;
        watched.insert(child);
    }
    Ok((debouncer, watched))
}
/// Lists the non-excluded top-level directories of `root`, sorted, so the
/// caller can attach one recursive watch per child.
fn recursive_worktree_children(root: &Path) -> Result<Vec<PathBuf>> {
    let entries = std::fs::read_dir(root)
        .with_context(|| format!("failed to read worktree root {}", root.display()))?;
    let mut children = Vec::new();
    for entry in entries {
        let entry = entry
            .with_context(|| format!("failed to enumerate worktree root {}", root.display()))?;
        // Names that are not valid UTF-8 can never match the exclusion list.
        let excluded = entry
            .file_name()
            .to_str()
            .is_some_and(is_excluded_worktree_dir_name);
        if excluded {
            continue;
        }
        let path = entry.path();
        if path.is_dir() {
            children.push(path);
        }
    }
    // Deterministic order keeps watch registration and tests stable.
    children.sort();
    Ok(children)
}
/// True when `name` is one of the directory names excluded from watching.
fn is_excluded_worktree_dir_name(name: &str) -> bool {
    WORKTREE_EXCLUDED_DIR_NAMES
        .iter()
        .any(|excluded| *excluded == name)
}
/// Builds the three git-state watch targets, in a fixed order:
/// the per-worktree HEAD file, the common refs/ tree (recursive), and the
/// common git dir root (for packed-refs creation/rewrites).
fn git_state_watch_roots(git_dir: &Path, common_git_dir: &Path) -> Vec<WatchRoot> {
    let head = WatchRoot {
        path: git_dir.join("HEAD"),
        recursive_mode: RecursiveMode::NonRecursive,
        compare_contents: true,
        source: WatchSource::GitPerWorktreeHead,
    };
    let refs = WatchRoot {
        path: common_git_dir.join("refs"),
        recursive_mode: RecursiveMode::Recursive,
        compare_contents: true,
        source: WatchSource::GitRefs,
    };
    let common_root = WatchRoot {
        path: common_git_dir.to_path_buf(),
        recursive_mode: RecursiveMode::NonRecursive,
        compare_contents: true,
        source: WatchSource::GitCommonRoot,
    };
    vec![head, refs, common_root]
}
/// Creates one debounced watcher for a single git-state root (the HEAD file,
/// the refs/ tree, or the common git dir itself).
///
/// Each debounced batch is filtered through the shared baseline matcher, so
/// only HEAD / current-branch-ref / packed-refs touches produce a
/// `WatchEvent::GitHead`.
fn spawn_git_state_debouncer(
    watch_root: &WatchRoot,
    matcher: SharedMatcher,
    tx: UnboundedSender<WatchEvent>,
) -> Result<KizuDebouncer> {
    let source = watch_root.source;
    let compare_contents = watch_root.compare_contents;
    let mut debouncer = new_kizu_debouncer(
        HEAD_DEBOUNCE,
        compare_contents,
        move |result: DebounceEventResult| {
            let events = match result {
                Ok(events) => events,
                Err(errors) => {
                    let message = format_notify_errors(source, &errors);
                    let _ = tx.send(WatchEvent::Error { source, message });
                    return;
                }
            };
            // A poisoned lock means a writer panicked; surface that as a
            // watcher error instead of silently dropping git events.
            let Ok(guard) = matcher.read() else {
                let _ = tx.send(WatchEvent::Error {
                    source,
                    message: format!(
                        "watcher [{}]: baseline matcher lock poisoned",
                        source.label()
                    ),
                });
                return;
            };
            let baseline_touched = events
                .iter()
                .any(|ev| ev.event.paths.iter().any(|p| guard.matches(p)));
            // Release the read lock before sending so consumers reacting to
            // the event can immediately call update_current_branch_ref.
            drop(guard);
            if baseline_touched {
                let _ = tx.send(WatchEvent::GitHead(source));
            }
        },
    )
    .context("failed to create git_dir debouncer")?;
    debouncer
        .watch(&watch_root.path, watch_root.recursive_mode)
        .with_context(|| format!("failed to watch git_dir at {}", watch_root.path.display()))?;
    Ok(debouncer)
}
/// Constructs a debouncer on the platform-selected backend.
///
/// On macOS a `PollWatcher` is used: it polls at a quarter of the debounce
/// timeout and can diff file contents (`compare_contents`) to catch rewrites
/// the metadata alone would miss. On other platforms the recommended native
/// backend is used and `compare_contents` is ignored.
fn new_kizu_debouncer<F>(
    timeout: Duration,
    compare_contents: bool,
    event_handler: F,
) -> notify::Result<KizuDebouncer>
where
    F: notify_debouncer_full::DebounceEventHandler,
{
    #[cfg(target_os = "macos")]
    {
        // checked_div(4) is always Some for a nonzero divisor; the fallback
        // is purely defensive.
        let poll_interval = timeout.checked_div(4).unwrap_or(timeout);
        new_debouncer_opt::<F, KizuWatcher, RecommendedCache>(
            timeout,
            None,
            event_handler,
            RecommendedCache::new(),
            NotifyConfig::default()
                .with_poll_interval(poll_interval)
                .with_compare_contents(compare_contents),
        )
    }
    #[cfg(not(target_os = "macos"))]
    {
        let _ = compare_contents;
        new_debouncer(timeout, None, event_handler)
    }
}
/// Renders a batch of backend errors into one labeled message, falling back
/// to a generic text when the backend reported nothing usable.
fn format_notify_errors(source: WatchSource, errors: &[notify::Error]) -> String {
    let label = source.label();
    let details = errors
        .iter()
        .map(ToString::to_string)
        .collect::<Vec<_>>()
        .join("; ");
    if details.is_empty() {
        format!("watcher [{label}]: unknown backend failure")
    } else {
        format!("watcher [{label}]: {details}")
    }
}
/// True when `path` lies at or under `git_dir`, comparing canonical forms so
/// symlinked locations (e.g. /tmp on macOS) still match.
fn is_inside(path: &Path, git_dir: &Path) -> bool {
    canonicalize_or_self(path).starts_with(canonicalize_or_self(git_dir))
}

/// Canonicalizes `p` even when it does not (fully) exist: the deepest
/// existing ancestor is canonicalized and the missing tail re-appended.
/// Falls back to `p` unchanged when nothing on the path canonicalizes.
pub(crate) fn canonicalize_or_self(p: &Path) -> PathBuf {
    if let Ok(canonical) = p.canonicalize() {
        return canonical;
    }
    let mut missing_tail = Vec::new();
    let mut cursor = p;
    loop {
        // Stop at the filesystem root or at a component with no file name
        // (e.g. a bare prefix) — there is nothing left to strip.
        let (Some(parent), Some(name)) = (cursor.parent(), cursor.file_name()) else {
            break;
        };
        missing_tail.push(name.to_os_string());
        if let Ok(base) = parent.canonicalize() {
            // Re-attach the stripped components in original order.
            return missing_tail.iter().rev().fold(base, |mut acc, segment| {
                acc.push(segment);
                acc
            });
        }
        cursor = parent;
    }
    p.to_path_buf()
}
/// Debounce window for kizu event-log writes.
const EVENTS_DEBOUNCE: Duration = Duration::from_millis(100);

/// Watches the kizu events directory (creating it best-effort first) and
/// forwards a `WatchEvent::EventLog` per touched, non-hidden file.
/// Returns `None` when the directory is unavailable or the watcher cannot be
/// created; event-log watching is strictly best effort.
fn spawn_events_dir_debouncer(
    root: &Path,
    tx: UnboundedSender<WatchEvent>,
) -> Option<KizuDebouncer> {
    let events_dir = crate::paths::events_dir(root)?;
    let _ = crate::paths::ensure_private_dir(&events_dir);
    if !events_dir.is_dir() {
        return None;
    }
    let dir_for_filter = events_dir.clone();
    let handler = move |result: DebounceEventResult| {
        // Backend errors on this best-effort watch are silently ignored.
        let Ok(batch) = result else {
            return;
        };
        for debounced in batch {
            for path in &debounced.event.paths {
                // Skip dotfiles (editor temp files, partial writes).
                let hidden = path
                    .file_name()
                    .is_some_and(|name| name.to_string_lossy().starts_with('.'));
                if hidden {
                    continue;
                }
                if path.starts_with(&dir_for_filter) {
                    let _ = tx.send(WatchEvent::EventLog(path.clone()));
                }
            }
        }
    };
    let mut debouncer = new_kizu_debouncer(EVENTS_DEBOUNCE, false, handler).ok()?;
    debouncer
        .watch(&events_dir, RecursiveMode::NonRecursive)
        .ok()?;
    Some(debouncer)
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
use std::process::Command;
use std::sync::mpsc;
use tempfile::TempDir;
use tokio::time::{Duration as TokioDuration, timeout};
/// Creates a temp dir initialized as a git repo (branch `main`) with a test
/// committer identity configured.
fn init_repo() -> TempDir {
    let dir = tempfile::tempdir().expect("create tempdir");
    run_git(dir.path(), &["init", "--quiet", "--initial-branch=main"]);
    run_git(dir.path(), &["config", "user.email", "test@example.com"]);
    run_git(dir.path(), &["config", "user.name", "kizu test"]);
    dir
}

/// Runs a git command in `cwd`, panicking on spawn failure or non-zero exit.
fn run_git(cwd: &Path, args: &[&str]) {
    let status = Command::new("git")
        .args(args)
        .current_dir(cwd)
        .status()
        .unwrap_or_else(|e| panic!("git {args:?} failed to spawn: {e}"));
    assert!(status.success(), "git {args:?} exited with {status:?}");
}
/// Upper bound for waiting on an expected event in these tests.
const DRAIN_WAIT: TokioDuration = TokioDuration::from_millis(2_000);

/// Discards all events arriving within `wait`, so later assertions start
/// from a quiet channel.
async fn drain_events(handle: &mut WatchHandle, wait: TokioDuration) {
    let deadline = tokio::time::Instant::now() + wait;
    while tokio::time::Instant::now() < deadline {
        let remaining = deadline.saturating_duration_since(tokio::time::Instant::now());
        // Poll in short slices so the deadline is re-checked regularly.
        let poll = remaining.min(TokioDuration::from_millis(200));
        match timeout(poll, handle.events.recv()).await {
            Ok(Some(_)) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
}

/// Waits up to `wait` for any event satisfying `matches`; returns false on
/// timeout or when the channel closes first.
async fn saw_matching_event<F>(
    handle: &mut WatchHandle,
    wait: TokioDuration,
    mut matches: F,
) -> bool
where
    F: FnMut(&WatchEvent) -> bool,
{
    let deadline = tokio::time::Instant::now() + wait;
    while tokio::time::Instant::now() < deadline {
        let now = tokio::time::Instant::now();
        let remaining = deadline.saturating_duration_since(now);
        let next_poll = if remaining > TokioDuration::from_millis(200) {
            TokioDuration::from_millis(200)
        } else {
            remaining
        };
        match timeout(next_poll, handle.events.recv()).await {
            Ok(Some(event)) if matches(&event) => return true,
            Ok(Some(_)) => continue,
            Ok(None) => return false,
            Err(_) => continue,
        }
    }
    false
}
/// A brand-new file in the worktree must produce a Worktree event.
#[tokio::test(flavor = "current_thread")]
async fn worktree_event_is_received_for_a_new_file() {
    let repo = init_repo();
    let root = crate::git::find_root(repo.path()).expect("find_root");
    let git_dir = crate::git::git_dir(&root).expect("git_dir");
    let common = crate::git::git_common_dir(&root).expect("common git_dir");
    let branch = crate::git::current_branch_ref(&root).expect("current branch");
    let mut handle = start(&root, &git_dir, &common, branch.as_deref()).expect("start watcher");
    // Give the backend a moment to arm its watches before writing.
    tokio::time::sleep(TokioDuration::from_millis(150)).await;
    fs::write(root.join("hello.txt"), "hello\n").expect("write file");
    let event = timeout(DRAIN_WAIT, handle.events.recv())
        .await
        .expect("worktree event arrived")
        .expect("channel still open");
    assert_eq!(event, WatchEvent::Worktree);
}
/// Writes under an excluded dir (target/) must be silent, while writes under
/// a normal top-level dir (src/) must still emit Worktree.
#[tokio::test(flavor = "current_thread")]
async fn worktree_watcher_skips_target_directory() {
    let repo = init_repo();
    fs::create_dir_all(repo.path().join("target")).expect("create target");
    fs::create_dir_all(repo.path().join("src")).expect("create src");
    fs::write(repo.path().join("target").join("foo.rs"), "fn build() {}\n")
        .expect("write target file");
    fs::write(repo.path().join("src").join("bar.rs"), "fn app() {}\n").expect("write src file");
    let root = crate::git::find_root(repo.path()).expect("find_root");
    let git_dir = crate::git::git_dir(&root).expect("git_dir");
    let common = crate::git::git_common_dir(&root).expect("common git_dir");
    let branch = crate::git::current_branch_ref(&root).expect("current branch");
    let mut handle = start(&root, &git_dir, &common, branch.as_deref()).expect("start watcher");
    // Swallow the startup burst caused by the pre-created files.
    drain_events(&mut handle, TokioDuration::from_millis(800)).await;
    fs::write(root.join("target").join("foo.rs"), "fn build() { 1 }\n")
        .expect("rewrite target file");
    let saw_target_event =
        saw_matching_event(&mut handle, TokioDuration::from_millis(1_000), |event| {
            *event == WatchEvent::Worktree
        })
        .await;
    assert!(
        !saw_target_event,
        "nested writes under excluded target/ must not emit Worktree"
    );
    fs::write(root.join("src").join("bar.rs"), "fn app() { 1 }\n").expect("rewrite src file");
    let saw_src_event = saw_matching_event(&mut handle, DRAIN_WAIT, |event| {
        *event == WatchEvent::Worktree
    })
    .await;
    assert!(
        saw_src_event,
        "nested writes under non-excluded top-level directories must still emit Worktree"
    );
}
/// Files directly under the root are covered by the non-recursive root watch
/// and must still emit Worktree.
#[tokio::test(flavor = "current_thread")]
async fn worktree_watcher_still_sees_root_level_file_writes() {
    let repo = init_repo();
    fs::write(repo.path().join("README.md"), "before\n").expect("write root file");
    let root = crate::git::find_root(repo.path()).expect("find_root");
    let git_dir = crate::git::git_dir(&root).expect("git_dir");
    let common = crate::git::git_common_dir(&root).expect("common git_dir");
    let branch = crate::git::current_branch_ref(&root).expect("current branch");
    let mut handle = start(&root, &git_dir, &common, branch.as_deref()).expect("start watcher");
    tokio::time::sleep(TokioDuration::from_millis(250)).await;
    fs::write(root.join("README.md"), "after!\n").expect("rewrite root file");
    let saw_root_event = saw_matching_event(&mut handle, DRAIN_WAIT, |event| {
        *event == WatchEvent::Worktree
    })
    .await;
    assert!(
        saw_root_event,
        "root-level file writes must still emit Worktree with a non-recursive root watch"
    );
}
/// Arbitrary files inside .git must produce neither Worktree nor GitHead.
#[tokio::test(flavor = "current_thread")]
async fn writes_inside_git_dir_do_not_emit_worktree_event() {
    let repo = init_repo();
    let root = crate::git::find_root(repo.path()).expect("find_root");
    let git_dir = crate::git::git_dir(&root).expect("git_dir");
    let common = crate::git::git_common_dir(&root).expect("common git_dir");
    let branch = crate::git::current_branch_ref(&root).expect("current branch");
    let mut handle = start(&root, &git_dir, &common, branch.as_deref()).expect("start watcher");
    drain_events(&mut handle, TokioDuration::from_millis(800)).await;
    fs::write(git_dir.join("kizu_test_marker"), b"x").expect("write inside git_dir");
    let mut saw_worktree = false;
    let mut saw_head = false;
    let drain_until = tokio::time::Instant::now() + DRAIN_WAIT;
    while tokio::time::Instant::now() < drain_until {
        match timeout(TokioDuration::from_millis(200), handle.events.recv()).await {
            Ok(Some(WatchEvent::Worktree)) => {
                saw_worktree = true;
                break;
            }
            Ok(Some(WatchEvent::GitHead(_))) => {
                saw_head = true;
                break;
            }
            Ok(Some(WatchEvent::Error { .. } | WatchEvent::EventLog(_))) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
    assert!(
        !saw_worktree,
        "git_dir-only writes must not surface as Worktree events"
    );
    assert!(
        !saw_head,
        "non-HEAD/refs writes inside git_dir must not surface as GitHead"
    );
}
/// A write to the ref the matcher is tracking must emit GitHead.
#[tokio::test(flavor = "current_thread")]
async fn writing_current_branch_ref_emits_head_event() {
    let repo = init_repo();
    let root = crate::git::find_root(repo.path()).expect("find_root");
    let git_dir = crate::git::git_dir(&root).expect("git_dir");
    let common = crate::git::git_common_dir(&root).expect("common git_dir");
    // Track a branch that does not exist yet; creating its loose ref below
    // must still match thanks to canonicalize_or_self's missing-tail handling.
    let mut handle = start(
        &root,
        &git_dir,
        &common,
        Some("refs/heads/kizu-test-branch"),
    )
    .expect("start watcher");
    tokio::time::sleep(TokioDuration::from_millis(150)).await;
    let refs_heads = git_dir.join("refs").join("heads");
    fs::create_dir_all(&refs_heads).expect("create refs/heads");
    fs::write(
        refs_heads.join("kizu-test-branch"),
        b"0000000000000000000000000000000000000000\n",
    )
    .expect("write ref");
    let mut saw_head = false;
    let drain_until = tokio::time::Instant::now() + DRAIN_WAIT;
    while tokio::time::Instant::now() < drain_until {
        match timeout(TokioDuration::from_millis(200), handle.events.recv()).await {
            Ok(Some(WatchEvent::GitHead(_))) => {
                saw_head = true;
                break;
            }
            Ok(Some(WatchEvent::Worktree)) => continue,
            Ok(Some(WatchEvent::Error { .. } | WatchEvent::EventLog(_))) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
    assert!(
        saw_head,
        "writes under the session's own refs/heads/<branch> must emit GitHead"
    );
}
/// Ref activity outside the tracked branch (siblings, remotes, tags) must
/// not emit GitHead.
#[tokio::test(flavor = "current_thread")]
async fn writing_unrelated_refs_does_not_emit_head_event() {
    let repo = init_repo();
    let root = crate::git::find_root(repo.path()).expect("find_root");
    let git_dir = crate::git::git_dir(&root).expect("git_dir");
    let common = crate::git::git_common_dir(&root).expect("common git_dir");
    let mut handle = start(&root, &git_dir, &common, Some("refs/heads/main"))
        .expect("start watcher with main as active branch");
    tokio::time::sleep(TokioDuration::from_millis(150)).await;
    let refs_heads = git_dir.join("refs").join("heads");
    fs::create_dir_all(&refs_heads).expect("create refs/heads");
    fs::write(
        refs_heads.join("sibling-branch"),
        b"0000000000000000000000000000000000000000\n",
    )
    .expect("write sibling");
    let refs_remotes = git_dir.join("refs").join("remotes").join("origin");
    fs::create_dir_all(&refs_remotes).expect("create refs/remotes/origin");
    fs::write(
        refs_remotes.join("feature"),
        b"0000000000000000000000000000000000000000\n",
    )
    .expect("write remote ref");
    let refs_tags = git_dir.join("refs").join("tags");
    fs::create_dir_all(&refs_tags).expect("create refs/tags");
    fs::write(
        refs_tags.join("v1.0"),
        b"0000000000000000000000000000000000000000\n",
    )
    .expect("write tag");
    let mut saw_head = false;
    let drain_until = tokio::time::Instant::now() + DRAIN_WAIT;
    while tokio::time::Instant::now() < drain_until {
        match timeout(TokioDuration::from_millis(200), handle.events.recv()).await {
            Ok(Some(WatchEvent::GitHead(_))) => {
                saw_head = true;
                break;
            }
            Ok(Some(WatchEvent::Worktree)) => continue,
            Ok(Some(WatchEvent::Error { .. } | WatchEvent::EventLog(_))) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
    assert!(
        !saw_head,
        "unrelated ref activity (sibling branch, remotes, tags) \
         must not raise GitHead under the narrowed matcher"
    );
}
/// In a linked worktree the branch ref lives under the common git dir; a
/// commit there must still surface as GitHead.
#[tokio::test(flavor = "current_thread")]
async fn linked_worktree_commit_raises_head_event_via_common_git_dir() {
    let main = init_repo();
    fs::write(main.path().join("seed.txt"), "seed\n").expect("write seed");
    run_git(main.path(), &["add", "seed.txt"]);
    run_git(main.path(), &["commit", "--quiet", "-m", "init"]);
    // The linked worktree is created next to the tempdir; cleaned up at the end.
    let linked_path = main
        .path()
        .parent()
        .expect("tempdir has parent")
        .join(format!("kizu-linked-wt-{}", std::process::id()));
    let _ = fs::remove_dir_all(&linked_path);
    run_git(
        main.path(),
        &[
            "worktree",
            "add",
            "-b",
            "feature-branch",
            linked_path.to_str().expect("linked path utf8"),
        ],
    );
    let linked_root = crate::git::find_root(&linked_path).expect("find_root linked");
    let linked_git_dir =
        crate::git::git_dir(&linked_root).expect("linked per-worktree git_dir");
    let common_git_dir =
        crate::git::git_common_dir(&linked_root).expect("linked common git_dir");
    // Sanity: the setup really produced a split per-worktree/common layout.
    assert_ne!(
        canonicalize_or_self(&linked_git_dir),
        canonicalize_or_self(&common_git_dir),
        "linked worktree must have distinct per-worktree and common git dirs \
         (got both = {})",
        linked_git_dir.display()
    );
    let linked_branch = crate::git::current_branch_ref(&linked_root).expect("linked branch");
    let mut handle = start(
        &linked_root,
        &linked_git_dir,
        &common_git_dir,
        linked_branch.as_deref(),
    )
    .expect("start watcher with common git dir");
    tokio::time::sleep(TokioDuration::from_millis(150)).await;
    fs::write(linked_root.join("new.txt"), "hi\n").expect("write new");
    run_git(&linked_root, &["add", "new.txt"]);
    run_git(&linked_root, &["commit", "--quiet", "-m", "linked commit"]);
    let mut saw_head = false;
    let drain_until = tokio::time::Instant::now() + DRAIN_WAIT;
    while tokio::time::Instant::now() < drain_until {
        match timeout(TokioDuration::from_millis(200), handle.events.recv()).await {
            Ok(Some(WatchEvent::GitHead(_))) => {
                saw_head = true;
                break;
            }
            Ok(Some(WatchEvent::Worktree)) => continue,
            Ok(Some(WatchEvent::Error { .. } | WatchEvent::EventLog(_))) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
    assert!(
        saw_head,
        "commit in a linked worktree must raise GitHead via the common git dir"
    );
    // Drop the watcher before removing the directory it is watching.
    drop(handle);
    let _ = fs::remove_dir_all(&linked_path);
}
/// Only HEAD, the tracked branch ref, and packed-refs may match; everything
/// else in .git (index, logs, objects, FETCH_HEAD, ...) must not.
#[test]
fn baseline_matcher_accepts_head_branch_ref_and_packed_refs_only() {
    let git_dir = Path::new("/tmp/repo/.git");
    let matcher = BaselineMatcherInner::new(git_dir, git_dir, Some("refs/heads/main"));
    assert!(matcher.matches(&git_dir.join("HEAD")));
    assert!(matcher.matches(&git_dir.join("refs").join("heads").join("main")));
    assert!(matcher.matches(&git_dir.join("packed-refs")));
    assert!(!matcher.matches(&git_dir.join("refs").join("heads").join("feature")));
    assert!(
        !matcher.matches(
            &git_dir
                .join("refs")
                .join("remotes")
                .join("origin")
                .join("main")
        )
    );
    assert!(!matcher.matches(&git_dir.join("refs").join("tags").join("v1.0")));
    assert!(!matcher.matches(&git_dir.join("index")));
    assert!(!matcher.matches(&git_dir.join("index.lock")));
    assert!(!matcher.matches(&git_dir.join("logs").join("HEAD")));
    assert!(!matcher.matches(&git_dir.join("objects").join("pack").join("pack-abc.idx")));
    assert!(!matcher.matches(&git_dir.join("COMMIT_EDITMSG")));
    assert!(!matcher.matches(&git_dir.join("ORIG_HEAD")));
    assert!(!matcher.matches(&git_dir.join("FETCH_HEAD")));
}

/// Detached HEAD (no branch ref): only HEAD and packed-refs may match.
#[test]
fn baseline_matcher_detached_head_tracks_head_file_only() {
    let git_dir = Path::new("/tmp/repo/.git");
    let matcher = BaselineMatcherInner::new(git_dir, git_dir, None);
    assert!(matcher.matches(&git_dir.join("HEAD")));
    assert!(matcher.matches(&git_dir.join("packed-refs")));
    assert!(!matcher.matches(&git_dir.join("refs").join("heads").join("main")));
    assert!(!matcher.matches(&git_dir.join("refs").join("heads").join("feature")));
}

/// Linked worktree: HEAD is per-worktree, branch ref and packed-refs are in
/// the common dir; the common HEAD and sibling worktrees must not match.
#[test]
fn baseline_matcher_linked_worktree_splits_head_and_branch_ref() {
    let per = Path::new("/tmp/repo/.git/worktrees/wt1");
    let common = Path::new("/tmp/repo/.git");
    let matcher = BaselineMatcherInner::new(per, common, Some("refs/heads/feature"));
    assert!(matcher.matches(&per.join("HEAD")));
    assert!(matcher.matches(&common.join("refs").join("heads").join("feature")));
    assert!(matcher.matches(&common.join("packed-refs")));
    assert!(!matcher.matches(&common.join("HEAD")));
    assert!(!matcher.matches(&common.join("worktrees").join("wt2").join("HEAD")));
}
/// A path whose leaf does not exist must canonicalize via its existing
/// parent with the missing component re-appended.
#[test]
fn canonicalize_or_self_preserves_missing_tail_under_canonical_parent() {
    let temp = tempfile::tempdir().expect("tempdir");
    let parent = temp.path().join("refs").join("heads");
    fs::create_dir_all(&parent).expect("create existing parent");
    let missing = parent.join("future-branch");
    let canonical_parent = parent.canonicalize().expect("canonical parent");
    assert_eq!(
        canonicalize_or_self(&missing),
        canonical_parent.join("future-branch")
    );
}
/// Pins the exact watch-root set and order produced for a repository.
#[test]
fn git_state_watch_roots_focus_on_head_refs_and_common_root() {
    let temp = tempfile::tempdir().expect("tempdir");
    let git_dir = temp.path().join(".git");
    fs::create_dir_all(git_dir.join("refs").join("heads")).expect("create refs/heads");
    let roots = git_state_watch_roots(&git_dir, &git_dir);
    assert_eq!(
        roots,
        vec![
            WatchRoot {
                path: git_dir.join("HEAD"),
                recursive_mode: RecursiveMode::NonRecursive,
                compare_contents: true,
                source: WatchSource::GitPerWorktreeHead,
            },
            WatchRoot {
                path: git_dir.join("refs"),
                recursive_mode: RecursiveMode::Recursive,
                compare_contents: true,
                source: WatchSource::GitRefs,
            },
            WatchRoot {
                path: git_dir.clone(),
                recursive_mode: RecursiveMode::NonRecursive,
                compare_contents: true,
                source: WatchSource::GitCommonRoot,
            },
        ]
    );
}
/// Smoke test: the platform-selected backend must observe a plain file
/// creation within the deadline.
#[test]
fn selected_kizu_backend_smoke_receives_create_event() {
    let dir = tempfile::tempdir().expect("tempdir");
    let (tx, rx) = mpsc::channel();
    let mut debouncer =
        new_kizu_debouncer(TokioDuration::from_millis(50), false, tx).expect("new debouncer");
    debouncer
        .watch(dir.path(), RecursiveMode::Recursive)
        .expect("watch tempdir");
    let file = dir.path().join("smoke.txt");
    fs::write(&file, "ok\n").expect("write smoke file");
    let deadline = std::time::Instant::now() + std::time::Duration::from_secs(10);
    while std::time::Instant::now() < deadline {
        // saturating_duration_since (matching drain_events/saw_matching_event
        // above) makes the "deadline elapsed between the loop check and the
        // subtraction" race explicit instead of relying on Instant
        // subtraction semantics, which panicked on overflow in older Rust.
        let remaining = deadline.saturating_duration_since(std::time::Instant::now());
        let batch = rx
            .recv_timeout(remaining)
            .expect("receive debounced event")
            .expect("notify backend error");
        // Match the raw path or its canonical form (e.g. /tmp symlinks).
        if batch.iter().any(|event| {
            event.event.paths.iter().any(|path| {
                *path == file
                    || path
                        .canonicalize()
                        .ok()
                        .is_some_and(|canonical| canonical == file)
            })
        }) {
            return;
        }
    }
    panic!(
        "selected kizu watcher backend never observed {}",
        file.display()
    );
}
/// Swapping the tracked branch via update_current_branch_ref must reroute
/// GitHead detection without restarting any watcher.
#[tokio::test(flavor = "current_thread")]
async fn update_current_branch_ref_reroutes_head_detection_without_restart() {
    let repo = init_repo();
    let root = crate::git::find_root(repo.path()).expect("find_root");
    let git_dir = crate::git::git_dir(&root).expect("git_dir");
    let common = crate::git::git_common_dir(&root).expect("common git_dir");
    let mut handle =
        start(&root, &git_dir, &common, Some("refs/heads/main")).expect("start watcher");
    tokio::time::sleep(TokioDuration::from_millis(150)).await;
    let refs_heads = git_dir.join("refs").join("heads");
    fs::create_dir_all(&refs_heads).expect("create refs/heads");
    // Phase 1: "sibling" is not tracked yet, so this write must stay silent.
    fs::write(
        refs_heads.join("sibling"),
        b"1111111111111111111111111111111111111111\n",
    )
    .expect("write sibling phase 1");
    let mut saw_head_before_update = false;
    let phase1_until = tokio::time::Instant::now() + TokioDuration::from_millis(600);
    while tokio::time::Instant::now() < phase1_until {
        match timeout(TokioDuration::from_millis(200), handle.events.recv()).await {
            Ok(Some(WatchEvent::GitHead(_))) => {
                saw_head_before_update = true;
                break;
            }
            Ok(Some(_)) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
    assert!(
        !saw_head_before_update,
        "writes to a branch the matcher is not tracking must not fire GitHead"
    );
    // Phase 2: after the swap the same ref must fire GitHead.
    handle.update_current_branch_ref(Some("refs/heads/sibling"));
    tokio::time::sleep(TokioDuration::from_millis(150)).await;
    fs::write(
        refs_heads.join("sibling"),
        b"2222222222222222222222222222222222222222\n",
    )
    .expect("write sibling phase 2");
    let mut saw_head_after_update = false;
    let phase2_until = tokio::time::Instant::now() + DRAIN_WAIT;
    while tokio::time::Instant::now() < phase2_until {
        match timeout(TokioDuration::from_millis(200), handle.events.recv()).await {
            Ok(Some(WatchEvent::GitHead(_))) => {
                saw_head_after_update = true;
                break;
            }
            Ok(Some(_)) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
    assert!(
        saw_head_after_update,
        "after update_current_branch_ref the matcher must see the newly tracked branch"
    );
}
/// packed-refs may not exist when the watcher starts; both its creation and
/// later rewrites must emit GitHead.
#[tokio::test(flavor = "current_thread")]
async fn packed_refs_rewrites_after_birth_still_emit_head_event() {
    let repo = init_repo();
    fs::write(repo.path().join("seed.txt"), "seed\n").expect("write seed");
    run_git(repo.path(), &["add", "seed.txt"]);
    run_git(repo.path(), &["commit", "--quiet", "-m", "init"]);
    let root = crate::git::find_root(repo.path()).expect("find_root");
    let git_dir = crate::git::git_dir(&root).expect("git_dir");
    let common = crate::git::git_common_dir(&root).expect("common git_dir");
    let branch = crate::git::current_branch_ref(&root).expect("current branch");
    let packed_refs = common.join("packed-refs");
    let mut handle = start(&root, &git_dir, &common, branch.as_deref()).expect("start watcher");
    tokio::time::sleep(TokioDuration::from_millis(150)).await;
    // Phase 1: file birth.
    fs::write(
        &packed_refs,
        "0000000000000000000000000000000000000000 refs/heads/main\n",
    )
    .expect("create packed-refs");
    let mut saw_birth = false;
    let phase1_until = tokio::time::Instant::now() + DRAIN_WAIT;
    while tokio::time::Instant::now() < phase1_until {
        match timeout(TokioDuration::from_millis(200), handle.events.recv()).await {
            Ok(Some(WatchEvent::GitHead(_))) => {
                saw_birth = true;
                break;
            }
            Ok(Some(_)) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
    assert!(saw_birth, "creating packed-refs must emit GitHead");
    // Phase 2: in-place rewrite of the now-existing file.
    fs::write(
        &packed_refs,
        "1111111111111111111111111111111111111111 refs/heads/main\n",
    )
    .expect("rewrite packed-refs");
    let mut saw_rewrite = false;
    let phase2_until = tokio::time::Instant::now() + DRAIN_WAIT;
    while tokio::time::Instant::now() < phase2_until {
        match timeout(TokioDuration::from_millis(200), handle.events.recv()).await {
            Ok(Some(WatchEvent::GitHead(_))) => {
                saw_rewrite = true;
                break;
            }
            Ok(Some(_)) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
    assert!(
        saw_rewrite,
        "rewriting packed-refs after it is created must still emit GitHead"
    );
}
/// Rewriting a file with content of identical length must still emit
/// Worktree (guards the compare_contents/polling path against size-only
/// change detection).
#[tokio::test(flavor = "current_thread")]
async fn same_size_existing_file_rewrite_emits_worktree_event() {
    let repo = init_repo();
    // "alpha\n" and "omega\n" are the same byte length on purpose.
    fs::write(repo.path().join("same.txt"), "alpha\n").expect("write seed");
    run_git(repo.path(), &["add", "same.txt"]);
    run_git(repo.path(), &["commit", "--quiet", "-m", "init"]);
    let root = crate::git::find_root(repo.path()).expect("find_root");
    let git_dir = crate::git::git_dir(&root).expect("git_dir");
    let common = crate::git::git_common_dir(&root).expect("common git_dir");
    let branch = crate::git::current_branch_ref(&root).expect("current branch");
    let mut handle = start(&root, &git_dir, &common, branch.as_deref()).expect("start watcher");
    tokio::time::sleep(TokioDuration::from_millis(250)).await;
    fs::write(root.join("same.txt"), "omega\n").expect("rewrite same-size file");
    let mut saw_worktree = false;
    let drain_until = tokio::time::Instant::now() + DRAIN_WAIT;
    while tokio::time::Instant::now() < drain_until {
        match timeout(TokioDuration::from_millis(200), handle.events.recv()).await {
            Ok(Some(WatchEvent::Worktree)) => {
                saw_worktree = true;
                break;
            }
            Ok(Some(_)) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
    assert!(
        saw_worktree,
        "rewriting an existing file with the same size must still emit Worktree"
    );
}
}