1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
//! Task 7 Phase 7b2 — editor-save pattern matrix + ensure_watching
//! idempotence (A2 §I).
//!
//! Each editor family saves files differently. The watcher must
//! normalise every pattern to "exactly one logical changed file in
//! the debounced `ChangeSet`", which in turn must drive exactly one
//! rebuild dispatch. This test binary validates the end-to-end
//! watcher+dispatcher pipeline against all 5 patterns plus three
//! `ensure_watching` lifecycle tests (idempotence, race-freedom
//! under concurrent callers, and reap-then-respawn).
use std::{fs, time::Duration};
mod support;
use support::{
WatcherHarness, assert_exactly_one_rebuild,
editor_patterns::{EditorSavePattern, simulate_save},
};
/// Common driver for every editor-save pattern: seed a source file,
/// apply `simulate_save` with the given pattern, and require that
/// exactly one rebuild fires within the settle window.
async fn run_pattern(pattern: EditorSavePattern) {
    let harness = WatcherHarness::new().await;
    let file = harness.root.join("touched.rs");
    // Every pattern except DirectWrite renames or backs up an
    // existing file, so the target must exist before the save.
    fs::write(&file, b"pub fn original() {}\n").expect("seed write");
    // Let the seed write fall outside the debounce window so the
    // save below produces its own isolated rebuild.
    tokio::time::sleep(Duration::from_millis(500)).await;
    let save = || {
        simulate_save(&file, b"pub fn modified() {}\n", pattern);
    };
    assert_exactly_one_rebuild(
        &harness.dispatcher,
        // Generous timeout so a slow CI host's watcher + pipeline
        // still completes (6x the 500 ms debounce margin).
        Duration::from_secs(3),
        // Post-settle window of 2x the 200 ms debounce — proves no
        // second rebuild leaks out of a multi-event atomic save.
        Duration::from_millis(400),
        save,
    )
    .await;
}
// ---------------------------------------------------------------------------
// Five patterns × 1 rebuild each
// ---------------------------------------------------------------------------
/// `DirectWrite`: a plain in-place write to the watched file must
/// collapse to exactly one rebuild.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn direct_write_triggers_exactly_one_rebuild() {
    run_pattern(EditorSavePattern::DirectWrite).await;
}
/// `VimAtomicRename`: vim-style save (write temp, rename over the
/// target — see `simulate_save`) must collapse to exactly one rebuild.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn vim_atomic_rename_triggers_exactly_one_rebuild() {
    run_pattern(EditorSavePattern::VimAtomicRename).await;
}
/// `JetBrainsAtomicSave`: the JetBrains IDE save sequence (as modeled
/// by `simulate_save`) must collapse to exactly one rebuild.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn jetbrains_atomic_save_triggers_exactly_one_rebuild() {
    run_pattern(EditorSavePattern::JetBrainsAtomicSave).await;
}
/// `VscodeSafeSave`: the VS Code "safe save" sequence (as modeled by
/// `simulate_save`) must collapse to exactly one rebuild.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn vscode_safe_save_triggers_exactly_one_rebuild() {
    run_pattern(EditorSavePattern::VscodeSafeSave).await;
}
/// `EmacsBackup`: the Emacs backup-file save sequence (as modeled by
/// `simulate_save`) must collapse to exactly one rebuild.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn emacs_backup_triggers_exactly_one_rebuild() {
    run_pattern(EditorSavePattern::EmacsBackup).await;
}
// ---------------------------------------------------------------------------
// ensure_watching idempotence
// ---------------------------------------------------------------------------
/// A second `ensure_watching` call for an already-watched key must be
/// a no-op: the watcher map stays at exactly one entry.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn ensure_watching_is_idempotent_for_same_key() {
    let harness = WatcherHarness::new().await;
    // Harness construction already called ensure_watching once, so
    // the map must hold a single entry at this point.
    assert_eq!(
        harness.dispatcher.watchers_len(),
        1,
        "initial ensure_watching must have inserted 1 entry"
    );
    // Re-issue the call with the identical key — this must take the
    // no-op fast path instead of spawning a second watcher.
    let workspace = harness.manager.lookup(&harness.key).expect("workspace present");
    let second_call =
        harness
            .dispatcher
            .ensure_watching(&harness.key, workspace, harness.root.clone());
    second_call.await.expect("second ensure_watching must succeed");
    assert_eq!(
        harness.dispatcher.watchers_len(),
        1,
        "idempotent second call must NOT spawn a duplicate entry"
    );
}
/// N concurrent `ensure_watching` calls for the same key must leave
/// exactly one stored watcher entry.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn ensure_watching_is_race_free_under_concurrent_callers() {
    // Regression test for the 7b2 iter-0 feat review MAJOR: two
    // concurrent `ensure_watching` calls for the same WorkspaceKey
    // must produce exactly one stored entry, not two. The fix holds
    // `self.watchers` across the entire check → spawn → insert
    // sequence, serialising concurrent callers through the map's
    // parking_lot::Mutex.
    use std::sync::Arc;
    use std::sync::atomic::Ordering;
    use support::wait_until;
    let h = WatcherHarness::new().await;
    // The harness already inserted one entry via its own
    // ensure_watching at construction time. Tear that down so we
    // exercise the "fresh spawn under contention" path.
    let ws = h.manager.lookup(&h.key).expect("workspace present");
    ws.rebuild_cancelled.store(true, Ordering::Release);
    assert!(
        wait_until(|| h.dispatcher.watchers_len() == 0, Duration::from_secs(3)).await,
        "harness's initial watcher must drain before the race test"
    );
    ws.rebuild_cancelled.store(false, Ordering::Release);
    // Fire N concurrent ensure_watching calls for the same key.
    // Each task holds the workspace Arc + root PathBuf it needs.
    const N: usize = 8;
    let mut handles = Vec::with_capacity(N);
    for _ in 0..N {
        let dispatcher = Arc::clone(&h.dispatcher);
        let key = h.key.clone();
        let ws = Arc::clone(&ws);
        let root = h.root.clone();
        handles.push(tokio::spawn(async move {
            dispatcher.ensure_watching(&key, ws, root).await
        }));
    }
    // Named `handle` (not `h`) so the harness binding above is not
    // shadowed while the spawned callers are joined.
    for handle in handles {
        handle
            .await
            .expect("task panicked")
            .expect("ensure_watching must succeed under contention");
    }
    // assert_eq! already prints both compared values on failure;
    // re-reading watchers_len() inside the message could race and
    // report a different count than the one that failed, so the
    // message carries no live value.
    assert_eq!(
        h.dispatcher.watchers_len(),
        1,
        "N concurrent ensure_watching calls must produce exactly 1 stored entry, not N"
    );
}
/// After a watcher shuts down and is reaped (map size reaches 0), a
/// subsequent `ensure_watching` must repopulate the map with a fresh
/// entry.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn ensure_watching_prunes_finished_entry_and_respawns() {
    use std::sync::atomic::Ordering;
    use support::wait_until;
    let harness = WatcherHarness::new().await;
    assert_eq!(harness.dispatcher.watchers_len(), 1);
    // Cancel the rebuild so the watcher shuts itself down. Once the
    // async task drains, live=false and reap_watcher removes the
    // entry — the map must reach size 0.
    let workspace = harness.manager.lookup(&harness.key).expect("workspace present");
    workspace.rebuild_cancelled.store(true, Ordering::Release);
    let drained =
        wait_until(|| harness.dispatcher.watchers_len() == 0, Duration::from_secs(3)).await;
    assert!(drained, "watcher map must reach 0 after cancellation");
    // Clear the cancellation flag and call ensure_watching again.
    // The old entry was already reaped, so this is effectively a
    // fresh spawn (not a prune-then-respawn through a live=false
    // zombie — that flow is separately exercised when the reap has
    // not yet happened at the time of the second call). Here we
    // verify the END STATE after a complete shutdown: a subsequent
    // ensure_watching spawns a fresh pair.
    workspace.rebuild_cancelled.store(false, Ordering::Release);
    harness
        .dispatcher
        .ensure_watching(&harness.key, workspace, harness.root.clone())
        .await
        .expect("respawn after shutdown must succeed");
    assert_eq!(
        harness.dispatcher.watchers_len(),
        1,
        "respawn must populate the map with a fresh entry"
    );
}