// sqry_core/workspace/cache.rs
//! Aggregate workspace status cache.
//!
//! The aggregate cache lives at
//! `<workspace_dir>/.sqry/workspace-cache/status.json` and answers
//! `sqry workspace status` / `sqry/indexStatus` queries that span the
//! entire [`super::logical::LogicalWorkspace`]. Per the §1.2 storage
//! contract the file is:
//!
//! - **Atomically written** via tempfile + rename, so a partially
//!   written file is never observed.
//! - **mtime-bound TTL** of [`CACHE_TTL`] (60 s). [`read_cache`] returns
//!   `Ok(None)` when the file is missing *or* older than the TTL, which
//!   the caller treats as a soft-miss and recomputes.
//!
//! The per-source-root snapshots continue to live at
//! `<source_root>/.sqry/graph/snapshot.sqry`; this file is **derived**
//! from those snapshots and may safely be deleted at any time.
18
19use std::fs;
20use std::io::{ErrorKind, Write};
21use std::path::{Path, PathBuf};
22use std::time::{Duration, SystemTime};
23
24use serde::{Deserialize, Serialize};
25
26use super::error::WorkspaceError;
27use super::serde_time;
28
/// Subdirectory under `<workspace_dir>/.sqry/` that holds the aggregate
/// status cache. Kept as a const so the path is single-sourced between
/// [`cache_path`] and any tooling that inspects the directory.
pub const WORKSPACE_CACHE_DIRNAME: &str = "workspace-cache";

/// File name (under [`WORKSPACE_CACHE_DIRNAME`]) for the aggregate
/// status payload.
pub const WORKSPACE_STATUS_FILENAME: &str = "status.json";

/// Time-to-live for the aggregate status cache entry. Files whose mtime
/// is older than this are treated as a soft-miss by [`read_cache`].
pub const CACHE_TTL: Duration = Duration::from_secs(60);
40
/// Per-source-root status entry inside an aggregate
/// [`WorkspaceIndexStatus`].
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SourceRootStatus {
    /// Absolute path to the source root the entry describes.
    pub path: PathBuf,
    /// One-word machine-readable status:
    /// `"ok"` (indexed), `"missing"` (no snapshot), `"building"` (a
    /// rebuild is in progress), or `"error"` (snapshot read failed).
    pub status: SourceRootIndexState,
    /// Last-indexed timestamp for the source root, if available.
    /// `#[serde(default)]` so older cache files lacking the field
    /// round-trip to `None` on read.
    #[serde(default, with = "serde_time::option")]
    pub last_indexed_at: Option<SystemTime>,
    /// Cached symbol count for the source root, if available.
    pub symbol_count: Option<u64>,
    /// STEP_11_4 — JVM classpath directory for this source root, if
    /// the workspace builder populated
    /// [`crate::workspace::SourceRoot::classpath_dir`].
    /// `None` when the source root has no `<root>/.sqry/classpath/`
    /// directory or the workspace was built before STEP_11_4. Surfaces
    /// the auto-populated field through the LSP / MCP / CLI status
    /// payload from the same source-of-truth as the daemon's
    /// `daemon/workspaceStatus` `classpath_present: bool`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub classpath_dir: Option<PathBuf>,
}
67
/// Coarse status for a single source root inside an aggregate
/// [`WorkspaceIndexStatus`]. Kept deliberately small — the surface that
/// emits this is the LSP wire / `--json` output, where additional detail
/// (file counts, language hints, last-error) is folded into the
/// per-source-root [`crate::json_response::IndexStatus`] payload that
/// `sqry index --status` already returns.
///
/// Serialized lowercase (`"ok"`, `"missing"`, …) per the
/// `rename_all = "lowercase"` attribute, matching the wire contract
/// documented on [`SourceRootStatus::status`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SourceRootIndexState {
    /// Snapshot is present and considered healthy.
    Ok,
    /// No snapshot file exists at the canonical path.
    Missing,
    /// A `.sqry/graph/build.lock` is present — a rebuild is in progress.
    Building,
    /// Reading the snapshot or its metadata failed.
    Error,
}
86
/// Non-fatal warning attached to a [`WorkspaceIndexStatus`] aggregate.
///
/// STEP_11_4 (workspace-aware-cross-repo, 2026-04-26) — adds the
/// "soft-failure" surface so cross-source-root macro expansion errors,
/// classpath probe failures, and similar partial-degradation events do
/// not have to escalate to a hard `Err` that masks the rest of the
/// workspace. Consumers (LSP / MCP / CLI) render the warnings inline
/// next to the per-source-root rollup; downstream tooling can decide
/// whether to treat them as advisory or as build-gating.
///
/// Serialized internally-tagged (`"kind": "macroExpansionInvalidRoot"`,
/// …) per the `tag`/`rename_all` attributes below.
///
/// `#[non_exhaustive]` so future warning variants can be added without
/// breaking downstream pattern matches.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "kind", rename_all = "camelCase")]
#[non_exhaustive]
pub enum WorkspaceWarning {
    /// Rust macro expansion against a logical-workspace source root
    /// produced an `InvalidWorkspaceRoot` error. Surfaces the affected
    /// source root and the underlying error detail. Macro expansion is
    /// degraded (the affected root contributes no expanded macro
    /// metadata) but the rest of the workspace still indexes cleanly.
    MacroExpansionInvalidRoot {
        /// Canonical absolute path of the affected source root.
        source_root: PathBuf,
        /// Error detail from
        /// `sqry_lang_rust::macro_expander::MacroExpandError::InvalidWorkspaceRoot`.
        detail: String,
    },
    /// Source root advertised a `<source_root>/.sqry/classpath/`
    /// directory but the directory could not be opened (permission
    /// error, racy unlink, etc.). The JVM classpath analyzer falls
    /// back to source-only mode for the affected root.
    ClasspathProbeFailed {
        /// Canonical absolute path of the affected source root.
        source_root: PathBuf,
        /// IO error detail.
        detail: String,
    },
}
126
/// Aggregate workspace-level status payload.
///
/// Field shape follows §1.4 of the implementation plan: a vector of
/// per-source-root statuses plus precomputed counts so the LSP / CLI
/// can render summary lines without re-iterating the vector.
///
/// STEP_11_4 (workspace-aware-cross-repo, 2026-04-26) — adds the
/// `warnings` channel so non-fatal degradations (macro expansion root
/// errors, classpath probe failures, …) surface to LSP / MCP without
/// escalating to a hard build error. `warnings` is `Vec<WorkspaceWarning>`
/// so multiple warnings from independent source roots compose
/// deterministically.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct WorkspaceIndexStatus {
    /// Per-source-root entries (sorted by `path` for determinism).
    pub source_root_statuses: Vec<SourceRootStatus>,
    /// Number of source roots whose snapshot file is missing.
    pub missing_count: u32,
    /// Number of source roots currently being rebuilt.
    pub building_count: u32,
    /// Number of source roots reporting `ok`.
    pub ok_count: u32,
    /// Number of source roots reporting `error`.
    pub error_count: u32,
    /// Wall-clock time at which this aggregate was computed. Round-trips
    /// through the same millisecond encoding as
    /// [`crate::workspace::registry::WorkspaceMetadata`] timestamps.
    #[serde(with = "serde_time")]
    pub generated_at: SystemTime,
    /// Non-fatal warnings attached to this aggregate. Empty in the
    /// common case; populated by analyzers (macro expansion root,
    /// classpath probe, …) that hit a partial-degradation condition
    /// they want to surface to LSP / MCP / CLI consumers without
    /// failing the whole status response.
    ///
    /// `#[serde(default)]` so v1 cache files (which never carry the
    /// field) round-trip into an empty vec on read.
    /// `skip_serializing_if = "Vec::is_empty"` keeps the wire form
    /// identical to v1 in the common (no-warning) path.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub warnings: Vec<WorkspaceWarning>,
}
169
170impl WorkspaceIndexStatus {
171 /// Construct an aggregate from a vector of per-source-root statuses,
172 /// recomputing the summary counters and stamping `generated_at` to
173 /// `SystemTime::now()`.
174 #[must_use]
175 pub fn from_source_root_statuses(mut entries: Vec<SourceRootStatus>) -> Self {
176 entries.sort_by(|a, b| a.path.cmp(&b.path));
177 let mut missing_count: u32 = 0;
178 let mut building_count: u32 = 0;
179 let mut ok_count: u32 = 0;
180 let mut error_count: u32 = 0;
181 for entry in &entries {
182 match entry.status {
183 SourceRootIndexState::Missing => missing_count = missing_count.saturating_add(1),
184 SourceRootIndexState::Building => building_count = building_count.saturating_add(1),
185 SourceRootIndexState::Ok => ok_count = ok_count.saturating_add(1),
186 SourceRootIndexState::Error => error_count = error_count.saturating_add(1),
187 }
188 }
189 Self {
190 source_root_statuses: entries,
191 missing_count,
192 building_count,
193 ok_count,
194 error_count,
195 generated_at: SystemTime::now(),
196 warnings: Vec::new(),
197 }
198 }
199
200 /// Total number of source roots covered.
201 #[must_use]
202 pub fn total(&self) -> u32 {
203 u32::try_from(self.source_root_statuses.len()).unwrap_or(u32::MAX)
204 }
205
206 /// Append a [`WorkspaceWarning`] to the aggregate. Returns `&mut Self`
207 /// for the same fluent-builder feel as
208 /// [`Self::from_source_root_statuses`].
209 ///
210 /// Used by macro-boundary / classpath analyzers to surface non-fatal
211 /// degradations through the same status payload the LSP and MCP
212 /// already render.
213 pub fn push_warning(&mut self, warning: WorkspaceWarning) -> &mut Self {
214 self.warnings.push(warning);
215 self
216 }
217
218 /// Returns `true` when the aggregate carries any
219 /// [`WorkspaceWarning`]. Convenience for CLI / MCP renderers.
220 #[must_use]
221 pub fn has_warnings(&self) -> bool {
222 !self.warnings.is_empty()
223 }
224}
225
226/// Resolve the absolute path to the aggregate-status cache file under
227/// `workspace_dir`.
228#[must_use]
229pub fn cache_path(workspace_dir: &Path) -> PathBuf {
230 workspace_dir
231 .join(".sqry")
232 .join(WORKSPACE_CACHE_DIRNAME)
233 .join(WORKSPACE_STATUS_FILENAME)
234}
235
236/// Read the aggregate status cache for `workspace_dir`.
237///
238/// Returns:
239///
240/// - `Ok(Some(status))` if the cache file exists and was last modified
241/// within [`CACHE_TTL`].
242/// - `Ok(None)` if the file is absent, older than the TTL, or has an
243/// unreadable mtime (caller treats all three as soft-misses).
244///
245/// # Errors
246///
247/// Returns [`WorkspaceError::Io`] for filesystem failures other than
248/// `NotFound`, and [`WorkspaceError::Serialization`] when the file is
249/// present but not parseable as a [`WorkspaceIndexStatus`].
250pub fn read_cache(workspace_dir: &Path) -> Result<Option<WorkspaceIndexStatus>, WorkspaceError> {
251 let path = cache_path(workspace_dir);
252
253 let metadata = match fs::metadata(&path) {
254 Ok(m) => m,
255 Err(err) if err.kind() == ErrorKind::NotFound => return Ok(None),
256 Err(err) => return Err(WorkspaceError::io(&path, err)),
257 };
258
259 // mtime-bound TTL. If the mtime is unreadable we honour the
260 // documented contract above: treat the entry as a soft-miss
261 // (`Ok(None)`) rather than escalating an Io error. Clocks can
262 // jump and the cache file is *derived* — recomputing is cheap and
263 // always safe.
264 //
265 // The seam is exposed as a closure-shaped helper so test code can
266 // inject an `Err` without having to fabricate a real filesystem
267 // that returns an unreadable mtime (which is impossible to do
268 // portably on Linux/macOS/Windows).
269 let Ok(modified) = read_modified(&metadata) else {
270 return Ok(None);
271 };
272 let age = SystemTime::now()
273 .duration_since(modified)
274 .unwrap_or(Duration::ZERO);
275 if age > CACHE_TTL {
276 return Ok(None);
277 }
278
279 let bytes = fs::read(&path).map_err(|err| WorkspaceError::io(&path, err))?;
280 let status: WorkspaceIndexStatus =
281 serde_json::from_slice(&bytes).map_err(WorkspaceError::Serialization)?;
282 Ok(Some(status))
283}
284
285/// Atomically persist `status` to the aggregate cache for
286/// `workspace_dir`.
287///
288/// Writes to a sibling `<status>.json.tmp.<pid>.<nanos>` file inside the
289/// cache directory and then renames over the canonical path, so a
290/// concurrent reader either sees the previous payload or the new one in
291/// full (no torn reads). Creates the cache directory if missing.
292///
293/// # Errors
294///
295/// Returns [`WorkspaceError::Io`] for filesystem failures and
296/// [`WorkspaceError::Serialization`] when the payload cannot be encoded.
297///
298/// # Panics
299///
300/// Panics if [`cache_path`] returns a path without a parent directory,
301/// which is structurally impossible — `cache_path` always returns a
302/// path with at least three components (`<dir>/.sqry/workspace-cache/status.json`).
303pub fn write_cache(
304 workspace_dir: &Path,
305 status: &WorkspaceIndexStatus,
306) -> Result<(), WorkspaceError> {
307 let path = cache_path(workspace_dir);
308 let dir = path
309 .parent()
310 .expect("cache_path always returns a path with a parent");
311
312 fs::create_dir_all(dir).map_err(|err| WorkspaceError::io(dir, err))?;
313
314 let bytes = serde_json::to_vec_pretty(status).map_err(WorkspaceError::Serialization)?;
315
316 let tmp_path = temp_sibling_path(&path);
317 {
318 // Inner scope so the file handle is closed before the rename.
319 let mut file =
320 fs::File::create(&tmp_path).map_err(|err| WorkspaceError::io(&tmp_path, err))?;
321 file.write_all(&bytes)
322 .map_err(|err| WorkspaceError::io(&tmp_path, err))?;
323 file.sync_all()
324 .map_err(|err| WorkspaceError::io(&tmp_path, err))?;
325 }
326 // `rename` is atomic within a single filesystem on Unix and on
327 // modern Windows (`MoveFileExW` with `MOVEFILE_REPLACE_EXISTING`,
328 // which `std::fs::rename` uses). If the rename fails we tidy the
329 // tempfile up before propagating the error so callers don't accrue
330 // detritus on retry.
331 if let Err(err) = fs::rename(&tmp_path, &path) {
332 let _ = fs::remove_file(&tmp_path);
333 return Err(WorkspaceError::io(&path, err));
334 }
335 Ok(())
336}
337
/// Extract the mtime from `metadata`, with a test-only seam that lets
/// `aggregate_cache_returns_none_when_mtime_unreadable` simulate the
/// `Err` branch without having to fabricate a real filesystem on which
/// `metadata.modified()` fails (no such filesystem exists portably on
/// Linux / macOS / Windows). In production builds this is a thin
/// wrapper around [`fs::Metadata::modified`].
#[cfg(not(test))]
fn read_modified(metadata: &fs::Metadata) -> std::io::Result<SystemTime> {
    metadata.modified()
}

/// Test-build twin of [`read_modified`]: when
/// `test_hooks::FORCE_MTIME_UNREADABLE` is set, returns `Err` regardless
/// of the real mtime so [`read_cache`]'s soft-miss branch can be driven
/// deterministically; otherwise defers to [`fs::Metadata::modified`].
#[cfg(test)]
fn read_modified(metadata: &fs::Metadata) -> std::io::Result<SystemTime> {
    if test_hooks::FORCE_MTIME_UNREADABLE.load(std::sync::atomic::Ordering::SeqCst) {
        // The simulated failure takes priority over the real metadata so
        // the test exercises exactly the `Err` path production would hit.
        return Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "test hook: simulated unreadable mtime",
        ));
    }
    metadata.modified()
}
359
/// Test-only hooks for [`read_cache`]'s mtime seam. The flags here are
/// `pub(crate)` so the in-module `tests` block can flip them without
/// exposing the seam to downstream crates.
#[cfg(test)]
pub(crate) mod test_hooks {
    use std::sync::atomic::AtomicBool;

    /// When `true`, `read_modified` returns `Err` regardless of the
    /// underlying `metadata.modified()` result. Drives the
    /// `aggregate_cache_returns_none_when_mtime_unreadable` test.
    /// Tests that touch this flag serialize on `workspace_cache_read`
    /// and reset it before asserting, so state never leaks across tests.
    pub(crate) static FORCE_MTIME_UNREADABLE: AtomicBool = AtomicBool::new(false);
}
372
/// Build a tempfile path that is a sibling of `path` so the eventual
/// rename stays on the same filesystem (a cross-FS `rename` is not
/// atomic on either Unix or Windows).
///
/// The generated name is `<file_name>.tmp.<pid>.<nanos>`: the pid keeps
/// concurrent processes apart, the nanosecond stamp keeps repeated
/// writes from one process apart.
fn temp_sibling_path(path: &Path) -> PathBuf {
    let nanos = SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .map_or(0, |d| d.as_nanos());
    let mut name = match path.file_name() {
        Some(base) => base.to_os_string(),
        // Pathological input with no final component: suffix-only name.
        None => std::ffi::OsString::new(),
    };
    name.push(format!(".tmp.{}.{nanos}", std::process::id()));
    path.with_file_name(name)
}
390
#[cfg(test)]
mod tests {
    use super::*;
    use serial_test::serial;
    use tempfile::tempdir;

    /// Two-entry fixture: one `ok` root and one `missing` root, so the
    /// summary counters have something non-trivial to assert against.
    fn sample_status() -> WorkspaceIndexStatus {
        WorkspaceIndexStatus::from_source_root_statuses(vec![
            SourceRootStatus {
                path: PathBuf::from("/ws/a"),
                status: SourceRootIndexState::Ok,
                last_indexed_at: Some(SystemTime::UNIX_EPOCH + Duration::from_secs(1_700_000_000)),
                symbol_count: Some(42),
                classpath_dir: None,
            },
            SourceRootStatus {
                path: PathBuf::from("/ws/b"),
                status: SourceRootIndexState::Missing,
                last_indexed_at: None,
                symbol_count: None,
                classpath_dir: None,
            },
        ])
    }

    #[test]
    #[serial(workspace_cache_read)]
    fn aggregate_cache_writes_and_reads_under_ttl() {
        let temp = tempdir().unwrap();
        let status = sample_status();
        write_cache(temp.path(), &status).unwrap();
        let read = read_cache(temp.path())
            .unwrap()
            .expect("cache hit expected");
        assert_eq!(read.source_root_statuses, status.source_root_statuses);
        assert_eq!(read.ok_count, 1);
        assert_eq!(read.missing_count, 1);
    }

    #[test]
    #[serial(workspace_cache_read)]
    fn aggregate_cache_returns_none_when_absent() {
        let temp = tempdir().unwrap();
        assert!(read_cache(temp.path()).unwrap().is_none());
    }

    #[test]
    #[serial(workspace_cache_read)]
    fn aggregate_cache_returns_none_after_ttl() {
        let temp = tempdir().unwrap();
        let status = sample_status();
        write_cache(temp.path(), &status).unwrap();

        // Force the on-disk mtime backwards beyond CACHE_TTL by using
        // `filetime`-equivalent direct syscall. We can't depend on the
        // `filetime` crate from sqry-core, so we rely on `utimes` via
        // the `std::fs::File::set_modified` API that landed in 1.75.
        let path = cache_path(temp.path());
        let stale = SystemTime::now() - (CACHE_TTL + Duration::from_secs(5));
        let f = fs::OpenOptions::new().write(true).open(&path).unwrap();
        f.set_modified(stale).unwrap();
        drop(f);

        assert!(read_cache(temp.path()).unwrap().is_none());
    }

    #[test]
    #[serial(workspace_cache_read)]
    fn aggregate_cache_atomic_write_no_partial_files() {
        let temp = tempdir().unwrap();
        let status = sample_status();
        write_cache(temp.path(), &status).unwrap();
        // After a successful write, no `.tmp.*` files must remain in
        // the cache directory.
        let cache_dir = temp.path().join(".sqry").join(WORKSPACE_CACHE_DIRNAME);
        let leftovers: Vec<_> = fs::read_dir(&cache_dir)
            .unwrap()
            .filter_map(Result::ok)
            .filter(|e| e.file_name().to_string_lossy().contains(".tmp."))
            .collect();
        assert!(
            leftovers.is_empty(),
            "expected no tempfile leftovers, got {leftovers:?}"
        );
    }

    /// Stronger atomic-write coverage: a concurrent reader running
    /// alongside a tight writer loop must NEVER observe a half-written
    /// `status.json`. The single-shot test above proves we don't leave
    /// `.tmp.*` files behind on the happy path; this test proves the
    /// rename-into-place pattern is the *only* way readers see
    /// canonical content (no torn JSON, no truncated reads, no
    /// deserialise errors from a partial payload).
    ///
    /// Codex iter1 `APPROVE_WITH_CHANGES` required this stronger
    /// acceptance check for the §1.2 atomic-write contract.
    #[test]
    #[serial(workspace_cache_read)]
    fn aggregate_cache_atomic_write_visible_only_complete() {
        use std::sync::Arc;
        use std::sync::atomic::{AtomicBool, Ordering};
        use std::thread;

        let temp = tempdir().unwrap();
        let dir = temp.path().to_path_buf();
        let stop = Arc::new(AtomicBool::new(false));

        // Pre-populate so the very first read can't fail with
        // `NotFound` — we want to assert the steady-state contract,
        // not the cold-start race.
        write_cache(&dir, &sample_status()).unwrap();

        let writer_stop = Arc::clone(&stop);
        let writer_dir = dir.clone();
        let writer = thread::spawn(move || {
            // Tight write loop: every iteration writes the full
            // payload through the same temp-then-rename path the
            // production code uses.
            while !writer_stop.load(Ordering::Relaxed) {
                write_cache(&writer_dir, &sample_status()).unwrap();
            }
        });

        // Read aggressively. The exact iteration count is tuned so
        // the test runs comfortably under 2 s on a typical dev
        // machine while still exercising thousands of read/write
        // interleavings.
        let reads: usize = 5_000;
        let mut hits: usize = 0;
        let mut misses: usize = 0;
        for _ in 0..reads {
            match read_cache(&dir) {
                // The point of this test is the deserialise contract,
                // not the cache-hit ratio: a `None` here means the
                // mtime was too old at the instant of the read (the
                // writer hadn't yet renamed the new payload into
                // place), which is also a legal observation. Either
                // way we MUST never see `Err`, because that would
                // imply a torn read or a half-written JSON payload
                // ever became visible to a reader.
                Ok(Some(s)) => {
                    // Spot-check that the payload is structurally
                    // complete — `from_source_root_statuses` is the
                    // only way to populate `source_root_statuses`,
                    // and the sample fixture always has 2 entries.
                    assert_eq!(
                        s.source_root_statuses.len(),
                        2,
                        "concurrent read returned an incomplete status payload",
                    );
                    hits += 1;
                }
                Ok(None) => misses += 1,
                Err(err) => panic!(
                    "read_cache observed a torn / partial status.json during \
                     concurrent writes: {err:?}"
                ),
            }
        }

        stop.store(true, Ordering::Relaxed);
        writer.join().unwrap();

        // We expect overwhelmingly cache hits (the writer keeps the
        // mtime fresh), but tolerate a small misses tail to keep the
        // test stable across CI hosts. The acceptance criterion is
        // the absence of `Err` above.
        assert!(
            hits > 0,
            "expected at least some cache hits, got {hits} hits / {misses} misses"
        );
    }

    /// Codex iter1 `APPROVE_WITH_CHANGES` — confirms the documented
    /// `read_cache` contract: an unreadable mtime maps to
    /// `Ok(None)` (soft-miss), not `Err(WorkspaceError::Io)`. Driven
    /// by the `FORCE_MTIME_UNREADABLE` test seam in `read_modified`.
    #[test]
    #[serial(workspace_cache_read)]
    fn aggregate_cache_returns_none_when_mtime_unreadable() {
        use std::sync::atomic::Ordering;

        let temp = tempdir().unwrap();
        let status = sample_status();
        write_cache(temp.path(), &status).unwrap();

        // Sanity: with the seam disabled, the read returns Some.
        assert!(
            read_cache(temp.path()).unwrap().is_some(),
            "baseline read should hit"
        );

        // Flip the seam, re-read, expect Ok(None).
        test_hooks::FORCE_MTIME_UNREADABLE.store(true, Ordering::SeqCst);
        let result = read_cache(temp.path());
        // Reset before any assertion that might unwind, so a panic
        // does not leak the seam state into other tests.
        test_hooks::FORCE_MTIME_UNREADABLE.store(false, Ordering::SeqCst);

        assert!(
            matches!(result, Ok(None)),
            "unreadable mtime must yield Ok(None), got {result:?}"
        );
    }

    #[test]
    fn aggregate_status_summary_counts_match_entries() {
        let status = sample_status();
        assert_eq!(status.total(), 2);
        assert_eq!(status.ok_count, 1);
        assert_eq!(status.missing_count, 1);
        assert_eq!(status.building_count, 0);
        assert_eq!(status.error_count, 0);
    }
}
605}