use super::super::shared_test_helpers::{create_fake_image, test_metadata};
use super::*;
use crate::test_support::test_helpers::{EnvVarGuard, lock_env};
use std::fs;
use std::path::PathBuf;
use tempfile::TempDir;
#[test]
fn cache_dir_with_root_does_not_create_dir() {
    // Constructing a CacheDir must be side-effect free; the root directory
    // only materializes later, on the first store().
    let scratch = TempDir::new().unwrap();
    let kernel_root = scratch.path().join("kernels");
    assert!(!kernel_root.exists());
    let cache = CacheDir::with_root(kernel_root.clone());
    assert!(!kernel_root.exists());
    assert_eq!(cache.root(), kernel_root);
}
#[test]
fn cache_dir_list_returns_empty_for_nonexistent_root() {
    // list() on a root that was never created must succeed with no entries
    // rather than erroring out.
    let scratch = TempDir::new().unwrap();
    let missing_root = scratch.path().join("never-created");
    assert!(!missing_root.exists());
    let cache = CacheDir::with_root(missing_root);
    assert!(cache.list().unwrap().is_empty());
}
#[test]
fn cache_dir_store_creates_root_lazily() {
    // The cache root must appear as a side effect of the first store().
    let scratch = TempDir::new().unwrap();
    let lazy_root = scratch.path().join("lazy-root");
    assert!(!lazy_root.exists());
    let cache = CacheDir::with_root(lazy_root.clone());
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2");
    cache
        .store("key", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert!(lazy_root.exists(), "store() must create the cache root");
}
#[test]
fn cache_dir_default_root_returns_path() {
    // default_root() honors the KTSTR_CACHE_DIR environment override.
    // lock_env serializes env mutation across tests; the guard restores
    // the variable when the test ends.
    let _lock = lock_env();
    let override_dir = TempDir::new().unwrap();
    let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", override_dir.path());
    let resolved = CacheDir::default_root().unwrap();
    assert_eq!(resolved, override_dir.path());
}
#[test]
fn cache_dir_list_empty() {
    // An existing-but-empty root lists zero entries.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    assert!(cache.list().unwrap().is_empty());
}
#[test]
fn cache_dir_store_and_lookup() {
    // Round-trip: store() lays down the image and metadata.json under the
    // key's directory, and lookup() finds the same entry afterwards.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2");
    let key = "6.14.2-tarball-x86_64";
    let stored = cache
        .store(key, &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert_eq!(stored.key, key);
    assert!(stored.path.join("bzImage").exists());
    assert!(stored.path.join("metadata.json").exists());
    let found = cache.lookup(key);
    assert!(found.is_some());
    let found = found.unwrap();
    assert_eq!(found.key, key);
    assert_eq!(found.metadata.version.as_deref(), Some("6.14.2"));
}
#[test]
fn cache_dir_lookup_missing() {
    // A key that was never stored resolves to None.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    assert!(cache.lookup("nonexistent").is_none());
}
#[test]
fn cache_dir_lookup_corrupt_metadata() {
    // An entry whose metadata.json cannot be parsed is treated as absent
    // by lookup(), even though the image file is present.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    let entry_dir = root.path().join("bad-entry");
    fs::create_dir_all(&entry_dir).unwrap();
    fs::write(entry_dir.join("bzImage"), b"fake").unwrap();
    fs::write(entry_dir.join("metadata.json"), b"not json").unwrap();
    assert!(cache.lookup("bad-entry").is_none());
}
#[test]
fn cache_dir_lookup_missing_image() {
    // Valid metadata with no image file on disk still fails lookup().
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    let entry_dir = root.path().join("no-image");
    fs::create_dir_all(&entry_dir).unwrap();
    let json = serde_json::to_string(&test_metadata("6.14.2")).unwrap();
    fs::write(entry_dir.join("metadata.json"), json).unwrap();
    assert!(cache.lookup("no-image").is_none());
}
#[test]
fn cache_dir_store_overwrites_existing() {
    // Storing the same key twice replaces the earlier metadata; the later
    // built_at / config_hash win.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let key = "6.14.2-tarball-x86_64";
    let first = KernelMetadata {
        built_at: "2026-04-12T10:00:00Z".to_string(),
        config_hash: Some("hash-v1".to_string()),
        ..test_metadata("6.14.2")
    };
    cache
        .store(key, &CacheArtifacts::new(&image), &first)
        .unwrap();
    let second = KernelMetadata {
        built_at: "2026-04-12T11:00:00Z".to_string(),
        config_hash: Some("hash-v2".to_string()),
        ..test_metadata("6.14.2")
    };
    cache
        .store(key, &CacheArtifacts::new(&image), &second)
        .unwrap();
    let found = cache.lookup(key).unwrap();
    assert_eq!(found.metadata.built_at, "2026-04-12T11:00:00Z");
    assert_eq!(found.metadata.config_hash.as_deref(), Some("hash-v2"));
}
#[test]
fn cache_dir_list_sorted_newest_first() {
    // list() orders entries by built_at, most recent first, regardless of
    // the order they were stored in (old, new, mid here).
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    // Build metadata with a caller-chosen build timestamp.
    let timestamped = |version: &str, built_at: &str| KernelMetadata {
        built_at: built_at.to_string(),
        ..test_metadata(version)
    };
    let meta_old = timestamped("6.13.0", "2026-04-10T10:00:00Z");
    let meta_new = timestamped("6.14.2", "2026-04-12T10:00:00Z");
    let meta_mid = timestamped("6.14.0", "2026-04-11T10:00:00Z");
    cache
        .store("old", &CacheArtifacts::new(&image), &meta_old)
        .unwrap();
    cache
        .store("new", &CacheArtifacts::new(&image), &meta_new)
        .unwrap();
    cache
        .store("mid", &CacheArtifacts::new(&image), &meta_mid)
        .unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 3);
    assert_eq!(entries[0].key(), "new");
    assert_eq!(entries[1].key(), "mid");
    assert_eq!(entries[2].key(), "old");
}
#[test]
fn cache_dir_list_includes_corrupt_entries() {
    // A directory with no metadata.json still shows up in list(), but as
    // the Corrupt variant carrying the exact missing-file reason.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    cache
        .store("valid", &CacheArtifacts::new(&image), &test_metadata("6.14.2"))
        .unwrap();
    fs::create_dir_all(root.path().join("corrupt")).unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 2);
    let valid = entries.iter().find(|e| e.key() == "valid").unwrap();
    assert!(valid.as_valid().is_some());
    let corrupt = entries.iter().find(|e| e.key() == "corrupt").unwrap();
    assert!(corrupt.as_valid().is_none());
    let ListedEntry::Corrupt { reason, .. } = corrupt else {
        panic!("expected Corrupt variant");
    };
    assert_eq!(
        reason, "metadata.json missing",
        "missing-metadata reason should be the exact missing-file label, got: {reason}",
    );
}
#[test]
fn cache_dir_list_classifies_missing_image_as_corrupt() {
    // Deleting the image out from under a stored entry demotes it to
    // Corrupt, with a reason naming the specific missing image file.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2");
    let stored = cache
        .store("missing-image", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    fs::remove_file(stored.image_path()).unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 1);
    let listed = &entries[0];
    assert_eq!(listed.key(), "missing-image");
    assert!(
        listed.as_valid().is_none(),
        "entry with missing image must not surface as Valid",
    );
    let ListedEntry::Corrupt { reason, .. } = listed else {
        panic!("expected Corrupt variant for missing-image entry");
    };
    assert!(
        reason.contains("image file") && reason.contains("missing"),
        "reason should cite missing image file, got: {reason}",
    );
    assert!(
        reason.contains(&meta.image_name),
        "reason should name the specific image file, got: {reason}",
    );
}
#[test]
fn cache_dir_list_classifies_unreadable_metadata_as_corrupt() {
    // Making metadata.json a *directory* forces the read itself to fail,
    // which must route to the "unreadable" corruption reason (as opposed
    // to missing / malformed / truncated / schema drift).
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    let entry_dir = root.path().join("unreadable-metadata");
    fs::create_dir_all(entry_dir.join("metadata.json")).unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 1);
    let listed = &entries[0];
    assert_eq!(listed.key(), "unreadable-metadata");
    assert!(listed.as_valid().is_none());
    let ListedEntry::Corrupt { reason, .. } = listed else {
        panic!("expected Corrupt variant for entry with unreadable metadata");
    };
    assert!(
        reason.starts_with("metadata.json unreadable: "),
        "unreadable-metadata reason should carry the unreadable prefix distinct from the \
         missing / schema-drift / malformed / truncated prefixes, got: {reason}",
    );
}
#[test]
fn cache_dir_list_classifies_malformed_json_as_corrupt() {
    // Syntactically invalid JSON must route to the "malformed" reason.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    let entry_dir = root.path().join("malformed-json");
    fs::create_dir_all(&entry_dir).unwrap();
    fs::write(entry_dir.join("metadata.json"), b"not valid json {[").unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 1);
    let listed = &entries[0];
    assert_eq!(listed.key(), "malformed-json");
    assert!(listed.as_valid().is_none());
    let ListedEntry::Corrupt { reason, .. } = listed else {
        panic!("expected Corrupt variant for malformed-json entry");
    };
    assert!(
        reason.starts_with("metadata.json malformed: "),
        "malformed-JSON reason should carry the malformed prefix \
         (Category::Syntax route), got: {reason}",
    );
}
#[test]
fn cache_dir_list_classifies_incomplete_metadata_as_corrupt() {
    // Well-formed JSON that lacks required fields must route to the
    // "schema drift" reason and name the first missing field.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    let entry_dir = root.path().join("incomplete-metadata");
    fs::create_dir_all(&entry_dir).unwrap();
    fs::write(entry_dir.join("metadata.json"), br#"{"version": "6.14"}"#).unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 1);
    let listed = &entries[0];
    assert_eq!(listed.key(), "incomplete-metadata");
    assert!(
        listed.as_valid().is_none(),
        "incomplete-metadata missing required fields must not deserialize as Valid",
    );
    let ListedEntry::Corrupt { reason, .. } = listed else {
        panic!("expected Corrupt variant for entry with incomplete metadata");
    };
    assert!(
        reason.starts_with("metadata.json schema drift: "),
        "incomplete-metadata reason should carry the schema-drift \
         prefix (Category::Data route), got: {reason}",
    );
    assert!(
        reason.contains("missing field `source`"),
        "incomplete-metadata reason should name the first missing required field, got: {reason}",
    );
}
#[test]
fn cache_dir_list_classifies_truncated_json_as_corrupt() {
    // JSON cut off mid-document must route to the "truncated" reason.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    let entry_dir = root.path().join("truncated-json");
    fs::create_dir_all(&entry_dir).unwrap();
    fs::write(entry_dir.join("metadata.json"), br#"{"source":"#).unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 1);
    let listed = &entries[0];
    assert_eq!(listed.key(), "truncated-json");
    assert!(listed.as_valid().is_none());
    let ListedEntry::Corrupt { reason, .. } = listed else {
        panic!("expected Corrupt variant for truncated-json entry");
    };
    assert!(
        reason.starts_with("metadata.json truncated: "),
        "truncated-JSON reason should carry the truncated prefix \
         (Category::Eof route), got: {reason}",
    );
}
#[test]
fn cache_dir_list_skips_tmp_dirs() {
    // In-progress staging directories (".tmp-" prefix) are invisible to
    // list().
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    fs::create_dir_all(root.path().join(".tmp-in-progress-12345")).unwrap();
    assert!(cache.list().unwrap().is_empty());
}
#[test]
fn cache_dir_list_skips_regular_files() {
    // Stray plain files under the root are not cache entries.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    fs::write(root.path().join("stray-file.txt"), b"stray").unwrap();
    assert!(cache.list().unwrap().is_empty());
}
#[test]
fn cache_dir_clean_all() {
    // clean_all() removes every entry and reports how many were removed.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    for (key, version) in [("a", "6.14.0"), ("b", "6.14.1"), ("c", "6.14.2")] {
        cache
            .store(key, &CacheArtifacts::new(&image), &test_metadata(version))
            .unwrap();
    }
    assert_eq!(cache.clean_all().unwrap(), 3);
    assert!(cache.list().unwrap().is_empty());
}
#[test]
fn cache_dir_clean_keep_n() {
    // clean_keep(1) retains only the newest entry (by built_at) and
    // reports the two removals.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    // Build metadata with a caller-chosen build timestamp.
    let timestamped = |version: &str, built_at: &str| KernelMetadata {
        built_at: built_at.to_string(),
        ..test_metadata(version)
    };
    let meta_old = timestamped("6.13.0", "2026-04-10T10:00:00Z");
    let meta_new = timestamped("6.14.2", "2026-04-12T10:00:00Z");
    let meta_mid = timestamped("6.14.0", "2026-04-11T10:00:00Z");
    cache
        .store("old", &CacheArtifacts::new(&image), &meta_old)
        .unwrap();
    cache
        .store("new", &CacheArtifacts::new(&image), &meta_new)
        .unwrap();
    cache
        .store("mid", &CacheArtifacts::new(&image), &meta_mid)
        .unwrap();
    assert_eq!(cache.clean_keep(1).unwrap(), 2);
    let remaining = cache.list().unwrap();
    assert_eq!(remaining.len(), 1);
    assert_eq!(remaining[0].key(), "new");
}
#[test]
fn cache_dir_clean_keep_more_than_exist() {
    // Asking to keep more entries than exist removes nothing.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    cache
        .store(
            "only",
            &CacheArtifacts::new(&image),
            &test_metadata("6.14.2"),
        )
        .unwrap();
    assert_eq!(cache.clean_keep(5).unwrap(), 0);
    assert_eq!(cache.list().unwrap().len(), 1);
}
#[test]
fn cache_dir_clean_empty_cache() {
    // clean_all() on an empty cache is a no-op reporting zero removals.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    assert_eq!(cache.clean_all().unwrap(), 0);
}
#[test]
fn cache_dir_store_rejects_image_name_traversal() {
    // A metadata image_name containing ".." must be refused by store() so
    // it cannot write outside the entry directory.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = KernelMetadata {
        image_name: "../escape".to_string(),
        ..test_metadata("6.14.2")
    };
    let err = cache
        .store("valid-key", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains("image name"),
        "expected image_name rejection, got: {err}"
    );
}
#[test]
fn cache_dir_store_tmp_prefix_key_rejected() {
    // Keys colliding with the ".tmp-" staging namespace are rejected.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2");
    let err = cache
        .store(".tmp-sneaky", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains(".tmp-"),
        "expected .tmp- rejection, got: {err}"
    );
}
#[test]
fn cache_dir_lookup_tmp_prefix_returns_none() {
    // lookup() never resolves keys inside the staging namespace.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    assert!(cache.lookup(".tmp-sneaky").is_none());
}
#[test]
fn cache_dir_store_empty_key_rejected() {
    // An empty key is invalid for store().
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2");
    let err = cache
        .store("", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains("empty"),
        "expected empty-key error, got: {err}"
    );
}
#[test]
fn cache_dir_lookup_empty_key_returns_none() {
    // lookup("") is a miss rather than an error or a panic.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    assert!(cache.lookup("").is_none());
}
#[test]
fn cache_dir_store_path_traversal_rejected() {
    // ".." components in the key must not be allowed to escape the root.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2");
    let err = cache
        .store("../escape", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains("path"),
        "expected path-traversal error, got: {err}"
    );
}
#[test]
fn cache_dir_lookup_path_traversal_returns_none() {
    // Traversal-shaped keys are misses on the read side too.
    let root = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root.path().to_path_buf());
    for key in ["../escape", "foo/../bar"] {
        assert!(cache.lookup(key).is_none());
    }
}
#[test]
fn cache_dir_store_slash_in_key_rejected() {
    // Path separators inside a key are rejected outright.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2");
    let err = cache
        .store("a/b", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains("path separator"),
        "expected path-separator error, got: {err}"
    );
}
#[test]
fn cache_dir_store_whitespace_only_key_rejected() {
    // A key consisting solely of whitespace counts as empty.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2");
    let err = cache
        .store("   ", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains("empty"),
        "expected empty/whitespace error, got: {err}"
    );
}
#[test]
fn cache_dir_clean_keep_n_with_mixed_entries() {
    // With two valid entries plus one corrupt directory, clean_keep(1)
    // must remove two things (the older valid entry and the corrupt one)
    // and leave only the newest valid entry.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let timestamped = |version: &str, built_at: &str| KernelMetadata {
        built_at: built_at.to_string(),
        ..test_metadata(version)
    };
    let meta_new = timestamped("6.14.2", "2026-04-12T10:00:00Z");
    let meta_old = timestamped("6.13.0", "2026-04-10T10:00:00Z");
    cache
        .store("new", &CacheArtifacts::new(&image), &meta_new)
        .unwrap();
    cache
        .store("old", &CacheArtifacts::new(&image), &meta_old)
        .unwrap();
    fs::create_dir_all(scratch.path().join("cache").join("corrupt")).unwrap();
    assert_eq!(cache.clean_keep(1).unwrap(), 2);
    let remaining = cache.list().unwrap();
    assert_eq!(remaining.len(), 1);
    assert_eq!(remaining[0].key(), "new");
}
#[test]
fn cache_dir_store_overwrites_existing_key_atomically() {
    // Re-storing a key swaps in the new image and metadata wholesale and
    // leaves no ".tmp-" staging residue under the cache root.
    let scratch = TempDir::new().unwrap();
    let cache_root = scratch.path().join("cache");
    let cache = CacheDir::with_root(cache_root.clone());
    // First generation: image bytes "version-a".
    let dir_a = TempDir::new().unwrap();
    let image_a = create_fake_image(dir_a.path());
    fs::write(&image_a, b"version-a").unwrap();
    let meta_a = KernelMetadata {
        built_at: "2026-04-10T00:00:00Z".to_string(),
        config_hash: Some("hash-a".to_string()),
        ..test_metadata("6.14.2")
    };
    let entry_a = cache
        .store("collide", &CacheArtifacts::new(&image_a), &meta_a)
        .unwrap();
    assert_eq!(
        fs::read(entry_a.path.join("bzImage")).unwrap(),
        b"version-a"
    );
    // Second generation under the same key: image bytes "version-b".
    let dir_b = TempDir::new().unwrap();
    let image_b = create_fake_image(dir_b.path());
    fs::write(&image_b, b"version-b").unwrap();
    let meta_b = KernelMetadata {
        built_at: "2026-04-18T00:00:00Z".to_string(),
        config_hash: Some("hash-b".to_string()),
        ..test_metadata("6.14.2")
    };
    let entry_b = cache
        .store("collide", &CacheArtifacts::new(&image_b), &meta_b)
        .unwrap();
    assert_eq!(
        fs::read(entry_b.path.join("bzImage")).unwrap(),
        b"version-b",
        "new content must replace old content atomically"
    );
    let installed_meta = read_metadata(&entry_b.path).expect("metadata.json");
    assert_eq!(installed_meta.built_at, "2026-04-18T00:00:00Z");
    assert_eq!(installed_meta.config_hash.as_deref(), Some("hash-b"));
    // No staging directory may survive a successful swap.
    for dirent in fs::read_dir(&cache_root).unwrap() {
        let name = dirent.unwrap().file_name().to_string_lossy().into_owned();
        assert!(
            !name.starts_with(".tmp-"),
            "unexpected leftover .tmp- directory under cache_root: {name}"
        );
    }
}
#[test]
fn cache_dir_store_cleans_stale_tmp() {
    // A leftover staging dir for the same key (e.g. from a crashed run of
    // this process) is swept away by the next successful store().
    let scratch = TempDir::new().unwrap();
    let cache_root = scratch.path().join("cache");
    let cache = CacheDir::with_root(cache_root.clone());
    let stale_tmp = cache_root.join(format!(".tmp-mykey-{}", std::process::id()));
    fs::create_dir_all(&stale_tmp).unwrap();
    fs::write(stale_tmp.join("junk"), b"leftover").unwrap();
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store("mykey", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert!(entry.path.join("bzImage").exists());
    assert!(!stale_tmp.exists());
}
#[test]
fn cache_dir_store_atomic_under_concurrent_readers() {
// Stress test for store()'s atomic swap: reader threads continuously
// lookup() + read the cached image while the main thread alternates
// storing two distinct image versions under the same key. Any reader
// observation of a missing entry, an unreadable image file, or bytes
// matching neither version counts as an atomicity violation.
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::thread;
let tmp = TempDir::new().unwrap();
let cache_root = tmp.path().join("cache");
let cache = Arc::new(CacheDir::with_root(cache_root.clone()));
// Two distinguishable payloads; repeat(64) makes them large enough that
// a torn/partial copy would compare unequal to both full versions.
let src_a = TempDir::new().unwrap();
let image_a = src_a.path().join("bzImage");
let content_a = b"AAAAAAAA-image-version-a-AAAAAAAA".repeat(64);
fs::write(&image_a, &content_a).unwrap();
let src_b = TempDir::new().unwrap();
let image_b = src_b.path().join("bzImage");
let content_b = b"BBBBBBBB-image-version-b-BBBBBBBB".repeat(64);
fs::write(&image_b, &content_b).unwrap();
// Prime the key before spawning readers so a lookup miss during the
// loop can only mean the swap exposed a gap.
let meta_prime = test_metadata("6.14.2");
cache
.store("atomic-key", &CacheArtifacts::new(&image_a), &meta_prime)
.unwrap();
const WRITE_ITERATIONS: usize = 40;
let stop = Arc::new(AtomicBool::new(false));
let lookups_observed = Arc::new(AtomicUsize::new(0));
let atomicity_violations = Arc::new(AtomicUsize::new(0));
let observed_a = Arc::new(AtomicUsize::new(0));
let observed_b = Arc::new(AtomicUsize::new(0));
let reader_count = 4;
let mut readers = Vec::with_capacity(reader_count);
// Reader threads: spin on lookup + read until told to stop, classifying
// each observation as version-a, version-b, or a violation.
for _ in 0..reader_count {
let cache = Arc::clone(&cache);
let stop = Arc::clone(&stop);
let lookups_observed = Arc::clone(&lookups_observed);
let violations = Arc::clone(&atomicity_violations);
let observed_a = Arc::clone(&observed_a);
let observed_b = Arc::clone(&observed_b);
let expected_a = content_a.clone();
let expected_b = content_b.clone();
readers.push(thread::spawn(move || {
while !stop.load(Ordering::Relaxed) {
// Entry must always be resolvable: the key was primed above.
let Some(entry) = cache.lookup("atomic-key") else {
violations.fetch_add(1, Ordering::Relaxed);
continue;
};
let image_path = entry.image_path();
// The image file must be readable at the path lookup returned.
let Ok(bytes) = fs::read(&image_path) else {
violations.fetch_add(1, Ordering::Relaxed);
continue;
};
if bytes == expected_a {
observed_a.fetch_add(1, Ordering::Relaxed);
} else if bytes == expected_b {
observed_b.fetch_add(1, Ordering::Relaxed);
} else {
// Neither full version: a torn read.
violations.fetch_add(1, Ordering::Relaxed);
}
lookups_observed.fetch_add(1, Ordering::Relaxed);
}
}));
}
// Writer: alternate versions a/b under the contended key.
for i in 0..WRITE_ITERATIONS {
let (image, label) = if i % 2 == 0 {
(&image_a, "a")
} else {
(&image_b, "b")
};
let mut meta = test_metadata("6.14.2");
meta.built_at = format!("2026-04-18T00:00:{:02}Z", i % 60);
meta.config_hash = Some(format!("iter-{i}-{label}"));
cache
.store("atomic-key", &CacheArtifacts::new(image), &meta)
.expect("store under concurrent readers must not fail");
}
stop.store(true, Ordering::Relaxed);
for r in readers {
r.join().expect("reader thread panicked");
}
assert_eq!(
atomicity_violations.load(Ordering::Relaxed),
0,
"lookup observed a missing or torn cache entry during concurrent store; \
rename-to-staging swap is not atomic",
);
assert!(
lookups_observed.load(Ordering::Relaxed) > 0,
"readers never observed a successful lookup — test did not \
actually exercise the concurrency window",
);
// Coverage of both versions is scheduling-dependent, so only warn
// (do not fail) if one version was never seen.
let saw_a = observed_a.load(Ordering::Relaxed);
let saw_b = observed_b.load(Ordering::Relaxed);
if saw_a == 0 || saw_b == 0 {
eprintln!(
"cache_dir_store_atomic_under_concurrent_readers: \
one writer version was never observed by readers \
(saw_a={saw_a}, saw_b={saw_b}). Atomicity invariant \
still holds; coverage of the race window is \
probabilistic under scheduling pressure.",
);
}
// After the last store, the entry must hold exactly one full version.
let final_entry = cache.lookup("atomic-key").expect("entry must exist");
let final_bytes = fs::read(final_entry.image_path()).unwrap();
assert!(
final_bytes == content_a || final_bytes == content_b,
"final image must match one of the writer's versions",
);
// And no staging residue may remain under the root.
for dirent in fs::read_dir(&cache_root).unwrap() {
let name = dirent.unwrap().file_name().to_string_lossy().into_owned();
assert!(
!name.starts_with(".tmp-"),
"unexpected leftover .tmp- directory under cache_root: {name}",
);
}
}
#[test]
fn cache_dir_store_with_vmlinux() {
    // Storing with a vmlinux artifact copies it alongside the image,
    // records has_vmlinux, and leaves both source files untouched.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let vmlinux = image_dir.path().join("vmlinux");
    fs::write(&vmlinux, b"fake vmlinux ELF").unwrap();
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store(
            "with-vmlinux",
            &CacheArtifacts::new(&image).with_vmlinux(&vmlinux),
            &meta,
        )
        .unwrap();
    for artifact in ["bzImage", "vmlinux", "metadata.json"] {
        assert!(entry.path.join(artifact).exists());
    }
    assert!(entry.metadata.has_vmlinux);
    assert!(image.exists());
    assert!(vmlinux.exists());
}
#[test]
fn cache_dir_store_without_vmlinux() {
    // Without a vmlinux artifact no vmlinux file is written and both
    // vmlinux-related metadata flags stay false.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store("no-vmlinux", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert!(entry.path.join("bzImage").exists());
    assert!(!entry.path.join("vmlinux").exists());
    assert!(entry.path.join("metadata.json").exists());
    assert!(!entry.metadata.has_vmlinux);
    assert!(!entry.metadata.vmlinux_stripped);
}
#[test]
fn cache_dir_store_falls_back_when_strip_fails() {
    // A vmlinux that is not valid ELF cannot be stripped; store() must
    // fall back to copying the raw bytes verbatim and record
    // vmlinux_stripped = false.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let vmlinux = image_dir.path().join("vmlinux");
    let raw = b"not an ELF file";
    fs::write(&vmlinux, raw).unwrap();
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store(
            "strip-fallback",
            &CacheArtifacts::new(&image).with_vmlinux(&vmlinux),
            &meta,
        )
        .unwrap();
    let cached = fs::read(entry.path.join("vmlinux")).unwrap();
    assert_eq!(cached, raw, "fallback must copy raw bytes verbatim");
    assert!(entry.metadata.has_vmlinux);
    assert!(
        !entry.metadata.vmlinux_stripped,
        "raw-fallback path must set vmlinux_stripped = false"
    );
}
/// Builds an in-memory CacheEntry (the path deliberately does not exist)
/// with the given vmlinux presence/stripped flags, for exercising the
/// should_warn_unstripped gate without touching the filesystem.
fn make_warn_test_entry(has_vmlinux: bool, vmlinux_stripped: bool) -> CacheEntry {
    let mut metadata = KernelMetadata::new(
        super::super::metadata::KernelSource::Tarball,
        "x86_64".to_string(),
        "bzImage".to_string(),
        "2026-04-24T12:00:00Z".to_string(),
    );
    metadata.set_has_vmlinux(has_vmlinux);
    metadata.set_vmlinux_stripped(vmlinux_stripped);
    CacheEntry {
        key: "test-key".to_string(),
        path: PathBuf::from("/nonexistent/entry"),
        metadata,
    }
}
#[test]
fn should_warn_unstripped_fires_when_vmlinux_present_and_unstripped() {
    // Present-but-unstripped vmlinux is the one combination that warns.
    let entry = make_warn_test_entry(true, false);
    assert!(
        should_warn_unstripped(&entry),
        "has_vmlinux=true + vmlinux_stripped=false must warn"
    );
}
#[test]
fn should_warn_unstripped_silent_when_vmlinux_stripped() {
    // A vmlinux that was already stripped needs no warning.
    let entry = make_warn_test_entry(true, true);
    assert!(
        !should_warn_unstripped(&entry),
        "has_vmlinux=true + vmlinux_stripped=true must not warn"
    );
}
#[test]
fn should_warn_unstripped_silent_when_no_vmlinux() {
    // Without a vmlinux at all there is nothing to warn about.
    let entry = make_warn_test_entry(false, false);
    assert!(
        !should_warn_unstripped(&entry),
        "has_vmlinux=false must not warn (no vmlinux to worry about)"
    );
}
/// Builds a warn-eligible CacheEntry (has_vmlinux = true, unstripped) with
/// a caller-chosen key, for exercising the per-key warn deduplication.
fn make_stale_entry_with_key(key: &str) -> CacheEntry {
    let mut metadata = KernelMetadata::new(
        super::super::metadata::KernelSource::Tarball,
        "x86_64".to_string(),
        "bzImage".to_string(),
        "2026-04-24T12:00:00Z".to_string(),
    );
    metadata.set_has_vmlinux(true);
    metadata.set_vmlinux_stripped(false);
    CacheEntry {
        key: key.to_string(),
        path: PathBuf::from("/nonexistent/entry"),
        metadata,
    }
}
#[test]
fn should_emit_unstripped_warn_first_call_returns_true() {
    // First sighting of a stale key must both fire the warn and record
    // the key in the dedup set.
    let seen = Mutex::new(HashSet::new());
    let entry = make_stale_entry_with_key("first-call-key");
    assert!(
        should_emit_unstripped_warn(&entry, &seen),
        "first call against an empty set must return true so the \
         caller emits the warn",
    );
    let recorded = seen.lock().unwrap().contains("first-call-key");
    assert!(
        recorded,
        "first call must insert the key into the dedup set so \
         subsequent calls suppress",
    );
}
#[test]
fn should_emit_unstripped_warn_repeat_call_same_key_returns_false() {
    // A second sighting of the same key is suppressed.
    let seen = Mutex::new(HashSet::new());
    let entry = make_stale_entry_with_key("dedup-key");
    assert!(
        should_emit_unstripped_warn(&entry, &seen),
        "first call must return true (warn fires)"
    );
    assert!(
        !should_emit_unstripped_warn(&entry, &seen),
        "second call for the same key must return false (dedup \
         suppression — the warn already fired in this process)",
    );
}
#[test]
fn should_emit_unstripped_warn_distinct_keys_each_warn_once() {
    // Dedup is per key, not global: each distinct key warns exactly once.
    let seen = Mutex::new(HashSet::new());
    let entry_a = make_stale_entry_with_key("key-a");
    let entry_b = make_stale_entry_with_key("key-b");
    assert!(
        should_emit_unstripped_warn(&entry_a, &seen),
        "key-a's first call must return true",
    );
    assert!(
        should_emit_unstripped_warn(&entry_b, &seen),
        "key-b is distinct from key-a, so its first call must \
         also return true (per-key dedup, not global)",
    );
    assert!(
        !should_emit_unstripped_warn(&entry_a, &seen),
        "key-a's second call must dedup",
    );
    assert!(
        !should_emit_unstripped_warn(&entry_b, &seen),
        "key-b's second call must dedup",
    );
}
#[test]
fn should_emit_unstripped_warn_no_warn_needed_skips_dedup_insert() {
    // Entries that never warrant a warn must not be recorded in the dedup
    // set — the gate short-circuits before inserting.
    let seen = Mutex::new(HashSet::new());
    let no_vmlinux = make_warn_test_entry(false, false);
    assert!(
        !should_emit_unstripped_warn(&no_vmlinux, &seen),
        "an entry that doesn't need warning must return false",
    );
    assert!(
        seen.lock().unwrap().is_empty(),
        "no-warn-needed path must NOT pollute the dedup set; \
         the gate must short-circuit before the insert",
    );
    let stripped = make_warn_test_entry(true, true);
    assert!(
        !should_emit_unstripped_warn(&stripped, &seen),
        "an entry whose vmlinux WAS stripped must return false",
    );
    assert!(
        seen.lock().unwrap().is_empty(),
        "stripped-vmlinux entry must also leave the dedup set \
         empty — only stale entries get recorded",
    );
}
#[test]
fn cache_dir_store_preserves_original_image() {
    // store() copies rather than moves: the source image survives.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2");
    cache
        .store("key", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert!(image.exists());
}
#[test]
fn cache_entry_image_path_joins_key_with_image_name() {
    // image_path() is the entry directory joined with the metadata's
    // image name, and points at a real file after store().
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let entry = cache
        .store(
            "key",
            &CacheArtifacts::new(&image),
            &test_metadata("6.14.2"),
        )
        .unwrap();
    assert_eq!(entry.image_path(), entry.path.join("bzImage"));
    assert!(entry.image_path().exists());
}
#[test]
fn cache_entry_vmlinux_path_none_when_not_stored() {
    // vmlinux_path() is None for entries stored without a vmlinux.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let entry = cache
        .store(
            "no-vml",
            &CacheArtifacts::new(&image),
            &test_metadata("6.14.2"),
        )
        .unwrap();
    assert!(entry.vmlinux_path().is_none());
}
#[test]
fn kconfig_status_matches_when_hash_equal() {
    // Identical recorded and current kconfig hashes yield Matches.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2").with_ktstr_kconfig_hash(Some("deadbeef".to_string()));
    let entry = cache
        .store("kc-match", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert_eq!(entry.kconfig_status("deadbeef"), KconfigStatus::Matches);
}
#[test]
fn kconfig_status_untracked_when_no_hash_in_entry() {
    // Entries stored without a kconfig hash report Untracked for any
    // current hash the caller passes.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = KernelMetadata {
        ktstr_kconfig_hash: None,
        ..test_metadata("6.14.2")
    };
    let entry = cache
        .store("kc-untracked", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert_eq!(entry.kconfig_status("anything"), KconfigStatus::Untracked);
}
#[test]
fn kconfig_status_stale_pins_cached_and_current_field_order() {
    // Differing hashes yield Stale; this pins which value lands in which
    // field (`cached` = recorded in the entry, `current` = caller input)
    // to guard against the two being swapped.
    let scratch = TempDir::new().unwrap();
    let cache = CacheDir::with_root(scratch.path().join("cache"));
    let image_dir = TempDir::new().unwrap();
    let image = create_fake_image(image_dir.path());
    let meta = test_metadata("6.14.2").with_ktstr_kconfig_hash(Some("old_cached".to_string()));
    let entry = cache
        .store("kc-stale", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    match entry.kconfig_status("new_current") {
        KconfigStatus::Stale { cached, current } => {
            assert_eq!(
                cached, "old_cached",
                "`cached` must hold the hash recorded in the entry"
            );
            assert_eq!(
                current, "new_current",
                "`current` must hold the hash the caller passed in"
            );
        }
        other => panic!("expected KconfigStatus::Stale, got {other:?}"),
    }
}
#[test]
fn acquire_shared_lock_creates_lockfile_at_expected_path() {
    // First acquire lazily creates both the .locks/ directory and the
    // per-key lockfile underneath it.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let _guard = cache.acquire_shared_lock("some-key-123").unwrap();
    let locks_dir = tmp.path().join(".locks");
    assert!(
        locks_dir.is_dir(),
        "parent .locks/ subdirectory must materialize on first acquire",
    );
    assert!(
        locks_dir.join("some-key-123.lock").exists(),
        "lockfile must materialize at {{cache_root}}/.locks/{{key}}.lock on first acquire",
    );
}
#[test]
fn acquire_shared_lock_permits_concurrent_readers() {
    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};
    let tmp = TempDir::new().unwrap();
    let cache = Arc::new(CacheDir::with_root(tmp.path().to_path_buf()));
    let key = "concurrent-sh";
    let success = Arc::new(AtomicUsize::new(0));
    // Spawn all readers first so their shared-lock windows overlap; the
    // short sleep keeps each guard alive while the peers acquire.
    let readers: Vec<_> = (0..4)
        .map(|_| {
            let cache = Arc::clone(&cache);
            let success = Arc::clone(&success);
            std::thread::spawn(move || {
                let _g = cache
                    .acquire_shared_lock(key)
                    .expect("LOCK_SH must succeed");
                success.fetch_add(1, Ordering::SeqCst);
                std::thread::sleep(std::time::Duration::from_millis(50));
            })
        })
        .collect();
    for reader in readers {
        reader.join().expect("reader thread panicked");
    }
    assert_eq!(
        success.load(Ordering::SeqCst),
        4,
        "all 4 concurrent LOCK_SH acquires must succeed",
    );
}
// A live shared-lock holder must make the non-blocking exclusive acquire
// fail immediately with an actionable contention diagnostic.
#[test]
fn try_acquire_exclusive_lock_fails_with_active_reader() {
use std::sync::Arc;
use std::sync::mpsc;
let tmp = TempDir::new().unwrap();
let cache = Arc::new(CacheDir::with_root(tmp.path().to_path_buf()));
let key = "force-contended";
// `ready` signals that the reader holds LOCK_SH; `release` tells the
// reader it may drop its guard and exit.
let (ready_tx, ready_rx) = mpsc::channel();
let (release_tx, release_rx) = mpsc::channel::<()>();
let cache_reader = Arc::clone(&cache);
let reader = std::thread::spawn(move || {
let _g = cache_reader
.acquire_shared_lock(key)
.expect("reader LOCK_SH must succeed");
ready_tx.send(()).unwrap();
// Park here so the shared lock stays held while the main thread asserts.
release_rx.recv().unwrap();
});
ready_rx
.recv_timeout(std::time::Duration::from_secs(5))
.expect("reader thread did not signal ready in time");
// With LOCK_SH held by the reader, the non-blocking exclusive acquire
// must fail rather than block.
let err = cache.try_acquire_exclusive_lock(key).unwrap_err();
let msg = format!("{err:#}");
assert!(
msg.contains("is locked by active test runs") || msg.contains("holders:"),
"error must surface the contention diagnostic; got: {msg}",
);
assert!(
msg.contains("lockfile"),
"error must name the lockfile path: {msg}",
);
// Unblock and reap the reader so the test doesn't leak a thread.
release_tx.send(()).unwrap();
reader.join().expect("reader thread panicked");
}
// The blocking exclusive acquire must give up after roughly the supplied
// timeout when a reader holds the shared lock, and the error must name
// both the timeout and the env-var override.
#[test]
fn acquire_exclusive_lock_blocking_times_out_on_contention() {
use std::sync::Arc;
use std::sync::mpsc;
let tmp = TempDir::new().unwrap();
let cache = Arc::new(CacheDir::with_root(tmp.path().to_path_buf()));
let key = "blocking-timeout";
// `ready` confirms the reader holds LOCK_SH before we start the clock;
// `release` lets it exit after the assertions.
let (ready_tx, ready_rx) = mpsc::channel();
let (release_tx, release_rx) = mpsc::channel::<()>();
let cache_reader = Arc::clone(&cache);
let reader = std::thread::spawn(move || {
let _g = cache_reader
.acquire_shared_lock(key)
.expect("reader LOCK_SH must succeed");
ready_tx.send(()).unwrap();
release_rx.recv().unwrap();
});
ready_rx
.recv_timeout(std::time::Duration::from_secs(5))
.expect("reader did not signal ready in time");
// Time the contended blocking acquire with a deliberately short timeout.
let start = std::time::Instant::now();
let err = cache
.acquire_exclusive_lock_blocking(key, std::time::Duration::from_millis(200))
.unwrap_err();
let elapsed = start.elapsed();
let msg = format!("{err:#}");
assert!(
msg.contains("timed out"),
"error must mention the timeout: {msg}",
);
// 150ms lower bound leaves slack for coarse timers while still proving
// the call actually waited rather than failing fast.
assert!(
elapsed >= std::time::Duration::from_millis(150),
"acquire should have waited ~timeout (150ms lower bound); \
got {elapsed:?}",
);
assert!(
msg.contains("KTSTR_CACHE_STORE_LOCK_TIMEOUT"),
"timeout error must surface the env-var override so \
operators discover the remediation without reading docs: {msg}",
);
release_tx.send(()).unwrap();
reader.join().expect("reader thread panicked");
}
#[test]
fn store_succeeds_under_internal_exclusive_lock() {
    // store() takes its own exclusive lock internally; with no readers the
    // call must succeed, and the lockfile it creates must outlive the call.
    let tmp = TempDir::new().unwrap();
    let root = tmp.path().join("cache");
    let cache = CacheDir::with_root(root.clone());
    let img_dir = TempDir::new().unwrap();
    let image = create_fake_image(img_dir.path());
    let entry = cache
        .store(
            "internal-lock",
            &CacheArtifacts::new(&image),
            &test_metadata("6.14.2"),
        )
        .expect("store must succeed when no readers contend");
    assert!(entry.path.join("bzImage").exists());
    let lockfile = root.join(".locks").join("internal-lock.lock");
    assert!(
        lockfile.exists(),
        "lockfile materialized during store must persist after store returns",
    );
}
// store() must wait for an active shared-lock holder: it may not complete
// while the reader holds LOCK_SH, and must finish promptly once released.
#[test]
fn store_blocks_while_reader_holds_shared_lock() {
use std::sync::Arc;
use std::sync::mpsc;
let tmp = TempDir::new().unwrap();
let cache = Arc::new(CacheDir::with_root(tmp.path().join("cache-block")));
let key = "blocked-store";
// Handshake channels: `ready` = reader holds the lock, `release` = reader
// may drop it.
let (ready_tx, ready_rx) = mpsc::channel();
let (release_tx, release_rx) = mpsc::channel::<()>();
let cache_reader = Arc::clone(&cache);
let reader = std::thread::spawn(move || {
let _g = cache_reader
.acquire_shared_lock(key)
.expect("reader LOCK_SH must succeed");
ready_tx.send(()).unwrap();
release_rx.recv().unwrap();
});
ready_rx
.recv_timeout(std::time::Duration::from_secs(5))
.expect("reader did not signal ready in time");
let src_dir = TempDir::new().unwrap();
let image = create_fake_image(src_dir.path());
let meta = test_metadata("6.14.2");
// The store runs on its own thread; `store_done` fires only after store()
// returns, letting us probe for premature completion.
let (store_done_tx, store_done_rx) = mpsc::channel();
let cache_store = Arc::clone(&cache);
let image_clone = image.clone();
let store_thread = std::thread::spawn(move || {
let _ = cache_store.store(key, &CacheArtifacts::new(&image_clone), &meta);
store_done_tx.send(()).unwrap();
});
// A recv_timeout that *errors* is the success case here: no completion
// signal within 200ms means store() is blocked on the reader's lock.
let early = store_done_rx.recv_timeout(std::time::Duration::from_millis(200));
assert!(
early.is_err(),
"store() must block while reader holds LOCK_SH; got completion signal early",
);
release_tx.send(()).unwrap();
// Once the reader releases, the blocked store must complete well within
// the generous 10s ceiling.
let finish = store_done_rx.recv_timeout(std::time::Duration::from_secs(10));
assert!(
finish.is_ok(),
"store() must complete after reader releases; got timeout",
);
reader.join().expect("reader thread panicked");
store_thread.join().expect("store thread panicked");
}
#[test]
fn lock_path_returns_expected_shape() {
    // lock_path must resolve to {root}/.locks/{key}.lock.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let expected = tmp.path().join(".locks").join("my-key-42.lock");
    assert_eq!(cache.lock_path("my-key-42"), expected);
}
#[test]
fn locks_subdir_persists_after_guard_drop() {
    // Dropping the lock guard must not remove .locks/ — later acquires key
    // on the same inode.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let locks_dir = tmp.path().join(".locks");
    let guard = cache
        .acquire_shared_lock("persist-test")
        .expect("acquire must succeed");
    assert!(locks_dir.is_dir(), "must exist during guard lifetime");
    drop(guard);
    assert!(
        locks_dir.is_dir(),
        ".locks/ must persist after guard drop — next acquire \
         keys /proc/locks on the existing inode",
    );
}
#[test]
fn list_skips_locks_dotfile_subdirectory() {
    // .locks/ (and any other dotfile child) must never appear in list().
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    {
        let _guard = cache.acquire_shared_lock("dummy").expect("acquire");
    }
    assert!(
        tmp.path().join(".locks").is_dir(),
        ".locks/ must exist after acquire drop",
    );
    let entries = cache.list().expect("list must succeed");
    let keys: Vec<&str> = entries
        .iter()
        .map(|e| match e {
            ListedEntry::Valid(entry) => entry.key.as_str(),
            ListedEntry::Corrupt { key, .. } => key.as_str(),
        })
        .collect();
    let has_dotfile = keys.iter().any(|k| k.starts_with('.'));
    assert!(
        !has_dotfile,
        "list() must not return dotfile children: {keys:?}",
    );
}
#[test]
fn acquire_on_empty_root_creates_locks_dir_lazily() {
    // An existing-but-empty root has no .locks/ until the first acquire.
    let tmp = TempDir::new().unwrap();
    let root = tmp.path().join("pristine");
    std::fs::create_dir(&root).unwrap();
    let cache = CacheDir::with_root(root.clone());
    let locks_dir = root.join(".locks");
    assert!(!locks_dir.exists());
    let _guard = cache
        .acquire_shared_lock("lazy-test")
        .expect("first acquire on empty root must succeed");
    assert!(
        locks_dir.is_dir(),
        "first acquire must materialize .locks/ lazily",
    );
}
#[test]
fn cache_dir_clean_all_preserves_locks_subdir() {
    // clean_all removes cache entries but must leave .locks/ and its
    // lockfiles untouched.
    let tmp = TempDir::new().unwrap();
    let root = tmp.path().join("cache");
    let cache = CacheDir::with_root(root.clone());
    let img_dir = TempDir::new().unwrap();
    let image = create_fake_image(img_dir.path());
    cache
        .store(
            "entry-a",
            &CacheArtifacts::new(&image),
            &test_metadata("6.14.0"),
        )
        .expect("store must succeed");
    let _guard = cache
        .acquire_shared_lock("entry-a")
        .expect("SH acquire must succeed");
    let locks_dir = root.join(".locks");
    let lockfile = locks_dir.join("entry-a.lock");
    assert!(locks_dir.is_dir(), "precondition: .locks/ must exist");
    assert!(lockfile.exists(), "precondition: lockfile must exist");
    let removed = cache.clean_all().expect("clean_all must succeed");
    assert_eq!(removed, 1, "clean_all must remove exactly 1 entry");
    assert!(
        locks_dir.is_dir(),
        ".locks/ subdirectory must survive clean_all",
    );
    assert!(
        lockfile.exists(),
        "lockfile must still exist under .locks/ after clean_all",
    );
    assert!(
        !root.join("entry-a").exists(),
        "cache entry must be removed by clean_all",
    );
}
#[test]
fn cache_dir_acquire_rejects_path_traversal_key() {
    // A `..`-laden key must be rejected by the validator before any file is
    // created, so nothing can escape .locks/.
    let tmp = TempDir::new().unwrap();
    let cache_root = tmp.path().join("cache");
    let cache = CacheDir::with_root(cache_root.clone());
    let err = cache
        .acquire_shared_lock("../../etc/passwd")
        .expect_err("path-traversal key must be rejected");
    let msg = format!("{err:#}");
    assert!(
        msg.contains("path"),
        "error must mention path rejection: {msg}",
    );
    // Where the traversal would have landed had it been honoured.
    let escaped_lock = tmp.path().join("etc").join("passwd.lock");
    assert!(
        !escaped_lock.exists(),
        "path traversal must NOT create a lockfile outside .locks/",
    );
    let locks_dir = cache_root.join(".locks");
    let locks_dir_empty =
        !locks_dir.exists() || locks_dir.read_dir().unwrap().next().is_none();
    assert!(
        locks_dir_empty,
        ".locks/ must be empty if it exists at all — validator \
         rejects before lockfile creation",
    );
}
#[test]
fn try_acquire_exclusive_lock_succeeds_when_uncontended() {
    // With nobody else holding the key, the non-blocking exclusive acquire
    // succeeds, materializes the lockfile, and is re-acquirable after drop.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let lockfile = tmp.path().join(".locks").join("happy-path-key.lock");
    {
        let _guard = cache
            .try_acquire_exclusive_lock("happy-path-key")
            .expect("uncontended try_acquire_exclusive_lock must succeed");
        assert!(
            lockfile.exists(),
            "happy-path acquire must materialize the lockfile at \
             {} — without it, /proc/locks lookup of contention \
             diagnostics fails to attribute the holder",
            lockfile.display(),
        );
        assert!(
            tmp.path().join(".locks").is_dir(),
            ".locks/ subdirectory must exist after a happy-path \
             acquire (lazy materialization)",
        );
    }
    // The first guard dropped at scope exit; the same key must be free.
    let _second = cache
        .try_acquire_exclusive_lock("happy-path-key")
        .expect("second acquire on same key must succeed after the first guard drops");
}
#[test]
fn try_acquire_exclusive_lock_rejects_invalid_key() {
    // The key validator runs before any lockfile is opened.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let result = cache.try_acquire_exclusive_lock("../escape");
    let err = result.expect_err("invalid key must be rejected before lockfile open");
    let msg = format!("{err:#}");
    assert!(
        msg.contains("path"),
        "validator must surface a path-related diagnostic: {msg}",
    );
}
#[test]
fn try_acquire_exclusive_lock_distinct_roots_dont_contend() {
    // Same key under two roots maps to two different lockfiles, so the
    // acquires never contend.
    let tmp_a = TempDir::new().unwrap();
    let tmp_b = TempDir::new().unwrap();
    let cache_a = CacheDir::with_root(tmp_a.path().to_path_buf());
    let cache_b = CacheDir::with_root(tmp_b.path().to_path_buf());
    let _guard_a = cache_a
        .try_acquire_exclusive_lock("shared-name")
        .expect("acquire under root A must succeed");
    let _guard_b = cache_b.try_acquire_exclusive_lock("shared-name").expect(
        "acquire on the same key under root B must NOT \
         contend with A — different lockfiles, different OFDs",
    );
}
#[test]
fn cache_content_matches_when_only_built_at_differs() {
    // built_at is a timestamp, not content — it must not break equality.
    let metadata_built_at = |ts: &str| {
        let mut m = test_metadata("6.14.2");
        m.built_at = ts.to_string();
        m
    };
    let cached = metadata_built_at("2026-04-12T10:00:00Z");
    let caller = metadata_built_at("2026-04-12T11:00:00Z");
    assert!(
        cache_content_matches(&cached, &caller, false),
        "identical content hashes (config_hash, ktstr_kconfig_hash, \
         extra_kconfig_hash) and identical vmlinux presence must \
         classify as content-equal — built_at is just a timestamp",
    );
}
#[test]
fn cache_content_matches_when_config_hash_differs() {
    // A differing .config hash means a different boot image.
    let mut cached = test_metadata("6.14.2");
    let mut caller = test_metadata("6.14.2");
    cached.config_hash = Some(String::from("hash-cached"));
    caller.config_hash = Some(String::from("hash-caller"));
    assert!(
        !cache_content_matches(&cached, &caller, false),
        "distinct config_hash must classify as content-different \
         — the .config differs, so the boot image bytes differ",
    );
}
#[test]
fn cache_content_matches_when_ktstr_kconfig_hash_differs() {
    // A differing kconfig-fragment hash means a different build.
    let mut cached = test_metadata("6.14.2");
    let mut caller = test_metadata("6.14.2");
    cached.ktstr_kconfig_hash = Some(String::from("kc-cached"));
    caller.ktstr_kconfig_hash = Some(String::from("kc-caller"));
    assert!(
        !cache_content_matches(&cached, &caller, false),
        "distinct ktstr_kconfig_hash means the kconfig fragment \
         changed → built differently → content-different",
    );
}
#[test]
fn cache_content_matches_when_extra_kconfig_hash_differs() {
    // A differing user-fragment hash means a different build.
    let mut cached = test_metadata("6.14.2");
    let mut caller = test_metadata("6.14.2");
    cached.extra_kconfig_hash = Some(String::from("xc-cached"));
    caller.extra_kconfig_hash = Some(String::from("xc-caller"));
    assert!(
        !cache_content_matches(&cached, &caller, false),
        "distinct extra_kconfig_hash means the user fragment \
         changed → built differently → content-different",
    );
}
#[test]
fn cache_content_matches_when_vmlinux_presence_differs() {
    // vmlinux presence is content-defining in both directions: cached-has /
    // caller-lacks and cached-lacks / caller-has both differ.
    let caller = test_metadata("6.14.2");
    let mut cached_with = test_metadata("6.14.2");
    cached_with.set_has_vmlinux(true);
    assert!(
        !cache_content_matches(&cached_with, &caller, false),
        "cached has vmlinux, caller lacks vmlinux artifact — \
         content-different (publish must drop the sidecar)",
    );
    let cached_without = test_metadata("6.14.2");
    assert!(
        !cache_content_matches(&cached_without, &caller, true),
        "cached lacks vmlinux, caller supplies one — \
         content-different (publish must add the sidecar)",
    );
}
#[test]
fn store_in_lock_recheck_short_circuits_on_built_at_only_change() {
    // A second store whose metadata differs only in built_at must return
    // the already-published entry instead of republishing.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let img_dir = TempDir::new().unwrap();
    let image = create_fake_image(img_dir.path());
    let mut meta1 = test_metadata("6.14.2");
    meta1.built_at = "2026-04-12T10:00:00Z".to_string();
    cache
        .store("recheck-key", &CacheArtifacts::new(&image), &meta1)
        .unwrap();
    let mut meta2 = test_metadata("6.14.2");
    meta2.built_at = "2026-04-13T10:00:00Z".to_string();
    let returned = cache
        .store("recheck-key", &CacheArtifacts::new(&image), &meta2)
        .unwrap();
    assert_eq!(
        returned.metadata.built_at, "2026-04-12T10:00:00Z",
        "the in-lock recheck must short-circuit and return the \
         EXISTING cached entry — the returned built_at must \
         match meta1, not meta2. If this flips to meta2, the \
         recheck did not fire and every concurrent peer is \
         redundantly republishing.",
    );
    let on_disk = cache.lookup("recheck-key").unwrap();
    assert_eq!(
        on_disk.metadata.built_at, "2026-04-12T10:00:00Z",
        "the on-disk metadata must also remain meta1 — the \
         recheck must skip the rename/swap step",
    );
}
#[test]
fn store_in_lock_recheck_bypasses_when_content_actually_differs() {
    // When the content-defining config_hash changes, the second store must
    // republish rather than short-circuit.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let img_dir = TempDir::new().unwrap();
    let image = create_fake_image(img_dir.path());
    let mut meta1 = test_metadata("6.14.2");
    meta1.built_at = "2026-04-12T10:00:00Z".to_string();
    meta1.config_hash = Some("hash-v1".to_string());
    cache
        .store("bypass-key", &CacheArtifacts::new(&image), &meta1)
        .unwrap();
    let mut meta2 = test_metadata("6.14.2");
    meta2.built_at = "2026-04-13T10:00:00Z".to_string();
    meta2.config_hash = Some("hash-v2".to_string());
    let returned = cache
        .store("bypass-key", &CacheArtifacts::new(&image), &meta2)
        .unwrap();
    assert_eq!(
        returned.metadata.config_hash.as_deref(),
        Some("hash-v2"),
        "distinct config_hash must bypass the recheck and \
         publish meta2; the returned entry's config_hash must \
         be meta2's",
    );
    assert_eq!(
        returned.metadata.built_at, "2026-04-13T10:00:00Z",
        "with content actually changing, the publish must \
         land meta2's built_at",
    );
}
// Eight peers race store() on one key, split into two content groups
// (hash-a / hash-b). The recheck may collapse redundant publishes WITHIN a
// group, but must never return a cross-group result or synthesize data.
#[test]
fn store_in_lock_recheck_mixed_content_peers_publish_one_per_group() {
use std::collections::BTreeSet;
use std::sync::Arc;
use std::sync::Barrier;
use std::thread;
let tmp = TempDir::new().unwrap();
let cache = Arc::new(CacheDir::with_root(tmp.path().join("cache")));
let src_dir = TempDir::new().unwrap();
let image = src_dir.path().join("bzImage");
std::fs::write(&image, b"shared image bytes").unwrap();
const PEER_COUNT: usize = 8;
// Even peer indices form group A, odd form group B; each peer gets a
// unique built_at timestamp so we can trace whose publish won.
let group_a_inputs: BTreeSet<String> = (0..PEER_COUNT)
.filter(|i| i % 2 == 0)
.map(|i| format!("2026-04-12T10:00:{i:02}Z"))
.collect();
let group_b_inputs: BTreeSet<String> = (0..PEER_COUNT)
.filter(|i| i % 2 == 1)
.map(|i| format!("2026-04-12T10:00:{i:02}Z"))
.collect();
assert!(
group_a_inputs.is_disjoint(&group_b_inputs),
"test setup invariant: per-group input timestamps must \
be disjoint so the cross-group bleed assertion below is \
well-defined",
);
// Barrier maximizes overlap: all peers enter store() together.
let barrier = Arc::new(Barrier::new(PEER_COUNT));
let mut handles = Vec::with_capacity(PEER_COUNT);
for i in 0..PEER_COUNT {
let cache = Arc::clone(&cache);
let barrier = Arc::clone(&barrier);
let image = image.clone();
handles.push(thread::spawn(move || {
let mut meta = test_metadata("6.14.2");
// Parity picks the content group; the hash is the content-defining field.
let (label, hash) = if i % 2 == 0 {
("a", "hash-a")
} else {
("b", "hash-b")
};
meta.built_at = format!("2026-04-12T10:00:{i:02}Z");
meta.config_hash = Some(hash.to_string());
barrier.wait();
let entry = cache
.store("mixed-key", &CacheArtifacts::new(&image), &meta)
.expect("every peer's store must succeed");
// Each peer reports its group plus what the store returned.
(
label,
entry.metadata.config_hash.clone(),
entry.metadata.built_at.clone(),
)
}));
}
let results: Vec<_> = handles.into_iter().map(|h| h.join().unwrap()).collect();
// Hard per-peer invariants: every return must carry a real group hash,
// the peer's OWN group's hash, and a timestamp from that group's inputs.
for (label, observed_hash, observed_built_at) in &results {
let observed_hash_str = observed_hash.as_deref();
assert!(
matches!(observed_hash_str, Some("hash-a") | Some("hash-b")),
"peer (group={label}) observed an invalid \
config_hash {observed_hash:?} — recheck must never \
produce a third state, only return one of the two \
published group hashes",
);
let expected_hash = match *label {
"a" => "hash-a",
"b" => "hash-b",
_ => unreachable!(),
};
assert_eq!(
observed_hash_str,
Some(expected_hash),
"peer (group={label}) returned config_hash \
{observed_hash:?} — expected {expected_hash}; \
cross-group recheck collapse detected (a recheck \
hit MUST require matching content-defining hashes)",
);
let observed_hash_bytes = observed_hash_str.unwrap_or("");
let in_a = group_a_inputs.contains(observed_built_at);
let in_b = group_b_inputs.contains(observed_built_at);
assert!(
in_a || in_b,
"peer (group={label}) returned built_at \
{observed_built_at:?} that is NOT one of the \
precomputed input timestamps — recheck must \
never synthesize a fresh timestamp",
);
// Timestamp provenance must match the returned hash's group exactly.
match observed_hash_bytes {
"hash-a" => assert!(
in_a && !in_b,
"config_hash=hash-a entry returned built_at \
{observed_built_at:?} which lives in group B's \
input set — recheck-bypass on cross-group \
divergence broke and a group-A return is \
carrying a group-B timestamp",
),
"hash-b" => assert!(
in_b && !in_a,
"config_hash=hash-b entry returned built_at \
{observed_built_at:?} which lives in group A's \
input set — recheck-bypass on cross-group \
divergence broke and a group-B return is \
carrying a group-A timestamp",
),
_ => unreachable!(),
}
}
// Soft invariant: within a group, collapses (fewer distinct timestamps
// than peers) are expected but scheduling-dependent, so only log if none
// fired rather than failing the test.
let group_a_built_ats: BTreeSet<&String> = results
.iter()
.filter(|(label, _, _)| *label == "a")
.map(|(_, _, built_at)| built_at)
.collect();
let group_b_built_ats: BTreeSet<&String> = results
.iter()
.filter(|(label, _, _)| *label == "b")
.map(|(_, _, built_at)| built_at)
.collect();
let group_a_size = results.iter().filter(|(l, _, _)| *l == "a").count();
let group_b_size = results.iter().filter(|(l, _, _)| *l == "b").count();
let collapse_fired_a = group_a_built_ats.len() < group_a_size;
let collapse_fired_b = group_b_built_ats.len() < group_b_size;
if !(collapse_fired_a || collapse_fired_b) {
eprintln!(
"store_in_lock_recheck_mixed_content_peers: \
collapse did not fire on this run (group_a \
distinct={}, size={}; group_b distinct={}, \
size={}). Hard invariants still hold; collapse \
firing is probabilistic under cross-group churn.",
group_a_built_ats.len(),
group_a_size,
group_b_built_ats.len(),
group_b_size,
);
}
// Final on-disk state: one atomic winner, internally consistent.
let final_entry = cache.lookup("mixed-key").expect("entry must exist");
let final_hash = final_entry.metadata.config_hash.as_deref();
assert!(
matches!(final_hash, Some("hash-a") | Some("hash-b")),
"final on-disk config_hash {final_hash:?} must be one \
of the two published group hashes — anything else \
means publish was not atomic across overlapping writers",
);
let final_built_at = &final_entry.metadata.built_at;
let expected_set = match final_hash {
Some("hash-a") => &group_a_inputs,
Some("hash-b") => &group_b_inputs,
_ => unreachable!(),
};
assert!(
expected_set.contains(final_built_at),
"final on-disk built_at {final_built_at:?} must come \
from the input set of the winning group ({final_hash:?}) — \
a foreign timestamp would prove recheck wrote across \
the content-divergence axis",
);
}
// Eight identical-content peers race store() on one key; the head writer
// publishes and every later peer's in-lock recheck must short-circuit to
// the head writer's entry, so all observe one timestamp.
#[test]
fn store_in_lock_recheck_serialises_concurrent_peers() {
use std::sync::Arc;
use std::sync::Barrier;
use std::thread;
let tmp = TempDir::new().unwrap();
let cache = Arc::new(CacheDir::with_root(tmp.path().join("cache")));
let src_dir = TempDir::new().unwrap();
let image = src_dir.path().join("bzImage");
std::fs::write(&image, b"shared image bytes").unwrap();
const PEER_COUNT: usize = 8;
// Barrier releases all peers into store() simultaneously.
let barrier = Arc::new(Barrier::new(PEER_COUNT));
let mut handles = Vec::with_capacity(PEER_COUNT);
for i in 0..PEER_COUNT {
let cache = Arc::clone(&cache);
let barrier = Arc::clone(&barrier);
let image = image.clone();
handles.push(thread::spawn(move || {
let mut meta = test_metadata("6.14.2");
// Only built_at differs per peer — content-equal metadata, so the
// recheck is eligible to short-circuit every late writer.
meta.built_at = format!("2026-04-12T10:00:{i:02}Z");
barrier.wait();
cache
.store("race-key", &CacheArtifacts::new(&image), &meta)
.expect("every peer's store must succeed")
}));
}
let entries: Vec<_> = handles.into_iter().map(|h| h.join().unwrap()).collect();
// Distinct observed timestamps collapse to exactly one if the recheck fired.
let timestamps: std::collections::BTreeSet<_> = entries
.iter()
.map(|e| e.metadata.built_at.clone())
.collect();
assert_eq!(
timestamps.len(),
1,
"every peer must observe the same head-writer timestamp \
after the in-lock recheck short-circuits theirs; \
distinct timestamps means the recheck didn't fire and \
every peer redundantly republished. Got: {timestamps:?}",
);
let final_entry = cache.lookup("race-key").expect("entry must exist");
let head_timestamp = timestamps.iter().next().unwrap();
assert_eq!(
&final_entry.metadata.built_at, head_timestamp,
"the cached entry's built_at must match what every peer \
returned — proves the head writer's publish landed and \
every late peer short-circuited to the same on-disk \
state",
);
}
#[test]
fn store_exclusive_lock_timeout_returns_default_when_unset() {
    // No env var → the compiled-in default timeout.
    let _lock = lock_env();
    let _g = EnvVarGuard::remove(STORE_EXCLUSIVE_LOCK_TIMEOUT_ENV);
    let got = store_exclusive_lock_timeout();
    assert_eq!(
        got,
        STORE_EXCLUSIVE_LOCK_DEFAULT_TIMEOUT,
        "absent env var must return the default timeout",
    );
}
#[test]
fn store_exclusive_lock_timeout_returns_default_when_empty() {
    // An empty string behaves the same as an unset variable.
    let _lock = lock_env();
    let _g = EnvVarGuard::set(STORE_EXCLUSIVE_LOCK_TIMEOUT_ENV, "");
    let got = store_exclusive_lock_timeout();
    assert_eq!(
        got,
        STORE_EXCLUSIVE_LOCK_DEFAULT_TIMEOUT,
        "empty env var must fall through to the default",
    );
}
#[test]
fn store_exclusive_lock_timeout_parses_humantime() {
    // Humantime-style suffixes (s / m / min / h) must all parse.
    let _lock = lock_env();
    let cases: [(&str, u64); 5] = [
        ("30s", 30),
        ("2m", 120),
        ("10min", 600),
        ("1h", 3600),
        ("90s", 90),
    ];
    for (input, want_secs) in cases {
        let _g = EnvVarGuard::set(STORE_EXCLUSIVE_LOCK_TIMEOUT_ENV, input);
        let want = std::time::Duration::from_secs(want_secs);
        assert_eq!(
            store_exclusive_lock_timeout(),
            want,
            "input `{input}` must parse to {want_secs}s",
        );
    }
}
#[test]
fn store_exclusive_lock_timeout_falls_through_on_parse_error() {
    // Garbage input must not disable the timeout; it falls back to default.
    let _lock = lock_env();
    let _g = EnvVarGuard::set(STORE_EXCLUSIVE_LOCK_TIMEOUT_ENV, "not-a-duration");
    let got = store_exclusive_lock_timeout();
    assert_eq!(
        got,
        STORE_EXCLUSIVE_LOCK_DEFAULT_TIMEOUT,
        "unparseable env value must fall back to the default \
         rather than zero / disabled — a typo must not silently \
         remove the timeout",
    );
}
#[tracing_test::traced_test]
#[test]
fn store_emits_warn_when_vmlinux_strip_fails() {
    // A non-ELF vmlinux makes the strip step fail; store() must still
    // succeed (caching the raw bytes) and emit the degradation warning.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let vmlinux = src_dir.path().join("vmlinux");
    std::fs::write(&vmlinux, b"not an ELF file").unwrap();
    let artifacts = CacheArtifacts::new(&image).with_vmlinux(&vmlinux);
    cache
        .store("warn-on-strip-fail", &artifacts, &test_metadata("6.14.2"))
        .expect("strip fallback must still produce a successful store");
    assert!(
        logs_contain("vmlinux strip failed"),
        "the strip-fallback path MUST emit a tracing::warn! \
         with the 'vmlinux strip failed' literal so an operator \
         can see the strip pipeline degraded — without the \
         warn, an unstripped 300 MB vmlinux lands silently and \
         the operator can't correlate cache-bloat reports with \
         strip failures",
    );
    assert!(
        logs_contain("caching unstripped"),
        "the warn body MUST tell the operator the caller fell \
         back to caching the raw bytes (not that the cache \
         refused) — so the operator understands the cache \
         entry is usable but oversized",
    );
}
#[test]
fn store_image_copy_failure_surfaces_diagnostic() {
    // A missing source image must fail the store with the image-copy
    // prefix, and the staging dir must be cleaned up on the error path.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let missing = src_dir.path().join("never-created-bzImage");
    assert!(!missing.exists());
    let err = cache
        .store(
            "img-copy-fail",
            &CacheArtifacts::new(&missing),
            &test_metadata("6.14.2"),
        )
        .expect_err("missing source image must fail the store");
    let msg = format!("{err:#}");
    assert!(
        msg.starts_with("copy kernel image to cache:"),
        "diagnostic must START with the exact `copy kernel image \
         to cache:` prefix so an operator can attribute the \
         failure to the image-copy step (vs the stripped-vmlinux \
         `copy stripped vmlinux to cache:` arm or the \
         fallback-vmlinux `copy vmlinux to cache:` arm); got: {msg}",
    );
    for dirent in std::fs::read_dir(tmp.path().join("cache")).unwrap() {
        let name = dirent.unwrap().file_name().to_string_lossy().into_owned();
        assert!(
            !name.starts_with(".tmp-"),
            "TmpDirGuard must remove the staging directory on \
             the image-copy error path; found leftover: {name}",
        );
    }
}
#[test]
fn store_vmlinux_copy_failure_uses_exact_error_prefix() {
    // A missing vmlinux must fail the store on the fallback-copy arm with
    // its exact error prefix, and leave no staging dir behind.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let missing_vmlinux = src_dir.path().join("never-created-vmlinux");
    assert!(!missing_vmlinux.exists());
    let artifacts = CacheArtifacts::new(&image).with_vmlinux(&missing_vmlinux);
    let err = cache
        .store("vml-fallback-copy-fail", &artifacts, &test_metadata("6.14.2"))
        .expect_err("missing vmlinux must fail the store on the fallback path");
    let msg = format!("{err:#}");
    assert!(
        msg.starts_with("copy vmlinux to cache:"),
        "the fallback fs::copy arm wraps with the exact prefix \
         `copy vmlinux to cache:` — this distinguishes it from \
         the success-path stripped-copy `copy stripped vmlinux \
         to cache:` and the kernel-image arm `copy kernel image \
         to cache:`; a regression that drops the context \
         wrapping or rewords the prefix would lose the \
         arm-attribution diagnostic. Got: {msg}",
    );
    for dirent in std::fs::read_dir(tmp.path().join("cache")).unwrap() {
        let name = dirent.unwrap().file_name().to_string_lossy().into_owned();
        assert!(
            !name.starts_with(".tmp-"),
            "TmpDirGuard must remove the staging directory on \
             the vmlinux-fallback-copy error path; found \
             leftover: {name}",
        );
    }
}
#[test]
fn tmp_dir_guard_removes_staging_dir_after_failed_store() {
    // After a failed store, no .tmp- staging directory may remain and no
    // partial entry may be published.
    let tmp = TempDir::new().unwrap();
    let cache_root = tmp.path().join("cache");
    let cache = CacheDir::with_root(cache_root.clone());
    let src_dir = TempDir::new().unwrap();
    let missing = src_dir.path().join("never-created");
    let _ = cache
        .store(
            "guard-test",
            &CacheArtifacts::new(&missing),
            &test_metadata("6.14.2"),
        )
        .expect_err("missing source must fail");
    let leftover_tmp_count = if cache_root.exists() {
        std::fs::read_dir(&cache_root)
            .unwrap()
            .filter(|dirent| {
                let name = dirent.as_ref().unwrap().file_name();
                name.to_string_lossy().starts_with(".tmp-")
            })
            .count()
    } else {
        0
    };
    assert_eq!(
        leftover_tmp_count, 0,
        "TmpDirGuard's Drop impl must clean up .tmp- staging \
         directories after a failed store — found {leftover_tmp_count} leftover(s)",
    );
    assert!(
        !cache_root.join("guard-test").exists(),
        "a failed store must not publish a partial entry",
    );
}
#[test]
fn store_fails_when_tmp_dir_path_is_regular_file() {
    // Plant a regular file exactly where store() wants its staging
    // directory: the store must fail and leave the file fully intact.
    let tmp = TempDir::new().unwrap();
    let cache_root = tmp.path().join("cache");
    let cache = CacheDir::with_root(cache_root.clone());
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    std::fs::create_dir_all(&cache_root).unwrap();
    let blocking_file =
        cache_root.join(format!(".tmp-blocked-key-{}", std::process::id()));
    std::fs::write(&blocking_file, b"i am a regular file, not a directory").unwrap();
    let err = cache
        .store(
            "blocked-key",
            &CacheArtifacts::new(&image),
            &test_metadata("6.14.2"),
        )
        .expect_err(
            "pre-existing regular FILE at the tmp_dir path must \
             fail the store — fs::remove_dir_all rejects non-directories",
        );
    let msg = format!("{err:#}");
    assert!(
        !msg.is_empty(),
        "store error must carry a non-empty diagnostic; got: {msg}",
    );
    assert!(
        blocking_file.exists(),
        "the pre-existing regular file at the tmp_dir path MUST \
         remain in place after the failed store — silently \
         overwriting it would erase operator state without \
         warning",
    );
    assert_eq!(
        std::fs::read(&blocking_file).unwrap(),
        b"i am a regular file, not a directory",
        "the blocking file's CONTENTS must also be unchanged — \
         not just the inode",
    );
    assert!(
        !cache_root.join("blocked-key").exists(),
        "a failed store must not publish a partial entry under \
         the cache_key",
    );
}
#[test]
fn kconfig_status_empty_strings_classify_as_matches() {
    // Some("") cached vs "" current compares by string equality → Matches.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let img_dir = TempDir::new().unwrap();
    let image = create_fake_image(img_dir.path());
    let meta = test_metadata("6.14.2").with_ktstr_kconfig_hash(Some(String::new()));
    let entry = cache
        .store("empty-vs-empty", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert_eq!(
        entry.kconfig_status(""),
        KconfigStatus::Matches,
        "Some(\"\") cached + \"\" current must classify as \
         Matches — the predicate is string equality on the \
         inner string, not a separate emptiness check",
    );
}
#[test]
fn kconfig_status_empty_cached_vs_nonempty_current_is_stale() {
    // Some("") cached vs a non-empty caller hash is a genuine mismatch:
    // Stale, carrying the empty string verbatim.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let img_dir = TempDir::new().unwrap();
    let image = create_fake_image(img_dir.path());
    let meta = test_metadata("6.14.2").with_ktstr_kconfig_hash(Some(String::new()));
    let entry = cache
        .store("empty-vs-nonempty", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    match entry.kconfig_status("real_hash") {
        KconfigStatus::Stale { cached, current } => {
            assert_eq!(
                cached, "",
                "Stale.cached must carry the empty string \
                 verbatim — empty Some(\"\") is NOT collapsed \
                 to None at compare time",
            );
            assert_eq!(
                current, "real_hash",
                "Stale.current must carry the caller's hash",
            );
        }
        other => panic!("expected Stale, got {other:?}"),
    }
}
#[test]
fn kconfig_status_none_cached_vs_empty_current_is_untracked() {
    // No recorded hash (None) stays Untracked even for an empty caller hash.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let img_dir = TempDir::new().unwrap();
    let image = create_fake_image(img_dir.path());
    let mut meta = test_metadata("6.14.2");
    meta.ktstr_kconfig_hash = None;
    let entry = cache
        .store("none-vs-empty", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert_eq!(
        entry.kconfig_status(""),
        KconfigStatus::Untracked,
        "None cached + \"\" current MUST classify as Untracked",
    );
}
#[test]
fn kconfig_status_none_cached_returns_untracked_regardless_of_current() {
    // When the cached side has no recorded hash, the classification is
    // driven purely by that variant — the caller's input never matters.
    let cache_tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(cache_tmp.path().join("cache"));
    let image_tmp = TempDir::new().unwrap();
    let image = create_fake_image(image_tmp.path());
    let mut metadata = test_metadata("6.14.2");
    metadata.ktstr_kconfig_hash = None;
    let stored = cache
        .store("none-cached", &CacheArtifacts::new(&image), &metadata)
        .unwrap();
    assert_eq!(
        stored.kconfig_status(""),
        KconfigStatus::Untracked,
        "None cached + \"\" current must classify as Untracked — \
         the caller's hash content is ignored when the cached \
         entry has no recorded hash; a regression that \
         special-cased current==\"\" to short-circuit to Matches \
         would mistake pre-tracking-format entries for clean hits",
    );
    assert_eq!(
        stored.kconfig_status("any-hash"),
        KconfigStatus::Untracked,
        "None cached + non-empty current must also classify as \
         Untracked — the predicate is variant-driven, not \
         input-driven",
    );
}
#[test]
fn lookup_vs_list_diverge_on_corrupt_entry() {
    // The same on-disk corruption is a miss for lookup() but a visible
    // Corrupt entry for list() — the two views diverge deliberately.
    let tmp = TempDir::new().unwrap();
    let root = tmp.path().join("cache");
    let cache = CacheDir::with_root(root.clone());
    let corrupt_dir = root.join("corrupt-entry");
    fs::create_dir_all(&corrupt_dir).unwrap();
    fs::write(corrupt_dir.join("metadata.json"), b"not valid json {[").unwrap();
    assert!(
        cache.lookup("corrupt-entry").is_none(),
        "lookup() MUST return None on a corrupt entry — the \
         caller treats it as a miss and proceeds to rebuild; \
         surfacing the corruption here would force every \
         caller to handle a third state besides hit/miss",
    );
    let all = cache.list().unwrap();
    let listed = all
        .iter()
        .find(|entry| entry.key() == "corrupt-entry")
        .expect(
            "list MUST surface the corrupt entry — the \
             operator needs to see and decide what to do \
             about it (clean? investigate?)",
        );
    assert!(
        matches!(listed, ListedEntry::Corrupt { .. }),
        "list MUST classify the entry as ListedEntry::Corrupt \
         — the lookup miss is the same on-disk state as a list \
         Corrupt entry, and the variant must be the Corrupt arm \
         specifically (not just non-Valid). Got: {listed:?}",
    );
}
#[test]
fn cache_content_matches_when_all_hashes_are_none() {
    // Two freshly constructed metadata values differ only in timestamp and
    // carry no hashes at all; the predicate must still call them equal.
    let fresh_meta = |created: &str| {
        KernelMetadata::new(
            super::super::metadata::KernelSource::Tarball,
            "x86_64".to_string(),
            "bzImage".to_string(),
            created.to_string(),
        )
    };
    let cached = fresh_meta("2026-04-12T10:00:00Z");
    let caller = fresh_meta("2026-04-13T10:00:00Z");
    assert!(
        cache_content_matches(&cached, &caller, false),
        "two metadata values with every hash field set to None \
         must classify as content-equal — None == None for the \
         predicate; without this, a cache that doesn't track \
         hashes would recheck-miss on every concurrent peer \
         and redundantly republish",
    );
}
#[test]
fn cache_content_matches_all_none_with_vmlinux_on_both_sides() {
    // Same all-None-hashes scenario as above, but with the vmlinux axis
    // set on both sides — the only non-hash axis of the predicate.
    let fresh_meta = |created: &str| {
        KernelMetadata::new(
            super::super::metadata::KernelSource::Tarball,
            "x86_64".to_string(),
            "bzImage".to_string(),
            created.to_string(),
        )
    };
    let mut cached = fresh_meta("2026-04-12T10:00:00Z");
    cached.set_has_vmlinux(true);
    let caller = fresh_meta("2026-04-13T10:00:00Z");
    assert!(
        cache_content_matches(&cached, &caller, true),
        "all-None hashes + matched vmlinux presence (true=true) \
         must classify as content-equal — the vmlinux-axis is \
         the only non-hash axis of the predicate, and the \
         test_metadata-based test only covers vmlinux=false. \
         Without this case, a regression that special-cased \
         'all-None implies no vmlinux' would silently pass \
         the existing tests.",
    );
}
#[test]
fn cache_content_matches_asymmetric_none_misses() {
    // None vs Some must miss in BOTH directions: neither side's missing
    // hash may act as a wildcard that matches the other side's value.
    let meta_with_config_hash = |hash: Option<&str>| {
        let mut meta = test_metadata("6.14.2");
        meta.config_hash = hash.map(str::to_string);
        meta
    };
    assert!(
        !cache_content_matches(
            &meta_with_config_hash(Some("hash-cached")),
            &meta_with_config_hash(None),
            false,
        ),
        "cached=Some, caller=None must classify as \
         content-different — None != Some(s); a regression \
         that treated None as 'matches everything' would \
         break the recheck for any caller that lost its hash",
    );
    assert!(
        !cache_content_matches(
            &meta_with_config_hash(None),
            &meta_with_config_hash(Some("hash-caller")),
            false,
        ),
        "cached=None, caller=Some must also classify as \
         content-different — the asymmetric direction must \
         also fail to recheck",
    );
}
#[tracing_test::traced_test]
#[test]
fn lookup_silent_does_not_consume_warn_dedup_slot() {
    // lookup_silent() must not burn the once-per-key warn slot: a later
    // public lookup() on the same key still owes the operator the warning.
    let cache_tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(cache_tmp.path().join("cache"));
    let artifacts_tmp = TempDir::new().unwrap();
    let image = create_fake_image(artifacts_tmp.path());
    let key = "lookup-silent-contract";
    // A non-ELF vmlinux file is what triggers the unstripped-vmlinux warn.
    let vmlinux_path = artifacts_tmp.path().join("vmlinux");
    fs::write(&vmlinux_path, b"not an ELF file").unwrap();
    let metadata = test_metadata("6.14.2");
    let artifacts = CacheArtifacts::new(&image).with_vmlinux(&vmlinux_path);
    cache.store(key, &artifacts, &metadata).unwrap();
    let _silent = cache.lookup_silent(key);
    let _public = cache.lookup(key);
    assert!(
        logs_contain("using unstripped vmlinux"),
        "lookup() after lookup_silent() MUST emit the \
         unstripped-vmlinux warn — if lookup_silent had \
         consumed the once-per-key dedup slot, this assertion \
         would fail and the operator would never see the \
         warn for entries that store() saw first via the \
         in-lock recheck",
    );
}
#[test]
fn clean_all_count_matches_listed_entry_count() {
    // clean_all()'s reported count must cover Valid AND Corrupt entries,
    // and afterwards no non-dotfile entry may survive under the root.
    let tmp = TempDir::new().unwrap();
    let cache_root = tmp.path().join("cache");
    let cache = CacheDir::with_root(cache_root.clone());
    let src = TempDir::new().unwrap();
    let image = create_fake_image(src.path());
    // Two valid entries plus one corrupt (bare directory, no metadata).
    for (key, version) in [("valid-1", "6.13.0"), ("valid-2", "6.14.2")] {
        cache
            .store(key, &CacheArtifacts::new(&image), &test_metadata(version))
            .unwrap();
    }
    fs::create_dir_all(cache_root.join("corrupt-1")).unwrap();
    let removed = cache
        .clean_all()
        .expect("clean_all must succeed on a clean fs");
    assert_eq!(
        removed, 3,
        "clean_all MUST report a count equal to the listed \
         entry count (Valid + Corrupt) — operator-facing \
         reporting that mismatched the actual cleanup would \
         undermine trust in the diagnostic",
    );
    let surviving: Vec<String> = fs::read_dir(&cache_root)
        .unwrap()
        .flatten()
        .map(|entry| entry.file_name().to_string_lossy().into_owned())
        .filter(|name| !name.starts_with('.'))
        .collect();
    assert!(
        surviving.is_empty(),
        "every non-dotfile entry must be gone after clean_all; \
         surviving: {surviving:?}",
    );
}