use std::fmt;
use std::fs;
use std::path::{Path, PathBuf};
use anyhow::Context;
use serde::{Deserialize, Serialize};
const TMP_DIR_PREFIX: &str = ".tmp-";
use crate::flock::LOCK_DIR_NAME;
/// Where a cached kernel build originally came from.
///
/// Serialized into `metadata.json` with an internal `"type"` tag whose value
/// is the lowercase variant name (`"tarball"`, `"git"`, `"local"`).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase", tag = "type")]
#[non_exhaustive]
pub enum KernelSource {
/// Built from a released source tarball.
Tarball,
/// Built from a git checkout.
Git {
/// Resolved commit hash, when known.
git_hash: Option<String>,
/// Requested ref; serialized under the JSON key `"ref"`.
#[serde(rename = "ref")]
git_ref: Option<String>,
},
/// Built from a local source tree on disk.
Local {
/// Path of the source tree the kernel was built from, when recorded.
source_tree_path: Option<PathBuf>,
/// Commit hash of that tree, when it was a git checkout.
git_hash: Option<String>,
},
}
impl fmt::Display for KernelSource {
    /// Renders the source kind as its lowercase wire name, matching the
    /// `rename_all = "lowercase"` serde tag used for serialization.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            KernelSource::Tarball => "tarball",
            KernelSource::Git { .. } => "git",
            KernelSource::Local { .. } => "local",
        };
        f.write_str(label)
    }
}
impl KernelSource {
    /// Returns the recorded git hash when this source is `Local`; `None` for
    /// every other variant (including `Git`, whose hash is a different field).
    pub fn as_local_git_hash(&self) -> Option<&str> {
        if let KernelSource::Local { git_hash, .. } = self {
            git_hash.as_deref()
        } else {
            None
        }
    }
}
/// Per-entry record persisted as `metadata.json` inside a cache entry dir.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive]
pub struct KernelMetadata {
/// Kernel version string, when known.
pub version: Option<String>,
/// Provenance of the build.
pub source: KernelSource,
/// Target architecture string (e.g. "x86_64" in the tests below).
pub arch: String,
/// File name of the kernel image inside the entry directory.
pub image_name: String,
/// Hash of the kernel config, when recorded.
pub config_hash: Option<String>,
/// Build timestamp string; `CacheDir::list` sorts entries by it, descending.
pub built_at: String,
/// Hash of the ktstr kconfig input; `None` means the entry is untracked.
pub ktstr_kconfig_hash: Option<String>,
// Private: maintained by `CacheDir::store` through the crate-private setters.
has_vmlinux: bool,
vmlinux_stripped: bool,
/// Size of the source tree's vmlinux at store time (staleness check input).
pub source_vmlinux_size: Option<u64>,
/// Signed mtime (seconds relative to the epoch) of that vmlinux at store time.
pub source_vmlinux_mtime_secs: Option<i64>,
}
impl KernelMetadata {
pub fn new(source: KernelSource, arch: String, image_name: String, built_at: String) -> Self {
KernelMetadata {
version: None,
source,
arch,
image_name,
config_hash: None,
built_at,
ktstr_kconfig_hash: None,
has_vmlinux: false,
vmlinux_stripped: false,
source_vmlinux_size: None,
source_vmlinux_mtime_secs: None,
}
}
pub fn with_source_vmlinux_stat(mut self, size: u64, mtime_secs: i64) -> Self {
self.source_vmlinux_size = Some(size);
self.source_vmlinux_mtime_secs = Some(mtime_secs);
self
}
pub fn with_version(mut self, version: Option<String>) -> Self {
self.version = version;
self
}
pub fn with_config_hash(mut self, hash: Option<String>) -> Self {
self.config_hash = hash;
self
}
pub fn with_ktstr_kconfig_hash(mut self, hash: Option<String>) -> Self {
self.ktstr_kconfig_hash = hash;
self
}
pub fn has_vmlinux(&self) -> bool {
self.has_vmlinux
}
pub(crate) fn set_has_vmlinux(&mut self, value: bool) {
self.has_vmlinux = value;
}
pub fn vmlinux_stripped(&self) -> bool {
self.vmlinux_stripped
}
pub(crate) fn set_vmlinux_stripped(&mut self, value: bool) {
self.vmlinux_stripped = value;
}
}
/// Borrowed paths to build outputs that `CacheDir::store` copies into an entry.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct CacheArtifacts<'a> {
/// Path of the bootable kernel image (always required).
pub image: &'a Path,
/// Optional vmlinux to strip and cache alongside the image.
pub vmlinux: Option<&'a Path>,
}
impl<'a> CacheArtifacts<'a> {
pub fn new(image: &'a Path) -> Self {
CacheArtifacts {
image,
vmlinux: None,
}
}
pub fn with_vmlinux(mut self, vmlinux: &'a Path) -> Self {
self.vmlinux = Some(vmlinux);
self
}
}
/// Result of comparing an entry's recorded kconfig hash to the current one.
#[derive(Debug, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub enum KconfigStatus {
/// Recorded hash equals the current hash.
Matches,
/// Hashes differ: the cached build used an older kconfig.
Stale {
/// Hash recorded in the cache entry.
cached: String,
/// Hash computed for the current configuration.
current: String,
},
/// The entry predates hash tracking (no hash recorded).
Untracked,
}
impl KconfigStatus {
pub fn is_stale(&self) -> bool {
matches!(self, Self::Stale { .. })
}
pub fn is_untracked(&self) -> bool {
matches!(self, Self::Untracked)
}
}
impl fmt::Display for KconfigStatus {
    /// Renders the status as a short lowercase word for user-facing output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let word = match self {
            KconfigStatus::Matches => "matches",
            KconfigStatus::Stale { .. } => "stale",
            KconfigStatus::Untracked => "untracked",
        };
        f.write_str(word)
    }
}
pub use crate::kernel_path::KernelId;
/// A validated cache entry: its key, on-disk directory, and parsed metadata.
#[derive(Debug)]
#[non_exhaustive]
pub struct CacheEntry {
/// Directory name under the cache root.
pub key: String,
/// Absolute path of the entry directory.
pub path: PathBuf,
/// Contents of the entry's `metadata.json`.
pub metadata: KernelMetadata,
}
impl CacheEntry {
    /// Path of the cached kernel image inside the entry directory.
    pub fn image_path(&self) -> PathBuf {
        self.path.join(&self.metadata.image_name)
    }
    /// Path of the cached vmlinux, or `None` when the entry has none.
    pub fn vmlinux_path(&self) -> Option<PathBuf> {
        if self.metadata.has_vmlinux {
            Some(self.path.join("vmlinux"))
        } else {
            None
        }
    }
    /// Classifies the recorded kconfig hash against `current_hash`.
    pub fn kconfig_status(&self, current_hash: &str) -> KconfigStatus {
        let Some(cached) = self.metadata.ktstr_kconfig_hash.as_deref() else {
            return KconfigStatus::Untracked;
        };
        if cached == current_hash {
            KconfigStatus::Matches
        } else {
            KconfigStatus::Stale {
                cached: cached.to_string(),
                current: current_hash.to_string(),
            }
        }
    }
}
/// One directory found by `CacheDir::list`: either parseable or corrupt.
#[derive(Debug)]
#[non_exhaustive]
pub enum ListedEntry {
/// Entry with readable metadata and a present image file (boxed to keep
/// the enum small).
Valid(Box<CacheEntry>),
/// Entry that failed validation; `reason` is the human-readable string
/// that `classify_corrupt_reason` keys off.
Corrupt {
key: String,
path: PathBuf,
reason: String,
},
}
impl ListedEntry {
    /// The cache key (directory name), regardless of validity.
    pub fn key(&self) -> &str {
        match self {
            Self::Valid(entry) => &entry.key,
            Self::Corrupt { key, .. } => key,
        }
    }
    /// The entry directory path, regardless of validity.
    pub fn path(&self) -> &Path {
        match self {
            Self::Valid(entry) => &entry.path,
            Self::Corrupt { path, .. } => path,
        }
    }
    /// Borrows the entry when valid; `None` for corrupt entries.
    pub fn as_valid(&self) -> Option<&CacheEntry> {
        if let Self::Valid(entry) = self {
            Some(entry.as_ref())
        } else {
            None
        }
    }
    /// Machine-readable corruption kind; `None` for valid entries.
    pub fn error_kind(&self) -> Option<&'static str> {
        match self {
            Self::Valid(_) => None,
            Self::Corrupt { reason, .. } => Some(classify_corrupt_reason(reason)),
        }
    }
}
/// Maps a human-readable corruption reason (as produced by `read_metadata`
/// and `CacheDir::list`) to a stable machine-readable kind string.
fn classify_corrupt_reason(reason: &str) -> &'static str {
    if reason == "metadata.json missing" {
        return "missing";
    }
    // Prefix forms mirror the reason strings emitted by `read_metadata`.
    const PREFIX_KINDS: &[(&str, &str)] = &[
        ("metadata.json unreadable: ", "unreadable"),
        ("metadata.json schema drift: ", "schema_drift"),
        ("metadata.json malformed: ", "malformed"),
        ("metadata.json truncated: ", "truncated"),
        ("metadata.json parse error: ", "parse_error"),
    ];
    for &(prefix, kind) in PREFIX_KINDS {
        if reason.starts_with(prefix) {
            return kind;
        }
    }
    // `CacheDir::list` reports a present dir whose image file vanished.
    if reason.starts_with("image file ") && reason.contains("missing") {
        return "image_missing";
    }
    "unknown"
}
/// Handle to the on-disk kernel cache root directory.
#[derive(Debug)]
#[non_exhaustive]
pub struct CacheDir {
/// Cache root; entries are immediate subdirectories named by cache key.
root: PathBuf,
}
/// Prints a stderr warning when a looked-up entry carries a vmlinux that a
/// prior build failed to strip.
fn warn_if_unstripped_vmlinux(entry: &CacheEntry) {
    if !should_warn_unstripped(entry) {
        return;
    }
    eprintln!(
        "cache: using unstripped vmlinux for {} (strip failed on a prior build; \
        re-run with a clean cache to retry)",
        entry.key,
    );
}
/// True when the entry has a vmlinux that was cached without being stripped.
fn should_warn_unstripped(entry: &CacheEntry) -> bool {
    let meta = &entry.metadata;
    meta.has_vmlinux() && !meta.vmlinux_stripped()
}
impl CacheDir {
/// Opens a handle to the default cache root (see `resolve_cache_root`).
pub fn new() -> anyhow::Result<Self> {
let root = resolve_cache_root()?;
Ok(CacheDir { root })
}
/// Uses an explicit root, bypassing environment-based resolution.
pub fn with_root(root: PathBuf) -> Self {
CacheDir { root }
}
/// Resolves the default cache root without constructing a handle.
pub fn default_root() -> anyhow::Result<PathBuf> {
resolve_cache_root()
}
/// The root directory this handle operates on.
pub fn root(&self) -> &Path {
&self.root
}
/// Looks up a single entry by key.
///
/// Returns `None` — never an error — when the key fails validation, the
/// entry directory is absent, `metadata.json` cannot be read or parsed,
/// or the image file named by the metadata is missing. Warns on stderr
/// when the entry's vmlinux is present but unstripped.
pub fn lookup(&self, cache_key: &str) -> Option<CacheEntry> {
if let Err(e) = validate_cache_key(cache_key) {
tracing::warn!("invalid cache key: {e}");
return None;
}
let entry_dir = self.root.join(cache_key);
if !entry_dir.is_dir() {
return None;
}
let metadata = read_metadata(&entry_dir).ok()?;
if !entry_dir.join(&metadata.image_name).exists() {
return None;
}
let entry = CacheEntry {
key: cache_key.to_string(),
path: entry_dir,
metadata,
};
warn_if_unstripped_vmlinux(&entry);
Some(entry)
}
/// Lists every entry under the root, newest `built_at` first.
///
/// Dotfile names, non-directories, non-UTF-8 names, and in-progress
/// `.tmp-` staging dirs are skipped. Directories whose metadata fails to
/// parse, or whose image file is absent, are reported as
/// `ListedEntry::Corrupt` rather than silently dropped. A missing cache
/// root yields an empty list.
pub fn list(&self) -> anyhow::Result<Vec<ListedEntry>> {
let mut entries: Vec<ListedEntry> = Vec::new();
let read_dir = match fs::read_dir(&self.root) {
Ok(rd) => rd,
Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(entries),
Err(e) => return Err(e.into()),
};
for dir_entry in read_dir {
let dir_entry = dir_entry?;
let path = dir_entry.path();
let file_name = dir_entry.file_name();
// Lossy conversion is fine here: only used to skip dotfiles.
let name_hint = file_name.to_string_lossy();
if name_hint.starts_with('.') {
continue;
}
if !path.is_dir() {
continue;
}
// Non-UTF-8 directory names cannot be cache keys; skip them.
let name = match dir_entry.file_name().into_string() {
Ok(n) => n,
Err(_) => continue,
};
if name.starts_with(TMP_DIR_PREFIX) {
continue;
}
match read_metadata(&path) {
Ok(metadata) => {
let image_path = path.join(&metadata.image_name);
if image_path.exists() {
entries.push(ListedEntry::Valid(Box::new(CacheEntry {
key: name,
path,
metadata,
})));
} else {
entries.push(ListedEntry::Corrupt {
key: name,
path,
reason: format!(
"image file {} missing from entry directory",
metadata.image_name
),
});
}
}
Err(reason) => {
tracing::info!(
entry = %name,
path = %path.display(),
%reason,
"cache entry corrupt at list-time",
);
entries.push(ListedEntry::Corrupt {
key: name,
path,
reason,
});
}
}
}
// Descending by built_at; corrupt entries compare as `None`, which
// sorts after every `Some` under this reversed comparison.
entries.sort_by(|a, b| {
let a_time = a.as_valid().map(|e| e.metadata.built_at.as_str());
let b_time = b.as_valid().map(|e| e.metadata.built_at.as_str());
b_time.cmp(&a_time)
});
Ok(entries)
}
/// Atomically installs `artifacts` plus `metadata` under `cache_key`.
///
/// Stages everything in a pid-tagged `.tmp-` directory, then renames it
/// into place; when the destination already exists the two directories
/// are exchanged atomically instead. A provided vmlinux is debug-stripped
/// first; on strip failure it is cached unstripped and `vmlinux_stripped`
/// records that. Holds this key's exclusive flock for the whole store.
pub fn store(
&self,
cache_key: &str,
artifacts: &CacheArtifacts<'_>,
metadata: &KernelMetadata,
) -> anyhow::Result<CacheEntry> {
validate_cache_key(cache_key)?;
validate_filename(&metadata.image_name)?;
let _store_lock =
self.acquire_exclusive_lock_blocking(cache_key, STORE_EXCLUSIVE_LOCK_TIMEOUT)?;
let final_dir = self.root.join(cache_key);
// Pid suffix lets `clean_orphaned_tmp_dirs` detect dead owners later.
let tmp_dir = self.root.join(format!(
"{TMP_DIR_PREFIX}{}-{}",
cache_key,
std::process::id(),
));
if tmp_dir.exists() {
fs::remove_dir_all(&tmp_dir)?;
}
// Best-effort: failure to clean other processes' leftovers is non-fatal.
if let Err(e) = clean_orphaned_tmp_dirs(&self.root) {
tracing::warn!(err = %format!("{e:#}"), "clean_orphaned_tmp_dirs failed; continuing store");
}
fs::create_dir_all(&tmp_dir)?;
// Removes tmp_dir on early return; a no-op once the rename succeeds.
let _guard = TmpDirGuard(&tmp_dir);
let image_dest = tmp_dir.join(&metadata.image_name);
fs::copy(artifacts.image, &image_dest)
.map_err(|e| anyhow::anyhow!("copy kernel image to cache: {e}"))?;
let (has_vmlinux, vmlinux_stripped) = if let Some(vmlinux) = artifacts.vmlinux {
let vmlinux_dest = tmp_dir.join("vmlinux");
match strip_vmlinux_debug(vmlinux) {
Ok(stripped) => {
fs::copy(stripped.path(), &vmlinux_dest)
.map_err(|e| anyhow::anyhow!("copy stripped vmlinux to cache: {e}"))?;
(true, true)
}
Err(e) => {
// Strip failure is non-fatal: fall back to caching unstripped.
eprintln!(
"cache: vmlinux strip failed for {cache_key} ({e:#}); \
caching unstripped (larger on-disk payload). \
See `ktstr cache list --json` vmlinux_stripped field.",
);
tracing::warn!(
cache_key = cache_key,
err = %format!("{e:#}"),
"vmlinux strip failed, caching unstripped",
);
fs::copy(vmlinux, &vmlinux_dest)
.map_err(|e| anyhow::anyhow!("copy vmlinux to cache: {e}"))?;
(true, false)
}
}
} else {
(false, false)
};
let mut meta = metadata.clone();
meta.set_has_vmlinux(has_vmlinux);
meta.set_vmlinux_stripped(vmlinux_stripped);
let meta_json = serde_json::to_string_pretty(&meta)?;
fs::write(tmp_dir.join("metadata.json"), meta_json)
.map_err(|e| anyhow::anyhow!("write cache metadata: {e}"))?;
match fs::rename(&tmp_dir, &final_dir) {
Ok(()) => {}
Err(e)
if e.raw_os_error() == Some(libc::ENOTEMPTY)
|| e.raw_os_error() == Some(libc::EEXIST) =>
{
// Destination already populated: swap old and new atomically; the
// displaced old entry ends up at tmp_dir and the guard removes it.
atomic_swap_dirs(&tmp_dir, &final_dir)?;
}
Err(e) => {
return Err(anyhow::anyhow!("atomic rename cache entry: {e}"));
}
}
Ok(CacheEntry {
key: cache_key.to_string(),
path: final_dir,
metadata: meta,
})
}
/// Removes every listed entry; returns the number removed.
pub fn clean_all(&self) -> anyhow::Result<usize> {
self.remove_entries(self.list()?)
}
/// Keeps the `keep` newest entries (list order) and removes the rest.
pub fn clean_keep(&self, keep: usize) -> anyhow::Result<usize> {
self.remove_entries(self.list()?.into_iter().skip(keep))
}
/// Deletes each entry's directory; stops at the first I/O failure, so the
/// returned count is only reached when every removal succeeded.
fn remove_entries<I: IntoIterator<Item = ListedEntry>>(
&self,
iter: I,
) -> anyhow::Result<usize> {
let to_remove: Vec<_> = iter.into_iter().collect();
let count = to_remove.len();
for entry in &to_remove {
fs::remove_dir_all(entry.path())?;
}
Ok(count)
}
/// Lockfile path for a key: `<root>/<LOCK_DIR_NAME>/<key>.lock`.
pub(crate) fn lock_path(&self, cache_key: &str) -> PathBuf {
self.root
.join(LOCK_DIR_NAME)
.join(format!("{cache_key}.lock"))
}
/// Creates the lock subdirectory if it does not exist yet.
pub(crate) fn ensure_lock_dir(&self) -> anyhow::Result<()> {
let dir = self.root.join(LOCK_DIR_NAME);
fs::create_dir_all(&dir)
.with_context(|| format!("create lock subdirectory {}", dir.display()))
}
/// Takes a shared (reader) flock on the entry, waiting up to the default
/// timeout. NOTE(review): unlike `try_acquire_exclusive_lock`, this does
/// not call `ensure_lock_dir` first — presumably
/// `acquire_flock_with_timeout` creates the path itself; confirm.
pub fn acquire_shared_lock(&self, cache_key: &str) -> anyhow::Result<SharedLockGuard> {
validate_cache_key(cache_key)?;
let path = self.lock_path(cache_key);
let fd = crate::flock::acquire_flock_with_timeout(
&path,
FlockMode::Shared,
SHARED_LOCK_DEFAULT_TIMEOUT,
&format!("cache entry {cache_key:?}"),
None,
)?;
Ok(SharedLockGuard { fd })
}
/// Takes the entry's exclusive flock, waiting up to `timeout`.
pub fn acquire_exclusive_lock_blocking(
&self,
cache_key: &str,
timeout: std::time::Duration,
) -> anyhow::Result<ExclusiveLockGuard> {
validate_cache_key(cache_key)?;
let path = self.lock_path(cache_key);
let fd = crate::flock::acquire_flock_with_timeout(
&path,
FlockMode::Exclusive,
timeout,
&format!("cache entry {cache_key:?}"),
None,
)?;
Ok(ExclusiveLockGuard { fd })
}
/// Non-blocking exclusive flock; on contention, fails with a message that
/// names the processes currently holding the lock.
pub fn try_acquire_exclusive_lock(
&self,
cache_key: &str,
) -> anyhow::Result<ExclusiveLockGuard> {
validate_cache_key(cache_key)?;
self.ensure_lock_dir()?;
let path = self.lock_path(cache_key);
match crate::flock::try_flock(&path, crate::flock::FlockMode::Exclusive)? {
Some(fd) => Ok(ExclusiveLockGuard { fd }),
None => {
let holders = crate::flock::read_holders(&path).unwrap_or_default();
anyhow::bail!(
"cache entry {cache_key:?} is locked by active test runs \
(lockfile {lockfile}, holders: {holders}). Wait for \
those tests to finish, or kill them, then retry.",
lockfile = path.display(),
holders = crate::flock::format_holder_list(&holders),
);
}
}
}
}
// Lock-wait budgets: shared readers give up after 10s; `store` waits up to
// 60s since a concurrent build legitimately holds the exclusive lock longer.
const SHARED_LOCK_DEFAULT_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const STORE_EXCLUSIVE_LOCK_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(60);
use crate::flock::FlockMode;
/// RAII guard for a shared (reader) flock; the lock is released when the
/// owned fd is dropped.
#[derive(Debug)]
pub struct SharedLockGuard {
#[allow(dead_code)]
fd: std::os::fd::OwnedFd,
}
/// RAII guard for an exclusive (writer) flock; the lock is released when the
/// owned fd is dropped.
#[derive(Debug)]
pub struct ExclusiveLockGuard {
#[allow(dead_code)]
fd: std::os::fd::OwnedFd,
}
/// Removes `.tmp-` staging dirs left behind by crashed processes.
///
/// Staging dirs are named `<TMP_DIR_PREFIX><key>-<pid>`; a dir whose pid is
/// no longer alive belonged to a process that died mid-store and can be
/// reclaimed. Per-entry failures are logged and skipped, never fatal.
fn clean_orphaned_tmp_dirs(cache_root: &Path) -> anyhow::Result<()> {
    if !cache_root.is_dir() {
        return Ok(());
    }
    let read_dir = match fs::read_dir(cache_root) {
        Ok(rd) => rd,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(()),
        Err(e) => anyhow::bail!("read cache root {}: {e}", cache_root.display()),
    };
    for dir_entry in read_dir {
        let dir_entry = match dir_entry {
            Ok(d) => d,
            Err(e) => {
                tracing::warn!(err = %format!("{e:#}"), "skip unreadable cache root entry");
                continue;
            }
        };
        let Ok(name) = dir_entry.file_name().into_string() else {
            continue;
        };
        if !name.starts_with(TMP_DIR_PREFIX) {
            continue;
        }
        // The owning pid is the suffix after the last '-'.
        let Some((_, pid_str)) = name.rsplit_once('-') else {
            continue;
        };
        if pid_str.is_empty() {
            continue;
        }
        let Ok(pid) = pid_str.parse::<i32>() else {
            continue;
        };
        if pid <= 0 {
            continue;
        }
        // Signal 0 probes liveness without delivering anything; only a
        // definitive ESRCH ("no such process") marks the owner as dead.
        let dead = matches!(
            nix::sys::signal::kill(nix::unistd::Pid::from_raw(pid), None),
            Err(nix::errno::Errno::ESRCH),
        );
        if !dead {
            continue;
        }
        let path = dir_entry.path();
        match fs::remove_dir_all(&path) {
            Ok(()) => {
                tracing::info!(
                    path = %path.display(),
                    orphan_pid = pid,
                    "cleaned orphaned .tmp- dir from prior crashed process",
                );
            }
            Err(e) => {
                tracing::warn!(
                    err = %format!("{e:#}"),
                    path = %path.display(),
                    "failed to remove orphaned .tmp- dir; leaving in place",
                );
            }
        }
    }
    Ok(())
}
/// Rejects cache keys that could escape the cache root or collide with
/// reserved names when used as a directory name.
///
/// # Errors
/// Fails for whitespace-only keys, path separators, `.`/`..` and embedded
/// `..`, null bytes, and keys starting with the reserved `.tmp-` prefix.
fn validate_cache_key(key: &str) -> anyhow::Result<()> {
    // `trim().is_empty()` already covers the fully-empty string, so the
    // former separate `is_empty()` test was redundant.
    if key.trim().is_empty() {
        anyhow::bail!("cache key must not be empty or whitespace-only");
    }
    if key.contains('/') || key.contains('\\') {
        anyhow::bail!("cache key must not contain path separators: {key:?}");
    }
    // Checked before the `contains("..")` test so bare `..` gets the more
    // specific "directory reference" message.
    if key == "." || key == ".." {
        anyhow::bail!("cache key must not be a directory reference: {key:?}");
    }
    if key.contains("..") {
        anyhow::bail!("cache key must not contain path traversal: {key:?}");
    }
    if key.contains('\0') {
        anyhow::bail!("cache key must not contain null bytes");
    }
    if key.starts_with(TMP_DIR_PREFIX) {
        anyhow::bail!("cache key must not start with {TMP_DIR_PREFIX} (reserved): {key:?}");
    }
    Ok(())
}
/// Rejects image file names that could escape the entry directory.
///
/// # Errors
/// Fails for empty names, path separators, embedded `..`, and null bytes.
fn validate_filename(name: &str) -> anyhow::Result<()> {
    if name.is_empty() {
        anyhow::bail!("image name must not be empty");
    }
    let has_separator = name.contains('/') || name.contains('\\');
    if has_separator {
        anyhow::bail!("image name must not contain path separators: {name:?}");
    }
    if name.contains("..") {
        anyhow::bail!("image name must not contain path traversal: {name:?}");
    }
    if name.contains('\0') {
        anyhow::bail!("image name must not contain null bytes");
    }
    Ok(())
}
/// Best-effort cleanup guard: removes the staging tmp dir on drop. After a
/// successful rename the dir no longer exists, so removal is a no-op then.
struct TmpDirGuard<'a>(&'a Path);
impl Drop for TmpDirGuard<'_> {
fn drop(&mut self) {
// Errors deliberately ignored: drop must never panic.
let _ = fs::remove_dir_all(self.0);
}
}
/// Atomically exchanges two directories via renameat2(RENAME_EXCHANGE).
///
/// Used by `CacheDir::store` when a plain rename fails because the final
/// entry dir already exists; both paths must exist for the exchange.
/// NOTE(review): RENAME_EXCHANGE needs kernel/filesystem support — confirm
/// the supported-platform story for this fallback path.
fn atomic_swap_dirs(src: &Path, dst: &Path) -> anyhow::Result<()> {
rustix::fs::renameat_with(
rustix::fs::CWD,
src,
rustix::fs::CWD,
dst,
rustix::fs::RenameFlags::EXCHANGE,
)
.map_err(|e| {
anyhow::anyhow!(
"renameat2(RENAME_EXCHANGE) {} <-> {}: {e}",
src.display(),
dst.display(),
)
})
}
fn read_metadata(dir: &Path) -> Result<KernelMetadata, String> {
let meta_path = dir.join("metadata.json");
let contents = match fs::read_to_string(&meta_path) {
Ok(c) => c,
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
return Err("metadata.json missing".to_string());
}
Err(e) => return Err(format!("metadata.json unreadable: {e}")),
};
serde_json::from_str(&contents).map_err(|e| match e.classify() {
serde_json::error::Category::Data => format!("metadata.json schema drift: {e}"),
serde_json::error::Category::Syntax => format!("metadata.json malformed: {e}"),
serde_json::error::Category::Eof => format!("metadata.json truncated: {e}"),
serde_json::error::Category::Io => {
tracing::error!(
err = %e,
"serde_json::from_str returned Category::Io — unexpected for in-memory input",
);
format!("metadata.json parse error: {e}")
}
})
}
/// Returns the Local source tree path when its `vmlinux` still matches the
/// size and mtime recorded in the cache entry's metadata; `None` otherwise.
pub fn prefer_source_tree_for_dwarf(dir: &Path) -> Option<PathBuf> {
    let metadata = read_metadata(dir).ok()?;
    // Both stats must have been recorded at store time.
    let want_size = metadata.source_vmlinux_size?;
    let want_mtime = metadata.source_vmlinux_mtime_secs?;
    let src_path = match metadata.source {
        KernelSource::Local {
            source_tree_path, ..
        } => source_tree_path?,
        _ => return None,
    };
    let vmlinux = src_path.join("vmlinux");
    let stat = std::fs::metadata(&vmlinux).ok()?;
    if !stat.is_file() || stat.len() != want_size {
        return None;
    }
    let modified = stat.modified().ok()?;
    // Signed seconds relative to the epoch; pre-epoch mtimes go negative.
    let cur_mtime = match modified.duration_since(std::time::UNIX_EPOCH) {
        Ok(after) => after.as_secs() as i64,
        Err(_) => {
            let before = std::time::UNIX_EPOCH.duration_since(modified).ok()?;
            -(before.as_secs() as i64)
        }
    };
    (cur_mtime == want_mtime).then_some(src_path)
}
/// Extracts the recorded Local source-tree path from an entry's metadata.
/// Best-effort: any read/parse failure, non-Local source, or missing path
/// yields `None`.
pub fn recover_local_source_tree(dir: &Path) -> Option<PathBuf> {
    match read_metadata(dir).ok()?.source {
        KernelSource::Local {
            source_tree_path: Some(p),
            ..
        } => Some(p),
        _ => None,
    }
}
// Sections every stripped vmlinux must structurally retain: the empty name of
// the null section and the section-name string table. Consumer modules
// (monitor, probe::btf) contribute their own keep lists via `is_keep_section`.
const STRUCTURAL_KEEP_SECTIONS: &[&[u8]] = &[
b"", b".shstrtab", ];
// Sections speculatively converted to zero-filled NOBITS instead of deleted.
const SPECULATIVE_ZERO_DATA_SECTIONS: &[&[u8]] = &[b".init.data"];
/// True when `name` appears in any module's keep list; such sections survive
/// `strip_keep_list` untouched.
fn is_keep_section(name: &[u8]) -> bool {
    let lists: [&[&[u8]]; 4] = [
        STRUCTURAL_KEEP_SECTIONS,
        crate::monitor::symbols::VMLINUX_KEEP_SECTIONS,
        crate::monitor::VMLINUX_KEEP_SECTIONS,
        crate::probe::btf::VMLINUX_KEEP_SECTIONS,
    ];
    lists.iter().any(|list| list.contains(&name))
}
/// True when `name` is declared (here or by the monitor symbols module) as a
/// section whose data may be replaced with zero-length NOBITS.
fn is_zero_data_section(name: &[u8]) -> bool {
    let lists: [&[&[u8]]; 2] = [
        SPECULATIVE_ZERO_DATA_SECTIONS,
        crate::monitor::symbols::VMLINUX_ZERO_DATA_SECTIONS,
    ];
    lists.iter().any(|list| list.contains(&name))
}
/// A stripped vmlinux written into a private temp dir; the dir (and thus the
/// file) lives exactly as long as this value, since `TempDir` deletes on drop.
#[derive(Debug)]
pub(crate) struct StrippedVmlinux {
// Held only to keep the temp dir alive for the lifetime of `path`.
_tmp: tempfile::TempDir,
path: PathBuf,
}
impl StrippedVmlinux {
/// Path of the stripped vmlinux file inside the owned temp dir.
pub fn path(&self) -> &Path {
&self.path
}
}
/// Produces a debug-stripped copy of `vmlinux_path` in a private temp dir.
///
/// Relocation sections are neutralized first (`neutralize_relocs`), then the
/// keep-list strip is attempted; if that fails, a conservative `.debug_*`
/// prefix strip is the fallback. The returned [`StrippedVmlinux`] owns the
/// temp dir, so the stripped file survives until the value is dropped.
///
/// # Errors
/// Fails if the input cannot be read, ELF preprocessing fails, both strip
/// strategies fail, or the temp dir/file cannot be created/written.
pub(crate) fn strip_vmlinux_debug(vmlinux_path: &Path) -> anyhow::Result<StrippedVmlinux> {
    let raw =
        fs::read(vmlinux_path).map_err(|e| anyhow::anyhow!("read vmlinux for stripping: {e}"))?;
    let original_size = raw.len();
    let data =
        neutralize_relocs(&raw).map_err(|e| anyhow::anyhow!("preprocess vmlinux ELF: {e}"))?;
    let out = match strip_keep_list(&data) {
        Ok(buf) => buf,
        Err(e) => {
            tracing::warn!("keep-list strip failed ({e:#}), falling back to debug-only strip");
            strip_debug_prefix(&data)?
        }
    };
    let stripped_size = out.len();
    // BUG FIX: the rewrite can in principle emit a file *larger* than the
    // input; plain `original_size - stripped_size` would then underflow
    // (panic in debug builds, wrap to a huge value in release). Saturate to 0.
    let saved_mb = original_size.saturating_sub(stripped_size) as f64 / (1024.0 * 1024.0);
    tracing::debug!(
        original = original_size,
        stripped = stripped_size,
        saved_mb = format!("{saved_mb:.0}"),
        "strip_vmlinux_debug",
    );
    let tmp_dir = tempfile::TempDir::new()
        .map_err(|e| anyhow::anyhow!("create temp dir for stripped vmlinux: {e}"))?;
    let stripped_path = tmp_dir.path().join("vmlinux");
    fs::write(&stripped_path, &out).map_err(|e| anyhow::anyhow!("write stripped vmlinux: {e}"))?;
    Ok(StrippedVmlinux {
        _tmp: tmp_dir,
        path: stripped_path,
    })
}
/// Rewrites relocation section headers (REL/RELA/RELR/CREL) in a copy of the
/// input so they read as empty `SHT_PROGBITS`: each matching header's
/// `sh_type` is patched and its `sh_size` zeroed via direct byte edits.
/// Section payload bytes are left in place; only headers change.
///
/// Field layout used: `sh_type` sits at byte 4 (4 bytes) of a section header
/// in both ELF32 and ELF64; `sh_size` is at byte 32 (8 bytes) for ELF64 and
/// byte 20 (4 bytes) for ELF32. All offset arithmetic is checked so a
/// malformed header table errors out instead of panicking or writing OOB.
fn neutralize_relocs(data: &[u8]) -> anyhow::Result<Vec<u8>> {
// RELR/CREL constants come from `object`; the rest from goblin.
const SHT_RELR: u32 = object::elf::SHT_RELR;
const SHT_CREL: u32 = object::elf::SHT_CREL;
const SHT_PROGBITS: u32 = goblin::elf::section_header::SHT_PROGBITS;
let elf = goblin::elf::Elf::parse(data)
.map_err(|e| anyhow::anyhow!("parse vmlinux ELF for preprocess: {e}"))?;
let mut out = data.to_vec();
let shoff = elf.header.e_shoff as usize;
let shentsize = elf.header.e_shentsize as usize;
// (offset, width) of sh_size within one section header entry.
let (sh_size_offset, sh_size_width) = if elf.is_64 { (32, 8) } else { (20, 4) };
let sh_type_offset: usize = 4;
let sh_type_width: usize = 4;
let le = elf.little_endian;
use goblin::elf::section_header::{SHT_REL, SHT_RELA};
for (i, sh) in elf.section_headers.iter().enumerate() {
let is_reloc = matches!(sh.sh_type, SHT_REL | SHT_RELA | SHT_RELR | SHT_CREL);
if !is_reloc {
continue;
}
// Byte offset of this header entry within the file, overflow-checked.
let entry_offset = shoff
.checked_add(
i.checked_mul(shentsize)
.ok_or_else(|| anyhow::anyhow!("section header table overflow at index {i}"))?,
)
.ok_or_else(|| anyhow::anyhow!("section header offset overflow at index {i}"))?;
let type_offset = entry_offset
.checked_add(sh_type_offset)
.ok_or_else(|| anyhow::anyhow!("sh_type offset overflow at index {i}"))?;
let type_end = type_offset
.checked_add(sh_type_width)
.ok_or_else(|| anyhow::anyhow!("sh_type end overflow at index {i}"))?;
let size_offset = entry_offset
.checked_add(sh_size_offset)
.ok_or_else(|| anyhow::anyhow!("sh_size offset overflow at index {i}"))?;
let size_end = size_offset
.checked_add(sh_size_width)
.ok_or_else(|| anyhow::anyhow!("sh_size end overflow at index {i}"))?;
if type_end > out.len() || size_end > out.len() {
anyhow::bail!("section header {i} sh_type or sh_size field extends past file end");
}
// Respect the file's endianness when writing the new sh_type value.
let type_bytes: [u8; 4] = if le {
SHT_PROGBITS.to_le_bytes()
} else {
SHT_PROGBITS.to_be_bytes()
};
out[type_offset..type_end].copy_from_slice(&type_bytes);
out[size_offset..size_end].fill(0);
}
Ok(out)
}
/// Strips an ELF down to the keep-list using `object`'s builder API.
///
/// Per section: keep-list names are left untouched; declared zero-data names
/// become `SHT_NOBITS` with no file payload; other executable
/// (`SHF_EXECINSTR`) sections also become `SHT_NOBITS` (header survives,
/// bytes dropped); everything else is deleted. Bails if the result would
/// have no named symbols, which indicates the keep list went wrong.
fn strip_keep_list(data: &[u8]) -> anyhow::Result<Vec<u8>> {
let mut builder = object::build::elf::Builder::read(data)
.map_err(|e| anyhow::anyhow!("parse vmlinux ELF: {e}"))?;
for section in builder.sections.iter_mut() {
let name = section.name.as_slice();
if is_keep_section(name) {
continue;
}
if is_zero_data_section(name) {
section.sh_type = object::elf::SHT_NOBITS;
section.data = object::build::elf::SectionData::UninitializedData(0);
continue;
}
let is_code = section.sh_flags & u64::from(object::elf::SHF_EXECINSTR) != 0;
if is_code {
section.sh_type = object::elf::SHT_NOBITS;
section.data = object::build::elf::SectionData::UninitializedData(0);
} else {
section.delete = true;
}
}
// Sanity check: an empty symbol table means the strip removed too much.
let named_syms = builder
.symbols
.iter()
.filter(|s| !s.delete && !s.name.as_slice().is_empty())
.count();
if named_syms == 0 {
anyhow::bail!("keep-list strip emptied symbol table (0 named symbols)");
}
let mut out = Vec::new();
builder
.write(&mut out)
.map_err(|e| anyhow::anyhow!("rewrite stripped vmlinux: {e}"))?;
Ok(out)
}
/// Conservative fallback strip: removes only well-known debug, comment, and
/// relocation sections by name, leaving everything else intact.
fn strip_debug_prefix(data: &[u8]) -> anyhow::Result<Vec<u8>> {
    const RELOC_PREFIXES: [&[u8]; 4] = [b".rela.", b".rel.", b".relr.", b".crel."];
    crate::elf_strip::rewrite(data, |name| {
        name.starts_with(b".debug_")
            || name == b".comment"
            || RELOC_PREFIXES.iter().any(|prefix| name.starts_with(prefix))
    })
    .map_err(|e| anyhow::anyhow!("rewrite stripped vmlinux (fallback): {e}"))
}
/// Resolves the cache root, honoring (in order): `KTSTR_CACHE_DIR` (used
/// verbatim, no suffix), `$XDG_CACHE_HOME/ktstr/<suffix>`, then
/// `$HOME/.cache/ktstr/<suffix>` after `HOME` passes validation.
///
/// # Errors
/// Fails when `KTSTR_CACHE_DIR` is set but not UTF-8, or when `HOME`
/// validation fails (see `validate_home_for_cache`).
pub(crate) fn resolve_cache_root_with_suffix(suffix: &str) -> anyhow::Result<PathBuf> {
    match std::env::var("KTSTR_CACHE_DIR") {
        Ok(dir) if !dir.is_empty() => return Ok(PathBuf::from(dir)),
        // Empty or unset: fall through to XDG/HOME resolution.
        Ok(_) | Err(std::env::VarError::NotPresent) => {}
        Err(std::env::VarError::NotUnicode(raw)) => {
            anyhow::bail!(
                "KTSTR_CACHE_DIR contains non-UTF-8 bytes ({} bytes): {:?}. \
                ktstr requires a UTF-8 cache path — set KTSTR_CACHE_DIR \
                to an ASCII/UTF-8 directory (e.g. `/tmp/ktstr-cache`) or \
                unset it to fall back to $XDG_CACHE_HOME/$HOME.",
                raw.len(),
                raw,
            );
        }
    }
    match std::env::var("XDG_CACHE_HOME") {
        Ok(xdg) if !xdg.is_empty() => Ok(PathBuf::from(xdg).join("ktstr").join(suffix)),
        _ => {
            let home = validate_home_for_cache()?;
            Ok(home.join(".cache").join("ktstr").join(suffix))
        }
    }
}
pub(crate) fn validate_home_for_cache() -> anyhow::Result<PathBuf> {
let home = match std::env::var("HOME") {
Ok(v) if !v.is_empty() => v,
Ok(_) => {
anyhow::bail!(
"HOME is set to the empty string; cannot resolve cache directory. \
An empty HOME usually means a Dockerfile or shell rc has \
`export HOME=` or `ENV HOME=` with no value. Either set HOME \
to a real absolute path, or set KTSTR_CACHE_DIR to an absolute \
path (e.g. /tmp/ktstr-cache) or XDG_CACHE_HOME to specify a \
cache location explicitly."
);
}
Err(_) => {
anyhow::bail!(
"HOME is unset; cannot resolve cache directory. \
The container init or login shell did not assign HOME — set \
it to an absolute path, or set KTSTR_CACHE_DIR to an absolute \
path (e.g. /tmp/ktstr-cache) or XDG_CACHE_HOME to specify a \
cache location explicitly."
);
}
};
if home == "/" {
anyhow::bail!(
"HOME is `/`; the resulting cache path /.cache/ktstr would alias the \
root filesystem rather than naming a user cache. This usually means \
the process inherited HOME from a container init or root login that \
did not set a real home. Set KTSTR_CACHE_DIR to an absolute path \
(e.g. /tmp/ktstr-cache) or XDG_CACHE_HOME to bypass HOME entirely."
);
}
if !home.starts_with('/') {
anyhow::bail!(
"HOME={home:?} is not an absolute path; ktstr requires HOME to start \
with `/` so the cache root resolves consistently regardless of the \
current working directory. Set HOME to an absolute path, or set \
KTSTR_CACHE_DIR / XDG_CACHE_HOME to a specific cache location."
);
}
Ok(PathBuf::from(home))
}
/// Default cache root for kernel entries: `<cache base>/kernels`.
fn resolve_cache_root() -> anyhow::Result<PathBuf> {
resolve_cache_root_with_suffix("kernels")
}
/// Returns whether `p`'s parent directory canonicalizes to somewhere under
/// the canonicalized cache root. Conservative: any resolution or
/// canonicalization failure is logged at debug level and counts as "outside".
pub(crate) fn path_inside_cache_root(p: &Path) -> bool {
    let canon_root = match resolve_cache_root() {
        Ok(root) => match fs::canonicalize(&root) {
            Ok(canon) => canon,
            Err(e) => {
                tracing::debug!(
                    root = %root.display(),
                    err = %e,
                    "cache root canonicalize failed; treating path as outside cache",
                );
                return false;
            }
        },
        Err(e) => {
            tracing::debug!(
                err = %e,
                "cache root unresolvable; treating path as outside cache",
            );
            return false;
        }
    };
    // A missing or empty parent (e.g. bare file name) cannot be inside.
    let parent = match p.parent() {
        Some(parent) if !parent.as_os_str().is_empty() => parent,
        _ => return false,
    };
    match fs::canonicalize(parent) {
        Ok(canon_parent) => canon_parent.starts_with(&canon_root),
        Err(e) => {
            tracing::debug!(
                parent = %parent.display(),
                err = %e,
                "input path parent canonicalize failed; treating as outside cache",
            );
            false
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use tempfile::TempDir;
// Test helper: human-readable name for a standard ELF section-header type.
fn sh_type_name(t: u32) -> &'static str {
use goblin::elf::section_header::{
SHT_DYNAMIC, SHT_DYNSYM, SHT_HASH, SHT_NOBITS, SHT_NOTE, SHT_NULL, SHT_PROGBITS,
SHT_REL, SHT_RELA, SHT_SHLIB, SHT_STRTAB, SHT_SYMTAB,
};
match t {
SHT_NULL => "SHT_NULL",
SHT_PROGBITS => "SHT_PROGBITS",
SHT_SYMTAB => "SHT_SYMTAB",
SHT_STRTAB => "SHT_STRTAB",
SHT_RELA => "SHT_RELA",
SHT_HASH => "SHT_HASH",
SHT_DYNAMIC => "SHT_DYNAMIC",
SHT_NOTE => "SHT_NOTE",
SHT_NOBITS => "SHT_NOBITS",
SHT_REL => "SHT_REL",
SHT_SHLIB => "SHT_SHLIB",
SHT_DYNSYM => "SHT_DYNSYM",
_ => "SHT_UNKNOWN",
}
}
// Test helper: minimal Tarball-sourced metadata record for serde tests.
fn test_metadata(version: &str) -> KernelMetadata {
KernelMetadata {
version: Some(version.to_string()),
source: KernelSource::Tarball,
arch: "x86_64".to_string(),
image_name: "bzImage".to_string(),
config_hash: Some("abc123".to_string()),
built_at: "2026-04-12T10:00:00Z".to_string(),
ktstr_kconfig_hash: Some("def456".to_string()),
has_vmlinux: false,
vmlinux_stripped: false,
source_vmlinux_size: None,
source_vmlinux_mtime_secs: None,
}
}
// Test helper: writes a placeholder bzImage into `dir` and returns its path.
fn create_fake_image(dir: &Path) -> PathBuf {
let image = dir.join("bzImage");
fs::write(&image, b"fake kernel image").unwrap();
image
}
// Test helper: builds a minimal little-endian ELF with a 64-byte .text
// section and one named symbol; panics for arches other than X86_64/I386.
fn build_base_elf_with_text_symbol(
arch: object::Architecture,
) -> object::write::Object<'static> {
use object::write;
let sym_size = match arch {
object::Architecture::X86_64 => 8,
object::Architecture::I386 => 4,
other => panic!(
"build_base_elf_with_text_symbol: unsupported arch {other:?}; supported: X86_64, I386",
),
};
let mut obj =
write::Object::new(object::BinaryFormat::Elf, arch, object::Endianness::Little);
let text_id = obj.add_section(Vec::new(), b".text".to_vec(), object::SectionKind::Text);
obj.append_section_data(text_id, &[0xCC; 64], 1);
let _ = obj.add_symbol(write::Symbol {
name: b"test_text_symbol".to_vec(),
value: 0x0,
size: sym_size,
kind: object::SymbolKind::Data,
scope: object::SymbolScope::Compilation,
weak: false,
section: write::SymbolSection::Section(text_id),
flags: object::SymbolFlags::None,
});
obj
}
// The helper must reject architectures it does not model.
#[test]
#[should_panic(expected = "unsupported arch")]
fn build_base_elf_with_text_symbol_panics_on_unsupported_arch() {
let _ = build_base_elf_with_text_symbol(object::Architecture::Aarch64);
}
// Guards against drift: each keep-list section name must be declared by
// exactly one source module.
#[test]
fn keep_section_sources_are_disjoint() {
use std::collections::HashMap;
let mut origins: HashMap<&[u8], Vec<&str>> = HashMap::new();
let sources: &[(&str, &[&[u8]])] = &[
("cache::STRUCTURAL_KEEP_SECTIONS", STRUCTURAL_KEEP_SECTIONS),
(
"monitor::symbols::VMLINUX_KEEP_SECTIONS",
crate::monitor::symbols::VMLINUX_KEEP_SECTIONS,
),
(
"monitor::VMLINUX_KEEP_SECTIONS",
crate::monitor::VMLINUX_KEEP_SECTIONS,
),
(
"probe::btf::VMLINUX_KEEP_SECTIONS",
crate::probe::btf::VMLINUX_KEEP_SECTIONS,
),
];
for (label, list) in sources {
for name in *list {
origins.entry(*name).or_default().push(label);
}
}
let dupes: Vec<_> = origins
.iter()
.filter(|(_, lists)| lists.len() > 1)
.collect();
assert!(
dupes.is_empty(),
"keep-list entries declared by multiple source modules (drift hazard): {dupes:?}",
);
}
// Guards against drift: a zero-data section must not be declared both here
// (speculatively) and by a consumer module.
#[test]
fn zero_data_section_sources_are_disjoint() {
use std::collections::HashSet;
let speculative: HashSet<&[u8]> = SPECULATIVE_ZERO_DATA_SECTIONS.iter().copied().collect();
let declared: HashSet<&[u8]> = crate::monitor::symbols::VMLINUX_ZERO_DATA_SECTIONS
.iter()
.copied()
.collect();
let overlap: Vec<_> = speculative.intersection(&declared).collect();
assert!(
overlap.is_empty(),
"zero-data section declared by both SPECULATIVE and a consumer (drift hazard): {overlap:?}",
);
}
// KernelMetadata must survive a pretty-JSON round trip field for field,
// including the private vmlinux flags.
#[test]
fn cache_metadata_serde_roundtrip() {
let meta = test_metadata("6.14.2");
let json = serde_json::to_string_pretty(&meta).unwrap();
let parsed: KernelMetadata = serde_json::from_str(&json).unwrap();
assert_eq!(parsed.version.as_deref(), Some("6.14.2"));
assert_eq!(parsed.source, KernelSource::Tarball);
assert_eq!(parsed.arch, "x86_64");
assert_eq!(parsed.image_name, "bzImage");
assert_eq!(parsed.config_hash.as_deref(), Some("abc123"));
assert_eq!(parsed.built_at, "2026-04-12T10:00:00Z");
assert_eq!(parsed.ktstr_kconfig_hash.as_deref(), Some("def456"));
assert!(!parsed.has_vmlinux);
assert!(!parsed.vmlinux_stripped);
}
// The Git variant must round-trip both its hash and its `ref`-renamed field.
#[test]
fn cache_metadata_serde_git_with_payload() {
let meta = KernelMetadata {
version: Some("6.15-rc3".to_string()),
source: KernelSource::Git {
git_hash: Some("a1b2c3d".to_string()),
git_ref: Some("v6.15-rc3".to_string()),
},
arch: "aarch64".to_string(),
image_name: "Image".to_string(),
config_hash: None,
built_at: "2026-04-12T12:00:00Z".to_string(),
ktstr_kconfig_hash: None,
has_vmlinux: false,
vmlinux_stripped: false,
source_vmlinux_size: None,
source_vmlinux_mtime_secs: None,
};
let json = serde_json::to_string(&meta).unwrap();
let parsed: KernelMetadata = serde_json::from_str(&json).unwrap();
assert!(matches!(
parsed.source,
KernelSource::Git {
git_hash: Some(ref h),
git_ref: Some(ref r),
}
if h == "a1b2c3d" && r == "v6.15-rc3"
));
}
// The Local variant must round-trip its source tree path and hash, and the
// vmlinux flags must survive when set to true.
#[test]
fn cache_metadata_serde_local_with_source_tree() {
let meta = KernelMetadata {
version: Some("6.14.0".to_string()),
source: KernelSource::Local {
source_tree_path: Some(PathBuf::from("/tmp/linux")),
git_hash: Some("deadbee".to_string()),
},
arch: "x86_64".to_string(),
image_name: "bzImage".to_string(),
config_hash: Some("fff000".to_string()),
built_at: "2026-04-12T14:00:00Z".to_string(),
ktstr_kconfig_hash: Some("aaa111".to_string()),
has_vmlinux: true,
vmlinux_stripped: true,
source_vmlinux_size: None,
source_vmlinux_mtime_secs: None,
};
let json = serde_json::to_string(&meta).unwrap();
let parsed: KernelMetadata = serde_json::from_str(&json).unwrap();
assert!(matches!(
parsed.source,
KernelSource::Local {
source_tree_path: Some(ref p),
git_hash: Some(ref h),
}
if p == &PathBuf::from("/tmp/linux") && h == "deadbee"
));
assert!(parsed.has_vmlinux);
assert!(parsed.vmlinux_stripped);
}
// git_hash=None must serialize as an explicit JSON null (no field skipping)
// and deserialize back to None.
#[test]
fn kernel_source_local_git_hash_serde_round_trip_none() {
let src = KernelSource::Local {
source_tree_path: Some(PathBuf::from("/tmp/linux")),
git_hash: None,
};
let json = serde_json::to_string(&src).unwrap();
assert!(
json.contains(r#""git_hash":null"#),
"git_hash=None must round-trip as explicit null, got {json}"
);
let parsed: KernelSource = serde_json::from_str(&json).unwrap();
assert!(matches!(parsed, KernelSource::Local { git_hash: None, .. }));
}
#[test]
fn kernel_source_option_fields_serialize_as_explicit_null() {
let local = KernelSource::Local {
source_tree_path: None,
git_hash: None,
};
let local_json = serde_json::to_string(&local).unwrap();
assert!(
local_json.contains(r#""source_tree_path":null"#),
"Local.source_tree_path=None must serialize as explicit null, got {local_json}"
);
assert!(
local_json.contains(r#""git_hash":null"#),
"Local.git_hash=None must serialize as explicit null, got {local_json}"
);
let git = KernelSource::Git {
git_hash: None,
git_ref: None,
};
let git_json = serde_json::to_string(&git).unwrap();
assert!(
git_json.contains(r#""git_hash":null"#),
"Git.git_hash=None must serialize as explicit null, got {git_json}"
);
assert!(
git_json.contains(r#""ref":null"#),
"Git.git_ref=None must serialize as explicit null under the `ref` key, got {git_json}"
);
}
#[test]
fn kernel_source_absent_option_keys_deserialize_as_none() {
let git_bare: KernelSource = serde_json::from_str(r#"{"type":"git"}"#)
.expect("Git with absent Option keys must deserialize");
assert!(matches!(
git_bare,
KernelSource::Git {
git_hash: None,
git_ref: None,
}
));
let git_hash_only: KernelSource =
serde_json::from_str(r#"{"type":"git","git_hash":"abc"}"#)
.expect("Git with only git_hash must deserialize");
assert!(matches!(
git_hash_only,
KernelSource::Git {
git_hash: Some(ref h),
git_ref: None,
} if h == "abc"
));
let git_ref_only: KernelSource = serde_json::from_str(r#"{"type":"git","ref":"main"}"#)
.expect("Git with only ref must deserialize");
assert!(matches!(
git_ref_only,
KernelSource::Git {
git_hash: None,
git_ref: Some(ref r),
} if r == "main"
));
let local_bare: KernelSource = serde_json::from_str(r#"{"type":"local"}"#)
.expect("Local with absent Option keys must deserialize");
assert!(matches!(
local_bare,
KernelSource::Local {
source_tree_path: None,
git_hash: None,
}
));
let local_path_only: KernelSource =
serde_json::from_str(r#"{"type":"local","source_tree_path":"/tmp/linux"}"#)
.expect("Local with only source_tree_path must deserialize");
assert!(matches!(
local_path_only,
KernelSource::Local {
source_tree_path: Some(ref p),
git_hash: None,
} if p.to_str() == Some("/tmp/linux")
));
let local_hash_only: KernelSource =
serde_json::from_str(r#"{"type":"local","git_hash":"deadbeef"}"#)
.expect("Local with only git_hash must deserialize");
assert!(matches!(
local_hash_only,
KernelSource::Local {
source_tree_path: None,
git_hash: Some(ref h),
} if h == "deadbeef"
));
}
#[test]
fn kernel_source_serde_tagged_representation() {
let t = serde_json::to_string(&KernelSource::Tarball).unwrap();
assert_eq!(t, r#"{"type":"tarball"}"#);
let g = serde_json::to_string(&KernelSource::Git {
git_hash: Some("abc".to_string()),
git_ref: Some("main".to_string()),
})
.unwrap();
assert!(g.contains(r#""type":"git""#));
assert!(g.contains(r#""git_hash":"abc""#));
assert!(g.contains(r#""ref":"main""#));
let l = serde_json::to_string(&KernelSource::Local {
source_tree_path: Some(PathBuf::from("/tmp/linux")),
git_hash: Some("a1b2c3d".to_string()),
})
.unwrap();
assert!(l.contains(r#""type":"local""#));
assert!(l.contains(r#""source_tree_path":"/tmp/linux""#));
assert!(l.contains(r#""git_hash":"a1b2c3d""#));
}
#[test]
fn cache_dir_with_root_does_not_create_dir() {
    // Constructing a CacheDir is side-effect free: the root directory must
    // not be created until something is actually stored.
    let tmp = TempDir::new().unwrap();
    let root = tmp.path().join("kernels");
    assert!(!root.exists());
    let cache = CacheDir::with_root(root.clone());
    assert!(!root.exists());
    assert_eq!(cache.root(), root);
}
#[test]
fn cache_dir_list_returns_empty_for_nonexistent_root() {
    // list() on a root that was never created succeeds with zero entries
    // rather than erroring on the missing directory.
    let tmp = TempDir::new().unwrap();
    let root = tmp.path().join("never-created");
    assert!(!root.exists());
    let cache = CacheDir::with_root(root);
    let entries = cache.list().unwrap();
    assert!(entries.is_empty());
}
#[test]
fn cache_dir_store_creates_root_lazily() {
    // store() is the operation that materializes the cache root on disk.
    let tmp = TempDir::new().unwrap();
    let root = tmp.path().join("lazy-root");
    assert!(!root.exists());
    let cache = CacheDir::with_root(root.clone());
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    cache
        .store("key", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert!(root.exists(), "store() must create the cache root");
}
#[test]
fn cache_dir_default_root_returns_path() {
    // default_root() honors KTSTR_CACHE_DIR. lock_env() serializes env
    // mutation across tests; the guard restores the variable on drop.
    let _lock = lock_env();
    let tmp = TempDir::new().unwrap();
    let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", tmp.path());
    let resolved = CacheDir::default_root().unwrap();
    assert_eq!(resolved, tmp.path());
}
#[test]
fn cache_dir_list_empty() {
    // An existing but empty cache root must list zero entries.
    let root_dir = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root_dir.path().to_path_buf());
    assert!(cache.list().unwrap().is_empty());
}
#[test]
fn cache_dir_store_and_lookup() {
    // Happy path: store an entry, verify its on-disk layout (image +
    // metadata.json), then look it up and check key and parsed version.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store("6.14.2-tarball-x86_64", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert_eq!(entry.key, "6.14.2-tarball-x86_64");
    assert!(entry.path.join("bzImage").exists());
    assert!(entry.path.join("metadata.json").exists());
    let found = cache.lookup("6.14.2-tarball-x86_64");
    assert!(found.is_some());
    let found = found.unwrap();
    assert_eq!(found.key, "6.14.2-tarball-x86_64");
    assert_eq!(found.metadata.version.as_deref(), Some("6.14.2"));
}
#[test]
fn cache_dir_lookup_missing() {
    // Looking up a key that was never stored must yield None.
    let root_dir = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root_dir.path().to_path_buf());
    let missing = cache.lookup("nonexistent");
    assert!(missing.is_none());
}
#[test]
fn cache_dir_lookup_corrupt_metadata() {
    // lookup() must treat an entry whose metadata.json is not valid JSON
    // as absent rather than propagating a parse error.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let entry_dir = tmp.path().join("bad-entry");
    fs::create_dir_all(&entry_dir).unwrap();
    fs::write(entry_dir.join("bzImage"), b"fake").unwrap();
    fs::write(entry_dir.join("metadata.json"), b"not json").unwrap();
    let found = cache.lookup("bad-entry");
    assert!(found.is_none());
}
#[test]
fn cache_dir_lookup_missing_image() {
    // lookup() must treat an entry with valid metadata but no image file
    // as absent.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let entry_dir = tmp.path().join("no-image");
    fs::create_dir_all(&entry_dir).unwrap();
    let meta = test_metadata("6.14.2");
    let json = serde_json::to_string(&meta).unwrap();
    fs::write(entry_dir.join("metadata.json"), json).unwrap();
    let found = cache.lookup("no-image");
    assert!(found.is_none());
}
#[test]
fn cache_dir_store_overwrites_existing() {
    // Storing under an existing key replaces the old entry; the later
    // metadata wins on subsequent lookup.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta1 = KernelMetadata {
        built_at: "2026-04-12T10:00:00Z".to_string(),
        ..test_metadata("6.14.2")
    };
    cache
        .store(
            "6.14.2-tarball-x86_64",
            &CacheArtifacts::new(&image),
            &meta1,
        )
        .unwrap();
    let meta2 = KernelMetadata {
        built_at: "2026-04-12T11:00:00Z".to_string(),
        ..test_metadata("6.14.2")
    };
    cache
        .store(
            "6.14.2-tarball-x86_64",
            &CacheArtifacts::new(&image),
            &meta2,
        )
        .unwrap();
    let found = cache.lookup("6.14.2-tarball-x86_64").unwrap();
    assert_eq!(found.metadata.built_at, "2026-04-12T11:00:00Z");
}
#[test]
fn cache_dir_list_sorted_newest_first() {
    // list() orders entries by built_at descending (newest first),
    // regardless of the order they were stored in.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta_old = KernelMetadata {
        built_at: "2026-04-10T10:00:00Z".to_string(),
        ..test_metadata("6.13.0")
    };
    let meta_new = KernelMetadata {
        built_at: "2026-04-12T10:00:00Z".to_string(),
        ..test_metadata("6.14.2")
    };
    let meta_mid = KernelMetadata {
        built_at: "2026-04-11T10:00:00Z".to_string(),
        ..test_metadata("6.14.0")
    };
    cache
        .store("old", &CacheArtifacts::new(&image), &meta_old)
        .unwrap();
    cache
        .store("new", &CacheArtifacts::new(&image), &meta_new)
        .unwrap();
    cache
        .store("mid", &CacheArtifacts::new(&image), &meta_mid)
        .unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 3);
    assert_eq!(entries[0].key(), "new");
    assert_eq!(entries[1].key(), "mid");
    assert_eq!(entries[2].key(), "old");
}
#[test]
fn cache_dir_list_includes_corrupt_entries() {
    // Unlike lookup(), list() surfaces corrupt entries (here: a directory
    // with no metadata.json) as ListedEntry::Corrupt with an exact reason.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    cache
        .store("valid", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    let bad_dir = tmp.path().join("corrupt");
    fs::create_dir_all(&bad_dir).unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 2);
    let valid = entries.iter().find(|e| e.key() == "valid").unwrap();
    assert!(valid.as_valid().is_some());
    let corrupt = entries.iter().find(|e| e.key() == "corrupt").unwrap();
    assert!(corrupt.as_valid().is_none());
    let ListedEntry::Corrupt { reason, .. } = corrupt else {
        panic!("expected Corrupt variant");
    };
    assert_eq!(
        reason, "metadata.json missing",
        "missing-metadata reason should be the exact missing-file label, got: {reason}",
    );
}
#[test]
fn cache_dir_list_classifies_missing_image_as_corrupt() {
    // An otherwise-valid entry whose image file was deleted must list as
    // Corrupt; the reason must name the specific missing image file.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store("missing-image", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    fs::remove_file(entry.image_path()).unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 1);
    let listed = &entries[0];
    assert_eq!(listed.key(), "missing-image");
    assert!(
        listed.as_valid().is_none(),
        "entry with missing image must not surface as Valid",
    );
    let ListedEntry::Corrupt { reason, .. } = listed else {
        panic!("expected Corrupt variant for missing-image entry");
    };
    assert!(
        reason.contains("image file") && reason.contains("missing"),
        "reason should cite missing image file, got: {reason}",
    );
    assert!(
        reason.contains(&meta.image_name),
        "reason should name the specific image file, got: {reason}",
    );
}
#[test]
fn cache_dir_list_classifies_unreadable_metadata_as_corrupt() {
    // metadata.json that exists but cannot be read as a file (here: it is
    // a directory) must use the distinct "unreadable" reason prefix.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let entry_dir = tmp.path().join("unreadable-metadata");
    fs::create_dir_all(entry_dir.join("metadata.json")).unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 1);
    let listed = &entries[0];
    assert_eq!(listed.key(), "unreadable-metadata");
    assert!(listed.as_valid().is_none());
    let ListedEntry::Corrupt { reason, .. } = listed else {
        panic!("expected Corrupt variant for entry with unreadable metadata");
    };
    assert!(
        reason.starts_with("metadata.json unreadable: "),
        "unreadable-metadata reason should carry the unreadable prefix distinct from the \
        missing / schema-drift / malformed / truncated prefixes, got: {reason}",
    );
}
#[test]
fn cache_dir_list_classifies_malformed_json_as_corrupt() {
    // Syntactically invalid JSON must use the "malformed" reason prefix
    // (the serde_json Category::Syntax route).
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let entry_dir = tmp.path().join("malformed-json");
    fs::create_dir_all(&entry_dir).unwrap();
    fs::write(entry_dir.join("metadata.json"), b"not valid json {[").unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 1);
    let listed = &entries[0];
    assert_eq!(listed.key(), "malformed-json");
    assert!(listed.as_valid().is_none());
    let ListedEntry::Corrupt { reason, .. } = listed else {
        panic!("expected Corrupt variant for malformed-json entry");
    };
    assert!(
        reason.starts_with("metadata.json malformed: "),
        "malformed-JSON reason should carry the malformed prefix \
        (Category::Syntax route), got: {reason}",
    );
}
#[test]
fn cache_dir_list_classifies_incomplete_metadata_as_corrupt() {
    // Valid JSON missing required fields must use the "schema drift" prefix
    // (Category::Data route) and name the first missing field.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let entry_dir = tmp.path().join("incomplete-metadata");
    fs::create_dir_all(&entry_dir).unwrap();
    fs::write(entry_dir.join("metadata.json"), br#"{"version": "6.14"}"#).unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 1);
    let listed = &entries[0];
    assert_eq!(listed.key(), "incomplete-metadata");
    assert!(
        listed.as_valid().is_none(),
        "incomplete-metadata missing required fields must not deserialize as Valid",
    );
    let ListedEntry::Corrupt { reason, .. } = listed else {
        panic!("expected Corrupt variant for entry with incomplete metadata");
    };
    assert!(
        reason.starts_with("metadata.json schema drift: "),
        "incomplete-metadata reason should carry the schema-drift \
        prefix (Category::Data route), got: {reason}",
    );
    assert!(
        reason.contains("missing field `source`"),
        "incomplete-metadata reason should name the first missing required field, got: {reason}",
    );
}
#[test]
fn cache_dir_list_classifies_truncated_json_as_corrupt() {
    // JSON cut off mid-value must use the "truncated" prefix
    // (Category::Eof route), distinct from generic malformed JSON.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let entry_dir = tmp.path().join("truncated-json");
    fs::create_dir_all(&entry_dir).unwrap();
    fs::write(entry_dir.join("metadata.json"), br#"{"source":"#).unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 1);
    let listed = &entries[0];
    assert_eq!(listed.key(), "truncated-json");
    assert!(listed.as_valid().is_none());
    let ListedEntry::Corrupt { reason, .. } = listed else {
        panic!("expected Corrupt variant for truncated-json entry");
    };
    assert!(
        reason.starts_with("metadata.json truncated: "),
        "truncated-JSON reason should carry the truncated prefix \
        (Category::Eof route), got: {reason}",
    );
}
#[test]
fn classify_corrupt_reason_covers_every_documented_prefix() {
    // Every documented corrupt-reason prefix must map to its stable
    // machine-readable label; anything unrecognized falls back to "unknown".
    let cases = [
        ("metadata.json missing", "missing"),
        (
            "metadata.json unreadable: Is a directory (os error 21)",
            "unreadable",
        ),
        (
            "metadata.json schema drift: missing field `source` at line 1 column 21",
            "schema_drift",
        ),
        (
            "metadata.json malformed: expected value at line 1 column 1",
            "malformed",
        ),
        (
            "metadata.json truncated: EOF while parsing a value at line 1 column 10",
            "truncated",
        ),
        (
            "metadata.json parse error: something unexpected",
            "parse_error",
        ),
        (
            "image file bzImage missing from entry directory",
            "image_missing",
        ),
        ("some future prefix nobody wrote yet", "unknown"),
    ];
    for (reason, expected) in cases {
        let got = classify_corrupt_reason(reason);
        assert_eq!(
            got, expected,
            "reason `{reason}` should classify as `{expected}`",
        );
    }
}
#[test]
fn listed_entry_error_kind_dispatches_on_variant() {
    // error_kind() is None for Valid entries and the classified label
    // (here "missing") for Corrupt entries.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    cache
        .store("valid-ek", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    // A bare directory with no metadata.json is the "missing" corrupt case.
    let bad_dir = tmp.path().join("cache").join("corrupt-ek");
    fs::create_dir_all(&bad_dir).unwrap();
    let entries = cache.list().unwrap();
    assert_eq!(entries.len(), 2);
    let valid = entries
        .iter()
        .find(|e| e.key() == "valid-ek")
        .expect("valid entry must be listed");
    let corrupt = entries
        .iter()
        .find(|e| e.key() == "corrupt-ek")
        .expect("corrupt entry must be listed");
    assert_eq!(
        valid.error_kind(),
        None,
        "Valid entries must report no error_kind",
    );
    assert_eq!(
        corrupt.error_kind(),
        Some("missing"),
        "missing-metadata Corrupt entry must classify as `missing`",
    );
}
#[test]
fn cache_dir_list_skips_tmp_dirs() {
    // In-progress staging dirs (TMP_DIR_PREFIX-named) are invisible to list().
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let tmp_dir = tmp.path().join(".tmp-in-progress-12345");
    fs::create_dir_all(&tmp_dir).unwrap();
    let entries = cache.list().unwrap();
    assert!(entries.is_empty());
}
#[test]
fn cache_dir_list_skips_regular_files() {
    // Only directories count as entries; stray files in the root are ignored.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    fs::write(tmp.path().join("stray-file.txt"), b"stray").unwrap();
    let entries = cache.list().unwrap();
    assert!(entries.is_empty());
}
#[test]
fn cache_dir_clean_all() {
    // clean_all() removes every entry and reports the removed count.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    cache
        .store("a", &CacheArtifacts::new(&image), &test_metadata("6.14.0"))
        .unwrap();
    cache
        .store("b", &CacheArtifacts::new(&image), &test_metadata("6.14.1"))
        .unwrap();
    cache
        .store("c", &CacheArtifacts::new(&image), &test_metadata("6.14.2"))
        .unwrap();
    let removed = cache.clean_all().unwrap();
    assert_eq!(removed, 3);
    assert!(cache.list().unwrap().is_empty());
}
#[test]
fn cache_dir_clean_keep_n() {
    // clean_keep(n) keeps the n newest entries by built_at and removes
    // the rest; here only "new" (the latest timestamp) survives.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta_old = KernelMetadata {
        built_at: "2026-04-10T10:00:00Z".to_string(),
        ..test_metadata("6.13.0")
    };
    let meta_new = KernelMetadata {
        built_at: "2026-04-12T10:00:00Z".to_string(),
        ..test_metadata("6.14.2")
    };
    let meta_mid = KernelMetadata {
        built_at: "2026-04-11T10:00:00Z".to_string(),
        ..test_metadata("6.14.0")
    };
    cache
        .store("old", &CacheArtifacts::new(&image), &meta_old)
        .unwrap();
    cache
        .store("new", &CacheArtifacts::new(&image), &meta_new)
        .unwrap();
    cache
        .store("mid", &CacheArtifacts::new(&image), &meta_mid)
        .unwrap();
    let removed = cache.clean_keep(1).unwrap();
    assert_eq!(removed, 2);
    let remaining = cache.list().unwrap();
    assert_eq!(remaining.len(), 1);
    assert_eq!(remaining[0].key(), "new");
}
#[test]
fn cache_dir_clean_keep_more_than_exist() {
    // Keeping more entries than exist is a no-op that removes nothing.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    cache
        .store(
            "only",
            &CacheArtifacts::new(&image),
            &test_metadata("6.14.2"),
        )
        .unwrap();
    let removed = cache.clean_keep(5).unwrap();
    assert_eq!(removed, 0);
    assert_eq!(cache.list().unwrap().len(), 1);
}
#[test]
fn cache_dir_clean_empty_cache() {
    // clean_all() on an empty cache removes nothing and reports zero.
    let root_dir = TempDir::new().unwrap();
    let cache = CacheDir::with_root(root_dir.path().to_path_buf());
    assert_eq!(cache.clean_all().unwrap(), 0);
}
#[test]
fn cache_resolve_root_ktstr_cache_dir() {
    // Highest-priority source: KTSTR_CACHE_DIR wins when set.
    // lock_env() serializes env mutation; guards restore on drop.
    let _lock = lock_env();
    let tmp = TempDir::new().unwrap();
    let dir = tmp.path().join("custom-cache");
    let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", &dir);
    let root = resolve_cache_root().unwrap();
    assert_eq!(root, dir);
}
#[test]
fn cache_resolve_root_xdg_cache_home() {
    // Without KTSTR_CACHE_DIR, XDG_CACHE_HOME/ktstr/kernels is used.
    let _lock = lock_env();
    let tmp = TempDir::new().unwrap();
    let _guard1 = EnvVarGuard::remove("KTSTR_CACHE_DIR");
    let _guard2 = EnvVarGuard::set("XDG_CACHE_HOME", tmp.path());
    let root = resolve_cache_root().unwrap();
    assert_eq!(root, tmp.path().join("ktstr").join("kernels"));
}
#[test]
fn cache_resolve_root_empty_ktstr_cache_dir_falls_through() {
    // An empty (as opposed to unset) KTSTR_CACHE_DIR must not be taken
    // literally; the cascade falls through to XDG_CACHE_HOME.
    let _lock = lock_env();
    let tmp = TempDir::new().unwrap();
    let _guard1 = EnvVarGuard::set("KTSTR_CACHE_DIR", "");
    let _guard2 = EnvVarGuard::set("XDG_CACHE_HOME", tmp.path());
    let root = resolve_cache_root().unwrap();
    assert_eq!(root, tmp.path().join("ktstr").join("kernels"));
}
#[test]
fn cache_resolve_root_empty_xdg_falls_to_home() {
    // Empty XDG_CACHE_HOME likewise falls through, landing on
    // $HOME/.cache/ktstr/kernels.
    let _lock = lock_env();
    let tmp = TempDir::new().unwrap();
    let _guard1 = EnvVarGuard::remove("KTSTR_CACHE_DIR");
    let _guard2 = EnvVarGuard::set("XDG_CACHE_HOME", "");
    let _guard3 = EnvVarGuard::set("HOME", tmp.path());
    let root = resolve_cache_root().unwrap();
    assert_eq!(
        root,
        tmp.path().join(".cache").join("ktstr").join("kernels")
    );
}
#[test]
fn cache_resolve_root_home_unset_error() {
    // With the entire cascade exhausted, the error must use the unset-HOME
    // diagnostic (distinct from empty-HOME) and suggest KTSTR_CACHE_DIR.
    let _lock = lock_env();
    let _guard1 = EnvVarGuard::remove("KTSTR_CACHE_DIR");
    let _guard2 = EnvVarGuard::remove("XDG_CACHE_HOME");
    let _guard3 = EnvVarGuard::remove("HOME");
    let err = resolve_cache_root().unwrap_err();
    let msg = err.to_string();
    assert!(
        msg.contains("HOME is unset"),
        "expected HOME-unset error, got: {msg}"
    );
    assert!(
        !msg.contains("HOME is set to the empty string"),
        "unset HOME must NOT use the empty-string diagnostic — the two \
        cases are distinct now (NotPresent vs Ok(\"\")), got: {msg}",
    );
    assert!(
        msg.contains("KTSTR_CACHE_DIR"),
        "error should suggest KTSTR_CACHE_DIR, got: {msg}"
    );
}
#[test]
fn cache_resolve_root_home_root_slash_error() {
    // HOME=/ is rejected with a dedicated diagnostic citing the resulting
    // cache path and suggesting KTSTR_CACHE_DIR.
    let _lock = lock_env();
    let _guard1 = EnvVarGuard::remove("KTSTR_CACHE_DIR");
    let _guard2 = EnvVarGuard::remove("XDG_CACHE_HOME");
    let _guard3 = EnvVarGuard::set("HOME", "/");
    let err = resolve_cache_root().unwrap_err();
    let msg = err.to_string();
    assert!(
        msg.contains("HOME is `/`"),
        "expected HOME=/ specific error, got: {msg}"
    );
    assert!(
        msg.contains("/.cache/ktstr"),
        "diagnostic must cite the offending cache path, got: {msg}"
    );
    assert!(
        msg.contains("KTSTR_CACHE_DIR"),
        "error should suggest KTSTR_CACHE_DIR, got: {msg}"
    );
}
#[test]
fn cache_resolve_root_home_empty_error() {
    // HOME="" gets the empty-string diagnostic, NOT the unset diagnostic;
    // the two failure modes are reported distinctly.
    let _lock = lock_env();
    let _guard1 = EnvVarGuard::remove("KTSTR_CACHE_DIR");
    let _guard2 = EnvVarGuard::remove("XDG_CACHE_HOME");
    let _guard3 = EnvVarGuard::set("HOME", "");
    let err = resolve_cache_root().unwrap_err();
    let msg = err.to_string();
    assert!(
        msg.contains("HOME is set to the empty string"),
        "empty-HOME bail must use the empty-string diagnostic, got: {msg}",
    );
    assert!(
        !msg.contains("HOME is unset"),
        "empty-HOME must NOT use the unset diagnostic — the two \
        cases are distinct now, got: {msg}",
    );
}
#[test]
fn cache_resolve_root_home_relative_path_error() {
    // A relative HOME is rejected; the diagnostic cites the offending value.
    let _lock = lock_env();
    let _guard1 = EnvVarGuard::remove("KTSTR_CACHE_DIR");
    let _guard2 = EnvVarGuard::remove("XDG_CACHE_HOME");
    let _guard3 = EnvVarGuard::set("HOME", "relative/dir");
    let err = resolve_cache_root().unwrap_err();
    let msg = err.to_string();
    assert!(
        msg.contains("not an absolute path"),
        "expected relative-path-specific error, got: {msg}"
    );
    assert!(
        msg.contains("relative/dir"),
        "diagnostic must cite the offending HOME value, got: {msg}"
    );
    assert!(
        msg.contains("KTSTR_CACHE_DIR"),
        "error should suggest KTSTR_CACHE_DIR, got: {msg}"
    );
}
#[test]
fn cache_resolve_root_home_bare_name_relative_error() {
    // Even a bare name like "tmp" is relative; the value must appear in
    // the message via its Debug (quoted) representation.
    let _lock = lock_env();
    let _guard1 = EnvVarGuard::remove("KTSTR_CACHE_DIR");
    let _guard2 = EnvVarGuard::remove("XDG_CACHE_HOME");
    let _guard3 = EnvVarGuard::set("HOME", "tmp");
    let err = resolve_cache_root().unwrap_err();
    let msg = err.to_string();
    assert!(
        msg.contains("not an absolute path"),
        "expected relative-path-specific error, got: {msg}"
    );
    assert!(
        msg.contains("\"tmp\""),
        "diagnostic must cite the offending HOME value via its Debug \
        representation, got: {msg}"
    );
}
#[test]
fn cache_resolve_root_home_absolute_passes() {
    // An absolute HOME resolves successfully to $HOME/.cache/ktstr/kernels.
    let _lock = lock_env();
    let _guard1 = EnvVarGuard::remove("KTSTR_CACHE_DIR");
    let _guard2 = EnvVarGuard::remove("XDG_CACHE_HOME");
    let tmp = TempDir::new().expect("tempdir");
    let _guard3 = EnvVarGuard::set("HOME", tmp.path());
    let resolved = resolve_cache_root().expect("absolute HOME must resolve");
    let expected = tmp.path().join(".cache").join("ktstr").join("kernels");
    assert_eq!(
        resolved, expected,
        "absolute HOME must produce $HOME/.cache/ktstr/kernels",
    );
}
#[test]
#[cfg(unix)]
fn cache_resolve_root_non_utf8_ktstr_cache_dir_bails() {
    // A non-UTF-8 KTSTR_CACHE_DIR must produce an error naming the variable,
    // the encoding problem, and a remediation — not silently fall through.
    let _lock = lock_env();
    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt;
    // 0xFF is never valid UTF-8, so this OsStr cannot convert to &str.
    let bytes: &[u8] = b"/tmp/ktstr-\xFFcache";
    let value = OsStr::from_bytes(bytes);
    let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", value);
    let err = resolve_cache_root()
        .expect_err("non-UTF-8 KTSTR_CACHE_DIR must bail, not silently fall through");
    let msg = err.to_string();
    assert!(
        msg.contains("KTSTR_CACHE_DIR"),
        "error must name the offending variable, got: {msg}",
    );
    assert!(
        msg.contains("non-UTF-8"),
        "error must mention non-UTF-8 so the operator knows the encoding, \
        got: {msg}",
    );
    assert!(
        msg.contains("UTF-8") || msg.contains("unset") || msg.contains("ASCII"),
        "error must name a remediation (UTF-8 replacement or unset), \
        got: {msg}",
    );
}
#[test]
fn path_inside_cache_root_accepts_path_inside() {
    // A file under an entry directly beneath the resolved cache root
    // classifies as in-cache.
    let _lock = lock_env();
    let tmp = TempDir::new().unwrap();
    let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", tmp.path());
    let entry = tmp.path().join("kentry");
    std::fs::create_dir_all(&entry).unwrap();
    let vmlinux = entry.join("vmlinux");
    std::fs::write(&vmlinux, b"placeholder").unwrap();
    assert!(
        path_inside_cache_root(&vmlinux),
        "vmlinux directly under cache root must be classified as in-cache",
    );
}
#[test]
fn path_inside_cache_root_rejects_path_outside() {
    // A file in an unrelated directory classifies as outside-cache.
    let _lock = lock_env();
    let cache_root = TempDir::new().unwrap();
    let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", cache_root.path());
    let source_tree = TempDir::new().unwrap();
    let vmlinux = source_tree.path().join("vmlinux");
    std::fs::write(&vmlinux, b"placeholder").unwrap();
    assert!(
        !path_inside_cache_root(&vmlinux),
        "vmlinux in a sibling tempdir must NOT be classified as in-cache",
    );
}
#[test]
fn path_inside_cache_root_rejects_bare_filename() {
    // A bare filename has no parent to canonicalize, so the check
    // short-circuits to false.
    let _lock = lock_env();
    let tmp = TempDir::new().unwrap();
    let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", tmp.path());
    let bare = std::path::Path::new("vmlinux");
    assert!(
        !path_inside_cache_root(bare),
        "bare filename (no parent) must short-circuit to false",
    );
}
#[test]
fn path_inside_cache_root_false_when_unresolvable() {
    // If no cache root can be resolved (all env sources removed), the
    // safe answer is false, not an error or panic.
    let _lock = lock_env();
    let _g1 = EnvVarGuard::remove("KTSTR_CACHE_DIR");
    let _g2 = EnvVarGuard::remove("XDG_CACHE_HOME");
    let _g3 = EnvVarGuard::remove("HOME");
    let dir = TempDir::new().unwrap();
    let f = dir.path().join("vmlinux");
    std::fs::write(&f, b"x").unwrap();
    assert!(
        !path_inside_cache_root(&f),
        "unresolvable cache root must classify as outside-cache (false)",
    );
}
#[test]
fn path_inside_cache_root_false_when_parent_canonicalize_fails() {
    // A path whose parent does not exist cannot be canonicalized; it must
    // classify as outside-cache rather than panicking.
    let _lock = lock_env();
    let tmp = TempDir::new().unwrap();
    let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", tmp.path());
    let nonexistent = std::path::Path::new("/this/parent/should/not/exist/vmlinux");
    assert!(
        !nonexistent.parent().unwrap().exists(),
        "precondition: parent must not exist for the canonicalize \
        failure path to be exercised",
    );
    assert!(
        !path_inside_cache_root(nonexistent),
        "nonexistent parent must surface as outside-cache, not panic",
    );
}
#[test]
#[cfg(unix)]
fn path_inside_cache_root_follows_symlink_into_cache() {
    // The classification follows symlinks: a path reached through a
    // symlinked parent that resolves INTO the cache counts as in-cache.
    let _lock = lock_env();
    let cache_root = TempDir::new().unwrap();
    let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", cache_root.path());
    let entry = cache_root.path().join("kentry");
    std::fs::create_dir_all(&entry).unwrap();
    let real = entry.join("vmlinux");
    std::fs::write(&real, b"placeholder").unwrap();
    let outside = TempDir::new().unwrap();
    let alias_parent = outside.path().join("alias");
    std::os::unix::fs::symlink(&entry, &alias_parent).unwrap();
    let through_alias = alias_parent.join("vmlinux");
    assert!(
        through_alias.exists(),
        "precondition: path through symlinked parent must be reachable",
    );
    assert!(
        path_inside_cache_root(&through_alias),
        "path whose parent symlink resolves into cache must classify as in-cache",
    );
}
#[test]
#[cfg(unix)]
fn path_inside_cache_root_follows_symlink_out_of_cache() {
    // Mirror case: a symlink inside the cache pointing OUT must classify
    // the resolved target as outside-cache.
    let _lock = lock_env();
    let cache_root = TempDir::new().unwrap();
    let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", cache_root.path());
    let outside = TempDir::new().unwrap();
    let real = outside.path().join("vmlinux");
    std::fs::write(&real, b"placeholder").unwrap();
    let alias_parent = cache_root.path().join("alias");
    std::os::unix::fs::symlink(outside.path(), &alias_parent).unwrap();
    let through_alias = alias_parent.join("vmlinux");
    assert!(
        through_alias.exists(),
        "precondition: path through symlinked parent must be reachable",
    );
    assert!(
        !path_inside_cache_root(&through_alias),
        "path whose parent symlink resolves OUT of cache must classify as outside-cache",
    );
}
#[test]
fn path_inside_cache_root_empty_ktstr_cache_dir_falls_through() {
    // Empty KTSTR_CACHE_DIR must trigger the same env cascade as
    // resolve_cache_root: resolution lands on XDG_CACHE_HOME/ktstr/kernels.
    let _lock = lock_env();
    let tmp = TempDir::new().unwrap();
    let _g1 = EnvVarGuard::set("KTSTR_CACHE_DIR", "");
    let _g2 = EnvVarGuard::set("XDG_CACHE_HOME", tmp.path());
    let resolved = tmp.path().join("ktstr").join("kernels");
    let entry = resolved.join("kentry");
    std::fs::create_dir_all(&entry).unwrap();
    let vmlinux = entry.join("vmlinux");
    std::fs::write(&vmlinux, b"placeholder").unwrap();
    assert!(
        path_inside_cache_root(&vmlinux),
        "with empty KTSTR_CACHE_DIR, the cascade must resolve via \
        XDG_CACHE_HOME and accept paths inside that resolved root",
    );
}
#[test]
fn path_inside_cache_root_fresh_resolution_per_call() {
    // The cache root is re-resolved on every call (no caching): moving
    // KTSTR_CACHE_DIR between calls flips the classification.
    let _lock = lock_env();
    let cache_a = TempDir::new().unwrap();
    let cache_b = TempDir::new().unwrap();
    let entry_a = cache_a.path().join("kentry");
    std::fs::create_dir_all(&entry_a).unwrap();
    let vmlinux_a = entry_a.join("vmlinux");
    std::fs::write(&vmlinux_a, b"placeholder").unwrap();
    {
        let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", cache_a.path());
        assert!(
            path_inside_cache_root(&vmlinux_a),
            "first call: vmlinux is inside cache_a (the active root)",
        );
    }
    {
        let _guard = EnvVarGuard::set("KTSTR_CACHE_DIR", cache_b.path());
        assert!(
            !path_inside_cache_root(&vmlinux_a),
            "second call: KTSTR_CACHE_DIR has moved to cache_b, so the \
            vmlinux (still under cache_a) must be classified outside",
        );
    }
}
#[test]
fn clean_orphaned_tmp_dirs_removes_dead_pid_tempdir() {
    // A TMP_DIR_PREFIX dir whose pid suffix refers to a dead process
    // (pid_t::MAX can never be a live pid) is removed, contents and all.
    let tmp = TempDir::new().unwrap();
    let dead_pid = libc::pid_t::MAX;
    let orphan = tmp
        .path()
        .join(format!("{TMP_DIR_PREFIX}somekey-{dead_pid}"));
    std::fs::create_dir_all(&orphan).unwrap();
    std::fs::write(orphan.join("inner.txt"), b"data").unwrap();
    clean_orphaned_tmp_dirs(tmp.path()).unwrap();
    assert!(
        !orphan.exists(),
        "dead-pid tempdir must be removed by clean_orphaned_tmp_dirs",
    );
}
#[test]
fn clean_orphaned_tmp_dirs_preserves_live_pid_tempdir() {
    // A tempdir owned by a live process (our own pid) must survive.
    let tmp = TempDir::new().unwrap();
    let live_pid = unsafe { libc::getpid() };
    let keeper = tmp
        .path()
        .join(format!("{TMP_DIR_PREFIX}somekey-{live_pid}"));
    std::fs::create_dir_all(&keeper).unwrap();
    clean_orphaned_tmp_dirs(tmp.path()).unwrap();
    assert!(
        keeper.exists(),
        "live-pid tempdir must NOT be removed — its owner is still running",
    );
}
#[test]
fn clean_orphaned_tmp_dirs_leaves_malformed_suffix_alone() {
    // Names that carry the prefix but not a parseable pid suffix
    // (non-numeric, empty, or missing entirely) are left untouched.
    let tmp = TempDir::new().unwrap();
    let nonnum = tmp.path().join(format!("{TMP_DIR_PREFIX}somekey-notapid"));
    std::fs::create_dir_all(&nonnum).unwrap();
    let empty_suf = tmp.path().join(format!("{TMP_DIR_PREFIX}somekey-"));
    std::fs::create_dir_all(&empty_suf).unwrap();
    let no_dash = tmp.path().join(format!("{TMP_DIR_PREFIX}nokeyhere"));
    std::fs::create_dir_all(&no_dash).unwrap();
    clean_orphaned_tmp_dirs(tmp.path()).unwrap();
    assert!(nonnum.exists(), "non-numeric pid suffix must be left alone");
    assert!(empty_suf.exists(), "empty pid suffix must be left alone");
    assert!(no_dash.exists(), "no-pid-suffix entry must be left alone");
}
#[test]
fn clean_orphaned_tmp_dirs_leaves_unrelated_entries_alone() {
    // Directories without the TMP_DIR_PREFIX are never candidates.
    let tmp = TempDir::new().unwrap();
    let real_entry = tmp.path().join("real-cache-entry");
    std::fs::create_dir_all(&real_entry).unwrap();
    let other = tmp.path().join("not-a-tempdir");
    std::fs::create_dir_all(&other).unwrap();
    clean_orphaned_tmp_dirs(tmp.path()).unwrap();
    assert!(
        real_entry.exists(),
        "unrelated cache entry must be preserved"
    );
    assert!(other.exists(), "unrelated directory must be preserved");
}
#[test]
#[cfg(unix)]
fn clean_orphaned_tmp_dirs_skips_non_utf8_names() {
    // A name with invalid UTF-8 cannot be matched against our format, so
    // the safe default is to skip it rather than delete it.
    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt;
    let tmp = TempDir::new().unwrap();
    let mut bytes: Vec<u8> = b".tmp-".to_vec();
    bytes.push(0xFF);
    bytes.extend_from_slice(b"-123");
    let bad_name = OsStr::from_bytes(&bytes);
    let bad_path = tmp.path().join(bad_name);
    std::fs::create_dir(&bad_path).unwrap();
    clean_orphaned_tmp_dirs(tmp.path()).unwrap();
    assert!(
        bad_path.exists(),
        "non-UTF-8 entry must be left alone — the scan cannot \
        confirm it matches our format, so safe-default is skip",
    );
}
#[test]
fn clean_orphaned_tmp_dirs_handles_missing_cache_root() {
let tmp = TempDir::new().unwrap();
let never_created = tmp.path().join("never-created");
clean_orphaned_tmp_dirs(&never_created).unwrap();
}
#[test]
fn clean_orphaned_tmp_dirs_mixed_entries() {
    let tmp = TempDir::new().unwrap();
    // pid_t::MAX is far above any realistic pid range, so it reads as dead.
    let dead_pid = libc::pid_t::MAX;
    // Our own pid is alive for the duration of the test by definition.
    let live_pid = unsafe { libc::getpid() };
    let dead = tmp.path().join(format!("{TMP_DIR_PREFIX}a-{dead_pid}"));
    let live = tmp.path().join(format!("{TMP_DIR_PREFIX}b-{live_pid}"));
    let unrelated = tmp.path().join("c-regular-entry");
    std::fs::create_dir_all(&dead).unwrap();
    std::fs::create_dir_all(&live).unwrap();
    std::fs::create_dir_all(&unrelated).unwrap();
    clean_orphaned_tmp_dirs(tmp.path()).unwrap();
    assert!(!dead.exists(), "dead orphan must be removed");
    assert!(live.exists(), "live-pid entry must survive");
    assert!(unrelated.exists(), "unrelated entry must survive");
}
#[test]
fn clean_orphaned_tmp_dirs_preserves_pid_zero_suffix() {
    // pid 0 is special to kill(2) (process-group broadcast), so the scan
    // must filter it out before any liveness probe can run.
    let tmp = TempDir::new().unwrap();
    let entry = tmp.path().join(format!("{TMP_DIR_PREFIX}somekey-0"));
    std::fs::create_dir_all(&entry).unwrap();
    clean_orphaned_tmp_dirs(tmp.path()).unwrap();
    assert!(
        entry.exists(),
        "pid=0 suffix must be preserved — `pid <= 0` filter \
         skips the entry before kill(0, None)'s pgrp-broadcast \
         ambiguity can reach the liveness probe",
    );
}
#[test]
fn clean_orphaned_tmp_dirs_double_dash_parses_as_positive_pid() {
    // ".tmp-somekey--12345": splitting at the *last* dash yields "12345"
    // (positive), not "-12345". The expected outcome therefore depends on
    // whether pid 12345 is alive on this machine, so the test probes
    // liveness and branches on the result.
    // NOTE(review): tiny TOCTOU window between clean and probe — accepted.
    let tmp = TempDir::new().unwrap();
    let entry = tmp.path().join(format!("{TMP_DIR_PREFIX}somekey--12345"));
    std::fs::create_dir_all(&entry).unwrap();
    clean_orphaned_tmp_dirs(tmp.path()).unwrap();
    // kill(pid, None) is a pure existence probe: Ok(()) ⇒ alive.
    let pid_alive = matches!(
        nix::sys::signal::kill(nix::unistd::Pid::from_raw(12345), None),
        Ok(()),
    );
    if pid_alive {
        assert!(
            entry.exists(),
            "pid 12345 was alive at probe time → entry must be \
             preserved; got: entry removed (regression?)",
        );
    } else {
        assert!(
            !entry.exists(),
            "pid 12345 was dead at probe time → entry must be \
             removed (proves positive-pid parse). A regression to \
             negative-pid parse would preserve unconditionally; \
             entry still exists.",
        );
    }
}
#[test]
fn clean_orphaned_tmp_dirs_leaves_regular_file_entry() {
    // A plain file whose name mimics an orphaned tempdir must survive.
    let tmp = TempDir::new().unwrap();
    let dead_pid = libc::pid_t::MAX;
    let file_entry = tmp
        .path()
        .join(format!("{TMP_DIR_PREFIX}fileshaped-{dead_pid}"));
    std::fs::write(&file_entry, b"not a directory").unwrap();
    clean_orphaned_tmp_dirs(tmp.path()).unwrap();
    assert!(
        file_entry.exists(),
        "regular file with tempdir-shaped name + dead pid must \
         NOT be removed — `remove_dir_all` errors on a file, \
         and the scan's error-tolerance contract leaves it",
    );
}
#[test]
#[cfg(unix)]
fn clean_orphaned_tmp_dirs_leaves_symlink_entry() {
    // A symlink with an orphan-shaped name must not be followed: deleting
    // through it would destroy state outside the cache root.
    let tmp = TempDir::new().unwrap();
    let target_root = TempDir::new().unwrap();
    // Sentinel lives under the symlink's target, outside the cache root.
    let target_file = target_root.path().join("sentinel.txt");
    std::fs::write(&target_file, b"must-not-be-deleted").unwrap();
    let dead_pid = libc::pid_t::MAX;
    let symlink = tmp
        .path()
        .join(format!("{TMP_DIR_PREFIX}symkey-{dead_pid}"));
    std::os::unix::fs::symlink(target_root.path(), &symlink).unwrap();
    clean_orphaned_tmp_dirs(tmp.path()).unwrap();
    assert!(
        target_file.exists(),
        "symlink target's contents must survive the scan — \
         following symlinks would delete unrelated state \
         outside the cache root, a critical security / data- \
         safety regression",
    );
    assert_eq!(
        std::fs::read(&target_file).unwrap(),
        b"must-not-be-deleted",
        "target file content must be unchanged",
    );
}
// validate_cache_key: each rejection reason is pinned by its error text so
// CLI output stays actionable.
#[test]
fn cache_validate_key_rejects_empty() {
    let err = validate_cache_key("").unwrap_err();
    assert!(err.to_string().contains("empty"));
}
#[test]
fn cache_validate_key_rejects_whitespace_only() {
    // Whitespace-only keys report the same "empty" error as "".
    let err = validate_cache_key(" ").unwrap_err();
    assert!(err.to_string().contains("empty"));
}
#[test]
fn cache_validate_key_rejects_forward_slash() {
    let err = validate_cache_key("a/b").unwrap_err();
    assert!(err.to_string().contains("path separator"));
}
#[test]
fn cache_validate_key_rejects_backslash() {
    // Backslash is rejected too, so keys stay portable to Windows paths.
    let err = validate_cache_key("a\\b").unwrap_err();
    assert!(err.to_string().contains("path separator"));
}
#[test]
fn cache_validate_key_rejects_dotdot() {
    // ".." is refused even embedded mid-key, not only as a component.
    let err = validate_cache_key("foo..bar").unwrap_err();
    assert!(err.to_string().contains("path traversal"));
}
#[test]
fn cache_validate_key_rejects_null_byte() {
    let err = validate_cache_key("key\0evil").unwrap_err();
    assert!(err.to_string().contains("null"));
}
#[test]
fn cache_validate_key_rejects_tmp_prefix() {
    // Keys colliding with the in-progress staging prefix are reserved.
    let err = validate_cache_key(".tmp-in-progress").unwrap_err();
    assert!(
        err.to_string().contains(".tmp-"),
        "expected .tmp- rejection, got: {err}"
    );
}
#[test]
fn cache_validate_key_rejects_dot() {
    let err = validate_cache_key(".").unwrap_err();
    assert!(
        err.to_string().contains("directory reference"),
        "expected dot rejection, got: {err}"
    );
}
#[test]
fn cache_validate_key_rejects_dotdot_bare() {
    let err = validate_cache_key("..").unwrap_err();
    assert!(
        err.to_string().contains("directory reference"),
        "expected dotdot rejection, got: {err}"
    );
}
#[test]
fn cache_validate_key_accepts_valid() {
    // Representative real-world keys for tarball, local, and git sources.
    assert!(validate_cache_key("6.14.2-tarball-x86_64").is_ok());
    assert!(validate_cache_key("local-deadbeef-x86_64").is_ok());
    assert!(validate_cache_key("v6.14-git-a1b2c3d-aarch64").is_ok());
}
#[test]
fn cache_validate_filename_rejects_traversal() {
    // A ".." component must be refused whether leading or embedded.
    for candidate in ["../etc/passwd", "foo/../bar"] {
        assert!(validate_filename(candidate).is_err());
    }
}
#[test]
fn cache_validate_filename_rejects_empty() {
    // The empty string is not a usable file name.
    assert!(validate_filename("").is_err());
}
#[test]
fn cache_validate_filename_accepts_valid() {
    // Plain kernel image names (x86_64 and arm64 conventions) pass.
    for candidate in ["bzImage", "Image"] {
        assert!(validate_filename(candidate).is_ok());
    }
}
// CacheDir::store / lookup must enforce the same key and name validation
// as the free validators: store fails loudly, lookup fails soft (None).
#[test]
fn cache_dir_store_rejects_image_name_traversal() {
    // image_name comes from metadata; a "../" component must be rejected
    // before anything is written under the cache root.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let mut meta = test_metadata("6.14.2");
    meta.image_name = "../escape".to_string();
    let err = cache
        .store("valid-key", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains("image name"),
        "expected image_name rejection, got: {err}"
    );
}
#[test]
fn cache_dir_store_tmp_prefix_key_rejected() {
    // Keys colliding with the staging-directory prefix are reserved.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    let err = cache
        .store(".tmp-sneaky", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains(".tmp-"),
        "expected .tmp- rejection, got: {err}"
    );
}
#[test]
fn cache_dir_lookup_tmp_prefix_returns_none() {
    // lookup never errors on a bad key — it just misses.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    assert!(cache.lookup(".tmp-sneaky").is_none());
}
#[test]
fn cache_dir_store_empty_key_rejected() {
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    let err = cache
        .store("", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains("empty"),
        "expected empty-key error, got: {err}"
    );
}
#[test]
fn cache_dir_lookup_empty_key_returns_none() {
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    assert!(cache.lookup("").is_none());
}
#[test]
fn cache_dir_store_path_traversal_rejected() {
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    let err = cache
        .store("../escape", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains("path"),
        "expected path-traversal error, got: {err}"
    );
}
#[test]
fn cache_dir_lookup_path_traversal_returns_none() {
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    assert!(cache.lookup("../escape").is_none());
    assert!(cache.lookup("foo/../bar").is_none());
}
#[test]
fn cache_dir_store_slash_in_key_rejected() {
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    let err = cache
        .store("a/b", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains("path separator"),
        "expected path-separator error, got: {err}"
    );
}
#[test]
fn cache_dir_store_whitespace_only_key_rejected() {
    // Whitespace-only keys share the "empty" rejection message.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    let err = cache
        .store(" ", &CacheArtifacts::new(&image), &meta)
        .unwrap_err();
    assert!(
        err.to_string().contains("empty"),
        "expected empty/whitespace error, got: {err}"
    );
}
#[test]
fn cache_dir_clean_keep_n_with_mixed_entries() {
    // clean_keep(1) over two valid entries (newer/older by built_at) plus
    // one metadata-less "corrupt" directory: the older entry and the
    // corrupt one are both removed (removed == 2); the newest survives.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta_new = KernelMetadata {
        built_at: "2026-04-12T10:00:00Z".to_string(),
        ..test_metadata("6.14.2")
    };
    let meta_old = KernelMetadata {
        built_at: "2026-04-10T10:00:00Z".to_string(),
        ..test_metadata("6.13.0")
    };
    cache
        .store("new", &CacheArtifacts::new(&image), &meta_new)
        .unwrap();
    cache
        .store("old", &CacheArtifacts::new(&image), &meta_old)
        .unwrap();
    // Directory with no metadata.json — simulates a corrupt entry.
    let corrupt_dir = tmp.path().join("cache").join("corrupt");
    fs::create_dir_all(&corrupt_dir).unwrap();
    let removed = cache.clean_keep(1).unwrap();
    assert_eq!(removed, 2);
    let remaining = cache.list().unwrap();
    assert_eq!(remaining.len(), 1);
    assert_eq!(remaining[0].key(), "new");
}
#[test]
fn cache_dir_store_overwrites_existing_key_atomically() {
    // Storing the same key twice must swap in the new image and metadata
    // and leave no staging debris (.tmp-* / .evict-*) under the root.
    let tmp = TempDir::new().unwrap();
    let cache_root = tmp.path().join("cache");
    let cache = CacheDir::with_root(cache_root.clone());
    let src_a = TempDir::new().unwrap();
    let image_a = create_fake_image(src_a.path());
    fs::write(&image_a, b"version-a").unwrap();
    let mut meta_a = test_metadata("6.14.2");
    meta_a.built_at = "2026-04-10T00:00:00Z".to_string();
    let entry_a = cache
        .store("collide", &CacheArtifacts::new(&image_a), &meta_a)
        .unwrap();
    assert_eq!(
        fs::read(entry_a.path.join("bzImage")).unwrap(),
        b"version-a"
    );
    // Second store under the same key with different content + metadata.
    let src_b = TempDir::new().unwrap();
    let image_b = create_fake_image(src_b.path());
    fs::write(&image_b, b"version-b").unwrap();
    let mut meta_b = test_metadata("6.14.2");
    meta_b.built_at = "2026-04-18T00:00:00Z".to_string();
    let entry_b = cache
        .store("collide", &CacheArtifacts::new(&image_b), &meta_b)
        .unwrap();
    assert_eq!(
        fs::read(entry_b.path.join("bzImage")).unwrap(),
        b"version-b",
        "new content must replace old content atomically"
    );
    let installed_meta = read_metadata(&entry_b.path).expect("metadata.json");
    assert_eq!(installed_meta.built_at, "2026-04-18T00:00:00Z");
    // No staging/eviction directories may survive the swap.
    for dirent in fs::read_dir(&cache_root).unwrap() {
        let name = dirent.unwrap().file_name().to_string_lossy().into_owned();
        assert!(
            !name.starts_with(".evict-") && !name.starts_with(".tmp-"),
            "unexpected leftover directory under cache_root: {name}"
        );
    }
}
#[test]
fn cache_dir_store_cleans_stale_tmp() {
    // A leftover staging dir for the same key and *our own* pid (e.g. from
    // a previous aborted store in this process) must be cleared by store().
    let tmp = TempDir::new().unwrap();
    let cache_root = tmp.path().join("cache");
    let cache = CacheDir::with_root(cache_root.clone());
    let stale_tmp = cache_root.join(format!(".tmp-mykey-{}", std::process::id()));
    fs::create_dir_all(&stale_tmp).unwrap();
    fs::write(stale_tmp.join("junk"), b"leftover").unwrap();
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store("mykey", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert!(entry.path.join("bzImage").exists());
    assert!(!stale_tmp.exists());
}
#[test]
fn cache_dir_store_atomic_under_concurrent_readers() {
    // Hammer a single key with alternating stores while reader threads
    // continuously look it up. Readers must only ever observe a complete
    // version-A or version-B image: a missing entry, an unreadable image
    // file, or torn/mixed bytes all count as atomicity violations.
    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
    use std::thread;
    let tmp = TempDir::new().unwrap();
    let cache_root = tmp.path().join("cache");
    let cache = Arc::new(CacheDir::with_root(cache_root.clone()));
    // Two distinguishable payloads, large enough that a torn read is
    // plausible if the swap were non-atomic.
    let src_a = TempDir::new().unwrap();
    let image_a = src_a.path().join("bzImage");
    let content_a = b"AAAAAAAA-image-version-a-AAAAAAAA".repeat(64);
    fs::write(&image_a, &content_a).unwrap();
    let src_b = TempDir::new().unwrap();
    let image_b = src_b.path().join("bzImage");
    let content_b = b"BBBBBBBB-image-version-b-BBBBBBBB".repeat(64);
    fs::write(&image_b, &content_b).unwrap();
    // Prime the entry so readers have something to find from the start.
    let meta_prime = test_metadata("6.14.2");
    cache
        .store("atomic-key", &CacheArtifacts::new(&image_a), &meta_prime)
        .unwrap();
    const WRITE_ITERATIONS: usize = 40;
    let stop = Arc::new(AtomicBool::new(false));
    let lookups_observed = Arc::new(AtomicUsize::new(0));
    let atomicity_violations = Arc::new(AtomicUsize::new(0));
    let reader_count = 4;
    let mut readers = Vec::with_capacity(reader_count);
    for _ in 0..reader_count {
        let cache = Arc::clone(&cache);
        let stop = Arc::clone(&stop);
        let lookups_observed = Arc::clone(&lookups_observed);
        let violations = Arc::clone(&atomicity_violations);
        let expected_a = content_a.clone();
        let expected_b = content_b.clone();
        readers.push(thread::spawn(move || {
            while !stop.load(Ordering::Relaxed) {
                // Violation 1: entry vanished mid-swap.
                let Some(entry) = cache.lookup("atomic-key") else {
                    violations.fetch_add(1, Ordering::Relaxed);
                    continue;
                };
                // Violation 2: image file unreadable mid-swap.
                let image_path = entry.image_path();
                let Ok(bytes) = fs::read(&image_path) else {
                    violations.fetch_add(1, Ordering::Relaxed);
                    continue;
                };
                // Violation 3: torn content (neither full A nor full B).
                if bytes != expected_a && bytes != expected_b {
                    violations.fetch_add(1, Ordering::Relaxed);
                }
                lookups_observed.fetch_add(1, Ordering::Relaxed);
            }
        }));
    }
    // Writer: alternate A/B payloads with distinct metadata each round.
    for i in 0..WRITE_ITERATIONS {
        let (image, label) = if i % 2 == 0 {
            (&image_a, "a")
        } else {
            (&image_b, "b")
        };
        let mut meta = test_metadata("6.14.2");
        meta.built_at = format!("2026-04-18T00:00:{:02}Z", i % 60);
        meta.config_hash = Some(format!("iter-{i}-{label}"));
        cache
            .store("atomic-key", &CacheArtifacts::new(image), &meta)
            .expect("store under concurrent readers must not fail");
    }
    stop.store(true, Ordering::Relaxed);
    for r in readers {
        r.join().expect("reader thread panicked");
    }
    assert_eq!(
        atomicity_violations.load(Ordering::Relaxed),
        0,
        "lookup observed a missing or torn cache entry during concurrent store; \
         rename-to-staging swap is not atomic",
    );
    // Guard against a vacuous pass: readers must have actually run.
    assert!(
        lookups_observed.load(Ordering::Relaxed) > 0,
        "readers never observed a successful lookup — test did not \
         actually exercise the concurrency window",
    );
    let final_entry = cache.lookup("atomic-key").expect("entry must exist");
    let final_bytes = fs::read(final_entry.image_path()).unwrap();
    assert!(
        final_bytes == content_a || final_bytes == content_b,
        "final image must match one of the writer's versions",
    );
    // No staging/eviction debris may remain after the storm.
    for dirent in fs::read_dir(&cache_root).unwrap() {
        let name = dirent.unwrap().file_name().to_string_lossy().into_owned();
        assert!(
            !name.starts_with(".evict-") && !name.starts_with(".tmp-"),
            "unexpected leftover directory under cache_root: {name}",
        );
    }
}
#[test]
fn cache_dir_store_with_vmlinux() {
    // Storing with a vmlinux artifact must install it next to the image,
    // flag has_vmlinux in metadata, and leave the source files untouched.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let vmlinux = src_dir.path().join("vmlinux");
    fs::write(&vmlinux, b"fake vmlinux ELF").unwrap();
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store(
            "with-vmlinux",
            &CacheArtifacts::new(&image).with_vmlinux(&vmlinux),
            &meta,
        )
        .unwrap();
    assert!(entry.path.join("bzImage").exists());
    assert!(entry.path.join("vmlinux").exists());
    assert!(entry.path.join("metadata.json").exists());
    assert!(entry.metadata.has_vmlinux);
    // store() copies; originals must survive.
    assert!(image.exists());
    assert!(vmlinux.exists());
}
#[test]
fn cache_dir_store_without_vmlinux() {
    // No vmlinux artifact → no vmlinux file and both flags false.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store("no-vmlinux", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert!(entry.path.join("bzImage").exists());
    assert!(!entry.path.join("vmlinux").exists());
    assert!(entry.path.join("metadata.json").exists());
    assert!(!entry.metadata.has_vmlinux);
    assert!(!entry.metadata.vmlinux_stripped);
}
#[test]
fn cache_dir_store_strips_vmlinux_internally() {
    // store() strips debug sections from the vmlinux copy it installs:
    // the cached file shrinks, loses .debug_info, and the metadata flag
    // records that the strip succeeded.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let vmlinux = create_strip_test_fixture(src_dir.path());
    let source_size = fs::metadata(&vmlinux).unwrap().len();
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store(
            "strip-in-store",
            &CacheArtifacts::new(&image).with_vmlinux(&vmlinux),
            &meta,
        )
        .unwrap();
    let cached_vmlinux = entry.path.join("vmlinux");
    let cached_size = fs::metadata(&cached_vmlinux).unwrap().len();
    assert!(
        cached_size < source_size,
        "stored vmlinux ({cached_size} bytes) should be smaller \
         than source ({source_size}) after internal strip"
    );
    // Verify structurally, not just by size: .debug_info must be gone.
    let data = fs::read(&cached_vmlinux).unwrap();
    let elf = goblin::elf::Elf::parse(&data).unwrap();
    let section_names: Vec<&str> = elf
        .section_headers
        .iter()
        .filter_map(|s| elf.shdr_strtab.get_at(s.sh_name))
        .collect();
    assert!(
        !section_names.contains(&".debug_info"),
        "internal strip should have removed .debug_info"
    );
    assert!(entry.metadata.has_vmlinux);
    assert!(
        entry.metadata.vmlinux_stripped,
        "strip-succeeds path must set vmlinux_stripped = true"
    );
}
#[test]
fn cache_dir_store_falls_back_when_strip_fails() {
    // Non-ELF input can't be stripped: store() must copy the raw bytes
    // verbatim and record vmlinux_stripped = false for later surfacing.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let vmlinux = src_dir.path().join("vmlinux");
    let raw = b"not an ELF file";
    fs::write(&vmlinux, raw).unwrap();
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store(
            "strip-fallback",
            &CacheArtifacts::new(&image).with_vmlinux(&vmlinux),
            &meta,
        )
        .unwrap();
    let cached = fs::read(entry.path.join("vmlinux")).unwrap();
    assert_eq!(cached, raw, "fallback must copy raw bytes verbatim");
    assert!(entry.metadata.has_vmlinux);
    assert!(
        !entry.metadata.vmlinux_stripped,
        "raw-fallback path must set vmlinux_stripped = false so \
         `ktstr cache list --json` surfaces the strip failure"
    );
}
/// Build a synthetic `CacheEntry` (its path is never touched on disk)
/// whose metadata carries the given vmlinux flags, for warn-logic tests.
fn make_warn_test_entry(has_vmlinux: bool, vmlinux_stripped: bool) -> CacheEntry {
    let mut metadata = KernelMetadata::new(
        KernelSource::Tarball,
        "x86_64".to_string(),
        "bzImage".to_string(),
        "2026-04-24T12:00:00Z".to_string(),
    );
    metadata.set_has_vmlinux(has_vmlinux);
    metadata.set_vmlinux_stripped(vmlinux_stripped);
    CacheEntry {
        key: "test-key".to_string(),
        path: PathBuf::from("/nonexistent/entry"),
        metadata,
    }
}
// should_warn_unstripped: warn exactly when a vmlinux exists but was not
// stripped — the full truth table over the two flags.
#[test]
fn should_warn_unstripped_fires_when_vmlinux_present_and_unstripped() {
    assert!(
        should_warn_unstripped(&make_warn_test_entry(true, false)),
        "has_vmlinux=true + vmlinux_stripped=false must warn"
    );
}
#[test]
fn should_warn_unstripped_silent_when_vmlinux_stripped() {
    assert!(
        !should_warn_unstripped(&make_warn_test_entry(true, true)),
        "has_vmlinux=true + vmlinux_stripped=true must not warn"
    );
}
#[test]
fn should_warn_unstripped_silent_when_no_vmlinux() {
    assert!(
        !should_warn_unstripped(&make_warn_test_entry(false, false)),
        "has_vmlinux=false must not warn (no vmlinux to worry about)"
    );
}
#[test]
fn cache_dir_store_preserves_original_vmlinux() {
    // store() strips into the cache copy only; the source vmlinux must be
    // untouched (same path, same byte size).
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let vmlinux = create_strip_test_fixture(src_dir.path());
    let source_size = fs::metadata(&vmlinux).unwrap().len();
    let meta = test_metadata("6.14.2");
    cache
        .store(
            "preserve-src",
            &CacheArtifacts::new(&image).with_vmlinux(&vmlinux),
            &meta,
        )
        .unwrap();
    assert!(vmlinux.exists(), "source vmlinux must survive store()");
    assert_eq!(
        fs::metadata(&vmlinux).unwrap().len(),
        source_size,
        "source vmlinux size must not change"
    );
}
#[test]
fn cache_dir_store_preserves_original_image() {
    // store() copies rather than moves the kernel image.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    cache
        .store("key", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert!(image.exists());
}
#[test]
fn cache_entry_image_path_joins_key_with_image_name() {
    // image_path() is the entry dir joined with the metadata image name,
    // and the file must actually exist after a successful store.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let entry = cache
        .store(
            "key",
            &CacheArtifacts::new(&image),
            &test_metadata("6.14.2"),
        )
        .unwrap();
    assert_eq!(entry.image_path(), entry.path.join("bzImage"));
    assert!(entry.image_path().exists());
}
#[test]
fn cache_entry_vmlinux_path_some_when_stored() {
    // vmlinux_path() is Some(<entry>/vmlinux) when a vmlinux was stored.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let vmlinux = create_strip_test_fixture(src_dir.path());
    let entry = cache
        .store(
            "with-vml",
            &CacheArtifacts::new(&image).with_vmlinux(&vmlinux),
            &test_metadata("6.14.2"),
        )
        .unwrap();
    let vml_path = entry.vmlinux_path().expect("vmlinux_path() should be Some");
    assert_eq!(vml_path, entry.path.join("vmlinux"));
    assert!(vml_path.exists());
}
#[test]
fn cache_entry_vmlinux_path_none_when_not_stored() {
    // No stored vmlinux → accessor returns None, not a dangling path.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let entry = cache
        .store(
            "no-vml",
            &CacheArtifacts::new(&image),
            &test_metadata("6.14.2"),
        )
        .unwrap();
    assert!(entry.vmlinux_path().is_none());
}
#[test]
fn kconfig_status_matches_when_hash_equal() {
    // Equal recorded and current hashes → Matches.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2").with_ktstr_kconfig_hash(Some("deadbeef".to_string()));
    let entry = cache
        .store("kc-match", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert_eq!(entry.kconfig_status("deadbeef"), KconfigStatus::Matches);
}
#[test]
fn kconfig_status_untracked_when_no_hash_in_entry() {
    // Entries stored before hash tracking existed report Untracked, never
    // Stale, regardless of the current hash.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = KernelMetadata {
        ktstr_kconfig_hash: None,
        ..test_metadata("6.14.2")
    };
    let entry = cache
        .store("kc-untracked", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    assert_eq!(entry.kconfig_status("anything"), KconfigStatus::Untracked);
}
#[test]
fn kconfig_status_stale_pins_cached_and_current_field_order() {
    // Guards against swapping the Stale { cached, current } fields, which
    // would silently invert any user-facing diff message.
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2").with_ktstr_kconfig_hash(Some("old_cached".to_string()));
    let entry = cache
        .store("kc-stale", &CacheArtifacts::new(&image), &meta)
        .unwrap();
    match entry.kconfig_status("new_current") {
        KconfigStatus::Stale { cached, current } => {
            assert_eq!(
                cached, "old_cached",
                "`cached` must hold the hash recorded in the entry"
            );
            assert_eq!(
                current, "new_current",
                "`current` must hold the hash the caller passed in"
            );
        }
        other => panic!("expected KconfigStatus::Stale, got {other:?}"),
    }
}
// prefer_source_tree_for_dwarf: returns the recorded local source tree only
// when the source is Local, a tree path was recorded, and a vmlinux in that
// tree still matches the size + mtime captured at build time.
#[test]
fn prefer_source_tree_local_with_vmlinux() {
    // Happy path: Local source, tree present, vmlinux stat matches.
    let tmp = TempDir::new().unwrap();
    let cache_entry = tmp.path().join("cache");
    let src_tree = tmp.path().join("src");
    fs::create_dir_all(&cache_entry).unwrap();
    fs::create_dir_all(&src_tree).unwrap();
    let vmlinux = src_tree.join("vmlinux");
    fs::write(&vmlinux, b"fake-elf").unwrap();
    let stat = fs::metadata(&vmlinux).unwrap();
    // Whole-second mtime, matching what was recorded at store time.
    let mtime_secs = stat
        .modified()
        .unwrap()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs() as i64;
    let meta = KernelMetadata {
        version: Some("6.14.2".to_string()),
        source: KernelSource::Local {
            source_tree_path: Some(src_tree.clone()),
            git_hash: None,
        },
        arch: "x86_64".to_string(),
        image_name: "bzImage".to_string(),
        config_hash: None,
        built_at: "2026-04-18T10:00:00Z".to_string(),
        ktstr_kconfig_hash: None,
        has_vmlinux: true,
        vmlinux_stripped: true,
        source_vmlinux_size: Some(stat.len()),
        source_vmlinux_mtime_secs: Some(mtime_secs),
    };
    fs::write(
        cache_entry.join("metadata.json"),
        serde_json::to_string(&meta).unwrap(),
    )
    .unwrap();
    assert_eq!(prefer_source_tree_for_dwarf(&cache_entry), Some(src_tree));
}
#[test]
fn prefer_source_tree_local_without_vmlinux_in_tree() {
    // Local source but no vmlinux file in the tree → None.
    let tmp = TempDir::new().unwrap();
    let cache_entry = tmp.path().join("cache");
    let src_tree = tmp.path().join("src");
    fs::create_dir_all(&cache_entry).unwrap();
    fs::create_dir_all(&src_tree).unwrap();
    let meta = KernelMetadata {
        version: None,
        source: KernelSource::Local {
            source_tree_path: Some(src_tree),
            git_hash: None,
        },
        arch: "x86_64".to_string(),
        image_name: "bzImage".to_string(),
        config_hash: None,
        built_at: "2026-04-18T10:00:00Z".to_string(),
        ktstr_kconfig_hash: None,
        has_vmlinux: false,
        vmlinux_stripped: false,
        source_vmlinux_size: Some(42),
        source_vmlinux_mtime_secs: Some(1_700_000_000),
    };
    fs::write(
        cache_entry.join("metadata.json"),
        serde_json::to_string(&meta).unwrap(),
    )
    .unwrap();
    assert_eq!(prefer_source_tree_for_dwarf(&cache_entry), None);
}
#[test]
fn prefer_source_tree_tarball_source_returns_none() {
    // Non-Local sources have no tree to prefer → None.
    let tmp = TempDir::new().unwrap();
    let cache_entry = tmp.path().join("cache");
    fs::create_dir_all(&cache_entry).unwrap();
    let meta = KernelMetadata {
        version: Some("6.14.2".to_string()),
        source: KernelSource::Tarball,
        arch: "x86_64".to_string(),
        image_name: "bzImage".to_string(),
        config_hash: None,
        built_at: "2026-04-18T10:00:00Z".to_string(),
        ktstr_kconfig_hash: None,
        has_vmlinux: true,
        vmlinux_stripped: true,
        source_vmlinux_size: None,
        source_vmlinux_mtime_secs: None,
    };
    fs::write(
        cache_entry.join("metadata.json"),
        serde_json::to_string(&meta).unwrap(),
    )
    .unwrap();
    assert_eq!(prefer_source_tree_for_dwarf(&cache_entry), None);
}
#[test]
fn prefer_source_tree_no_metadata_returns_none() {
    // Missing metadata.json → None, not an error.
    let tmp = TempDir::new().unwrap();
    assert_eq!(prefer_source_tree_for_dwarf(tmp.path()), None);
}
#[test]
fn prefer_source_tree_metadata_parse_failure_returns_none() {
    // Both flavors of bad metadata — valid JSON with the wrong shape, and
    // outright non-JSON — must degrade to None rather than error.
    let tmp = TempDir::new().unwrap();
    let cache_entry = tmp.path().join("cache");
    fs::create_dir_all(&cache_entry).unwrap();
    fs::write(
        cache_entry.join("metadata.json"),
        br#"{"not_kernel_metadata": true}"#,
    )
    .unwrap();
    assert_eq!(
        prefer_source_tree_for_dwarf(&cache_entry),
        None,
        "malformed metadata.json must short-circuit to None, not bail",
    );
    let other_entry = tmp.path().join("other");
    fs::create_dir_all(&other_entry).unwrap();
    fs::write(other_entry.join("metadata.json"), b"not json at all {{{").unwrap();
    assert_eq!(
        prefer_source_tree_for_dwarf(&other_entry),
        None,
        "unparseable metadata.json must short-circuit to None, not bail",
    );
}
#[test]
fn prefer_source_tree_local_with_none_source_tree_path_returns_none() {
    // Local source but no recorded tree path → short-circuit to None.
    let tmp = TempDir::new().unwrap();
    let cache_entry = tmp.path().join("cache");
    fs::create_dir_all(&cache_entry).unwrap();
    let meta = KernelMetadata {
        version: Some("6.14.2".to_string()),
        source: KernelSource::Local {
            source_tree_path: None,
            git_hash: Some("abc123".to_string()),
        },
        arch: "x86_64".to_string(),
        image_name: "bzImage".to_string(),
        config_hash: None,
        built_at: "2026-04-18T10:00:00Z".to_string(),
        ktstr_kconfig_hash: None,
        has_vmlinux: true,
        vmlinux_stripped: true,
        source_vmlinux_size: Some(42),
        source_vmlinux_mtime_secs: Some(1_700_000_000),
    };
    fs::write(
        cache_entry.join("metadata.json"),
        serde_json::to_string(&meta).unwrap(),
    )
    .unwrap();
    assert_eq!(
        prefer_source_tree_for_dwarf(&cache_entry),
        None,
        "Local entry with source_tree_path=None must short-circuit \
         to None at the `let src_path = source_tree_path?;` line \
         — no filesystem probe must run",
    );
}
// The stat-validation gate: the tree's vmlinux must match the size and
// mtime recorded at build time, otherwise the tree is considered rebuilt
// and its DWARF untrustworthy.
#[test]
fn prefer_source_tree_validates_matching_vmlinux_stat_and_returns_path() {
    let tmp = TempDir::new().unwrap();
    let cache_entry = tmp.path().join("cache");
    let src_tree = tmp.path().join("src");
    fs::create_dir_all(&cache_entry).unwrap();
    fs::create_dir_all(&src_tree).unwrap();
    let vmlinux = src_tree.join("vmlinux");
    fs::write(&vmlinux, b"fake-elf-bytes").unwrap();
    let stat = fs::metadata(&vmlinux).unwrap();
    let mtime_secs = stat
        .modified()
        .unwrap()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs() as i64;
    let meta = KernelMetadata {
        version: None,
        source: KernelSource::Local {
            source_tree_path: Some(src_tree.clone()),
            git_hash: None,
        },
        arch: "x86_64".to_string(),
        image_name: "bzImage".to_string(),
        config_hash: None,
        built_at: "2026-04-18T10:00:00Z".to_string(),
        ktstr_kconfig_hash: None,
        has_vmlinux: true,
        vmlinux_stripped: true,
        source_vmlinux_size: Some(stat.len()),
        source_vmlinux_mtime_secs: Some(mtime_secs),
    };
    fs::write(
        cache_entry.join("metadata.json"),
        serde_json::to_string(&meta).unwrap(),
    )
    .unwrap();
    assert_eq!(
        prefer_source_tree_for_dwarf(&cache_entry),
        Some(src_tree),
        "matching size + mtime must pass the validation gate"
    );
}
#[test]
fn prefer_source_tree_size_mismatch_returns_none() {
    // Recorded size off by one byte → gate fails.
    let tmp = TempDir::new().unwrap();
    let cache_entry = tmp.path().join("cache");
    let src_tree = tmp.path().join("src");
    fs::create_dir_all(&cache_entry).unwrap();
    fs::create_dir_all(&src_tree).unwrap();
    let vmlinux = src_tree.join("vmlinux");
    fs::write(&vmlinux, b"fake-elf-bytes").unwrap();
    let stat = fs::metadata(&vmlinux).unwrap();
    let mtime_secs = stat
        .modified()
        .unwrap()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs() as i64;
    let meta = KernelMetadata {
        version: None,
        source: KernelSource::Local {
            source_tree_path: Some(src_tree),
            git_hash: None,
        },
        arch: "x86_64".to_string(),
        image_name: "bzImage".to_string(),
        config_hash: None,
        built_at: "2026-04-18T10:00:00Z".to_string(),
        ktstr_kconfig_hash: None,
        has_vmlinux: true,
        vmlinux_stripped: true,
        // Deliberately one byte off the real size.
        source_vmlinux_size: Some(stat.len() + 1),
        source_vmlinux_mtime_secs: Some(mtime_secs),
    };
    fs::write(
        cache_entry.join("metadata.json"),
        serde_json::to_string(&meta).unwrap(),
    )
    .unwrap();
    assert_eq!(
        prefer_source_tree_for_dwarf(&cache_entry),
        None,
        "size mismatch must drop validation and return None"
    );
}
#[test]
fn prefer_source_tree_mtime_mismatch_returns_none() {
    // Recorded mtime an hour behind the file's → gate fails.
    let tmp = TempDir::new().unwrap();
    let cache_entry = tmp.path().join("cache");
    let src_tree = tmp.path().join("src");
    fs::create_dir_all(&cache_entry).unwrap();
    fs::create_dir_all(&src_tree).unwrap();
    let vmlinux = src_tree.join("vmlinux");
    fs::write(&vmlinux, b"fake-elf-bytes").unwrap();
    let stat = fs::metadata(&vmlinux).unwrap();
    let mtime_secs = stat
        .modified()
        .unwrap()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs() as i64;
    let meta = KernelMetadata {
        version: None,
        source: KernelSource::Local {
            source_tree_path: Some(src_tree),
            git_hash: None,
        },
        arch: "x86_64".to_string(),
        image_name: "bzImage".to_string(),
        config_hash: None,
        built_at: "2026-04-18T10:00:00Z".to_string(),
        ktstr_kconfig_hash: None,
        has_vmlinux: true,
        vmlinux_stripped: true,
        source_vmlinux_size: Some(stat.len()),
        // Deliberately one hour off the real mtime.
        source_vmlinux_mtime_secs: Some(mtime_secs - 3600),
    };
    fs::write(
        cache_entry.join("metadata.json"),
        serde_json::to_string(&meta).unwrap(),
    )
    .unwrap();
    assert_eq!(
        prefer_source_tree_for_dwarf(&cache_entry),
        None,
        "mtime mismatch must drop validation and return None"
    );
}
#[test]
fn recover_local_source_tree_local_with_path_returns_source_tree() {
let tmp = TempDir::new().unwrap();
let cache_entry = tmp.path().join("cache");
let src_tree = tmp.path().join("src");
fs::create_dir_all(&cache_entry).unwrap();
fs::create_dir_all(&src_tree).unwrap();
let meta = KernelMetadata {
version: Some("6.14.2".to_string()),
source: KernelSource::Local {
source_tree_path: Some(src_tree.clone()),
git_hash: Some("abc1234".to_string()),
},
arch: "x86_64".to_string(),
image_name: "bzImage".to_string(),
config_hash: None,
built_at: "2026-04-18T10:00:00Z".to_string(),
ktstr_kconfig_hash: None,
has_vmlinux: false,
vmlinux_stripped: false,
source_vmlinux_size: None,
source_vmlinux_mtime_secs: None,
};
fs::write(
cache_entry.join("metadata.json"),
serde_json::to_string(&meta).unwrap(),
)
.unwrap();
assert_eq!(recover_local_source_tree(&cache_entry), Some(src_tree));
}
#[test]
// With no metadata.json in the cache entry at all, recovery yields None.
fn recover_local_source_tree_no_metadata_returns_none() {
    let tmp = TempDir::new().unwrap();
    let recovered = recover_local_source_tree(tmp.path());
    assert_eq!(recovered, None);
}
#[test]
// Tarball-built cache entries carry no local source tree, so recovery
// must return None even though the metadata itself is well-formed.
fn recover_local_source_tree_tarball_source_returns_none() {
    let tmp = TempDir::new().unwrap();
    let cache_entry = tmp.path().join("cache");
    fs::create_dir_all(&cache_entry).unwrap();
    let meta = KernelMetadata {
        version: Some("6.14.2".to_string()),
        // The source variant is the discriminating input for this test.
        source: KernelSource::Tarball,
        arch: "x86_64".to_string(),
        image_name: "bzImage".to_string(),
        config_hash: None,
        built_at: "2026-04-18T10:00:00Z".to_string(),
        ktstr_kconfig_hash: None,
        has_vmlinux: true,
        vmlinux_stripped: true,
        source_vmlinux_size: None,
        source_vmlinux_mtime_secs: None,
    };
    fs::write(
        cache_entry.join("metadata.json"),
        serde_json::to_string(&meta).unwrap(),
    )
    .unwrap();
    assert_eq!(recover_local_source_tree(&cache_entry), None);
}
#[test]
// A Local entry whose source_tree_path was never recorded (None) gives
// recovery nothing to hand back, so it must return None.
fn recover_local_source_tree_local_with_none_path_returns_none() {
    let tmp = TempDir::new().unwrap();
    let cache_entry = tmp.path().join("cache");
    fs::create_dir_all(&cache_entry).unwrap();
    let meta = KernelMetadata {
        version: Some("6.14.2".to_string()),
        source: KernelSource::Local {
            // The missing path is the discriminating input for this test.
            source_tree_path: None,
            git_hash: Some("abc1234".to_string()),
        },
        arch: "x86_64".to_string(),
        image_name: "bzImage".to_string(),
        config_hash: None,
        built_at: "2026-04-18T10:00:00Z".to_string(),
        ktstr_kconfig_hash: None,
        has_vmlinux: true,
        vmlinux_stripped: true,
        source_vmlinux_size: None,
        source_vmlinux_mtime_secs: None,
    };
    fs::write(
        cache_entry.join("metadata.json"),
        serde_json::to_string(&meta).unwrap(),
    )
    .unwrap();
    assert_eq!(recover_local_source_tree(&cache_entry), None);
}
#[test]
// Unparseable metadata.json must degrade the same way as a missing file:
// recovery returns None instead of erroring out.
fn recover_local_source_tree_malformed_metadata_returns_none() {
    let tmp = TempDir::new().unwrap();
    let cache_entry = tmp.path().join("cache");
    fs::create_dir_all(&cache_entry).unwrap();
    let bogus: &[u8] = br#"{"not_kernel_metadata": true}"#;
    fs::write(cache_entry.join("metadata.json"), bogus).unwrap();
    assert_eq!(recover_local_source_tree(&cache_entry), None);
}
// True when the ELF's symtab holds a symbol whose strtab name is exactly
// `name` and whose st_value is nonzero (zero-valued entries are treated as
// placeholders and ignored).
fn has_symbol(elf: &goblin::elf::Elf, name: &str) -> bool {
    for sym in elf.syms.iter() {
        if sym.st_value == 0 {
            continue;
        }
        if elf.strtab.get_at(sym.st_name) == Some(name) {
            return true;
        }
    }
    false
}
// Builds a synthetic vmlinux ELF on disk that carries one section of every
// class the strip pipeline distinguishes: code, BTF metadata (.BTF kept,
// .BTF.ext dropped), read-only data, NOBITS, DWARF debug sections, and
// writable data sections (.data, .data..percpu, .init.data) each anchored by
// a named symbol so later tests can assert symtab survival.
// Returns the path of the written file (always `<dir>/vmlinux`).
fn create_strip_test_fixture(dir: &Path) -> PathBuf {
    use object::write;
    let mut obj = write::Object::new(
        object::BinaryFormat::Elf,
        object::Architecture::X86_64,
        object::Endianness::Little,
    );
    // .text with an anchor symbol (st_value 0x10, nonzero so has_symbol sees it).
    let text_id = obj.add_section(Vec::new(), b".text".to_vec(), object::SectionKind::Text);
    obj.append_section_data(text_id, &[0xCC; 64], 1);
    let _ = obj.add_symbol(write::Symbol {
        name: b"test_text_symbol".to_vec(),
        value: 0x10,
        size: 8,
        kind: object::SymbolKind::Data,
        scope: object::SymbolScope::Compilation,
        weak: false,
        section: write::SymbolSection::Section(text_id),
        flags: object::SymbolFlags::None,
    });
    // .BTF must survive the strip; .BTF.ext (added below) must not.
    let btf_id = obj.add_section(Vec::new(), b".BTF".to_vec(), object::SectionKind::Metadata);
    obj.append_section_data(btf_id, &[0xEB; 256], 1);
    let rodata_id = obj.add_section(
        Vec::new(),
        b".rodata".to_vec(),
        object::SectionKind::ReadOnlyData,
    );
    obj.append_section_data(rodata_id, &[0xCA; 512], 1);
    // .bss starts out as SHT_NOBITS, with its own anchor symbol.
    let bss_id = obj.add_section(
        Vec::new(),
        b".bss".to_vec(),
        object::SectionKind::UninitializedData,
    );
    obj.append_section_bss(bss_id, 256, 8);
    let _ = obj.add_symbol(write::Symbol {
        name: b"test_bss_symbol".to_vec(),
        value: 0x50,
        size: 8,
        kind: object::SymbolKind::Data,
        scope: object::SymbolScope::Compilation,
        weak: false,
        section: write::SymbolSection::Section(bss_id),
        flags: object::SymbolFlags::None,
    });
    let btf_ext_id = obj.add_section(
        Vec::new(),
        b".BTF.ext".to_vec(),
        object::SectionKind::Metadata,
    );
    obj.append_section_data(btf_ext_id, &[0xE1; 128], 1);
    // DWARF payload the strip is expected to delete entirely.
    let debug_id = obj.add_section(
        Vec::new(),
        b".debug_info".to_vec(),
        object::SectionKind::Debug,
    );
    obj.append_section_data(debug_id, &[0xAA; 4096], 1);
    let debug_str_id = obj.add_section(
        Vec::new(),
        b".debug_str".to_vec(),
        object::SectionKind::Debug,
    );
    obj.append_section_data(debug_str_id, &[0xBB; 2048], 1);
    // Writable data sections: content should be zeroable, symbols kept.
    let data_id = obj.add_section(Vec::new(), b".data".to_vec(), object::SectionKind::Data);
    obj.append_section_data(data_id, &[0xDD; 512], 8);
    let _ = obj.add_symbol(write::Symbol {
        name: b"test_data_symbol".to_vec(),
        value: 0x20,
        size: 8,
        kind: object::SymbolKind::Data,
        scope: object::SymbolScope::Compilation,
        weak: false,
        section: write::SymbolSection::Section(data_id),
        flags: object::SymbolFlags::None,
    });
    let percpu_id = obj.add_section(
        Vec::new(),
        b".data..percpu".to_vec(),
        object::SectionKind::Data,
    );
    obj.append_section_data(percpu_id, &[0xCC; 256], 8);
    let _ = obj.add_symbol(write::Symbol {
        name: b"test_percpu_symbol".to_vec(),
        value: 0x30,
        size: 8,
        kind: object::SymbolKind::Data,
        scope: object::SymbolScope::Compilation,
        weak: false,
        section: write::SymbolSection::Section(percpu_id),
        flags: object::SymbolFlags::None,
    });
    let initdata_id = obj.add_section(
        Vec::new(),
        b".init.data".to_vec(),
        object::SectionKind::Data,
    );
    obj.append_section_data(initdata_id, &[0x11; 1024], 8);
    let _ = obj.add_symbol(write::Symbol {
        name: b"test_initdata_symbol".to_vec(),
        value: 0x40,
        size: 8,
        kind: object::SymbolKind::Data,
        scope: object::SymbolScope::Compilation,
        weak: false,
        section: write::SymbolSection::Section(initdata_id),
        flags: object::SymbolFlags::None,
    });
    let data = obj.write().unwrap();
    let path = dir.join("vmlinux");
    fs::write(&path, &data).unwrap();
    path
}
#[test]
// End-to-end strip: .debug_* and .BTF.ext disappear, the keep list survives,
// and the output file is strictly smaller than the input.
fn strip_vmlinux_debug_applies_keep_list() {
    let src = TempDir::new().unwrap();
    let vmlinux = create_strip_test_fixture(src.path());
    let original_size = fs::metadata(&vmlinux).unwrap().len();
    // First make sure the fixture actually carries everything the
    // post-strip assertions depend on.
    let source_data = fs::read(&vmlinux).unwrap();
    let source_elf = goblin::elf::Elf::parse(&source_data).unwrap();
    let source_section_names: Vec<&str> = source_elf
        .section_headers
        .iter()
        .filter_map(|s| source_elf.shdr_strtab.get_at(s.sh_name))
        .collect();
    let fixture_sections = [
        ".debug_info",
        ".debug_str",
        ".BTF.ext",
        ".BTF",
        ".rodata",
        ".bss",
        ".symtab",
        ".strtab",
    ];
    for name in fixture_sections {
        assert!(
            source_section_names.contains(&name),
            "fixture missing expected section {name}"
        );
    }
    let stripped = strip_vmlinux_debug(&vmlinux).unwrap();
    let stripped_path = stripped.path();
    let stripped_size = fs::metadata(stripped_path).unwrap().len();
    assert!(
        stripped_size < original_size,
        "stripped ({stripped_size}) should be smaller than original ({original_size})"
    );
    let data = fs::read(stripped_path).unwrap();
    let elf = goblin::elf::Elf::parse(&data).unwrap();
    let section_names: Vec<&str> = elf
        .section_headers
        .iter()
        .filter_map(|s| elf.shdr_strtab.get_at(s.sh_name))
        .collect();
    for gone in [".debug_info", ".debug_str", ".BTF.ext"] {
        assert!(
            !section_names.contains(&gone),
            "should not contain {gone}"
        );
    }
    for name in [".BTF", ".rodata", ".bss", ".symtab", ".strtab"] {
        assert!(section_names.contains(&name), "should preserve {name}");
    }
}
#[test]
// The symbol table must remain readable after stripping: both the .text and
// .bss anchor symbols stay resolvable by name.
fn strip_vmlinux_debug_symtab_readable() {
    let src = TempDir::new().unwrap();
    let vmlinux = create_strip_test_fixture(src.path());
    let stripped = strip_vmlinux_debug(&vmlinux).unwrap();
    let data = fs::read(stripped.path()).unwrap();
    let elf = goblin::elf::Elf::parse(&data).unwrap();
    assert!(
        has_symbol(&elf, "test_text_symbol"),
        "stripped ELF should contain test_text_symbol in symtab"
    );
    assert!(
        has_symbol(&elf, "test_bss_symbol"),
        "stripped ELF should contain test_bss_symbol in symtab"
    );
}
#[test]
// Every section named in VMLINUX_ZERO_DATA_SECTIONS (plus the speculative
// list) must be converted to an empty SHT_NOBITS section by the strip, while
// the symbols that pointed into those sections remain in the symtab.
fn strip_vmlinux_debug_zeros_data_sections() {
    let src = TempDir::new().unwrap();
    let vmlinux = create_strip_test_fixture(src.path());
    use goblin::elf::section_header::SHT_NOBITS;
    let source_data = fs::read(&vmlinux).unwrap();
    let source_elf = goblin::elf::Elf::parse(&source_data).unwrap();
    // Precondition pass: each target section must start out with real bytes
    // (non-NOBITS, nonzero size) or zeroing would be unobservable.
    for name_bytes in crate::monitor::symbols::VMLINUX_ZERO_DATA_SECTIONS
        .iter()
        .chain(SPECULATIVE_ZERO_DATA_SECTIONS.iter())
    {
        let name = std::str::from_utf8(name_bytes).unwrap();
        let sh = source_elf
            .section_headers
            .iter()
            .find(|s| source_elf.shdr_strtab.get_at(s.sh_name) == Some(name))
            .unwrap_or_else(|| panic!("fixture missing expected {name}"));
        assert_ne!(
            sh.sh_type,
            SHT_NOBITS,
            "fixture {name} must start non-SHT_NOBITS so the strip is observable; got sh_type={} ({})",
            sh.sh_type,
            sh_type_name(sh.sh_type),
        );
        assert!(
            sh.sh_size > 0,
            "fixture {name} must start with nonzero sh_size"
        );
    }
    let stripped = strip_vmlinux_debug(&vmlinux).unwrap();
    let stripped_path = stripped.path();
    let data = fs::read(stripped_path).unwrap();
    let elf = goblin::elf::Elf::parse(&data).unwrap();
    // Closure helpers: locate a section by name, then assert it was turned
    // into an empty NOBITS section.
    let find_section = |name: &str| {
        elf.section_headers
            .iter()
            .find(|s| elf.shdr_strtab.get_at(s.sh_name) == Some(name))
            .unwrap_or_else(|| panic!("section {name} missing from stripped ELF"))
    };
    let assert_nobits_empty = |name: &str| {
        let sh = find_section(name);
        let sh_type = sh.sh_type;
        let sh_size = sh.sh_size;
        assert_eq!(
            sh_type,
            SHT_NOBITS,
            "section {name} should be SHT_NOBITS after strip, got sh_type={sh_type} ({})",
            sh_type_name(sh_type),
        );
        assert_eq!(
            sh_size, 0,
            "section {name} should have sh_size == 0 after strip, got {sh_size}",
        );
    };
    for name_bytes in crate::monitor::symbols::VMLINUX_ZERO_DATA_SECTIONS
        .iter()
        .chain(SPECULATIVE_ZERO_DATA_SECTIONS.iter())
    {
        let name = std::str::from_utf8(name_bytes).unwrap();
        assert_nobits_empty(name);
    }
    // .text is zeroed as well, even though it is not in the data lists.
    assert_nobits_empty(".text");
    // Zeroing section contents must not drop the symbols anchored in them.
    assert!(
        has_symbol(&elf, "test_data_symbol"),
        "test_data_symbol dropped by strip"
    );
    assert!(
        has_symbol(&elf, "test_percpu_symbol"),
        "test_percpu_symbol dropped by strip"
    );
    assert!(
        has_symbol(&elf, "test_initdata_symbol"),
        "test_initdata_symbol dropped by strip"
    );
}
#[test]
// Fallback prefix-strip path: .debug_* sections vanish while BTF, code,
// data, read-only data and the symtab all survive.
fn strip_debug_prefix_removes_debug_and_preserves_rest() {
    let src = TempDir::new().unwrap();
    let vmlinux = create_strip_test_fixture(src.path());
    let raw = fs::read(&vmlinux).unwrap();
    let processed = neutralize_relocs(&raw).unwrap();
    let stripped = strip_debug_prefix(&processed).unwrap();
    let elf = goblin::elf::Elf::parse(&stripped).unwrap();
    let names: Vec<&str> = elf
        .section_headers
        .iter()
        .filter_map(|s| elf.shdr_strtab.get_at(s.sh_name))
        .collect();
    for gone in [".debug_info", ".debug_str"] {
        assert!(
            !names.contains(&gone),
            "fallback should remove {gone}"
        );
    }
    for name in [".BTF", ".BTF.ext", ".text", ".data", ".rodata", ".symtab"] {
        assert!(
            names.contains(&name),
            "fallback must preserve {name}, got sections {names:?}"
        );
    }
}
#[test]
// The fallback strip must also delete .comment (compiler version banner)
// while leaving real code sections like .text in place.
fn strip_debug_prefix_removes_dot_comment() {
    use object::write;
    // Minimal in-memory fixture: .text plus a .comment string section.
    let mut obj = write::Object::new(
        object::BinaryFormat::Elf,
        object::Architecture::X86_64,
        object::Endianness::Little,
    );
    let text_id = obj.add_section(Vec::new(), b".text".to_vec(), object::SectionKind::Text);
    obj.append_section_data(text_id, &[0xCC; 64], 1);
    let _ = obj.add_symbol(write::Symbol {
        name: b"test_text_symbol".to_vec(),
        value: 0x0,
        size: 8,
        kind: object::SymbolKind::Data,
        scope: object::SymbolScope::Compilation,
        weak: false,
        section: write::SymbolSection::Section(text_id),
        flags: object::SymbolFlags::None,
    });
    let comment_id = obj.add_section(
        Vec::new(),
        b".comment".to_vec(),
        object::SectionKind::OtherString,
    );
    obj.append_section_data(comment_id, b"GCC: (GNU) 14.2.1 20250207\0", 1);
    let data = obj.write().unwrap();
    // Precondition: both sections really exist before the strip.
    let source_elf = goblin::elf::Elf::parse(&data).unwrap();
    let source_names: Vec<&str> = source_elf
        .section_headers
        .iter()
        .filter_map(|s| source_elf.shdr_strtab.get_at(s.sh_name))
        .collect();
    for name in [".comment", ".text"] {
        assert!(
            source_names.contains(&name),
            "fixture missing expected section {name}; got {source_names:?}"
        );
    }
    let processed = neutralize_relocs(&data).unwrap();
    let stripped = strip_debug_prefix(&processed).unwrap();
    let elf = goblin::elf::Elf::parse(&stripped).unwrap();
    let names: Vec<&str> = elf
        .section_headers
        .iter()
        .filter_map(|s| elf.shdr_strtab.get_at(s.sh_name))
        .collect();
    assert!(
        !names.contains(&".comment"),
        "fallback must remove .comment, got sections {names:?}"
    );
    assert!(
        names.contains(&".text"),
        "fallback must preserve .text, got sections {names:?}"
    );
}
#[test]
// The fallback strip's prefix arm must delete every relocation-section
// spelling (.rela.*, .rel.*, .relr.*, .crel.*) while keeping .text.
fn strip_debug_prefix_removes_reloc_prefix_sections() {
    use object::elf::{SHT_REL, SHT_RELA, SHT_RELR};
    // One section per relocation flavor, each with a small dummy payload.
    let mut obj = build_base_elf_with_text_symbol(object::Architecture::X86_64);
    let rela_id = obj.add_section(
        Vec::new(),
        b".rela.text".to_vec(),
        object::SectionKind::Elf(SHT_RELA),
    );
    obj.append_section_data(rela_id, &[0xA5; 24], 1);
    let rel_id = obj.add_section(
        Vec::new(),
        b".rel.data".to_vec(),
        object::SectionKind::Elf(SHT_REL),
    );
    obj.append_section_data(rel_id, &[0xC7; 16], 1);
    let relr_id = obj.add_section(
        Vec::new(),
        b".relr.dyn".to_vec(),
        object::SectionKind::Elf(SHT_RELR),
    );
    obj.append_section_data(relr_id, &[0xD3; 16], 1);
    let crel_id = obj.add_section(
        Vec::new(),
        b".crel.text".to_vec(),
        object::SectionKind::Elf(object::elf::SHT_CREL),
    );
    obj.append_section_data(crel_id, &[0xE4; 8], 1);
    let data = obj.write().unwrap();
    // Precondition: all four reloc sections and .text are present.
    let source_elf = goblin::elf::Elf::parse(&data).unwrap();
    let source_names: Vec<&str> = source_elf
        .section_headers
        .iter()
        .filter_map(|s| source_elf.shdr_strtab.get_at(s.sh_name))
        .collect();
    for name in [
        ".rela.text",
        ".rel.data",
        ".relr.dyn",
        ".crel.text",
        ".text",
    ] {
        assert!(
            source_names.contains(&name),
            "fixture missing expected section {name}; got {source_names:?}"
        );
    }
    let processed = neutralize_relocs(&data).unwrap();
    let stripped = strip_debug_prefix(&processed).unwrap();
    let elf = goblin::elf::Elf::parse(&stripped).unwrap();
    let names: Vec<&str> = elf
        .section_headers
        .iter()
        .filter_map(|s| elf.shdr_strtab.get_at(s.sh_name))
        .collect();
    for name in [".rela.text", ".rel.data", ".relr.dyn", ".crel.text"] {
        assert!(
            !names.contains(&name),
            "fallback must delete {name} (prefix arm), got sections {names:?}"
        );
    }
    assert!(
        names.contains(&".text"),
        "fallback must preserve .text, got sections {names:?}"
    );
}
#[test]
// Core contract of neutralize_relocs: every SHT_REL/SHT_RELA/SHT_RELR
// section header gets sh_size zeroed and sh_type rewritten to SHT_PROGBITS
// — with or without SHF_ALLOC — while sh_offset, sh_flags, the payload
// bytes, non-reloc sections, and the overall buffer length stay untouched.
fn neutralize_relocs_zeros_sh_size_of_every_reloc_section() {
    use object::elf::{SHF_ALLOC, SHT_REL, SHT_RELA, SHT_RELR};
    // Fixture: alloc RELA (.rela.kaslr), alloc REL (.rel.foo), non-alloc
    // RELA (.rela.debug_info), and a RELR section (.relr.dyn).
    let mut obj = build_base_elf_with_text_symbol(object::Architecture::X86_64);
    let kaslr_id = obj.add_section(
        Vec::new(),
        b".rela.kaslr".to_vec(),
        object::SectionKind::Elf(SHT_RELA),
    );
    obj.append_section_data(kaslr_id, &[0xA5; 32], 1);
    obj.section_mut(kaslr_id).flags = object::SectionFlags::Elf {
        sh_flags: u64::from(SHF_ALLOC),
    };
    let rel_id = obj.add_section(
        Vec::new(),
        b".rel.foo".to_vec(),
        object::SectionKind::Elf(SHT_REL),
    );
    obj.append_section_data(rel_id, &[0xC7; 24], 1);
    obj.section_mut(rel_id).flags = object::SectionFlags::Elf {
        sh_flags: u64::from(SHF_ALLOC),
    };
    let rdbg_id = obj.add_section(
        Vec::new(),
        b".rela.debug_info".to_vec(),
        object::SectionKind::Elf(SHT_RELA),
    );
    obj.append_section_data(rdbg_id, &[0xB6; 16], 1);
    let relr_id = obj.add_section(
        Vec::new(),
        b".relr.dyn".to_vec(),
        object::SectionKind::Elf(SHT_RELR),
    );
    obj.append_section_data(relr_id, &[0xD3; 24], 1);
    obj.section_mut(relr_id).flags = object::SectionFlags::Elf {
        sh_flags: u64::from(SHF_ALLOC),
    };
    let data = obj.write().unwrap();
    // Collect the pre-call header of every section of interest.
    let pre_elf = goblin::elf::Elf::parse(&data).unwrap();
    let mut pre_kaslr = None;
    let mut pre_rel = None;
    let mut pre_rdbg = None;
    let mut pre_relr = None;
    let mut pre_text = None;
    for sh in pre_elf.section_headers.iter() {
        let name = pre_elf.shdr_strtab.get_at(sh.sh_name).unwrap_or("");
        match name {
            ".rela.kaslr" => pre_kaslr = Some(sh.clone()),
            ".rel.foo" => pre_rel = Some(sh.clone()),
            ".rela.debug_info" => pre_rdbg = Some(sh.clone()),
            ".relr.dyn" => pre_relr = Some(sh.clone()),
            ".text" => pre_text = Some(sh.clone()),
            _ => {}
        }
    }
    let pre_kaslr = pre_kaslr.expect("fixture must carry .rela.kaslr");
    let pre_rel = pre_rel.expect("fixture must carry .rel.foo");
    let pre_rdbg = pre_rdbg.expect("fixture must carry .rela.debug_info");
    let pre_relr = pre_relr.expect("fixture must carry .relr.dyn");
    let pre_text = pre_text.expect("fixture must carry .text");
    // Precondition pass: types, flags and sizes must match what the fixture
    // intended to set up, otherwise the post-call assertions prove nothing.
    assert_eq!(
        pre_kaslr.sh_type,
        SHT_RELA,
        ".rela.kaslr sh_type must be SHT_RELA; got sh_type={} ({})",
        pre_kaslr.sh_type,
        sh_type_name(pre_kaslr.sh_type),
    );
    assert!(
        pre_kaslr.sh_flags & u64::from(SHF_ALLOC) != 0,
        ".rela.kaslr must carry SHF_ALLOC; got sh_flags={:#x}",
        pre_kaslr.sh_flags
    );
    assert_eq!(
        pre_kaslr.sh_size, 32,
        ".rela.kaslr sh_size must match 32-byte payload"
    );
    assert_eq!(
        pre_rel.sh_type,
        SHT_REL,
        ".rel.foo sh_type must be SHT_REL; got sh_type={} ({})",
        pre_rel.sh_type,
        sh_type_name(pre_rel.sh_type),
    );
    assert!(
        pre_rel.sh_flags & u64::from(SHF_ALLOC) != 0,
        ".rel.foo must carry SHF_ALLOC; got sh_flags={:#x}",
        pre_rel.sh_flags
    );
    assert_eq!(
        pre_rel.sh_size, 24,
        ".rel.foo sh_size must match 24-byte payload"
    );
    assert_eq!(
        pre_rdbg.sh_type,
        SHT_RELA,
        ".rela.debug_info sh_type must be SHT_RELA; got sh_type={} ({})",
        pre_rdbg.sh_type,
        sh_type_name(pre_rdbg.sh_type),
    );
    assert_eq!(
        pre_rdbg.sh_flags & u64::from(SHF_ALLOC),
        0,
        ".rela.debug_info must NOT carry SHF_ALLOC; got sh_flags={:#x}",
        pre_rdbg.sh_flags
    );
    assert_eq!(
        pre_rdbg.sh_size, 16,
        ".rela.debug_info sh_size must match 16-byte payload"
    );
    assert_eq!(
        pre_relr.sh_type,
        SHT_RELR,
        ".relr.dyn sh_type must be SHT_RELR (19); got sh_type={} ({})",
        pre_relr.sh_type,
        sh_type_name(pre_relr.sh_type),
    );
    assert_eq!(
        pre_relr.sh_size, 24,
        ".relr.dyn sh_size must match 24-byte payload"
    );
    assert_eq!(
        pre_text.sh_size, 64,
        ".text sh_size must match 64-byte payload"
    );
    // Snapshot the .rela.kaslr payload bytes so we can prove the pass only
    // rewrites headers, never section contents.
    let kaslr_offset = pre_kaslr.sh_offset as usize;
    let kaslr_size = pre_kaslr.sh_size as usize;
    let kaslr_original_data = data[kaslr_offset..kaslr_offset + kaslr_size].to_vec();
    let processed = neutralize_relocs(&data).unwrap();
    assert_eq!(
        processed.len(),
        data.len(),
        "neutralize_relocs must not resize the ELF; only sh_size header fields are rewritten"
    );
    // Re-collect the same headers from the processed bytes.
    let post_elf = goblin::elf::Elf::parse(&processed).unwrap();
    let mut post_kaslr = None;
    let mut post_rel = None;
    let mut post_rdbg = None;
    let mut post_relr = None;
    let mut post_text = None;
    for sh in post_elf.section_headers.iter() {
        let name = post_elf.shdr_strtab.get_at(sh.sh_name).unwrap_or("");
        match name {
            ".rela.kaslr" => post_kaslr = Some(sh.clone()),
            ".rel.foo" => post_rel = Some(sh.clone()),
            ".rela.debug_info" => post_rdbg = Some(sh.clone()),
            ".relr.dyn" => post_relr = Some(sh.clone()),
            ".text" => post_text = Some(sh.clone()),
            _ => {}
        }
    }
    let post_kaslr = post_kaslr.expect(".rela.kaslr must survive");
    let post_rel = post_rel.expect(".rel.foo must survive");
    let post_rdbg = post_rdbg.expect(".rela.debug_info must survive");
    let post_relr = post_relr.expect(".relr.dyn must survive");
    let post_text = post_text.expect(".text must survive");
    // Every reloc flavor: sh_size zeroed; .text untouched.
    assert_eq!(
        post_kaslr.sh_size, 0,
        ".rela.kaslr sh_size must be zeroed; got {}",
        post_kaslr.sh_size
    );
    assert_eq!(
        post_rel.sh_size, 0,
        ".rel.foo sh_size must be zeroed; got {}",
        post_rel.sh_size
    );
    assert_eq!(
        post_rdbg.sh_size, 0,
        ".rela.debug_info sh_size must be zeroed (SHF_ALLOC gate dropped); got {}",
        post_rdbg.sh_size
    );
    assert_eq!(
        post_relr.sh_size, 0,
        ".relr.dyn sh_size must be zeroed (SHT_RELR match arm); got {}",
        post_relr.sh_size
    );
    assert_eq!(
        post_text.sh_size, pre_text.sh_size,
        ".text sh_size must be preserved (not a relocation section)"
    );
    // Payload bytes and the rest of the header stay intact; only sh_size
    // and sh_type change.
    assert_eq!(
        &processed[kaslr_offset..kaslr_offset + kaslr_size],
        &kaslr_original_data[..],
        ".rela.kaslr data bytes must be preserved; neutralize only rewrites sh_size"
    );
    assert_eq!(
        post_kaslr.sh_offset, pre_kaslr.sh_offset,
        "sh_offset must be preserved"
    );
    assert_eq!(
        post_kaslr.sh_type,
        object::elf::SHT_PROGBITS,
        "sh_type must be rewritten to SHT_PROGBITS; got sh_type={} ({})",
        post_kaslr.sh_type,
        sh_type_name(post_kaslr.sh_type),
    );
    assert_eq!(
        post_kaslr.sh_flags, pre_kaslr.sh_flags,
        "sh_flags must be preserved"
    );
    assert_eq!(
        post_rel.sh_type,
        object::elf::SHT_PROGBITS,
        ".rel.foo sh_type must be SHT_PROGBITS"
    );
    assert_eq!(
        post_rdbg.sh_type,
        object::elf::SHT_PROGBITS,
        ".rela.debug_info sh_type must be SHT_PROGBITS"
    );
    assert_eq!(
        post_relr.sh_type,
        object::elf::SHT_PROGBITS,
        ".relr.dyn sh_type must be SHT_PROGBITS"
    );
}
#[test]
// Without any SHT_REL/RELA/RELR sections, the neutralize pass must leave the
// buffer byte-identical.
fn neutralize_relocs_noop_when_no_reloc_sections() {
    let obj = build_base_elf_with_text_symbol(object::Architecture::X86_64);
    let data = obj.write().unwrap();
    let processed = neutralize_relocs(&data).unwrap();
    assert_eq!(
        processed, data,
        "neutralize_relocs must be a byte-identity no-op when no reloc sections are present"
    );
}
#[test]
// neutralize_relocs applied to its own output must be a byte-identity
// no-op, and the neutralized state (zero sh_size, original sh_flags) must
// persist through the second pass.
fn neutralize_relocs_is_idempotent() {
    use object::elf::{SHF_ALLOC, SHT_REL, SHT_RELA};
    // Fixture with three reloc flavors: alloc RELA, alloc REL, non-alloc RELA.
    let mut obj = build_base_elf_with_text_symbol(object::Architecture::X86_64);
    let kaslr_id = obj.add_section(
        Vec::new(),
        b".rela.kaslr".to_vec(),
        object::SectionKind::Elf(SHT_RELA),
    );
    obj.append_section_data(kaslr_id, &[0xA5; 32], 1);
    obj.section_mut(kaslr_id).flags = object::SectionFlags::Elf {
        sh_flags: u64::from(SHF_ALLOC),
    };
    let rel_id = obj.add_section(
        Vec::new(),
        b".rel.foo".to_vec(),
        object::SectionKind::Elf(SHT_REL),
    );
    obj.append_section_data(rel_id, &[0xC7; 24], 1);
    obj.section_mut(rel_id).flags = object::SectionFlags::Elf {
        sh_flags: u64::from(SHF_ALLOC),
    };
    let rdbg_id = obj.add_section(
        Vec::new(),
        b".rela.debug_info".to_vec(),
        object::SectionKind::Elf(SHT_RELA),
    );
    obj.append_section_data(rdbg_id, &[0xB6; 16], 1);
    let data = obj.write().unwrap();
    let first_pass = neutralize_relocs(&data).unwrap();
    let second_pass = neutralize_relocs(&first_pass).unwrap();
    // The first pass must change something, or idempotence is vacuous.
    assert_ne!(
        first_pass, data,
        "first call must modify bytes on a fixture with reloc sections; \
if this fails, neutralize_relocs is a no-op"
    );
    assert_eq!(
        second_pass, first_pass,
        "neutralize_relocs must be idempotent: a second pass over its own output produces byte-identical bytes"
    );
    assert_eq!(
        first_pass.len(),
        data.len(),
        "first pass must preserve ELF length"
    );
    assert_eq!(
        second_pass.len(),
        first_pass.len(),
        "second pass must preserve ELF length"
    );
    let post_elf = goblin::elf::Elf::parse(&second_pass)
        .expect("second-pass output must remain parseable as ELF");
    let mut post_kaslr = None;
    let mut post_rel = None;
    let mut post_rdbg = None;
    for sh in post_elf.section_headers.iter() {
        let name = post_elf.shdr_strtab.get_at(sh.sh_name).unwrap_or("");
        match name {
            ".rela.kaslr" => post_kaslr = Some(sh.clone()),
            ".rel.foo" => post_rel = Some(sh.clone()),
            ".rela.debug_info" => post_rdbg = Some(sh.clone()),
            _ => {}
        }
    }
    let post_kaslr = post_kaslr.expect(".rela.kaslr must survive second pass");
    let post_rel = post_rel.expect(".rel.foo must survive second pass");
    let post_rdbg = post_rdbg.expect(".rela.debug_info must survive second pass");
    // Sizes stay zeroed and the original flags (set or cleared) survive.
    assert_eq!(
        post_kaslr.sh_size, 0,
        ".rela.kaslr sh_size must remain zero after the second pass"
    );
    assert_eq!(
        post_rel.sh_size, 0,
        ".rel.foo sh_size must remain zero after the second pass"
    );
    assert_eq!(
        post_rdbg.sh_size, 0,
        ".rela.debug_info sh_size must remain zero after the second pass (SHF_ALLOC gate dropped)"
    );
    assert!(
        post_kaslr.sh_flags & u64::from(SHF_ALLOC) != 0,
        ".rela.kaslr SHF_ALLOC flag must survive both passes; got sh_flags={:#x}",
        post_kaslr.sh_flags
    );
    assert!(
        post_rel.sh_flags & u64::from(SHF_ALLOC) != 0,
        ".rel.foo SHF_ALLOC flag must survive both passes; got sh_flags={:#x}",
        post_rel.sh_flags
    );
    assert_eq!(
        post_rdbg.sh_flags & u64::from(SHF_ALLOC),
        0,
        ".rela.debug_info must retain its (cleared) SHF_ALLOC flag across both passes; got sh_flags={:#x}",
        post_rdbg.sh_flags
    );
}
#[test]
// Garbage input must fail with an error whose context names the ELF parse
// step — both for a bad magic and for a valid magic with a bad EI_CLASS.
fn neutralize_relocs_rejects_invalid_elf() {
    let bad_class: [u8; 16] = [
        0x7f, b'E', b'L', b'F', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    ];
    let cases: &[(&str, &[u8])] = &[
        ("bad magic", b"not an ELF at all, just some bytes"),
        ("magic ok but invalid EI_CLASS", &bad_class),
    ];
    for (label, input) in cases {
        let err = neutralize_relocs(input).unwrap_err();
        let rendered = format!("{err:#}");
        assert!(
            rendered.contains("parse vmlinux ELF for preprocess"),
            "[{label}] expected error context to name the ELF parse step; got: {rendered}"
        );
    }
}
#[test]
// ELF32 coverage: the 32-bit section-header layout (the "(20, 4)" sh_size
// field offset/width branch) must also get sh_size zeroed for SHT_REL and
// SHT_RELA sections, without resizing the buffer or changing the class byte.
fn neutralize_relocs_zeros_sh_size_in_elf32_fixture() {
    use object::elf::{SHF_ALLOC, SHT_REL, SHT_RELA};
    // I386 makes the object crate emit an ELF32 image.
    let mut obj = build_base_elf_with_text_symbol(object::Architecture::I386);
    let kaslr_id = obj.add_section(
        Vec::new(),
        b".rela.kaslr".to_vec(),
        object::SectionKind::Elf(SHT_RELA),
    );
    obj.append_section_data(kaslr_id, &[0xA5; 16], 1);
    obj.section_mut(kaslr_id).flags = object::SectionFlags::Elf {
        sh_flags: u64::from(SHF_ALLOC),
    };
    let rel_id = obj.add_section(
        Vec::new(),
        b".rel.foo".to_vec(),
        object::SectionKind::Elf(SHT_REL),
    );
    obj.append_section_data(rel_id, &[0xC7; 12], 1);
    obj.section_mut(rel_id).flags = object::SectionFlags::Elf {
        sh_flags: u64::from(SHF_ALLOC),
    };
    let data = obj.write().unwrap();
    let pre_elf = goblin::elf::Elf::parse(&data).unwrap();
    assert!(
        !pre_elf.is_64,
        "fixture must produce ELF32 (is_64 == false) to exercise the (20, 4) branch"
    );
    let pre_kaslr = pre_elf
        .section_headers
        .iter()
        .find(|sh| pre_elf.shdr_strtab.get_at(sh.sh_name) == Some(".rela.kaslr"))
        .expect("fixture must carry .rela.kaslr")
        .clone();
    let pre_rel = pre_elf
        .section_headers
        .iter()
        .find(|sh| pre_elf.shdr_strtab.get_at(sh.sh_name) == Some(".rel.foo"))
        .expect("fixture must carry .rel.foo")
        .clone();
    // Precondition pass: types, flags and payload sizes as the fixture set up.
    assert_eq!(
        pre_kaslr.sh_type,
        SHT_RELA,
        ".rela.kaslr sh_type must be SHT_RELA; got sh_type={} ({})",
        pre_kaslr.sh_type,
        sh_type_name(pre_kaslr.sh_type),
    );
    assert!(
        pre_kaslr.sh_flags & u64::from(SHF_ALLOC) != 0,
        ".rela.kaslr must carry SHF_ALLOC; got sh_flags={:#x}",
        pre_kaslr.sh_flags
    );
    assert_eq!(
        pre_kaslr.sh_size, 16,
        ".rela.kaslr sh_size must match 16-byte payload pre-call"
    );
    assert_eq!(
        pre_rel.sh_type,
        SHT_REL,
        ".rel.foo sh_type must be SHT_REL; got sh_type={} ({})",
        pre_rel.sh_type,
        sh_type_name(pre_rel.sh_type),
    );
    assert!(
        pre_rel.sh_flags & u64::from(SHF_ALLOC) != 0,
        ".rel.foo must carry SHF_ALLOC; got sh_flags={:#x}",
        pre_rel.sh_flags
    );
    assert_eq!(
        pre_rel.sh_size, 12,
        ".rel.foo sh_size must match 12-byte payload pre-call"
    );
    let processed = neutralize_relocs(&data).unwrap();
    assert_eq!(
        processed.len(),
        data.len(),
        "neutralize_relocs must not resize the ELF32 buffer"
    );
    let post_elf = goblin::elf::Elf::parse(&processed).unwrap();
    assert!(
        !post_elf.is_64,
        "post-call parse must still be ELF32; the fn must not alter the e_ident class byte"
    );
    let post_kaslr = post_elf
        .section_headers
        .iter()
        .find(|sh| post_elf.shdr_strtab.get_at(sh.sh_name) == Some(".rela.kaslr"))
        .expect(".rela.kaslr must survive the neutralize pass")
        .clone();
    let post_rel = post_elf
        .section_headers
        .iter()
        .find(|sh| post_elf.shdr_strtab.get_at(sh.sh_name) == Some(".rel.foo"))
        .expect(".rel.foo must survive the neutralize pass")
        .clone();
    assert_eq!(
        post_kaslr.sh_size, 0,
        "ELF32 .rela.kaslr sh_size must be zeroed (SHT_RELA arm); got {}",
        post_kaslr.sh_size
    );
    assert_eq!(
        post_rel.sh_size, 0,
        "ELF32 .rel.foo sh_size must be zeroed (SHT_REL arm); got {}",
        post_rel.sh_size
    );
}
#[test]
// ELF32 variant of the no-op check: an i386 object with no reloc sections
// must pass through neutralize_relocs byte-identically.
fn neutralize_relocs_noop_when_no_reloc_sections_elf32() {
    use object::write;
    let mut obj = write::Object::new(
        object::BinaryFormat::Elf,
        object::Architecture::I386,
        object::Endianness::Little,
    );
    let text = obj.add_section(Vec::new(), b".text".to_vec(), object::SectionKind::Text);
    obj.append_section_data(text, &[0xCC; 64], 1);
    let _ = obj.add_symbol(write::Symbol {
        name: b"test_text_symbol".to_vec(),
        value: 0x0,
        size: 4,
        kind: object::SymbolKind::Data,
        scope: object::SymbolScope::Compilation,
        weak: false,
        section: write::SymbolSection::Section(text),
        flags: object::SymbolFlags::None,
    });
    let data = obj.write().unwrap();
    assert!(
        !goblin::elf::Elf::parse(&data).unwrap().is_64,
        "fixture must produce ELF32 (is_64 == false) to exercise the (20, 4) branch",
    );
    let processed = neutralize_relocs(&data).unwrap();
    assert_eq!(
        processed, data,
        "neutralize_relocs must be byte-identity on ELF32 when no reloc sections are present",
    );
}
#[test]
// ELF32 idempotence: a second neutralize pass over its own output is a
// byte-identity no-op, and all three reloc sections stay zero-sized.
fn neutralize_relocs_is_idempotent_elf32() {
    use object::elf::{SHF_ALLOC, SHT_REL, SHT_RELA};
    use object::write;
    // I386 makes the object crate emit an ELF32 image.
    let mut obj = write::Object::new(
        object::BinaryFormat::Elf,
        object::Architecture::I386,
        object::Endianness::Little,
    );
    let text_id = obj.add_section(Vec::new(), b".text".to_vec(), object::SectionKind::Text);
    obj.append_section_data(text_id, &[0xCC; 64], 1);
    let _ = obj.add_symbol(write::Symbol {
        name: b"test_text_symbol".to_vec(),
        value: 0x0,
        size: 4,
        kind: object::SymbolKind::Data,
        scope: object::SymbolScope::Compilation,
        weak: false,
        section: write::SymbolSection::Section(text_id),
        flags: object::SymbolFlags::None,
    });
    // Same three reloc flavors as the 64-bit idempotence test.
    let kaslr_id = obj.add_section(
        Vec::new(),
        b".rela.kaslr".to_vec(),
        object::SectionKind::Elf(SHT_RELA),
    );
    obj.append_section_data(kaslr_id, &[0xA5; 16], 1);
    obj.section_mut(kaslr_id).flags = object::SectionFlags::Elf {
        sh_flags: u64::from(SHF_ALLOC),
    };
    let rel_id = obj.add_section(
        Vec::new(),
        b".rel.foo".to_vec(),
        object::SectionKind::Elf(SHT_REL),
    );
    obj.append_section_data(rel_id, &[0xC7; 12], 1);
    obj.section_mut(rel_id).flags = object::SectionFlags::Elf {
        sh_flags: u64::from(SHF_ALLOC),
    };
    let rdbg_id = obj.add_section(
        Vec::new(),
        b".rela.debug_info".to_vec(),
        object::SectionKind::Elf(SHT_RELA),
    );
    obj.append_section_data(rdbg_id, &[0xB6; 8], 1);
    let data = obj.write().unwrap();
    assert!(
        !goblin::elf::Elf::parse(&data).unwrap().is_64,
        "fixture must be ELF32 to exercise the (20, 4) idempotence path",
    );
    let first_pass = neutralize_relocs(&data).unwrap();
    let second_pass = neutralize_relocs(&first_pass).unwrap();
    assert_ne!(
        first_pass, data,
        "first pass must rewrite sh_size on ELF32 reloc sections",
    );
    assert_eq!(
        second_pass, first_pass,
        "neutralize_relocs must be byte-identity idempotent on ELF32",
    );
    let post_elf = goblin::elf::Elf::parse(&second_pass).unwrap();
    for name in [".rela.kaslr", ".rel.foo", ".rela.debug_info"] {
        let sh = post_elf
            .section_headers
            .iter()
            .find(|sh| post_elf.shdr_strtab.get_at(sh.sh_name) == Some(name))
            .unwrap_or_else(|| panic!("{name} must survive second pass"));
        assert_eq!(
            sh.sh_size, 0,
            "ELF32 {name} sh_size must be zeroed after both passes (SHF_ALLOC gate dropped)"
        );
    }
}
#[test]
// A missing input path must surface as an Err, not a panic.
fn strip_vmlinux_debug_nonexistent_file() {
    let missing = Path::new("/nonexistent/vmlinux");
    assert!(strip_vmlinux_debug(missing).is_err());
}
#[test]
// Non-ELF bytes on disk must be rejected with an Err.
fn strip_vmlinux_debug_non_elf_file() {
    let tmp = TempDir::new().unwrap();
    let path = tmp.path().join("vmlinux");
    fs::write(&path, b"not an ELF file").unwrap();
    assert!(strip_vmlinux_debug(&path).is_err());
}
// Writes a small ELF to `<dir>/vmlinux` made of .text (with an anchor
// symbol), .BTF, .rodata, and one caller-chosen extra section with the given
// name, sh_type and payload. `mutate_header` receives the finished raw
// bytes before they hit disk, so a test can corrupt a section header
// (e.g. force a bogus sh_size) in place. Returns the written path.
fn build_reloc_fixture(
    dir: &Path,
    extra_section_name: &[u8],
    extra_section_sh_type: u32,
    extra_section_data: &[u8],
    mutate_header: impl FnOnce(&mut [u8]),
) -> PathBuf {
    use object::write;
    let mut obj = write::Object::new(
        object::BinaryFormat::Elf,
        object::Architecture::X86_64,
        object::Endianness::Little,
    );
    // .text with a nonzero-valued anchor symbol so symtab checks can find it.
    let text_id = obj.add_section(Vec::new(), b".text".to_vec(), object::SectionKind::Text);
    obj.append_section_data(text_id, &[0xCC; 64], 16);
    let _ = obj.add_symbol(write::Symbol {
        name: b"pipeline_anchor".to_vec(),
        value: 0x10,
        size: 8,
        kind: object::SymbolKind::Data,
        scope: object::SymbolScope::Compilation,
        weak: false,
        section: write::SymbolSection::Section(text_id),
        flags: object::SymbolFlags::None,
    });
    // Keep-list sections the strip tests assert survive.
    let btf_id = obj.add_section(Vec::new(), b".BTF".to_vec(), object::SectionKind::Other);
    obj.append_section_data(btf_id, &[0x42; 128], 1);
    let rodata_id = obj.add_section(
        Vec::new(),
        b".rodata".to_vec(),
        object::SectionKind::ReadOnlyData,
    );
    obj.append_section_data(rodata_id, &[0xAA; 256], 1);
    // Caller-controlled section: the subject of each reloc-handling test.
    let extra_id = obj.add_section(
        Vec::new(),
        extra_section_name.to_vec(),
        object::SectionKind::Elf(extra_section_sh_type),
    );
    obj.append_section_data(extra_id, extra_section_data, 1);
    let mut bytes = obj.write().unwrap();
    mutate_header(&mut bytes);
    let path = dir.join("vmlinux");
    fs::write(&path, &bytes).unwrap();
    path
}
// Shared postcondition for the reloc-fixture strip tests: every keep-list
// section survives in `stripped`, and the named reloc section is gone.
fn assert_stripped_preserves_keep_list_and_deletes(stripped: &Path, reloc_name: &str) {
    let bytes = fs::read(stripped).unwrap();
    let elf = goblin::elf::Elf::parse(&bytes).unwrap();
    let mut names: Vec<&str> = Vec::new();
    for sh in elf.section_headers.iter() {
        if let Some(section_name) = elf.shdr_strtab.get_at(sh.sh_name) {
            names.push(section_name);
        }
    }
    for name in [".symtab", ".strtab", ".BTF", ".rodata"] {
        assert!(
            names.contains(&name),
            "keep-list section {name} must survive strip_vmlinux_debug; got {names:?}"
        );
    }
    assert!(
        !names.contains(&reloc_name),
        "reloc section {reloc_name} must be deleted by strip_vmlinux_debug; got {names:?}"
    );
}
#[test]
// A non-alloc SHT_RELA section full of garbage entries must not trip the
// strip pipeline: the section is simply deleted, keep list intact.
fn strip_vmlinux_debug_handles_nonalloc_rela_with_invalid_entries() {
    let src = TempDir::new().unwrap();
    let payload = [0xA5u8; 24];
    let vmlinux = build_reloc_fixture(
        src.path(),
        b".rela.invalid",
        object::elf::SHT_RELA,
        &payload,
        |_| {},
    );
    let stripped = strip_vmlinux_debug(&vmlinux).unwrap();
    assert_stripped_preserves_keep_list_and_deletes(stripped.path(), ".rela.invalid");
}
#[test]
fn strip_vmlinux_debug_handles_nonalloc_rela_with_non_entsize_sh_size() {
    // A RELA section whose sh_size is not a multiple of the entry size
    // must not trip up the strip pipeline.
    let src = TempDir::new().unwrap();
    let vmlinux = build_reloc_fixture(
        src.path(),
        b".rela.odd",
        object::elf::SHT_RELA,
        &[0x11; 24],
        |bytes| {
            // Locate the .rela.odd section header in the raw image so we
            // can patch its sh_size field in place.
            let elf = goblin::elf::Elf::parse(bytes).unwrap();
            let shoff = elf.header.e_shoff as usize;
            let shentsize = elf.header.e_shentsize as usize;
            let idx = elf
                .section_headers
                .iter()
                .position(|sh| elf.shdr_strtab.get_at(sh.sh_name) == Some(".rela.odd"))
                .expect("fixture must carry .rela.odd");
            // Release the parse's borrow of `bytes` before mutating it.
            drop(elf);
            // 32 = byte offset of sh_size within an Elf64_Shdr.
            let sh_size_off = shoff + idx * shentsize + 32;
            // 17 is deliberately not a multiple of the 24-byte Elf64_Rela
            // entry size.
            let bad_size: u64 = 17;
            bytes[sh_size_off..sh_size_off + 8].copy_from_slice(&bad_size.to_le_bytes());
        },
    );
    let stripped = strip_vmlinux_debug(&vmlinux).unwrap();
    assert_stripped_preserves_keep_list_and_deletes(stripped.path(), ".rela.odd");
}
#[test]
fn strip_vmlinux_debug_handles_relr_section() {
    let tmp = TempDir::new().unwrap();
    let vmlinux = build_reloc_fixture(
        tmp.path(),
        b".relr.dyn",
        object::elf::SHT_RELR,
        &[0x77; 16],
        |_| {},
    );
    // Step 1: neutralize_relocs alone must keep the section present but
    // demote it to SHT_PROGBITS with a zeroed size.
    let raw = fs::read(&vmlinux).unwrap();
    let neutralized = neutralize_relocs(&raw).unwrap();
    let parsed = goblin::elf::Elf::parse(&neutralized).unwrap();
    let relr = parsed
        .section_headers
        .iter()
        .find(|sh| parsed.shdr_strtab.get_at(sh.sh_name) == Some(".relr.dyn"))
        .expect(".relr.dyn must survive neutralize");
    assert_eq!(
        relr.sh_type,
        object::elf::SHT_PROGBITS,
        ".relr.dyn sh_type must be rewritten to SHT_PROGBITS (SHT_RELR arm of the match); got sh_type={}",
        relr.sh_type,
    );
    assert_eq!(
        relr.sh_size, 0,
        ".relr.dyn sh_size must be zeroed post-neutralize",
    );
    // Step 2: the full strip must delete the section outright.
    let stripped = strip_vmlinux_debug(&vmlinux).unwrap();
    assert_stripped_preserves_keep_list_and_deletes(stripped.path(), ".relr.dyn");
}
#[test]
fn strip_vmlinux_debug_deletes_reloc_sections_and_preserves_keep_list() {
    use object::write;
    let src = TempDir::new().unwrap();
    // Hand-rolled fixture: build_reloc_fixture supports only one extra
    // section and this test needs both a RELA and a RELR section.
    let mut builder = write::Object::new(
        object::BinaryFormat::Elf,
        object::Architecture::X86_64,
        object::Endianness::Little,
    );
    let text = builder.add_section(Vec::new(), b".text".to_vec(), object::SectionKind::Text);
    builder.append_section_data(text, &[0xCC; 64], 16);
    let _ = builder.add_symbol(write::Symbol {
        name: b"pipeline_anchor".to_vec(),
        value: 0x10,
        size: 8,
        kind: object::SymbolKind::Data,
        scope: object::SymbolScope::Compilation,
        weak: false,
        section: write::SymbolSection::Section(text),
        flags: object::SymbolFlags::None,
    });
    let btf = builder.add_section(Vec::new(), b".BTF".to_vec(), object::SectionKind::Other);
    builder.append_section_data(btf, &[0x42; 128], 1);
    let rodata = builder.add_section(
        Vec::new(),
        b".rodata".to_vec(),
        object::SectionKind::ReadOnlyData,
    );
    builder.append_section_data(rodata, &[0xAA; 256], 1);
    let rela = builder.add_section(
        Vec::new(),
        b".rela.dbg".to_vec(),
        object::SectionKind::Elf(object::elf::SHT_RELA),
    );
    builder.append_section_data(rela, &[0xA5; 24], 1);
    let relr = builder.add_section(
        Vec::new(),
        b".relr.dyn".to_vec(),
        object::SectionKind::Elf(object::elf::SHT_RELR),
    );
    builder.append_section_data(relr, &[0xD3; 24], 1);
    let bytes = builder.write().unwrap();
    let vmlinux = src.path().join("vmlinux");
    fs::write(&vmlinux, &bytes).unwrap();
    // Sanity-check the fixture before exercising the strip.
    let source_elf = goblin::elf::Elf::parse(&bytes).unwrap();
    let source_names: Vec<&str> = source_elf
        .section_headers
        .iter()
        .filter_map(|s| source_elf.shdr_strtab.get_at(s.sh_name))
        .collect();
    let expected = [
        ".text", ".BTF", ".rodata", ".rela.dbg", ".relr.dyn", ".symtab", ".strtab",
    ];
    for name in expected {
        assert!(
            source_names.contains(&name),
            "fixture missing expected section {name}; got {source_names:?}"
        );
    }
    let stripped = strip_vmlinux_debug(&vmlinux).unwrap();
    let data = fs::read(stripped.path()).unwrap();
    let elf = goblin::elf::Elf::parse(&data).unwrap();
    let names: Vec<&str> = elf
        .section_headers
        .iter()
        .filter_map(|s| elf.shdr_strtab.get_at(s.sh_name))
        .collect();
    // Keep-list sections survive; reloc sections disappear.
    for name in [".symtab", ".strtab", ".BTF", ".rodata"] {
        assert!(
            names.contains(&name),
            "keep-list section {name} must survive strip; got {names:?}"
        );
    }
    for name in [".rela.dbg", ".relr.dyn"] {
        assert!(
            !names.contains(&name),
            "reloc section {name} must be deleted by strip; got {names:?}"
        );
    }
}
#[test]
fn strip_vmlinux_debug_preserves_monitor_symbols() {
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        skip!("no vmlinux found; {}", crate::KTSTR_KERNEL_HINT);
    };
    if path.starts_with("/sys/") {
        skip!("vmlinux is raw BTF (not ELF), cannot strip debug");
    }
    let stripped = strip_vmlinux_debug(&path).unwrap();
    let stripped_path = stripped.path();
    let syms = crate::monitor::symbols::KernelSymbols::from_vmlinux(stripped_path).unwrap();
    // Mandatory symbols must resolve to nonzero addresses post-strip.
    assert_ne!(
        syms.runqueues, 0,
        "runqueues symbol missing from stripped vmlinux"
    );
    assert_ne!(
        syms.per_cpu_offset, 0,
        "__per_cpu_offset symbol missing from stripped vmlinux"
    );
    // Optional symbols: presence before and after the strip must agree.
    let source_syms = crate::monitor::symbols::KernelSymbols::from_vmlinux(&path).unwrap();
    let presence = [
        (
            source_syms.init_top_pgt.is_some(),
            syms.init_top_pgt.is_some(),
            "strip changed KernelSymbols init_top_pgt presence",
        ),
        (
            source_syms.page_offset_base_kva.is_some(),
            syms.page_offset_base_kva.is_some(),
            "strip changed page_offset_base_kva presence",
        ),
        (
            source_syms.scx_root.is_some(),
            syms.scx_root.is_some(),
            "strip changed scx_root presence",
        ),
        (
            source_syms.pgtable_l5_enabled.is_some(),
            syms.pgtable_l5_enabled.is_some(),
            "strip changed pgtable_l5_enabled presence",
        ),
        (
            source_syms.prog_idr.is_some(),
            syms.prog_idr.is_some(),
            "strip changed prog_idr presence",
        ),
        (
            source_syms.scx_watchdog_timeout.is_some(),
            syms.scx_watchdog_timeout.is_some(),
            "strip changed scx_watchdog_timeout presence",
        ),
    ];
    for (before, after, msg) in presence {
        assert_eq!(before, after, "{}", msg);
    }
    // Cross-check the raw symtab too, independent of KernelSymbols.
    let source_data = fs::read(&path).unwrap();
    let source_elf = goblin::elf::Elf::parse(&source_data).unwrap();
    let stripped_data = fs::read(stripped_path).unwrap();
    let stripped_elf = goblin::elf::Elf::parse(&stripped_data).unwrap();
    for (sym, msg) in [
        ("init_top_pgt", "strip changed raw-symtab init_top_pgt presence"),
        ("swapper_pg_dir", "strip changed raw-symtab swapper_pg_dir presence"),
    ] {
        assert_eq!(
            has_symbol(&source_elf, sym),
            has_symbol(&stripped_elf, sym),
            "{}",
            msg
        );
    }
}
#[test]
fn strip_vmlinux_debug_shrinks_when_source_has_debug_info() {
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        skip!("no vmlinux found; {}", crate::KTSTR_KERNEL_HINT);
    };
    if path.starts_with("/sys/") {
        skip!("vmlinux is raw BTF (not ELF), cannot strip debug");
    }
    // The shrink claim is only meaningful if there is DWARF to remove.
    let source_data = fs::read(&path).unwrap();
    let source_elf = goblin::elf::Elf::parse(&source_data).unwrap();
    let has_debug_info = source_elf
        .section_headers
        .iter()
        .any(|sh| source_elf.shdr_strtab.get_at(sh.sh_name) == Some(".debug_info"));
    if !has_debug_info {
        skip!(
            "source vmlinux has no .debug_info — already stripped \
             (cached copy or distro-stripped); rebuild source tree \
             to exercise the size-shrink path"
        );
    }
    let stripped = strip_vmlinux_debug(&path).unwrap();
    let source_size = fs::metadata(&path).unwrap().len();
    let stripped_size = fs::metadata(stripped.path()).unwrap().len();
    assert!(
        stripped_size < source_size,
        "stripped vmlinux ({stripped_size} bytes) should be smaller than \
         source ({source_size} bytes)"
    );
}
#[test]
fn strip_vmlinux_debug_preserves_bpf_idr_symbols() {
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        skip!("no vmlinux found; {}", crate::KTSTR_KERNEL_HINT);
    };
    if path.starts_with("/sys/") {
        skip!("vmlinux is raw BTF (not ELF), cannot strip debug");
    }
    let stripped = strip_vmlinux_debug(&path).unwrap();
    let data = fs::read(stripped.path()).unwrap();
    let elf = goblin::elf::Elf::parse(&data).unwrap();
    // Both BPF object-id symbols must survive the strip.
    for sym in ["map_idr", "prog_idr"] {
        assert!(
            has_symbol(&elf, sym),
            "{sym} symbol missing from stripped vmlinux"
        );
    }
}
#[test]
fn strip_vmlinux_debug_preserves_function_symbols() {
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        skip!("no vmlinux found; {}", crate::KTSTR_KERNEL_HINT);
    };
    if path.starts_with("/sys/") {
        skip!("vmlinux is raw BTF (not ELF), cannot strip debug");
    }
    // Precondition: the source must still carry a function symbol to keep.
    let source_data = fs::read(&path).unwrap();
    let source_elf = goblin::elf::Elf::parse(&source_data).unwrap();
    if !has_symbol(&source_elf, "schedule") {
        skip!(
            "source vmlinux has no `schedule` symbol \
             (already stripped by older ktstr) -- rebuild the kernel \
             cache to exercise this test"
        );
    }
    let stripped = strip_vmlinux_debug(&path).unwrap();
    let stripped_data = fs::read(stripped.path()).unwrap();
    let stripped_elf = goblin::elf::Elf::parse(&stripped_data).unwrap();
    assert!(
        has_symbol(&stripped_elf, "schedule"),
        "schedule function symbol dropped by strip"
    );
}
#[test]
fn kconfig_status_display_matches_renders_lowercase_word() {
    // Display for the Matches variant is the bare lowercase word.
    let rendered = KconfigStatus::Matches.to_string();
    assert_eq!(rendered, "matches");
}
#[test]
fn kconfig_status_display_stale_renders_lowercase_word_without_hashes() {
    let status = KconfigStatus::Stale {
        cached: "deadbeef".to_string(),
        current: "cafebabe".to_string(),
    };
    let s = status.to_string();
    assert_eq!(
        s, "stale",
        "Display elides the cached/current hashes; callers that need them must match on the variant directly"
    );
}
#[test]
fn kconfig_status_display_untracked_renders_lowercase_word() {
    // Display for the Untracked variant is the bare lowercase word.
    let rendered = KconfigStatus::Untracked.to_string();
    assert_eq!(rendered, "untracked");
}
#[test]
fn acquire_shared_lock_creates_lockfile_at_expected_path() {
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let _guard = cache.acquire_shared_lock("some-key-123").unwrap();
    let locks_dir = tmp.path().join(".locks");
    assert!(
        locks_dir.is_dir(),
        "parent .locks/ subdirectory must materialize on first acquire",
    );
    assert!(
        locks_dir.join("some-key-123.lock").exists(),
        "lockfile must materialize at {{cache_root}}/.locks/{{key}}.lock on first acquire",
    );
}
#[test]
fn acquire_shared_lock_permits_concurrent_readers() {
    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};
    let tmp = TempDir::new().unwrap();
    let cache = Arc::new(CacheDir::with_root(tmp.path().to_path_buf()));
    let key = "concurrent-sh";
    let success = Arc::new(AtomicUsize::new(0));
    // Four readers at once: shared locks must never exclude each other.
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let cache = Arc::clone(&cache);
            let success = Arc::clone(&success);
            std::thread::spawn(move || {
                let _g = cache
                    .acquire_shared_lock(key)
                    .expect("LOCK_SH must succeed");
                success.fetch_add(1, Ordering::SeqCst);
                std::thread::sleep(std::time::Duration::from_millis(50));
            })
        })
        .collect();
    for handle in handles {
        handle.join().expect("reader thread panicked");
    }
    assert_eq!(
        success.load(Ordering::SeqCst),
        4,
        "all 4 concurrent LOCK_SH acquires must succeed",
    );
}
#[test]
fn try_acquire_exclusive_lock_fails_with_active_reader() {
    use std::sync::Arc;
    use std::sync::mpsc;
    let tmp = TempDir::new().unwrap();
    let cache = Arc::new(CacheDir::with_root(tmp.path().to_path_buf()));
    let key = "force-contended";
    // A reader thread holds LOCK_SH until told to release, so the EX
    // attempt below is guaranteed to contend.
    let (ready_tx, ready_rx) = mpsc::channel();
    let (release_tx, release_rx) = mpsc::channel::<()>();
    let reader_cache = Arc::clone(&cache);
    let reader = std::thread::spawn(move || {
        let _g = reader_cache
            .acquire_shared_lock(key)
            .expect("reader LOCK_SH must succeed");
        ready_tx.send(()).unwrap();
        release_rx.recv().unwrap();
    });
    ready_rx
        .recv_timeout(std::time::Duration::from_secs(5))
        .expect("reader thread did not signal ready in time");
    let err = cache.try_acquire_exclusive_lock(key).unwrap_err();
    let msg = format!("{err:#}");
    assert!(
        msg.contains("is locked by active test runs") || msg.contains("holders:"),
        "error must surface the contention diagnostic; got: {msg}",
    );
    assert!(
        msg.contains("lockfile"),
        "error must name the lockfile path: {msg}",
    );
    release_tx.send(()).unwrap();
    reader.join().expect("reader thread panicked");
}
#[test]
fn acquire_exclusive_lock_blocking_times_out_on_contention() {
    use std::sync::Arc;
    use std::sync::mpsc;
    let tmp = TempDir::new().unwrap();
    let cache = Arc::new(CacheDir::with_root(tmp.path().to_path_buf()));
    let key = "blocking-timeout";
    // Reader pins LOCK_SH until released so the EX acquire cannot win.
    let (ready_tx, ready_rx) = mpsc::channel();
    let (release_tx, release_rx) = mpsc::channel::<()>();
    let reader_cache = Arc::clone(&cache);
    let reader = std::thread::spawn(move || {
        let _g = reader_cache
            .acquire_shared_lock(key)
            .expect("reader LOCK_SH must succeed");
        ready_tx.send(()).unwrap();
        release_rx.recv().unwrap();
    });
    ready_rx
        .recv_timeout(std::time::Duration::from_secs(5))
        .expect("reader did not signal ready in time");
    // The blocking EX acquire must give up at roughly its deadline.
    let start = std::time::Instant::now();
    let err = cache
        .acquire_exclusive_lock_blocking(key, std::time::Duration::from_millis(200))
        .unwrap_err();
    let elapsed = start.elapsed();
    let msg = format!("{err:#}");
    assert!(
        msg.contains("timed out"),
        "error must mention the timeout: {msg}",
    );
    assert!(
        elapsed >= std::time::Duration::from_millis(150),
        "acquire should have waited ~timeout (150ms lower bound); \
         got {elapsed:?}",
    );
    release_tx.send(()).unwrap();
    reader.join().expect("reader thread panicked");
}
#[test]
fn store_succeeds_under_internal_exclusive_lock() {
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    let entry = cache
        .store("internal-lock", &CacheArtifacts::new(&image), &meta)
        .expect("store must succeed when no readers contend");
    assert!(entry.path.join("bzImage").exists());
    let lockfile = tmp
        .path()
        .join("cache")
        .join(".locks")
        .join("internal-lock.lock");
    assert!(
        lockfile.exists(),
        "lockfile materialized during store must persist after \
         store returns (it's fine; the flock is released on fd \
         drop but the file stays as a reusable sentinel)",
    );
}
#[test]
fn store_blocks_while_reader_holds_shared_lock() {
    use std::sync::Arc;
    use std::sync::mpsc;
    let tmp = TempDir::new().unwrap();
    let cache = Arc::new(CacheDir::with_root(tmp.path().join("cache-block")));
    let key = "blocked-store";
    // Reader holds LOCK_SH until told to release.
    let (ready_tx, ready_rx) = mpsc::channel();
    let (release_tx, release_rx) = mpsc::channel::<()>();
    let reader_cache = Arc::clone(&cache);
    let reader = std::thread::spawn(move || {
        let _g = reader_cache
            .acquire_shared_lock(key)
            .expect("reader LOCK_SH must succeed");
        ready_tx.send(()).unwrap();
        release_rx.recv().unwrap();
    });
    ready_rx
        .recv_timeout(std::time::Duration::from_secs(5))
        .expect("reader did not signal ready in time");
    let src_dir = TempDir::new().unwrap();
    let image = create_fake_image(src_dir.path());
    let meta = test_metadata("6.14.2");
    // Writer thread: store() takes LOCK_EX internally, so it must park
    // until the reader lets go.
    let (store_done_tx, store_done_rx) = mpsc::channel();
    let store_cache = Arc::clone(&cache);
    let store_image = image.clone();
    let store_thread = std::thread::spawn(move || {
        let _ = store_cache.store(key, &CacheArtifacts::new(&store_image), &meta);
        store_done_tx.send(()).unwrap();
    });
    let early = store_done_rx.recv_timeout(std::time::Duration::from_millis(200));
    assert!(
        early.is_err(),
        "store() must block while reader holds LOCK_SH; got completion signal early",
    );
    release_tx.send(()).unwrap();
    let finish = store_done_rx.recv_timeout(std::time::Duration::from_secs(10));
    assert!(
        finish.is_ok(),
        "store() must complete after reader releases; got timeout",
    );
    reader.join().expect("reader thread panicked");
    store_thread.join().expect("store thread panicked");
}
#[test]
fn lock_path_returns_expected_shape() {
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    // Shape is {root}/.locks/{key}.lock.
    let expected = tmp.path().join(".locks").join("my-key-42.lock");
    assert_eq!(cache.lock_path("my-key-42"), expected);
}
#[test]
fn locks_subdir_persists_after_guard_drop() {
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    let locks_dir = tmp.path().join(".locks");
    {
        let _guard = cache
            .acquire_shared_lock("persist-test")
            .expect("acquire must succeed");
        assert!(locks_dir.is_dir(), "must exist during guard lifetime");
    }
    // Dropping the guard releases the flock but must not remove .locks/.
    assert!(
        locks_dir.is_dir(),
        ".locks/ must persist after guard drop — next acquire \
         keys /proc/locks on the existing inode",
    );
}
#[test]
fn list_skips_locks_dotfile_subdirectory() {
    let tmp = TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().to_path_buf());
    // Acquire-then-drop leaves .locks/ behind as a plain subdirectory.
    drop(cache.acquire_shared_lock("dummy").expect("acquire"));
    assert!(
        tmp.path().join(".locks").is_dir(),
        ".locks/ must exist after acquire drop",
    );
    let entries = cache.list().expect("list must succeed");
    let keys: Vec<&str> = entries
        .iter()
        .map(|entry| match entry {
            ListedEntry::Valid(entry) => entry.key.as_str(),
            ListedEntry::Corrupt { key, .. } => key.as_str(),
        })
        .collect();
    assert!(
        !keys.iter().any(|k| k.starts_with('.')),
        "list() must not return dotfile children: {keys:?}",
    );
}
#[test]
fn acquire_on_empty_root_creates_locks_dir_lazily() {
    let tmp = TempDir::new().unwrap();
    let root = tmp.path().join("pristine");
    std::fs::create_dir(&root).unwrap();
    let cache = CacheDir::with_root(root.clone());
    let locks_dir = root.join(".locks");
    // Construction alone must not touch the filesystem.
    assert!(!locks_dir.exists());
    let _guard = cache
        .acquire_shared_lock("lazy-test")
        .expect("first acquire on empty root must succeed");
    assert!(
        locks_dir.is_dir(),
        "first acquire must materialize .locks/ lazily",
    );
}
#[test]
fn cache_dir_clean_all_preserves_locks_subdir() {
    let tmp = TempDir::new().unwrap();
    let cache_root = tmp.path().join("cache");
    let locks_dir = cache_root.join(".locks");
    let lockfile = locks_dir.join("entry-a.lock");
    let cache = CacheDir::with_root(cache_root.clone());
    let src_dir = TempDir::new().unwrap();
    let fake_image = create_fake_image(src_dir.path());
    let artifacts = CacheArtifacts::new(&fake_image);
    cache
        .store("entry-a", &artifacts, &test_metadata("6.14.0"))
        .expect("store must succeed");
    // Hold a live SH lock on the entry across clean_all.
    let _guard = cache
        .acquire_shared_lock("entry-a")
        .expect("SH acquire must succeed");
    assert!(locks_dir.is_dir(), "precondition: .locks/ must exist");
    assert!(lockfile.exists(), "precondition: lockfile must exist");
    let removed = cache.clean_all().expect("clean_all must succeed");
    assert_eq!(removed, 1, "clean_all must remove exactly 1 entry");
    // Entries go; the lock machinery stays.
    assert!(
        locks_dir.is_dir(),
        ".locks/ subdirectory must survive clean_all — the live \
         SH flock's inode would otherwise orphan",
    );
    assert!(
        lockfile.exists(),
        "lockfile must still exist under .locks/ after clean_all",
    );
    assert!(
        !cache_root.join("entry-a").exists(),
        "cache entry must be removed by clean_all",
    );
}
#[test]
fn cache_dir_acquire_rejects_path_traversal_key() {
    let tmp = TempDir::new().unwrap();
    let cache_root = tmp.path().join("cache");
    let cache = CacheDir::with_root(cache_root.clone());
    let err = cache
        .acquire_shared_lock("../../etc/passwd")
        .expect_err("path-traversal key must be rejected");
    let msg = format!("{err:#}");
    assert!(
        msg.contains("path"),
        "error must mention path rejection: {msg}",
    );
    // The traversal must not have escaped the cache root…
    let escaped_lock = tmp.path().join("etc").join("passwd.lock");
    assert!(
        !escaped_lock.exists(),
        "path traversal must NOT create a lockfile outside .locks/",
    );
    // …and must not have left anything inside .locks/ either.
    let locks_dir = cache_root.join(".locks");
    let locks_is_empty =
        !locks_dir.exists() || locks_dir.read_dir().unwrap().next().is_none();
    assert!(
        locks_is_empty,
        ".locks/ must be empty if it exists at all — validator \
         rejects before lockfile creation",
    );
}
use crate::test_support::test_helpers::{EnvVarGuard, lock_env};
#[test]
fn validate_home_for_cache_rejects_unset() {
    let _env_lock = lock_env();
    let _home = EnvVarGuard::remove("HOME");
    let err = super::validate_home_for_cache().expect_err("unset HOME must be rejected");
    let msg = format!("{err:#}");
    // Unset (NotPresent) and empty-string HOME carry distinct diagnostics.
    assert!(
        msg.contains("HOME is unset"),
        "diagnostic must call out the unset case specifically: {msg}",
    );
    assert!(
        !msg.contains("HOME is set to the empty string"),
        "unset HOME must NOT use the empty-string diagnostic — the two \
         cases are distinct now (NotPresent vs Ok(\"\")): {msg}",
    );
}
#[test]
fn validate_home_for_cache_rejects_empty() {
    let _env_lock = lock_env();
    let _home = EnvVarGuard::set("HOME", "");
    let err = super::validate_home_for_cache().expect_err("empty HOME must be rejected");
    let msg = format!("{err:#}");
    // Mirror of the unset test: empty string gets its own diagnostic.
    assert!(
        msg.contains("HOME is set to the empty string"),
        "diagnostic must call out the empty-string case specifically: {msg}",
    );
    assert!(
        !msg.contains("HOME is unset"),
        "empty HOME must NOT use the unset diagnostic — the two \
         cases are distinct now: {msg}",
    );
}
#[test]
fn validate_home_for_cache_rejects_root_slash() {
    let _env_lock = lock_env();
    let _home = EnvVarGuard::set("HOME", "/");
    let err = super::validate_home_for_cache().expect_err("HOME=/ must be rejected");
    let msg = format!("{err:#}");
    // Both the root-slash callout and the rationale must appear.
    for (needle, why) in [
        (
            "HOME is `/`",
            "diagnostic must call out the root-slash case specifically",
        ),
        (
            "/.cache/ktstr",
            "diagnostic must explain why (/.cache/ktstr aliases root fs)",
        ),
    ] {
        assert!(msg.contains(needle), "{why}: {msg}");
    }
}
#[test]
fn validate_home_for_cache_rejects_relative_path() {
    let _env_lock = lock_env();
    let relative_homes = ["relative", "./relative", "home/user", "."];
    for rel in relative_homes {
        let _home = EnvVarGuard::set("HOME", rel);
        let err = super::validate_home_for_cache()
            .expect_err(&format!("relative path '{rel}' must be rejected"));
        let msg = format!("{err:#}");
        assert!(
            msg.contains("not an absolute path"),
            "[rel={rel:?}] diagnostic must call out non-absolute: {msg}",
        );
        assert!(
            msg.contains(&format!("{rel:?}")),
            "[rel={rel:?}] diagnostic must echo the offending value verbatim: {msg}",
        );
    }
}
#[test]
fn validate_home_for_cache_accepts_absolute_paths() {
    let _env_lock = lock_env();
    let absolute_homes = [
        "/home/user",
        "/var/empty",
        "/root",
        "/a",
        "/home/user with spaces",
        "/home/user/.local/share",
    ];
    for ok in absolute_homes {
        let _home = EnvVarGuard::set("HOME", ok);
        let got = super::validate_home_for_cache()
            .unwrap_or_else(|e| panic!("absolute path {ok:?} must be accepted; got: {e:#}"));
        assert_eq!(
            got,
            std::path::PathBuf::from(ok),
            "returned PathBuf must equal the HOME value verbatim — \
             helper does not append the cache suffix or canonicalize",
        );
    }
}
#[test]
fn validate_home_for_cache_does_not_canonicalize_dots_and_doubles() {
    let _env_lock = lock_env();
    // Absolute-but-unnormalized values pass; normalization is not this
    // helper's job.
    let unnormalized = ["//", "/./", "/.", "/foo//bar", "/./home"];
    for not_normalized in unnormalized {
        let _home = EnvVarGuard::set("HOME", not_normalized);
        super::validate_home_for_cache().unwrap_or_else(|e| {
            panic!(
                "non-normalized but absolute path {not_normalized:?} must \
                 pass the helper (downstream OS surfaces the diagnostic); \
                 got: {e:#}",
            )
        });
    }
}
}