use std::path::Path;
use anyhow::{Result, anyhow};
use crate::cache::CacheDir;
use super::util::new_table;
/// One row of the "LLC locks" section: the lock file guarding a single
/// last-level-cache group and the processes recorded as holding it.
///
/// Field names are part of the JSON CLI output; `rename_all = "snake_case"`
/// pins them (see `locks_snapshot_json_field_names_are_stable`).
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct LlcLockRow {
    // Index parsed from the `ktstr-llc-<idx>.lock` filename.
    pub(crate) llc_idx: usize,
    // NUMA node of this LLC when host topology could be read from sysfs;
    // `None` when topology is unavailable or the index is out of range.
    pub(crate) numa_node: Option<usize>,
    // Absolute path of the lock file, as a display string.
    pub(crate) lockfile: String,
    // Holder records read via `crate::flock::read_holders` (empty on error).
    pub(crate) holders: Vec<crate::flock::HolderInfo>,
}
/// One row of the "Per-CPU locks" section: the lock file for a single CPU
/// and the processes recorded as holding it.
///
/// Field names are part of the JSON CLI output; `rename_all = "snake_case"`
/// pins them.
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct CpuLockRow {
    // CPU number parsed from the `ktstr-cpu-<n>.lock` filename.
    pub(crate) cpu: usize,
    // NUMA node of this CPU from host topology; `None` when unknown.
    pub(crate) numa_node: Option<usize>,
    // Absolute path of the lock file, as a display string.
    pub(crate) lockfile: String,
    // Holder records read via `crate::flock::read_holders` (empty on error).
    pub(crate) holders: Vec<crate::flock::HolderInfo>,
}
/// One row of the "Cache-entry locks" section: a `*.lock` file under the
/// cache root's lock directory, keyed by its file stem.
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct CacheLockRow {
    // File stem of the lock file (e.g. `6.14.2-tarball-x86_64`).
    pub(crate) cache_key: String,
    // Absolute path of the lock file, as a display string.
    pub(crate) lockfile: String,
    // Holder records read via `crate::flock::read_holders` (empty on error).
    pub(crate) holders: Vec<crate::flock::HolderInfo>,
}
/// One row of the "Run-dir locks" section: a `*.lock` file under the runs
/// root's lock directory, keyed by its file stem.
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct RunDirLockRow {
    // File stem of the lock file (e.g. `6.14-abc1234`).
    pub(crate) run_key: String,
    // Absolute path of the lock file, as a display string.
    pub(crate) lockfile: String,
    // Holder records read via `crate::flock::read_holders` (empty on error).
    pub(crate) holders: Vec<crate::flock::HolderInfo>,
}
/// Point-in-time view of every ktstr lock category, in the order the human
/// renderer prints them: LLC, per-CPU, cache-entry, run-dir.
///
/// Each `Vec` is sorted by its key (numeric for llcs/cpus, lexicographic for
/// cache/run_dirs) by `collect_locks_snapshot_from`.
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct LocksSnapshot {
    pub(crate) llcs: Vec<LlcLockRow>,
    pub(crate) cpus: Vec<CpuLockRow>,
    pub(crate) cache: Vec<CacheLockRow>,
    pub(crate) run_dirs: Vec<RunDirLockRow>,
}
/// Collect a snapshot from the default locations: `/tmp` for per-LLC/per-CPU
/// lock files, the default cache root (when it can be resolved), and the
/// shared runs root.
fn collect_locks_snapshot() -> Result<LocksSnapshot> {
    let runs_root = crate::test_support::runs_root();
    let cache_root = CacheDir::default_root().ok();
    collect_locks_snapshot_from(
        Path::new("/tmp"),
        cache_root.as_deref(),
        Some(&runs_root),
    )
}
/// Build a [`LocksSnapshot`] by scanning the given roots for lock files.
///
/// * `tmp_root` — directory holding `ktstr-llc-<n>.lock` / `ktstr-cpu-<n>.lock`.
/// * `cache_root` — when `Some`, its `LOCK_DIR_NAME` subdirectory is scanned
///   for cache-entry locks.
/// * `runs_root` — when `Some`, its `LOCK_DIR_NAME` subdirectory is scanned
///   for run-dir locks.
///
/// Files whose names don't parse (e.g. `ktstr-llc-oops.lock`) are skipped.
/// Result vectors are sorted: llcs/cpus numerically, cache/run_dirs
/// lexicographically by key.
///
/// # Errors
/// Returns an error only when a glob pattern built from `tmp_root` is invalid;
/// unreadable holder files and missing `.locks/` directories are tolerated.
pub(crate) fn collect_locks_snapshot_from(
    tmp_root: &Path,
    cache_root: Option<&Path>,
    runs_root: Option<&Path>,
) -> Result<LocksSnapshot> {
    use crate::vmm::host_topology::HostTopology;
    // Topology is best-effort: without it we still report locks, just with
    // unknown NUMA nodes.
    let host_topo = HostTopology::from_sysfs().ok();

    let mut llcs: Vec<LlcLockRow> = Vec::new();
    for (llc_idx, entry) in indexed_lockfiles(tmp_root, "ktstr-llc-")? {
        let holders = crate::flock::read_holders(&entry).unwrap_or_default();
        // Only map to a NUMA node when the index is within the known LLC groups.
        let numa_node = host_topo
            .as_ref()
            .and_then(|t| (llc_idx < t.llc_groups.len()).then(|| t.llc_numa_node(llc_idx)));
        llcs.push(LlcLockRow {
            llc_idx,
            numa_node,
            lockfile: entry.display().to_string(),
            holders,
        });
    }
    llcs.sort_by_key(|r| r.llc_idx);

    let mut cpus: Vec<CpuLockRow> = Vec::new();
    for (cpu, entry) in indexed_lockfiles(tmp_root, "ktstr-cpu-")? {
        let holders = crate::flock::read_holders(&entry).unwrap_or_default();
        let numa_node = host_topo
            .as_ref()
            .and_then(|t| t.cpu_to_node.get(&cpu).copied());
        cpus.push(CpuLockRow {
            cpu,
            numa_node,
            lockfile: entry.display().to_string(),
            holders,
        });
    }
    cpus.sort_by_key(|r| r.cpu);

    let mut cache: Vec<CacheLockRow> = keyed_lockfiles(cache_root)
        .into_iter()
        .map(|(cache_key, lockfile, holders)| CacheLockRow {
            cache_key,
            lockfile,
            holders,
        })
        .collect();
    cache.sort_by(|a, b| a.cache_key.cmp(&b.cache_key));

    let mut run_dirs: Vec<RunDirLockRow> = keyed_lockfiles(runs_root)
        .into_iter()
        .map(|(run_key, lockfile, holders)| RunDirLockRow {
            run_key,
            lockfile,
            holders,
        })
        .collect();
    run_dirs.sort_by(|a, b| a.run_key.cmp(&b.run_key));

    Ok(LocksSnapshot {
        llcs,
        cpus,
        cache,
        run_dirs,
    })
}

/// Glob `<tmp_root>/<prefix>*.lock` and return `(index, path)` for every entry
/// whose file stem parses as `<prefix><usize>`; malformed names are skipped.
///
/// # Errors
/// Returns an error when the constructed glob pattern itself is invalid.
fn indexed_lockfiles(tmp_root: &Path, prefix: &str) -> Result<Vec<(usize, std::path::PathBuf)>> {
    let pattern = format!("{}/{prefix}*.lock", tmp_root.display());
    let mut out = Vec::new();
    for entry in glob::glob(&pattern)
        .map_err(|e| anyhow!("glob {pattern}: {e}"))?
        .flatten()
    {
        let Some(stem) = entry.file_stem().and_then(|s| s.to_str()) else {
            continue;
        };
        let Some(idx_str) = stem.strip_prefix(prefix) else {
            continue;
        };
        let Ok(idx) = idx_str.parse::<usize>() else {
            continue;
        };
        out.push((idx, entry));
    }
    Ok(out)
}

/// List `<root>/<LOCK_DIR_NAME>/*.lock` as `(stem, path-string, holders)`.
///
/// A `None` root, a bad glob pattern, or a missing lock directory all yield an
/// empty list — these sections are best-effort by design.
fn keyed_lockfiles(root: Option<&Path>) -> Vec<(String, String, Vec<crate::flock::HolderInfo>)> {
    let mut out = Vec::new();
    let Some(root) = root else {
        return out;
    };
    let locks_dir = root.join(crate::flock::LOCK_DIR_NAME);
    let pattern = format!("{}/*.lock", locks_dir.display());
    if let Ok(expanded) = glob::glob(&pattern) {
        for entry in expanded.flatten() {
            let Some(stem) = entry.file_stem().and_then(|s| s.to_str()) else {
                continue;
            };
            let holders = crate::flock::read_holders(&entry).unwrap_or_default();
            out.push((stem.to_string(), entry.display().to_string(), holders));
        }
    }
    out
}
/// Render a snapshot as four human-readable sections (LLC, Per-CPU,
/// Cache-entry, Run-dir), each either `(none)` or a table.
fn render_locks_human(snap: &LocksSnapshot) -> String {
    use std::fmt::Write;

    // One "pid (cmdline)" per line, or the sentinel when nobody holds the lock.
    fn holders_cell(hs: &[crate::flock::HolderInfo]) -> String {
        if hs.is_empty() {
            return crate::flock::NO_HOLDERS_RECORDED.to_string();
        }
        let lines: Vec<String> = hs
            .iter()
            .map(|h| format!("{} ({})", h.pid, h.cmdline))
            .collect();
        lines.join("\n")
    }

    // Unknown NUMA nodes render as `?`.
    fn node_cell(n: Option<usize>) -> String {
        n.map_or_else(|| "?".to_string(), |v| v.to_string())
    }

    let mut buf = String::new();

    writeln!(buf, "LLC locks:").unwrap();
    match snap.llcs.as_slice() {
        [] => writeln!(buf, " (none)").unwrap(),
        rows => {
            let mut table = new_table();
            table.set_header(["LLC", "NODE", "LOCKFILE", "HOLDERS"]);
            for row in rows {
                table.add_row([
                    row.llc_idx.to_string(),
                    node_cell(row.numa_node),
                    row.lockfile.clone(),
                    holders_cell(&row.holders),
                ]);
            }
            writeln!(buf, "{table}").unwrap();
        }
    }

    writeln!(buf, "\nPer-CPU locks:").unwrap();
    match snap.cpus.as_slice() {
        [] => writeln!(buf, " (none)").unwrap(),
        rows => {
            let mut table = new_table();
            table.set_header(["CPU", "NODE", "LOCKFILE", "HOLDERS"]);
            for row in rows {
                table.add_row([
                    row.cpu.to_string(),
                    node_cell(row.numa_node),
                    row.lockfile.clone(),
                    holders_cell(&row.holders),
                ]);
            }
            writeln!(buf, "{table}").unwrap();
        }
    }

    writeln!(buf, "\nCache-entry locks:").unwrap();
    match snap.cache.as_slice() {
        [] => writeln!(buf, " (none)").unwrap(),
        rows => {
            let mut table = new_table();
            table.set_header(["CACHE KEY", "LOCKFILE", "HOLDERS"]);
            for row in rows {
                table.add_row([
                    row.cache_key.clone(),
                    row.lockfile.clone(),
                    holders_cell(&row.holders),
                ]);
            }
            writeln!(buf, "{table}").unwrap();
        }
    }

    writeln!(buf, "\nRun-dir locks:").unwrap();
    match snap.run_dirs.as_slice() {
        [] => writeln!(buf, " (none)").unwrap(),
        rows => {
            let mut table = new_table();
            table.set_header(["RUN KEY", "LOCKFILE", "HOLDERS"]);
            for row in rows {
                table.add_row([
                    row.run_key.clone(),
                    row.lockfile.clone(),
                    holders_cell(&row.holders),
                ]);
            }
            writeln!(buf, "{table}").unwrap();
        }
    }

    buf
}
// Set by the SIGINT handler to tell the watch loop in `list_locks` to exit.
static LOCKS_WATCH_KILL: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);

/// SIGINT handler installed by `list_locks` in watch mode. It only stores to
/// an atomic, which is async-signal-safe; the watch loop polls the flag and
/// exits cleanly instead of the process dying mid-print.
extern "C" fn locks_watch_sigint_handler(_sig: libc::c_int) {
    LOCKS_WATCH_KILL.store(true, std::sync::atomic::Ordering::SeqCst);
}
/// Print a snapshot of all ktstr lock files.
///
/// * `json` — emit JSON (pretty in one-shot mode, compact one-line records in
///   watch mode) instead of human-readable tables.
/// * `watch` — when `Some(interval)`, refresh repeatedly until Ctrl-C; when
///   `None`, print one snapshot and return.
///
/// # Errors
/// Returns an error when snapshot collection or JSON serialization fails.
pub fn list_locks(json: bool, watch: Option<std::time::Duration>) -> Result<()> {
    use std::sync::atomic::Ordering;

    // One-shot mode: print once and return.
    let Some(interval) = watch else {
        let snap = collect_locks_snapshot()?;
        if json {
            println!("{}", serde_json::to_string_pretty(&snap)?);
        } else {
            print!("{}", render_locks_human(&snap));
        }
        return Ok(());
    };

    // Watch mode: trap SIGINT so Ctrl-C breaks the loop instead of killing
    // the process.
    //
    // SAFETY: `locks_watch_sigint_handler` is an `extern "C"` fn whose body
    // only stores to an atomic, which is async-signal-safe.
    unsafe {
        libc::signal(
            libc::SIGINT,
            locks_watch_sigint_handler as *const () as libc::sighandler_t,
        );
    }
    while !LOCKS_WATCH_KILL.load(Ordering::SeqCst) {
        let snap = collect_locks_snapshot()?;
        if json {
            // Compact output: one record per refresh, stream-friendly.
            println!("{}", serde_json::to_string(&snap)?);
        } else {
            // ANSI clear-screen + cursor-home, then redraw the tables.
            print!("\x1b[2J\x1b[H{}", render_locks_human(&snap));
        }
        // Sleep in short slices while polling the kill flag: std's sleep
        // retries after signal interruption, so a single full-interval sleep
        // would delay Ctrl-C handling by up to `interval`.
        let slice = std::time::Duration::from_millis(100);
        let mut remaining = interval;
        while !remaining.is_zero() && !LOCKS_WATCH_KILL.load(Ordering::SeqCst) {
            let step = remaining.min(slice);
            std::thread::sleep(step);
            remaining -= step;
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    //! Unit tests for lock-snapshot collection, JSON field stability, human
    //! rendering, and the watch-mode SIGINT flag.
    use super::*;

    // The JSON field names are consumed by external tooling; this pins them.
    #[test]
    fn locks_snapshot_json_field_names_are_stable() {
        let snap = LocksSnapshot {
            llcs: vec![LlcLockRow {
                llc_idx: 0,
                numa_node: Some(1),
                lockfile: "/tmp/ktstr-llc-0.lock".to_string(),
                holders: Vec::new(),
            }],
            cpus: vec![CpuLockRow {
                cpu: 3,
                numa_node: None,
                lockfile: "/tmp/ktstr-cpu-3.lock".to_string(),
                holders: Vec::new(),
            }],
            cache: vec![CacheLockRow {
                cache_key: "6.14.2-tarball-x86_64".to_string(),
                lockfile: "/tmp/.locks/6.14.2-tarball-x86_64.lock".to_string(),
                holders: Vec::new(),
            }],
            run_dirs: vec![RunDirLockRow {
                run_key: "6.14-abc1234".to_string(),
                lockfile: "/tmp/.locks/6.14-abc1234.lock".to_string(),
                holders: Vec::new(),
            }],
        };
        let val = serde_json::to_value(&snap).expect("serde serialize");
        // Top-level section keys.
        assert!(
            val.get("llcs").is_some(),
            "top-level must have 'llcs': {val}"
        );
        assert!(
            val.get("cpus").is_some(),
            "top-level must have 'cpus': {val}"
        );
        assert!(
            val.get("cache").is_some(),
            "top-level must have 'cache': {val}"
        );
        assert!(
            val.get("run_dirs").is_some(),
            "top-level must have 'run_dirs': {val}"
        );
        // Per-row snake_case field names.
        let llc0 = &val["llcs"][0];
        assert!(
            llc0.get("llc_idx").is_some(),
            "llc_idx (snake_case): {llc0}"
        );
        assert!(llc0.get("numa_node").is_some(), "numa_node: {llc0}");
        assert!(llc0.get("lockfile").is_some(), "lockfile: {llc0}");
        assert!(llc0.get("holders").is_some(), "holders: {llc0}");
        let cpu0 = &val["cpus"][0];
        assert!(cpu0.get("cpu").is_some());
        assert!(cpu0.get("numa_node").is_some());
        let cache0 = &val["cache"][0];
        assert!(cache0.get("cache_key").is_some(), "cache_key: {cache0}");
        let run0 = &val["run_dirs"][0];
        assert!(run0.get("run_key").is_some(), "run_key: {run0}");
        assert!(run0.get("lockfile").is_some(), "lockfile: {run0}");
        assert!(run0.get("holders").is_some(), "holders: {run0}");
    }

    // Empty roots must produce an empty (but Ok) snapshot, not an error.
    #[test]
    fn collect_locks_snapshot_empty_roots() {
        use tempfile::TempDir;
        let tmp_dir = TempDir::new().expect("tempdir tmp_root");
        let cache_dir = TempDir::new().expect("tempdir cache_root");
        let runs_dir = TempDir::new().expect("tempdir runs_root");
        let snap = collect_locks_snapshot_from(
            tmp_dir.path(),
            Some(cache_dir.path()),
            Some(runs_dir.path()),
        )
        .expect("collect must succeed on empty roots");
        assert!(snap.llcs.is_empty(), "no ktstr-llc-*.lock → empty llcs");
        assert!(snap.cpus.is_empty(), "no ktstr-cpu-*.lock → empty cpus");
        assert!(snap.cache.is_empty(), "no .locks/ → empty cache");
        assert!(
            snap.run_dirs.is_empty(),
            "no .locks/ under runs_root → empty run_dirs",
        );
    }

    // Planted lock files are discovered, sorted ascending, and files with
    // unparsable indices (ktstr-llc-oops.lock) are skipped.
    #[test]
    fn collect_locks_snapshot_discovers_lockfiles() {
        use tempfile::TempDir;
        let tmp_dir = TempDir::new().expect("tempdir");
        let path = tmp_dir.path();
        std::fs::write(path.join("ktstr-llc-5.lock"), b"").expect("plant llc-5");
        std::fs::write(path.join("ktstr-llc-2.lock"), b"").expect("plant llc-2");
        std::fs::write(path.join("ktstr-cpu-7.lock"), b"").expect("plant cpu-7");
        std::fs::write(path.join("ktstr-llc-oops.lock"), b"").expect("plant junk");
        let snap = collect_locks_snapshot_from(path, None, None).expect("collect must succeed");
        assert_eq!(snap.llcs.len(), 2);
        assert_eq!(snap.llcs[0].llc_idx, 2, "sort ascending: llc 2 first");
        assert_eq!(snap.llcs[1].llc_idx, 5, "sort ascending: llc 5 second");
        assert_eq!(snap.cpus.len(), 1);
        assert_eq!(snap.cpus[0].cpu, 7);
        assert!(snap.cache.is_empty());
        assert!(snap.run_dirs.is_empty());
    }

    // Run-dir locks live under `<runs_root>/<LOCK_DIR_NAME>/` and are keyed
    // by file stem, sorted lexicographically.
    #[test]
    fn collect_locks_snapshot_discovers_run_dir_lockfiles() {
        use tempfile::TempDir;
        let runs_dir = TempDir::new().expect("tempdir runs_root");
        let locks_dir = runs_dir.path().join(crate::flock::LOCK_DIR_NAME);
        std::fs::create_dir_all(&locks_dir).expect("mkdir .locks/");
        std::fs::write(locks_dir.join("7.0-def5678.lock"), b"").expect("plant 7.0");
        std::fs::write(locks_dir.join("6.14-abc1234.lock"), b"").expect("plant 6.14");
        let tmp_dir = TempDir::new().expect("tempdir tmp_root");
        let snap = collect_locks_snapshot_from(tmp_dir.path(), None, Some(runs_dir.path()))
            .expect("collect must succeed");
        assert_eq!(snap.run_dirs.len(), 2);
        assert_eq!(
            snap.run_dirs[0].run_key, "6.14-abc1234",
            "sort ascending: 6.14 lexically before 7.0",
        );
        assert_eq!(snap.run_dirs[1].run_key, "7.0-def5678");
    }

    // All four section headings must appear, in order, each showing `(none)`.
    #[test]
    fn render_locks_human_empty_snapshot_emits_all_headings_with_none() {
        let snap = LocksSnapshot {
            llcs: Vec::new(),
            cpus: Vec::new(),
            cache: Vec::new(),
            run_dirs: Vec::new(),
        };
        let out = render_locks_human(&snap);
        let llc_pos = out.find("LLC locks:").expect("LLC heading");
        let cpu_pos = out.find("Per-CPU locks:").expect("Per-CPU heading");
        let cache_pos = out.find("Cache-entry locks:").expect("Cache heading");
        let run_pos = out.find("Run-dir locks:").expect("Run-dir heading");
        assert!(
            llc_pos < cpu_pos && cpu_pos < cache_pos && cache_pos < run_pos,
            "headings must appear in order LLC → Per-CPU → Cache → Run-dir; got: {out}",
        );
        let none_count = out.matches("(none)").count();
        assert_eq!(
            none_count, 4,
            "all four empty sections must render `(none)`; got: {out}",
        );
    }

    // A populated row must surface index, node, path, and `pid (cmdline)`.
    #[test]
    fn render_locks_human_populated_llc_row_includes_pid_cmdline_and_node() {
        let snap = LocksSnapshot {
            llcs: vec![LlcLockRow {
                llc_idx: 3,
                numa_node: Some(1),
                lockfile: "/tmp/ktstr-llc-3.lock".to_string(),
                holders: vec![crate::flock::HolderInfo {
                    pid: 4321,
                    cmdline: "ktstr-test-binary".to_string(),
                }],
            }],
            cpus: Vec::new(),
            cache: Vec::new(),
            run_dirs: Vec::new(),
        };
        let out = render_locks_human(&snap);
        assert!(out.contains("3"), "LLC index must appear: {out}");
        assert!(out.contains("1"), "NUMA node must appear: {out}");
        assert!(
            out.contains("/tmp/ktstr-llc-3.lock"),
            "lockfile path must appear: {out}",
        );
        assert!(out.contains("4321"), "holder pid must appear: {out}");
        assert!(
            out.contains("ktstr-test-binary"),
            "holder cmdline must appear: {out}",
        );
        assert!(
            out.contains("4321 (ktstr-test-binary)"),
            "holder must render as `pid (cmdline)`: {out}",
        );
    }

    // Multiple holders of one lock are newline-separated within the cell.
    #[test]
    fn render_locks_human_multi_holder_row_uses_newline_separator() {
        let snap = LocksSnapshot {
            llcs: vec![LlcLockRow {
                llc_idx: 0,
                numa_node: None,
                lockfile: "/tmp/ktstr-llc-0.lock".to_string(),
                holders: vec![
                    crate::flock::HolderInfo {
                        pid: 100,
                        cmdline: "first".to_string(),
                    },
                    crate::flock::HolderInfo {
                        pid: 200,
                        cmdline: "second".to_string(),
                    },
                ],
            }],
            cpus: Vec::new(),
            cache: Vec::new(),
            run_dirs: Vec::new(),
        };
        let out = render_locks_human(&snap);
        assert!(out.contains("100 (first)"), "first holder: {out}");
        assert!(out.contains("200 (second)"), "second holder: {out}");
    }

    // `numa_node: None` renders as `?` rather than being omitted.
    #[test]
    fn render_locks_human_unknown_node_renders_question_mark() {
        let snap = LocksSnapshot {
            llcs: vec![LlcLockRow {
                llc_idx: 7,
                numa_node: None,
                lockfile: "/tmp/ktstr-llc-7.lock".to_string(),
                holders: Vec::new(),
            }],
            cpus: Vec::new(),
            cache: Vec::new(),
            run_dirs: Vec::new(),
        };
        let out = render_locks_human(&snap);
        assert!(
            out.contains('?'),
            "missing NUMA node must render as `?`: {out}",
        );
    }

    // An empty holder list renders the NO_HOLDERS_RECORDED sentinel.
    #[test]
    fn render_locks_human_empty_holders_emits_no_holders_sentinel() {
        let snap = LocksSnapshot {
            llcs: Vec::new(),
            cpus: vec![CpuLockRow {
                cpu: 5,
                numa_node: Some(0),
                lockfile: "/tmp/ktstr-cpu-5.lock".to_string(),
                holders: Vec::new(),
            }],
            cache: Vec::new(),
            run_dirs: Vec::new(),
        };
        let out = render_locks_human(&snap);
        assert!(
            out.contains(crate::flock::NO_HOLDERS_RECORDED),
            "empty holder list must render `{}`: got {out}",
            crate::flock::NO_HOLDERS_RECORDED,
        );
    }

    // Cache-entry tables use the `CACHE KEY` column header.
    #[test]
    fn render_locks_human_cache_section_uses_cache_key_header() {
        let snap = LocksSnapshot {
            llcs: Vec::new(),
            cpus: Vec::new(),
            cache: vec![CacheLockRow {
                cache_key: "6.14.2-tarball-x86_64".to_string(),
                lockfile: "/tmp/.locks/6.14.2-tarball-x86_64.lock".to_string(),
                holders: Vec::new(),
            }],
            run_dirs: Vec::new(),
        };
        let out = render_locks_human(&snap);
        assert!(
            out.contains("CACHE KEY"),
            "cache-entry section must use `CACHE KEY` header: {out}",
        );
        assert!(
            out.contains("6.14.2-tarball-x86_64"),
            "cache key must appear in row: {out}",
        );
    }

    // Run-dir tables use the `RUN KEY` column header.
    #[test]
    fn render_locks_human_run_dir_section_uses_run_key_header() {
        let snap = LocksSnapshot {
            llcs: Vec::new(),
            cpus: Vec::new(),
            cache: Vec::new(),
            run_dirs: vec![RunDirLockRow {
                run_key: "6.14-abc1234".to_string(),
                lockfile: "/tmp/.locks/6.14-abc1234.lock".to_string(),
                holders: Vec::new(),
            }],
        };
        let out = render_locks_human(&snap);
        assert!(
            out.contains("RUN KEY"),
            "run-dir section must use `RUN KEY` header: {out}",
        );
        assert!(
            out.contains("6.14-abc1234"),
            "run key must appear in row: {out}",
        );
    }

    // Smoke test: the kill flag is readable (default state depends on test
    // ordering, so no value is asserted here).
    #[test]
    fn locks_watch_kill_default_state_is_false() {
        let _ = LOCKS_WATCH_KILL.load(std::sync::atomic::Ordering::SeqCst);
    }

    // Invoking the handler directly must flip the flag; reset afterwards so
    // other tests aren't affected.
    #[test]
    fn locks_watch_sigint_handler_flips_kill_flag() {
        LOCKS_WATCH_KILL.store(false, std::sync::atomic::Ordering::SeqCst);
        super::locks_watch_sigint_handler(libc::SIGINT);
        assert!(
            LOCKS_WATCH_KILL.load(std::sync::atomic::Ordering::SeqCst),
            "SIGINT handler must flip LOCKS_WATCH_KILL to true",
        );
        LOCKS_WATCH_KILL.store(false, std::sync::atomic::Ordering::SeqCst);
    }

    // One-shot collection against empty roots must not panic or error.
    #[test]
    fn list_locks_one_shot_no_panic_on_default_host() {
        use tempfile::TempDir;
        let tmp_dir = TempDir::new().expect("tempdir");
        let snap = collect_locks_snapshot_from(tmp_dir.path(), None, None)
            .expect("collect on empty roots must succeed");
        assert!(snap.llcs.is_empty());
        assert!(snap.cpus.is_empty());
        assert!(snap.cache.is_empty());
        assert!(snap.run_dirs.is_empty());
    }
}