#![cfg(test)]
use super::tests_helpers::stage_synthetic_proc;
use super::*;
use crate::metric_types::Bytes;
#[test]
fn capture_with_filters_ghost_threads_with_empty_comm_and_zero_start() {
    // One fully staged thread plus a bare (ghost) task dir: capture must
    // keep the live thread and drop the ghost.
    let proc_root = tempfile::TempDir::new().unwrap();
    let cgroup_root = tempfile::TempDir::new().unwrap();
    let sys_root = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 42;
    let live_tid: i32 = 101;
    let ghost_tid: i32 = 202;
    stage_synthetic_proc(proc_root.path(), tgid, live_tid, "pcomm-proc", "live-thread");
    // Ghost tid: an empty directory only — no comm, no stat staged.
    let ghost_dir = proc_root.path().join(format!("{tgid}/task/{ghost_tid}"));
    std::fs::create_dir_all(&ghost_dir).unwrap();
    let snap = capture_with(proc_root.path(), cgroup_root.path(), sys_root.path(), false);
    assert_eq!(
        snap.threads.len(),
        1,
        "ghost tid with empty comm + zero start must be filtered; \
         got threads: {:?}",
        snap.threads
            .iter()
            .map(|t| (t.tid, &t.comm))
            .collect::<Vec<_>>(),
    );
    assert_eq!(snap.threads[0].tid, live_tid as u32);
    assert_eq!(snap.threads[0].comm, "live-thread");
}
// End-to-end happy path: stage one synthetic tgid/tid tree and verify that
// every ThreadState field round-trips from the fixture files written by
// stage_synthetic_proc (stat, schedstat, status, io, sched, cgroup).
#[test]
fn capture_with_synthetic_tree_assembles_thread_state() {
let proc_tmp = tempfile::TempDir::new().unwrap();
let cgroup_tmp = tempfile::TempDir::new().unwrap();
let sys_tmp = tempfile::TempDir::new().unwrap();
let tgid: i32 = 42;
let tid: i32 = 101;
stage_synthetic_proc(proc_tmp.path(), tgid, tid, "pcomm-proc", "worker-thread");
// Final `false` arg: capture without the syscall-affinity fallback
// (sibling tests in this file name the flag use_syscall_affinity).
let snap = capture_with(proc_tmp.path(), cgroup_tmp.path(), sys_tmp.path(), false);
assert_eq!(snap.threads.len(), 1, "synthetic proc has one tid");
let t = &snap.threads[0];
// Identity fields plus the v2 cgroup path staged by the helper.
assert_eq!(t.tid, tid as u32);
assert_eq!(t.tgid, tgid as u32);
assert_eq!(t.pcomm, "pcomm-proc");
assert_eq!(t.comm, "worker-thread");
assert_eq!(t.cgroup, "/ktstr.slice/worker0");
use crate::metric_types::{
Bytes, CategoricalString, ClockTicks, CpuSet, MonotonicCount, MonotonicNs, OrdinalI32,
PeakNs,
};
// Fields parsed from the per-tid stat fixture.
assert_eq!(t.nice, OrdinalI32(-10));
assert_eq!(t.start_time_clock_ticks, 555_555);
assert_eq!(t.policy, CategoricalString::from("SCHED_OTHER"));
assert_eq!(t.minflt, MonotonicCount(7777));
assert_eq!(t.majflt, MonotonicCount(8888));
assert_eq!(
t.utime_clock_ticks,
ClockTicks(10),
"tail[11] of stat fixture lands at utime_clock_ticks",
);
assert_eq!(
t.stime_clock_ticks,
ClockTicks(11),
"tail[12] of stat fixture lands at stime_clock_ticks",
);
assert_eq!(
t.processor,
OrdinalI32(1700),
"tail[36] of stat fixture (the 17th post-starttime \
token, value 100*17=1700) lands at processor. 1700 is \
a synthetic test value; on real hosts `processor` is \
bounded by the online CPU count and would not exceed \
`nproc - 1`. The synthetic fixture exercises the wire \
format without conforming to that bound, since no real \
/proc is involved.",
);
// Fields parsed from the schedstat fixture.
assert_eq!(t.run_time_ns, MonotonicNs(1_000_000));
assert_eq!(t.wait_time_ns, MonotonicNs(200_000));
assert_eq!(t.timeslices, MonotonicCount(50));
// Fields parsed from the status fixture.
assert_eq!(
t.state, 'R',
"first non-whitespace char of `State:\tR (running)` is \
the single-letter code R",
);
assert_eq!(t.voluntary_csw, MonotonicCount(42));
assert_eq!(t.nonvoluntary_csw, MonotonicCount(7));
assert_eq!(t.cpu_affinity, CpuSet(vec![0, 1, 2, 3]));
// Fields parsed from the io fixture.
assert_eq!(t.rchar, Bytes(100));
assert_eq!(t.wchar, Bytes(200));
assert_eq!(t.syscr, MonotonicCount(10));
assert_eq!(t.syscw, MonotonicCount(20));
assert_eq!(t.read_bytes, Bytes(4096));
assert_eq!(t.write_bytes, Bytes(8192));
assert_eq!(
t.cancelled_write_bytes,
Bytes(512),
"cancelled_write_bytes round-trips from the 7th line of \
/proc/<tid>/io",
);
// Fields parsed from the sched fixture; PN_SCHEDSTAT "ms.us" values
// reconstruct to nanoseconds (see the per-assert messages below).
assert_eq!(t.nr_wakeups, MonotonicCount(11));
assert_eq!(t.nr_wakeups_local, MonotonicCount(8));
assert_eq!(t.nr_wakeups_remote, MonotonicCount(3));
assert_eq!(t.nr_wakeups_sync, MonotonicCount(2));
assert_eq!(t.nr_wakeups_migrate, MonotonicCount(1));
assert_eq!(t.nr_wakeups_affine, MonotonicCount(12));
assert_eq!(
t.nr_wakeups_affine_attempts,
MonotonicCount(20),
"denominator for the affine-wake success ratio \
(nr_wakeups_affine / nr_wakeups_affine_attempts = 12/20)",
);
assert_eq!(t.nr_migrations, MonotonicCount(9));
assert_eq!(t.nr_forced_migrations, MonotonicCount(7));
assert_eq!(t.nr_failed_migrations_affine, MonotonicCount(1));
assert_eq!(t.nr_failed_migrations_running, MonotonicCount(2));
assert_eq!(t.nr_failed_migrations_hot, MonotonicCount(3));
assert_eq!(
t.wait_sum,
MonotonicNs(5_000_250_000),
"PN_SCHEDSTAT 5000.25 reconstructs to 5_000_250_000 ns \
(5000ms + 250_000ns)",
);
assert_eq!(t.wait_count, MonotonicCount(15));
assert_eq!(
t.wait_max,
PeakNs(250_500_000),
"PN_SCHEDSTAT 250.5 reconstructs to 250_500_000 ns",
);
assert_eq!(
t.voluntary_sleep_ns,
MonotonicNs(2_099_750_000),
"voluntary_sleep_ns = sum_sleep_runtime (3_200_500_000) \
minus sum_block_runtime (1_100_750_000) = \
2_099_750_000 ns; capture-side normalization strips \
the kernel's sleep/block double-count",
);
assert_eq!(
t.sleep_max,
PeakNs(180_250_000),
"PN_SCHEDSTAT 180.25 reconstructs to 180_250_000 ns",
);
assert_eq!(
t.block_sum,
MonotonicNs(1_100_750_000),
"PN_SCHEDSTAT 1100.75 reconstructs to 1_100_750_000 ns; \
block_sum is populated from the kernel's `sum_block_runtime` key",
);
assert_eq!(
t.block_max,
PeakNs(60_750_000),
"PN_SCHEDSTAT 60.75 reconstructs to 60_750_000 ns",
);
assert_eq!(
t.iowait_sum,
MonotonicNs(77_000_000),
"PN_SCHEDSTAT 77.0 reconstructs to 77_000_000 ns",
);
assert_eq!(t.iowait_count, MonotonicCount(18));
assert_eq!(
t.exec_max,
PeakNs(90_000_000),
"PN_SCHEDSTAT 90.0 reconstructs to 90_000_000 ns",
);
assert_eq!(
t.slice_max,
PeakNs(400_500_000),
"PN_SCHEDSTAT 400.5 reconstructs to 400_500_000 ns",
);
assert!(
t.ext_enabled,
"ext.enabled = 1 round-trips through the full-key gate \
to ThreadState::ext_enabled true",
);
// Allocator counters: a synthetic-tree capture must not attach any
// probe, so both sides collapse to the absent-counter zero.
assert_eq!(
t.allocated_bytes,
Bytes(0),
"synthetic-tree capture must not probe — allocated_bytes \
collapses to absent-counter zero",
);
assert_eq!(
t.deallocated_bytes,
Bytes(0),
"synthetic-tree capture must not probe — deallocated_bytes \
collapses to absent-counter zero",
);
}
#[test]
fn capture_with_empty_proc_root_produces_empty_snapshot() {
    // The proc root holds only a loadavg file — no numeric tgid dirs —
    // so the snapshot must contain zero threads.
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    std::fs::write(proc_dir.path().join("loadavg"), "0.0 0.0 0.0 1/1 1\n").unwrap();
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), true);
    assert!(
        snap.threads.is_empty(),
        "empty proc_root must produce empty snapshot; got {} threads",
        snap.threads.len(),
    );
}
#[test]
fn capture_with_inode_cache_collapses_duplicate_binaries() {
    // Two tgids whose exe symlinks resolve to the same file; the cache-hit
    // branch must not invent allocator counters for the second one.
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    std::fs::write(proc_dir.path().join("loadavg"), "0.0 0.0 0.0 1/1 1\n").unwrap();
    let shared_exe = proc_dir.path().join("shared-exe");
    std::fs::write(&shared_exe, b"\x7fELFsynthetic\n").unwrap();
    for tgid in [4242, 4243] {
        stage_synthetic_proc(proc_dir.path(), tgid, tgid + 1, "shared-pcomm", "shared-comm");
        let exe_link = proc_dir.path().join(tgid.to_string()).join("exe");
        std::os::unix::fs::symlink(&shared_exe, &exe_link).unwrap();
    }
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), true);
    assert_eq!(
        snap.threads.len(),
        2,
        "both staged threads must land in the snapshot",
    );
    for t in snap.threads.iter() {
        assert_eq!(
            t.allocated_bytes,
            Bytes(0),
            "synthetic /proc has no maps; attach fails, allocated_bytes \
             collapses to absent-counter zero — cache-hit branch must not \
             fabricate a non-zero counter",
        );
    }
}
#[test]
fn capture_with_nonexistent_proc_root_produces_empty_snapshot() {
    // Point both the proc and sys roots at a path that was never created.
    let scratch = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let nonexistent = scratch.path().join("does-not-exist");
    let snap = capture_with(&nonexistent, cgroup_dir.path(), &nonexistent, false);
    assert!(
        snap.threads.is_empty(),
        "nonexistent proc_root must produce empty snapshot; got \
         {} threads — iter_tgids_at must collapse ENOENT to empty",
        snap.threads.len(),
    );
}
#[test]
fn capture_with_tgid_missing_task_dir_yields_no_threads_for_that_tgid() {
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    // One fully staged tgid...
    let live_tgid: i32 = 4242;
    let live_tid: i32 = 101;
    stage_synthetic_proc(proc_dir.path(), live_tgid, live_tid, "live-pcomm", "live-comm");
    // ...and one bare tgid dir without any task/ subdir.
    let bare_tgid: i32 = 4243;
    std::fs::create_dir_all(proc_dir.path().join(bare_tgid.to_string())).unwrap();
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(
        snap.threads.len(),
        1,
        "tgid 4243 has no `task/` subdir → contributes zero threads; \
         only live tgid 4242's tid should land. got {} threads, expected 1",
        snap.threads.len(),
    );
    let only = &snap.threads[0];
    assert_eq!(only.tgid, live_tgid as u32);
    assert_eq!(only.tid, live_tid as u32);
}
#[test]
fn capture_with_non_numeric_proc_entries_are_filtered() {
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let live_tgid: i32 = 5151;
    let live_tid: i32 = 5152;
    stage_synthetic_proc(proc_dir.path(), live_tgid, live_tid, "real", "real-thread");
    // Junk entries: non-numeric names plus the non-positive ids 0 and -1.
    for junk in ["self", "thread-self", "sys", "version", "12abc", "abc", "0", "-1"] {
        std::fs::create_dir_all(proc_dir.path().join(junk)).unwrap();
    }
    assert_eq!(
        iter_tgids_at(proc_dir.path()),
        vec![live_tgid],
        "iter_tgids_at must return only the real numeric tgid; \
         non-numeric and `0`/`-1` entries must be filtered by \
         parse::<i32>().ok() + `> 0` predicates",
    );
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(
        snap.threads.len(),
        1,
        "non-numeric proc_root entries (`self`, `12abc`, etc.) and \
         `0`/`-1` must be filtered by iter_tgids_at; got {} threads, \
         expected 1 (only the real tgid {live_tgid})",
        snap.threads.len(),
    );
    assert_eq!(snap.threads[0].tgid, live_tgid as u32);
}
#[test]
fn capture_pid_with_nonexistent_pid_produces_empty_snapshot() {
    // Targeting a pid that was never staged must come back empty.
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let snap = capture_pid_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), 99999, false);
    assert!(
        snap.threads.is_empty(),
        "capture_pid_with against nonexistent pid must produce empty \
         snapshot; got {} threads — iter_task_ids_at must collapse \
         ENOENT to empty",
        snap.threads.len(),
    );
}
// Fault isolation for a corrupt per-tid stat file: stat-derived fields
// fall back to their defaults, while fields sourced from the intact
// status and io fixtures still populate — and the thread still lands.
#[test]
fn capture_with_corrupt_stat_file_zeroes_stat_fields_only() {
let proc_tmp = tempfile::TempDir::new().unwrap();
let cgroup_tmp = tempfile::TempDir::new().unwrap();
let sys_tmp = tempfile::TempDir::new().unwrap();
let tgid: i32 = 6161;
let tid: i32 = 6162;
stage_synthetic_proc(proc_tmp.path(), tgid, tid, "p", "live");
// Overwrite the staged stat with content that has no parenthesised
// comm field, so the stat parser cannot find its anchor.
let stat_path = proc_tmp
.path()
.join(tgid.to_string())
.join("task")
.join(tid.to_string())
.join("stat");
std::fs::write(&stat_path, "garbage no parens here\n").unwrap();
let snap = capture_with(proc_tmp.path(), cgroup_tmp.path(), sys_tmp.path(), false);
assert_eq!(
snap.threads.len(),
1,
"corrupt stat does not block thread landing — comm + status \
+ io still populate; ghost filter only fires when comm AND \
start_time are both empty/zero. got {} threads",
snap.threads.len(),
);
let t = &snap.threads[0];
// Every stat-derived field must read as its default.
assert_eq!(
t.start_time_clock_ticks, 0,
"corrupt stat → start_time_clock_ticks default 0; got {}",
t.start_time_clock_ticks
);
use crate::metric_types::{Bytes, CategoricalString, ClockTicks, MonotonicCount, OrdinalI32};
assert_eq!(
t.nice,
OrdinalI32(0),
"corrupt stat → nice default 0; got {}",
t.nice.0,
);
assert_eq!(
t.policy,
CategoricalString::from(""),
"corrupt stat → policy default empty; got {:?}",
t.policy
);
assert_eq!(t.utime_clock_ticks, ClockTicks(0));
assert_eq!(t.stime_clock_ticks, ClockTicks(0));
assert_eq!(t.processor, OrdinalI32(0));
// Fields from the other (intact) fixture files are unaffected.
assert_eq!(
t.voluntary_csw,
MonotonicCount(42),
"status file is intact → voluntary_csw still populates"
);
assert_eq!(
t.rchar,
Bytes(100),
"io file is intact → rchar still populates"
);
}
#[test]
fn capture_with_missing_schedstat_zeroes_schedstat_fields() {
    use crate::metric_types::{MonotonicCount, MonotonicNs};
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 7171;
    let tid: i32 = 7172;
    stage_synthetic_proc(proc_dir.path(), tgid, tid, "p", "live");
    // Remove only the schedstat file staged by the helper.
    let task_dir = proc_dir
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(tid.to_string());
    std::fs::remove_file(task_dir.join("schedstat")).unwrap();
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(
        snap.threads.len(),
        1,
        "thread still lands with schedstat absent"
    );
    let t = &snap.threads[0];
    assert_eq!(
        t.run_time_ns,
        MonotonicNs(0),
        "missing schedstat → run_time_ns default 0; got {}",
        t.run_time_ns.0
    );
    assert_eq!(t.wait_time_ns, MonotonicNs(0));
    assert_eq!(t.timeslices, MonotonicCount(0));
    // Fields from other fixture files stay populated.
    assert_eq!(t.start_time_clock_ticks, 555_555);
}
#[test]
fn capture_with_corrupt_status_zeroes_status_fields_and_empty_affinity() {
    use crate::metric_types::MonotonicCount;
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 8181;
    let tid: i32 = 8182;
    stage_synthetic_proc(proc_dir.path(), tgid, tid, "p", "live");
    // Overwrite the staged status file with unparseable content.
    let status_path = proc_dir
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(tid.to_string())
        .join("status");
    std::fs::write(&status_path, "totally malformed garbage no colons here\n").unwrap();
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(snap.threads.len(), 1);
    let t = &snap.threads[0];
    assert_eq!(
        t.voluntary_csw,
        MonotonicCount(0),
        "corrupt status → voluntary_csw default 0; got {}",
        t.voluntary_csw.0
    );
    assert_eq!(t.nonvoluntary_csw, MonotonicCount(0));
    assert_eq!(
        t.state, '~',
        "corrupt status → state collapses to '~' (capture-time \
         unwrap_or_else(default_state_char)); got {:?}",
        t.state
    );
    assert!(
        t.cpu_affinity.0.is_empty(),
        "use_syscall_affinity=false + corrupt status → cpu_affinity \
         must be empty Vec, NOT inherit caller's real affinity; got {:?}",
        t.cpu_affinity,
    );
}
#[test]
fn capture_with_missing_io_zeroes_io_fields() {
    use crate::metric_types::{Bytes, MonotonicCount};
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 9191;
    let tid: i32 = 9192;
    stage_synthetic_proc(proc_dir.path(), tgid, tid, "p", "live");
    // Remove only the io file staged by the helper.
    let task_dir = proc_dir
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(tid.to_string());
    std::fs::remove_file(task_dir.join("io")).unwrap();
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(snap.threads.len(), 1);
    let t = &snap.threads[0];
    assert_eq!(
        t.rchar,
        Bytes(0),
        "missing io → rchar default 0; got {}",
        t.rchar.0,
    );
    assert_eq!(t.wchar, Bytes(0));
    assert_eq!(t.syscr, MonotonicCount(0));
    assert_eq!(t.syscw, MonotonicCount(0));
    assert_eq!(t.read_bytes, Bytes(0));
    assert_eq!(t.write_bytes, Bytes(0));
    assert_eq!(t.cancelled_write_bytes, Bytes(0));
    // Fields from other fixture files stay populated.
    assert_eq!(t.start_time_clock_ticks, 555_555);
}
#[test]
fn capture_with_missing_sched_zeroes_sched_fields() {
    use crate::metric_types::{MonotonicCount, MonotonicNs, PeakNs};
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 1010;
    let tid: i32 = 1011;
    stage_synthetic_proc(proc_dir.path(), tgid, tid, "p", "live");
    // Remove only the sched file staged by the helper.
    let task_dir = proc_dir
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(tid.to_string());
    std::fs::remove_file(task_dir.join("sched")).unwrap();
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(snap.threads.len(), 1);
    let t = &snap.threads[0];
    assert_eq!(
        t.nr_wakeups,
        MonotonicCount(0),
        "missing sched → nr_wakeups default 0; got {}",
        t.nr_wakeups.0,
    );
    assert_eq!(t.nr_migrations, MonotonicCount(0));
    assert_eq!(t.wait_sum, MonotonicNs(0));
    assert_eq!(t.wait_max, PeakNs(0));
    assert_eq!(t.voluntary_sleep_ns, MonotonicNs(0));
    assert_eq!(t.block_sum, MonotonicNs(0));
    assert_eq!(t.iowait_sum, MonotonicNs(0));
    assert_eq!(t.exec_max, PeakNs(0));
    assert_eq!(t.slice_max, PeakNs(0));
    assert!(
        !t.ext_enabled,
        "missing sched → ext.enabled key absent → ext_enabled false; \
         got {}",
        t.ext_enabled
    );
}
#[test]
fn capture_with_block_runtime_absent_zeroes_voluntary_sleep_not_sleep_sum() {
    use crate::metric_types::MonotonicNs;
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 2020;
    let tid: i32 = 2021;
    stage_synthetic_proc(proc_dir.path(), tgid, tid, "p", "live");
    // Replace the staged sched file with sleep keys only — no
    // sum_block_runtime key at all.
    let sched_path = proc_dir
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(tid.to_string())
        .join("sched");
    std::fs::write(
        &sched_path,
        "\
         sum_sleep_runtime : 3200.50\n\
         se.statistics.sleep_max : 180.25\n",
    )
    .unwrap();
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(snap.threads.len(), 1);
    let t = &snap.threads[0];
    assert_eq!(
        t.voluntary_sleep_ns,
        MonotonicNs(0),
        "block_sum absent → voluntary_sleep_ns must collapse to 0; \
         falling back to sum_sleep_runtime would mislabel \
         involuntary block as voluntary sleep",
    );
    assert_eq!(t.block_sum, MonotonicNs(0));
}
#[test]
fn capture_with_sleep_runtime_absent_zeroes_voluntary_sleep() {
    use crate::metric_types::MonotonicNs;
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 2030;
    let tid: i32 = 2031;
    stage_synthetic_proc(proc_dir.path(), tgid, tid, "p", "live");
    // Replace the staged sched file with block keys only — no
    // sum_sleep_runtime key at all.
    let sched_path = proc_dir
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(tid.to_string())
        .join("sched");
    std::fs::write(
        &sched_path,
        "\
         sum_block_runtime : 1100.75\n\
         se.statistics.block_max : 60.75\n",
    )
    .unwrap();
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(snap.threads.len(), 1);
    let t = &snap.threads[0];
    assert_eq!(
        t.voluntary_sleep_ns,
        MonotonicNs(0),
        "sleep_sum absent → voluntary_sleep_ns must be 0",
    );
    assert_eq!(t.block_sum, MonotonicNs(1_100_750_000));
}
#[test]
fn capture_with_partial_mid_capture_race_lands_zero_thread() {
    use crate::metric_types::{Bytes, MonotonicCount, MonotonicNs};
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 1212;
    let tid: i32 = 1213;
    stage_synthetic_proc(proc_dir.path(), tgid, tid, "racy-pcomm", "racy-comm");
    // Simulate a thread exiting mid-capture: every staged per-tid file
    // except comm vanishes before capture runs.
    let task_dir = proc_dir
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(tid.to_string());
    for gone in ["stat", "schedstat", "status", "io", "sched", "cgroup"] {
        std::fs::remove_file(task_dir.join(gone)).unwrap();
    }
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(snap.threads.len(), 1, "comm intact → thread still lands");
    let t = &snap.threads[0];
    assert_eq!(t.comm, "racy-comm", "comm survives the racy partial reads");
    assert_eq!(t.start_time_clock_ticks, 0);
    assert_eq!(t.nr_wakeups, MonotonicCount(0));
    assert_eq!(t.run_time_ns, MonotonicNs(0));
    assert_eq!(t.voluntary_csw, MonotonicCount(0));
    assert_eq!(t.rchar, Bytes(0));
    assert_eq!(t.minflt, MonotonicCount(0));
    assert_eq!(t.cgroup, "");
    assert!(
        snap.cgroup_stats.is_empty(),
        "all threads have empty cgroup → enrichment loop skips → \
         cgroup_stats stays empty",
    );
}
#[test]
fn capture_pid_with_filters_ghost_threads() {
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 1313;
    let live_tid: i32 = 1314;
    let ghost_tid: i32 = 1315;
    stage_synthetic_proc(proc_dir.path(), tgid, live_tid, "p", "live");
    // Ghost tid: a bare task dir with no fixture files at all.
    std::fs::create_dir_all(
        proc_dir
            .path()
            .join(tgid.to_string())
            .join("task")
            .join(ghost_tid.to_string()),
    )
    .unwrap();
    let snap = capture_pid_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), tgid, false);
    assert_eq!(
        snap.threads.len(),
        1,
        "capture_pid_with must filter ghost tid {ghost_tid}; got {} \
         threads, expected 1 (only live tid {live_tid})",
        snap.threads.len(),
    );
    assert_eq!(snap.threads[0].tid, live_tid as u32);
}
#[test]
fn capture_with_malformed_cpus_allowed_list_yields_empty_affinity() {
    use crate::metric_types::MonotonicCount;
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 1414;
    let tid: i32 = 1415;
    stage_synthetic_proc(proc_dir.path(), tgid, tid, "p", "live");
    // Rewrite status with a reversed (start > end) cpulist range.
    let status_path = proc_dir
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(tid.to_string())
        .join("status");
    std::fs::write(
        &status_path,
        "Name:\tfoo\n\
         State:\tR (running)\n\
         voluntary_ctxt_switches:\t1\n\
         nonvoluntary_ctxt_switches:\t1\n\
         Cpus_allowed_list:\t5-3\n",
    )
    .unwrap();
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(snap.threads.len(), 1);
    let t = &snap.threads[0];
    assert!(
        t.cpu_affinity.0.is_empty(),
        "malformed Cpus_allowed_list `5-3` → parse_cpu_list returns \
         None → cpu_affinity defaults to empty Vec (NOT a partial \
         range, NOT the caller's affinity); got {:?}",
        t.cpu_affinity,
    );
    assert_eq!(
        t.voluntary_csw,
        MonotonicCount(1),
        "malformed cpulist must NOT corrupt csw fields on the same \
         status file — per-arm Option isolation"
    );
}
#[test]
fn capture_with_huge_cpu_range_in_status_yields_empty_affinity() {
    use crate::metric_types::MonotonicCount;
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 1515;
    let tid: i32 = 1516;
    stage_synthetic_proc(proc_dir.path(), tgid, tid, "p", "live");
    // Rewrite status with a syntactically valid but absurdly wide range.
    let status_path = proc_dir
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(tid.to_string())
        .join("status");
    std::fs::write(
        &status_path,
        "Cpus_allowed_list:\t0-4294967295\n\
         voluntary_ctxt_switches:\t1\n\
         nonvoluntary_ctxt_switches:\t1\n",
    )
    .unwrap();
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(snap.threads.len(), 1);
    let t = &snap.threads[0];
    assert!(
        t.cpu_affinity.0.is_empty(),
        "huge cpulist range `0-4294967295` exceeds the 64 Ki \
         expansion cap → parse_cpu_list returns None → cpu_affinity \
         empty (NOT a 4-billion-element Vec, NOT a partial range); \
         got {} elements",
        t.cpu_affinity.0.len(),
    );
    assert_eq!(
        t.voluntary_csw,
        MonotonicCount(1),
        "huge cpulist rejection must not break csw parsing on the \
         same status file — per-arm Option isolation"
    );
}
#[test]
fn capture_with_non_numeric_task_entries_are_filtered() {
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let live_tgid: i32 = 8181;
    let live_tid: i32 = 8182;
    stage_synthetic_proc(proc_dir.path(), live_tgid, live_tid, "real", "real-thread");
    // Junk task/ entries: non-numeric names plus the non-positive 0 / -1.
    let task_dir = proc_dir.path().join(live_tgid.to_string()).join("task");
    for junk in ["status", "self", "12abc", "abc", "0", "-1"] {
        std::fs::create_dir_all(task_dir.join(junk)).unwrap();
    }
    assert_eq!(
        iter_task_ids_at(proc_dir.path(), live_tgid),
        vec![live_tid],
        "iter_task_ids_at must return only the real numeric tid; \
         non-numeric and `0`/`-1` entries must be filtered by \
         parse::<i32>().ok() + `> 0` predicates",
    );
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(
        snap.threads.len(),
        1,
        "non-numeric `task/` entries must be filtered by \
         iter_task_ids_at; got {} threads, expected 1",
        snap.threads.len(),
    );
    assert_eq!(snap.threads[0].tid, live_tid as u32);
}
#[test]
fn capture_with_v1_only_cgroup_yields_empty_cgroup_string() {
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 9191;
    let tid: i32 = 9192;
    stage_synthetic_proc(proc_dir.path(), tgid, tid, "p", "live");
    // Replace the staged cgroup file with v1-only lines (no `0::` entry).
    let cgroup_path = proc_dir
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(tid.to_string())
        .join("cgroup");
    std::fs::write(
        &cgroup_path,
        "12:cpuset:/legacy/cpuset/path\n\
         5:freezer:/legacy/freezer\n\
         3:blkio:/\n",
    )
    .unwrap();
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(
        snap.threads.len(),
        1,
        "v1-only cgroup does not block thread landing — comm + \
         start_time are intact, ghost filter does not fire; \
         got {} threads",
        snap.threads.len(),
    );
    let t = &snap.threads[0];
    assert_eq!(
        t.cgroup, "",
        "v1-only cgroup file → parse_cgroup_v2 returns None → \
         ThreadState.cgroup defaults to empty; got {:?}",
        t.cgroup,
    );
    assert!(
        !snap.cgroup_stats.contains_key(""),
        "empty-cgroup thread must NOT seed an empty-key entry in \
         cgroup_stats — the enrichment loop's `!is_empty()` guard \
         pins the skip; got keys: {:?}",
        snap.cgroup_stats.keys().collect::<Vec<_>>(),
    );
}
#[test]
fn capture_to_returns_err_on_unwritable_path() {
    // The parent dir is never created, so writing the snapshot must fail
    // and the error chain must carry the target path.
    let scratch = tempfile::TempDir::new().unwrap();
    let unwritable = scratch.path().join("missing-dir").join("snap.ctprof.zst");
    let err = capture_to(&unwritable).unwrap_err();
    let chain = format!("{err:#}");
    assert!(
        chain.contains(unwritable.to_string_lossy().as_ref()),
        "error chain must name the unwritable target path; got: {chain}",
    );
}
#[test]
fn capture_with_stale_cgroup_path_yields_all_zero_stats() {
    // The staged thread names a cgroup that does not exist under the
    // (empty) cgroup root, so every enriched stat must read as zero.
    let proc_dir = tempfile::TempDir::new().unwrap();
    let cgroup_dir = tempfile::TempDir::new().unwrap();
    let sys_dir = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 7373;
    let tid: i32 = 7374;
    stage_synthetic_proc(proc_dir.path(), tgid, tid, "p", "live");
    let snap = capture_with(proc_dir.path(), cgroup_dir.path(), sys_dir.path(), false);
    assert_eq!(snap.threads.len(), 1);
    let stats = snap
        .cgroup_stats
        .get("/ktstr.slice/worker0")
        .expect("non-empty cgroup string must seed the stats map");
    assert_eq!(stats.cpu.usage_usec, 0, "stale cgroup → cpu_usage_usec 0");
    assert_eq!(stats.cpu.nr_throttled, 0, "stale cgroup → nr_throttled 0");
    assert_eq!(
        stats.cpu.throttled_usec, 0,
        "stale cgroup → throttled_usec 0"
    );
    assert_eq!(stats.memory.current, 0, "stale cgroup → memory_current 0");
}
#[test]
fn read_cgroup_at_v1_only_cgroup_returns_none() {
    let root = tempfile::TempDir::new().unwrap();
    let tgid: i32 = 4242;
    let tid: i32 = 4243;
    // Stage a task dir whose cgroup file has only v1 lines (no `0::`).
    let task_dir = root
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(tid.to_string());
    std::fs::create_dir_all(&task_dir).unwrap();
    std::fs::write(
        task_dir.join("cgroup"),
        "12:cpuset:/legacy/cpuset/path\n\
         5:freezer:/legacy/freezer\n",
    )
    .unwrap();
    assert_eq!(
        read_cgroup_at(root.path(), tgid, tid),
        None,
        "v1-only cgroup file → read_cgroup_at returns None (no 0:: line)",
    );
    // A tid with no cgroup file at all must also come back None.
    assert_eq!(
        read_cgroup_at(root.path(), tgid, 9999),
        None,
        "missing cgroup file → read_cgroup_at returns None",
    );
}
#[test]
fn parse_cgroup_v2_root_only_path_returns_slash() {
    // The `0::` v2 line parses to "/" whether it stands alone, carries a
    // trailing space, or is sandwiched between v1 controller lines.
    for raw in [
        "0::/\n",
        "0::/ \n",
        "12:cpuset:/legacy/path\n0::/\n5:freezer:/legacy\n",
    ] {
        assert_eq!(parse_cgroup_v2(raw), Some("/".to_string()));
    }
}
// Leader-dedup for nr_threads: both the leader (tid == tgid) and a worker
// carry `Threads:\t2` in their status fixtures, but only the leader may
// keep the parsed count — the worker's copy must zero out so sum-style
// aggregation cannot multiply the count across the bucket.
#[test]
fn capture_with_nr_threads_dedup_populates_leader_only() {
use crate::metric_types::GaugeCount;
let proc_tmp = tempfile::TempDir::new().unwrap();
let cgroup_tmp = tempfile::TempDir::new().unwrap();
let sys_tmp = tempfile::TempDir::new().unwrap();
// leader_tid == tgid marks the thread-group leader.
let tgid: i32 = 7000;
let leader_tid: i32 = 7000;
let worker_tid: i32 = 7001;
stage_synthetic_proc(
proc_tmp.path(),
tgid,
leader_tid,
"leader-pcomm",
"leader-comm",
);
// Overwrite the leader's status with a fixture that includes Threads: 2.
let leader_status_path = proc_tmp
.path()
.join(tgid.to_string())
.join("task")
.join(leader_tid.to_string())
.join("status");
let leader_status = "Name:\tfoo\n\
State:\tR (running)\n\
voluntary_ctxt_switches:\t1\n\
nonvoluntary_ctxt_switches:\t1\n\
Cpus_allowed_list:\t0\n\
Threads:\t2\n";
std::fs::write(&leader_status_path, leader_status).unwrap();
stage_synthetic_proc(
proc_tmp.path(),
tgid,
worker_tid,
"leader-pcomm",
"worker-comm",
);
// The worker's status carries the SAME Threads: 2 line — the dedup
// logic, not the fixture, must decide who keeps the count.
let worker_status_path = proc_tmp
.path()
.join(tgid.to_string())
.join("task")
.join(worker_tid.to_string())
.join("status");
let worker_status = "Name:\tfoo\n\
State:\tR (running)\n\
voluntary_ctxt_switches:\t1\n\
nonvoluntary_ctxt_switches:\t1\n\
Cpus_allowed_list:\t0\n\
Threads:\t2\n";
std::fs::write(&worker_status_path, worker_status).unwrap();
let snap = capture_with(proc_tmp.path(), cgroup_tmp.path(), sys_tmp.path(), false);
assert_eq!(snap.threads.len(), 2, "two threads under tgid {tgid}");
let leader = snap
.threads
.iter()
.find(|t| t.tid == leader_tid as u32)
.expect("leader thread present");
let worker = snap
.threads
.iter()
.find(|t| t.tid == worker_tid as u32)
.expect("worker thread present");
assert_eq!(leader.tid, leader.tgid, "leader: tid == tgid");
assert_ne!(worker.tid, worker.tgid, "worker: tid != tgid");
assert_eq!(
leader.nr_threads,
GaugeCount(2),
"leader.nr_threads must carry the parsed Threads: value (2); \
got {:?}",
leader.nr_threads,
);
assert_eq!(
worker.nr_threads,
GaugeCount(0),
"worker.nr_threads must zero out under leader-dedup; \
got {:?} — populating non-leaders would let any Sum-style \
aggregator multiply the count by itself across the bucket",
worker.nr_threads,
);
}
// Ghost-filter corner coverage: the filter is AND-gated on (empty comm)
// AND (zero start_time). A thread with a parseable comm but no stat file
// — hence zero start_time — must still surface; only the both-empty
// corner (covered by the other ghost tests) may be dropped.
#[test]
fn capture_with_ghost_filter_four_corner_keeps_nonzero_halves() {
let proc_tmp = tempfile::TempDir::new().unwrap();
let cgroup_tmp = tempfile::TempDir::new().unwrap();
let sys_tmp = tempfile::TempDir::new().unwrap();
// Corner 1: fully staged thread (non-empty comm, non-zero start_time).
let tgid_alive: i32 = 8000;
let tid_alive: i32 = 8001;
stage_synthetic_proc(
proc_tmp.path(),
tgid_alive,
tid_alive,
"alive-pcomm",
"alive-comm",
);
// Corner 2: comm files staged by hand at both the tgid and tid level,
// but NO stat file — start_time stays at its zero default.
let tgid_no_stat: i32 = 8002;
let tid_no_stat: i32 = 8003;
let task_dir = proc_tmp
.path()
.join(tgid_no_stat.to_string())
.join("task")
.join(tid_no_stat.to_string());
std::fs::create_dir_all(&task_dir).unwrap();
std::fs::write(
proc_tmp.path().join(tgid_no_stat.to_string()).join("comm"),
"parseable-pcomm\n",
)
.unwrap();
std::fs::write(task_dir.join("comm"), "parseable-comm\n").unwrap();
let snap = capture_with(proc_tmp.path(), cgroup_tmp.path(), sys_tmp.path(), false);
let alive = snap
.threads
.iter()
.find(|t| t.tid == tid_alive as u32)
.expect("alive thread (nonempty comm, nonzero start) must surface");
assert!(
!alive.comm.is_empty(),
"alive thread carries non-empty comm",
);
assert_ne!(
alive.start_time_clock_ticks, 0,
"alive thread carries non-zero start_time",
);
let comm_only = snap
.threads
.iter()
.find(|t| t.tid == tid_no_stat as u32)
.expect(
"comm-only thread must surface; ghost filter is AND-gated, \
so non-empty comm with zero start_time must NOT be filtered",
);
assert_eq!(
comm_only.comm, "parseable-comm",
"comm-only thread surfaces with the parsed non-empty comm",
);
assert_eq!(
comm_only.start_time_clock_ticks, 0,
"comm-only thread has zero start_time (no stat file staged)",
);
}
#[test]
fn smaps_rollup_bytes_saturating_mul_clamps_at_u64_max() {
    // Four fixtures: a saturating top value, a small exact value, zero,
    // and the largest kB value whose byte conversion does NOT saturate.
    let near_max_kb = u64::MAX / 1024;
    let mut t = ThreadState {
        tid: 1,
        tgid: 1,
        ..ThreadState::default()
    };
    for (key, kb) in [
        ("Rss", u64::MAX),
        ("Pss", 4),
        ("Shared_Clean", 0),
        ("Anonymous", near_max_kb),
    ] {
        t.smaps_rollup_kb.insert(key.into(), kb);
    }
    let map: std::collections::BTreeMap<&String, Bytes> = t.smaps_rollup_bytes().collect();
    let rss_key = "Rss".to_string();
    let pss_key = "Pss".to_string();
    let shared_key = "Shared_Clean".to_string();
    let anon_key = "Anonymous".to_string();
    assert_eq!(
        map[&rss_key],
        Bytes(u64::MAX),
        "u64::MAX kB must saturate at u64::MAX bytes; got {:?}",
        map[&rss_key],
    );
    assert_eq!(
        map[&pss_key],
        Bytes(4 * 1024),
        "4 kB must convert to 4096 bytes; got {:?}",
        map[&pss_key],
    );
    assert_eq!(
        map[&shared_key],
        Bytes(0),
        "0 kB must convert to 0 bytes; got {:?}",
        map[&shared_key],
    );
    let expected_anon = near_max_kb.saturating_mul(1024);
    assert_eq!(
        map[&anon_key],
        Bytes(expected_anon),
        "below-saturation kB value must convert exactly; \
         got {:?}, expected {:?}",
        map[&anon_key],
        Bytes(expected_anon),
    );
    assert!(
        expected_anon < u64::MAX,
        "test fixture: near_max_kb * 1024 must NOT saturate \
         so the boundary distinction is meaningful; got {expected_anon}",
    );
}