use std::collections::BTreeMap;
use std::fs;
use std::path::{Path, PathBuf};
use anyhow::{Context, Result};
/// Point-in-time capture of per-thread scheduler/memory/IO counters for every
/// task visible under a /proc root, plus per-cgroup resource statistics.
///
/// Persisted as zstd-compressed JSON; see `load`/`write` in the impl below.
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
#[non_exhaustive]
pub struct HostStateSnapshot {
    /// Capture wall-clock time, nanoseconds since the Unix epoch (0 if the
    /// system clock read before the epoch).
    pub captured_at_unix_ns: u64,
    /// Optional host/hardware description; `None` when host-context
    /// collection was skipped (see `capture_with`).
    pub host: Option<crate::host_context::HostContext>,
    /// One entry per thread (tid) discovered during the /proc walk.
    pub threads: Vec<ThreadState>,
    /// Per-cgroup stats keyed by the cgroup-v2 path referenced by `threads`.
    pub cgroup_stats: BTreeMap<String, CgroupStats>,
}
/// Per-thread counters assembled from several files under
/// `/proc/<tgid>/task/<tid>/` (comm, cgroup, stat, schedstat, status, sched,
/// io). Counter fields degrade to 0 when their source file is missing or
/// unparseable, so a thread that dies mid-capture still yields a record.
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
#[non_exhaustive]
pub struct ThreadState {
    // Identity.
    pub tid: u32,
    pub tgid: u32,
    /// comm of the thread-group leader (process name), supplied by caller.
    pub pcomm: String,
    /// comm of this specific thread.
    pub comm: String,
    /// cgroup-v2 path (the `0::` line of the cgroup file); empty if unknown.
    pub cgroup: String,
    // From the stat file (fields 22, 41, 19 after the comm).
    pub start_time_clock_ticks: u64,
    /// Rendered via `policy_name`, e.g. "SCHED_OTHER".
    pub policy: String,
    pub nice: i32,
    /// Allowed CPU ids, sorted ascending; empty when unknown.
    pub cpu_affinity: Vec<u32>,
    // From the schedstat file (run ns, runqueue-wait ns, timeslice count).
    pub run_time_ns: u64,
    pub wait_time_ns: u64,
    pub timeslices: u64,
    // From the status file.
    pub voluntary_csw: u64,
    pub nonvoluntary_csw: u64,
    // From the sched file (bare or `se.statistics.*`-prefixed keys).
    pub nr_wakeups: u64,
    pub nr_wakeups_local: u64,
    pub nr_wakeups_remote: u64,
    pub nr_wakeups_sync: u64,
    pub nr_wakeups_migrate: u64,
    pub nr_wakeups_idle: u64,
    pub nr_migrations: u64,
    pub wait_sum: u64,
    pub wait_count: u64,
    pub sleep_sum: u64,
    pub block_sum: u64,
    pub block_count: u64,
    pub iowait_sum: u64,
    pub iowait_count: u64,
    // Always 0 from capture_thread_at; presumably filled by an
    // allocator-side collector elsewhere — confirm.
    pub allocated_bytes: u64,
    pub deallocated_bytes: u64,
    // Page faults, from stat fields 10 and 12.
    pub minflt: u64,
    pub majflt: u64,
    // From the io file.
    pub rchar: u64,
    pub wchar: u64,
    pub syscr: u64,
    pub syscw: u64,
    pub read_bytes: u64,
    pub write_bytes: u64,
}
/// Subset of cgroup-v2 counters read from a cgroup directory's `cpu.stat`
/// and `memory.current` files; every field falls back to 0 when its source
/// is missing or unreadable.
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
#[non_exhaustive]
pub struct CgroupStats {
    /// `usage_usec` from cpu.stat.
    pub cpu_usage_usec: u64,
    /// `nr_throttled` from cpu.stat.
    pub nr_throttled: u64,
    /// `throttled_usec` from cpu.stat.
    pub throttled_usec: u64,
    /// Current memory usage in bytes, from memory.current.
    pub memory_current: u64,
}
impl HostStateSnapshot {
pub fn load(path: &std::path::Path) -> anyhow::Result<Self> {
use anyhow::Context;
let bytes = std::fs::read(path)
.with_context(|| format!("read host-state snapshot from {}", path.display()))?;
let json = zstd::decode_all(bytes.as_slice())
.with_context(|| format!("zstd decompress host-state snapshot {}", path.display()))?;
let snap: HostStateSnapshot = serde_json::from_slice(&json).with_context(|| {
format!(
"parse host-state snapshot JSON from {} (did the capture format change?)",
path.display(),
)
})?;
Ok(snap)
}
pub fn write(&self, path: &std::path::Path) -> anyhow::Result<()> {
use anyhow::Context;
let json = serde_json::to_vec(self).context("serialize host-state snapshot to JSON")?;
let compressed =
zstd::encode_all(json.as_slice(), 3).context("zstd compress host-state snapshot")?;
std::fs::write(path, compressed)
.with_context(|| format!("write host-state snapshot to {}", path.display()))?;
Ok(())
}
}
/// File extension for serialized snapshots (zstd-compressed JSON).
pub const SNAPSHOT_EXTENSION: &str = "hst.zst";
/// procfs mount point used by the non-`_at` convenience wrappers.
pub const DEFAULT_PROC_ROOT: &str = "/proc";
/// cgroup-v2 mount point used by [`capture`].
pub const DEFAULT_CGROUP_ROOT: &str = "/sys/fs/cgroup";
/// Build `<proc_root>/<tgid>/task/<tid>/<leaf>`.
fn task_file(proc_root: &Path, tgid: i32, tid: i32, leaf: &str) -> PathBuf {
    let mut path = proc_root.join(tgid.to_string());
    path.push("task");
    path.push(tid.to_string());
    path.push(leaf);
    path
}
/// Build `<proc_root>/<tgid>/<leaf>`.
fn proc_file(proc_root: &Path, tgid: i32, leaf: &str) -> PathBuf {
    let mut path = proc_root.to_path_buf();
    path.push(tgid.to_string());
    path.push(leaf);
    path
}
/// Human-readable name for a Linux scheduling-policy number.
///
/// Policies 6 and 7 (DEADLINE/EXT) are matched numerically; anything else
/// unrecognized renders as `SCHED_UNKNOWN(<n>)`.
pub fn policy_name(policy: i32) -> String {
    let known = match policy {
        libc::SCHED_OTHER => "SCHED_OTHER",
        libc::SCHED_FIFO => "SCHED_FIFO",
        libc::SCHED_RR => "SCHED_RR",
        libc::SCHED_BATCH => "SCHED_BATCH",
        libc::SCHED_IDLE => "SCHED_IDLE",
        6 => "SCHED_DEADLINE",
        7 => "SCHED_EXT",
        other => return format!("SCHED_UNKNOWN({other})"),
    };
    known.to_string()
}
/// List numeric (> 0) directory names under `proc_root`, sorted ascending.
/// Returns an empty vec when the directory cannot be read.
pub fn iter_tgids_at(proc_root: &Path) -> Vec<i32> {
    let mut tgids = Vec::new();
    if let Ok(entries) = fs::read_dir(proc_root) {
        for entry in entries.flatten() {
            let parsed = entry.file_name().to_str().and_then(|n| n.parse::<i32>().ok());
            if let Some(pid) = parsed {
                if pid > 0 {
                    tgids.push(pid);
                }
            }
        }
    }
    tgids.sort_unstable();
    tgids
}
/// Thread ids under `<proc_root>/<tgid>/task`, sorted ascending; empty when
/// the directory cannot be read (e.g. the process already exited).
pub fn iter_task_ids_at(proc_root: &Path, tgid: i32) -> Vec<i32> {
    let task_dir = proc_root.join(tgid.to_string()).join("task");
    let mut tids = Vec::new();
    if let Ok(entries) = fs::read_dir(&task_dir) {
        for entry in entries.flatten() {
            let parsed = entry.file_name().to_str().and_then(|n| n.parse::<i32>().ok());
            if let Some(tid) = parsed {
                if tid > 0 {
                    tids.push(tid);
                }
            }
        }
    }
    tids.sort_unstable();
    tids
}
/// Process ids from the live `/proc`.
pub fn iter_tgids() -> Vec<i32> {
    let root = Path::new(DEFAULT_PROC_ROOT);
    iter_tgids_at(root)
}

/// Thread ids of `tgid` from the live `/proc`.
pub fn iter_task_ids(tgid: i32) -> Vec<i32> {
    let root = Path::new(DEFAULT_PROC_ROOT);
    iter_task_ids_at(root, tgid)
}
/// Trim `raw` and return `Some(owned)` unless the result is empty.
/// Shared by both comm readers below (previously duplicated inline).
fn non_empty_trimmed(raw: &str) -> Option<String> {
    let trimmed = raw.trim();
    (!trimmed.is_empty()).then(|| trimmed.to_string())
}

/// Read `/proc/<tgid>/comm` under `proc_root`.
/// `None` on I/O error or when the file is blank.
pub fn read_process_comm_at(proc_root: &Path, tgid: i32) -> Option<String> {
    let raw = fs::read_to_string(proc_file(proc_root, tgid, "comm")).ok()?;
    non_empty_trimmed(&raw)
}

/// Read `/proc/<tgid>/task/<tid>/comm` under `proc_root`.
/// `None` on I/O error or when the file is blank.
pub fn read_thread_comm_at(proc_root: &Path, tgid: i32, tid: i32) -> Option<String> {
    let raw = fs::read_to_string(task_file(proc_root, tgid, tid, "comm")).ok()?;
    non_empty_trimmed(&raw)
}
/// comm of process `tgid` from the live `/proc`.
pub fn read_process_comm(tgid: i32) -> Option<String> {
    let root = Path::new(DEFAULT_PROC_ROOT);
    read_process_comm_at(root, tgid)
}

/// comm of thread `tid` in process `tgid` from the live `/proc`.
pub fn read_thread_comm(tgid: i32, tid: i32) -> Option<String> {
    let root = Path::new(DEFAULT_PROC_ROOT);
    read_thread_comm_at(root, tgid, tid)
}
/// Fields of interest from `/proc/<pid>/task/<tid>/stat`.
/// Each is `None` when its position was missing or failed to parse.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
struct StatFields {
    minflt: Option<u64>,                  // stat field 10
    majflt: Option<u64>,                  // stat field 12
    nice: Option<i32>,                    // stat field 19
    start_time_clock_ticks: Option<u64>,  // stat field 22
    policy: Option<i32>,                  // stat field 41
}

/// Parse the first line of a stat file.
///
/// The comm field is wrapped in parentheses and may itself contain `)` and
/// whitespace, so fields are counted from the *last* `)` on the line.
/// Indices below are relative to the first field after the comm (the task
/// state), i.e. index 0 == stat field 3.
fn parse_stat(raw: &str) -> StatFields {
    let first_line = match raw.lines().next() {
        Some(line) => line,
        None => return StatFields::default(),
    };
    let tail = match first_line.rfind(')').and_then(|i| first_line.get(i + 1..)) {
        Some(tail) => tail,
        None => return StatFields::default(),
    };
    let fields: Vec<&str> = tail.split_ascii_whitespace().collect();
    let u64_at = |idx: usize| fields.get(idx).and_then(|s| s.parse::<u64>().ok());
    let i32_at = |idx: usize| fields.get(idx).and_then(|s| s.parse::<i32>().ok());
    StatFields {
        minflt: u64_at(7),
        majflt: u64_at(9),
        nice: i32_at(16),
        start_time_clock_ticks: u64_at(19),
        policy: i32_at(38),
    }
}
/// Read and parse the thread's stat file; all-`None` defaults on I/O error.
fn read_stat_at(proc_root: &Path, tgid: i32, tid: i32) -> StatFields {
    fs::read_to_string(task_file(proc_root, tgid, tid, "stat"))
        .map(|raw| parse_stat(&raw))
        .unwrap_or_default()
}
/// Parse the first line of a schedstat file: up to three whitespace-separated
/// counters — (on-CPU time ns, runqueue wait ns, timeslices). Each field
/// parses independently; a missing or malformed token becomes `None`.
fn parse_schedstat(raw: &str) -> (Option<u64>, Option<u64>, Option<u64>) {
    let mut tokens = raw
        .lines()
        .next()
        .unwrap_or("")
        .split_ascii_whitespace()
        .map(|tok| tok.parse::<u64>().ok());
    (
        tokens.next().flatten(),
        tokens.next().flatten(),
        tokens.next().flatten(),
    )
}
/// Read and parse the thread's schedstat file under `proc_root`;
/// `(None, None, None)` on I/O error.
pub fn read_schedstat_at(
    proc_root: &Path,
    tgid: i32,
    tid: i32,
) -> (Option<u64>, Option<u64>, Option<u64>) {
    fs::read_to_string(task_file(proc_root, tgid, tid, "schedstat"))
        .map(|raw| parse_schedstat(&raw))
        .unwrap_or_default()
}

/// Convenience wrapper over [`read_schedstat_at`] for the live `/proc`.
pub fn read_schedstat(tgid: i32, tid: i32) -> (Option<u64>, Option<u64>, Option<u64>) {
    read_schedstat_at(Path::new(DEFAULT_PROC_ROOT), tgid, tid)
}
/// Counters of interest from `/proc/<pid>/task/<tid>/io`.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
struct IoFields {
    rchar: Option<u64>,
    wchar: Option<u64>,
    syscr: Option<u64>,
    syscw: Option<u64>,
    read_bytes: Option<u64>,
    write_bytes: Option<u64>,
}

/// Parse `key: value` lines of an io file. Unknown keys are ignored; a
/// malformed value drops only its own field (it stays `None`).
fn parse_io(raw: &str) -> IoFields {
    let mut fields = IoFields::default();
    for line in raw.lines() {
        if let Some((key, value)) = line.split_once(':') {
            let slot = match key.trim() {
                "rchar" => &mut fields.rchar,
                "wchar" => &mut fields.wchar,
                "syscr" => &mut fields.syscr,
                "syscw" => &mut fields.syscw,
                "read_bytes" => &mut fields.read_bytes,
                "write_bytes" => &mut fields.write_bytes,
                _ => continue,
            };
            *slot = value.trim().parse::<u64>().ok();
        }
    }
    fields
}
/// Read and parse the thread's io file; all-`None` defaults on I/O error
/// (the io file often requires elevated permissions).
fn read_io_at(proc_root: &Path, tgid: i32, tid: i32) -> IoFields {
    fs::read_to_string(task_file(proc_root, tgid, tid, "io"))
        .map(|raw| parse_io(&raw))
        .unwrap_or_default()
}
/// Fields of interest from `/proc/<pid>/task/<tid>/status`.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
struct StatusFields {
    /// `voluntary_ctxt_switches` line.
    voluntary_csw: Option<u64>,
    /// `nonvoluntary_ctxt_switches` line.
    nonvoluntary_csw: Option<u64>,
    /// Parsed `Cpus_allowed_list`; `None` when absent or malformed.
    cpus_allowed: Option<Vec<u32>>,
}

/// Scan `key: value` lines of a status file for the three fields above;
/// all other keys are ignored and a malformed value drops only its field.
fn parse_status(raw: &str) -> StatusFields {
    let mut fields = StatusFields::default();
    for line in raw.lines() {
        let Some((key, rest)) = line.split_once(':') else {
            continue;
        };
        let rest = rest.trim();
        match key.trim() {
            "voluntary_ctxt_switches" => fields.voluntary_csw = rest.parse().ok(),
            "nonvoluntary_ctxt_switches" => fields.nonvoluntary_csw = rest.parse().ok(),
            "Cpus_allowed_list" => fields.cpus_allowed = parse_cpu_list(rest),
            _ => {}
        }
    }
    fields
}
/// Read and parse the thread's status file; defaults on I/O error.
fn read_status_at(proc_root: &Path, tgid: i32, tid: i32) -> StatusFields {
    fs::read_to_string(task_file(proc_root, tgid, tid, "status"))
        .map(|raw| parse_status(&raw))
        .unwrap_or_default()
}
/// Parse a kernel CPU list ("0-3,5,8-9") into sorted, deduplicated CPU ids.
///
/// Empty tokens between commas are skipped (so "0,1," is accepted), but any
/// malformed token, an inverted range, or a single range wider than
/// 65 536 CPUs rejects the whole input with `None`.
pub fn parse_cpu_list(s: &str) -> Option<Vec<u32>> {
    // Cap on how many CPUs one `lo-hi` range may expand to.
    const MAX_CPU_RANGE_EXPANSION: u64 = 65_536;
    let s = s.trim();
    if s.is_empty() {
        return None;
    }
    let mut cpus: Vec<u32> = Vec::new();
    for token in s.split(',').map(str::trim).filter(|t| !t.is_empty()) {
        match token.split_once('-') {
            Some((lo, hi)) => {
                let lo: u32 = lo.parse().ok()?;
                let hi: u32 = hi.parse().ok()?;
                if hi < lo {
                    return None;
                }
                if u64::from(hi) - u64::from(lo) + 1 > MAX_CPU_RANGE_EXPANSION {
                    return None;
                }
                cpus.extend(lo..=hi);
            }
            None => cpus.push(token.parse().ok()?),
        }
    }
    cpus.sort_unstable();
    cpus.dedup();
    Some(cpus)
}
/// Fetch `tid`'s CPU affinity mask via the raw `sched_getaffinity` syscall.
///
/// The raw syscall (unlike the libc wrapper) returns the number of bytes of
/// the kernel's cpumask it wrote on success, and fails with EINVAL when the
/// supplied buffer is smaller than the kernel mask. So: start at
/// `AFFINITY_INITIAL_BITS`, double the buffer on EINVAL, give up past
/// `AFFINITY_MAX_BITS`. Returns `None` on any other errno (e.g. the thread
/// exited), on retry exhaustion, or when the mask has no set bits.
pub fn read_affinity(tid: i32) -> Option<Vec<u32>> {
    let mut bits = AFFINITY_INITIAL_BITS;
    loop {
        let mut buffer = affinity_zeroed_buffer(bits);
        let bytes = std::mem::size_of_val(buffer.as_slice());
        // SAFETY: `buffer` is a live, writable allocation of exactly `bytes`
        // bytes — the same size passed to the kernel — so the kernel writes
        // only within bounds.
        let ret = unsafe {
            libc::syscall(
                libc::SYS_sched_getaffinity,
                tid as libc::pid_t,
                bytes,
                buffer.as_mut_ptr(),
            )
        };
        if ret >= 0 {
            // On success `ret` is how many mask bytes the kernel filled;
            // bits beyond that are uninitialized from the kernel's view.
            let written_bytes = ret as usize;
            return extract_cpus_from_mask(&buffer, written_bytes);
        }
        let errno = std::io::Error::last_os_error().raw_os_error();
        if errno != Some(libc::EINVAL) {
            return None;
        }
        // EINVAL: buffer smaller than the kernel mask — retry doubled.
        let Some(next) = affinity_next_bits(bits) else {
            return None;
        };
        bits = next;
    }
}
/// Initial guess for the kernel CPU-mask width, in bits (covers 8192 CPUs).
pub(crate) const AFFINITY_INITIAL_BITS: usize = 8192;
/// Ceiling on mask growth; beyond this `read_affinity` gives up.
pub(crate) const AFFINITY_MAX_BITS: usize = 262144;

/// Next (doubled) mask width to try, or `None` once doubling would exceed
/// `AFFINITY_MAX_BITS` (or overflow `usize`).
pub(crate) fn affinity_next_bits(current_bits: usize) -> Option<usize> {
    current_bits
        .checked_mul(2)
        .filter(|&doubled| doubled <= AFFINITY_MAX_BITS)
}
/// Allocate an all-zero CPU-mask buffer holding at least `bits` bits,
/// rounded up to whole `c_ulong` words.
fn affinity_zeroed_buffer(bits: usize) -> Vec<libc::c_ulong> {
    let word_bits = libc::c_ulong::BITS as usize;
    vec![0; bits.div_ceil(word_bits)]
}
/// Decode the set bits of a kernel CPU mask into ascending CPU ids.
///
/// Only the first `written_bytes` of `buffer` are meaningful (the raw
/// syscall reports how many bytes it filled); later words are ignored.
/// Returns `None` when no bit is set in the valid prefix.
fn extract_cpus_from_mask(buffer: &[libc::c_ulong], written_bytes: usize) -> Option<Vec<u32>> {
    let word_bits = libc::c_ulong::BITS as usize;
    let valid_words = written_bytes / std::mem::size_of::<libc::c_ulong>();
    let cpus: Vec<u32> = buffer[..valid_words.min(buffer.len())]
        .iter()
        .enumerate()
        .flat_map(|(word_idx, &word)| {
            (0..word_bits)
                .filter(move |&bit| word >> bit & 1 != 0)
                .map(move |bit| (word_idx * word_bits + bit) as u32)
        })
        .collect();
    if cpus.is_empty() { None } else { Some(cpus) }
}
/// cgroup-v2 path of a thread, from its /proc cgroup file. `None` when the
/// file is unreadable or has no unified-hierarchy (`0::`) entry.
pub fn read_cgroup_at(proc_root: &Path, tgid: i32, tid: i32) -> Option<String> {
    let contents = fs::read_to_string(task_file(proc_root, tgid, tid, "cgroup")).ok()?;
    parse_cgroup_v2(&contents)
}

/// Convenience wrapper over [`read_cgroup_at`] for the live `/proc`.
pub fn read_cgroup(tgid: i32, tid: i32) -> Option<String> {
    read_cgroup_at(Path::new(DEFAULT_PROC_ROOT), tgid, tid)
}
/// Extract the cgroup-v2 (unified hierarchy) path from the contents of a
/// /proc cgroup file: the first `0::<path>` line with a non-empty trimmed
/// path wins; legacy `N:controller:path` lines are ignored.
fn parse_cgroup_v2(raw: &str) -> Option<String> {
    raw.lines()
        .filter_map(|line| line.strip_prefix("0::"))
        .map(str::trim)
        .find(|path| !path.is_empty())
        .map(str::to_string)
}
/// Counters of interest from `/proc/<pid>/task/<tid>/sched`.
/// The `*_sum` values may be printed with a fractional part, so they are
/// parsed with a lossy float fallback (truncated, clamped at 0).
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
struct SchedFields {
    nr_wakeups: Option<u64>,
    nr_wakeups_local: Option<u64>,
    nr_wakeups_remote: Option<u64>,
    nr_wakeups_sync: Option<u64>,
    nr_wakeups_migrate: Option<u64>,
    nr_wakeups_idle: Option<u64>,
    nr_migrations: Option<u64>,
    wait_sum: Option<u64>,
    wait_count: Option<u64>,
    sleep_sum: Option<u64>,
    block_sum: Option<u64>,
    block_count: Option<u64>,
    iowait_sum: Option<u64>,
    iowait_count: Option<u64>,
}

/// Parse `key : value` lines of a sched file. Keys may appear bare
/// ("nr_migrations") or dotted ("se.statistics.nr_migrations"); matching is
/// done on the final dot-separated segment so both layouts are accepted.
fn parse_sched(raw: &str) -> SchedFields {
    let mut fields = SchedFields::default();
    for line in raw.lines() {
        let Some((key, value)) = line.split_once(':') else {
            continue;
        };
        let value = value.trim();
        let leaf = key.trim().rsplit('.').next().unwrap_or("");
        // Counts must be integers; sums may carry a fractional part, so
        // fall back to f64, clamp at 0, and truncate.
        let as_u64 = || value.parse::<u64>().ok();
        let as_u64_lossy =
            || as_u64().or_else(|| value.parse::<f64>().ok().map(|f| f.max(0.0) as u64));
        match leaf {
            "nr_wakeups" => fields.nr_wakeups = as_u64(),
            "nr_wakeups_local" => fields.nr_wakeups_local = as_u64(),
            "nr_wakeups_remote" => fields.nr_wakeups_remote = as_u64(),
            "nr_wakeups_sync" => fields.nr_wakeups_sync = as_u64(),
            "nr_wakeups_migrate" => fields.nr_wakeups_migrate = as_u64(),
            "nr_wakeups_idle" => fields.nr_wakeups_idle = as_u64(),
            "nr_migrations" => fields.nr_migrations = as_u64(),
            "wait_sum" => fields.wait_sum = as_u64_lossy(),
            "wait_count" => fields.wait_count = as_u64(),
            "sum_sleep_runtime" => fields.sleep_sum = as_u64_lossy(),
            "block_sum" => fields.block_sum = as_u64_lossy(),
            "block_count" => fields.block_count = as_u64(),
            "iowait_sum" => fields.iowait_sum = as_u64_lossy(),
            "iowait_count" => fields.iowait_count = as_u64(),
            _ => {}
        }
    }
    fields
}
/// Read and parse the thread's sched file; defaults on I/O error.
fn read_sched_at(proc_root: &Path, tgid: i32, tid: i32) -> SchedFields {
    fs::read_to_string(task_file(proc_root, tgid, tid, "sched"))
        .map(|raw| parse_sched(&raw))
        .unwrap_or_default()
}
/// Parse a cgroup-v2 `cpu.stat` file for `(usage_usec, nr_throttled,
/// throttled_usec)`. Lines are `key value` pairs; other keys are ignored
/// and key-only lines are skipped.
fn parse_cpu_stat(raw: &str) -> (Option<u64>, Option<u64>, Option<u64>) {
    let mut usage_usec = None;
    let mut nr_throttled = None;
    let mut throttled_usec = None;
    for line in raw.lines() {
        let mut tokens = line.split_ascii_whitespace();
        if let (Some(key), Some(value)) = (tokens.next(), tokens.next()) {
            let parsed = value.parse::<u64>().ok();
            match key {
                "usage_usec" => usage_usec = parsed,
                "nr_throttled" => nr_throttled = parsed,
                "throttled_usec" => throttled_usec = parsed,
                _ => {}
            }
        }
    }
    (usage_usec, nr_throttled, throttled_usec)
}
/// Read `cpu.stat` and `memory.current` for the cgroup at `path` (a
/// cgroup-v2 path such as `/a.slice/b`, resolved under `cgroup_root`).
/// Missing or unreadable files yield zeroed fields rather than an error.
pub fn read_cgroup_stats_at(cgroup_root: &Path, path: &str) -> CgroupStats {
    // "/" (or "") maps to the cgroup root itself.
    let dir = match path.strip_prefix('/').unwrap_or(path) {
        "" => cgroup_root.to_path_buf(),
        relative => cgroup_root.join(relative),
    };
    let (usage, throttled, throttled_usec) = fs::read_to_string(dir.join("cpu.stat"))
        .ok()
        .as_deref()
        .map(parse_cpu_stat)
        .unwrap_or_default();
    let memory_current = fs::read_to_string(dir.join("memory.current"))
        .ok()
        .and_then(|s| s.trim().parse::<u64>().ok())
        .unwrap_or(0);
    CgroupStats {
        cpu_usage_usec: usage.unwrap_or(0),
        nr_throttled: throttled.unwrap_or(0),
        throttled_usec: throttled_usec.unwrap_or(0),
        memory_current,
    }
}
/// True when the live process `tgid` has a mapping whose path mentions
/// "jemalloc" (checked via `/proc/<tgid>/maps`).
#[allow(dead_code)]
pub fn process_linked_against_jemalloc(tgid: i32) -> bool {
    let root = Path::new(DEFAULT_PROC_ROOT);
    process_linked_against_jemalloc_at(root, tgid)
}
/// Best-effort scan of `<proc_root>/<tgid>/maps` for any line containing
/// "jemalloc". Unreadable maps (permissions, dead pid) count as `false`.
#[allow(dead_code)]
pub fn process_linked_against_jemalloc_at(proc_root: &Path, tgid: i32) -> bool {
    fs::read_to_string(proc_root.join(tgid.to_string()).join("maps"))
        .map(|maps| maps.lines().any(|line| line.contains("jemalloc")))
        .unwrap_or(false)
}
/// Assemble a [`ThreadState`] for one thread from its /proc files.
///
/// `pcomm` is the already-read comm of the thread-group leader, so callers
/// read it once per process rather than once per thread. When
/// `use_syscall_affinity` is true, affinity is taken from the
/// `sched_getaffinity` syscall first, falling back to the status file's
/// `Cpus_allowed_list`; when false, only the status file is consulted
/// (NOTE(review): presumably so captures against a synthetic /proc root
/// never query live tids — confirm).
///
/// Every field degrades to a zero/default instead of failing, so a thread
/// that exits mid-capture still produces a (mostly empty) record.
pub fn capture_thread_at(
    proc_root: &Path,
    tgid: i32,
    tid: i32,
    pcomm: &str,
    use_syscall_affinity: bool,
) -> ThreadState {
    let comm = read_thread_comm_at(proc_root, tgid, tid).unwrap_or_default();
    let cgroup = read_cgroup_at(proc_root, tgid, tid).unwrap_or_default();
    let stat = read_stat_at(proc_root, tgid, tid);
    let (run_time_ns, wait_time_ns, timeslices) = read_schedstat_at(proc_root, tgid, tid);
    let io = read_io_at(proc_root, tgid, tid);
    let status = read_status_at(proc_root, tgid, tid);
    let sched = read_sched_at(proc_root, tgid, tid);
    let cpu_affinity = if use_syscall_affinity {
        read_affinity(tid)
            .or(status.cpus_allowed)
            .unwrap_or_default()
    } else {
        status.cpus_allowed.unwrap_or_default()
    };
    ThreadState {
        tid: tid as u32,
        tgid: tgid as u32,
        pcomm: pcomm.to_string(),
        comm,
        cgroup,
        start_time_clock_ticks: stat.start_time_clock_ticks.unwrap_or(0),
        policy: stat.policy.map(policy_name).unwrap_or_default(),
        nice: stat.nice.unwrap_or(0),
        cpu_affinity,
        run_time_ns: run_time_ns.unwrap_or(0),
        wait_time_ns: wait_time_ns.unwrap_or(0),
        timeslices: timeslices.unwrap_or(0),
        voluntary_csw: status.voluntary_csw.unwrap_or(0),
        nonvoluntary_csw: status.nonvoluntary_csw.unwrap_or(0),
        nr_wakeups: sched.nr_wakeups.unwrap_or(0),
        nr_wakeups_local: sched.nr_wakeups_local.unwrap_or(0),
        nr_wakeups_remote: sched.nr_wakeups_remote.unwrap_or(0),
        nr_wakeups_sync: sched.nr_wakeups_sync.unwrap_or(0),
        nr_wakeups_migrate: sched.nr_wakeups_migrate.unwrap_or(0),
        nr_wakeups_idle: sched.nr_wakeups_idle.unwrap_or(0),
        nr_migrations: sched.nr_migrations.unwrap_or(0),
        wait_sum: sched.wait_sum.unwrap_or(0),
        wait_count: sched.wait_count.unwrap_or(0),
        sleep_sum: sched.sleep_sum.unwrap_or(0),
        block_sum: sched.block_sum.unwrap_or(0),
        block_count: sched.block_count.unwrap_or(0),
        iowait_sum: sched.iowait_sum.unwrap_or(0),
        iowait_count: sched.iowait_count.unwrap_or(0),
        // Not derived from /proc here; always 0 from this function.
        allocated_bytes: 0,
        deallocated_bytes: 0,
        minflt: stat.minflt.unwrap_or(0),
        majflt: stat.majflt.unwrap_or(0),
        rchar: io.rchar.unwrap_or(0),
        wchar: io.wchar.unwrap_or(0),
        syscr: io.syscr.unwrap_or(0),
        syscw: io.syscw.unwrap_or(0),
        read_bytes: io.read_bytes.unwrap_or(0),
        write_bytes: io.write_bytes.unwrap_or(0),
    }
}
/// Test-only shorthand: capture one thread from the live `/proc` with
/// syscall-based affinity enabled.
#[cfg(test)]
fn capture_thread(tgid: i32, tid: i32, pcomm: &str) -> ThreadState {
    capture_thread_at(Path::new(DEFAULT_PROC_ROOT), tgid, tid, pcomm, true)
}
/// Capture a snapshot rooted at arbitrary /proc and cgroup mount points.
///
/// Walks every tgid/tid pair under `proc_root`, then reads cgroup stats
/// once per distinct cgroup path referenced by the captured threads.
/// `use_syscall_affinity` is forwarded to [`capture_thread_at`];
/// NOTE(review): it also gates host-context collection, effectively acting
/// as a "capturing the live host" flag — confirm that coupling is intended.
pub fn capture_with(
    proc_root: &Path,
    cgroup_root: &Path,
    use_syscall_affinity: bool,
) -> HostStateSnapshot {
    // Wall clock at capture start; 0 if the clock reads before the epoch.
    let captured_at_unix_ns = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_nanos() as u64)
        .unwrap_or(0);
    let host = if use_syscall_affinity {
        Some(crate::host_context::collect_host_context())
    } else {
        None
    };
    let mut threads: Vec<ThreadState> = Vec::new();
    for tgid in iter_tgids_at(proc_root) {
        // Read the process name once and share it across its threads.
        let pcomm = read_process_comm_at(proc_root, tgid).unwrap_or_default();
        for tid in iter_task_ids_at(proc_root, tgid) {
            let t = capture_thread_at(proc_root, tgid, tid, &pcomm, use_syscall_affinity);
            // Drop "ghost" threads that raced with exit: no comm and no
            // start time means every per-thread read came back empty.
            if t.comm.is_empty() && t.start_time_clock_ticks == 0 {
                continue;
            }
            threads.push(t);
        }
    }
    // One stats read per distinct cgroup, keyed by its v2 path.
    let mut cgroup_stats: BTreeMap<String, CgroupStats> = BTreeMap::new();
    for t in &threads {
        if !t.cgroup.is_empty() && !cgroup_stats.contains_key(&t.cgroup) {
            cgroup_stats.insert(
                t.cgroup.clone(),
                read_cgroup_stats_at(cgroup_root, &t.cgroup),
            );
        }
    }
    HostStateSnapshot {
        captured_at_unix_ns,
        host,
        threads,
        cgroup_stats,
    }
}
/// Capture the live host: default `/proc` and cgroup-v2 mount points, with
/// syscall-based affinity (and host-context collection) enabled.
pub fn capture() -> HostStateSnapshot {
    let proc_root = Path::new(DEFAULT_PROC_ROOT);
    let cgroup_root = Path::new(DEFAULT_CGROUP_ROOT);
    capture_with(proc_root, cgroup_root, true)
}
/// Capture the live host state and persist it to `path`.
///
/// # Errors
/// Propagates serialization/compression/I/O failures from
/// [`HostStateSnapshot::write`]. `write` already attaches a
/// "write host-state snapshot to <path>" context on the I/O step, so the
/// previous identical `.with_context` here only duplicated that message in
/// the error chain and has been removed.
pub fn capture_to(path: &Path) -> Result<()> {
    let snap = capture();
    snap.write(path)
}
#[cfg(test)]
mod tests {
use super::*;
/// Minimal ThreadState fixture: identity fields plus `run_time_ns`; every
/// remaining counter comes from `ThreadState::default()` via struct update.
fn thread(pcomm: &str, comm: &str, run_time_ns: u64) -> ThreadState {
    ThreadState {
        tid: 1,
        tgid: 1,
        pcomm: pcomm.into(),
        comm: comm.into(),
        cgroup: "/".into(),
        start_time_clock_ticks: 0,
        policy: "SCHED_OTHER".into(),
        nice: 0,
        cpu_affinity: vec![0, 1],
        run_time_ns,
        ..ThreadState::default()
    }
}
#[test]
fn snapshot_roundtrip_through_zstd_json() {
let snap = HostStateSnapshot {
captured_at_unix_ns: 42,
host: None,
threads: vec![
thread("proc_a", "worker_0", 1_000_000),
thread("proc_a", "worker_1", 2_000_000),
],
cgroup_stats: BTreeMap::from([(
"/".into(),
CgroupStats {
cpu_usage_usec: 500,
nr_throttled: 0,
throttled_usec: 0,
memory_current: 1 << 20,
},
)]),
};
let tmp = tempfile::NamedTempFile::new().unwrap();
snap.write(tmp.path()).unwrap();
let back = HostStateSnapshot::load(tmp.path()).unwrap();
assert_eq!(back.captured_at_unix_ns, 42);
assert_eq!(back.threads.len(), 2);
assert_eq!(back.threads[1].run_time_ns, 2_000_000);
assert_eq!(back.cgroup_stats["/"].cpu_usage_usec, 500);
}
#[test]
fn load_rejects_non_zstd_payload() {
let tmp = tempfile::NamedTempFile::new().unwrap();
std::fs::write(tmp.path(), b"{\"not\": \"zstd\"}").unwrap();
let err = HostStateSnapshot::load(tmp.path()).unwrap_err();
let msg = format!("{err:?}");
assert!(
msg.contains("zstd"),
"expected zstd error in context chain, got: {msg}",
);
}
#[test]
fn load_rejects_zstd_of_garbage_json() {
let tmp = tempfile::NamedTempFile::new().unwrap();
let compressed = zstd::encode_all(&b"not json"[..], 3).unwrap();
std::fs::write(tmp.path(), compressed).unwrap();
let err = HostStateSnapshot::load(tmp.path()).unwrap_err();
let msg = format!("{err:?}");
assert!(
msg.contains("parse host-state"),
"expected parse error in context chain, got: {msg}",
);
}
#[test]
fn parse_stat_robust_against_paren_in_comm() {
let mut line = String::from("1234 (weird)name) ");
for i in 0..20 {
line.push_str(&format!("{i} "));
}
let f = parse_stat(&line);
assert_eq!(f.start_time_clock_ticks, Some(19));
}
#[test]
fn parse_stat_extracts_min_maj_nice_and_policy() {
let mut line = String::from("1 (n) ");
for i in 0..=38 {
line.push_str(&format!("{i} "));
}
let f = parse_stat(&line);
assert_eq!(f.minflt, Some(7));
assert_eq!(f.majflt, Some(9));
assert_eq!(f.nice, Some(16));
assert_eq!(f.start_time_clock_ticks, Some(19));
assert_eq!(f.policy, Some(38));
}
#[test]
fn parse_stat_short_line_drops_missing_fields() {
let line = "1 (n) 0 1 2 3 4 5 6 7";
let f = parse_stat(line);
assert_eq!(f.minflt, Some(7));
assert_eq!(f.majflt, None);
assert_eq!(f.nice, None);
assert_eq!(f.start_time_clock_ticks, None);
assert_eq!(f.policy, None);
}
#[test]
fn parse_schedstat_three_fields() {
let (a, b, c) = parse_schedstat("12345 67890 42\n");
assert_eq!(a, Some(12345));
assert_eq!(b, Some(67890));
assert_eq!(c, Some(42));
}
#[test]
fn parse_schedstat_missing_fields_drop_individually() {
let (a, b, c) = parse_schedstat("12345\n");
assert_eq!(a, Some(12345));
assert_eq!(b, None);
assert_eq!(c, None);
}
#[test]
fn parse_io_extracts_all_six_fields() {
let raw = "rchar: 1\n\
wchar: 2\n\
syscr: 3\n\
syscw: 4\n\
read_bytes: 5\n\
write_bytes: 6\n\
cancelled_write_bytes: 7\n";
let f = parse_io(raw);
assert_eq!(f.rchar, Some(1));
assert_eq!(f.wchar, Some(2));
assert_eq!(f.syscr, Some(3));
assert_eq!(f.syscw, Some(4));
assert_eq!(f.read_bytes, Some(5));
assert_eq!(f.write_bytes, Some(6));
}
#[test]
fn parse_status_extracts_csw_and_affinity() {
let raw = "Name:\tbash\n\
Cpus_allowed_list:\t0-3,5\n\
voluntary_ctxt_switches:\t100\n\
nonvoluntary_ctxt_switches:\t5\n";
let f = parse_status(raw);
assert_eq!(f.voluntary_csw, Some(100));
assert_eq!(f.nonvoluntary_csw, Some(5));
assert_eq!(f.cpus_allowed.as_deref(), Some(&[0u32, 1, 2, 3, 5][..]));
}
#[test]
fn parse_cpu_list_accepts_ranges_singletons_and_mixtures() {
assert_eq!(parse_cpu_list("0-3").unwrap(), vec![0, 1, 2, 3]);
assert_eq!(parse_cpu_list("5").unwrap(), vec![5]);
assert_eq!(parse_cpu_list("0,2,4").unwrap(), vec![0, 2, 4]);
assert_eq!(parse_cpu_list("0-2,4,6-7").unwrap(), vec![0, 1, 2, 4, 6, 7]);
}
#[test]
fn parse_cpu_list_rejects_malformed_input() {
assert!(parse_cpu_list("").is_none());
assert!(parse_cpu_list("5-3").is_none());
assert!(parse_cpu_list("abc").is_none());
assert!(parse_cpu_list("0-").is_none());
assert!(parse_cpu_list("-3").is_none());
}
#[test]
fn parse_cpu_list_dedups_and_sorts() {
assert_eq!(parse_cpu_list("3,0-2,1,2").unwrap(), vec![0, 1, 2, 3]);
}
#[test]
fn parse_cpu_list_rejects_huge_range() {
assert_eq!(parse_cpu_list("0-4294967295"), None);
assert_eq!(parse_cpu_list("0-65536"), None);
let at_cap = parse_cpu_list("0-65535").unwrap();
assert_eq!(at_cap.len(), 65_536);
let realistic = parse_cpu_list("0-8191").unwrap();
assert_eq!(realistic.len(), 8192);
}
#[test]
fn parse_cgroup_v2_picks_unified_hierarchy() {
let raw = "12:cpuset:/legacy/cpuset/path\n\
0::/unified/path\n\
5:freezer:/legacy/freezer\n";
assert_eq!(parse_cgroup_v2(raw), Some("/unified/path".to_string()));
}
#[test]
fn parse_cgroup_v2_none_when_only_legacy_present() {
let raw = "12:cpuset:/legacy/path\n";
assert_eq!(parse_cgroup_v2(raw), None);
}
#[test]
fn parse_sched_accepts_prefixed_and_bare_keys() {
let raw = "se.statistics.nr_wakeups : 1000\n\
se.nr_migrations : 42\n\
se.statistics.nr_wakeups_local : 600\n\
se.statistics.wait_sum : 12345.678\n";
let f = parse_sched(raw);
assert_eq!(f.nr_wakeups, Some(1000));
assert_eq!(f.nr_migrations, Some(42));
assert_eq!(f.nr_wakeups_local, Some(600));
assert_eq!(f.wait_sum, Some(12345));
}
#[test]
fn parse_cpu_stat_space_separated_format() {
let raw = "usage_usec 1234\n\
user_usec 1000\n\
system_usec 234\n\
nr_periods 10\n\
nr_throttled 2\n\
throttled_usec 500\n";
let (usage, throttled, throttled_usec) = parse_cpu_stat(raw);
assert_eq!(usage, Some(1234));
assert_eq!(throttled, Some(2));
assert_eq!(throttled_usec, Some(500));
}
#[test]
fn policy_name_known_and_unknown() {
assert_eq!(policy_name(libc::SCHED_OTHER), "SCHED_OTHER");
assert_eq!(policy_name(libc::SCHED_FIFO), "SCHED_FIFO");
assert_eq!(policy_name(libc::SCHED_RR), "SCHED_RR");
assert_eq!(policy_name(libc::SCHED_BATCH), "SCHED_BATCH");
assert_eq!(policy_name(libc::SCHED_IDLE), "SCHED_IDLE");
assert_eq!(policy_name(6), "SCHED_DEADLINE");
assert_eq!(policy_name(7), "SCHED_EXT");
assert_eq!(policy_name(99), "SCHED_UNKNOWN(99)");
}
#[test]
fn iter_tgids_includes_self() {
let tgids = iter_tgids();
let pid = std::process::id() as i32;
assert!(tgids.contains(&pid), "self pid {pid} not in /proc walk");
}
#[test]
fn iter_task_ids_self_returns_at_least_main_tid() {
let pid = std::process::id() as i32;
let tids = iter_task_ids(pid);
assert!(
tids.contains(&pid),
"main tid {pid} absent from /proc/self/task"
);
}
#[test]
fn read_process_comm_for_self_is_populated() {
let pid = std::process::id() as i32;
let comm = read_process_comm(pid).expect("self comm must be readable");
assert!(!comm.is_empty());
}
#[test]
fn capture_thread_self_populates_identity() {
let pid = std::process::id() as i32;
let t = capture_thread(pid, pid, "testproc");
assert_eq!(t.tid, pid as u32);
assert_eq!(t.tgid, pid as u32);
assert_eq!(t.pcomm, "testproc");
assert!(!t.comm.is_empty());
assert!(t.start_time_clock_ticks > 0);
assert!(!t.policy.is_empty());
}
#[test]
fn capture_produces_non_empty_snapshot() {
let snap = capture();
assert!(!snap.threads.is_empty());
let pid = std::process::id();
let self_threads: Vec<_> = snap.threads.iter().filter(|t| t.tgid == pid).collect();
assert!(!self_threads.is_empty(), "own tgid missing from capture");
}
#[test]
fn snapshot_extension_is_stable() {
assert_eq!(SNAPSHOT_EXTENSION, "hst.zst");
}
#[test]
fn parse_io_empty_input_yields_all_none() {
let f = parse_io("");
assert_eq!(f, IoFields::default());
}
#[test]
fn parse_io_malformed_value_drops_only_that_field() {
let raw = "rchar: 100\n\
wchar: not-a-number\n\
syscr: 3\n";
let f = parse_io(raw);
assert_eq!(f.rchar, Some(100));
assert_eq!(f.wchar, None, "malformed value drops to None");
assert_eq!(f.syscr, Some(3));
}
#[test]
fn parse_cpu_list_single_element_range_lo_equals_hi() {
assert_eq!(parse_cpu_list("5-5").unwrap(), vec![5]);
assert_eq!(parse_cpu_list("0-0").unwrap(), vec![0]);
}
#[test]
fn parse_cpu_list_trailing_comma_accepted() {
assert_eq!(parse_cpu_list("0,1,").unwrap(), vec![0, 1]);
assert_eq!(parse_cpu_list(",0,1").unwrap(), vec![0, 1]);
}
#[test]
fn parse_stat_empty_and_no_paren_return_default() {
assert_eq!(parse_stat(""), StatFields::default());
assert_eq!(
parse_stat("garbage line with no close paren 1 2 3"),
StatFields::default(),
"line without `)` must return Default, not panic on \
out-of-bounds indexing",
);
assert_eq!(
parse_stat(" \n"),
StatFields::default(),
"whitespace-only input must also land at Default",
);
}
#[test]
fn parse_stat_multi_line_input_uses_only_first_line() {
let mut first = String::from("1 (proc) ");
for i in 0..=38 {
first.push_str(&format!("{i} "));
}
let second = "2 (other) 999 999 999 999 999 999 999 999 999 999 \
999 999 999 999 999 999 999 999 999 999 999 999 999\n";
let raw = format!("{first}\n{second}");
let f = parse_stat(&raw);
assert_eq!(f.nice, Some(16));
assert_eq!(f.start_time_clock_ticks, Some(19));
assert_eq!(f.policy, Some(38));
}
#[test]
fn parse_schedstat_extra_fields_and_invalid_tokens() {
let (a, b, c) = parse_schedstat("1 2 3 4\n");
assert_eq!((a, b, c), (Some(1), Some(2), Some(3)));
let (a, b, c) = parse_schedstat("1 invalid 3\n");
assert_eq!(a, Some(1));
assert_eq!(b, None);
assert_eq!(c, Some(3));
let (a, b, c) = parse_schedstat("");
assert_eq!((a, b, c), (None, None, None));
}
#[test]
fn policy_name_negative_integer_renders_unknown() {
assert_eq!(policy_name(-1), "SCHED_UNKNOWN(-1)");
assert_eq!(
policy_name(i32::MIN),
format!("SCHED_UNKNOWN({})", i32::MIN)
);
}
#[test]
fn parse_cpu_stat_empty_and_keyonly_lines_yield_none() {
let (u, t, tu) = parse_cpu_stat("");
assert_eq!((u, t, tu), (None, None, None));
let (u, t, tu) = parse_cpu_stat("usage_usec\n");
assert_eq!((u, t, tu), (None, None, None));
}
#[test]
fn parse_status_partial_and_malformed_fields_isolate_correctly() {
let only_v = "Name:\tfoo\n\
voluntary_ctxt_switches:\t9\n";
let f = parse_status(only_v);
assert_eq!(f.voluntary_csw, Some(9));
assert_eq!(f.nonvoluntary_csw, None);
assert_eq!(f.cpus_allowed, None);
let bad_cpu_list = "Cpus_allowed_list:\t5-3\n\
voluntary_ctxt_switches:\t1\n";
let f = parse_status(bad_cpu_list);
assert_eq!(f.voluntary_csw, Some(1));
assert_eq!(
f.cpus_allowed, None,
"malformed cpulist must route parse_cpu_list's None \
into the StatusFields field — not collapse to empty vec",
);
}
#[test]
fn parse_cgroup_v2_empty_path_and_multiple_unified_lines() {
assert_eq!(parse_cgroup_v2("0::\n"), None);
assert_eq!(parse_cgroup_v2("0:: \n"), None);
let raw = "0::/first\n0::/second\n";
assert_eq!(parse_cgroup_v2(raw), Some("/first".to_string()));
}
#[test]
fn read_thread_comm_at_whitespace_only_returns_none() {
let tmp = tempfile::TempDir::new().unwrap();
let tgid = 1;
let tid = 1;
let task_dir = tmp
.path()
.join(tgid.to_string())
.join("task")
.join(tid.to_string());
std::fs::create_dir_all(&task_dir).unwrap();
std::fs::write(task_dir.join("comm"), " \n").unwrap();
assert_eq!(read_thread_comm_at(tmp.path(), tgid, tid), None);
assert_eq!(read_thread_comm_at(tmp.path(), tgid, 9999), None);
}
#[test]
fn affinity_next_bits_doubles_until_ceiling() {
assert_eq!(AFFINITY_INITIAL_BITS, 8192);
assert_eq!(AFFINITY_MAX_BITS, 262144);
let mut cur = AFFINITY_INITIAL_BITS;
let expected = [16384usize, 32768, 65536, 131072, 262144];
for &want in &expected {
let next = affinity_next_bits(cur).expect("doubling must succeed below ceiling");
assert_eq!(next, want, "expected {want}, got {next}");
cur = next;
}
assert_eq!(
affinity_next_bits(AFFINITY_MAX_BITS),
None,
"at the ceiling, no further retry must be allowed",
);
}
#[test]
fn extract_cpus_from_mask_single_bit_in_first_word() {
let mut buf = vec![0 as libc::c_ulong; 4];
buf[0] = (1 as libc::c_ulong) << 5;
let bytes = std::mem::size_of_val(buf.as_slice());
let cpus = extract_cpus_from_mask(&buf, bytes).expect("non-empty mask");
assert_eq!(cpus, vec![5]);
}
#[test]
fn extract_cpus_from_mask_offset_bit_in_later_word() {
let word_bits = libc::c_ulong::BITS as usize;
let mut buf = vec![0 as libc::c_ulong; 4];
buf[2] = (1 as libc::c_ulong) << 3;
let bytes = std::mem::size_of_val(buf.as_slice());
let cpus = extract_cpus_from_mask(&buf, bytes).expect("non-empty mask");
let expected = (2 * word_bits + 3) as u32;
assert_eq!(cpus, vec![expected]);
}
#[test]
fn extract_cpus_from_mask_respects_written_bytes() {
    // One bit inside the written prefix (word 0) and one beyond it
    // (word 3): only the prefix bit may be reported.
    let mut mask = vec![0 as libc::c_ulong; 4];
    mask[0] = (1 as libc::c_ulong) << 7;
    mask[3] = 1 as libc::c_ulong;
    let one_word_bytes = std::mem::size_of::<libc::c_ulong>();
    let cpus = extract_cpus_from_mask(&mask, one_word_bytes).expect("non-empty mask");
    assert_eq!(cpus, vec![7]);
}
#[test]
fn extract_cpus_from_mask_empty_buffer_returns_none() {
let buf = vec![0 as libc::c_ulong; 4];
let bytes = std::mem::size_of_val(buf.as_slice());
assert_eq!(extract_cpus_from_mask(&buf, bytes), None);
}
#[test]
fn affinity_zeroed_buffer_rounds_up_and_is_zeroed() {
let word_bits = libc::c_ulong::BITS as usize;
let exact = affinity_zeroed_buffer(word_bits);
assert_eq!(exact.len(), 1);
let over = affinity_zeroed_buffer(word_bits + 1);
assert_eq!(over.len(), 2);
let init = affinity_zeroed_buffer(AFFINITY_INITIAL_BITS);
assert_eq!(init.len(), AFFINITY_INITIAL_BITS / word_bits);
assert!(init.iter().all(|&w| w == 0));
}
#[test]
fn read_affinity_for_self_returns_at_least_one_cpu() {
let pid = std::process::id() as i32;
let cpus = read_affinity(pid).expect("own affinity must resolve");
assert!(
!cpus.is_empty(),
"self affinity must carry at least one CPU"
);
let mut sorted = cpus.clone();
sorted.sort_unstable();
assert_eq!(cpus, sorted, "cpus must be returned sorted ascending");
}
/// Stages a minimal synthetic /proc-style tree for one process/thread pair
/// under `root`: `<root>/<tgid>/comm` plus
/// `<root>/<tgid>/task/<tid>/{comm,stat,schedstat,status,io,sched,cgroup}`.
/// All values are fixed constants so the capture tests can assert exact numbers.
fn stage_synthetic_proc(root: &Path, tgid: i32, tid: i32, pcomm: &str, comm: &str) {
use std::fs;
let tgid_dir = root.join(tgid.to_string());
let task_dir = tgid_dir.join("task").join(tid.to_string());
fs::create_dir_all(&task_dir).unwrap();
// Process-level comm (pcomm) and thread-level comm are distinct files.
fs::write(tgid_dir.join("comm"), format!("{pcomm}\n")).unwrap();
fs::write(task_dir.join("comm"), format!("{comm}\n")).unwrap();
// Synthetic /proc/<pid>/stat line. The comm field deliberately contains
// spaces and parentheses to exercise the stat parser. Notable positional
// values: minflt=7777, majflt=8888; nice/starttime/policy interpolated below.
let stat_line = format!(
"{tid} (proc (with) parens) R 1 2 3 4 5 6 \
7777 0 8888 0 10 11 12 13 14 {nice} 1 0 \
{starttime} 100 200 300 400 500 600 700 800 \
900 1000 1100 1200 1300 1400 1500 1600 1700 1800 {policy}\n",
tid = tid,
nice = -10_i32,
starttime = 555_555u64,
policy = 0, );
fs::write(task_dir.join("stat"), stat_line).unwrap();
// schedstat triple: run time (ns), wait time (ns), timeslice count.
fs::write(task_dir.join("schedstat"), "1000000 200000 50\n").unwrap();
// status: context-switch counters plus the allowed-CPU list (0-3).
let status = "Name:\tfoo\n\
voluntary_ctxt_switches:\t42\n\
nonvoluntary_ctxt_switches:\t7\n\
Cpus_allowed_list:\t0-3\n";
fs::write(task_dir.join("status"), status).unwrap();
// io: per-thread I/O accounting counters.
let io = "rchar: 100\n\
wchar: 200\n\
syscr: 10\n\
syscw: 20\n\
read_bytes: 4096\n\
write_bytes: 8192\n";
fs::write(task_dir.join("io"), io).unwrap();
// sched: wakeup/wait statistics; the fractional values exercise the
// truncate-to-integer path in the parser.
let sched = "\
se.statistics.nr_wakeups : 11\n\
se.statistics.nr_wakeups_local : 8\n\
se.statistics.nr_wakeups_remote : 3\n\
se.statistics.nr_wakeups_sync : 2\n\
se.statistics.nr_wakeups_migrate : 1\n\
se.statistics.nr_wakeups_idle : 4\n\
nr_migrations : 9\n\
wait_sum : 5000.25\n\
wait_count : 15\n\
sum_sleep_runtime : 3200.50\n\
block_sum : 1100.75\n\
block_count : 2\n\
iowait_sum : 77.0\n\
iowait_count : 18\n";
fs::write(task_dir.join("sched"), sched).unwrap();
// cgroup v2 single-hierarchy line; capture strips the "0::" prefix.
fs::write(task_dir.join("cgroup"), "0::/ktstr.slice/worker0\n").unwrap();
}
#[test]
fn capture_with_filters_ghost_threads_with_empty_comm_and_zero_start() {
    let proc_root = tempfile::TempDir::new().unwrap();
    let cgroup_root = tempfile::TempDir::new().unwrap();
    let (tgid, live_tid, ghost_tid): (i32, i32, i32) = (42, 101, 202);
    // One fully-populated thread...
    stage_synthetic_proc(proc_root.path(), tgid, live_tid, "pcomm-proc", "live-thread");
    // ...and one bare task directory containing no files at all (a "ghost"
    // tid, as seen when a thread exits mid-scan).
    let ghost_dir = proc_root
        .path()
        .join(tgid.to_string())
        .join("task")
        .join(ghost_tid.to_string());
    std::fs::create_dir_all(&ghost_dir).unwrap();
    let snap = capture_with(proc_root.path(), cgroup_root.path(), false);
    let seen: Vec<_> = snap.threads.iter().map(|t| (t.tid, &t.comm)).collect();
    assert_eq!(
        snap.threads.len(),
        1,
        "ghost tid with empty comm + zero start must be filtered; \
got threads: {:?}",
        seen,
    );
    assert_eq!(snap.threads[0].tid, live_tid as u32);
    assert_eq!(snap.threads[0].comm, "live-thread");
}
#[test]
fn capture_with_synthetic_tree_assembles_thread_state() {
    let proc_root = tempfile::TempDir::new().unwrap();
    let cgroup_root = tempfile::TempDir::new().unwrap();
    let (tgid, tid): (i32, i32) = (42, 101);
    stage_synthetic_proc(proc_root.path(), tgid, tid, "pcomm-proc", "worker-thread");
    let snap = capture_with(proc_root.path(), cgroup_root.path(), false);
    assert_eq!(snap.threads.len(), 1, "synthetic proc has one tid");
    let ts = &snap.threads[0];
    // Identity fields from the two comm files and the directory names.
    assert_eq!((ts.tid, ts.tgid), (tid as u32, tgid as u32));
    assert_eq!(ts.pcomm, "pcomm-proc");
    assert_eq!(ts.comm, "worker-thread");
    assert_eq!(ts.cgroup, "/ktstr.slice/worker0");
    // stat-derived scheduling metadata and fault counters.
    assert_eq!((ts.nice, ts.start_time_clock_ticks), (-10, 555_555));
    assert_eq!(ts.policy, "SCHED_OTHER");
    assert_eq!((ts.minflt, ts.majflt), (7777, 8888));
    // schedstat triple.
    assert_eq!(
        (ts.run_time_ns, ts.wait_time_ns, ts.timeslices),
        (1_000_000, 200_000, 50)
    );
    // status-derived counters and affinity list.
    assert_eq!((ts.voluntary_csw, ts.nonvoluntary_csw), (42, 7));
    assert_eq!(ts.cpu_affinity, vec![0, 1, 2, 3]);
    // io accounting.
    assert_eq!(
        (ts.rchar, ts.wchar, ts.syscr, ts.syscw, ts.read_bytes, ts.write_bytes),
        (100, 200, 10, 20, 4096, 8192)
    );
    // sched wakeup/migration statistics.
    assert_eq!(
        (
            ts.nr_wakeups,
            ts.nr_wakeups_local,
            ts.nr_wakeups_remote,
            ts.nr_wakeups_sync,
            ts.nr_wakeups_migrate,
            ts.nr_wakeups_idle,
            ts.nr_migrations,
        ),
        (11, 8, 3, 2, 1, 4, 9)
    );
    assert_eq!(ts.wait_sum, 5000, "fractional 5000.25 truncates to 5000");
    assert_eq!(ts.wait_count, 15);
    assert_eq!(
        ts.sleep_sum, 3200,
        "fractional 3200.50 truncates to 3200 — sourced from the \
kernel `sum_sleep_runtime` key, NOT the misnamed `sleep_sum` \
of earlier drafts",
    );
    assert_eq!(ts.block_sum, 1100, "fractional 1100.75 truncates to 1100");
    assert_eq!(ts.block_count, 2);
    assert_eq!(ts.iowait_sum, 77, "fractional 77.0 truncates to 77");
    assert_eq!(ts.iowait_count, 18);
}
/// Creates `<root>/<relative>/cpu.stat` with `contents`, stripping the
/// leading '/' from `relative` so the path nests under the temp root.
fn write_cpu_stat(root: &Path, relative: &str, contents: &str) {
    let cgroup_dir = root.join(relative.trim_start_matches('/'));
    std::fs::create_dir_all(&cgroup_dir).unwrap();
    std::fs::write(cgroup_dir.join("cpu.stat"), contents).unwrap();
}
/// Creates `<root>/<relative>/memory.current` with `contents`, stripping the
/// leading '/' from `relative` so the path nests under the temp root.
fn write_memory_current(root: &Path, relative: &str, contents: &str) {
    let cgroup_dir = root.join(relative.trim_start_matches('/'));
    std::fs::create_dir_all(&cgroup_dir).unwrap();
    std::fs::write(cgroup_dir.join("memory.current"), contents).unwrap();
}
#[test]
fn read_cgroup_stats_at_both_files_populate_all_fields() {
    // With both cpu.stat and memory.current present, every field is populated.
    let root = tempfile::TempDir::new().unwrap();
    write_cpu_stat(
        root.path(),
        "worker",
        "usage_usec 12345\nnr_throttled 7\nthrottled_usec 8\n",
    );
    write_memory_current(root.path(), "worker", "9999\n");
    let stats = read_cgroup_stats_at(root.path(), "/worker");
    assert_eq!(
        (
            stats.cpu_usage_usec,
            stats.nr_throttled,
            stats.throttled_usec,
            stats.memory_current,
        ),
        (12345, 7, 8, 9999)
    );
}
#[test]
fn read_cgroup_stats_at_cpu_stat_only_memory_defaults_zero() {
    // Only cpu.stat exists: CPU fields populate, memory falls back to zero.
    let root = tempfile::TempDir::new().unwrap();
    write_cpu_stat(
        root.path(),
        "cpu-only",
        "usage_usec 500\nnr_throttled 0\nthrottled_usec 0\n",
    );
    let stats = read_cgroup_stats_at(root.path(), "/cpu-only");
    assert_eq!(
        (stats.cpu_usage_usec, stats.nr_throttled, stats.throttled_usec),
        (500, 0, 0)
    );
    assert_eq!(
        stats.memory_current, 0,
        "missing memory.current must collapse to 0, not None",
    );
}
#[test]
fn read_cgroup_stats_at_memory_only_cpu_defaults_zero() {
    // Only memory.current exists: memory populates, CPU fields default to zero.
    let root = tempfile::TempDir::new().unwrap();
    write_memory_current(root.path(), "mem-only", "2048\n");
    let stats = read_cgroup_stats_at(root.path(), "/mem-only");
    assert_eq!(
        (
            stats.cpu_usage_usec,
            stats.nr_throttled,
            stats.throttled_usec,
            stats.memory_current,
        ),
        (0, 0, 0, 2048)
    );
}
#[test]
fn read_cgroup_stats_at_both_files_missing_all_zero() {
    // The cgroup directory exists but contains neither stats file: every
    // field defaults to zero rather than erroring out.
    let root = tempfile::TempDir::new().unwrap();
    std::fs::create_dir_all(root.path().join("empty-cg")).unwrap();
    let stats = read_cgroup_stats_at(root.path(), "/empty-cg");
    assert_eq!(
        (
            stats.cpu_usage_usec,
            stats.nr_throttled,
            stats.throttled_usec,
            stats.memory_current,
        ),
        (0, 0, 0, 0)
    );
}
#[test]
fn read_cgroup_stats_at_cpu_stat_missing_key_defaults_field_zero() {
    // cpu.stat lacking the nr_throttled line: present keys still parse,
    // only the absent one falls back to zero.
    let root = tempfile::TempDir::new().unwrap();
    write_cpu_stat(root.path(), "partial", "usage_usec 999\nthrottled_usec 111\n");
    let stats = read_cgroup_stats_at(root.path(), "/partial");
    assert_eq!(stats.cpu_usage_usec, 999);
    assert_eq!(stats.nr_throttled, 0, "absent key collapses to 0");
    assert_eq!(stats.throttled_usec, 111);
}
#[test]
fn parse_sched_populates_all_fourteen_fields() {
    // Keys are deliberately listed out of field order to prove matching is
    // by name, not position.
    let raw = concat!(
        "se.statistics.nr_wakeups : 11\n",
        "se.statistics.nr_wakeups_sync : 2\n",
        "se.statistics.nr_wakeups_local : 8\n",
        "se.statistics.nr_wakeups_migrate : 1\n",
        "se.statistics.nr_wakeups_remote : 3\n",
        "se.statistics.nr_wakeups_idle : 4\n",
        "nr_migrations : 9\n",
        "wait_sum : 500\n",
        "wait_count : 15\n",
        "sum_sleep_runtime : 320\n",
        "block_sum : 110\n",
        "block_count : 2\n",
        "iowait_sum : 77\n",
        "iowait_count : 18\n",
    );
    let parsed = parse_sched(raw);
    assert_eq!(
        (
            parsed.nr_wakeups,
            parsed.nr_wakeups_local,
            parsed.nr_wakeups_remote,
            parsed.nr_wakeups_sync,
            parsed.nr_wakeups_migrate,
            parsed.nr_wakeups_idle,
        ),
        (Some(11), Some(8), Some(3), Some(2), Some(1), Some(4))
    );
    assert_eq!(parsed.nr_migrations, Some(9));
    assert_eq!((parsed.wait_sum, parsed.wait_count), (Some(500), Some(15)));
    assert_eq!(
        parsed.sleep_sum,
        Some(320),
        "kernel key is `sum_sleep_runtime`, not `sleep_sum` — the \
old match arm was a ghost and never populated",
    );
    assert_eq!((parsed.block_sum, parsed.block_count), (Some(110), Some(2)));
    assert_eq!((parsed.iowait_sum, parsed.iowait_count), (Some(77), Some(18)));
}
#[test]
fn parse_sched_fractional_fields_truncate_to_integer() {
    // Fractional millisecond values must truncate (not round) to integers.
    let raw = concat!(
        "wait_sum : 1234.5\n",
        "sum_sleep_runtime : 678.9\n",
        "block_sum : 42.1\n",
        "iowait_sum : 7.999\n",
    );
    let parsed = parse_sched(raw);
    assert_eq!(
        (
            parsed.wait_sum,
            parsed.sleep_sum,
            parsed.block_sum,
            parsed.iowait_sum,
        ),
        (Some(1234), Some(678), Some(42), Some(7))
    );
}
#[test]
fn parse_sched_negative_fractional_clamps_to_zero() {
    // A negative value cannot be represented in the unsigned field; the
    // parser must clamp to zero rather than wrap or error.
    let parsed = parse_sched("wait_sum : -5.0\n");
    assert_eq!(parsed.wait_sum, Some(0));
}
#[test]
fn parse_sched_bare_key_names_populate_same_fields() {
    // Keys without the `se.statistics.` prefix must map to the same fields.
    let raw = concat!(
        "nr_wakeups : 11\n",
        "nr_wakeups_local : 8\n",
        "nr_wakeups_remote : 3\n",
        "nr_wakeups_sync : 2\n",
        "nr_wakeups_migrate : 1\n",
        "nr_wakeups_idle : 4\n",
    );
    let parsed = parse_sched(raw);
    assert_eq!(
        (
            parsed.nr_wakeups,
            parsed.nr_wakeups_local,
            parsed.nr_wakeups_remote,
            parsed.nr_wakeups_sync,
            parsed.nr_wakeups_migrate,
            parsed.nr_wakeups_idle,
        ),
        (Some(11), Some(8), Some(3), Some(2), Some(1), Some(4))
    );
}
#[test]
fn parse_sched_alternative_prefix_populates_same_fields() {
    // Matching keys by their trailing name means any dotted prefix works.
    let raw = concat!(
        "stats.nr_wakeups : 42\n",
        "some.other.prefix.nr_migrations : 9\n",
    );
    let parsed = parse_sched(raw);
    assert_eq!(parsed.nr_wakeups, Some(42));
    assert_eq!(parsed.nr_migrations, Some(9));
}
#[test]
fn parse_sched_unknown_keys_are_ignored() {
    // Unrecognized lines are skipped without disturbing the known fields
    // around them.
    let raw = concat!(
        "nr_wakeups : 11\n",
        "fictional_new_kernel_stat : 9999\n",
        "nr_migrations : 9\n",
    );
    let parsed = parse_sched(raw);
    assert_eq!((parsed.nr_wakeups, parsed.nr_migrations), (Some(11), Some(9)));
}
#[test]
fn process_linked_against_jemalloc_at_detects_dso_in_maps() {
    // A maps file containing a libjemalloc DSO mapping must be detected.
    let root = tempfile::TempDir::new().unwrap();
    let tgid = 777;
    let proc_dir = root.path().join(tgid.to_string());
    std::fs::create_dir_all(&proc_dir).unwrap();
    let maps = concat!(
        "5583e6f7a000-5583e6f7b000 r-xp 00000000 00:00 0\n",
        "7f4567890000-7f4567abc000 r-xp 00000000 fe:00 12345 /usr/lib/x86_64-linux-gnu/libjemalloc.so.2\n",
        "7f4567abc000-7f4567def000 r--p 00000000 fe:00 67890 /usr/lib/x86_64-linux-gnu/libc.so.6\n",
    );
    std::fs::write(proc_dir.join("maps"), maps).unwrap();
    assert!(process_linked_against_jemalloc_at(root.path(), tgid));
}
#[test]
fn process_linked_against_jemalloc_at_absent_returns_false() {
    // A maps file with no jemalloc mapping anywhere must not be detected.
    let root = tempfile::TempDir::new().unwrap();
    let tgid = 888;
    let proc_dir = root.path().join(tgid.to_string());
    std::fs::create_dir_all(&proc_dir).unwrap();
    let maps = concat!(
        "5583e6f7a000-5583e6f7b000 r-xp 00000000 00:00 0\n",
        "7f4567abc000-7f4567def000 r--p 00000000 fe:00 67890 /usr/lib/x86_64-linux-gnu/libc.so.6\n",
    );
    std::fs::write(proc_dir.join("maps"), maps).unwrap();
    assert!(!process_linked_against_jemalloc_at(root.path(), tgid));
}
#[test]
fn process_linked_against_jemalloc_at_missing_file_returns_false() {
    // No <root>/<tgid>/maps file at all: detection must fail closed
    // (return false) rather than error.
    let root = tempfile::TempDir::new().unwrap();
    assert!(!process_linked_against_jemalloc_at(root.path(), 999));
}
#[test]
fn process_linked_against_jemalloc_at_detects_static_linked() {
    // Detection also triggers when "jemalloc" appears in the main binary's
    // own mapping path (statically linked case), not just in a DSO.
    let root = tempfile::TempDir::new().unwrap();
    let tgid = 1234;
    let proc_dir = root.path().join(tgid.to_string());
    std::fs::create_dir_all(&proc_dir).unwrap();
    let maps = concat!(
        "5583e6f7a000-5583e6f7b000 r-xp 00000000 fe:00 555 /usr/local/bin/my-jemalloc-linked-app\n",
        "7f4567abc000-7f4567def000 r--p 00000000 fe:00 67890 /usr/lib/x86_64-linux-gnu/libc.so.6\n",
    );
    std::fs::write(proc_dir.join("maps"), maps).unwrap();
    assert!(process_linked_against_jemalloc_at(root.path(), tgid));
}
}