use std::collections::BTreeMap;
use std::fmt;
use std::path::Path;
use std::sync::LazyLock;
use anyhow::Context;
use regex::Regex;
use crate::ctprof::{
CgroupCpuStats, CgroupMemoryStats, CgroupPidsStats, CgroupStats, CtprofSnapshot, Psi, PsiHalf,
PsiResource, ThreadState,
};
// How threads are bucketed into comparison groups.
// NOTE: this enum derives clap::ValueEnum, so variant *doc* comments would
// surface in CLI help text — keep annotations as plain `//` comments.
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)]
pub enum GroupBy {
// Group by parent/process command name (the default, per GroupByOrDefault).
Pcomm,
// Group by cgroup membership.
Cgroup,
// Group by thread command name (presumably with some normalization applied
// unless no_thread_normalize is set — confirm against the grouping code).
Comm,
// Group by thread command name without normalization.
CommExact,
// Collapse everything into a single group.
All,
}
/// Options controlling how two ctprof snapshots are grouped and compared.
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct CompareOptions {
/// Grouping mode; wraps [`GroupBy`] so `Default` yields `GroupBy::Pcomm`.
pub group_by: GroupByOrDefault,
/// Cgroup names/prefixes to flatten when grouping — assumed to merge child
/// cgroups into the listed ones; confirm against the grouping code.
pub cgroup_flatten: Vec<String>,
/// Disable thread-name normalization (keep exact comm strings).
pub no_thread_normalize: bool,
/// Disable cgroup-path normalization.
pub no_cg_normalize: bool,
/// Sort keys applied in order when rendering the comparison.
pub sort_by: Vec<SortKey>,
}
/// One `--sort-by` entry, e.g. `thread_count:desc`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct SortKey {
/// Name of the metric to sort on (must match a `CtprofMetricDef::name`
/// or derived-metric name).
pub metric: &'static str,
/// Sort largest-first when true.
pub descending: bool,
}
/// Newtype around [`GroupBy`] whose sole purpose is to supply a `Default`
/// (`GroupBy::Pcomm`) so `CompareOptions` can derive `Default`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct GroupByOrDefault(pub GroupBy);
impl Default for GroupByOrDefault {
fn default() -> Self {
Self(GroupBy::Pcomm)
}
}
impl From<GroupBy> for GroupByOrDefault {
fn from(g: GroupBy) -> Self {
Self(g)
}
}
/// How per-thread readings fold into one group-level value.
///
/// Each variant carries an accessor that extracts the relevant field from a
/// [`ThreadState`]; the variant name selects the fold (Sum*, Max*, Range*,
/// Mode*, Affinity). Fold semantics below are inferred from the variant
/// names and the metric descriptions (e.g. thread_count: "the sum is the
/// group population") — confirm against the aggregation code.
#[derive(Debug, Clone, Copy)]
pub enum AggRule {
/// Sum a monotonic event counter across the group's threads.
SumCount(fn(&ThreadState) -> crate::metric_types::MonotonicCount),
/// Sum a monotonic nanosecond accumulator across the group.
SumNs(fn(&ThreadState) -> crate::metric_types::MonotonicNs),
/// Sum a USER_HZ clock-tick accumulator across the group.
SumTicks(fn(&ThreadState) -> crate::metric_types::ClockTicks),
/// Sum a byte counter across the group.
SumBytes(fn(&ThreadState) -> crate::metric_types::Bytes),
/// Take the maximum of per-thread peak (high-watermark) ns values.
MaxPeak(fn(&ThreadState) -> crate::metric_types::PeakNs),
/// Take the maximum of per-thread peak byte values.
MaxPeakBytes(fn(&ThreadState) -> crate::metric_types::PeakBytes),
/// Take the maximum of per-thread gauge (point-in-time) ns values.
MaxGaugeNs(fn(&ThreadState) -> crate::metric_types::GaugeNs),
/// Take the maximum of per-thread gauge counts.
MaxGaugeCount(fn(&ThreadState) -> crate::metric_types::GaugeCount),
/// Report the range of a signed ordinal value (e.g. nice, priority).
RangeI32(fn(&ThreadState) -> crate::metric_types::OrdinalI32),
/// Report the range of an unsigned ordinal value (e.g. rt_priority).
RangeU32(fn(&ThreadState) -> crate::metric_types::OrdinalU32),
/// Report the most common categorical string (e.g. policy).
Mode(fn(&ThreadState) -> crate::metric_types::CategoricalString),
/// Report the most common single-char value (e.g. task state letter).
ModeChar(fn(&ThreadState) -> char),
/// Report the most common boolean value.
ModeBool(fn(&ThreadState) -> bool),
/// Combine per-thread CPU affinity sets.
Affinity(fn(&ThreadState) -> crate::metric_types::CpuSet),
}
/// Static definition of one directly-captured per-thread metric.
#[derive(Debug, Clone, Copy)]
#[non_exhaustive]
pub struct CtprofMetricDef {
/// Metric name as used in output and in `--sort-by`.
pub name: &'static str,
/// How per-thread readings are folded into the group value.
pub rule: AggRule,
/// Optional scheduler-class restriction tag ("cfs-only", "non-ext",
/// "fair-policy"); `None` means the metric applies to every class.
pub sched_class: Option<&'static str>,
/// Kernel config options that must be enabled for this counter to exist.
pub config_gates: &'static [&'static str],
/// Marks a metric kept for compatibility but no longer populated —
/// assumed meaning from the name; confirm against the capture code.
pub is_dead: bool,
/// Human-readable description of the metric and its caveats.
pub description: &'static str,
/// Which output section the metric is rendered under.
pub section: Section,
}
/// Registry of every directly-captured per-thread metric.
///
/// Each entry names the metric, the [`AggRule`] that folds per-thread
/// readings into a group value, the kernel config options gating the
/// counter, an optional scheduler-class restriction, and the description
/// surfaced to users. Order here is the presentation order within each
/// [`Section`] — NOTE(review): assumed; confirm against the renderer.
pub static CTPROF_METRICS: &[CtprofMetricDef] = &[
// --- Task identity & scheduling attributes (procfs snapshots) ---
CtprofMetricDef {
name: "thread_count",
rule: AggRule::SumCount(|_| crate::metric_types::MonotonicCount(1)),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Number of threads in this group. Each thread contributes 1; the sum is the group population. Useful for --sort-by thread_count:desc to find groups where thread count changed the most.",
section: Section::Primary,
},
CtprofMetricDef {
name: "policy",
rule: AggRule::Mode(|t| t.policy.clone()),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Scheduling policy (SCHED_OTHER, SCHED_FIFO, SCHED_RR, SCHED_BATCH, SCHED_IDLE, SCHED_DEADLINE, SCHED_EXT).",
section: Section::Primary,
},
CtprofMetricDef {
name: "nice",
rule: AggRule::RangeI32(|t| t.nice),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Nice value (-20..19); CFS priority knob.",
section: Section::Primary,
},
CtprofMetricDef {
name: "priority",
rule: AggRule::RangeI32(|t| t.priority),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Kernel task priority from /proc/<tid>/stat field 18 (CFS=[0..39], RT=[-2..-100], DL=-101).",
section: Section::Primary,
},
CtprofMetricDef {
name: "rt_priority",
rule: AggRule::RangeU32(|t| t.rt_priority),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Real-time scheduler priority (0..99); 0 for non-RT tasks.",
section: Section::Primary,
},
CtprofMetricDef {
name: "cpu_affinity",
rule: AggRule::Affinity(|t| t.cpu_affinity.clone()),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Set of CPUs the task is allowed to run on (sched_getaffinity result).",
section: Section::Primary,
},
CtprofMetricDef {
name: "processor",
rule: AggRule::RangeI32(|t| t.processor),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Last CPU the task ran on.",
section: Section::Primary,
},
CtprofMetricDef {
name: "state",
rule: AggRule::ModeChar(|t| t.state),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Task state letter (R running, S sleeping, D uninterruptible, Z zombie, T stopped).",
section: Section::Primary,
},
CtprofMetricDef {
name: "ext_enabled",
rule: AggRule::ModeBool(|t| t.ext_enabled),
sched_class: None,
config_gates: &["CONFIG_SCHED_CLASS_EXT"],
is_dead: false,
description: "Whether the task is currently dispatched on the sched_ext class.",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_threads",
rule: AggRule::MaxGaugeCount(|t| t.nr_threads),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Process-wide thread count (signal_struct->nr_threads); leader-only.",
section: Section::Primary,
},
// --- Scheduler runtime counters (schedstat / sched_info / schedstats) ---
CtprofMetricDef {
name: "run_time_ns",
rule: AggRule::SumNs(|t| t.run_time_ns),
sched_class: None,
config_gates: &["CONFIG_SCHED_INFO"],
is_dead: false,
description: "Cumulative on-CPU time, ns; /proc/<tid>/schedstat field 1.",
section: Section::Primary,
},
CtprofMetricDef {
name: "wait_time_ns",
rule: AggRule::SumNs(|t| t.wait_time_ns),
sched_class: None,
config_gates: &["CONFIG_SCHED_INFO"],
is_dead: false,
description: "Cumulative time waiting on the runqueue, ns; schedstat field 2.",
section: Section::Primary,
},
CtprofMetricDef {
name: "timeslices",
rule: AggRule::SumCount(|t| t.timeslices),
sched_class: None,
config_gates: &["CONFIG_SCHED_INFO"],
is_dead: false,
description: "Number of times the task was run on a CPU; schedstat field 3.",
section: Section::Primary,
},
CtprofMetricDef {
name: "voluntary_csw",
rule: AggRule::SumCount(|t| t.voluntary_csw),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Voluntary context switches (task gave up the CPU itself).",
section: Section::Primary,
},
CtprofMetricDef {
name: "nonvoluntary_csw",
rule: AggRule::SumCount(|t| t.nonvoluntary_csw),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Involuntary context switches (task was preempted).",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_wakeups",
rule: AggRule::SumCount(|t| t.nr_wakeups),
sched_class: None,
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Total wakeups via try_to_wake_up().",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_wakeups_local",
rule: AggRule::SumCount(|t| t.nr_wakeups_local),
sched_class: None,
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Wakeups landed on the same CPU as the waker.",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_wakeups_remote",
rule: AggRule::SumCount(|t| t.nr_wakeups_remote),
sched_class: None,
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Wakeups landed on a different CPU than the waker.",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_wakeups_sync",
rule: AggRule::SumCount(|t| t.nr_wakeups_sync),
sched_class: None,
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "WF_SYNC wakeups (synchronous wakeup hint to scheduler).",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_wakeups_migrate",
rule: AggRule::SumCount(|t| t.nr_wakeups_migrate),
sched_class: None,
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Wakeups where the task migrated to a different CPU than its prior one (WF_MIGRATED); distinct from nr_wakeups_remote (waker CPU != target CPU).",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_wakeups_affine",
rule: AggRule::SumCount(|t| t.nr_wakeups_affine),
sched_class: Some("cfs-only"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Wakeups that succeeded under the wake_affine() heuristic.",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_wakeups_affine_attempts",
rule: AggRule::SumCount(|t| t.nr_wakeups_affine_attempts),
sched_class: Some("cfs-only"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "wake_affine() attempts; success rate = nr_wakeups_affine / attempts.",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_migrations",
rule: AggRule::SumCount(|t| t.nr_migrations),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Cumulative cross-CPU migrations of the task.",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_forced_migrations",
rule: AggRule::SumCount(|t| t.nr_forced_migrations),
sched_class: Some("cfs-only"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Migrations forced by the CFS load balancer.",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_failed_migrations_affine",
rule: AggRule::SumCount(|t| t.nr_failed_migrations_affine),
sched_class: Some("cfs-only"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Load-balancer migrations rejected for cpu-affinity reasons.",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_failed_migrations_running",
rule: AggRule::SumCount(|t| t.nr_failed_migrations_running),
sched_class: Some("cfs-only"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Load-balancer migrations rejected because the task was running.",
section: Section::Primary,
},
CtprofMetricDef {
name: "nr_failed_migrations_hot",
rule: AggRule::SumCount(|t| t.nr_failed_migrations_hot),
sched_class: Some("cfs-only"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Load-balancer migrations rejected because the task was cache-hot.",
section: Section::Primary,
},
CtprofMetricDef {
name: "wait_sum",
rule: AggRule::SumNs(|t| t.wait_sum),
sched_class: Some("non-ext"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Cumulative time the task waited on the runqueue, ns.",
section: Section::Primary,
},
CtprofMetricDef {
name: "wait_count",
rule: AggRule::SumCount(|t| t.wait_count),
sched_class: Some("non-ext"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Number of distinct runqueue-wait intervals the task accumulated.",
section: Section::Primary,
},
CtprofMetricDef {
name: "wait_max",
rule: AggRule::MaxPeak(|t| t.wait_max),
sched_class: Some("non-ext"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Longest single runqueue-wait interval observed, ns.",
section: Section::Primary,
},
CtprofMetricDef {
name: "voluntary_sleep_ns",
rule: AggRule::SumNs(|t| t.voluntary_sleep_ns),
sched_class: Some("non-ext"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Pure voluntary sleep time (TASK_INTERRUPTIBLE only), ns; capture-side normalized as sum_sleep_runtime - sum_block_runtime so the kernel's sleep/block double-count is stripped before delta math.",
section: Section::Primary,
},
CtprofMetricDef {
name: "sleep_max",
rule: AggRule::MaxPeak(|t| t.sleep_max),
sched_class: Some("non-ext"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Longest single sleep interval observed, ns.",
section: Section::Primary,
},
CtprofMetricDef {
name: "block_sum",
rule: AggRule::SumNs(|t| t.block_sum),
sched_class: Some("non-ext"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Cumulative time the task spent blocked (TASK_UNINTERRUPTIBLE), ns.",
section: Section::Primary,
},
CtprofMetricDef {
name: "block_max",
rule: AggRule::MaxPeak(|t| t.block_max),
sched_class: Some("non-ext"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Longest single uninterruptible-block interval observed, ns.",
section: Section::Primary,
},
CtprofMetricDef {
name: "iowait_sum",
rule: AggRule::SumNs(|t| t.iowait_sum),
sched_class: Some("non-ext"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Cumulative time the task spent in iowait, ns.",
section: Section::Primary,
},
CtprofMetricDef {
name: "iowait_count",
rule: AggRule::SumCount(|t| t.iowait_count),
sched_class: Some("non-ext"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Number of distinct iowait intervals the task accumulated.",
section: Section::Primary,
},
CtprofMetricDef {
name: "exec_max",
rule: AggRule::MaxPeak(|t| t.exec_max),
sched_class: None,
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Longest single uninterrupted on-CPU run observed, ns.",
section: Section::Primary,
},
CtprofMetricDef {
name: "slice_max",
rule: AggRule::MaxPeak(|t| t.slice_max),
sched_class: Some("cfs-only"),
config_gates: &["CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Longest CFS slice the task was granted, ns.",
section: Section::Primary,
},
CtprofMetricDef {
name: "core_forceidle_sum",
rule: AggRule::SumNs(|t| t.core_forceidle_sum),
sched_class: None,
config_gates: &["CONFIG_SCHED_CORE", "CONFIG_SCHEDSTATS"],
is_dead: false,
description: "Cumulative time this task forced its SMT sibling idle, ns (core scheduling).",
section: Section::Primary,
},
CtprofMetricDef {
name: "fair_slice_ns",
rule: AggRule::MaxGaugeNs(|t| t.fair_slice_ns),
sched_class: Some("fair-policy"),
config_gates: &[],
is_dead: false,
description: "Current scheduler slice, ns; snapshot from /proc/<tid>/sched (stale under sched_ext).",
section: Section::Primary,
},
// --- Memory allocation, page faults & CPU time ---
CtprofMetricDef {
name: "allocated_bytes",
rule: AggRule::SumBytes(|t| t.allocated_bytes),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "jemalloc per-thread allocated bytes (TSD thread_allocated counter).",
section: Section::Primary,
},
CtprofMetricDef {
name: "deallocated_bytes",
rule: AggRule::SumBytes(|t| t.deallocated_bytes),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "jemalloc per-thread deallocated bytes (TSD thread_deallocated counter).",
section: Section::Primary,
},
CtprofMetricDef {
name: "minflt",
rule: AggRule::SumCount(|t| t.minflt),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Minor page faults (resolved without I/O).",
section: Section::Primary,
},
CtprofMetricDef {
name: "majflt",
rule: AggRule::SumCount(|t| t.majflt),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Major page faults (required disk I/O to resolve).",
section: Section::Primary,
},
CtprofMetricDef {
name: "utime_clock_ticks",
rule: AggRule::SumTicks(|t| t.utime_clock_ticks),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "User-mode CPU time, USER_HZ ticks; /proc/<tid>/stat field 14.",
section: Section::Primary,
},
CtprofMetricDef {
name: "stime_clock_ticks",
rule: AggRule::SumTicks(|t| t.stime_clock_ticks),
sched_class: None,
config_gates: &[],
is_dead: false,
description: "Kernel-mode CPU time, USER_HZ ticks; /proc/<tid>/stat field 15.",
section: Section::Primary,
},
// --- I/O accounting (/proc/<tid>/io) ---
CtprofMetricDef {
name: "rchar",
rule: AggRule::SumBytes(|t| t.rchar),
sched_class: None,
config_gates: &["CONFIG_TASK_IO_ACCOUNTING"],
is_dead: false,
description: "Bytes read at the read syscall layer (incl. cached / pagecache hits).",
section: Section::Primary,
},
CtprofMetricDef {
name: "wchar",
rule: AggRule::SumBytes(|t| t.wchar),
sched_class: None,
config_gates: &["CONFIG_TASK_IO_ACCOUNTING"],
is_dead: false,
description: "Bytes written at the write syscall layer (incl. pagecache / writeback).",
section: Section::Primary,
},
CtprofMetricDef {
name: "syscr",
rule: AggRule::SumCount(|t| t.syscr),
sched_class: None,
config_gates: &["CONFIG_TASK_IO_ACCOUNTING"],
is_dead: false,
description: "Number of read syscalls.",
section: Section::Primary,
},
CtprofMetricDef {
name: "syscw",
rule: AggRule::SumCount(|t| t.syscw),
sched_class: None,
config_gates: &["CONFIG_TASK_IO_ACCOUNTING"],
is_dead: false,
description: "Number of write syscalls.",
section: Section::Primary,
},
CtprofMetricDef {
name: "read_bytes",
rule: AggRule::SumBytes(|t| t.read_bytes),
sched_class: None,
config_gates: &["CONFIG_TASK_IO_ACCOUNTING"],
is_dead: false,
description: "Bytes that hit the storage device on read (excludes pagecache hits).",
section: Section::Primary,
},
CtprofMetricDef {
name: "write_bytes",
rule: AggRule::SumBytes(|t| t.write_bytes),
sched_class: None,
config_gates: &["CONFIG_TASK_IO_ACCOUNTING"],
is_dead: false,
description: "Bytes that hit the storage device on write (post-writeback).",
section: Section::Primary,
},
CtprofMetricDef {
name: "cancelled_write_bytes",
rule: AggRule::SumBytes(|t| t.cancelled_write_bytes),
sched_class: None,
config_gates: &["CONFIG_TASK_IO_ACCOUNTING"],
is_dead: false,
description: "Bytes the kernel deaccounted from a prior dirty-write because the page was reclaimed without writeback (truncate / inode invalidation); recorded on the truncating task, not the writer. Per-thread `write_bytes - cancelled_write_bytes` is NOT a valid derivation — see field doc.",
section: Section::Primary,
},
// --- taskstats delay accounting (count / total / max / min quadruples) ---
CtprofMetricDef {
name: "cpu_delay_count",
rule: AggRule::SumCount(|t| t.cpu_delay_count),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Number of off-CPU windows the task waited for the runqueue to schedule it (taskstats cpu_count). RACY: count + total are not updated atomically.",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "cpu_delay_total_ns",
rule: AggRule::SumNs(|t| t.cpu_delay_total_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Cumulative ns the task waited on the runqueue (taskstats cpu_delay_total). Distinct from `wait_sum` (schedstat) which captures the same wait-for-CPU bucket via a different code path. RACY (see cpu_delay_count).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "cpu_delay_max_ns",
rule: AggRule::MaxPeak(|t| t.cpu_delay_max_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Longest single CPU-wait window observed, ns (taskstats cpu_delay_max).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "cpu_delay_min_ns",
// NOTE(review): *_min_ns metrics aggregate with MaxPeak (largest per-thread
// minimum); AggRule has no Min fold — confirm this is intentional.
rule: AggRule::MaxPeak(|t| t.cpu_delay_min_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Shortest non-zero CPU-wait window observed, ns (taskstats cpu_delay_min). Sentinel 0 means \"no events observed\" — compare against cpu_delay_count.",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "blkio_delay_count",
rule: AggRule::SumCount(|t| t.blkio_delay_count),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Number of synchronous block-I/O wait windows (taskstats blkio_count).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "blkio_delay_total_ns",
rule: AggRule::SumNs(|t| t.blkio_delay_total_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Cumulative ns waiting on synchronous block I/O (taskstats blkio_delay_total). Distinct from `iowait_sum` (schedstat).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "blkio_delay_max_ns",
rule: AggRule::MaxPeak(|t| t.blkio_delay_max_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Longest single block-I/O wait observed, ns (taskstats blkio_delay_max).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "blkio_delay_min_ns",
rule: AggRule::MaxPeak(|t| t.blkio_delay_min_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Shortest non-zero block-I/O wait observed, ns (taskstats blkio_delay_min). Sentinel 0 means \"no events observed\".",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "swapin_delay_count",
rule: AggRule::SumCount(|t| t.swapin_delay_count),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Number of swap-in wait windows (taskstats swapin_count). OVERLAPS with thrashing_delay_count — do not sum.",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "swapin_delay_total_ns",
rule: AggRule::SumNs(|t| t.swapin_delay_total_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Cumulative ns waiting for swap-in to complete (taskstats swapin_delay_total).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "swapin_delay_max_ns",
rule: AggRule::MaxPeak(|t| t.swapin_delay_max_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Longest single swap-in wait observed, ns (taskstats swapin_delay_max).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "swapin_delay_min_ns",
rule: AggRule::MaxPeak(|t| t.swapin_delay_min_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Shortest non-zero swap-in wait observed, ns (taskstats swapin_delay_min). Sentinel 0 means \"no events observed\".",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "freepages_delay_count",
rule: AggRule::SumCount(|t| t.freepages_delay_count),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Number of direct-reclaim wait windows (taskstats freepages_count).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "freepages_delay_total_ns",
rule: AggRule::SumNs(|t| t.freepages_delay_total_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Cumulative ns waiting in direct memory reclaim (taskstats freepages_delay_total).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "freepages_delay_max_ns",
rule: AggRule::MaxPeak(|t| t.freepages_delay_max_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Longest single direct-reclaim wait observed, ns (taskstats freepages_delay_max).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "freepages_delay_min_ns",
rule: AggRule::MaxPeak(|t| t.freepages_delay_min_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Shortest non-zero direct-reclaim wait observed, ns (taskstats freepages_delay_min). Sentinel 0 means \"no events observed\".",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "thrashing_delay_count",
rule: AggRule::SumCount(|t| t.thrashing_delay_count),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Number of thrashing wait windows (taskstats thrashing_count). OVERLAPS with swapin_delay_count — do not sum.",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "thrashing_delay_total_ns",
rule: AggRule::SumNs(|t| t.thrashing_delay_total_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Cumulative ns waiting under thrashing pressure (taskstats thrashing_delay_total).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "thrashing_delay_max_ns",
rule: AggRule::MaxPeak(|t| t.thrashing_delay_max_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Longest single thrashing wait observed, ns (taskstats thrashing_delay_max).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "thrashing_delay_min_ns",
rule: AggRule::MaxPeak(|t| t.thrashing_delay_min_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Shortest non-zero thrashing wait observed, ns (taskstats thrashing_delay_min). Sentinel 0 means \"no events observed\".",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "compact_delay_count",
rule: AggRule::SumCount(|t| t.compact_delay_count),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Number of memory-compaction wait windows (taskstats compact_count).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "compact_delay_total_ns",
rule: AggRule::SumNs(|t| t.compact_delay_total_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Cumulative ns waiting on memory compaction (taskstats compact_delay_total).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "compact_delay_max_ns",
rule: AggRule::MaxPeak(|t| t.compact_delay_max_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Longest single compaction wait observed, ns (taskstats compact_delay_max).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "compact_delay_min_ns",
rule: AggRule::MaxPeak(|t| t.compact_delay_min_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Shortest non-zero compaction wait observed, ns (taskstats compact_delay_min). Sentinel 0 means \"no events observed\".",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "wpcopy_delay_count",
rule: AggRule::SumCount(|t| t.wpcopy_delay_count),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Number of write-protect-copy (CoW) fault wait windows (taskstats wpcopy_count).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "wpcopy_delay_total_ns",
rule: AggRule::SumNs(|t| t.wpcopy_delay_total_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Cumulative ns waiting on write-protect-copy faults (taskstats wpcopy_delay_total).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "wpcopy_delay_max_ns",
rule: AggRule::MaxPeak(|t| t.wpcopy_delay_max_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Longest single write-protect-copy fault wait observed, ns (taskstats wpcopy_delay_max).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "wpcopy_delay_min_ns",
rule: AggRule::MaxPeak(|t| t.wpcopy_delay_min_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Shortest non-zero write-protect-copy fault wait observed, ns (taskstats wpcopy_delay_min). Sentinel 0 means \"no events observed\".",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "irq_delay_count",
rule: AggRule::SumCount(|t| t.irq_delay_count),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Number of IRQ-handler windows charged to the task (taskstats irq_count).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "irq_delay_total_ns",
rule: AggRule::SumNs(|t| t.irq_delay_total_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Cumulative ns of IRQ handling charged to the task (taskstats irq_delay_total).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "irq_delay_max_ns",
rule: AggRule::MaxPeak(|t| t.irq_delay_max_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Longest single IRQ-handler window observed, ns (taskstats irq_delay_max).",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "irq_delay_min_ns",
rule: AggRule::MaxPeak(|t| t.irq_delay_min_ns),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
is_dead: false,
description: "Shortest non-zero IRQ-handler window observed, ns (taskstats irq_delay_min). Sentinel 0 means \"no events observed\".",
section: Section::TaskstatsDelay,
},
// --- taskstats lifetime memory watermarks (CONFIG_TASK_XACCT) ---
CtprofMetricDef {
name: "hiwater_rss_bytes",
rule: AggRule::MaxPeakBytes(|t| t.hiwater_rss_bytes),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_XACCT"],
is_dead: false,
description: "Lifetime high-watermark of resident-set size, bytes (taskstats hiwater_rss). Distinct from smaps_rollup_kb[\"Rss\"] which is the CURRENT RSS.",
section: Section::TaskstatsDelay,
},
CtprofMetricDef {
name: "hiwater_vm_bytes",
rule: AggRule::MaxPeakBytes(|t| t.hiwater_vm_bytes),
sched_class: None,
config_gates: &["CONFIG_TASKSTATS", "CONFIG_TASK_XACCT"],
is_dead: false,
description: "Lifetime high-watermark of virtual-memory size, bytes (taskstats hiwater_vm).",
section: Section::TaskstatsDelay,
},
];
/// A computed (derived) metric value.
///
/// Only scalar readings exist today; the enum is `#[non_exhaustive]` so new
/// value shapes can be added without breaking downstream crates.
#[derive(Debug, Clone, Copy, PartialEq)]
#[non_exhaustive]
pub enum DerivedValue {
Scalar(f64),
}

impl DerivedValue {
    /// Return the underlying scalar reading as a plain `f64`.
    pub fn as_f64(&self) -> f64 {
        match *self {
            DerivedValue::Scalar(value) => value,
        }
    }
}
/// Static definition of one derived (computed-from-others) metric.
#[derive(Debug, Clone, Copy)]
#[non_exhaustive]
pub struct DerivedMetricDef {
/// Derived-metric name as shown in output.
pub name: &'static str,
/// Unit/scale used when rendering (e.g. `Ns`, `Bytes`, `None`).
pub ladder: ScaleLadder,
/// Human-readable description including derivation formula and caveats.
pub description: &'static str,
/// Names of the base metrics this derivation reads; presumably used to
/// gate computation on input availability — confirm against callers.
pub inputs: &'static [&'static str],
/// True when the value is a dimensionless ratio/fraction.
pub is_ratio: bool,
/// Computes the derived value from the group's aggregated base metrics;
/// `None` when inputs are missing or the denominator is zero.
pub compute: fn(&BTreeMap<String, Aggregated>) -> Option<DerivedValue>,
/// Which output section the metric is rendered under.
pub section: Section,
}
/// Look up `name` in the aggregated-metric map and coerce it to a scalar.
///
/// Returns `None` when the metric is absent or carries no numeric reading.
fn input_scalar(metrics: &BTreeMap<String, Aggregated>, name: &str) -> Option<f64> {
    metrics.get(name)?.numeric()
}
/// Compute `numerator / denominator` over two aggregated base metrics.
///
/// Returns `None` when either input is missing or the denominator is
/// exactly zero (avoids emitting an infinity).
fn ratio_compute(
    metrics: &BTreeMap<String, Aggregated>,
    numerator: &str,
    denominator: &str,
) -> Option<DerivedValue> {
    let top = input_scalar(metrics, numerator)?;
    let bottom = input_scalar(metrics, denominator)?;
    // `bottom != 0.0` matches the original `== 0.0` early-return, including
    // the -0.0 case (IEEE equality treats -0.0 == 0.0).
    (bottom != 0.0).then(|| DerivedValue::Scalar(top / bottom))
}
/// Compute `numerator / (numerator + addend)` — the fraction one metric
/// contributes to the pair's total.
///
/// Returns `None` when either input is missing or the combined total is
/// exactly zero.
fn ratio_of_sum_compute(
    metrics: &BTreeMap<String, Aggregated>,
    numerator: &str,
    addend: &str,
) -> Option<DerivedValue> {
    let part = input_scalar(metrics, numerator)?;
    let rest = input_scalar(metrics, addend)?;
    let whole = part + rest;
    (whole != 0.0).then(|| DerivedValue::Scalar(part / whole))
}
pub static CTPROF_DERIVED_METRICS: &[DerivedMetricDef] = &[
DerivedMetricDef {
name: "affine_success_ratio",
ladder: ScaleLadder::None,
description: "wake_affine() success ratio: nr_wakeups_affine / nr_wakeups_affine_attempts.",
inputs: &["nr_wakeups_affine", "nr_wakeups_affine_attempts"],
is_ratio: true,
compute: |m| ratio_compute(m, "nr_wakeups_affine", "nr_wakeups_affine_attempts"),
section: Section::Derived,
},
DerivedMetricDef {
name: "avg_wait_ns",
ladder: ScaleLadder::Ns,
description: "Average runqueue-wait duration per scheduling event: wait_sum / wait_count (ns/event).",
inputs: &["wait_sum", "wait_count"],
is_ratio: false,
compute: |m| ratio_compute(m, "wait_sum", "wait_count"),
section: Section::Derived,
},
DerivedMetricDef {
name: "cpu_efficiency",
ladder: ScaleLadder::None,
description: "Fraction of total scheduler-tracked time spent on-CPU: run_time_ns / (run_time_ns + wait_time_ns).",
inputs: &["run_time_ns", "wait_time_ns"],
is_ratio: true,
compute: |m| ratio_of_sum_compute(m, "run_time_ns", "wait_time_ns"),
section: Section::Derived,
},
DerivedMetricDef {
name: "avg_slice_ns",
ladder: ScaleLadder::Ns,
description: "Average on-CPU slice length per timeslice: run_time_ns / timeslices (ns/timeslice).",
inputs: &["run_time_ns", "timeslices"],
is_ratio: false,
compute: |m| ratio_compute(m, "run_time_ns", "timeslices"),
section: Section::Derived,
},
DerivedMetricDef {
name: "involuntary_csw_ratio",
ladder: ScaleLadder::None,
description: "Fraction of context switches that were preemptions: nonvoluntary_csw / (voluntary_csw + nonvoluntary_csw).",
inputs: &["nonvoluntary_csw", "voluntary_csw"],
is_ratio: true,
compute: |m| ratio_of_sum_compute(m, "nonvoluntary_csw", "voluntary_csw"),
section: Section::Derived,
},
DerivedMetricDef {
name: "disk_io_fraction",
ladder: ScaleLadder::None,
description: "Fraction of read syscall bytes that hit storage: read_bytes / rchar. Typically <= 1.0 but can exceed when readahead pulls more block-device bytes than the syscall requested.",
inputs: &["read_bytes", "rchar"],
is_ratio: true,
compute: |m| ratio_compute(m, "read_bytes", "rchar"),
section: Section::Derived,
},
DerivedMetricDef {
name: "live_heap_estimate",
ladder: ScaleLadder::Bytes,
description: "jemalloc live-heap estimate: allocated_bytes - deallocated_bytes. Signed: negative when deallocations dominate (freelist drains memory allocated before capture, or sampled mid-update on a thread that just released a large arena). Renders with explicit `-` and the IEC binary suffix (e.g. `-1.907MiB`).",
inputs: &["allocated_bytes", "deallocated_bytes"],
is_ratio: false,
compute: |m| {
let alloc = input_scalar(m, "allocated_bytes")?;
let dealloc = input_scalar(m, "deallocated_bytes")?;
Some(DerivedValue::Scalar(alloc - dealloc))
},
section: Section::Derived,
},
DerivedMetricDef {
name: "avg_iowait_ns",
ladder: ScaleLadder::Ns,
description: "Average iowait interval per iowait event: iowait_sum / iowait_count (ns/event).",
inputs: &["iowait_sum", "iowait_count"],
is_ratio: false,
compute: |m| ratio_compute(m, "iowait_sum", "iowait_count"),
section: Section::Derived,
},
DerivedMetricDef {
name: "avg_cpu_delay_ns",
ladder: ScaleLadder::Ns,
description: "Average CPU-wait per scheduling event: cpu_delay_total_ns / cpu_delay_count (ns/event). RACY: the kernel updates count + total via the lockless sched_info path, so a concurrent reader may observe one ahead of the other; the quotient is approximate at the sub-event scale and stable at the integrated scale.",
inputs: &["cpu_delay_total_ns", "cpu_delay_count"],
is_ratio: false,
compute: |m| ratio_compute(m, "cpu_delay_total_ns", "cpu_delay_count"),
section: Section::TaskstatsDelay,
},
DerivedMetricDef {
name: "avg_blkio_delay_ns",
ladder: ScaleLadder::Ns,
description: "Average synchronous block-I/O wait per event: blkio_delay_total_ns / blkio_delay_count (ns/event). Distinct from avg_iowait_ns (schedstat) — this travels through the delayacct path and is the canonical delay-accounting block-I/O reading.",
inputs: &["blkio_delay_total_ns", "blkio_delay_count"],
is_ratio: false,
compute: |m| ratio_compute(m, "blkio_delay_total_ns", "blkio_delay_count"),
section: Section::TaskstatsDelay,
},
DerivedMetricDef {
name: "avg_swapin_delay_ns",
ladder: ScaleLadder::Ns,
description: "Average swap-in wait per event: swapin_delay_total_ns / swapin_delay_count (ns/event). OVERLAPS with thrashing — every thrashing event is also a swapin event from the syscall layer; do not sum the two averages or the underlying totals directly.",
inputs: &["swapin_delay_total_ns", "swapin_delay_count"],
is_ratio: false,
compute: |m| ratio_compute(m, "swapin_delay_total_ns", "swapin_delay_count"),
section: Section::TaskstatsDelay,
},
DerivedMetricDef {
name: "avg_freepages_delay_ns",
ladder: ScaleLadder::Ns,
description: "Average direct-reclaim wait per event: freepages_delay_total_ns / freepages_delay_count (ns/event).",
inputs: &["freepages_delay_total_ns", "freepages_delay_count"],
is_ratio: false,
compute: |m| ratio_compute(m, "freepages_delay_total_ns", "freepages_delay_count"),
section: Section::TaskstatsDelay,
},
DerivedMetricDef {
name: "avg_thrashing_delay_ns",
ladder: ScaleLadder::Ns,
description: "Average thrashing wait per event: thrashing_delay_total_ns / thrashing_delay_count (ns/event). OVERLAPS with swapin (see avg_swapin_delay_ns).",
inputs: &["thrashing_delay_total_ns", "thrashing_delay_count"],
is_ratio: false,
compute: |m| ratio_compute(m, "thrashing_delay_total_ns", "thrashing_delay_count"),
section: Section::TaskstatsDelay,
},
DerivedMetricDef {
name: "avg_compact_delay_ns",
ladder: ScaleLadder::Ns,
description: "Average memory-compaction wait per event: compact_delay_total_ns / compact_delay_count (ns/event).",
inputs: &["compact_delay_total_ns", "compact_delay_count"],
is_ratio: false,
compute: |m| ratio_compute(m, "compact_delay_total_ns", "compact_delay_count"),
section: Section::TaskstatsDelay,
},
DerivedMetricDef {
name: "avg_wpcopy_delay_ns",
ladder: ScaleLadder::Ns,
description: "Average write-protect-copy fault wait per event: wpcopy_delay_total_ns / wpcopy_delay_count (ns/event).",
inputs: &["wpcopy_delay_total_ns", "wpcopy_delay_count"],
is_ratio: false,
compute: |m| ratio_compute(m, "wpcopy_delay_total_ns", "wpcopy_delay_count"),
section: Section::TaskstatsDelay,
},
DerivedMetricDef {
name: "avg_irq_delay_ns",
ladder: ScaleLadder::Ns,
description: "Average IRQ-handler window per event: irq_delay_total_ns / irq_delay_count (ns/event).",
inputs: &["irq_delay_total_ns", "irq_delay_count"],
is_ratio: false,
compute: |m| ratio_compute(m, "irq_delay_total_ns", "irq_delay_count"),
section: Section::TaskstatsDelay,
},
DerivedMetricDef {
name: "total_offcpu_delay_ns",
ladder: ScaleLadder::Ns,
description: "Sum of all off-CPU delay-accounting buckets, ns: cpu + blkio + freepages + compact + wpcopy + irq + max(swapin, thrashing). The swapin/thrashing pair is OR'd with .max() rather than summed because the two share syscall-layer events (every thrashing event is also a swapin). Returns `-` when any input is missing (CONFIG_TASK_DELAY_ACCT off, runtime toggle off, or kernel older than the bucket's introduction version).",
inputs: &[
"cpu_delay_total_ns",
"blkio_delay_total_ns",
"swapin_delay_total_ns",
"freepages_delay_total_ns",
"thrashing_delay_total_ns",
"compact_delay_total_ns",
"wpcopy_delay_total_ns",
"irq_delay_total_ns",
],
is_ratio: false,
compute: |m| {
let cpu = input_scalar(m, "cpu_delay_total_ns")?;
let blkio = input_scalar(m, "blkio_delay_total_ns")?;
let swapin = input_scalar(m, "swapin_delay_total_ns")?;
let freepages = input_scalar(m, "freepages_delay_total_ns")?;
let thrashing = input_scalar(m, "thrashing_delay_total_ns")?;
let compact = input_scalar(m, "compact_delay_total_ns")?;
let wpcopy = input_scalar(m, "wpcopy_delay_total_ns")?;
let irq = input_scalar(m, "irq_delay_total_ns")?;
let mem_overlap = swapin.max(thrashing);
Some(DerivedValue::Scalar(
cpu + blkio + freepages + compact + wpcopy + irq + mem_overlap,
))
},
section: Section::TaskstatsDelay,
},
];
pub fn metric_display_name(metric: &CtprofMetricDef) -> &'static str {
metric.name
}
/// Render the bracketed tag annotations for a metric as a single
/// space-separated string, e.g. `[fair] [dead] [SCHEDSTATS]`.
///
/// Emits, in order: the scheduler class (when set), a `[dead]` marker, and
/// one tag per kernel config gate with its `CONFIG_` prefix stripped.
/// Returns an empty string when the metric has no tags.
pub fn metric_tags(metric: &CtprofMetricDef) -> String {
    let mut tags: Vec<String> = Vec::new();
    if let Some(class) = metric.sched_class {
        tags.push(format!("[{class}]"));
    }
    if metric.is_dead {
        tags.push("[dead]".to_string());
    }
    for gate in metric.config_gates {
        // Strip the common CONFIG_ prefix to keep table cells compact.
        let short = gate.strip_prefix("CONFIG_").unwrap_or(gate);
        tags.push(format!("[{short}]"));
    }
    tags.join(" ")
}
/// Result of folding one metric across all threads in a group.
///
/// Which variant is produced follows the metric's `AggRule` family
/// (Sum*, Max*/Peak/Gauge, Range*, Mode*, Affinity).
#[derive(Debug, Clone)]
pub enum Aggregated {
    /// Total across group members (counters, durations, byte counts).
    Sum(u64),
    /// Largest value observed among group members (peaks and gauges).
    Max(u64),
    /// Observed min/max of an ordinal value across members; displayed as
    /// `min..max` (or a single value when degenerate).
    OrdinalRange {
        min: i64,
        max: i64,
    },
    /// Most frequent categorical value: `count` of the `total` members
    /// reported `value`.
    Mode {
        value: String,
        count: usize,
        total: usize,
    },
    /// Summary of the members' CPU affinity masks.
    Affinity(AffinitySummary),
}
/// Aggregate view of the CPU-affinity sets within a thread group.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct AffinitySummary {
    /// Smallest affinity-set size (CPU count) observed across members.
    pub min_cpus: usize,
    /// Largest affinity-set size (CPU count) observed across members.
    pub max_cpus: usize,
    /// The shared CPU list when members agree; `None` renders as "mixed".
    pub uniform: Option<Vec<u32>>,
}
impl Aggregated {
pub fn numeric(&self) -> Option<f64> {
match self {
Aggregated::Sum(v) => Some(*v as f64),
Aggregated::Max(v) => Some(*v as f64),
Aggregated::OrdinalRange { min, max } => {
Some((*min as f64 + *max as f64) / 2.0)
}
Aggregated::Mode { .. } => None,
Aggregated::Affinity(s) => {
Some((s.min_cpus as f64 + s.max_cpus as f64) / 2.0)
}
}
}
}
impl fmt::Display for Aggregated {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Aggregated::Sum(v) => write!(f, "{v}"),
Aggregated::Max(v) => write!(f, "{v}"),
Aggregated::OrdinalRange { min, max } => {
if min == max {
write!(f, "{min}")
} else {
write!(f, "{min}..{max}")
}
}
Aggregated::Mode {
value,
count,
total,
} => {
if count == total {
write!(f, "{value}")
} else {
write!(f, "{value} ({count}/{total})")
}
}
Aggregated::Affinity(s) => {
if let Some(cpus) = &s.uniform {
let n = cpus.len();
let range = format_cpu_range(cpus);
write!(f, "{n} cpus ({range})")
} else if s.min_cpus == s.max_cpus {
write!(f, "{} cpus (mixed)", s.min_cpus)
} else {
write!(f, "{}-{} cpus (mixed)", s.min_cpus, s.max_cpus)
}
}
}
}
}
/// One group of threads from a single snapshot, keyed per the active
/// `GroupBy` axis.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct ThreadGroup {
    /// Grouping key (comm/pcomm pattern, cgroup key, or compound key).
    pub key: String,
    /// Number of threads folded into this group.
    pub thread_count: usize,
    /// Aggregated value per metric name.
    pub metrics: BTreeMap<String, Aggregated>,
    /// Cgroup statistics for the group, when available.
    /// NOTE(review): populated by `build_groups` (not visible in this
    /// chunk) — presumably the stats of the group's cgroup; confirm there.
    pub cgroup_stats: Option<CgroupStats>,
    /// Raw member names contributing to the group; `compare` unions these
    /// across snapshots to build the pattern display label.
    pub members: Vec<String>,
    /// Mean thread start time in clock ticks; `compare` subtracts this
    /// from the newest candidate start tick to estimate group lifetime.
    pub avg_start_ticks: u64,
}
/// One metric's baseline→candidate comparison for one group.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct DiffRow {
    /// Stable key used for matching and sorting.
    pub group_key: String,
    /// Thread count of the group in the baseline snapshot.
    pub thread_count_a: usize,
    /// Thread count of the group in the candidate snapshot.
    pub thread_count_b: usize,
    /// Group lifetime relative to the longest-lived matched group, 0-100.
    pub uptime_pct: Option<f64>,
    /// Cached "baseline→candidate (+pct)" cell of the primary `--sort-by`
    /// metric for this row's group; only set when explicit sorting is active.
    pub sort_by_cell: Option<String>,
    /// Delta of the primary `--sort-by` metric for this row's group.
    pub sort_by_delta: Option<f64>,
    /// Metric definition name this row compares.
    pub metric_name: &'static str,
    /// Unit ladder used when formatting the values.
    pub metric_ladder: ScaleLadder,
    /// Aggregate on the baseline side.
    pub baseline: Aggregated,
    /// Aggregate on the candidate side.
    pub candidate: Aggregated,
    /// candidate - baseline, when both sides are numeric.
    pub delta: Option<f64>,
    /// delta / baseline; `None` for non-numeric values or a ~zero baseline.
    pub delta_pct: Option<f64>,
    /// Human-facing label ("[fudged]" for heuristically matched groups).
    pub display_key: String,
}
impl DiffRow {
    /// Default sort magnitude for a row: |delta_pct| when available,
    /// otherwise |delta| scaled up so absolute-only rows still rank against
    /// percentage rows; rows with no delta at all sink to the bottom.
    fn sort_key(&self) -> f64 {
        match (self.delta_pct, self.delta) {
            (Some(pct), _) => pct.abs(),
            (None, Some(d)) => d.abs() * 1e9,
            (None, None) => f64::NEG_INFINITY,
        }
    }
}
/// Sort both row vectors by the user-supplied `--sort-by` keys.
///
/// Every group is ranked once by the tuple of its deltas for the requested
/// metrics (a missing delta ranks last for that key's direction); both row
/// vectors are then ordered by group rank, and within a group by the
/// metric's declaration order in `CTPROF_METRICS` / `CTPROF_DERIVED_METRICS`.
fn sort_diff_rows_by_keys(
    rows: &mut [DiffRow],
    derived_rows: &mut [DerivedRow],
    sort_keys: &[SortKey],
) {
    debug_assert!(
        !sort_keys.is_empty(),
        "sort_diff_rows_by_keys called with empty sort_keys; \
         caller must short-circuit before invoking the multi-key \
         sort path",
    );
    use std::collections::{BTreeMap, BTreeSet};
    // Declaration-order indices give a stable metric ordering within a group.
    let metric_idx: BTreeMap<&'static str, usize> = CTPROF_METRICS
        .iter()
        .enumerate()
        .map(|(i, m)| (m.name, i))
        .collect();
    let derived_idx: BTreeMap<&'static str, usize> = CTPROF_DERIVED_METRICS
        .iter()
        .enumerate()
        .map(|(i, m)| (m.name, i))
        .collect();
    // Per-group metric-name -> delta lookup, fed by both row kinds.
    let mut group_metrics: BTreeMap<String, BTreeMap<&'static str, f64>> = BTreeMap::new();
    for row in rows.iter() {
        if let Some(d) = row.delta {
            group_metrics
                .entry(row.group_key.clone())
                .or_default()
                .insert(row.metric_name, d);
        }
    }
    for row in derived_rows.iter() {
        if let Some(d) = row.delta {
            group_metrics
                .entry(row.group_key.clone())
                .or_default()
                .insert(row.metric_name, d);
        }
    }
    // Include groups that have rows but no computable deltas, so they still
    // receive a rank (they sort last via the infinity sentinels below).
    let mut unique_groups: BTreeSet<String> = group_metrics.keys().cloned().collect();
    for row in rows.iter() {
        unique_groups.insert(row.group_key.clone());
    }
    for row in derived_rows.iter() {
        unique_groups.insert(row.group_key.clone());
    }
    // Build each group's sort tuple. Missing metrics get the infinity that
    // ranks them last given the key's direction.
    let mut groups_with_tuples: Vec<(String, Vec<f64>)> = unique_groups
        .into_iter()
        .map(|g| {
            let metrics = group_metrics.get(&g);
            let tuple: Vec<f64> = sort_keys
                .iter()
                .map(|k| {
                    metrics
                        .and_then(|m| m.get(k.metric).copied())
                        .unwrap_or(if k.descending {
                            f64::NEG_INFINITY
                        } else {
                            f64::INFINITY
                        })
                })
                .collect();
            (g, tuple)
        })
        .collect();
    // Lexicographic compare over the tuple, honoring each key's direction;
    // group name breaks exact ties deterministically.
    groups_with_tuples.sort_by(|(ga, ta), (gb, tb)| {
        for (i, key) in sort_keys.iter().enumerate() {
            let (va, vb) = (ta[i], tb[i]);
            let ord = if key.descending {
                vb.partial_cmp(&va).unwrap_or(std::cmp::Ordering::Equal)
            } else {
                va.partial_cmp(&vb).unwrap_or(std::cmp::Ordering::Equal)
            };
            if ord != std::cmp::Ordering::Equal {
                return ord;
            }
        }
        ga.cmp(gb)
    });
    let group_ranks: BTreeMap<String, usize> = groups_with_tuples
        .into_iter()
        .enumerate()
        .map(|(i, (g, _))| (g, i))
        .collect();
    // Order rows by group rank, then by metric declaration order; unranked
    // groups (shouldn't occur) fall to the end via usize::MAX.
    rows.sort_by(|a, b| {
        let ra = group_ranks.get(&a.group_key).copied().unwrap_or(usize::MAX);
        let rb = group_ranks.get(&b.group_key).copied().unwrap_or(usize::MAX);
        ra.cmp(&rb).then_with(|| {
            let ia = metric_idx.get(a.metric_name).copied().unwrap_or(usize::MAX);
            let ib = metric_idx.get(b.metric_name).copied().unwrap_or(usize::MAX);
            ia.cmp(&ib)
        })
    });
    derived_rows.sort_by(|a, b| {
        let ra = group_ranks.get(&a.group_key).copied().unwrap_or(usize::MAX);
        let rb = group_ranks.get(&b.group_key).copied().unwrap_or(usize::MAX);
        ra.cmp(&rb).then_with(|| {
            let ia = derived_idx
                .get(a.metric_name)
                .copied()
                .unwrap_or(usize::MAX);
            let ib = derived_idx
                .get(b.metric_name)
                .copied()
                .unwrap_or(usize::MAX);
            ia.cmp(&ib)
        })
    });
}
/// A baseline/candidate cgroup pair matched heuristically ("fudged") because
/// the group keys differ between captures (e.g. a renamed cgroup) but the
/// two sides' thread-type populations overlap strongly.
#[derive(Debug, Clone, Default)]
pub struct FudgedPair {
    /// Cgroup prefix on the baseline side of the pair.
    pub baseline_key: String,
    /// Cgroup prefix on the candidate side of the pair.
    pub candidate_key: String,
    /// Size of the intersection of the two sides' (pcomm, comm) type sets.
    pub overlap: usize,
    /// |intersection| / |union| of the type sets (0.0 when the union is empty).
    pub jaccard: f64,
    /// "pcomm:comm" entries present only on the baseline side.
    pub residual_baseline: Vec<String>,
    /// "pcomm:comm" entries present only on the candidate side.
    pub residual_candidate: Vec<String>,
    /// Child groups additionally matched by the root-rewrite cascade.
    pub cascaded_children: usize,
    /// Baseline path with the common suffix stripped; cascade rewrite base.
    pub baseline_root: String,
    /// Candidate path with the common suffix stripped; cascade rewrite base.
    pub candidate_root: String,
}
/// Full result of comparing two ctprof snapshots; produced by [`compare`].
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct CtprofDiff {
    /// Primary `--sort-by` metric, when explicit sorting was requested.
    pub sort_metric_name: Option<&'static str>,
    /// Per-group, per-metric comparison rows.
    pub rows: Vec<DiffRow>,
    /// Group keys present only in the baseline snapshot (sorted).
    pub only_baseline: Vec<String>,
    /// Group keys present only in the candidate snapshot (sorted).
    pub only_candidate: Vec<String>,
    /// Heuristically matched cgroup pairs (GroupBy::All only).
    pub fudged_pairs: Vec<FudgedPair>,
    /// Flattened cgroup stats, baseline side (GroupBy::Cgroup only).
    pub cgroup_stats_a: BTreeMap<String, CgroupStats>,
    /// Flattened cgroup stats, candidate side (GroupBy::Cgroup only).
    pub cgroup_stats_b: BTreeMap<String, CgroupStats>,
    /// Host-wide pressure-stall readings from the baseline snapshot.
    pub host_psi_a: Psi,
    /// Host-wide pressure-stall readings from the candidate snapshot.
    pub host_psi_b: Psi,
    /// smaps_rollup byte totals per group key, baseline side.
    pub smaps_rollup_a: BTreeMap<String, BTreeMap<String, u64>>,
    /// smaps_rollup byte totals per group key, candidate side (fudged
    /// candidate keys are remapped onto baseline roots).
    pub smaps_rollup_b: BTreeMap<String, BTreeMap<String, u64>>,
    /// sched_ext sysfs state from the baseline snapshot, if captured.
    pub sched_ext_a: Option<crate::ctprof::SchedExtSysfs>,
    /// sched_ext sysfs state from the candidate snapshot, if captured.
    pub sched_ext_b: Option<crate::ctprof::SchedExtSysfs>,
    /// Derived-metric comparison rows (ratios, averages, composites).
    pub derived_rows: Vec<DerivedRow>,
}
/// One derived metric's baseline→candidate comparison for one group.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct DerivedRow {
    /// Stable key used for matching and sorting.
    pub group_key: String,
    /// Human-facing label ("[fudged]" for heuristically matched groups).
    pub display_key: String,
    /// Thread count of the group in the baseline snapshot.
    pub thread_count_a: usize,
    /// Thread count of the group in the candidate snapshot.
    pub thread_count_b: usize,
    /// Derived-metric definition name.
    pub metric_name: &'static str,
    /// Unit ladder used when formatting the values.
    pub metric_ladder: ScaleLadder,
    /// True for ratio metrics, which suppress delta_pct (a percent of a
    /// ratio is noise).
    pub is_ratio: bool,
    /// Computed value on the baseline side; `None` when inputs are missing.
    pub baseline: Option<DerivedValue>,
    /// Computed value on the candidate side; `None` when inputs are missing.
    pub candidate: Option<DerivedValue>,
    /// candidate - baseline, when both sides computed.
    pub delta: Option<f64>,
    /// delta / baseline; `None` for ratios or a ~zero baseline.
    pub delta_pct: Option<f64>,
}
impl DerivedRow {
    /// Default sort magnitude, mirroring `DiffRow::sort_key`: |delta_pct|
    /// when available, otherwise a scaled |delta|; deltaless rows sink to
    /// the bottom.
    fn sort_key(&self) -> f64 {
        match (self.delta_pct, self.delta) {
            (Some(pct), _) => pct.abs(),
            (None, Some(d)) => d.abs() * 1e9,
            (None, None) => f64::NEG_INFINITY,
        }
    }
}
/// Evaluate one derived-metric definition against both sides' aggregates
/// and package the result (with delta / delta_pct) as a `DerivedRow`.
fn build_derived_row(
    key: &str,
    display_key: &str,
    n_a: usize,
    n_b: usize,
    def: &DerivedMetricDef,
    metrics_a: &BTreeMap<String, Aggregated>,
    metrics_b: &BTreeMap<String, Aggregated>,
) -> DerivedRow {
    // Either side may fail to compute when an input metric is absent.
    let baseline = (def.compute)(metrics_a);
    let candidate = (def.compute)(metrics_b);
    let mut delta = None;
    let mut delta_pct = None;
    if let (Some(a), Some(b)) = (baseline, candidate) {
        let va = a.as_f64();
        let d = b.as_f64() - va;
        delta = Some(d);
        // Ratios carry their own scale — a percent-of-ratio is noise — and a
        // near-zero baseline would make the percentage meaningless.
        if !def.is_ratio && va.abs() > f64::EPSILON {
            delta_pct = Some(d / va);
        }
    }
    DerivedRow {
        group_key: key.to_string(),
        display_key: display_key.to_string(),
        thread_count_a: n_a,
        thread_count_b: n_b,
        metric_name: def.name,
        metric_ladder: def.ladder,
        is_ratio: def.is_ratio,
        baseline,
        candidate,
        delta,
        delta_pct,
    }
}
/// Compare two ctprof snapshots and produce a per-group, per-metric diff.
///
/// Threads on each side are grouped per `opts.group_by`; every metric
/// present in both snapshots for a matched group yields a `DiffRow`, and
/// every derived metric yields a `DerivedRow`. Groups present on only one
/// side land in `only_baseline` / `only_candidate`. Under `GroupBy::All`, a
/// "fudge" pass pairs cgroups that were renamed between captures (matched by
/// Jaccard similarity of their thread-type sets) so their rows still diff,
/// and a cascade pass extends each pairing to sibling/child groups under the
/// same renamed root.
pub fn compare(
    baseline: &CtprofSnapshot,
    candidate: &CtprofSnapshot,
    opts: &CompareOptions,
) -> CtprofDiff {
    /// Fold `src` into `dst` metric-by-metric, using each variant's natural
    /// combiner: Sum adds, Max takes the maximum, OrdinalRange widens, Mode
    /// sums totals and keeps the more frequent value. Mismatched variant
    /// pairs (and Affinity, which has no meaningful merge) are left as-is.
    ///
    /// Shared by the fudge and cascade passes, which previously duplicated
    /// this match inline.
    fn merge_aggregated(
        dst: &mut BTreeMap<String, Aggregated>,
        src: &BTreeMap<String, Aggregated>,
    ) {
        use std::collections::btree_map::Entry;
        for (name, val) in src {
            match dst.entry(name.clone()) {
                Entry::Vacant(e) => {
                    e.insert(val.clone());
                }
                Entry::Occupied(mut e) => match (e.get_mut(), val) {
                    (Aggregated::Sum(s), Aggregated::Sum(v)) => {
                        *s += v;
                    }
                    // FIX: Max aggregates must combine with max(); the
                    // previous `+=` summed peak values across merged groups,
                    // overstating peaks.
                    (Aggregated::Max(m), Aggregated::Max(v)) => {
                        *m = (*m).max(*v);
                    }
                    (
                        Aggregated::OrdinalRange { min, max },
                        Aggregated::OrdinalRange {
                            min: vmin,
                            max: vmax,
                        },
                    ) => {
                        *min = (*min).min(*vmin);
                        *max = (*max).max(*vmax);
                    }
                    (
                        Aggregated::Mode {
                            value,
                            count,
                            total,
                        },
                        Aggregated::Mode {
                            value: vv,
                            count: vc,
                            total: vt,
                        },
                    ) => {
                        *total += vt;
                        if *vc > *count {
                            *value = vv.clone();
                            *count = *vc;
                        }
                    }
                    _ => {}
                },
            }
        }
    }

    let flatten = compile_flatten_patterns(&opts.cgroup_flatten);
    let group_by = opts.group_by.0;
    // Thread-name normalization: collapse numbered/id-suffixed names into
    // shared patterns so the same logical pool matches across captures.
    let pattern_counts: Option<BTreeMap<String, usize>> = match (group_by, opts.no_thread_normalize)
    {
        (GroupBy::Comm, false) => Some(pattern_counts_union(baseline, candidate, |t| {
            t.comm.as_str()
        })),
        (GroupBy::Pcomm | GroupBy::All, false) => {
            Some(pattern_counts_union(baseline, candidate, |t| {
                t.pcomm.as_str()
            }))
        }
        _ => None,
    };
    // Cgroup-key normalization maps volatile path components onto stable
    // skeleton keys shared by both snapshots.
    let cgroup_key_map: Option<BTreeMap<String, String>> =
        if matches!(group_by, GroupBy::Cgroup | GroupBy::All) && !opts.no_cg_normalize {
            Some(build_cgroup_key_map(baseline, candidate, &flatten))
        } else {
            None
        };
    let groups_a = build_groups(
        baseline,
        group_by,
        &flatten,
        pattern_counts.as_ref(),
        cgroup_key_map.as_ref(),
        opts.no_thread_normalize,
    );
    let groups_b = build_groups(
        candidate,
        group_by,
        &flatten,
        pattern_counts.as_ref(),
        cgroup_key_map.as_ref(),
        opts.no_thread_normalize,
    );
    let mut diff = CtprofDiff::default();
    // Newest thread start tick on the candidate side; used as "now" below
    // when estimating per-group lifetimes.
    let now_b = candidate
        .threads
        .iter()
        .map(|t| t.start_time_clock_ticks)
        .max()
        .unwrap_or(0);
    // --- Matched groups: emit one row per metric present on both sides ---
    for (key, group_a) in &groups_a {
        let Some(group_b) = groups_b.get(key) else {
            diff.only_baseline.push(key.clone());
            continue;
        };
        // When thread-name normalization is active the key is a pattern;
        // derive the display label from the union of raw member names.
        let pattern_axis_active =
            matches!(group_by, GroupBy::Comm | GroupBy::Pcomm) && !opts.no_thread_normalize;
        let display_key = if pattern_axis_active {
            let mut union: Vec<String> = group_a.members.clone();
            union.extend(group_b.members.iter().cloned());
            union.sort();
            union.dedup();
            pattern_display_label(key, &union)
        } else {
            key.clone()
        };
        for metric in CTPROF_METRICS {
            let Some(a) = group_a.metrics.get(metric.name).cloned() else {
                continue;
            };
            let Some(b) = group_b.metrics.get(metric.name).cloned() else {
                continue;
            };
            diff.rows.push(build_row(
                key,
                &display_key,
                group_a.thread_count,
                group_b.thread_count,
                metric,
                a,
                b,
                None,
            ));
        }
        for def in CTPROF_DERIVED_METRICS {
            diff.derived_rows.push(build_derived_row(
                key,
                &display_key,
                group_a.thread_count,
                group_b.thread_count,
                def,
                &group_a.metrics,
                &group_b.metrics,
            ));
        }
    }
    for key in groups_b.keys() {
        if !groups_a.contains_key(key) {
            diff.only_candidate.push(key.clone());
        }
    }
    // --- Fudge pass (GroupBy::All only): pair renamed cgroups ------------
    let mut fudged_key_pairs: Vec<(String, String)> = Vec::new();
    if group_by == GroupBy::All && !diff.only_baseline.is_empty() && !diff.only_candidate.is_empty()
    {
        // Compound keys are "cgroup\x00pcomm"; this strips back to the cgroup.
        fn cg_prefix(key: &str) -> &str {
            key.split_once('\x00').map_or(key, |(cg, _)| cg)
        }
        type TypeSet = std::collections::BTreeSet<(String, String)>;
        let mut cg_types_a: BTreeMap<String, TypeSet> = BTreeMap::new();
        let mut cg_types_b: BTreeMap<String, TypeSet> = BTreeMap::new();
        // Cgroups that already matched directly are never fudge candidates.
        let matched_prefixes: std::collections::BTreeSet<String> = groups_a
            .keys()
            .filter(|k| groups_b.contains_key(*k))
            .map(|k| cg_prefix(k).to_string())
            .collect();
        let mut cg_prefixes_a: std::collections::BTreeSet<String> =
            std::collections::BTreeSet::new();
        let mut cg_prefixes_b: std::collections::BTreeSet<String> =
            std::collections::BTreeSet::new();
        for key in &diff.only_baseline {
            let pfx = cg_prefix(key).to_string();
            if !matched_prefixes.contains(&pfx) {
                cg_prefixes_a.insert(pfx);
            }
        }
        for key in &diff.only_candidate {
            let pfx = cg_prefix(key).to_string();
            if !matched_prefixes.contains(&pfx) {
                cg_prefixes_b.insert(pfx);
            }
        }
        // Collect each unmatched cgroup's set of (pcomm, comm) thread types;
        // these sets are the fingerprint used for pairing.
        for t in &baseline.threads {
            let cg = flatten_cgroup_path(&t.cgroup, &flatten);
            let cg_key = match cgroup_key_map.as_ref().and_then(|m| m.get(&cg)) {
                Some(k) => k.clone(),
                None => cg,
            };
            if cg_prefixes_a.contains(&cg_key) {
                cg_types_a
                    .entry(cg_key)
                    .or_default()
                    .insert((pattern_key(&t.pcomm), pattern_key(&t.comm)));
            }
        }
        for t in &candidate.threads {
            let cg = flatten_cgroup_path(&t.cgroup, &flatten);
            let cg_key = match cgroup_key_map.as_ref().and_then(|m| m.get(&cg)) {
                Some(k) => k.clone(),
                None => cg,
            };
            if cg_prefixes_b.contains(&cg_key) {
                cg_types_b
                    .entry(cg_key)
                    .or_default()
                    .insert((pattern_key(&t.pcomm), pattern_key(&t.comm)));
            }
        }
        // For each candidate-only cgroup with a big enough fingerprint, pick
        // the baseline-only cgroup with the highest Jaccard similarity,
        // requiring >= 0.90 similarity and >= 10 overlapping types.
        let mut fudged_cg: Vec<(String, String)> = Vec::new();
        for ccg in &cg_prefixes_b {
            let Some(set_b) = cg_types_b.get(ccg) else {
                continue;
            };
            if set_b.len() < 10 {
                continue;
            }
            let mut best: Option<(&str, f64, usize)> = None;
            for bcg in &cg_prefixes_a {
                let Some(set_a) = cg_types_a.get(bcg) else {
                    continue;
                };
                let intersection = set_a.intersection(set_b).count();
                if intersection < 10 {
                    continue;
                }
                let union = set_a.union(set_b).count();
                let jaccard = intersection as f64 / union as f64;
                if jaccard >= 0.90 && best.is_none_or(|(_, bj, _)| jaccard > bj) {
                    best = Some((bcg.as_str(), jaccard, intersection));
                }
            }
            if let Some((bcg, _jaccard, _overlap)) = best {
                fudged_cg.push((bcg.to_string(), ccg.clone()));
            }
        }
        // Translate each paired cgroup into per-key matches: a baseline key
        // matches a candidate key when their pcomm suffixes agree.
        let mut remove_baseline: std::collections::BTreeSet<String> =
            std::collections::BTreeSet::new();
        let mut remove_candidate: std::collections::BTreeSet<String> =
            std::collections::BTreeSet::new();
        let mut fudge_matches: BTreeMap<String, Vec<String>> = BTreeMap::new();
        for (bcg, ccg) in &fudged_cg {
            let b_keys: Vec<&String> = diff
                .only_baseline
                .iter()
                .filter(|k| cg_prefix(k) == bcg.as_str())
                .collect();
            let c_keys: Vec<&String> = diff
                .only_candidate
                .iter()
                .filter(|k| cg_prefix(k) == ccg.as_str())
                .collect();
            let c_suffix_map: BTreeMap<&str, &String> = c_keys
                .iter()
                .map(|k| {
                    let suffix = k.split_once('\x00').map_or("", |(_, s)| s);
                    (suffix, *k)
                })
                .collect();
            for bkey in &b_keys {
                let b_suffix = bkey.split_once('\x00').map_or("", |(_, s)| s);
                if let Some(ckey) = c_suffix_map.get(b_suffix) {
                    remove_baseline.insert((*bkey).clone());
                    remove_candidate.insert((*ckey).clone());
                    fudged_key_pairs.push(((*bkey).clone(), (*ckey).clone()));
                    fudge_matches
                        .entry((*bkey).clone())
                        .or_default()
                        .push((*ckey).clone());
                }
            }
        }
        // Emit rows for fudge-matched keys, merging all candidate groups
        // mapped onto one baseline key.
        for (bkey, ckeys) in &fudge_matches {
            let Some(ga) = groups_a.get(bkey) else {
                continue;
            };
            let mut merged_metrics: BTreeMap<String, Aggregated> = BTreeMap::new();
            let mut merged_thread_count: usize = 0;
            for ckey in ckeys {
                let Some(gb) = groups_b.get(ckey) else {
                    continue;
                };
                merged_thread_count += gb.thread_count;
                merge_aggregated(&mut merged_metrics, &gb.metrics);
            }
            let display_key = "[fudged]".to_string();
            for metric in CTPROF_METRICS {
                let Some(a) = ga.metrics.get(metric.name).cloned() else {
                    continue;
                };
                let Some(b) = merged_metrics.get(metric.name).cloned() else {
                    continue;
                };
                diff.rows.push(build_row(
                    bkey,
                    &display_key,
                    ga.thread_count,
                    merged_thread_count,
                    metric,
                    a,
                    b,
                    None,
                ));
            }
            for def in CTPROF_DERIVED_METRICS {
                diff.derived_rows.push(build_derived_row(
                    bkey,
                    &display_key,
                    ga.thread_count,
                    merged_thread_count,
                    def,
                    &ga.metrics,
                    &merged_metrics,
                ));
            }
        }
        // --- Cascade: extend each pairing to keys under the renamed root --
        let mut cascade_counts: BTreeMap<String, usize> = BTreeMap::new();
        let mut cascade_roots: BTreeMap<(String, String), (String, String)> = BTreeMap::new();
        let mut cascade_matches: BTreeMap<String, Vec<String>> = BTreeMap::new();
        for (bcg, ccg) in &fudged_cg {
            // Strip the longest common path suffix to isolate the renamed
            // root on each side; a fully-common path keeps the original.
            let b_segs: Vec<&str> = bcg.split('/').collect();
            let c_segs: Vec<&str> = ccg.split('/').collect();
            let common_suffix_len = b_segs
                .iter()
                .rev()
                .zip(c_segs.iter().rev())
                .take_while(|(a, b)| a == b)
                .count();
            let b_root: String = b_segs[..b_segs.len().saturating_sub(common_suffix_len)].join("/");
            let c_root: String = c_segs[..c_segs.len().saturating_sub(common_suffix_len)].join("/");
            let b_root = if b_root.is_empty() {
                bcg.clone()
            } else {
                b_root
            };
            let c_root = if c_root.is_empty() {
                ccg.clone()
            } else {
                c_root
            };
            cascade_roots.insert((bcg.clone(), ccg.clone()), (b_root.clone(), c_root.clone()));
            let remaining_b: Vec<String> = diff
                .only_baseline
                .iter()
                .filter(|k| {
                    !remove_baseline.contains(*k) && cg_prefix(k).starts_with(b_root.as_str())
                })
                .cloned()
                .collect();
            let remaining_c: Vec<String> = diff
                .only_candidate
                .iter()
                .filter(|k| {
                    !remove_candidate.contains(*k) && cg_prefix(k).starts_with(c_root.as_str())
                })
                .cloned()
                .collect();
            // Rewrite each remaining candidate key onto the baseline root so
            // it can be looked up by the baseline key verbatim.
            let c_by_suffix: BTreeMap<String, &String> = remaining_c
                .iter()
                .map(|k| {
                    let child_cg = cg_prefix(k);
                    let tail = &child_cg[c_root.len()..];
                    if !tail.is_empty() && !tail.starts_with('/') {
                        // Prefix matched mid-segment; not a real child.
                        return (String::new(), k);
                    }
                    let rewritten = format!("{b_root}{tail}");
                    let suffix = k.split_once('\x00').map_or("", |(_, s)| s);
                    (format!("{rewritten}\x00{suffix}"), k)
                })
                .collect();
            for bkey in &remaining_b {
                if let Some(ckey) = c_by_suffix.get(bkey) {
                    remove_baseline.insert(bkey.clone());
                    remove_candidate.insert((*ckey).clone());
                    fudged_key_pairs.push((bkey.clone(), (*ckey).clone()));
                    *cascade_counts.entry(bcg.clone()).or_insert(0) += 1;
                    cascade_matches
                        .entry(bkey.clone())
                        .or_default()
                        .push((*ckey).clone());
                }
            }
        }
        // Emit rows for cascade-matched keys (same merge as the fudge pass).
        for (bkey, ckeys) in &cascade_matches {
            let Some(ga) = groups_a.get(bkey) else {
                continue;
            };
            let mut merged_metrics: BTreeMap<String, Aggregated> = BTreeMap::new();
            let mut merged_thread_count: usize = 0;
            for ckey in ckeys {
                let Some(gb) = groups_b.get(ckey) else {
                    continue;
                };
                merged_thread_count += gb.thread_count;
                merge_aggregated(&mut merged_metrics, &gb.metrics);
            }
            let display_key = "[fudged]".to_string();
            for metric in CTPROF_METRICS {
                let Some(a) = ga.metrics.get(metric.name).cloned() else {
                    continue;
                };
                let Some(b) = merged_metrics.get(metric.name).cloned() else {
                    continue;
                };
                diff.rows.push(build_row(
                    bkey,
                    &display_key,
                    ga.thread_count,
                    merged_thread_count,
                    metric,
                    a,
                    b,
                    None,
                ));
            }
            for def in CTPROF_DERIVED_METRICS {
                diff.derived_rows.push(build_derived_row(
                    bkey,
                    &display_key,
                    ga.thread_count,
                    merged_thread_count,
                    def,
                    &ga.metrics,
                    &merged_metrics,
                ));
            }
        }
        // Matched keys are no longer "only" on either side.
        diff.only_baseline.retain(|k| !remove_baseline.contains(k));
        diff.only_candidate
            .retain(|k| !remove_candidate.contains(k));
        // Record pairing diagnostics for the report.
        diff.fudged_pairs = fudged_cg
            .iter()
            .map(|(bcg, ccg)| {
                let set_a = cg_types_a.get(bcg).cloned().unwrap_or_default();
                let set_b = cg_types_b.get(ccg).cloned().unwrap_or_default();
                let residual_a: Vec<String> = set_a
                    .difference(&set_b)
                    .map(|(p, c)| format!("{p}:{c}"))
                    .collect();
                let residual_b: Vec<String> = set_b
                    .difference(&set_a)
                    .map(|(p, c)| format!("{p}:{c}"))
                    .collect();
                let intersection = set_a.intersection(&set_b).count();
                let union = set_a.union(&set_b).count();
                FudgedPair {
                    baseline_key: bcg.clone(),
                    candidate_key: ccg.clone(),
                    overlap: intersection,
                    jaccard: if union > 0 {
                        intersection as f64 / union as f64
                    } else {
                        0.0
                    },
                    residual_baseline: residual_a,
                    residual_candidate: residual_b,
                    cascaded_children: cascade_counts.get(bcg).copied().unwrap_or(0),
                    baseline_root: cascade_roots
                        .get(&(bcg.clone(), ccg.clone()))
                        .map(|(b, _)| b.clone())
                        .unwrap_or_else(|| bcg.clone()),
                    candidate_root: cascade_roots
                        .get(&(bcg.clone(), ccg.clone()))
                        .map(|(_, c)| c.clone())
                        .unwrap_or_else(|| ccg.clone()),
                }
            })
            .collect();
    }
    diff.only_baseline.sort();
    diff.only_candidate.sort();
    // --- Uptime: group lifetime relative to the longest-lived group ------
    {
        let mut group_lifetime: BTreeMap<String, u64> = BTreeMap::new();
        for (key, group_b) in &groups_b {
            if groups_a.contains_key(key) {
                group_lifetime.insert(key.clone(), now_b.saturating_sub(group_b.avg_start_ticks));
            }
        }
        // Fudge-matched groups: average lifetime across all candidate
        // groups merged onto the baseline key.
        let mut fudge_lt_sum: BTreeMap<String, (u64, u64)> = BTreeMap::new();
        for (bkey, ckey) in &fudged_key_pairs {
            if let Some(gb) = groups_b.get(ckey) {
                let lt = now_b.saturating_sub(gb.avg_start_ticks);
                let entry = fudge_lt_sum.entry(bkey.clone()).or_insert((0, 0));
                entry.0 += lt;
                entry.1 += 1;
            }
        }
        for (bkey, (sum, count)) in &fudge_lt_sum {
            if *count > 0 {
                group_lifetime.insert(bkey.clone(), sum / count);
            }
        }
        let max_lifetime = group_lifetime.values().copied().max().unwrap_or(1).max(1);
        for row in &mut diff.rows {
            // FIX: this read `if let Some(<)` — a syntax error that could
            // not have compiled. Bind the lifetime and scale to a percent.
            if let Some(lt) = group_lifetime.get(&row.group_key) {
                row.uptime_pct = Some(*lt as f64 / max_lifetime as f64 * 100.0);
            }
        }
    }
    // --- Ordering: default magnitude sort, or explicit --sort-by keys ----
    if opts.sort_by.is_empty() {
        diff.rows.sort_by(|a, b| {
            b.sort_key()
                .partial_cmp(&a.sort_key())
                .unwrap_or(std::cmp::Ordering::Equal)
                .then_with(|| a.group_key.cmp(&b.group_key))
        });
        diff.derived_rows.sort_by(|a, b| {
            b.sort_key()
                .partial_cmp(&a.sort_key())
                .unwrap_or(std::cmp::Ordering::Equal)
                .then_with(|| a.group_key.cmp(&b.group_key))
        });
    } else {
        sort_diff_rows_by_keys(&mut diff.rows, &mut diff.derived_rows, &opts.sort_by);
        let sort_metric = opts.sort_by.first().map(|sk| sk.metric);
        diff.sort_metric_name = sort_metric;
        if let Some(metric_name) = sort_metric {
            // Cache the primary sort metric's "a→b (+pct)" cell per group so
            // every row of the group can display it.
            let mut group_cells: BTreeMap<String, (String, Option<f64>)> = BTreeMap::new();
            for row in &diff.rows {
                if row.metric_name == metric_name && !group_cells.contains_key(&row.group_key) {
                    let b = format_value_cell(&row.baseline, row.metric_ladder);
                    let c = format_value_cell(&row.candidate, row.metric_ladder);
                    let pct = match row.delta_pct {
                        Some(p) => format!(" ({:+.1}%)", p * 100.0),
                        None => String::new(),
                    };
                    group_cells.insert(
                        row.group_key.clone(),
                        (format!("{b}\u{2192}{c}{pct}"), row.delta),
                    );
                }
            }
            for row in &mut diff.rows {
                if let Some((cell, delta)) = group_cells.get(&row.group_key) {
                    row.sort_by_cell = Some(cell.clone());
                    row.sort_by_delta = *delta;
                }
            }
        }
    }
    // --- Side-channel data: cgroup stats, PSI, smaps, sched_ext ----------
    if group_by == GroupBy::Cgroup {
        diff.cgroup_stats_a =
            flatten_cgroup_stats(&baseline.cgroup_stats, &flatten, cgroup_key_map.as_ref());
        diff.cgroup_stats_b =
            flatten_cgroup_stats(&candidate.cgroup_stats, &flatten, cgroup_key_map.as_ref());
    }
    diff.host_psi_a = baseline.psi;
    diff.host_psi_b = candidate.psi;
    if group_by == GroupBy::All {
        diff.smaps_rollup_a = collect_smaps_rollup_hierarchical(
            baseline,
            opts.no_thread_normalize,
            &flatten,
            cgroup_key_map.as_ref(),
        );
        diff.smaps_rollup_b = collect_smaps_rollup_hierarchical(
            candidate,
            opts.no_thread_normalize,
            &flatten,
            cgroup_key_map.as_ref(),
        );
    } else {
        diff.smaps_rollup_a = collect_smaps_rollup(baseline, opts.no_thread_normalize);
        diff.smaps_rollup_b = collect_smaps_rollup(candidate, opts.no_thread_normalize);
    }
    // Remap fudged candidate smaps keys onto the baseline root so the two
    // sides line up key-for-key.
    {
        let mut summed_by_rel: BTreeMap<String, BTreeMap<String, u64>> = BTreeMap::new();
        for fp in &diff.fudged_pairs {
            let cr = &fp.candidate_root;
            let cr_slash = format!("{cr}/");
            let cr_nul = format!("{cr}\x00");
            let keys: Vec<String> = diff
                .smaps_rollup_b
                .keys()
                .filter(|k| {
                    k.starts_with(&cr_slash) || k.starts_with(&cr_nul) || k.as_str() == cr.as_str()
                })
                .cloned()
                .collect();
            for k in keys {
                if let Some(val) = diff.smaps_rollup_b.remove(&k) {
                    let (cg_path, pcomm) = k.split_once('\x00').unwrap_or((&k, ""));
                    let child = if cg_path == cr.as_str() {
                        ""
                    } else if let Some(rest) = cg_path.strip_prefix(&cr_slash) {
                        rest
                    } else {
                        continue;
                    };
                    let rel_key = format!("{child}\x00{pcomm}");
                    let entry = summed_by_rel.entry(rel_key).or_default();
                    for (field, v) in &val {
                        *entry.entry(field.clone()).or_insert(0) += v;
                    }
                }
            }
        }
        if let Some(fp0) = diff.fudged_pairs.first() {
            let br = &fp0.baseline_root;
            for (rel_key, summed) in summed_by_rel {
                let (child, pcomm) = rel_key.split_once('\x00').unwrap_or((&rel_key, ""));
                let base_key = if child.is_empty() {
                    format!("{br}\x00{pcomm}")
                } else {
                    format!("{br}/{child}\x00{pcomm}")
                };
                diff.smaps_rollup_b.insert(base_key, summed);
            }
        }
    }
    diff.sched_ext_a = baseline.sched_ext.clone();
    diff.sched_ext_b = candidate.sched_ext.clone();
    diff
}
/// Sum smaps_rollup bytes per pcomm-pattern key (flat, non-hierarchical).
///
/// Thin wrapper over `collect_smaps_rollup_inner` with the compound-cgroup
/// axis disabled.
pub fn collect_smaps_rollup(
    snap: &CtprofSnapshot,
    no_thread_normalize: bool,
) -> BTreeMap<String, BTreeMap<String, u64>> {
    collect_smaps_rollup_inner(snap, no_thread_normalize, false, &[], None)
}
/// Sum smaps_rollup bytes per compound "cgroup\x00pcomm" key.
///
/// Thin wrapper over `collect_smaps_rollup_inner` with the compound-cgroup
/// axis enabled; `flatten` and `cgroup_key_map` normalize the cgroup part.
pub fn collect_smaps_rollup_hierarchical(
    snap: &CtprofSnapshot,
    no_thread_normalize: bool,
    flatten: &[glob::Pattern],
    cgroup_key_map: Option<&BTreeMap<String, String>>,
) -> BTreeMap<String, BTreeMap<String, u64>> {
    collect_smaps_rollup_inner(snap, no_thread_normalize, true, flatten, cgroup_key_map)
}
/// Shared implementation for the smaps_rollup collectors.
///
/// Folds every thread's smaps_rollup byte fields into a per-key map, where
/// the key is either a pcomm pattern (flat) or "cgroup\x00pcomm" when
/// `compound_cgroup` is set. Field sums saturate rather than overflow.
fn collect_smaps_rollup_inner(
    snap: &CtprofSnapshot,
    no_thread_normalize: bool,
    compound_cgroup: bool,
    flatten: &[glob::Pattern],
    cgroup_key_map: Option<&BTreeMap<String, String>>,
) -> BTreeMap<String, BTreeMap<String, u64>> {
    let mut rollup: BTreeMap<String, BTreeMap<String, u64>> = BTreeMap::new();
    // Threads without smaps data contribute nothing.
    for thread in snap.threads.iter().filter(|t| !t.smaps_rollup_kb.is_empty()) {
        // Per-process identity when normalization is off, otherwise the
        // pattern-collapsed pcomm.
        let pcomm_key = if no_thread_normalize {
            format!("{}[{}]", thread.pcomm, thread.tgid)
        } else {
            pattern_key(&thread.pcomm)
        };
        let key = if compound_cgroup {
            let cg = flatten_cgroup_path(&thread.cgroup, flatten);
            let cg_key = cgroup_key_map
                .and_then(|m| m.get(&cg))
                .cloned()
                .unwrap_or(cg);
            format!("{cg_key}\x00{pcomm_key}")
        } else {
            pcomm_key
        };
        let bucket = rollup.entry(key).or_default();
        for (field, bytes) in thread.smaps_rollup_bytes() {
            let slot = bucket.entry(field.clone()).or_insert(0);
            *slot = slot.saturating_add(bytes.0);
        }
    }
    rollup
}
/// Build the cgroup-path normalization map shared by both snapshots.
///
/// Every flattened cgroup path seen in either snapshot is reduced to a
/// skeleton via `cgroup_normalize_skeleton`. Paths whose skeletons are
/// unique keep the skeleton as their key; paths that collide on a skeleton
/// get a tightened key from `tighten_group` so genuinely distinct trees
/// stay distinguishable. Returns original-path -> normalized-key.
pub fn build_cgroup_key_map(
    baseline: &CtprofSnapshot,
    candidate: &CtprofSnapshot,
    flatten: &[glob::Pattern],
) -> BTreeMap<String, String> {
    use std::collections::BTreeSet;
    // Union of every flattened cgroup path from threads and cgroup stats.
    let mut paths: BTreeSet<String> = BTreeSet::new();
    for snap in [baseline, candidate] {
        paths.extend(
            snap.threads
                .iter()
                .map(|t| flatten_cgroup_path(&t.cgroup, flatten)),
        );
        paths.extend(
            snap.cgroup_stats
                .keys()
                .map(|k| flatten_cgroup_path(k, flatten)),
        );
    }
    // (original, skeleton, post-level-1 path, tokens) per path.
    let entries: Vec<(String, String, String, Vec<String>)> = paths
        .into_iter()
        .map(|p| {
            let (skeleton, post_l1, tokens) = cgroup_normalize_skeleton(&p);
            (p, skeleton, post_l1, tokens)
        })
        .collect();
    // Bucket entry indices by skeleton.
    let mut groups: BTreeMap<String, Vec<usize>> = BTreeMap::new();
    for (idx, entry) in entries.iter().enumerate() {
        groups.entry(entry.1.clone()).or_default().push(idx);
    }
    // Resolve one final key per skeleton: the skeleton itself for
    // singletons, a tightened key for collisions.
    let mut key_by_skeleton: BTreeMap<String, String> = BTreeMap::new();
    for (skeleton, indices) in &groups {
        let key = if indices.len() < 2 {
            skeleton.clone()
        } else {
            let post_l1_paths: Vec<String> =
                indices.iter().map(|&i| entries[i].2.clone()).collect();
            let member_tokens: Vec<Vec<String>> =
                indices.iter().map(|&i| entries[i].3.clone()).collect();
            tighten_group(&post_l1_paths, &member_tokens)
        };
        key_by_skeleton.insert(skeleton.clone(), key);
    }
    entries
        .iter()
        .map(|(orig, skel, _, _)| (orig.clone(), key_by_skeleton[skel].clone()))
        .collect()
}
#[allow(clippy::too_many_arguments)]
/// Assemble one comparison row for `metric` from both sides' aggregates.
///
/// The numeric delta (and the relative delta, as a fraction of baseline)
/// is populated only when both aggregates expose a numeric value; a
/// near-zero baseline suppresses the percentage to avoid division blow-up.
fn build_row(
    key: &str,
    display_key: &str,
    n_a: usize,
    n_b: usize,
    metric: &'static CtprofMetricDef,
    a: Aggregated,
    b: Aggregated,
    uptime_pct: Option<f64>,
) -> DiffRow {
    let mut delta = None;
    let mut delta_pct = None;
    if let (Some(base), Some(cand)) = (a.numeric(), b.numeric()) {
        let diff = cand - base;
        delta = Some(diff);
        if base.abs() > f64::EPSILON {
            delta_pct = Some(diff / base);
        }
    }
    DiffRow {
        group_key: key.to_string(),
        thread_count_a: n_a,
        thread_count_b: n_b,
        uptime_pct,
        metric_name: metric.name,
        metric_ladder: metric.rule.ladder(),
        baseline: a,
        candidate: b,
        delta,
        delta_pct,
        display_key: display_key.to_string(),
        sort_by_cell: None,
        sort_by_delta: None,
    }
}
// Placeholder substituted for a run of decimal digits in a token.
const TOKEN_DIGIT_PLACEHOLDER: &str = "{N}";
// Placeholder substituted for a hex-looking token (ids, hashes, ...).
const TOKEN_HEX_PLACEHOLDER: &str = "{H}";
// Placeholder substituted for a systemd template instance (`foo@<inst>.service`).
const TOKEN_INSTANCE_PLACEHOLDER: &str = "{I}";
// Token classification rules, compiled once on first use (see classify_token
// for the matching order).
static TOKEN_RULE_PURE_DIGITS: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"^[0-9]+$").unwrap());
static TOKEN_RULE_HEX_LIKE: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"^[0-9a-f]{2,}$").unwrap());
static TOKEN_RULE_ALPHA_PREFIX_DIGITS: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"^([A-Za-z]+)[0-9]+$").unwrap());
static TOKEN_RULE_DIGITS_ALPHA_SUFFIX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"^[0-9]+([A-Za-z]+)$").unwrap());
/// Collapse the variable parts of a single token into stable placeholders.
///
/// Matching order matters: pure digits win over hex-like, and hex-like
/// additionally requires at least one real digit so purely alphabetic words
/// (e.g. "beef") pass through verbatim.
fn classify_token(t: &str) -> String {
    if t.is_empty() {
        return String::new();
    }
    if TOKEN_RULE_PURE_DIGITS.is_match(t) {
        return TOKEN_DIGIT_PLACEHOLDER.to_string();
    }
    let has_digit = t.chars().any(|c| c.is_ascii_digit());
    if has_digit && TOKEN_RULE_HEX_LIKE.is_match(t) {
        return TOKEN_HEX_PLACEHOLDER.to_string();
    }
    if let Some(caps) = TOKEN_RULE_ALPHA_PREFIX_DIGITS.captures(t) {
        let mut keyed = caps[1].to_string();
        keyed.push_str(TOKEN_DIGIT_PLACEHOLDER);
        return keyed;
    }
    if let Some(caps) = TOKEN_RULE_DIGITS_ALPHA_SUFFIX.captures(t) {
        let mut keyed = TOKEN_DIGIT_PLACEHOLDER.to_string();
        keyed.push_str(&caps[1]);
        return keyed;
    }
    t.to_string()
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Segment<'a> {
Token(&'a str),
Separator(&'a str),
}
fn is_token_separator(c: char) -> bool {
matches!(c, '.' | '-' | '_' | '/' | ':' | '@' | '+' | '[' | ']') || c.is_whitespace()
}
fn split_into_segments(s: &str) -> Vec<Segment<'_>> {
let mut out = Vec::new();
if s.is_empty() {
return out;
}
let mut chars = s.char_indices().peekable();
while let Some(&(start, first_c)) = chars.peek() {
let is_sep = is_token_separator(first_c);
let mut end = start;
while let Some(&(idx, c)) = chars.peek() {
if is_token_separator(c) != is_sep {
break;
}
end = idx + c.len_utf8();
chars.next();
}
let slice = &s[start..end];
if is_sep {
out.push(Segment::Separator(slice));
} else {
out.push(Segment::Token(slice));
}
}
out
}
/// Normalize a thread/process name into its placeholder pattern form:
/// tokens are classified, separators are kept verbatim.
fn pattern_key(name: &str) -> String {
    split_into_segments(name)
        .into_iter()
        .map(|seg| match seg {
            Segment::Token(t) => classify_token(t),
            Segment::Separator(s) => s.to_string(),
        })
        .collect()
}
/// Rewrite systemd template instances in a cgroup path: each path segment of
/// the form `name@<instance>.service` has its instance replaced with the
/// `{I}` placeholder so differently-instanced units compare equal.
///
/// Instances that are empty or contain '.', '_' or '-' are left verbatim
/// (their separators are handled by the generic tokenizer instead).
fn apply_systemd_template(path: &str) -> String {
    let mut out = String::new();
    let mut rest = path;
    // Walk '@' occurrences left to right, copying everything before each.
    while let Some(at_idx) = rest.find('@') {
        out.push_str(&rest[..at_idx]);
        out.push('@');
        let after_at = &rest[at_idx + 1..];
        // The candidate instance runs up to the next '/' (or end of path).
        let segment_end = after_at.find('/').unwrap_or(after_at.len());
        let segment = &after_at[..segment_end];
        if let Some(instance) = segment.strip_suffix(".service") {
            if instance.is_empty() || instance.contains(['.', '_', '-']) {
                // Not a clean instance name: keep the segment as-is.
                out.push_str(segment);
            } else {
                out.push_str(TOKEN_INSTANCE_PLACEHOLDER);
                out.push_str(".service");
            }
            rest = &after_at[segment_end..];
        } else {
            // No `.service` suffix: only the '@' itself was consumed, so any
            // later '@' within this same segment is still considered.
            rest = after_at;
        }
    }
    out.push_str(rest);
    out
}
/// Derive the placeholder skeleton of `post_l1` and collect its raw tokens.
/// Separators contribute to the skeleton but not to the token list.
fn cgroup_skeleton_tokens(post_l1: &str) -> (String, Vec<String>) {
    let mut skeleton = String::new();
    let mut raw_tokens: Vec<String> = Vec::new();
    for segment in split_into_segments(post_l1) {
        match segment {
            Segment::Token(tok) => {
                raw_tokens.push(tok.to_string());
                skeleton.push_str(&classify_token(tok));
            }
            Segment::Separator(sep) => skeleton.push_str(sep),
        }
    }
    (skeleton, raw_tokens)
}
/// Collapse a skeleton-group of cgroup paths into one display key: a token
/// position stays literal only when every member has the same token there
/// AND that token classifies to itself (i.e. carries no variable part);
/// every other position keeps its placeholder form.
///
/// Invariant: callers group members by identical skeletons, so each member
/// has the same number of tokens as the representative — the positional
/// indexing below panics otherwise (NOTE(review): relied upon, not checked).
fn tighten_group(post_l1_paths: &[String], member_tokens: &[Vec<String>]) -> String {
    // The first member supplies the separator layout for the whole group.
    let representative = match post_l1_paths.first() {
        Some(p) => p,
        None => return String::new(),
    };
    let segments = split_into_segments(representative);
    let mut out = String::new();
    let mut token_pos = 0;
    for seg in segments {
        match seg {
            Segment::Token(_) => {
                let first = &member_tokens[0][token_pos];
                let classified = classify_token(first);
                let all_equal = member_tokens
                    .iter()
                    .all(|tokens| &tokens[token_pos] == first);
                if all_equal && classified == *first {
                    out.push_str(first);
                } else {
                    out.push_str(&classified);
                }
                token_pos += 1;
            }
            Segment::Separator(s) => {
                out.push_str(s);
            }
        }
    }
    out
}
/// Normalize a cgroup path: apply the systemd template rewrite first, then
/// derive the placeholder skeleton and raw token list.
/// Returns `(skeleton, post_template_path, tokens)`.
fn cgroup_normalize_skeleton(path: &str) -> (String, String, Vec<String>) {
    let templated = apply_systemd_template(path);
    let (skeleton, tokens) = cgroup_skeleton_tokens(&templated);
    (skeleton, templated, tokens)
}
/// Display label for a pattern group: for multi-member groups, prefer a
/// grex-generated regex over the placeholder key when it is no longer.
pub fn pattern_display_label(key: &str, members: &[String]) -> String {
    if members.len() < 2 {
        return key.to_string();
    }
    let generated = grex::RegExpBuilder::from(members).build();
    if generated.len() <= key.len() {
        return generated;
    }
    key.to_string()
}
/// Count occurrences of each pattern key over the union of both snapshots'
/// threads; `field` selects which name (comm/pcomm) is normalized.
fn pattern_counts_union(
    baseline: &CtprofSnapshot,
    candidate: &CtprofSnapshot,
    field: fn(&ThreadState) -> &str,
) -> BTreeMap<String, usize> {
    let mut tallies: BTreeMap<String, usize> = BTreeMap::new();
    for thread in baseline.threads.iter().chain(&candidate.threads) {
        let key = pattern_key(field(thread));
        *tallies.entry(key).or_default() += 1;
    }
    tallies
}
/// Bucket a snapshot's threads into groups according to `group_by`, then
/// aggregate every metric in `CTPROF_METRICS` per group.
///
/// * `flatten` — glob patterns that collapse cgroup subtrees to one key.
/// * `pattern_counts` — optional precomputed pattern-key counts (typically
///   over the union of two snapshots); when absent but needed, counts are
///   computed locally so singleton patterns keep their literal name.
/// * `cgroup_key_map` — optional tightened display keys for cgroup paths.
/// * `no_thread_normalize` — disables pattern normalization of comm/pcomm.
pub fn build_groups(
    snap: &CtprofSnapshot,
    group_by: GroupBy,
    flatten: &[glob::Pattern],
    pattern_counts: Option<&BTreeMap<String, usize>>,
    cgroup_key_map: Option<&BTreeMap<String, String>>,
    no_thread_normalize: bool,
) -> BTreeMap<String, ThreadGroup> {
    // Which name field is pattern-normalized; None disables normalization.
    let pattern_field: Option<fn(&ThreadState) -> &str> = match (group_by, no_thread_normalize) {
        (GroupBy::Comm | GroupBy::All, false) => Some(|t: &ThreadState| t.comm.as_str()),
        (GroupBy::Pcomm, false) => Some(|t: &ThreadState| t.pcomm.as_str()),
        _ => None,
    };
    // Fall back to single-snapshot counts when the caller didn't seed any.
    let local_counts: Option<BTreeMap<String, usize>> = match (pattern_field, pattern_counts) {
        (Some(field), None) => {
            let mut counts: BTreeMap<String, usize> = BTreeMap::new();
            for t in &snap.threads {
                *counts.entry(pattern_key(field(t))).or_insert(0) += 1;
            }
            Some(counts)
        }
        _ => None,
    };
    let counts_ref: Option<&BTreeMap<String, usize>> = pattern_counts.or(local_counts.as_ref());
    // Phase 1: bucket threads by their group key.
    let mut buckets: BTreeMap<String, Vec<&ThreadState>> = BTreeMap::new();
    for t in &snap.threads {
        let key = match group_by {
            GroupBy::All => {
                // Compound key: cgroup, pcomm and comm, NUL-separated.
                let cg = flatten_cgroup_path(&t.cgroup, flatten);
                let cg_key = match cgroup_key_map.and_then(|m| m.get(&cg)) {
                    Some(k) => k.clone(),
                    None => cg,
                };
                let pcomm_key = if no_thread_normalize {
                    t.pcomm.clone()
                } else {
                    pattern_key(&t.pcomm)
                };
                let comm_key = if no_thread_normalize {
                    t.comm.clone()
                } else {
                    pattern_key(&t.comm)
                };
                format!("{cg_key}\x00{pcomm_key}\x00{comm_key}")
            }
            GroupBy::Pcomm | GroupBy::Comm => match pattern_field {
                Some(field) => {
                    let name = field(t);
                    let pk = pattern_key(name);
                    let counts = counts_ref.expect("pattern_counts seeded for Pcomm/Comm");
                    // Only patterns with >= 2 members use the pattern key;
                    // singletons keep their literal name.
                    if counts.get(&pk).copied().unwrap_or(0) >= 2 {
                        pk
                    } else {
                        name.to_string()
                    }
                }
                None => {
                    if group_by == GroupBy::Pcomm {
                        t.pcomm.clone()
                    } else {
                        t.comm.clone()
                    }
                }
            },
            GroupBy::CommExact => t.comm.clone(),
            GroupBy::Cgroup => {
                let post_flatten = flatten_cgroup_path(&t.cgroup, flatten);
                match cgroup_key_map.and_then(|m| m.get(&post_flatten)) {
                    Some(k) => k.clone(),
                    None => post_flatten,
                }
            }
        };
        buckets.entry(key).or_default().push(t);
    }
    // Phase 2: aggregate each bucket into a ThreadGroup.
    let mut out = BTreeMap::new();
    for (key, threads) in buckets {
        let mut metrics = BTreeMap::new();
        for m in CTPROF_METRICS {
            metrics.insert(m.name.to_string(), aggregate(m.rule, &threads));
        }
        // Under Cgroup grouping, stats come from the first member's raw
        // cgroup path. NOTE(review): if flattening merged multiple raw
        // paths into this group, only the first member's stats are used.
        let cgroup_stats = if group_by == GroupBy::Cgroup {
            threads
                .first()
                .and_then(|t| snap.cgroup_stats.get(&t.cgroup).cloned())
        } else {
            None
        };
        // Distinct member names, for regex-based display labels.
        let members: Vec<String> = match pattern_field {
            Some(field) => {
                let mut v: Vec<String> = threads.iter().map(|t| field(t).to_string()).collect();
                v.sort();
                v.dedup();
                v
            }
            None => Vec::new(),
        };
        // Mean start time over members with a known (non-zero) start tick.
        let valid_starts: Vec<u64> = threads
            .iter()
            .map(|t| t.start_time_clock_ticks)
            .filter(|&t| t > 0)
            .collect();
        let avg_start_ticks = if valid_starts.is_empty() {
            0
        } else {
            valid_starts.iter().sum::<u64>() / valid_starts.len() as u64
        };
        out.insert(
            key.clone(),
            ThreadGroup {
                key,
                thread_count: threads.len(),
                metrics,
                cgroup_stats,
                members,
                avg_start_ticks,
            },
        );
    }
    out
}
/// Summarize a categorical metric as its most common value.
///
/// `total` is the caller-supplied group size; an empty input yields an
/// empty-string mode with count 0.
fn mode_aggregate(
    total: usize,
    items: impl IntoIterator<Item = crate::metric_types::CategoricalString>,
) -> Aggregated {
    use crate::metric_types::{CategoricalString, Modeable};
    let (value, count) = match CategoricalString::mode_across(items) {
        Some((winner, n, _)) => (winner.0, n),
        None => (String::new(), 0),
    };
    Aggregated::Mode { value, count, total }
}
/// Reduce a metric over a group of threads according to its `AggRule`:
/// monotonic counters/durations/bytes are summed, peaks and gauges take the
/// max, ordinals report a min..max range, categoricals their mode, and CPU
/// affinity a summary (uniform set if all members agree).
pub fn aggregate(rule: AggRule, threads: &[&ThreadState]) -> Aggregated {
    use crate::metric_types::{CategoricalString, Maxable, Rangeable, Summable};
    match rule {
        AggRule::SumCount(f) => {
            let s = crate::metric_types::MonotonicCount::sum_across(threads.iter().map(|t| f(t)));
            Aggregated::Sum(s.0)
        }
        AggRule::SumNs(f) => {
            let s = crate::metric_types::MonotonicNs::sum_across(threads.iter().map(|t| f(t)));
            Aggregated::Sum(s.0)
        }
        AggRule::SumTicks(f) => {
            let s = crate::metric_types::ClockTicks::sum_across(threads.iter().map(|t| f(t)));
            Aggregated::Sum(s.0)
        }
        AggRule::SumBytes(f) => {
            let s = crate::metric_types::Bytes::sum_across(threads.iter().map(|t| f(t)));
            Aggregated::Sum(s.0)
        }
        // Max-style rules report 0 for an empty group.
        AggRule::MaxPeak(f) => {
            let m = crate::metric_types::PeakNs::max_across(threads.iter().map(|t| f(t)));
            Aggregated::Max(m.map(|v| v.0).unwrap_or(0))
        }
        AggRule::MaxPeakBytes(f) => {
            let m = crate::metric_types::PeakBytes::max_across(threads.iter().map(|t| f(t)));
            Aggregated::Max(m.map(|v| v.0).unwrap_or(0))
        }
        AggRule::MaxGaugeNs(f) => {
            let m = crate::metric_types::GaugeNs::max_across(threads.iter().map(|t| f(t)));
            Aggregated::Max(m.map(|v| v.0).unwrap_or(0))
        }
        AggRule::MaxGaugeCount(f) => {
            let m = crate::metric_types::GaugeCount::max_across(threads.iter().map(|t| f(t)));
            Aggregated::Max(m.map(|v| v.0).unwrap_or(0))
        }
        // Range rules widen to i64 so both i32 and u32 ordinals fit.
        AggRule::RangeI32(f) => {
            match crate::metric_types::OrdinalI32::range_across(threads.iter().map(|t| f(t))) {
                Some(r) => {
                    let (min, max) = r.into_tuple();
                    Aggregated::OrdinalRange {
                        min: i64::from(min.0),
                        max: i64::from(max.0),
                    }
                }
                None => Aggregated::OrdinalRange { min: 0, max: 0 },
            }
        }
        AggRule::RangeU32(f) => {
            match crate::metric_types::OrdinalU32::range_across(threads.iter().map(|t| f(t))) {
                Some(r) => {
                    let (min, max) = r.into_tuple();
                    Aggregated::OrdinalRange {
                        min: i64::from(min.0),
                        max: i64::from(max.0),
                    }
                }
                None => Aggregated::OrdinalRange { min: 0, max: 0 },
            }
        }
        AggRule::Mode(f) => mode_aggregate(threads.len(), threads.iter().map(|t| f(t))),
        // char/bool modes are stringified and share the categorical path.
        AggRule::ModeChar(f) => mode_aggregate(
            threads.len(),
            threads.iter().map(|t| CategoricalString(f(t).to_string())),
        ),
        AggRule::ModeBool(f) => mode_aggregate(
            threads.len(),
            threads.iter().map(|t| CategoricalString(f(t).to_string())),
        ),
        AggRule::Affinity(f) => {
            // Collect distinct CPU sets plus min/max set sizes across members.
            let mut seen: Vec<Vec<u32>> = Vec::new();
            let mut min_cpus = usize::MAX;
            let mut max_cpus = 0usize;
            for t in threads {
                let cpus = f(t).0;
                min_cpus = min_cpus.min(cpus.len());
                max_cpus = max_cpus.max(cpus.len());
                if !seen.iter().any(|s| s == &cpus) {
                    seen.push(cpus);
                }
            }
            if threads.is_empty() {
                min_cpus = 0;
            }
            // `uniform` is the shared set only if every member agrees.
            let uniform = if seen.len() == 1 {
                seen.into_iter().next()
            } else {
                None
            };
            Aggregated::Affinity(AffinitySummary {
                min_cpus,
                max_cpus,
                uniform,
            })
        }
    }
}
/// Replace `path` with the text of the first flatten pattern that matches
/// it; a path matching no pattern is returned unchanged.
pub fn flatten_cgroup_path(path: &str, patterns: &[glob::Pattern]) -> String {
    patterns
        .iter()
        .find(|pat| pat.matches(path))
        .map(|pat| pat.as_str().to_string())
        .unwrap_or_else(|| path.to_string())
}
/// Compile the raw `--cgroup-flatten` strings into glob patterns.
/// NOTE(review): strings that fail to compile are silently dropped — verify
/// callers surface invalid patterns elsewhere.
pub fn compile_flatten_patterns(raw: &[String]) -> Vec<glob::Pattern> {
    let mut compiled = Vec::with_capacity(raw.len());
    for s in raw {
        if let Ok(p) = glob::Pattern::new(s) {
            compiled.push(p);
        }
    }
    compiled
}
/// Parse a `--sort-by` spec: comma-separated `metric[:asc|desc]` entries
/// (direction defaults to `desc`). Metrics may be primary (numeric rules
/// only — categorical Mode* rules are rejected) or derived; duplicates and
/// unknown names are errors. An empty spec yields no sort keys.
pub fn parse_sort_by(spec: &str) -> anyhow::Result<Vec<SortKey>> {
    if spec.is_empty() {
        return Ok(Vec::new());
    }
    // Name -> definition lookup for the primary metric table.
    let registry: std::collections::BTreeMap<&'static str, &'static CtprofMetricDef> =
        CTPROF_METRICS.iter().map(|m| (m.name, m)).collect();
    let mut out: Vec<SortKey> = Vec::new();
    let mut seen: std::collections::BTreeSet<&'static str> = std::collections::BTreeSet::new();
    for entry in spec.split(',') {
        let entry = entry.trim();
        if entry.is_empty() {
            anyhow::bail!(
                "empty entry in --sort-by spec {spec:?}; \
                 entries are comma-separated and must be non-empty"
            );
        }
        let (metric, descending) = match entry.split_once(':') {
            Some((m, dir)) => {
                let dir_norm = dir.trim().to_ascii_lowercase();
                match dir_norm.as_str() {
                    "desc" => (m, true),
                    "asc" => (m, false),
                    _ => anyhow::bail!(
                        "invalid direction {dir:?} in --sort-by entry \
                         {entry:?}; expected `asc` or `desc`"
                    ),
                }
            }
            // No direction suffix: default to descending.
            None => (entry, true),
        };
        let metric = metric.trim();
        // Resolve against primary metrics first, then derived metrics.
        let resolved_name: Option<&'static str> = if let Some(def) = registry.get(metric).copied() {
            if matches!(
                def.rule,
                AggRule::Mode(_) | AggRule::ModeChar(_) | AggRule::ModeBool(_),
            ) {
                anyhow::bail!(
                    "metric {metric:?} is categorical (no numeric value to sort by); \
                     --sort-by accepts only metrics whose AggRule yields a scalar \
                     (Sum*, Max*, Range*, or Affinity)"
                );
            }
            Some(def.name)
        } else {
            CTPROF_DERIVED_METRICS
                .iter()
                .find(|d| d.name == metric)
                .map(|d| d.name)
        };
        let Some(canonical) = resolved_name else {
            // Unknown metric: list every valid name in the error.
            let mut valid: Vec<&'static str> = registry.keys().copied().collect();
            for d in CTPROF_DERIVED_METRICS {
                valid.push(d.name);
            }
            valid.sort();
            let valid = valid.join(", ");
            anyhow::bail!(
                "unknown metric {metric:?} in --sort-by spec {spec:?}; \
                 use the bare metric name, not the rendered cell with \
                 [tag] suffixes; must be one of: {valid}",
            );
        };
        if !seen.insert(canonical) {
            anyhow::bail!(
                "duplicate metric {metric:?} in --sort-by spec {spec:?}; \
                 each metric may appear at most once across all sort keys"
            );
        }
        out.push(SortKey {
            metric: canonical,
            descending,
        });
    }
    Ok(out)
}
/// Re-key per-cgroup stats through the flatten patterns (and the optional
/// tightened-key map), merging stats whose paths collapse to the same key.
///
/// CPU/pids counters are summed, memory gauges and limits merged per their
/// semantics, and PSI combined (averages max'd, totals summed) — see the
/// `merge_*` helpers below.
pub fn flatten_cgroup_stats(
    stats: &BTreeMap<String, CgroupStats>,
    patterns: &[glob::Pattern],
    cgroup_key_map: Option<&BTreeMap<String, String>>,
) -> BTreeMap<String, CgroupStats> {
    let mut out: BTreeMap<String, CgroupStats> = BTreeMap::new();
    for (path, cs) in stats {
        let post_flatten = flatten_cgroup_path(path, patterns);
        // Prefer the tightened display key when one exists for this path.
        let key = match cgroup_key_map.and_then(|m| m.get(&post_flatten)) {
            Some(k) => k.clone(),
            None => post_flatten,
        };
        // Single-lookup upsert via the entry API; the previous
        // get_mut-then-insert pair walked the tree twice per miss.
        out.entry(key)
            .and_modify(|agg| {
                merge_cgroup_cpu(&mut agg.cpu, &cs.cpu);
                merge_cgroup_memory(&mut agg.memory, &cs.memory);
                merge_cgroup_pids(&mut agg.pids, &cs.pids);
                agg.psi = merge_psi(agg.psi, cs.psi);
            })
            .or_insert_with(|| cs.clone());
    }
    out
}
/// Merge two PSI snapshots resource-by-resource (cpu/memory/io/irq).
fn merge_psi(a: Psi, b: Psi) -> Psi {
    Psi {
        cpu: merge_psi_resource(a.cpu, b.cpu),
        memory: merge_psi_resource(a.memory, b.memory),
        io: merge_psi_resource(a.io, b.io),
        irq: merge_psi_resource(a.irq, b.irq),
    }
}
/// Merge both halves ("some"/"full") of one PSI resource.
fn merge_psi_resource(a: PsiResource, b: PsiResource) -> PsiResource {
    PsiResource {
        some: merge_psi_half(a.some, b.some),
        full: merge_psi_half(a.full, b.full),
    }
}
/// Merge one PSI half: rolling averages take the worst (max) of the two
/// sides, while the cumulative stall total is summed (saturating).
fn merge_psi_half(a: PsiHalf, b: PsiHalf) -> PsiHalf {
    PsiHalf {
        avg10: a.avg10.max(b.avg10),
        avg60: a.avg60.max(b.avg60),
        avg300: a.avg300.max(b.avg300),
        total_usec: a.total_usec.saturating_add(b.total_usec),
    }
}
/// Fold `src` CPU stats into `agg`: usage and throttling counters are
/// summed (saturating); limits and weights take the max of the two sides.
/// Like `merge_max_option`, `weight_nice` becomes `None` whenever either
/// side lacks it — mixed presence is treated as unknown.
fn merge_cgroup_cpu(agg: &mut CgroupCpuStats, src: &CgroupCpuStats) {
    agg.usage_usec = agg.usage_usec.saturating_add(src.usage_usec);
    agg.nr_throttled = agg.nr_throttled.saturating_add(src.nr_throttled);
    agg.throttled_usec = agg.throttled_usec.saturating_add(src.throttled_usec);
    agg.max_quota_us = merge_max_option(agg.max_quota_us, src.max_quota_us);
    agg.max_period_us = agg.max_period_us.max(src.max_period_us);
    agg.weight = merge_max_option(agg.weight, src.weight);
    agg.weight_nice = match (agg.weight_nice, src.weight_nice) {
        (Some(a), Some(b)) => Some(a.max(b)),
        (Some(_), None) | (None, Some(_)) | (None, None) => None,
    };
}
/// Fold `src` memory stats into `agg`: `current` (a gauge) takes the max,
/// upper limits (`max`/`high`) merge via max, protections (`low`/`min`)
/// via min; detailed stat keys and event counters merge per their kind.
fn merge_cgroup_memory(agg: &mut CgroupMemoryStats, src: &CgroupMemoryStats) {
    agg.current = agg.current.max(src.current);
    agg.max = merge_max_option(agg.max, src.max);
    agg.high = merge_max_option(agg.high, src.high);
    agg.low = merge_min_option(agg.low, src.low);
    agg.min = merge_min_option(agg.min, src.min);
    merge_memory_stat(&mut agg.stat, &src.stat);
    merge_kv_counters(&mut agg.events, &src.events);
}
/// memory.stat keys that are point-in-time gauges (merged via max) rather
/// than monotonically increasing counters (merged via saturating sum).
const MEMORY_STAT_GAUGE_KEYS: &[&str] = &[
    "anon",
    "file",
    "kernel",
    "kernel_stack",
    "pagetables",
    "sec_pagetables",
    "percpu",
    "sock",
    "vmalloc",
    "shmem",
    "zswap",
    "zswapped",
    "file_mapped",
    "file_dirty",
    "file_writeback",
    "swapcached",
    "anon_thp",
    "file_thp",
    "shmem_thp",
    "inactive_anon",
    "active_anon",
    "inactive_file",
    "active_file",
    "unevictable",
    "slab_reclaimable",
    "slab_unreclaimable",
    "slab",
    "hugetlb",
];
/// Fold `src` into `agg`: gauge keys take the max, counter keys the
/// saturating sum; missing keys are inserted with the incoming value.
fn merge_memory_stat(agg: &mut BTreeMap<String, u64>, src: &BTreeMap<String, u64>) {
    for (key, &incoming) in src {
        let slot = agg.entry(key.clone()).or_insert(0);
        if MEMORY_STAT_GAUGE_KEYS.contains(&key.as_str()) {
            *slot = (*slot).max(incoming);
        } else {
            *slot = slot.saturating_add(incoming);
        }
    }
}
/// Fold `src` pids stats into `agg`: `current` counts sum across merged
/// cgroups (a lone Some passes through); the `max` limit merges like the
/// other limits (mixed presence yields None, i.e. unknown/unlimited).
fn merge_cgroup_pids(agg: &mut CgroupPidsStats, src: &CgroupPidsStats) {
    agg.current = match (agg.current, src.current) {
        (Some(a), Some(b)) => Some(a.saturating_add(b)),
        (Some(v), None) | (None, Some(v)) => Some(v),
        (None, None) => None,
    };
    agg.max = merge_max_option(agg.max, src.max);
}
/// Merge two optional upper limits where `None` means "max"/unlimited (see
/// `format_optional_limit`): the result is finite only when both sides are.
fn merge_max_option(a: Option<u64>, b: Option<u64>) -> Option<u64> {
    a.zip(b).map(|(x, y)| x.max(y))
}
/// Merge two optional lower bounds; mirrors `merge_max_option` in treating
/// mixed presence as unknown (`None`), taking the min when both are set.
fn merge_min_option(a: Option<u64>, b: Option<u64>) -> Option<u64> {
    a.zip(b).map(|(x, y)| x.min(y))
}
/// Saturating-add every counter in `src` into `agg`, inserting missing keys.
fn merge_kv_counters(agg: &mut BTreeMap<String, u64>, src: &BTreeMap<String, u64>) {
    for (key, &delta) in src {
        let slot = agg.entry(key.clone()).or_insert(0);
        *slot = slot.saturating_add(delta);
    }
}
/// Render a sorted list of CPU ids in compact kernel-style range notation,
/// e.g. `[0, 1, 2, 5]` -> `"0-2,5"`. An empty slice yields `""`.
fn format_cpu_range(cpus: &[u32]) -> String {
    // Appends one "start" or "start-prev" chunk, comma-separated. The
    // original duplicated this logic inline in two places.
    fn flush(out: &mut String, start: u32, prev: u32) {
        if !out.is_empty() {
            out.push(',');
        }
        if start == prev {
            out.push_str(&start.to_string());
        } else {
            out.push_str(&format!("{start}-{prev}"));
        }
    }
    let Some((&first, rest)) = cpus.split_first() else {
        return String::new();
    };
    let mut out = String::new();
    let (mut start, mut prev) = (first, first);
    for &c in rest {
        // checked_add avoids the debug-build overflow panic the original's
        // `prev + 1` hit when prev == u32::MAX.
        if Some(c) == prev.checked_add(1) {
            prev = c;
            continue;
        }
        flush(&mut out, start, prev);
        start = c;
        prev = c;
    }
    flush(&mut out, start, prev);
    out
}
/// Which family of units a metric's values scale through when rendered.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ScaleLadder {
    Ns,
    Us,
    Bytes,
    Ticks,
    Unitless,
    None,
}
impl ScaleLadder {
    /// The unscaled unit suffix for this ladder ("" for unitless ladders).
    pub fn base_unit(&self) -> &'static str {
        match self {
            Self::Ns => "ns",
            Self::Us => "µs",
            Self::Bytes => "B",
            Self::Ticks => "ticks",
            Self::Unitless | Self::None => "",
        }
    }
}
impl AggRule {
    /// The unit ladder a rule's aggregated values are rendered with.
    /// Range, mode and affinity rules carry no scalable unit.
    pub fn ladder(&self) -> ScaleLadder {
        match self {
            AggRule::SumCount(_) => ScaleLadder::Unitless,
            AggRule::SumNs(_) => ScaleLadder::Ns,
            AggRule::SumTicks(_) => ScaleLadder::Ticks,
            AggRule::SumBytes(_) => ScaleLadder::Bytes,
            AggRule::MaxPeak(_) => ScaleLadder::Ns,
            AggRule::MaxPeakBytes(_) => ScaleLadder::Bytes,
            AggRule::MaxGaugeNs(_) => ScaleLadder::Ns,
            AggRule::MaxGaugeCount(_) => ScaleLadder::Unitless,
            AggRule::RangeI32(_)
            | AggRule::RangeU32(_)
            | AggRule::Mode(_)
            | AggRule::ModeChar(_)
            | AggRule::ModeBool(_)
            | AggRule::Affinity(_) => ScaleLadder::None,
        }
    }
}
/// Pick the largest unit on `ladder` whose threshold `|value|` reaches and
/// return the rescaled value plus its unit; falling through every threshold
/// keeps the value in the ladder's base unit.
fn auto_scale(value: f64, ladder: ScaleLadder) -> (f64, &'static str) {
    // (threshold, unit) pairs, largest first.
    let steps: &[(f64, &'static str)] = match ladder {
        ScaleLadder::Ns => &[(1e9, "s"), (1e6, "ms"), (1e3, "µs")],
        ScaleLadder::Us => &[(1e6, "s"), (1e3, "ms")],
        ScaleLadder::Bytes => &[
            (1_099_511_627_776.0, "TiB"),
            (1_073_741_824.0, "GiB"),
            (1_048_576.0, "MiB"),
            (1024.0, "KiB"),
        ],
        ScaleLadder::Ticks => &[(1e6, "Mticks"), (1e3, "Kticks")],
        ScaleLadder::Unitless => &[(1e9, "G"), (1e6, "M"), (1e3, "K")],
        ScaleLadder::None => &[],
    };
    let magnitude = value.abs();
    for &(threshold, unit) in steps {
        if magnitude >= threshold {
            return (value / threshold, unit);
        }
    }
    (value, ladder.base_unit())
}
/// Render an aggregated value cell: sums and maxima get unit auto-scaling;
/// every other shape uses its Display form plus the ladder's base unit.
pub fn format_value_cell(agg: &Aggregated, ladder: ScaleLadder) -> String {
    match agg {
        Aggregated::Sum(v) | Aggregated::Max(v) => format_scaled_u64(*v, ladder),
        other => format!("{other}{}", ladder.base_unit()),
    }
}
/// Render `v` with unit auto-scaling; values that stay in the base unit are
/// printed as exact integers rather than with three decimals.
pub fn format_scaled_u64(v: u64, ladder: ScaleLadder) -> String {
    let (scaled, unit) = auto_scale(v as f64, ladder);
    if unit == ladder.base_unit() {
        return format!("{v}{unit}");
    }
    format!("{scaled:.3}{unit}")
}
/// Render a derived-metric value: ratios always print with three decimals;
/// other values auto-scale (two decimals unscaled, three when scaled).
pub fn format_derived_value_cell(v: DerivedValue, ladder: ScaleLadder, is_ratio: bool) -> String {
    let raw = v.as_f64();
    if is_ratio {
        return format!("{raw:.3}");
    }
    let (scaled, unit) = auto_scale(raw, ladder);
    if unit == ladder.base_unit() {
        format!("{raw:.2}{unit}")
    } else {
        format!("{scaled:.3}{unit}")
    }
}
/// Render a signed derived-metric delta; mirrors `format_derived_value_cell`
/// but always carries an explicit sign.
pub fn format_derived_delta_cell(d: f64, ladder: ScaleLadder, is_ratio: bool) -> String {
    if is_ratio {
        return format!("{d:+.3}");
    }
    let (scaled, unit) = auto_scale(d, ladder);
    if unit == ladder.base_unit() {
        format!("{d:+.2}{unit}")
    } else {
        format!("{scaled:+.3}{unit}")
    }
}
/// Render an optional limit; `None` prints as the literal "max" (unlimited).
pub fn format_optional_limit(v: Option<u64>, ladder: ScaleLadder) -> String {
    v.map_or_else(|| "max".to_string(), |n| format_scaled_u64(n, ladder))
}
/// Render a cgroup `cpu.max` pair as "quota/period" (quota `None` = "max").
pub fn format_cpu_max(quota: Option<u64>, period_us: u64) -> String {
    let quota_cell = quota.map_or_else(
        || "max".to_string(),
        |q| format_scaled_u64(q, ScaleLadder::Us),
    );
    let period_cell = format_scaled_u64(period_us, ScaleLadder::Us);
    format!("{quota_cell}/{period_cell}")
}
/// Render a limit comparison cell: a single value when both sides match,
/// otherwise "baseline → candidate".
pub fn cgroup_optional_limit_cell(
    baseline: Option<u64>,
    candidate: Option<u64>,
    ladder: ScaleLadder,
) -> String {
    let before = format_optional_limit(baseline, ladder);
    if baseline == candidate {
        return before;
    }
    let after = format_optional_limit(candidate, ladder);
    format!("{before} → {after}")
}
/// Render a `cpu.max` comparison cell; a missing side prints "-", and equal
/// renderings collapse to a single value.
pub fn cgroup_limits_cell(
    baseline: Option<(Option<u64>, u64)>,
    candidate: Option<(Option<u64>, u64)>,
) -> String {
    fn render(pair: Option<(Option<u64>, u64)>) -> String {
        match pair {
            Some((quota, period)) => format_cpu_max(quota, period),
            None => "-".to_string(),
        }
    }
    let before = render(baseline);
    let after = render(candidate);
    if before == after {
        before
    } else {
        format!("{before} → {after}")
    }
}
/// Render a signed delta; an integral value that stays in the base unit
/// prints without decimals, everything else with three.
fn format_delta_cell(delta: f64, ladder: ScaleLadder) -> String {
    let (scaled, unit) = auto_scale(delta, ladder);
    let unscaled_integer = unit == ladder.base_unit() && delta.fract() == 0.0;
    if unscaled_integer {
        format!("{:+}{unit}", delta as i64)
    } else {
        format!("{scaled:+.3}{unit}")
    }
}
/// Which value columns compare mode renders (see `compare_columns_for`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, clap::ValueEnum)]
#[non_exhaustive]
pub enum DisplayFormat {
    /// Baseline, candidate, delta and percent columns.
    #[default]
    Full,
    /// Delta and percent only.
    DeltaOnly,
    /// Baseline, candidate and delta — no percent column.
    NoPct,
    /// Fused "baseline → candidate" cell plus delta, percent and uptime.
    Arrow,
    /// Percent column only.
    PctOnly,
}
/// A renderable table column; which ones are valid depends on whether we
/// are in compare mode or show mode (see `parse_columns`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[non_exhaustive]
pub enum Column {
    Group,
    Threads,
    Metric,
    Baseline,
    Candidate,
    Delta,
    Pct,
    Arrow,
    Value,
    Tags,
    Uptime,
    SortBy,
}
impl Column {
    /// Name accepted in a `--columns` spec.
    pub fn cli_name(self) -> &'static str {
        match self {
            Self::Group => "group",
            Self::Threads => "threads",
            Self::Metric => "metric",
            Self::Baseline => "baseline",
            Self::Candidate => "candidate",
            Self::Delta => "delta",
            Self::Pct => "%",
            Self::Arrow => "arrow",
            Self::Value => "value",
            Self::Tags => "tags",
            Self::Uptime => "uptime",
            Self::SortBy => "sort-by",
        }
    }
    /// Table header text; the group column's header is supplied by the
    /// caller, and the fused arrow column deliberately reads "value".
    pub fn header(self, group_header: &'static str) -> &'static str {
        match self {
            Self::Group => group_header,
            Self::Threads => "threads",
            Self::Metric => "metric",
            Self::Baseline => "baseline",
            Self::Candidate => "candidate",
            Self::Delta => "delta",
            Self::Pct => "%",
            Self::Arrow => "value",
            Self::Value => "value",
            Self::Tags => "tags",
            Self::Uptime => "%uptime",
            Self::SortBy => "sort-by",
        }
    }
}
/// Default compare-mode column set for a display format: the shared
/// group/threads/metric prefix plus the format's value columns.
fn compare_columns_for(format: DisplayFormat) -> Vec<Column> {
    let mut columns = vec![Column::Group, Column::Threads, Column::Metric];
    match format {
        DisplayFormat::Full => columns.extend([
            Column::Baseline,
            Column::Candidate,
            Column::Delta,
            Column::Pct,
        ]),
        DisplayFormat::DeltaOnly => columns.extend([Column::Delta, Column::Pct]),
        DisplayFormat::NoPct => {
            columns.extend([Column::Baseline, Column::Candidate, Column::Delta])
        }
        DisplayFormat::Arrow => {
            columns.extend([Column::Arrow, Column::Delta, Column::Pct, Column::Uptime])
        }
        DisplayFormat::PctOnly => columns.push(Column::Pct),
    }
    columns
}
/// Default column set for show (single-snapshot) mode.
fn show_columns_default() -> Vec<Column> {
    Vec::from([Column::Group, Column::Threads, Column::Metric, Column::Value])
}
/// Parse a `--columns` spec into columns, validated against the set allowed
/// for the current mode (`compare_side`). Rules: entries are non-empty,
/// case-insensitive, unique, and 'arrow' cannot be combined with the
/// baseline/candidate/delta/% columns it fuses. An empty spec yields an
/// empty Vec (callers then fall back to the mode's defaults).
pub fn parse_columns(spec: &str, compare_side: bool) -> anyhow::Result<Vec<Column>> {
    if spec.trim().is_empty() {
        return Ok(Vec::new());
    }
    // Columns selectable in this mode (compare vs show).
    let allowed: &[Column] = if compare_side {
        &[
            Column::Group,
            Column::Threads,
            Column::Metric,
            Column::Baseline,
            Column::Candidate,
            Column::Delta,
            Column::Pct,
            Column::Arrow,
            Column::Tags,
            Column::Uptime,
        ]
    } else {
        &[
            Column::Group,
            Column::Threads,
            Column::Metric,
            Column::Value,
            Column::Tags,
            Column::Uptime,
        ]
    };
    let valid_names = allowed
        .iter()
        .map(|c| c.cli_name())
        .collect::<Vec<_>>()
        .join(", ");
    let mut out: Vec<Column> = Vec::new();
    let mut seen: std::collections::BTreeSet<&'static str> = std::collections::BTreeSet::new();
    for entry in spec.split(',') {
        let entry = entry.trim();
        if entry.is_empty() {
            anyhow::bail!(
                "empty entry in --columns spec {spec:?}; \
                 entries are comma-separated and must be non-empty"
            );
        }
        let normalized = entry.to_ascii_lowercase();
        let Some(col) = allowed.iter().copied().find(|c| c.cli_name() == normalized) else {
            anyhow::bail!(
                "unknown column {entry:?} in --columns spec {spec:?}; \
                 must be one of: {valid_names}",
            );
        };
        if !seen.insert(col.cli_name()) {
            anyhow::bail!(
                "duplicate column {entry:?} in --columns spec {spec:?}; \
                 each column may appear at most once"
            );
        }
        out.push(col);
    }
    // 'arrow' fuses baseline/candidate/delta/% into one cell; selecting
    // both forms would duplicate the data.
    let has_arrow = out.iter().any(|c| matches!(c, Column::Arrow));
    let has_fused = out.iter().any(|c| {
        matches!(
            c,
            Column::Baseline | Column::Candidate | Column::Delta | Column::Pct
        )
    });
    if has_arrow && has_fused {
        anyhow::bail!(
            "column 'arrow' is mutually exclusive with baseline/candidate/delta/% \
             — the arrow form fuses them into a single cell."
        );
    }
    Ok(out)
}
/// A renderable output section; some require cgroup-level grouping.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[non_exhaustive]
pub enum Section {
    Primary,
    Derived,
    CgroupStats,
    Limits,
    MemoryStat,
    MemoryEvents,
    Pressure,
    HostPressure,
    Smaps,
    SchedExt,
    TaskstatsDelay,
}
impl Section {
    /// All sections, in display order (TaskstatsDelay renders right after
    /// Primary, unlike its declaration position).
    pub const ALL: &'static [Section] = &[
        Section::Primary,
        Section::TaskstatsDelay,
        Section::Derived,
        Section::CgroupStats,
        Section::Limits,
        Section::MemoryStat,
        Section::MemoryEvents,
        Section::Pressure,
        Section::HostPressure,
        Section::Smaps,
        Section::SchedExt,
    ];
    /// Name accepted in a `--sections` spec.
    pub fn cli_name(self) -> &'static str {
        match self {
            Self::Primary => "primary",
            Self::TaskstatsDelay => "taskstats-delay",
            Self::Derived => "derived",
            Self::CgroupStats => "cgroup-stats",
            Self::Limits => "cgroup-limits",
            Self::MemoryStat => "memory-stat",
            Self::MemoryEvents => "memory-events",
            Self::Pressure => "pressure",
            Self::HostPressure => "host-pressure",
            Self::Smaps => "smaps-rollup",
            Self::SchedExt => "sched-ext",
        }
    }
    /// Whether this section is only meaningful under `--group-by cgroup`.
    pub fn requires_cgroup_grouping(self) -> bool {
        matches!(
            self,
            Self::CgroupStats
                | Self::Limits
                | Self::MemoryStat
                | Self::MemoryEvents
                | Self::Pressure
        )
    }
}
/// Warn on stderr about requested sections that will be omitted because
/// they need `--group-by cgroup` and another grouping is active.
pub fn warn_cgroup_only_sections_under_non_cgroup(sections: &[Section], group_by: GroupBy) {
    if group_by == GroupBy::Cgroup || sections.is_empty() {
        return;
    }
    let offending = sections.iter().filter(|s| s.requires_cgroup_grouping());
    for section in offending {
        eprintln!("{}", format_cgroup_only_section_warning(*section, group_by));
    }
}
/// Warning text for a section omitted under a non-cgroup grouping.
pub(crate) fn format_cgroup_only_section_warning(section: Section, group_by: GroupBy) -> String {
    let section_name = section.cli_name();
    let mode_name = group_by_cli_name(group_by);
    format!("section '{section_name}' requires --group-by cgroup; omitted under --group-by {mode_name}")
}
/// CLI spelling of a `GroupBy` value (as accepted by `--group-by`).
fn group_by_cli_name(group_by: GroupBy) -> &'static str {
    match group_by {
        GroupBy::All => "all",
        GroupBy::Cgroup => "cgroup",
        GroupBy::Comm => "comm",
        GroupBy::CommExact => "comm-exact",
        GroupBy::Pcomm => "pcomm",
    }
}
/// Parse a `--sections` spec into sections: comma-separated, non-empty,
/// case-insensitive, unique entries drawn from `Section::ALL`. An empty
/// spec yields an empty Vec (meaning "all sections" — see
/// `DisplayOptions::is_section_enabled`).
pub fn parse_sections(spec: &str) -> anyhow::Result<Vec<Section>> {
    if spec.trim().is_empty() {
        return Ok(Vec::new());
    }
    let valid_names = Section::ALL
        .iter()
        .map(|s| s.cli_name())
        .collect::<Vec<_>>()
        .join(", ");
    let mut out: Vec<Section> = Vec::new();
    let mut seen: std::collections::BTreeSet<&'static str> = std::collections::BTreeSet::new();
    for entry in spec.split(',') {
        let entry = entry.trim();
        if entry.is_empty() {
            anyhow::bail!(
                "empty entry in --sections spec {spec:?}; \
                 entries are comma-separated and must be non-empty"
            );
        }
        let normalized = entry.to_ascii_lowercase();
        let Some(section) = Section::ALL
            .iter()
            .copied()
            .find(|s| s.cli_name() == normalized)
        else {
            anyhow::bail!(
                "unknown section {entry:?} in --sections spec {spec:?}; \
                 must be one of: {valid_names}",
            );
        };
        if !seen.insert(section.cli_name()) {
            anyhow::bail!(
                "duplicate section {entry:?} in --sections spec {spec:?}; \
                 each section may appear at most once"
            );
        }
        out.push(section);
    }
    Ok(out)
}
/// Parse a `--metrics` spec: comma-separated, non-empty, unique metric
/// names resolved (case-sensitively) against the primary and then derived
/// metric tables. An empty spec yields an empty Vec (meaning "all metrics"
/// — see `DisplayOptions::is_metric_enabled`).
pub fn parse_metrics(spec: &str) -> anyhow::Result<Vec<&'static str>> {
    if spec.trim().is_empty() {
        return Ok(Vec::new());
    }
    let mut out: Vec<&'static str> = Vec::new();
    let mut seen: std::collections::BTreeSet<&'static str> = std::collections::BTreeSet::new();
    for entry in spec.split(',') {
        let entry = entry.trim();
        if entry.is_empty() {
            anyhow::bail!(
                "empty entry in --metrics spec {spec:?}; \
                 entries are comma-separated and must be non-empty"
            );
        }
        // Primary metrics win over derived metrics with the same name.
        let primary = CTPROF_METRICS
            .iter()
            .find(|m| m.name == entry)
            .map(|m| m.name);
        let derived = CTPROF_DERIVED_METRICS
            .iter()
            .find(|d| d.name == entry)
            .map(|d| d.name);
        let Some(name) = primary.or(derived) else {
            anyhow::bail!(
                "unknown metric {entry:?} in --metrics spec {spec:?}; \
                 must be one of the names from `ctprof metric-list` \
                 (CTPROF_METRICS or CTPROF_DERIVED_METRICS)",
            );
        };
        if !seen.insert(name) {
            anyhow::bail!(
                "duplicate metric {entry:?} in --metrics spec {spec:?}; \
                 each metric may appear at most once"
            );
        }
        out.push(name);
    }
    Ok(out)
}
/// Rendering options shared by show and compare output.
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct DisplayOptions {
    // Compare-mode column format (ignored when `columns` is non-empty).
    pub format: DisplayFormat,
    // Explicit column selection; empty means "use the mode's defaults".
    pub columns: Vec<Column>,
    // Wrap wide cells instead of truncating.
    pub wrap: bool,
    // Section filter; empty means all sections.
    pub sections: Vec<Section>,
    // Metric filter; empty means all metrics.
    pub metrics: Vec<&'static str>,
    // Max rows rendered per section.
    pub section_line_limit: usize,
}
impl DisplayOptions {
    /// Columns for compare mode: explicit selection, else the format's set.
    pub fn resolved_compare_columns(&self) -> Vec<Column> {
        if self.columns.is_empty() {
            compare_columns_for(self.format)
        } else {
            self.columns.clone()
        }
    }
    /// Columns for show mode: explicit selection, else the show defaults.
    pub fn resolved_show_columns(&self) -> Vec<Column> {
        if self.columns.is_empty() {
            show_columns_default()
        } else {
            self.columns.clone()
        }
    }
    /// An empty section filter enables every section.
    pub fn is_section_enabled(&self, section: Section) -> bool {
        self.sections.is_empty() || self.sections.contains(&section)
    }
    /// An empty metric filter enables every metric.
    pub fn is_metric_enabled(&self, name: &str) -> bool {
        self.metrics.is_empty() || self.metrics.contains(&name)
    }
    /// Fresh table honoring the wrap setting.
    pub fn new_table(&self) -> comfy_table::Table {
        if self.wrap {
            crate::cli::new_wrapped_table()
        } else {
            crate::cli::new_table()
        }
    }
    /// Fresh table with per-column upper width bounds. A placeholder header
    /// is set first because comfy_table only materializes columns (and thus
    /// accepts constraints) once it knows the column count.
    pub fn new_constrained_table(&self, max_widths: &[u16]) -> comfy_table::Table {
        let mut t = self.new_table();
        let dummy: Vec<&str> = (0..max_widths.len()).map(|_| "").collect();
        t.set_header(dummy);
        for (i, &w) in max_widths.iter().enumerate() {
            if let Some(col) = t.column_mut(i) {
                col.set_constraint(comfy_table::ColumnConstraint::UpperBoundary(
                    comfy_table::Width::Fixed(w),
                ));
            }
        }
        t
    }
}
/// Fused "baseline → candidate" cell for a primary metric.
///
/// The delta is not rendered here; the parameter is kept (underscore-named)
/// so the call shape stays unchanged for existing callers, replacing the
/// original's `let _ = delta;` suppression.
fn format_arrow_cell(
    baseline: &Aggregated,
    candidate: &Aggregated,
    _delta: Option<f64>,
    ladder: ScaleLadder,
) -> String {
    let baseline_cell = format_value_cell(baseline, ladder);
    let candidate_cell = format_value_cell(candidate, ladder);
    format!("{baseline_cell} \u{2192} {candidate_cell}")
}
/// Renders "baseline → candidate" for a derived-metric row; a missing side
/// renders as "-".
fn format_arrow_cell_derived(row: &DerivedRow) -> String {
    let render = |side| match side {
        Some(v) => format_derived_value_cell(v, row.metric_ladder, row.is_ratio),
        None => "-".to_string(),
    };
    format!("{} \u{2192} {}", render(row.baseline), render(row.candidate))
}
/// Renders the thread-count cell: a single number when both sides match,
/// otherwise "a→b".
fn render_threads_cell(a: usize, b: usize) -> String {
    match a == b {
        true => a.to_string(),
        false => format!("{a}\u{2192}{b}"),
    }
}
/// Renders one primary diff row into plain-text cells, one per requested
/// column, in the order of `columns`.
fn render_diff_row_cells(row: &DiffRow, columns: &[Column]) -> Vec<String> {
    // Resolve the metric definition once up front; the expect() is safe
    // because metric_name originates from CTPROF_METRICS in build_row.
    let metric_def = CTPROF_METRICS
        .iter()
        .find(|m| m.name == row.metric_name)
        .expect("metric_name comes from CTPROF_METRICS via build_row");
    let metric_cell = metric_display_name(metric_def).to_string();
    let mut cells = Vec::with_capacity(columns.len());
    for col in columns {
        let cell = match col {
            Column::Group => row.display_key.clone(),
            Column::Threads => render_threads_cell(row.thread_count_a, row.thread_count_b),
            Column::Metric => metric_cell.clone(),
            Column::Baseline => format_value_cell(&row.baseline, row.metric_ladder),
            Column::Candidate => format_value_cell(&row.candidate, row.metric_ladder),
            Column::Delta => match row.delta {
                Some(d) => format_delta_cell(d, row.metric_ladder),
                // No numeric delta: categorical (Mode) pairs render as
                // same/differs; any other combination renders as "-".
                None => match (&row.baseline, &row.candidate) {
                    (Aggregated::Mode { value: a, .. }, Aggregated::Mode { value: b, .. }) => {
                        if a == b {
                            "same".to_string()
                        } else {
                            "differs".to_string()
                        }
                    }
                    _ => "-".to_string(),
                },
            },
            // delta_pct is a fraction; rendered as a signed percentage.
            Column::Pct => match row.delta_pct {
                Some(p) => format!("{:+.1}%", p * 100.0),
                None => "-".to_string(),
            },
            Column::Arrow => {
                format_arrow_cell(&row.baseline, &row.candidate, row.delta, row.metric_ladder)
            }
            // Value is a show-mode column; diff rows have no single value.
            Column::Value => "-".to_string(),
            Column::Tags => metric_tags(metric_def),
            Column::Uptime => match row.uptime_pct {
                Some(pct) => format!("{pct:.0}%"),
                None => "-".to_string(),
            },
            Column::SortBy => row.sort_by_cell.clone().unwrap_or_else(|| "-".to_string()),
        };
        cells.push(cell);
    }
    cells
}
/// Wraps a rendered cell in color based on its column: increases are yellow,
/// decreases magenta (Pct/Delta/SortBy); Uptime uses a green/yellow/red
/// traffic light; all other columns are left uncolored. Pct cells with
/// |delta| > 0.5 and Uptime cells below 50% are additionally bolded.
pub fn color_diff_cell(
    text: String,
    col: Column,
    delta: Option<f64>,
    uptime_pct: Option<f64>,
    sort_by_delta: Option<f64>,
) -> comfy_table::Cell {
    use comfy_table::{Attribute, Cell, Color};
    // Shared sign → color mapping; `neutral` is used for zero/missing values.
    let signed_color = |v: Option<f64>, neutral: Color| match v {
        Some(d) if d > 0.0 => Color::Yellow,
        Some(d) if d < 0.0 => Color::Magenta,
        _ => neutral,
    };
    match col {
        Column::Pct => {
            let mut cell = Cell::new(text).fg(signed_color(delta, Color::White));
            if delta.is_some_and(|d| d.abs() > 0.5) {
                cell = cell.add_attribute(Attribute::Bold);
            }
            cell
        }
        Column::Delta => Cell::new(text).fg(signed_color(delta, Color::White)),
        Column::Uptime => {
            let color = match uptime_pct {
                None => Color::White,
                Some(p) if p >= 75.0 => Color::Green,
                Some(p) if p >= 50.0 => Color::Yellow,
                Some(_) => Color::Red,
            };
            let mut cell = Cell::new(text).fg(color);
            if uptime_pct.is_some_and(|p| p < 50.0) {
                cell = cell.add_attribute(Attribute::Bold);
            }
            cell
        }
        Column::SortBy => Cell::new(text).fg(signed_color(sort_by_delta, Color::Cyan)),
        _ => Cell::new(text),
    }
}
/// Splits a cgroup path into (parent, leaf) at the last '/'.
///
/// A direct child of the root keeps "/" as its parent; a path with no slash
/// at all gets an empty parent.
fn cgroup_parent_leaf(path: &str) -> (&str, &str) {
    match path.rsplit_once('/') {
        // The only '/' was at index 0: parent is the root itself.
        Some(("", leaf)) => ("/", leaf),
        Some(split) => split,
        None => ("", path),
    }
}
/// Builds the colored table header for `columns`; identical to
/// `colored_header_with_sort` with no sort-metric label override.
pub fn colored_header(columns: &[Column], group_header: &'static str) -> Vec<comfy_table::Cell> {
    colored_header_with_sort(columns, group_header, None)
}
/// Builds a cyan header cell for each column. The Group column is labeled
/// `group_header`; the SortBy column is labeled with the sort metric's name
/// (falling back to "sort-by" when none is set).
pub fn colored_header_with_sort(
    columns: &[Column],
    group_header: &'static str,
    sort_metric: Option<&str>,
) -> Vec<comfy_table::Cell> {
    let mut header = Vec::with_capacity(columns.len());
    for col in columns {
        let label = if matches!(*col, Column::SortBy) {
            sort_metric.unwrap_or("sort-by")
        } else {
            col.header(group_header)
        };
        header.push(comfy_table::Cell::new(label).fg(comfy_table::Color::Cyan));
    }
    header
}
/// Converts plain-text derived-row cells into blue table cells.
pub fn color_derived_cells(cells: Vec<String>) -> Vec<comfy_table::Cell> {
    let mut colored = Vec::with_capacity(cells.len());
    for text in cells {
        colored.push(comfy_table::Cell::new(text).fg(comfy_table::Color::Blue));
    }
    colored
}
/// Renders one derived-metric row into plain-text cells, one per requested
/// column. Derived rows carry no tags/uptime/sort data, so those columns
/// render empty or as "-".
fn render_derived_row_cells(row: &DerivedRow, columns: &[Column]) -> Vec<String> {
    let mut cells = Vec::with_capacity(columns.len());
    for col in columns {
        let cell = match col {
            Column::Group => row.display_key.clone(),
            Column::Threads => render_threads_cell(row.thread_count_a, row.thread_count_b),
            Column::Metric => row.metric_name.to_string(),
            // Either side may be absent; presumably when an input metric was
            // missing in that snapshot — TODO confirm in the diff builder.
            Column::Baseline => match row.baseline {
                Some(v) => format_derived_value_cell(v, row.metric_ladder, row.is_ratio),
                None => "-".to_string(),
            },
            Column::Candidate => match row.candidate {
                Some(v) => format_derived_value_cell(v, row.metric_ladder, row.is_ratio),
                None => "-".to_string(),
            },
            Column::Delta => match row.delta {
                Some(d) => format_derived_delta_cell(d, row.metric_ladder, row.is_ratio),
                None => "-".to_string(),
            },
            // delta_pct is a fraction; rendered as a signed percentage.
            Column::Pct => match row.delta_pct {
                Some(p) => format!("{:+.1}%", p * 100.0),
                None => "-".to_string(),
            },
            Column::Arrow => format_arrow_cell_derived(row),
            Column::Value => "-".to_string(),
            Column::Tags => String::new(),
            Column::Uptime => "-".to_string(),
            Column::SortBy => "-".to_string(),
        };
        cells.push(cell);
    }
    cells
}
// CLI arguments for `ctprof compare`. Comments in this block are deliberately
// plain `//`: `///` doc comments on clap-derive items are harvested into
// `--help` text, which would change user-visible behavior.
#[derive(Debug, clap::Args)]
pub struct CtprofCompareArgs {
    // Positional: path to the baseline snapshot.
    pub baseline: std::path::PathBuf,
    // Positional: path to the candidate snapshot.
    pub candidate: std::path::PathBuf,
    // Row-grouping key; the `All` default renders the cgroup/pcomm/comm
    // hierarchy (see write_diff).
    #[arg(long, value_enum, default_value_t = GroupBy::All, help_heading = "Grouping")]
    pub group_by: GroupBy,
    // Cgroup paths to flatten during grouping (repeatable flag).
    #[arg(long, help_heading = "Grouping")]
    pub cgroup_flatten: Vec<String>,
    // Disable thread-name normalization.
    // NOTE(review): exact normalization semantics live in compare(); confirm.
    #[arg(long, help_heading = "Grouping")]
    pub no_thread_normalize: bool,
    // Disable cgroup-name normalization.
    // NOTE(review): exact normalization semantics live in compare(); confirm.
    #[arg(long, help_heading = "Grouping")]
    pub no_cg_normalize: bool,
    // Sort specification string; parsed by parse_sort_by ("" = default order).
    #[arg(long, default_value = "", help_heading = "Display")]
    pub sort_by: String,
    // Rendering style for value columns.
    #[arg(long, value_enum, default_value_t = DisplayFormat::Arrow, help_heading = "Display")]
    pub display_format: DisplayFormat,
    // Column list; parsed by parse_columns ("" = mode defaults).
    #[arg(long, default_value = "", help_heading = "Display")]
    pub columns: String,
    // Section filter; parsed by parse_sections ("" = all sections).
    #[arg(long, default_value = "", help_heading = "Filter")]
    pub sections: String,
    // Metric filter; parsed by parse_metrics ("" = all metrics).
    #[arg(long, default_value = "", help_heading = "Filter")]
    pub metrics: String,
    // Wrap long table cells instead of truncating.
    #[arg(long, help_heading = "Display")]
    pub wrap: bool,
    // Per-section line limit; 0 = unlimited.
    #[arg(long, default_value_t = 500, help_heading = "Display")]
    pub limit: usize,
}
/// Entry point for `ctprof compare`: parses display/grouping flags, loads both
/// snapshots, computes the diff, and prints it.
///
/// Returns the process exit code (0 on success). Flag-parse and snapshot-load
/// failures surface as `Err` with the offending input attached as context.
pub fn run_compare(args: &CtprofCompareArgs) -> anyhow::Result<i32> {
    let sort_by = parse_sort_by(&args.sort_by)
        .with_context(|| format!("parse --sort-by {:?}", args.sort_by))?;
    let columns = parse_columns(&args.columns, true)
        .with_context(|| format!("parse --columns {:?}", args.columns))?;
    let sections = parse_sections(&args.sections)
        .with_context(|| format!("parse --sections {:?}", args.sections))?;
    let metrics = parse_metrics(&args.metrics)
        .with_context(|| format!("parse --metrics {:?}", args.metrics))?;
    // BUGFIX: `&sections` had been corrupted to `§ions` (HTML-entity mangling
    // of the `&` sigil), which is not valid Rust.
    warn_cgroup_only_sections_under_non_cgroup(&sections, args.group_by);
    let baseline = CtprofSnapshot::load(&args.baseline)
        .with_context(|| format!("load baseline {}", args.baseline.display()))?;
    let candidate = CtprofSnapshot::load(&args.candidate)
        .with_context(|| format!("load candidate {}", args.candidate.display()))?;
    let display = DisplayOptions {
        format: args.display_format,
        columns,
        wrap: args.wrap,
        sections,
        metrics,
        section_line_limit: args.limit,
    };
    let opts = CompareOptions {
        group_by: args.group_by.into(),
        cgroup_flatten: args.cgroup_flatten.clone(),
        no_thread_normalize: args.no_thread_normalize,
        no_cg_normalize: args.no_cg_normalize,
        sort_by,
    };
    let diff = compare(&baseline, &candidate, &opts);
    print_diff(
        &diff,
        &args.baseline,
        &args.candidate,
        args.group_by,
        &display,
    );
    Ok(0)
}
/// Writes the `ctprof metric-list` reference document into `w`: a prose tag
/// legend, a sections table, the primary metric registry, and the derived
/// metric registry.
pub fn write_metric_list<W: fmt::Write>(w: &mut W) -> fmt::Result {
    // --- Tag legend: explains the sched_class / config_gates / status tags
    // that metric_tags() attaches to each metric row. ---
    writeln!(w, "## Tag legend")?;
    writeln!(w)?;
    writeln!(w, "sched_class:")?;
    writeln!(
        w,
        " [cfs-only] metric increments only inside CFS-class call paths (kernel/sched/fair.c);"
    )?;
    writeln!(w, " zero under sched_ext / RT / DL / IDLE.")?;
    writeln!(
        w,
        " [non-ext] metric is written by the schedstat sleep/wait family wrappers"
    )?;
    writeln!(
        w,
        " (kernel/sched/stats.c); CFS / RT / DL accumulate, sched_ext bypasses."
    )?;
    writeln!(
        w,
        " [fair-policy] metric emits only when fair_policy(p->policy) is true:"
    )?;
    writeln!(
        w,
        " SCHED_NORMAL, SCHED_BATCH, AND SCHED_EXT under CONFIG_SCHED_CLASS_EXT."
    )?;
    writeln!(w)?;
    writeln!(
        w,
        "config_gates (compact form; full kconfig symbol prefixed with CONFIG_):"
    )?;
    writeln!(
        w,
        " [SCHED_INFO] requires CONFIG_SCHED_INFO; gates the sched_info_* counters"
    )?;
    writeln!(
        w,
        " surfaced via /proc/<tid>/schedstat (run_time_ns, wait_time_ns,"
    )?;
    writeln!(w, " timeslices).")?;
    writeln!(
        w,
        " [SCHEDSTATS] requires CONFIG_SCHEDSTATS; gates every __schedstat_* /"
    )?;
    writeln!(
        w,
        " schedstat_* macro call (kernel/sched/stats.h:75-82)."
    )?;
    writeln!(
        w,
        " [SCHED_CORE] requires CONFIG_SCHED_CORE; gates the core-scheduling"
    )?;
    writeln!(
        w,
        " subsystem (core_forceidle_sum)."
    )?;
    writeln!(
        w,
        " [SCHED_CLASS_EXT] requires CONFIG_SCHED_CLASS_EXT; without it no task can"
    )?;
    writeln!(w, " land on the sched_ext class.")?;
    writeln!(
        w,
        " [TASK_DELAY_ACCT] requires CONFIG_TASK_DELAY_ACCT AND runtime delayacct=on"
    )?;
    writeln!(
        w,
        " (boot param or kernel.task_delayacct sysctl)."
    )?;
    writeln!(
        w,
        " [TASK_IO_ACCOUNTING] requires CONFIG_TASK_IO_ACCOUNTING; gates /proc/<tid>/io."
    )?;
    writeln!(
        w,
        " [TASKSTATS] requires CONFIG_TASKSTATS; gates the netlink TASKSTATS family"
    )?;
    writeln!(
        w,
        " (kernel/taskstats.c) used by the taskstats delay-accounting"
    )?;
    writeln!(
        w,
        " and hiwater_rss/hiwater_vm capture path. Calls also need"
    )?;
    writeln!(w, " CAP_NET_ADMIN.")?;
    writeln!(
        w,
        " [TASK_XACCT] requires CONFIG_TASK_XACCT; gates extended accounting fields"
    )?;
    writeln!(
        w,
        " (hiwater_rss, hiwater_vm) populated by xacct_add_tsk."
    )?;
    writeln!(w)?;
    writeln!(w, "status:")?;
    writeln!(
        w,
        " [dead] kernel exposes the counter via /proc but never increments it; always"
    )?;
    writeln!(
        w,
        " reads zero. Surfaced for forward-compat parity with the kernel's"
    )?;
    writeln!(w, " exposure surface.")?;
    writeln!(w)?;
    // --- Sections table: one row per Section variant, with its rendered
    // heading and a prose description. ---
    writeln!(w, "## Sections")?;
    writeln!(w)?;
    let mut sections_table = crate::cli::new_table();
    sections_table.set_header(vec!["section", "rendered heading", "description"]);
    for section in Section::ALL {
        let (heading, desc) = match section {
            Section::Primary => (
                "(no heading; first table)",
                "Per-thread metric table — the primary aggregated rows EXCLUDING the taskstats genetlink rows (those carry the `taskstats-delay` tag).",
            ),
            Section::TaskstatsDelay => (
                "(rendered inside the primary table)",
                "Taskstats genetlink-sourced rows — eight delay-accounting categories (cpu/blkio/swapin/freepages/thrashing/compact/wpcopy/irq × count/total/max/min) plus hiwater_rss_bytes / hiwater_vm_bytes. Per-row filter inside the primary table.",
            ),
            Section::Derived => (
                "## Derived metrics",
                "Computed metrics derived from the primary registry (ratios, averages, signed differences).",
            ),
            Section::CgroupStats => (
                "(no heading; cgroup-stats table)",
                "Per-cgroup CPU + memory enrichment from cpu.stat / memory.current. Requires --group-by cgroup.",
            ),
            Section::Limits => (
                "## Cgroup limits / knobs",
                "Operator-set cgroup configuration — cpu.max, cpu.weight, memory.max, memory.high, pids.*. Requires --group-by cgroup.",
            ),
            Section::MemoryStat => (
                "## memory.stat",
                "Kernel-emitted memory.stat counters per cgroup. Requires --group-by cgroup.",
            ),
            Section::MemoryEvents => (
                "## memory.events",
                "Pressure-event counters from memory.events per cgroup. Requires --group-by cgroup.",
            ),
            Section::Pressure => (
                "## Pressure / <resource>",
                "Per-cgroup PSI sub-tables — one per resource (cpu / memory / io / irq). Requires --group-by cgroup.",
            ),
            Section::HostPressure => (
                "## Host pressure / <resource>",
                "System-level PSI sub-tables from /proc/pressure/<resource>.",
            ),
            Section::Smaps => (
                "## smaps_rollup",
                "Per-process memory-mapping summary from /proc/<pid>/smaps_rollup (Rss / Pss / private / shared / swap). Compare-side keys default to per-pcomm-pattern aggregates (`worker-{N}`); pass `--no-thread-normalize` to switch back to literal `pcomm[tgid]` per-PID rows. Under default normalization, byte counts per (pcomm-pattern, key) pair are field-summed across all PIDs sharing the same pcomm skeleton.",
            ),
            Section::SchedExt => (
                "## sched_ext",
                "Global sched_ext sysfs state — state, switch_all, nr_rejected, hotplug_seq, enable_seq.",
            ),
        };
        sections_table.add_row(vec![
            section.cli_name().to_string(),
            heading.to_string(),
            desc.to_string(),
        ]);
    }
    writeln!(w, "{sections_table}")?;
    writeln!(w)?;
    // --- Primary metric registry: name, tags, description. ---
    writeln!(w, "## Metrics")?;
    writeln!(w)?;
    let mut table = crate::cli::new_table();
    table.set_header(vec!["metric", "tags", "description"]);
    for m in CTPROF_METRICS {
        let tags = metric_tags(m);
        table.add_row(vec![m.name.to_string(), tags, m.description.to_string()]);
    }
    writeln!(w, "{table}")?;
    writeln!(w)?;
    // --- Derived metric registry: ratios render their unit as "ratio",
    // everything else uses the scale ladder's base unit. ---
    writeln!(w, "## Derived metrics")?;
    writeln!(w)?;
    let mut dt = crate::cli::new_table();
    dt.set_header(vec!["metric", "unit", "inputs", "description"]);
    for d in CTPROF_DERIVED_METRICS {
        let unit_cell = if d.is_ratio {
            "ratio".to_string()
        } else {
            d.ladder.base_unit().to_string()
        };
        dt.add_row(vec![
            d.name.to_string(),
            unit_cell,
            d.inputs.join(", "),
            d.description.to_string(),
        ]);
    }
    writeln!(w, "{dt}")?;
    Ok(())
}
/// Prints the metric-list reference document to stdout.
pub fn print_metric_list() {
    let mut rendered = String::new();
    // Writing into a String cannot fail, so the fmt::Result is discarded.
    let _ = write_metric_list(&mut rendered);
    print!("{rendered}");
}
/// CLI entry point for `ctprof metric-list`; always succeeds with exit code 0.
pub fn run_metric_list() -> anyhow::Result<i32> {
    print_metric_list();
    Ok(0)
}
/// Renders the diff via `write_diff` and prints it to stdout, applying the
/// per-section line limit when one is configured (0 = unlimited).
pub fn print_diff(
    diff: &CtprofDiff,
    baseline_path: &Path,
    candidate_path: &Path,
    group_by: GroupBy,
    display: &DisplayOptions,
) {
    let mut rendered = String::new();
    // Writing into a String cannot fail, so the fmt::Result is discarded.
    let _ = write_diff(
        &mut rendered,
        diff,
        baseline_path,
        candidate_path,
        group_by,
        display,
    );
    let output = if display.section_line_limit > 0 {
        limit_sections(&rendered, display.section_line_limit)
    } else {
        rendered
    };
    print!("{output}");
}
/// Truncates each `## `-headed section of `output` to at most `limit` lines,
/// appending a "... N more lines truncated" marker to sections that overflow
/// (see `flush_section`). Preamble lines before the first section header pass
/// through untouched.
pub fn limit_sections(output: &str, limit: usize) -> String {
    let mut result = String::with_capacity(output.len());
    let mut section_lines: Vec<&str> = Vec::new();
    let mut section_header: Option<&str> = None;
    for line in output.lines() {
        if line.starts_with("## ") {
            // A new header flushes the previous section, if any.
            // BUGFIX: `&section_lines` had been corrupted to `§ion_lines`
            // (HTML-entity mangling of the `&` sigil), which is not valid Rust.
            flush_section(&mut result, section_header, &section_lines, limit);
            section_lines.clear();
            section_header = Some(line);
        } else if section_header.is_some() {
            section_lines.push(line);
        } else {
            result.push_str(line);
            result.push('\n');
        }
    }
    // Flush the trailing section.
    flush_section(&mut result, section_header, &section_lines, limit);
    result
}
/// Appends one section (header + up to `limit` body lines) to `result`,
/// adding a truncation marker when body lines were dropped. A `None` header
/// means there is no pending section, and nothing is written.
fn flush_section(result: &mut String, header: Option<&str>, lines: &[&str], limit: usize) {
    let Some(heading) = header else { return };
    result.push_str(heading);
    result.push('\n');
    let shown = lines.len().min(limit);
    for line in &lines[..shown] {
        result.push_str(line);
        result.push('\n');
    }
    if lines.len() > limit {
        result.push_str(&format!(
            "... {} more lines truncated (use --limit 0 for unlimited)\n",
            lines.len() - limit,
        ));
    }
}
pub fn write_diff<W: fmt::Write>(
w: &mut W,
diff: &CtprofDiff,
baseline_path: &Path,
candidate_path: &Path,
group_by: GroupBy,
display: &DisplayOptions,
) -> fmt::Result {
let group_header = match group_by {
GroupBy::Pcomm => "pcomm",
GroupBy::Cgroup => "cgroup",
GroupBy::Comm => "comm-pattern",
GroupBy::CommExact => "comm",
GroupBy::All => "comm",
};
let mut columns = display.resolved_compare_columns();
let has_sort_col = diff.rows.first().is_some_and(|r| r.sort_by_cell.is_some());
if has_sort_col {
columns.push(Column::SortBy);
}
let global_max_widths: Vec<u16> = if group_by == GroupBy::All {
let mut measure = display.new_table();
measure.set_header(colored_header_with_sort(
&columns,
group_header,
diff.sort_metric_name,
));
for row in &diff.rows {
let mut cells = render_diff_row_cells(row, &columns);
if let Some(pos) = columns.iter().position(|c| *c == Column::Group) {
let comm = row.group_key.splitn(3, '\x00').nth(2).unwrap_or("");
cells[pos] = comm.to_string();
}
measure.add_row(cells);
}
for row in &diff.derived_rows {
let mut cells = render_derived_row_cells(row, &columns);
if let Some(pos) = columns.iter().position(|c| *c == Column::Group) {
let comm = row.group_key.splitn(3, '\x00').nth(2).unwrap_or("");
cells[pos] = comm.to_string();
}
measure.add_row(cells);
}
measure.column_max_content_widths()
} else {
Vec::new()
};
if display.is_section_enabled(Section::Primary)
|| display.is_section_enabled(Section::TaskstatsDelay)
{
let primary_rows: Vec<&DiffRow> = diff
.rows
.iter()
.filter(|row| {
if !display.is_metric_enabled(row.metric_name) {
return false;
}
let metric = CTPROF_METRICS
.iter()
.find(|m| m.name == row.metric_name)
.expect("metric_name comes from CTPROF_METRICS via build_row");
display.is_section_enabled(metric.section)
})
.collect();
if group_by == GroupBy::All {
let limited_rows: Vec<&DiffRow> = if display.section_line_limit > 0 {
primary_rows
.iter()
.copied()
.take(display.section_line_limit)
.collect()
} else {
primary_rows.clone()
};
struct HierRow<'a> {
cgroup: &'a str,
pcomm: &'a str,
comm: &'a str,
row: &'a DiffRow,
}
let mut hier: Vec<HierRow<'_>> = limited_rows
.iter()
.map(|row| {
let mut parts = row.group_key.splitn(3, '\x00');
let cgroup = parts.next().unwrap_or("");
let pcomm = parts.next().unwrap_or("");
let comm = parts.next().unwrap_or(pcomm);
HierRow {
cgroup,
pcomm,
comm,
row,
}
})
.collect();
let row_rank: BTreeMap<*const DiffRow, usize> = hier
.iter()
.enumerate()
.map(|(i, h)| (h.row as *const DiffRow, i))
.collect();
let mut leaf_rank: BTreeMap<(&str, &str), usize> = BTreeMap::new();
let mut cg_rank: BTreeMap<&str, usize> = BTreeMap::new();
for h in &hier {
let rank = row_rank[&(h.row as *const DiffRow)];
let le = leaf_rank.entry((h.cgroup, h.pcomm)).or_insert(usize::MAX);
if rank < *le {
*le = rank;
}
let ce = cg_rank.entry(h.cgroup).or_insert(usize::MAX);
if rank < *ce {
*ce = rank;
}
}
hier.sort_by(|a, b| {
let cga = cg_rank.get(a.cgroup).copied().unwrap_or(usize::MAX);
let cgb = cg_rank.get(b.cgroup).copied().unwrap_or(usize::MAX);
cga.cmp(&cgb)
.then_with(|| {
let sa = leaf_rank
.get(&(a.cgroup, a.pcomm))
.copied()
.unwrap_or(usize::MAX);
let sb = leaf_rank
.get(&(b.cgroup, b.pcomm))
.copied()
.unwrap_or(usize::MAX);
sa.cmp(&sb)
})
.then_with(|| {
let ra = row_rank[&(a.row as *const DiffRow)];
let rb = row_rank[&(b.row as *const DiffRow)];
ra.cmp(&rb)
})
});
writeln!(w, "## Primary metrics")?;
let mut last_segments: Vec<&str> = Vec::new();
let mut last_pcomm = "";
let mut table = display.new_constrained_table(&global_max_widths);
table.set_header(colored_header_with_sort(
&columns,
"comm",
diff.sort_metric_name,
));
let depth_color = |depth: usize| -> comfy_table::Color {
match depth {
0 => comfy_table::Color::Green,
1 => comfy_table::Color::Cyan,
_ => comfy_table::Color::DarkGrey,
}
};
for h in &hier {
let segments: Vec<&str> = h.cgroup.split('/').filter(|s| !s.is_empty()).collect();
let common = segments
.iter()
.zip(last_segments.iter())
.take_while(|(a, b)| a == b)
.count();
let cg_changed =
common < last_segments.len() || segments.len() > last_segments.len();
if cg_changed {
for (depth, seg) in segments.iter().enumerate().skip(common) {
let indent = " ".repeat(depth);
let label = format!("{indent}{seg}");
let heading_cells: Vec<comfy_table::Cell> = columns
.iter()
.map(|c| {
if *c == Column::Group {
comfy_table::Cell::new(&label)
.fg(depth_color(depth))
.add_attribute(comfy_table::Attribute::Bold)
} else {
comfy_table::Cell::new("")
}
})
.collect();
table.add_row(heading_cells);
}
last_segments = segments;
last_pcomm = "";
}
if h.pcomm != last_pcomm {
let cg_depth = last_segments.len();
let indent = " ".repeat(cg_depth);
let label = format!("{indent}{}", h.pcomm);
let heading_cells: Vec<comfy_table::Cell> = columns
.iter()
.map(|c| {
if *c == Column::Group {
comfy_table::Cell::new(&label)
.fg(comfy_table::Color::White)
.add_attribute(comfy_table::Attribute::Bold)
} else {
comfy_table::Cell::new("")
}
})
.collect();
table.add_row(heading_cells);
last_pcomm = h.pcomm;
}
let mut string_cells = render_diff_row_cells(h.row, &columns);
if let Some(pos) = columns.iter().position(|c| *c == Column::Group) {
let cg_depth = last_segments.len();
string_cells[pos] = format!("{} {}", " ".repeat(cg_depth + 1), h.comm);
}
let cells: Vec<comfy_table::Cell> = string_cells
.into_iter()
.zip(columns.iter())
.map(|(s, col)| {
color_diff_cell(s, *col, h.row.delta, h.row.uptime_pct, h.row.sort_by_delta)
})
.collect();
table.add_row(cells);
}
writeln!(w, "{table}")?;
} else if group_by == GroupBy::Cgroup {
let mut by_parent: BTreeMap<&str, Vec<&DiffRow>> = BTreeMap::new();
for row in &primary_rows {
let (parent, _) = cgroup_parent_leaf(&row.display_key);
by_parent.entry(parent).or_default().push(row);
}
for (parent, rows) in &by_parent {
writeln!(w)?;
writeln!(w, "\x1b[1;32m## {parent}\x1b[0m")?;
let mut table = display.new_table();
table.set_header(colored_header_with_sort(
&columns,
"cgroup",
diff.sort_metric_name,
));
let cg_limit = if display.section_line_limit > 0 {
&rows[..rows.len().min(display.section_line_limit)]
} else {
&rows[..]
};
for row in cg_limit {
let (_, leaf) = cgroup_parent_leaf(&row.display_key);
let mut string_cells = render_diff_row_cells(row, &columns);
if let Some(pos) = columns.iter().position(|c| *c == Column::Group) {
string_cells[pos] = leaf.to_string();
}
let cells: Vec<comfy_table::Cell> = string_cells
.into_iter()
.zip(columns.iter())
.map(|(s, col)| {
color_diff_cell(s, *col, row.delta, row.uptime_pct, row.sort_by_delta)
})
.collect();
table.add_row(cells);
}
writeln!(w, "{table}")?;
}
} else {
writeln!(w, "## Primary metrics")?;
let mut table = display.new_table();
table.set_header(colored_header_with_sort(
&columns,
group_header,
diff.sort_metric_name,
));
let limit_iter = if display.section_line_limit > 0 {
&primary_rows[..primary_rows.len().min(display.section_line_limit)]
} else {
&primary_rows[..]
};
for row in limit_iter {
let string_cells = render_diff_row_cells(row, &columns);
let cells: Vec<comfy_table::Cell> = string_cells
.into_iter()
.zip(columns.iter())
.map(|(s, col)| {
color_diff_cell(s, *col, row.delta, row.uptime_pct, row.sort_by_delta)
})
.collect();
table.add_row(cells);
}
writeln!(w, "{table}")?;
}
}
if (display.is_section_enabled(Section::Derived)
|| display.is_section_enabled(Section::TaskstatsDelay))
&& !diff.derived_rows.is_empty()
{
let derived_rows: Vec<&DerivedRow> = diff
.derived_rows
.iter()
.filter(|row| {
if !display.is_metric_enabled(row.metric_name) {
return false;
}
let metric = CTPROF_DERIVED_METRICS
.iter()
.find(|d| d.name == row.metric_name)
.expect("derived metric_name from CTPROF_DERIVED_METRICS");
display.is_section_enabled(metric.section)
})
.collect();
let uptime_map: BTreeMap<&str, Option<f64>> = diff
.rows
.iter()
.map(|r| (r.group_key.as_str(), r.uptime_pct))
.collect();
if group_by == GroupBy::All {
let limited: Vec<&DerivedRow> = if display.section_line_limit > 0 {
derived_rows
.iter()
.copied()
.take(display.section_line_limit)
.collect()
} else {
derived_rows
};
struct DHier<'a> {
cgroup: &'a str,
pcomm: &'a str,
comm: &'a str,
row: &'a DerivedRow,
}
let mut hier: Vec<DHier<'_>> = limited
.iter()
.map(|row| {
let mut parts = row.group_key.splitn(3, '\x00');
let cg = parts.next().unwrap_or("");
let pc = parts.next().unwrap_or("");
let cm = parts.next().unwrap_or(pc);
DHier {
cgroup: cg,
pcomm: pc,
comm: cm,
row,
}
})
.collect();
let row_rank: BTreeMap<*const DerivedRow, usize> = hier
.iter()
.enumerate()
.map(|(i, h)| (h.row as *const DerivedRow, i))
.collect();
let mut leaf_rank: BTreeMap<(&str, &str), usize> = BTreeMap::new();
let mut cg_rank: BTreeMap<&str, usize> = BTreeMap::new();
for h in &hier {
let rank = row_rank[&(h.row as *const DerivedRow)];
let le = leaf_rank.entry((h.cgroup, h.pcomm)).or_insert(usize::MAX);
if rank < *le {
*le = rank;
}
let ce = cg_rank.entry(h.cgroup).or_insert(usize::MAX);
if rank < *ce {
*ce = rank;
}
}
hier.sort_by(|a, b| {
let cga = cg_rank.get(a.cgroup).copied().unwrap_or(usize::MAX);
let cgb = cg_rank.get(b.cgroup).copied().unwrap_or(usize::MAX);
cga.cmp(&cgb)
.then_with(|| {
let sa = leaf_rank
.get(&(a.cgroup, a.pcomm))
.copied()
.unwrap_or(usize::MAX);
let sb = leaf_rank
.get(&(b.cgroup, b.pcomm))
.copied()
.unwrap_or(usize::MAX);
sa.cmp(&sb)
})
.then_with(|| {
let ra = row_rank[&(a.row as *const DerivedRow)];
let rb = row_rank[&(b.row as *const DerivedRow)];
ra.cmp(&rb)
})
});
writeln!(w)?;
writeln!(w, "## Derived metrics")?;
let mut dt = display.new_constrained_table(&global_max_widths);
dt.set_header(colored_header_with_sort(
&columns,
"comm",
diff.sort_metric_name,
));
let mut last_segs: Vec<&str> = Vec::new();
let mut last_pc = "";
let depth_color = |d: usize| -> comfy_table::Color {
match d {
0 => comfy_table::Color::Green,
1 => comfy_table::Color::Cyan,
_ => comfy_table::Color::DarkGrey,
}
};
for h in &hier {
let segs: Vec<&str> = h.cgroup.split('/').filter(|s| !s.is_empty()).collect();
let common = segs
.iter()
.zip(last_segs.iter())
.take_while(|(a, b)| a == b)
.count();
if common < last_segs.len() || segs.len() > last_segs.len() {
for (depth, seg) in segs.iter().enumerate().skip(common) {
let indent = " ".repeat(depth);
let label = format!("{indent}{seg}");
let hcells: Vec<comfy_table::Cell> = columns
.iter()
.map(|c| {
if *c == Column::Group {
comfy_table::Cell::new(&label)
.fg(depth_color(depth))
.add_attribute(comfy_table::Attribute::Bold)
} else {
comfy_table::Cell::new("")
}
})
.collect();
dt.add_row(hcells);
}
last_segs = segs;
last_pc = "";
}
if h.pcomm != last_pc {
let cg_depth = last_segs.len();
let indent = " ".repeat(cg_depth);
let label = format!("{indent}{}", h.pcomm);
let hcells: Vec<comfy_table::Cell> = columns
.iter()
.map(|c| {
if *c == Column::Group {
comfy_table::Cell::new(&label)
.fg(comfy_table::Color::White)
.add_attribute(comfy_table::Attribute::Bold)
} else {
comfy_table::Cell::new("")
}
})
.collect();
dt.add_row(hcells);
last_pc = h.pcomm;
}
let mut cells = render_derived_row_cells(h.row, &columns);
if let Some(pos) = columns.iter().position(|c| *c == Column::Group) {
let cg_depth = last_segs.len();
cells[pos] = format!("{} {}", " ".repeat(cg_depth + 1), h.comm);
}
let colored: Vec<comfy_table::Cell> = cells
.into_iter()
.zip(columns.iter())
.map(|(s, col)| {
let up = uptime_map.get(h.row.group_key.as_str()).copied().flatten();
if *col == Column::Uptime {
let text = match up {
Some(pct) => format!("{pct:.0}%"),
None => "-".to_string(),
};
color_diff_cell(text, *col, h.row.delta, up, None)
} else {
color_diff_cell(s, *col, h.row.delta, up, None)
}
})
.collect();
dt.add_row(colored);
}
writeln!(w, "{dt}")?;
} else {
writeln!(w)?;
writeln!(w, "## Derived metrics")?;
let mut dt = display.new_table();
dt.set_header(colored_header_with_sort(
&columns,
group_header,
diff.sort_metric_name,
));
let d_limit = if display.section_line_limit > 0 {
&derived_rows[..derived_rows.len().min(display.section_line_limit)]
} else {
&derived_rows[..]
};
for row in d_limit {
let string_cells = render_derived_row_cells(row, &columns);
let cells: Vec<comfy_table::Cell> = string_cells
.into_iter()
.zip(columns.iter())
.map(|(s, col)| {
let up = uptime_map.get(row.group_key.as_str()).copied().flatten();
if *col == Column::Uptime {
let text = match up {
Some(pct) => format!("{pct:.0}%"),
None => "-".to_string(),
};
color_diff_cell(text, *col, row.delta, up, None)
} else {
color_diff_cell(s, *col, row.delta, up, None)
}
})
.collect();
dt.add_row(cells);
}
writeln!(w, "{dt}")?;
}
}
if display.is_section_enabled(Section::Smaps)
&& (!diff.smaps_rollup_a.is_empty() || !diff.smaps_rollup_b.is_empty())
{
let mut process_keys: std::collections::BTreeSet<&String> =
diff.smaps_rollup_a.keys().collect();
process_keys.extend(diff.smaps_rollup_b.keys());
let max_field_for = |pkey: &&String, field: &str| -> u64 {
let a = diff
.smaps_rollup_a
.get(*pkey)
.and_then(|m| m.get(field).copied())
.unwrap_or(0);
let b = diff
.smaps_rollup_b
.get(*pkey)
.and_then(|m| m.get(field).copied())
.unwrap_or(0);
a.max(b)
};
let abs_rss_delta = |pkey: &&String| -> u64 {
let a = diff
.smaps_rollup_a
.get(*pkey)
.and_then(|m| m.get("Rss").copied())
.unwrap_or(0);
let b = diff
.smaps_rollup_b
.get(*pkey)
.and_then(|m| m.get("Rss").copied())
.unwrap_or(0);
(b as i128 - a as i128).unsigned_abs() as u64
};
let mut sorted_process_keys: Vec<&String> = process_keys.iter().copied().collect();
sorted_process_keys.sort_by(|a, b| {
abs_rss_delta(b)
.cmp(&abs_rss_delta(a))
.then_with(|| max_field_for(b, "Rss").cmp(&max_field_for(a, "Rss")))
.then_with(|| a.cmp(b))
});
if display.section_line_limit > 0 {
sorted_process_keys.truncate(display.section_line_limit);
}
let any_delta = sorted_process_keys.iter().any(|pkey| {
let a = diff.smaps_rollup_a.get(*pkey);
let b = diff.smaps_rollup_b.get(*pkey);
let mut keys: std::collections::BTreeSet<&String> =
a.map(|m| m.keys().collect()).unwrap_or_default();
if let Some(m) = b {
keys.extend(m.keys());
}
keys.iter().any(|k| {
let av = a.and_then(|m| m.get(*k).copied());
let bv = b.and_then(|m| m.get(*k).copied());
av != bv
})
});
if any_delta {
writeln!(w)?;
writeln!(w, "## smaps_rollup")?;
let mut st = if global_max_widths.is_empty() {
display.new_table()
} else {
display.new_constrained_table(&global_max_widths)
};
st.set_header(colored_header_with_sort(
&columns,
"pcomm",
diff.sort_metric_name,
));
let is_compound = group_by == GroupBy::All;
let mut sorted_keys = sorted_process_keys.clone();
if is_compound {
sorted_keys.sort();
}
let mut last_segs: Vec<&str> = Vec::new();
let depth_color = |d: usize| -> comfy_table::Color {
match d {
0 => comfy_table::Color::Green,
1 => comfy_table::Color::Cyan,
_ => comfy_table::Color::DarkGrey,
}
};
for pkey in &sorted_keys {
let (cg_part, display_process) = if is_compound {
pkey.split_once('\x00').unwrap_or(("", pkey))
} else {
("", pkey.as_str())
};
if is_compound {
let segs: Vec<&str> = cg_part.split('/').filter(|s| !s.is_empty()).collect();
let common = segs
.iter()
.zip(last_segs.iter())
.take_while(|(a, b)| a == b)
.count();
if common < last_segs.len() || segs.len() > last_segs.len() {
for (depth, seg) in segs.iter().enumerate().skip(common) {
let indent = " ".repeat(depth);
let label = format!("{indent}{seg}");
let hcells: Vec<comfy_table::Cell> = columns
.iter()
.map(|c| {
if *c == Column::Group {
comfy_table::Cell::new(&label)
.fg(depth_color(depth))
.add_attribute(comfy_table::Attribute::Bold)
} else {
comfy_table::Cell::new("")
}
})
.collect();
st.add_row(hcells);
}
last_segs = segs;
}
}
let a = diff.smaps_rollup_a.get(*pkey);
let b = diff.smaps_rollup_b.get(*pkey);
let mut keys_union: std::collections::BTreeSet<&String> =
a.map(|m| m.keys().collect()).unwrap_or_default();
if let Some(m) = b {
keys_union.extend(m.keys());
}
for sk in keys_union {
let av = a.and_then(|m| m.get(sk).copied());
let bv = b.and_then(|m| m.get(sk).copied());
if av == bv {
continue;
}
let a_cell = av
.map(|v| format_scaled_u64(v, ScaleLadder::Bytes))
.unwrap_or_else(|| "-".to_string());
let b_cell = bv
.map(|v| format_scaled_u64(v, ScaleLadder::Bytes))
.unwrap_or_else(|| "-".to_string());
let value_cell = format!("{a_cell} \u{2192} {b_cell}");
let a_val = av.unwrap_or(0);
let b_val = bv.unwrap_or(0);
let delta = b_val as i128 - a_val as i128;
let delta_cell = if av.is_none() || bv.is_none() {
"-".to_string()
} else {
format_delta_cell(delta as f64, ScaleLadder::Bytes)
};
let pct_cell = if a_val == 0 || av.is_none() || bv.is_none() {
"-".to_string()
} else {
let pct = (delta as f64 / a_val as f64) * 100.0;
format!("{pct:+.1}%")
};
let cg_depth = last_segs.len();
let group_label = format!("{} {}", " ".repeat(cg_depth + 1), display_process);
let delta_pct_opt: Option<f64> = if a_val > 0 && av.is_some() && bv.is_some() {
Some(delta as f64 / a_val as f64)
} else {
None
};
let string_cells: Vec<String> = columns
.iter()
.map(|c| match c {
Column::Group => group_label.clone(),
Column::Threads => String::new(),
Column::Metric => sk.clone(),
Column::Arrow => value_cell.clone(),
Column::Delta => delta_cell.clone(),
Column::Pct => pct_cell.clone(),
Column::Uptime => String::new(),
_ => String::new(),
})
.collect();
let cells: Vec<comfy_table::Cell> = string_cells
.into_iter()
.zip(columns.iter())
.map(|(s, col)| color_diff_cell(s, *col, delta_pct_opt, None, None))
.collect();
st.add_row(cells);
}
}
writeln!(w, "{st}")?;
}
}
if group_by == GroupBy::Cgroup
&& (!diff.cgroup_stats_a.is_empty() || !diff.cgroup_stats_b.is_empty())
{
let mut all_keys: std::collections::BTreeSet<&String> =
diff.cgroup_stats_a.keys().collect();
all_keys.extend(diff.cgroup_stats_b.keys());
if display.is_section_enabled(Section::CgroupStats) {
writeln!(w)?;
let mut ct = display.new_table();
ct.set_header(vec![
"cgroup",
"cpu_usage_usec",
"nr_throttled",
"throttled_usec",
"memory_current",
]);
for key in &all_keys {
let a = diff.cgroup_stats_a.get(*key);
let b = diff.cgroup_stats_b.get(*key);
ct.add_row(vec![
key.to_string(),
cgroup_cell(
a.map(|s| s.cpu.usage_usec),
b.map(|s| s.cpu.usage_usec),
ScaleLadder::Us,
),
cgroup_cell(
a.map(|s| s.cpu.nr_throttled),
b.map(|s| s.cpu.nr_throttled),
ScaleLadder::Unitless,
),
cgroup_cell(
a.map(|s| s.cpu.throttled_usec),
b.map(|s| s.cpu.throttled_usec),
ScaleLadder::Us,
),
cgroup_cell(
a.map(|s| s.memory.current),
b.map(|s| s.memory.current),
ScaleLadder::Bytes,
),
]);
}
writeln!(w, "{ct}")?;
}
if display.is_section_enabled(Section::Limits) {
let any_limits = all_keys.iter().any(|key| {
let has_limits = |s: &CgroupStats| {
s.cpu.max_quota_us.is_some()
|| s.cpu.weight.is_some()
|| s.memory.max.is_some()
|| s.memory.high.is_some()
|| s.pids.current.is_some()
|| s.pids.max.is_some()
};
diff.cgroup_stats_a.get(*key).is_some_and(has_limits)
|| diff.cgroup_stats_b.get(*key).is_some_and(has_limits)
});
if any_limits {
writeln!(w)?;
writeln!(w, "## Cgroup limits / knobs")?;
let mut lt = display.new_table();
lt.set_header(vec![
"cgroup",
"cpu.max",
"cpu.weight",
"memory.max",
"memory.high",
"pids.current",
"pids.max",
]);
for key in &all_keys {
let a = diff.cgroup_stats_a.get(*key);
let b = diff.cgroup_stats_b.get(*key);
let row_has_data = |s: &CgroupStats| {
s.cpu.max_quota_us.is_some()
|| s.cpu.weight.is_some()
|| s.memory.max.is_some()
|| s.memory.high.is_some()
|| s.pids.current.is_some()
|| s.pids.max.is_some()
};
if !a.is_some_and(row_has_data) && !b.is_some_and(row_has_data) {
continue;
}
lt.add_row(vec![
key.to_string(),
cgroup_limits_cell(
a.map(|s| (s.cpu.max_quota_us, s.cpu.max_period_us)),
b.map(|s| (s.cpu.max_quota_us, s.cpu.max_period_us)),
),
cgroup_cell(
a.and_then(|s| s.cpu.weight),
b.and_then(|s| s.cpu.weight),
ScaleLadder::Unitless,
),
cgroup_optional_limit_cell(
a.and_then(|s| s.memory.max),
b.and_then(|s| s.memory.max),
ScaleLadder::Bytes,
),
cgroup_optional_limit_cell(
a.and_then(|s| s.memory.high),
b.and_then(|s| s.memory.high),
ScaleLadder::Bytes,
),
cgroup_cell(
a.and_then(|s| s.pids.current),
b.and_then(|s| s.pids.current),
ScaleLadder::Unitless,
),
cgroup_optional_limit_cell(
a.and_then(|s| s.pids.max),
b.and_then(|s| s.pids.max),
ScaleLadder::Unitless,
),
]);
}
writeln!(w, "{lt}")?;
}
}
if display.is_section_enabled(Section::MemoryStat)
&& all_keys.iter().any(|key| {
let has_stat = |s: &CgroupStats| !s.memory.stat.is_empty();
diff.cgroup_stats_a.get(*key).is_some_and(has_stat)
|| diff.cgroup_stats_b.get(*key).is_some_and(has_stat)
})
{
writeln!(w)?;
writeln!(w, "## memory.stat")?;
let mut mt = display.new_table();
mt.set_header(vec!["cgroup", "key", "value"]);
for key in &all_keys {
let a = diff.cgroup_stats_a.get(*key);
let b = diff.cgroup_stats_b.get(*key);
let mut keys_union: std::collections::BTreeSet<&String> = a
.map(|s| s.memory.stat.keys().collect())
.unwrap_or_default();
if let Some(s) = b {
keys_union.extend(s.memory.stat.keys());
}
for stat_key in keys_union {
let av = a.and_then(|s| s.memory.stat.get(stat_key).copied());
let bv = b.and_then(|s| s.memory.stat.get(stat_key).copied());
if av == bv {
continue;
}
mt.add_row(vec![
key.to_string(),
stat_key.clone(),
cgroup_cell(av, bv, ScaleLadder::Unitless),
]);
}
}
writeln!(w, "{mt}")?;
}
if display.is_section_enabled(Section::MemoryEvents)
&& all_keys.iter().any(|key| {
let has_events = |s: &CgroupStats| !s.memory.events.is_empty();
diff.cgroup_stats_a.get(*key).is_some_and(has_events)
|| diff.cgroup_stats_b.get(*key).is_some_and(has_events)
})
{
writeln!(w)?;
writeln!(w, "## memory.events")?;
let mut et = display.new_table();
et.set_header(vec!["cgroup", "event", "count"]);
for key in &all_keys {
let a = diff.cgroup_stats_a.get(*key);
let b = diff.cgroup_stats_b.get(*key);
let mut keys_union: std::collections::BTreeSet<&String> = a
.map(|s| s.memory.events.keys().collect())
.unwrap_or_default();
if let Some(s) = b {
keys_union.extend(s.memory.events.keys());
}
for event_key in keys_union {
let av = a.and_then(|s| s.memory.events.get(event_key).copied());
let bv = b.and_then(|s| s.memory.events.get(event_key).copied());
if av == bv {
continue;
}
et.add_row(vec![
key.to_string(),
event_key.clone(),
cgroup_cell(av, bv, ScaleLadder::Unitless),
]);
}
}
writeln!(w, "{et}")?;
}
if display.is_section_enabled(Section::Pressure) {
for (resource_name, accessor) in psi_resource_accessors() {
let any_data = all_keys.iter().any(|key| {
let a = diff.cgroup_stats_a.get(*key).map(|s| accessor(&s.psi));
let b = diff.cgroup_stats_b.get(*key).map(|s| accessor(&s.psi));
a.as_ref().is_some_and(psi_resource_has_data)
|| b.as_ref().is_some_and(psi_resource_has_data)
});
if !any_data {
continue;
}
writeln!(w)?;
writeln!(w, "## Pressure / {resource_name}")?;
let mut pt = display.new_table();
pt.set_header(vec!["cgroup", "row", "avg10", "avg60", "avg300", "total"]);
for key in &all_keys {
let a = diff.cgroup_stats_a.get(*key).map(|s| accessor(&s.psi));
let b = diff.cgroup_stats_b.get(*key).map(|s| accessor(&s.psi));
pt.add_row(vec![
key.to_string(),
"some".into(),
format_psi_avg_cell(a.map(|r| r.some.avg10), b.map(|r| r.some.avg10)),
format_psi_avg_cell(a.map(|r| r.some.avg60), b.map(|r| r.some.avg60)),
format_psi_avg_cell(a.map(|r| r.some.avg300), b.map(|r| r.some.avg300)),
cgroup_cell(
a.map(|r| r.some.total_usec),
b.map(|r| r.some.total_usec),
ScaleLadder::Us,
),
]);
pt.add_row(vec![
key.to_string(),
"full".into(),
format_psi_avg_cell(a.map(|r| r.full.avg10), b.map(|r| r.full.avg10)),
format_psi_avg_cell(a.map(|r| r.full.avg60), b.map(|r| r.full.avg60)),
format_psi_avg_cell(a.map(|r| r.full.avg300), b.map(|r| r.full.avg300)),
cgroup_cell(
a.map(|r| r.full.total_usec),
b.map(|r| r.full.total_usec),
ScaleLadder::Us,
),
]);
}
writeln!(w, "{pt}")?;
}
}
}
if display.is_section_enabled(Section::HostPressure)
&& psi_pair_has_data(&diff.host_psi_a, &diff.host_psi_b)
{
for (resource_name, accessor) in psi_resource_accessors() {
let a = accessor(&diff.host_psi_a);
let b = accessor(&diff.host_psi_b);
if !psi_resource_has_data(&a) && !psi_resource_has_data(&b) {
continue;
}
writeln!(w)?;
writeln!(w, "## Host pressure / {resource_name}")?;
let mut pt = display.new_table();
pt.set_header(vec!["row", "avg10", "avg60", "avg300", "total"]);
pt.add_row(vec![
"some".into(),
format_psi_avg_cell(Some(a.some.avg10), Some(b.some.avg10)),
format_psi_avg_cell(Some(a.some.avg60), Some(b.some.avg60)),
format_psi_avg_cell(Some(a.some.avg300), Some(b.some.avg300)),
cgroup_cell(
Some(a.some.total_usec),
Some(b.some.total_usec),
ScaleLadder::Us,
),
]);
pt.add_row(vec![
"full".into(),
format_psi_avg_cell(Some(a.full.avg10), Some(b.full.avg10)),
format_psi_avg_cell(Some(a.full.avg60), Some(b.full.avg60)),
format_psi_avg_cell(Some(a.full.avg300), Some(b.full.avg300)),
cgroup_cell(
Some(a.full.total_usec),
Some(b.full.total_usec),
ScaleLadder::Us,
),
]);
writeln!(w, "{pt}")?;
}
}
let scx_emit = match (&diff.sched_ext_a, &diff.sched_ext_b) {
(None, None) => false,
(Some(_), None) | (None, Some(_)) => true,
(Some(a), Some(b)) => {
a.state != b.state
|| a.switch_all != b.switch_all
|| a.nr_rejected != b.nr_rejected
|| a.hotplug_seq != b.hotplug_seq
|| a.enable_seq != b.enable_seq
}
};
if display.is_section_enabled(Section::SchedExt) && scx_emit {
writeln!(w)?;
writeln!(w, "## sched_ext")?;
let mut at = display.new_table();
at.set_header(vec!["attr", "value"]);
fn state_cell_for(s: Option<&crate::ctprof::SchedExtSysfs>) -> String {
match s {
None => "-".to_string(),
Some(scx) if scx.state.is_empty() => "-".to_string(),
Some(scx) => scx.state.clone(),
}
}
let state_a = state_cell_for(diff.sched_ext_a.as_ref());
let state_b = state_cell_for(diff.sched_ext_b.as_ref());
let state_cell = if state_a == state_b {
state_a
} else {
format!("{state_a} → {state_b}")
};
at.add_row(vec!["state".into(), state_cell]);
at.add_row(vec![
"switch_all".into(),
cgroup_cell(
diff.sched_ext_a.as_ref().map(|s| s.switch_all),
diff.sched_ext_b.as_ref().map(|s| s.switch_all),
ScaleLadder::Unitless,
),
]);
at.add_row(vec![
"nr_rejected".into(),
cgroup_cell(
diff.sched_ext_a.as_ref().map(|s| s.nr_rejected),
diff.sched_ext_b.as_ref().map(|s| s.nr_rejected),
ScaleLadder::Unitless,
),
]);
at.add_row(vec![
"hotplug_seq".into(),
cgroup_cell(
diff.sched_ext_a.as_ref().map(|s| s.hotplug_seq),
diff.sched_ext_b.as_ref().map(|s| s.hotplug_seq),
ScaleLadder::Unitless,
),
]);
at.add_row(vec![
"enable_seq".into(),
cgroup_cell(
diff.sched_ext_a.as_ref().map(|s| s.enable_seq),
diff.sched_ext_b.as_ref().map(|s| s.enable_seq),
ScaleLadder::Unitless,
),
]);
writeln!(w, "{at}")?;
}
let write_only_list = |w: &mut W, label: &str, path: &Path, keys: &[String]| -> fmt::Result {
if keys.is_empty() {
return Ok(());
}
writeln!(
w,
"\n{} group(s) only in {label} ({}):",
keys.len(),
path.display()
)?;
if group_by == GroupBy::All {
let mut sorted: Vec<&str> = keys.iter().map(|s| s.as_str()).collect();
sorted.sort();
let mut last_segs: Vec<&str> = Vec::new();
for k in &sorted {
let (cg, pc) = k.split_once('\x00').unwrap_or(("", k));
let segs: Vec<&str> = cg.split('/').filter(|s| !s.is_empty()).collect();
let common = segs
.iter()
.zip(last_segs.iter())
.take_while(|(a, b)| a == b)
.count();
if common < last_segs.len() || segs.len() > last_segs.len() {
for (depth, seg) in segs.iter().enumerate().skip(common) {
let indent = " ".repeat(depth + 1);
writeln!(w, "{indent}{seg}")?;
}
last_segs = segs;
}
let indent = " ".repeat(last_segs.len() + 1);
writeln!(w, "{indent}{pc}")?;
}
} else {
for k in keys {
writeln!(w, " {k}")?;
}
}
Ok(())
};
write_only_list(w, "baseline", baseline_path, &diff.only_baseline)?;
write_only_list(w, "candidate", candidate_path, &diff.only_candidate)?;
if !diff.fudged_pairs.is_empty() {
writeln!(
w,
"\n\x1b[1;33m## Fudged cgroup matches ({} pair(s))\x1b[0m",
diff.fudged_pairs.len()
)?;
for fp in &diff.fudged_pairs {
writeln!(w, "\n \x1b[36mbaseline:\x1b[0m {}", fp.baseline_key)?;
writeln!(w, " \x1b[36mcandidate:\x1b[0m {}", fp.candidate_key)?;
writeln!(
w,
" overlap: {} thread types, Jaccard: {:.1}%, cascaded children: {}",
fp.overlap,
fp.jaccard * 100.0,
fp.cascaded_children
)?;
if !fp.residual_baseline.is_empty() {
writeln!(
w,
" residual (baseline only): {}",
fp.residual_baseline.join(", ")
)?;
}
if !fp.residual_candidate.is_empty() {
writeln!(
w,
" residual (candidate only): {}",
fp.residual_candidate.join(", ")
)?;
}
}
}
Ok(())
}
/// Render a baseline→candidate cell for a cgroup counter, scaling both values
/// via `ladder`. A missing side is shown as `-`; when both sides are present a
/// formatted delta is appended in parentheses.
pub fn cgroup_cell(baseline: Option<u64>, candidate: Option<u64>, ladder: ScaleLadder) -> String {
    // Shared scaling shorthand so each arm stays a one-liner.
    let scaled = |v: u64| format_scaled_u64(v, ladder);
    match (baseline, candidate) {
        (None, None) => "-".to_string(),
        (Some(b), None) => format!("{} → -", scaled(b)),
        (None, Some(c)) => format!("- → {}", scaled(c)),
        (Some(b), Some(c)) => {
            // Widen to i128 so the subtraction cannot overflow for any u64 pair.
            let delta = c as i128 - b as i128;
            let delta_cell = format_delta_cell(delta as f64, ladder);
            format!("{} → {} ({delta_cell})", scaled(b), scaled(c))
        }
    }
}
/// Render a PSI average (stored as centi-percent) baseline→candidate cell,
/// appending a signed percent delta when both sides are present. A missing
/// side renders as `-`.
pub fn format_psi_avg_cell(baseline: Option<u16>, candidate: Option<u16>) -> String {
    match (baseline, candidate) {
        (None, None) => "-".to_string(),
        (Some(b), None) => format!("{} → -", format_psi_avg_centi_percent(b)),
        (None, Some(c)) => format!("- → {}", format_psi_avg_centi_percent(c)),
        (Some(b), Some(c)) => {
            // Widen before subtracting so u16 values can't underflow.
            let delta = i32::from(c) - i32::from(b);
            // A zero delta renders with a "+" sign, matching the delta cells elsewhere.
            let sign = if delta < 0 { "-" } else { "+" };
            let magnitude = delta.unsigned_abs();
            format!(
                "{} → {} ({sign}{}.{:02}%)",
                format_psi_avg_centi_percent(b),
                format_psi_avg_centi_percent(c),
                magnitude / 100,
                magnitude % 100
            )
        }
    }
}
/// Format a PSI average stored in centi-percent as a percent string,
/// e.g. `1234` → `"12.34%"` and `5` → `"0.05%"`.
pub fn format_psi_avg_centi_percent(v: u16) -> String {
    format!("{}.{:02}%", v / 100, v % 100)
}
/// A PSI resource's display name paired with a getter on [`Psi`].
type PsiAccessor = (&'static str, fn(&Psi) -> PsiResource);
/// The fixed set of PSI resources in display order: cpu, memory, io, irq.
fn psi_resource_accessors() -> [PsiAccessor; 4] {
    // Named fn items instead of closures; both coerce to the same fn pointer.
    fn cpu(p: &Psi) -> PsiResource {
        p.cpu
    }
    fn memory(p: &Psi) -> PsiResource {
        p.memory
    }
    fn io(p: &Psi) -> PsiResource {
        p.io
    }
    fn irq(p: &Psi) -> PsiResource {
        p.irq
    }
    [("cpu", cpu), ("memory", memory), ("io", io), ("irq", irq)]
}
fn psi_pair_has_data(a: &Psi, b: &Psi) -> bool {
psi_has_data(a) || psi_has_data(b)
}
fn psi_has_data(p: &Psi) -> bool {
[p.cpu, p.memory, p.io, p.irq]
.iter()
.any(psi_resource_has_data)
}
/// True when either the `some` or `full` half of a PSI resource has any
/// nonzero average or accumulated stall time.
fn psi_resource_has_data(r: &PsiResource) -> bool {
    fn half_nonzero(h: &PsiHalf) -> bool {
        h.avg10 != 0 || h.avg60 != 0 || h.avg300 != 0 || h.total_usec != 0
    }
    [&r.some, &r.full].into_iter().any(half_nonzero)
}
#[cfg(test)]
#[allow(clippy::field_reassign_with_default)]
mod tests {
use super::*;
use crate::metric_types::{
Bytes, CategoricalString, CpuSet, MonotonicCount, MonotonicNs, OrdinalI32, PeakNs,
};
// Test helper: a minimally-populated ThreadState with the given process and
// thread names; pinned to tid/tgid 1, root cgroup, SCHED_OTHER, nice 0,
// CPUs 0-3. All other fields come from ThreadState::default().
fn make_thread(pcomm: &str, comm: &str) -> ThreadState {
ThreadState {
tid: 1,
tgid: 1,
pcomm: pcomm.into(),
comm: comm.into(),
cgroup: "/".into(),
start_time_clock_ticks: 0,
policy: CategoricalString("SCHED_OTHER".into()),
nice: OrdinalI32(0),
cpu_affinity: CpuSet(vec![0, 1, 2, 3]),
..ThreadState::default()
}
}
// Test helper: wraps a thread list in an otherwise-empty CtprofSnapshot
// (no host info, no cgroup stats, default PSI, no sched_ext).
fn snap_with(threads: Vec<ThreadState>) -> CtprofSnapshot {
CtprofSnapshot {
captured_at_unix_ns: 0,
host: None,
threads,
cgroup_stats: BTreeMap::new(),
probe_summary: None,
parse_summary: None,
taskstats_summary: None,
psi: crate::ctprof::Psi::default(),
sched_ext: None,
}
}
// Test helper: CgroupStats with only the four headline counters set
// (cpu usage/throttling and memory.current); everything else defaulted.
fn simple_cgroup_stats(
cpu_usage_usec: u64,
nr_throttled: u64,
throttled_usec: u64,
memory_current: u64,
) -> CgroupStats {
let mut cs = CgroupStats::default();
cs.cpu.usage_usec = cpu_usage_usec;
cs.cpu.nr_throttled = nr_throttled;
cs.cpu.throttled_usec = throttled_usec;
cs.memory.current = memory_current;
cs
}
// Sum-aggregated metrics add across every thread in the group.
#[test]
fn sum_aggregation_totals_across_group() {
let mut a = make_thread("app", "w1");
a.run_time_ns = MonotonicNs(1_000);
let mut b = make_thread("app", "w2");
b.run_time_ns = MonotonicNs(3_000);
let v = aggregate(AggRule::SumNs(|t| t.run_time_ns), &[&a, &b]);
match v {
Aggregated::Sum(s) => assert_eq!(s, 4_000),
other => panic!("expected Sum, got {other:?}"),
}
}
// Summation saturates at u64::MAX instead of wrapping on overflow.
#[test]
fn sum_saturates_on_overflow() {
let mut a = make_thread("app", "w1");
a.run_time_ns = MonotonicNs(u64::MAX);
let mut b = make_thread("app", "w2");
b.run_time_ns = MonotonicNs(5);
let v = aggregate(AggRule::SumNs(|t| t.run_time_ns), &[&a, &b]);
match v {
Aggregated::Sum(s) => assert_eq!(s, u64::MAX),
other => panic!("expected Sum, got {other:?}"),
}
}
// Range aggregation reports the min/max over the group's ordinal values.
#[test]
fn ordinal_range_picks_extremes() {
let mut a = make_thread("app", "w1");
a.nice = OrdinalI32(-5);
let mut b = make_thread("app", "w2");
b.nice = OrdinalI32(10);
let v = aggregate(AggRule::RangeI32(|t| t.nice), &[&a, &b]);
match v {
Aggregated::OrdinalRange { min, max } => {
assert_eq!(min, -5);
assert_eq!(max, 10);
}
other => panic!("expected OrdinalRange, got {other:?}"),
}
}
// Mode aggregation surfaces the most frequent value plus its count and total.
#[test]
fn mode_aggregation_picks_most_frequent() {
let mut a = make_thread("app", "w1");
a.policy = "SCHED_OTHER".into();
let mut b = make_thread("app", "w2");
b.policy = "SCHED_OTHER".into();
let mut c = make_thread("app", "w3");
c.policy = "SCHED_FIFO".into();
let v = aggregate(AggRule::Mode(|t| t.policy.clone()), &[&a, &b, &c]);
match v {
Aggregated::Mode {
value,
count,
total,
} => {
assert_eq!(value, "SCHED_OTHER");
assert_eq!(count, 2);
assert_eq!(total, 3);
}
other => panic!("expected Mode, got {other:?}"),
}
}
// When every thread shares one CPU set, the uniform set is preserved.
#[test]
fn affinity_uniform_preserves_cpuset() {
let a = make_thread("app", "w1");
let b = make_thread("app", "w2");
let v = aggregate(AggRule::Affinity(|t| t.cpu_affinity.clone()), &[&a, &b]);
match v {
Aggregated::Affinity(s) => {
assert_eq!(s.min_cpus, 4);
assert_eq!(s.max_cpus, 4);
assert_eq!(s.uniform, Some(vec![0, 1, 2, 3]));
}
other => panic!("expected Affinity, got {other:?}"),
}
}
// Mixed CPU sets keep min/max CPU counts but drop the uniform set.
#[test]
fn affinity_heterogeneous_drops_uniform() {
let a = make_thread("app", "w1");
let mut b = make_thread("app", "w2");
b.cpu_affinity = CpuSet(vec![4, 5]);
let v = aggregate(AggRule::Affinity(|t| t.cpu_affinity.clone()), &[&a, &b]);
match v {
Aggregated::Affinity(s) => {
assert_eq!(s.min_cpus, 2);
assert_eq!(s.max_cpus, 4);
assert!(s.uniform.is_none());
}
other => panic!("expected Affinity, got {other:?}"),
}
}
// CPU list rendering collapses contiguous runs into "a-b" ranges.
#[test]
fn format_cpu_range_collapses_contiguous_runs() {
assert_eq!(format_cpu_range(&[0, 1, 2, 3]), "0-3");
assert_eq!(format_cpu_range(&[0, 1, 4, 5, 7]), "0-1,4-5,7");
assert_eq!(format_cpu_range(&[3]), "3");
assert_eq!(format_cpu_range(&[]), "");
}
// A matching flatten pattern replaces the cgroup path with the pattern itself.
#[test]
fn flatten_cgroup_path_collapses_via_pattern() {
let pats = compile_flatten_patterns(&["/kubepods/*/workload".into()]);
let out = flatten_cgroup_path("/kubepods/pod-abc-123/workload", &pats);
assert_eq!(out, "/kubepods/*/workload");
}
// Paths that match no pattern pass through unchanged.
#[test]
fn flatten_cgroup_path_falls_through_unmatched() {
let pats = compile_flatten_patterns(&["/kubepods/*/workload".into()]);
assert_eq!(
flatten_cgroup_path("/system.slice/sshd.service", &pats),
"/system.slice/sshd.service",
);
}
// A group present in both snapshots yields per-metric rows with delta and
// delta_pct (here: 1000 → 2000 is delta 1000, +100%).
#[test]
fn compare_emits_rows_for_matched_groups() {
let mut ta = make_thread("app", "w1");
ta.run_time_ns = MonotonicNs(1_000);
let mut tb = make_thread("app", "w1");
tb.run_time_ns = MonotonicNs(2_000);
let a = snap_with(vec![ta]);
let b = snap_with(vec![tb]);
let diff = compare(&a, &b, &CompareOptions::default());
let run_time = diff
.rows
.iter()
.find(|r| r.metric_name == "run_time_ns")
.expect("run_time_ns row");
assert_eq!(run_time.group_key, "app");
assert_eq!(run_time.delta, Some(1_000.0));
assert!((run_time.delta_pct.unwrap() - 1.0).abs() < 1e-9);
}
// Groups seen on only one side land in only_baseline / only_candidate.
#[test]
fn compare_reports_unmatched_groups() {
let a = snap_with(vec![make_thread("only_a", "w1")]);
let b = snap_with(vec![make_thread("only_b", "w1")]);
let diff = compare(&a, &b, &CompareOptions::default());
assert_eq!(diff.only_baseline, vec!["only_a".to_string()]);
assert_eq!(diff.only_candidate, vec!["only_b".to_string()]);
}
// Default ordering places the larger |delta_pct| first (+900% before +10%).
#[test]
fn compare_sorts_by_abs_delta_pct_descending() {
let mut a1 = make_thread("big", "w");
a1.run_time_ns = MonotonicNs(100);
let mut a2 = make_thread("small", "w");
a2.run_time_ns = MonotonicNs(1_000);
let mut b1 = make_thread("big", "w");
b1.run_time_ns = MonotonicNs(1_000);
let mut b2 = make_thread("small", "w");
b2.run_time_ns = MonotonicNs(1_100);
let diff = compare(
&snap_with(vec![a1, a2]),
&snap_with(vec![b1, b2]),
&CompareOptions::default(),
);
let run_rows: Vec<&DiffRow> = diff
.rows
.iter()
.filter(|r| r.metric_name == "run_time_ns")
.collect();
assert_eq!(run_rows[0].group_key, "big");
assert_eq!(run_rows[1].group_key, "small");
}
// With cgroup grouping, flatten patterns merge distinct pod paths into one
// group key so baseline/candidate match instead of appearing one-sided.
#[test]
fn group_by_cgroup_applies_flatten_patterns() {
let mut ta = make_thread("app", "w1");
ta.cgroup = "/kubepods/pod-xxx/workload".into();
ta.run_time_ns = MonotonicNs(1_000);
let mut tb = make_thread("app", "w1");
tb.cgroup = "/kubepods/pod-yyy/workload".into();
tb.run_time_ns = MonotonicNs(2_000);
let opts = CompareOptions {
group_by: GroupBy::Cgroup.into(),
cgroup_flatten: vec!["/kubepods/*/workload".into()],
no_thread_normalize: false,
no_cg_normalize: false,
sort_by: Vec::new(),
};
let diff = compare(&snap_with(vec![ta]), &snap_with(vec![tb]), &opts);
assert!(diff.only_baseline.is_empty(), "{:?}", diff.only_baseline);
assert!(diff.only_candidate.is_empty(), "{:?}", diff.only_candidate,);
assert!(
diff.rows
.iter()
.any(|r| r.group_key == "/kubepods/*/workload"),
"rows={:?}",
diff.rows.iter().map(|r| &r.group_key).collect::<Vec<_>>(),
);
}
// Cgroup grouping carries each side's CgroupStats through to the diff.
#[test]
fn group_by_cgroup_surfaces_enrichment_on_diff() {
let mut ta = make_thread("app", "w1");
ta.cgroup = "/app".into();
let mut snap_a = snap_with(vec![ta]);
snap_a
.cgroup_stats
.insert("/app".into(), simple_cgroup_stats(100, 1, 50, 1 << 20));
let mut tb = make_thread("app", "w1");
tb.cgroup = "/app".into();
let mut snap_b = snap_with(vec![tb]);
snap_b
.cgroup_stats
.insert("/app".into(), simple_cgroup_stats(500, 3, 250, 2 << 20));
let opts = CompareOptions {
group_by: GroupBy::Cgroup.into(),
cgroup_flatten: vec![],
no_thread_normalize: false,
no_cg_normalize: false,
sort_by: Vec::new(),
};
let diff = compare(&snap_a, &snap_b, &opts);
assert_eq!(diff.cgroup_stats_a["/app"].cpu.usage_usec, 100);
assert_eq!(diff.cgroup_stats_b["/app"].cpu.usage_usec, 500);
}
// Categorical (Mode) metrics carry no numeric delta; both sides' mode values
// are preserved so the renderer can show "same" vs "differs".
#[test]
fn categorical_row_labels_same_or_differs() {
let mut ta = make_thread("app", "w1");
ta.policy = "SCHED_OTHER".into();
let mut tb = make_thread("app", "w1");
tb.policy = "SCHED_FIFO".into();
let diff = compare(
&snap_with(vec![ta]),
&snap_with(vec![tb]),
&CompareOptions::default(),
);
let policy_row = diff
.rows
.iter()
.find(|r| r.metric_name == "policy")
.expect("policy row");
assert!(policy_row.delta.is_none());
match (&policy_row.baseline, &policy_row.candidate) {
(Aggregated::Mode { value: a, .. }, Aggregated::Mode { value: b, .. }) => {
assert_eq!(a, "SCHED_OTHER");
assert_eq!(b, "SCHED_FIFO");
}
_ => panic!("expected two Mode aggregates"),
}
}
// A zero baseline makes a percentage undefined, so delta_pct stays None
// even though the absolute delta is reported.
#[test]
fn delta_pct_absent_when_baseline_zero() {
let mut ta = make_thread("app", "w1");
ta.run_time_ns = MonotonicNs(0);
let mut tb = make_thread("app", "w1");
tb.run_time_ns = MonotonicNs(100);
let diff = compare(
&snap_with(vec![ta]),
&snap_with(vec![tb]),
&CompareOptions::default(),
);
let row = diff
.rows
.iter()
.find(|r| r.metric_name == "run_time_ns")
.expect("row");
assert_eq!(row.delta, Some(100.0));
assert!(row.delta_pct.is_none());
}
// Two empty snapshots diff to nothing at all.
#[test]
fn empty_snapshots_produce_empty_diff() {
let diff = compare(
&snap_with(vec![]),
&snap_with(vec![]),
&CompareOptions::default(),
);
assert!(diff.rows.is_empty());
assert!(diff.only_baseline.is_empty());
assert!(diff.only_candidate.is_empty());
}
// A group appearing only in the candidate produces no rows, just an
// only_candidate entry.
#[test]
fn baseline_empty_surfaces_only_candidate_groups() {
let t = make_thread("new_proc", "t1");
let diff = compare(
&snap_with(vec![]),
&snap_with(vec![t]),
&CompareOptions::default(),
);
assert!(diff.rows.is_empty());
assert!(diff.only_baseline.is_empty());
assert_eq!(diff.only_candidate, vec!["new_proc".to_string()]);
}
// Diffing a snapshot against itself yields Some(0.0) for every numeric
// metric; only Mode-style metrics are permitted a None delta.
#[test]
fn identical_snapshots_produce_zero_deltas() {
let mut t = make_thread("app", "w1");
t.run_time_ns = MonotonicNs(1_000);
t.voluntary_csw = MonotonicCount(50);
let snap = snap_with(vec![t]);
let diff = compare(&snap, &snap, &CompareOptions::default());
let mode_metrics: std::collections::BTreeSet<&str> = CTPROF_METRICS
.iter()
.filter(|m| {
matches!(
m.rule,
AggRule::Mode(_) | AggRule::ModeChar(_) | AggRule::ModeBool(_),
)
})
.map(|m| m.name)
.collect();
for row in &diff.rows {
match row.delta {
Some(d) => assert_eq!(d, 0.0, "metric {} had nonzero delta", row.metric_name),
None => assert!(
mode_metrics.contains(row.metric_name),
"non-Mode metric {} produced a None-delta — \
identical snapshots should yield Some(0.0) for \
numeric metrics; only Mode-aggregated metrics \
({:?}) are allowed to surface None",
row.metric_name,
mode_metrics,
),
}
}
}
// One matched group emits exactly one row per defined metric.
#[test]
fn single_thread_group_yields_one_row_per_metric() {
let a = make_thread("solo", "t");
let mut b = make_thread("solo", "t");
b.run_time_ns = MonotonicNs(1);
let diff = compare(
&snap_with(vec![a]),
&snap_with(vec![b]),
&CompareOptions::default(),
);
let solo_rows: Vec<&DiffRow> = diff.rows.iter().filter(|r| r.group_key == "solo").collect();
assert_eq!(solo_rows.len(), CTPROF_METRICS.len());
}
// All-zero counters still produce rows: delta is Some(0.0), pct undefined.
#[test]
fn all_zero_metrics_emit_zero_delta_rows() {
let a = make_thread("quiet", "t");
let b = make_thread("quiet", "t");
let diff = compare(
&snap_with(vec![a]),
&snap_with(vec![b]),
&CompareOptions::default(),
);
let run_time = diff
.rows
.iter()
.find(|r| r.metric_name == "run_time_ns")
.expect("row");
assert_eq!(run_time.delta, Some(0.0));
assert!(run_time.delta_pct.is_none());
}
// GroupBy::Comm merges same-named threads across different processes, so
// the "worker" group sums both processes' runtimes on each side.
#[test]
fn group_by_comm_aggregates_across_processes() {
let mut ta = make_thread("procA", "worker");
ta.run_time_ns = MonotonicNs(100);
let mut tb = make_thread("procB", "worker");
tb.run_time_ns = MonotonicNs(200);
let mut candidate = make_thread("procA", "worker");
candidate.run_time_ns = MonotonicNs(500);
let mut candidate2 = make_thread("procB", "worker");
candidate2.run_time_ns = MonotonicNs(500);
let diff = compare(
&snap_with(vec![ta, tb]),
&snap_with(vec![candidate, candidate2]),
&CompareOptions {
group_by: GroupBy::Comm.into(),
cgroup_flatten: vec![],
no_thread_normalize: false,
no_cg_normalize: false,
sort_by: Vec::new(),
},
);
let row = diff
.rows
.iter()
.find(|r| r.metric_name == "run_time_ns" && r.group_key == "worker")
.expect("worker row");
assert_eq!(row.thread_count_a, 2);
assert_eq!(row.thread_count_b, 2);
assert_eq!(row.delta, Some(700.0));
}
// A growing group surfaces its thread counts on the row, and write_diff
// renders them as "1→2" (U+2192 arrow).
#[test]
fn thread_count_diff_surfaces_when_group_grows() {
let ta = make_thread("pool", "t");
let tb1 = make_thread("pool", "t");
let tb2 = make_thread("pool", "t");
let diff = compare(
&snap_with(vec![ta]),
&snap_with(vec![tb1, tb2]),
&CompareOptions::default(),
);
let row = diff
.rows
.iter()
.find(|r| r.metric_name == "run_time_ns")
.expect("row");
assert_eq!(row.thread_count_a, 1);
assert_eq!(row.thread_count_b, 2);
let mut out = String::new();
write_diff(
&mut out,
&diff,
Path::new("a"),
Path::new("b"),
GroupBy::Pcomm,
&DisplayOptions::default(),
)
.unwrap();
assert!(
out.contains("1\u{2192}2"),
"expected thread-count diff rendering, got:\n{out}",
);
}
// Flatten patterns apply in declaration order; the first match wins even
// when a later, broader pattern would also match.
#[test]
fn flatten_first_match_wins_over_later_pattern() {
let pats =
compile_flatten_patterns(&["/kubepods/*/workload".into(), "/kubepods/**".into()]);
assert_eq!(
flatten_cgroup_path("/kubepods/pod-abc/workload", &pats),
"/kubepods/*/workload",
);
}
// Flattening sums counters (usage, throttles) but takes the max of gauges
// (memory.current) across the collapsed cgroups.
#[test]
fn flatten_cgroup_stats_collapses_overlapping_paths() {
let mut stats = BTreeMap::new();
stats.insert(
"/kubepods/pod-a/workload".into(),
simple_cgroup_stats(100, 1, 10, 500),
);
stats.insert(
"/kubepods/pod-b/workload".into(),
simple_cgroup_stats(200, 2, 20, 800),
);
let pats = compile_flatten_patterns(&["/kubepods/*/workload".into()]);
let out = flatten_cgroup_stats(&stats, &pats, None);
let agg = &out["/kubepods/*/workload"];
assert_eq!(agg.cpu.usage_usec, 300);
assert_eq!(agg.cpu.nr_throttled, 3);
assert_eq!(agg.cpu.throttled_usec, 30);
assert_eq!(agg.memory.current, 800);
}
// When merging caps, None means "no limit" and dominates: any unlimited
// contributor makes the merged bucket unlimited.
#[test]
fn merge_max_option_propagates_no_limit() {
assert_eq!(merge_max_option(Some(100), Some(200)), Some(200));
assert_eq!(merge_max_option(Some(200), Some(100)), Some(200));
assert_eq!(merge_max_option(Some(50), Some(50)), Some(50));
assert_eq!(merge_max_option(None, Some(100)), None);
assert_eq!(merge_max_option(Some(100), None), None);
assert_eq!(merge_max_option(None, None), None);
}
// Same None-dominance for floors: any contributor without a floor leaves
// the merged bucket floorless.
#[test]
fn merge_min_option_propagates_no_floor() {
assert_eq!(merge_min_option(Some(100), Some(200)), Some(100));
assert_eq!(merge_min_option(Some(200), Some(100)), Some(100));
assert_eq!(merge_min_option(None, Some(100)), None);
assert_eq!(merge_min_option(Some(100), None), None);
assert_eq!(merge_min_option(None, None), None);
}
// Key/value counter maps merge per key: shared keys sum, one-sided keys
// are kept or copied.
#[test]
fn merge_kv_counters_per_key_sum() {
let mut agg: BTreeMap<String, u64> = BTreeMap::new();
agg.insert("oom_kill".into(), 10);
agg.insert("high".into(), 20);
let mut src: BTreeMap<String, u64> = BTreeMap::new();
src.insert("oom_kill".into(), 5);
src.insert("low".into(), 7);
merge_kv_counters(&mut agg, &src);
assert_eq!(agg.get("oom_kill"), Some(&15), "common key sums");
assert_eq!(agg.get("high"), Some(&20), "agg-only key preserved");
assert_eq!(agg.get("low"), Some(&7), "src-only key copied");
}
// memory.stat merging classifies keys: gauges (anon/file/slab) take the
// max, counters (pgfault, workingset_refault_*) sum.
#[test]
fn merge_memory_stat_dispatches_gauge_vs_counter() {
let mut agg: BTreeMap<String, u64> = BTreeMap::new();
agg.insert("anon".into(), 1_000_000);
agg.insert("file".into(), 500_000);
agg.insert("slab".into(), 800_000);
agg.insert("pgfault".into(), 100);
agg.insert("workingset_refault_anon".into(), 50);
let mut src: BTreeMap<String, u64> = BTreeMap::new();
src.insert("anon".into(), 2_000_000);
src.insert("file".into(), 100_000);
src.insert("slab".into(), 300_000);
src.insert("pgfault".into(), 25);
src.insert("workingset_refault_anon".into(), 10);
merge_memory_stat(&mut agg, &src);
assert_eq!(agg.get("anon"), Some(&2_000_000), "anon is gauge → max");
assert_eq!(agg.get("file"), Some(&500_000), "file is gauge → max");
assert_eq!(agg.get("slab"), Some(&800_000), "slab is gauge → max");
assert_eq!(agg.get("pgfault"), Some(&125), "pgfault is counter → sum");
assert_eq!(
agg.get("workingset_refault_anon"),
Some(&60),
"workingset_refault_anon is counter → sum"
);
}
// End-to-end flatten of two sibling cgroups: counters sum, quota/weight/caps
// take the max, and both kv maps (stat, events) merge per their own rules.
#[test]
fn flatten_cgroup_stats_merges_limits_and_kv_maps() {
let mut a = CgroupStats::default();
a.cpu.usage_usec = 100;
a.cpu.max_quota_us = Some(50_000);
a.cpu.max_period_us = 100_000;
a.cpu.weight = Some(100);
a.memory.max = Some(1_000_000);
a.memory.high = Some(800_000);
a.memory.low = Some(400_000);
a.memory.stat.insert("anon".into(), 1000);
a.memory.events.insert("oom_kill".into(), 0);
a.pids.current = Some(10);
a.pids.max = Some(1024);
let mut b = CgroupStats::default();
b.cpu.usage_usec = 200;
b.cpu.max_quota_us = Some(80_000);
b.cpu.max_period_us = 100_000;
b.cpu.weight = Some(300);
b.memory.max = Some(2_000_000);
b.memory.high = Some(1_500_000);
b.memory.low = Some(200_000);
b.memory.stat.insert("anon".into(), 500);
b.memory.stat.insert("file".into(), 200);
b.memory.events.insert("oom_kill".into(), 1);
b.pids.current = Some(5);
b.pids.max = Some(2048);
let mut stats = BTreeMap::new();
stats.insert("/a".into(), a);
stats.insert("/b".into(), b);
let pats = compile_flatten_patterns(&["/[ab]".into()]);
let out = flatten_cgroup_stats(&stats, &pats, None);
let agg = &out["/[ab]"];
assert_eq!(agg.cpu.usage_usec, 300);
assert_eq!(agg.cpu.max_quota_us, Some(80_000));
assert_eq!(agg.cpu.weight, Some(300));
assert_eq!(agg.memory.max, Some(2_000_000));
assert_eq!(agg.memory.high, Some(1_500_000));
assert_eq!(agg.memory.low, Some(200_000));
assert_eq!(agg.memory.stat.get("anon"), Some(&1000));
assert_eq!(agg.memory.stat.get("file"), Some(&200));
assert_eq!(agg.memory.events.get("oom_kill"), Some(&1));
assert_eq!(agg.pids.current, Some(15));
assert_eq!(agg.pids.max, Some(2048));
}
// A bucket with exactly one contributor must pass every concrete limit and
// counter through untouched (no merge artifacts).
#[test]
fn flatten_cgroup_stats_single_contributor_preserves_concrete_limits() {
let mut a = CgroupStats::default();
a.cpu.usage_usec = 12_345;
a.cpu.max_quota_us = Some(50_000);
a.cpu.max_period_us = 100_000;
a.cpu.weight = Some(150);
a.cpu.weight_nice = Some(0);
a.memory.current = 1_500_000;
a.memory.max = Some(2 << 30);
a.memory.high = Some(1 << 30);
a.memory.low = Some(1 << 28);
a.memory.min = Some(1 << 27);
a.pids.current = Some(42);
a.pids.max = Some(2048);
let mut stats = BTreeMap::new();
stats.insert("/lone".into(), a);
let out = flatten_cgroup_stats(&stats, &[], None);
let agg = &out["/lone"];
assert_eq!(agg.cpu.usage_usec, 12_345);
assert_eq!(agg.cpu.max_quota_us, Some(50_000));
assert_eq!(agg.cpu.max_period_us, 100_000);
assert_eq!(agg.cpu.weight, Some(150));
assert_eq!(agg.cpu.weight_nice, Some(0));
assert_eq!(agg.memory.current, 1_500_000);
assert_eq!(agg.memory.max, Some(2 << 30));
assert_eq!(agg.memory.high, Some(1 << 30));
assert_eq!(agg.memory.low, Some(1 << 28));
assert_eq!(agg.memory.min, Some(1 << 27));
assert_eq!(agg.pids.current, Some(42));
assert_eq!(agg.pids.max, Some(2048));
}
// A single unlimited/unprotected contributor makes the whole flattened
// bucket unlimited (None dominates, matching merge_max/min_option).
#[test]
fn flatten_cgroup_stats_propagates_no_limit() {
let mut a = CgroupStats::default();
a.memory.max = None;
a.memory.low = None;
let mut b = CgroupStats::default();
b.memory.max = Some(1_000_000);
b.memory.low = Some(500_000);
let mut stats = BTreeMap::new();
stats.insert("/a".into(), a);
stats.insert("/b".into(), b);
let pats = compile_flatten_patterns(&["/[ab]".into()]);
let out = flatten_cgroup_stats(&stats, &pats, None);
let agg = &out["/[ab]"];
assert_eq!(agg.memory.max, None, "any unbounded → bucket unbounded");
assert_eq!(agg.memory.low, None, "any no-floor → bucket unprotected");
}
// The "Cgroup limits / knobs" table lists only cgroups that actually have a
// cap/weight/pids value on either side; counter-only cgroups are filtered out.
// The assertion scopes its search to the section between the limits header and
// the next "## " heading so other tables can't cause false matches.
#[test]
fn write_diff_limits_table_skips_cgroups_without_caps() {
let mut diff = CtprofDiff::default();
diff.cgroup_stats_a.insert(
"/counters-only".into(),
simple_cgroup_stats(100, 0, 0, 1024),
);
diff.cgroup_stats_b.insert(
"/counters-only".into(),
simple_cgroup_stats(200, 0, 0, 2048),
);
let mut capped_a = CgroupStats::default();
capped_a.memory.max = Some(1 << 30);
capped_a.cpu.weight = Some(150);
let mut capped_b = CgroupStats::default();
capped_b.memory.max = Some(1 << 30);
capped_b.cpu.weight = Some(150);
diff.cgroup_stats_a.insert("/capped".into(), capped_a);
diff.cgroup_stats_b.insert("/capped".into(), capped_b);
let mut out = String::new();
write_diff(
&mut out,
&diff,
Path::new("a"),
Path::new("b"),
GroupBy::Cgroup,
&DisplayOptions::default(),
)
.unwrap();
assert!(
out.contains("## Cgroup limits / knobs"),
"limits header missing:\n{out}",
);
let header_pos = out.find("## Cgroup limits / knobs").unwrap();
let after_header = &out[header_pos..];
let next_section = after_header
.find("\n## ")
.map(|p| p + 1)
.unwrap_or(after_header.len());
let limits_section = &after_header[..next_section];
assert!(
limits_section.contains("/capped"),
"capped cgroup should appear in limits table:\n{limits_section}",
);
assert!(
!limits_section.contains("/counters-only"),
"counters-only cgroup should NOT appear (no caps/weight/pids):\n{limits_section}",
);
}
#[test]
fn write_diff_memory_stat_skips_unchanged_rows() {
    // The memory.stat table must list only keys whose value changed between
    // snapshots; identical gauges are noise and get suppressed.
    let mut diff = CtprofDiff::default();
    let mut before = CgroupStats::default();
    before.memory.stat.insert("pgfault".into(), 100);
    before.memory.stat.insert("anon".into(), 1_000_000);
    let mut after = CgroupStats::default();
    after.memory.stat.insert("pgfault".into(), 250);
    after.memory.stat.insert("anon".into(), 1_000_000);
    diff.cgroup_stats_a.insert("/app".into(), before);
    diff.cgroup_stats_b.insert("/app".into(), after);
    let mut rendered = String::new();
    write_diff(
        &mut rendered,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Cgroup,
        &DisplayOptions::default(),
    )
    .unwrap();
    // Isolate the memory.stat section from the full report.
    let start = rendered
        .find("## memory.stat")
        .expect("memory.stat header missing");
    let tail = &rendered[start..];
    let end = tail.find("\n## ").map_or(tail.len(), |p| p + 1);
    let stat_section = &tail[..end];
    assert!(
        stat_section.contains("pgfault"),
        "changed key (pgfault: 100 → 250) must appear:\n{stat_section}",
    );
    assert!(
        !stat_section.contains("anon"),
        "unchanged gauge key (anon: 1M = 1M) must be suppressed:\n{stat_section}",
    );
}
#[test]
fn write_diff_memory_events_skips_unchanged_rows() {
    // memory.events rows with identical counts on both sides are suppressed;
    // only changed events are worth surfacing.
    let mut diff = CtprofDiff::default();
    let mut before = CgroupStats::default();
    before.memory.events.insert("low".into(), 5);
    before.memory.events.insert("oom_kill".into(), 0);
    let mut after = CgroupStats::default();
    after.memory.events.insert("low".into(), 12);
    after.memory.events.insert("oom_kill".into(), 0);
    diff.cgroup_stats_a.insert("/app".into(), before);
    diff.cgroup_stats_b.insert("/app".into(), after);
    let mut rendered = String::new();
    write_diff(
        &mut rendered,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Cgroup,
        &DisplayOptions::default(),
    )
    .unwrap();
    // Isolate the memory.events section from the full report.
    let start = rendered
        .find("## memory.events")
        .expect("memory.events header missing");
    let tail = &rendered[start..];
    let end = tail.find("\n## ").map_or(tail.len(), |p| p + 1);
    let events_section = &tail[..end];
    assert!(
        events_section.contains("low"),
        "changed event (low: 5 → 12) must appear:\n{events_section}",
    );
    assert!(
        !events_section.contains("oom_kill"),
        "unchanged event (oom_kill: 0 = 0) must be suppressed:\n{events_section}",
    );
}
#[test]
fn compile_flatten_patterns_skips_malformed() {
    // The malformed glob is silently dropped; only the valid one survives.
    let compiled = compile_flatten_patterns(&["[invalid".into(), "/ok/*".into()]);
    let as_strs: Vec<&str> = compiled.iter().map(|p| p.as_str()).collect();
    assert_eq!(as_strs, ["/ok/*"]);
}
#[test]
fn sum_metric_accessors_read_expected_field() {
use crate::metric_types::{Bytes, ClockTicks, MonotonicCount, MonotonicNs};
type MetricSetter = fn(&mut ThreadState);
let cases: &[(&str, MetricSetter)] = &[
("run_time_ns", |t| t.run_time_ns = MonotonicNs(1)),
("wait_time_ns", |t| t.wait_time_ns = MonotonicNs(1)),
("timeslices", |t| t.timeslices = MonotonicCount(1)),
("voluntary_csw", |t| t.voluntary_csw = MonotonicCount(1)),
("nonvoluntary_csw", |t| {
t.nonvoluntary_csw = MonotonicCount(1)
}),
("nr_wakeups", |t| t.nr_wakeups = MonotonicCount(1)),
("nr_wakeups_local", |t| {
t.nr_wakeups_local = MonotonicCount(1)
}),
("nr_wakeups_remote", |t| {
t.nr_wakeups_remote = MonotonicCount(1)
}),
("nr_wakeups_sync", |t| t.nr_wakeups_sync = MonotonicCount(1)),
("nr_wakeups_migrate", |t| {
t.nr_wakeups_migrate = MonotonicCount(1)
}),
("nr_wakeups_affine", |t| {
t.nr_wakeups_affine = MonotonicCount(1)
}),
("nr_wakeups_affine_attempts", |t| {
t.nr_wakeups_affine_attempts = MonotonicCount(1)
}),
("nr_migrations", |t| t.nr_migrations = MonotonicCount(1)),
("nr_forced_migrations", |t| {
t.nr_forced_migrations = MonotonicCount(1)
}),
("nr_failed_migrations_affine", |t| {
t.nr_failed_migrations_affine = MonotonicCount(1)
}),
("nr_failed_migrations_running", |t| {
t.nr_failed_migrations_running = MonotonicCount(1)
}),
("nr_failed_migrations_hot", |t| {
t.nr_failed_migrations_hot = MonotonicCount(1)
}),
("wait_sum", |t| t.wait_sum = MonotonicNs(1)),
("wait_count", |t| t.wait_count = MonotonicCount(1)),
("voluntary_sleep_ns", |t| {
t.voluntary_sleep_ns = MonotonicNs(1)
}),
("block_sum", |t| t.block_sum = MonotonicNs(1)),
("iowait_sum", |t| t.iowait_sum = MonotonicNs(1)),
("iowait_count", |t| t.iowait_count = MonotonicCount(1)),
("allocated_bytes", |t| t.allocated_bytes = Bytes(1)),
("deallocated_bytes", |t| t.deallocated_bytes = Bytes(1)),
("minflt", |t| t.minflt = MonotonicCount(1)),
("majflt", |t| t.majflt = MonotonicCount(1)),
("utime_clock_ticks", |t| t.utime_clock_ticks = ClockTicks(1)),
("stime_clock_ticks", |t| t.stime_clock_ticks = ClockTicks(1)),
("rchar", |t| t.rchar = Bytes(1)),
("wchar", |t| t.wchar = Bytes(1)),
("syscr", |t| t.syscr = MonotonicCount(1)),
("syscw", |t| t.syscw = MonotonicCount(1)),
("read_bytes", |t| t.read_bytes = Bytes(1)),
("write_bytes", |t| t.write_bytes = Bytes(1)),
("cancelled_write_bytes", |t| {
t.cancelled_write_bytes = Bytes(1)
}),
];
for (name, set) in cases {
let mut t = make_thread("p", "w");
set(&mut t);
let def = CTPROF_METRICS
.iter()
.find(|m| m.name == *name)
.unwrap_or_else(|| panic!("metric {name} not in registry"));
let agg = aggregate(def.rule, &[&t]);
match agg {
Aggregated::Sum(v) => {
assert_eq!(v, 1, "accessor for {name} did not read the {name} field",)
}
other => panic!("expected Sum for {name}, got {other:?}"),
}
}
}
#[test]
fn ctprof_metric_names_are_unique() {
    // Metric names are used as lookup keys and CLI filter tokens; a
    // duplicate would make every name-based lookup ambiguous.
    let mut seen = std::collections::BTreeSet::new();
    for def in CTPROF_METRICS {
        let fresh = seen.insert(def.name);
        assert!(fresh, "duplicate metric name in registry: {}", def.name);
    }
}
/// Test helper: looks up a metric by name in the primary registry, panicking
/// when the name is absent.
fn lookup_metric(name: &str) -> &'static CtprofMetricDef {
    CTPROF_METRICS
        .iter()
        .find(|m| m.name == name)
        // Fix: the old panic read "metric {name} registered" — the exact
        // opposite of the condition it reports.
        .unwrap_or_else(|| panic!("metric {name} not registered"))
}
#[test]
fn metric_display_name_no_gates_returns_bare_name() {
    // Ungated, class-less metrics render as the bare name with no tags.
    for name in ["policy", "cpu_affinity"] {
        let def = lookup_metric(name);
        assert_eq!(metric_display_name(def), name);
        assert!(metric_tags(def).is_empty());
    }
}
#[test]
fn metric_tags_renders_class_and_config_tags() {
    // Class tag renders first, then the (shortened) config-gate tag.
    let def = lookup_metric("nr_wakeups_affine");
    assert_eq!(metric_display_name(def), "nr_wakeups_affine");
    assert_eq!(metric_tags(def), "[cfs-only] [SCHEDSTATS]");
}
#[test]
fn metric_tags_emits_each_config_gate_in_order() {
    // Multiple config gates render in registry order.
    let def = lookup_metric("core_forceidle_sum");
    assert_eq!(metric_display_name(def), "core_forceidle_sum");
    assert_eq!(metric_tags(def), "[SCHED_CORE] [SCHEDSTATS]");
}
#[test]
fn metric_tags_class_only_no_config_gate() {
    // A metric with a sched_class but no config gates renders only the class.
    let def = lookup_metric("fair_slice_ns");
    assert_eq!(metric_display_name(def), "fair_slice_ns");
    assert_eq!(metric_tags(def), "[fair-policy]");
}
#[test]
fn metric_tags_strips_config_prefix() {
    // Registry entries spell the full CONFIG_X symbol; rendered tags use the
    // short form ([X]) and never the full [CONFIG_X].
    for def in CTPROF_METRICS {
        for gate in def.config_gates {
            assert!(
                gate.starts_with("CONFIG_"),
                "registry config_gate {gate:?} on metric {} \
                 must spell the literal CONFIG_X kconfig symbol",
                def.name,
            );
            let rendered = metric_tags(def);
            let short = gate.strip_prefix("CONFIG_").unwrap();
            assert!(
                rendered.contains(&format!("[{short}]")),
                "metric {} tags {rendered:?} must contain [{short}]",
                def.name,
            );
            assert!(
                !rendered.contains(&format!("[{gate}]")),
                "metric {} tags {rendered:?} must not contain full [{gate}]",
                def.name,
            );
        }
    }
}
#[test]
fn metric_tags_marks_synthetic_dead_counter() {
    // A hand-built dead counter must render [dead] ahead of its config gates.
    let synthetic = CtprofMetricDef {
        name: "synthetic_dead",
        rule: AggRule::SumCount(|_| crate::metric_types::MonotonicCount(0)),
        sched_class: None,
        config_gates: &["CONFIG_SCHEDSTATS"],
        is_dead: true,
        description: "synthetic dead-counter test fixture.",
        section: Section::Primary,
    };
    assert_eq!(metric_display_name(&synthetic), "synthetic_dead");
    assert_eq!(metric_tags(&synthetic), "[dead] [SCHEDSTATS]");
    // The live registry is expected to carry no dead counters at all.
    for def in CTPROF_METRICS {
        assert!(
            !def.is_dead,
            "{} unexpectedly carries is_dead: true — the \
             registry is currently empty of dead counters; \
             add the entry to the matrix-pin test below if \
             a new dead counter is intentional",
            def.name,
        );
    }
}
#[test]
fn metric_tags_renders_non_ext_class() {
    // The non-ext class tag renders before the config-gate tag.
    let def = lookup_metric("wait_sum");
    assert_eq!(metric_display_name(def), "wait_sum");
    assert_eq!(metric_tags(def), "[non-ext] [SCHEDSTATS]");
}
#[test]
fn registry_tag_matrix_is_pinned() {
let matrix: &[(&str, Option<&str>, &[&str], bool)] = &[
("policy", None, &[], false),
("nice", None, &[], false),
("priority", None, &[], false),
("rt_priority", None, &[], false),
("cpu_affinity", None, &[], false),
("processor", None, &[], false),
("state", None, &[], false),
("ext_enabled", None, &["CONFIG_SCHED_CLASS_EXT"], false),
("nr_threads", None, &[], false),
("run_time_ns", None, &["CONFIG_SCHED_INFO"], false),
("wait_time_ns", None, &["CONFIG_SCHED_INFO"], false),
("timeslices", None, &["CONFIG_SCHED_INFO"], false),
("voluntary_csw", None, &[], false),
("nonvoluntary_csw", None, &[], false),
("nr_wakeups", None, &["CONFIG_SCHEDSTATS"], false),
("nr_wakeups_local", None, &["CONFIG_SCHEDSTATS"], false),
("nr_wakeups_remote", None, &["CONFIG_SCHEDSTATS"], false),
("nr_wakeups_sync", None, &["CONFIG_SCHEDSTATS"], false),
("nr_wakeups_migrate", None, &["CONFIG_SCHEDSTATS"], false),
(
"nr_wakeups_affine",
Some("cfs-only"),
&["CONFIG_SCHEDSTATS"],
false,
),
(
"nr_wakeups_affine_attempts",
Some("cfs-only"),
&["CONFIG_SCHEDSTATS"],
false,
),
("nr_migrations", None, &[], false),
(
"nr_forced_migrations",
Some("cfs-only"),
&["CONFIG_SCHEDSTATS"],
false,
),
(
"nr_failed_migrations_affine",
Some("cfs-only"),
&["CONFIG_SCHEDSTATS"],
false,
),
(
"nr_failed_migrations_running",
Some("cfs-only"),
&["CONFIG_SCHEDSTATS"],
false,
),
(
"nr_failed_migrations_hot",
Some("cfs-only"),
&["CONFIG_SCHEDSTATS"],
false,
),
("wait_sum", Some("non-ext"), &["CONFIG_SCHEDSTATS"], false),
("wait_count", Some("non-ext"), &["CONFIG_SCHEDSTATS"], false),
("wait_max", Some("non-ext"), &["CONFIG_SCHEDSTATS"], false),
(
"voluntary_sleep_ns",
Some("non-ext"),
&["CONFIG_SCHEDSTATS"],
false,
),
("sleep_max", Some("non-ext"), &["CONFIG_SCHEDSTATS"], false),
("block_sum", Some("non-ext"), &["CONFIG_SCHEDSTATS"], false),
("block_max", Some("non-ext"), &["CONFIG_SCHEDSTATS"], false),
("iowait_sum", Some("non-ext"), &["CONFIG_SCHEDSTATS"], false),
(
"iowait_count",
Some("non-ext"),
&["CONFIG_SCHEDSTATS"],
false,
),
("exec_max", None, &["CONFIG_SCHEDSTATS"], false),
("slice_max", Some("cfs-only"), &["CONFIG_SCHEDSTATS"], false),
(
"core_forceidle_sum",
None,
&["CONFIG_SCHED_CORE", "CONFIG_SCHEDSTATS"],
false,
),
("fair_slice_ns", Some("fair-policy"), &[], false),
("allocated_bytes", None, &[], false),
("deallocated_bytes", None, &[], false),
("minflt", None, &[], false),
("majflt", None, &[], false),
("utime_clock_ticks", None, &[], false),
("stime_clock_ticks", None, &[], false),
("rchar", None, &["CONFIG_TASK_IO_ACCOUNTING"], false),
("wchar", None, &["CONFIG_TASK_IO_ACCOUNTING"], false),
("syscr", None, &["CONFIG_TASK_IO_ACCOUNTING"], false),
("syscw", None, &["CONFIG_TASK_IO_ACCOUNTING"], false),
("read_bytes", None, &["CONFIG_TASK_IO_ACCOUNTING"], false),
("write_bytes", None, &["CONFIG_TASK_IO_ACCOUNTING"], false),
(
"cancelled_write_bytes",
None,
&["CONFIG_TASK_IO_ACCOUNTING"],
false,
),
(
"cpu_delay_count",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"cpu_delay_total_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"cpu_delay_max_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"cpu_delay_min_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"blkio_delay_count",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"blkio_delay_total_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"blkio_delay_max_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"blkio_delay_min_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"swapin_delay_count",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"swapin_delay_total_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"swapin_delay_max_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"swapin_delay_min_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"freepages_delay_count",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"freepages_delay_total_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"freepages_delay_max_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"freepages_delay_min_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"thrashing_delay_count",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"thrashing_delay_total_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"thrashing_delay_max_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"thrashing_delay_min_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"compact_delay_count",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"compact_delay_total_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"compact_delay_max_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"compact_delay_min_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"wpcopy_delay_count",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"wpcopy_delay_total_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"wpcopy_delay_max_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"wpcopy_delay_min_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"irq_delay_count",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"irq_delay_total_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"irq_delay_max_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"irq_delay_min_ns",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_DELAY_ACCT"],
false,
),
(
"hiwater_rss_bytes",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_XACCT"],
false,
),
(
"hiwater_vm_bytes",
None,
&["CONFIG_TASKSTATS", "CONFIG_TASK_XACCT"],
false,
),
];
let registry_names: std::collections::BTreeSet<&str> =
CTPROF_METRICS.iter().map(|m| m.name).collect();
let matrix_names: std::collections::BTreeSet<&str> =
matrix.iter().map(|(n, _, _, _)| *n).collect();
assert_eq!(
registry_names, matrix_names,
"registry vs matrix key mismatch — every metric must be \
pinned in the locked matrix and the matrix must not name \
metrics that aren't registered",
);
for (name, expected_class, expected_gates, expected_dead) in matrix {
let m = lookup_metric(name);
assert_eq!(m.sched_class, *expected_class, "{name}: sched_class drift",);
assert_eq!(
m.config_gates, *expected_gates,
"{name}: config_gates drift",
);
assert_eq!(m.is_dead, *expected_dead, "{name}: is_dead drift");
}
}
#[test]
fn registry_tag_vocabulary_is_closed() {
    // Both tag vocabularies are closed sets: a new sched_class or config
    // gate must be added here deliberately, never slipped in by typo.
    let classes: std::collections::BTreeSet<&str> =
        ["non-ext", "cfs-only", "fair-policy"].into_iter().collect();
    let gates: std::collections::BTreeSet<&str> = [
        "CONFIG_SCHED_INFO",
        "CONFIG_SCHEDSTATS",
        "CONFIG_SCHED_CORE",
        "CONFIG_TASK_DELAY_ACCT",
        "CONFIG_TASK_IO_ACCOUNTING",
        "CONFIG_TASK_XACCT",
        "CONFIG_SCHED_CLASS_EXT",
        "CONFIG_TASKSTATS",
    ]
    .into_iter()
    .collect();
    for def in CTPROF_METRICS {
        if let Some(class) = def.sched_class {
            assert!(
                classes.contains(class),
                "{}: sched_class {class:?} outside the closed set \
                 {{None, \"non-ext\", \"cfs-only\", \"fair-policy\"}}",
                def.name,
            );
        }
        for gate in def.config_gates {
            assert!(
                gate.starts_with("CONFIG_"),
                "{}: config_gate {gate:?} must start with CONFIG_",
                def.name,
            );
            assert!(
                gates.contains(gate),
                "{}: config_gate {gate:?} outside the closed set \
                 {gates:?}",
                def.name,
            );
        }
    }
}
#[test]
fn write_diff_renders_tagged_metric_cell() {
    // A gated cfs-only metric's table cell must carry its tag suffix.
    let mut base = make_thread("p", "w");
    base.nr_wakeups_affine = MonotonicCount(5);
    let mut cand = make_thread("p", "w");
    cand.nr_wakeups_affine = MonotonicCount(9);
    let diff = compare(
        &snap_with(vec![base]),
        &snap_with(vec![cand]),
        &CompareOptions::default(),
    );
    let mut rendered = String::new();
    write_diff(
        &mut rendered,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(
        rendered.contains("nr_wakeups_affine [cfs-only] [SCHEDSTATS]"),
        "tagged metric cell missing from rendered table:\n{rendered}",
    );
}
#[test]
fn write_diff_renders_non_ext_metric_cell() {
    // A non-ext classed metric's table cell must carry its tag suffix.
    let mut base = make_thread("p", "w");
    base.wait_sum = MonotonicNs(100);
    let mut cand = make_thread("p", "w");
    cand.wait_sum = MonotonicNs(200);
    let diff = compare(
        &snap_with(vec![base]),
        &snap_with(vec![cand]),
        &CompareOptions::default(),
    );
    let mut rendered = String::new();
    write_diff(
        &mut rendered,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(
        rendered.contains("wait_sum [non-ext] [SCHEDSTATS]"),
        "non-ext metric cell missing from rendered table:\n{rendered}",
    );
}
fn snap_pair_for_display() -> (CtprofSnapshot, CtprofSnapshot) {
    // Baseline/candidate pair for display tests: run time doubles
    // (100 → 200), wait_sum doubles (1000 → 2000) over a fixed wait_count of
    // 4, so avg_wait_ns goes 250 → 500 and percentages are +100%.
    let mut base = make_thread("p", "w");
    base.run_time_ns = MonotonicNs(100);
    base.wait_sum = MonotonicNs(1000);
    base.wait_count = MonotonicCount(4);
    let mut cand = make_thread("p", "w");
    cand.run_time_ns = MonotonicNs(200);
    cand.wait_sum = MonotonicNs(2000);
    cand.wait_count = MonotonicCount(4);
    (snap_with(vec![base]), snap_with(vec![cand]))
}
#[test]
fn display_format_default_is_full() {
    // Full is the implicit default when the operator picks no format.
    assert!(matches!(DisplayFormat::default(), DisplayFormat::Full));
}
#[test]
fn compare_columns_for_resolves_per_variant() {
    // Pin the exact column list each display format resolves to.
    use Column as C;
    assert_eq!(
        compare_columns_for(DisplayFormat::Full),
        [C::Group, C::Threads, C::Metric, C::Baseline, C::Candidate, C::Delta, C::Pct]
    );
    assert_eq!(
        compare_columns_for(DisplayFormat::DeltaOnly),
        [C::Group, C::Threads, C::Metric, C::Delta, C::Pct]
    );
    assert_eq!(
        compare_columns_for(DisplayFormat::NoPct),
        [C::Group, C::Threads, C::Metric, C::Baseline, C::Candidate, C::Delta]
    );
    assert_eq!(
        compare_columns_for(DisplayFormat::Arrow),
        [C::Group, C::Threads, C::Metric, C::Arrow]
    );
    assert_eq!(
        compare_columns_for(DisplayFormat::PctOnly),
        [C::Group, C::Threads, C::Metric, C::Pct]
    );
}
#[test]
fn parse_columns_round_trips_compare_names() {
    // Every compare-side column name parses back to its variant, in order.
    let parsed = parse_columns("group,threads,metric,baseline,candidate,delta,%", true)
        .expect("valid compare spec");
    assert_eq!(
        parsed,
        [
            Column::Group,
            Column::Threads,
            Column::Metric,
            Column::Baseline,
            Column::Candidate,
            Column::Delta,
            Column::Pct,
        ]
    );
}
#[test]
fn parse_columns_round_trips_arrow_form() {
    // The fused arrow column is a valid compare-side spec on its own.
    let parsed =
        parse_columns("group,threads,metric,arrow", true).expect("valid arrow-form spec");
    assert_eq!(
        parsed,
        [Column::Group, Column::Threads, Column::Metric, Column::Arrow]
    );
}
#[test]
fn parse_columns_rejects_compare_only_on_show_side() {
    // "baseline" exists only on the compare side; show-side parsing must
    // reject it, citing the name and listing the show-side vocabulary.
    let msg = format!("{:#}", parse_columns("baseline", false).unwrap_err());
    assert!(
        msg.contains("baseline"),
        "error must cite the offending name: {msg}"
    );
    assert!(
        msg.contains("group, threads, metric, value"),
        "error must list the show-side allowed names: {msg}"
    );
}
#[test]
fn parse_columns_rejects_show_only_on_compare_side() {
    // "value" is show-side only; compare-side parsing must reject it.
    let msg = format!("{:#}", parse_columns("value", true).unwrap_err());
    assert!(msg.contains("value"), "error must cite name: {msg}");
}
#[test]
fn parse_columns_rejects_unknown_name() {
    // Unknown column names are rejected with the name echoed back.
    let msg = format!("{:#}", parse_columns("not_a_column", true).unwrap_err());
    assert!(msg.contains("not_a_column"), "error must cite name: {msg}",);
}
#[test]
fn parse_columns_rejects_duplicate() {
    // Repeating a column in the spec is an error, not a silent dedupe.
    let msg = format!(
        "{:#}",
        parse_columns("metric,delta,metric", true).unwrap_err()
    );
    assert!(
        msg.contains("duplicate"),
        "error must mention duplicates: {msg}"
    );
}
#[test]
fn parse_columns_rejects_empty_entry() {
    // A stray ",," leaves an empty entry, which is rejected.
    let msg = format!("{:#}", parse_columns("metric,,delta", true).unwrap_err());
    assert!(msg.contains("empty"), "error must mention empty: {msg}");
}
#[test]
fn parse_columns_empty_returns_empty_vec() {
    // Blank specs (empty or whitespace) mean "no override" — an empty list.
    let empty = parse_columns("", true).expect("empty parses");
    assert!(empty.is_empty());
    let blank = parse_columns("   ", true).expect("whitespace-only parses as empty");
    assert!(blank.is_empty());
}
#[test]
fn parse_columns_accepts_show_side_metric_value() {
    // "value" is the show-side counterpart of baseline/candidate.
    let parsed = parse_columns("metric,value", false).expect("metric,value is show-side valid");
    assert_eq!(parsed, [Column::Metric, Column::Value]);
}
#[test]
fn parse_columns_rejects_arrow_with_fused_columns() {
    // arrow fuses baseline/candidate/delta/% into one cell; combining it
    // with any of those columns must be rejected as mutually exclusive.
    for fused in ["baseline", "candidate", "delta", "%"] {
        let spec = format!("arrow,{fused}");
        let err = parse_columns(&spec, true)
            .err()
            .unwrap_or_else(|| panic!("arrow+{fused} must be rejected"));
        let msg = format!("{err:#}");
        assert!(
            msg.contains("arrow") && msg.contains("mutually exclusive"),
            "error must name arrow's mutual exclusivity for spec {spec:?}: {msg}"
        );
    }
}
#[test]
fn parse_sections_empty_returns_empty_vec() {
    // Blank specs mean "no section filter" — an empty list.
    let empty = parse_sections("").expect("empty parses");
    assert!(empty.is_empty());
    let blank = parse_sections("   ").expect("whitespace-only parses as empty");
    assert!(blank.is_empty());
}
#[test]
fn parse_sections_round_trips_every_name() {
    // Joining every cli_name and parsing it back must reproduce ALL exactly.
    let names: Vec<&str> = Section::ALL.iter().map(|s| s.cli_name()).collect();
    let parsed = parse_sections(&names.join(",")).expect("every cli_name must round-trip");
    assert_eq!(
        parsed,
        Section::ALL.to_vec(),
        "round-trip must preserve order and identity"
    );
}
#[test]
fn parse_sections_rejects_unknown_name() {
    // Error must cite the bad name AND advertise valid choices.
    let msg = format!("{:#}", parse_sections("not_a_section").unwrap_err());
    assert!(
        msg.contains("not_a_section"),
        "error must cite the offending name: {msg}"
    );
    for listed in ["primary", "host-pressure"] {
        assert!(msg.contains(listed), "error must list valid names: {msg}");
    }
}
#[test]
fn parse_sections_rejects_duplicate() {
    // Listing a section twice is an error, not a silent dedupe.
    let msg = format!(
        "{:#}",
        parse_sections("primary,derived,primary").unwrap_err()
    );
    assert!(
        msg.contains("duplicate"),
        "error must mention duplicates: {msg}"
    );
}
#[test]
fn parse_sections_rejects_empty_entry() {
    // A stray ",," leaves an empty entry, which is rejected.
    let msg = format!("{:#}", parse_sections("primary,,derived").unwrap_err());
    assert!(msg.contains("empty"), "error must mention empty: {msg}");
}
#[test]
fn parse_sections_accepts_multiple_in_input_order() {
    // Multi-section specs keep the operator's order, not registry order.
    let parsed =
        parse_sections("derived,primary,host-pressure").expect("multi-section spec parses");
    assert_eq!(
        parsed,
        [Section::Derived, Section::Primary, Section::HostPressure],
        "input order must be preserved",
    );
}
#[test]
fn parse_sections_trims_whitespace_around_entries() {
    // Padding around entries is tolerated and stripped.
    let parsed =
        parse_sections("  primary ,  derived ").expect("whitespace-tolerant spec parses");
    assert_eq!(parsed, [Section::Primary, Section::Derived]);
}
#[test]
fn section_all_is_exhaustive_and_unique() {
    // Every Section::ALL entry must have a unique cli_name, and each
    // cli_name must parse back to exactly its own variant.
    let mut seen: std::collections::BTreeSet<&'static str> = std::collections::BTreeSet::new();
    for sec in Section::ALL {
        let name = sec.cli_name();
        assert!(seen.insert(name), "duplicate cli_name in Section::ALL: {name}");
        let round = parse_sections(name)
            .unwrap_or_else(|e| panic!("cli_name {name} failed parse: {e:#}"));
        assert_eq!(round, vec![*sec]);
    }
    assert_eq!(
        seen.len(),
        Section::ALL.len(),
        "ALL count must match the unique-names count",
    );
}
#[test]
fn is_section_enabled_empty_treats_all_as_on() {
    // An empty section filter means "show everything".
    let defaults = DisplayOptions::default();
    for sec in Section::ALL {
        assert!(
            defaults.is_section_enabled(*sec),
            "empty filter must enable {} (default = all-on)",
            sec.cli_name()
        );
    }
}
#[test]
fn is_section_enabled_non_empty_restricts_to_listed() {
    // With a filter set, exactly the listed sections are enabled.
    let mut opts = DisplayOptions::default();
    opts.sections = vec![Section::Primary, Section::HostPressure];
    for sec in Section::ALL {
        let expected = matches!(sec, Section::Primary | Section::HostPressure);
        assert_eq!(
            opts.is_section_enabled(*sec),
            expected,
            "is_section_enabled({}) under {{Primary, HostPressure}} \
             must be {expected}",
            sec.cli_name(),
        );
    }
}
#[test]
fn section_requires_cgroup_grouping_classifies_correctly() {
    // Exactly the cgroup-sourced sections demand --group-by cgroup.
    for sec in Section::ALL {
        let cgroup_only = matches!(
            sec,
            Section::CgroupStats
                | Section::Limits
                | Section::MemoryStat
                | Section::MemoryEvents
                | Section::Pressure
        );
        assert_eq!(
            sec.requires_cgroup_grouping(),
            cgroup_only,
            "Section::{sec:?}.requires_cgroup_grouping() must be {cgroup_only}",
        );
    }
}
#[test]
fn parse_metrics_empty_returns_empty_vec() {
    // Blank specs mean "no metric filter" — an empty list.
    let empty = parse_metrics("").expect("empty parses");
    assert!(empty.is_empty());
    let blank = parse_metrics("   ").expect("whitespace-only parses as empty");
    assert!(blank.is_empty());
}
#[test]
fn parse_metrics_round_trips_every_primary_registry_name() {
    // Each primary registry name is accepted verbatim as a one-entry spec.
    for def in CTPROF_METRICS {
        let parsed = parse_metrics(def.name)
            .unwrap_or_else(|e| panic!("metric name {} failed parse: {e:#}", def.name));
        assert_eq!(parsed, [def.name]);
    }
}
#[test]
fn parse_metrics_round_trips_every_derived_registry_name() {
    // Derived metric names are just as addressable as primary ones.
    for def in CTPROF_DERIVED_METRICS {
        let parsed = parse_metrics(def.name)
            .unwrap_or_else(|e| panic!("derived name {} failed parse: {e:#}", def.name));
        assert_eq!(parsed, [def.name]);
    }
}
#[test]
fn parse_metrics_accepts_primary_and_derived_in_input_order() {
    // Mixing derived and primary names in one spec preserves input order.
    let parsed = parse_metrics("cpu_efficiency,run_time_ns")
        .expect("mixed primary+derived spec must parse");
    assert_eq!(parsed, ["cpu_efficiency", "run_time_ns"]);
}
#[test]
fn parse_metrics_rejects_unknown_name() {
    // Error must cite the bad name and point at the discovery subcommand.
    let msg = format!("{:#}", parse_metrics("not_a_real_metric").unwrap_err());
    assert!(
        msg.contains("not_a_real_metric"),
        "error must cite the offending name: {msg}"
    );
    assert!(
        msg.contains("metric-list"),
        "error must point operator at the discovery command: {msg}"
    );
}
#[test]
fn parse_metrics_rejects_duplicate() {
    // Listing a metric twice is an error, not a silent dedupe.
    let msg = format!(
        "{:#}",
        parse_metrics("run_time_ns,wait_sum,run_time_ns").unwrap_err()
    );
    assert!(
        msg.contains("duplicate"),
        "error must mention duplicates: {msg}"
    );
}
#[test]
fn parse_metrics_rejects_empty_entry() {
    // A stray ",," leaves an empty entry, which is rejected.
    let msg = format!("{:#}", parse_metrics("run_time_ns,,wait_sum").unwrap_err());
    assert!(msg.contains("empty"), "error must mention empty: {msg}");
}
#[test]
fn parse_metrics_trims_whitespace_around_entries() {
    // Padding around entries is tolerated and stripped.
    let parsed =
        parse_metrics("  run_time_ns , wait_sum  ").expect("whitespace-tolerant spec parses");
    assert_eq!(parsed, ["run_time_ns", "wait_sum"]);
}
#[test]
fn is_metric_enabled_empty_treats_all_as_on() {
    // With no filter, every name — even an unregistered one — is enabled.
    let defaults = DisplayOptions::default();
    for name in ["run_time_ns", "cpu_efficiency", "anything_under_empty_filter"] {
        assert!(defaults.is_metric_enabled(name));
    }
}
#[test]
fn is_metric_enabled_non_empty_restricts_to_listed() {
    // With a filter set, only the listed metrics are enabled.
    let mut opts = DisplayOptions::default();
    opts.metrics = vec!["run_time_ns", "wait_sum"];
    for enabled in ["run_time_ns", "wait_sum"] {
        assert!(opts.is_metric_enabled(enabled));
    }
    for disabled in ["nr_wakeups", "cpu_efficiency"] {
        assert!(!opts.is_metric_enabled(disabled));
    }
}
#[test]
fn format_cgroup_only_section_warning_names_all_three_elements() {
    // The warning must name the section, the requirement, and the axis the
    // operator actually picked.
    let warning = format_cgroup_only_section_warning(Section::Pressure, GroupBy::Pcomm);
    assert!(
        warning.contains("'pressure'"),
        "warning must quote the section cli_name: {warning}",
    );
    assert!(
        warning.contains("--group-by cgroup"),
        "warning must name the cgroup requirement: {warning}",
    );
    assert!(
        warning.contains("pcomm"),
        "warning must echo the operator's --group-by axis: {warning}",
    );
}
#[test]
fn format_cgroup_only_section_warning_uses_comm_exact_spelling() {
    // The CLI spelling (comm-exact) must appear, never the Rust variant name.
    let warning = format_cgroup_only_section_warning(Section::CgroupStats, GroupBy::CommExact);
    assert!(
        warning.contains("comm-exact"),
        "warning must use the clap value-enum spelling: {warning}",
    );
    assert!(
        !warning.contains("CommExact"),
        "warning must not surface the rust variant name: {warning}",
    );
}
#[test]
fn columns_override_wins_over_display_format() {
    // An explicit --columns list overrides whatever the format would pick.
    let mut opts = DisplayOptions::default();
    opts.columns = vec![Column::Metric, Column::Delta];
    opts.format = DisplayFormat::Full;
    assert_eq!(
        opts.resolved_compare_columns(),
        [Column::Metric, Column::Delta]
    );
}
#[test]
fn write_diff_delta_only_omits_baseline_candidate_columns() {
    // Delta-only format: header keeps delta but drops baseline/candidate.
    let (base, cand) = snap_pair_for_display();
    let diff = compare(&base, &cand, &CompareOptions::default());
    let mut display = DisplayOptions::default();
    display.format = DisplayFormat::DeltaOnly;
    let mut rendered = String::new();
    write_diff(
        &mut rendered,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &display,
    )
    .unwrap();
    let header = rendered.lines().next().unwrap_or("");
    for dropped in ["baseline", "candidate"] {
        assert!(
            !header.contains(dropped),
            "delta-only header must drop {dropped} column:\n{header}"
        );
    }
    assert!(
        header.contains("delta"),
        "delta column must remain:\n{header}"
    );
}
#[test]
fn write_diff_no_pct_omits_pct_column() {
    // NoPct format: the percent column disappears from the header.
    let (base, cand) = snap_pair_for_display();
    let diff = compare(&base, &cand, &CompareOptions::default());
    let mut display = DisplayOptions::default();
    display.format = DisplayFormat::NoPct;
    let mut rendered = String::new();
    write_diff(
        &mut rendered,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &display,
    )
    .unwrap();
    let header = rendered.lines().next().unwrap_or("");
    assert!(
        !header.contains(" % "),
        "no-pct header must drop percent column:\n{header}"
    );
}
#[test]
fn write_diff_arrow_renders_combined_cell() {
    // Arrow format fuses "baseline → candidate (+delta)" into one cell.
    let (base, cand) = snap_pair_for_display();
    let diff = compare(&base, &cand, &CompareOptions::default());
    let mut display = DisplayOptions::default();
    display.format = DisplayFormat::Arrow;
    let mut rendered = String::new();
    write_diff(
        &mut rendered,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &display,
    )
    .unwrap();
    assert!(
        rendered.contains('\u{2192}'),
        "arrow glyph must appear in output:\n{rendered}"
    );
    assert!(
        rendered.contains("100ns") && rendered.contains("200ns"),
        "baseline and candidate values must surface in arrow cell:\n{rendered}"
    );
    assert!(
        rendered.contains("(+100ns)") || rendered.contains("(+100"),
        "delta must appear in parens:\n{rendered}"
    );
}
#[test]
fn write_diff_arrow_renders_derived_arrow_cell() {
    // Derived metrics get the arrow treatment too (avg_wait_ns = 1000/4).
    let (base, cand) = snap_pair_for_display();
    let diff = compare(&base, &cand, &CompareOptions::default());
    let mut display = DisplayOptions::default();
    display.format = DisplayFormat::Arrow;
    let mut rendered = String::new();
    write_diff(
        &mut rendered,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &display,
    )
    .unwrap();
    assert!(
        rendered.contains("avg_wait_ns"),
        "derived metric must appear in arrow rendering:\n{rendered}"
    );
    assert!(
        rendered.contains("250.00ns") || rendered.contains("250ns"),
        "baseline derived value must appear in arrow cell:\n{rendered}"
    );
}
#[test]
fn write_diff_pct_only_keeps_only_pct() {
    // PctOnly format: baseline/candidate/delta all gone, percent rendered.
    let (base, cand) = snap_pair_for_display();
    let diff = compare(&base, &cand, &CompareOptions::default());
    let mut display = DisplayOptions::default();
    display.format = DisplayFormat::PctOnly;
    let mut rendered = String::new();
    write_diff(
        &mut rendered,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &display,
    )
    .unwrap();
    let header = rendered.lines().next().unwrap_or("");
    for dropped in ["baseline", "candidate", "delta"] {
        assert!(
            !header.contains(dropped),
            "pct-only header must drop {dropped}:\n{header}"
        );
    }
    assert!(
        rendered.contains("+100.0%"),
        "pct-only must render percent cell:\n{rendered}",
    );
}
#[test]
fn write_diff_columns_override_emits_only_selected_columns() {
    // An explicit --columns list beats the format's own column set.
    let (base, cand) = snap_pair_for_display();
    let diff = compare(&base, &cand, &CompareOptions::default());
    let mut display = DisplayOptions::default();
    display.format = DisplayFormat::Full;
    display.columns = vec![Column::Metric, Column::Delta];
    let mut rendered = String::new();
    write_diff(
        &mut rendered,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &display,
    )
    .unwrap();
    let header = rendered.lines().next().unwrap_or("");
    for kept in ["metric", "delta"] {
        assert!(
            header.contains(kept),
            "{kept} column must appear:\n{header}"
        );
    }
    for dropped in ["baseline", "candidate"] {
        assert!(
            !header.contains(dropped),
            "{dropped} must NOT appear when --columns excludes it:\n{header}"
        );
    }
}
/// affine_success_ratio = nr_wakeups_affine / nr_wakeups_affine_attempts
/// (7 / 10 = 0.7 here), and it must be flagged as a ratio.
#[test]
fn derived_affine_success_ratio_formula() {
    let mut worker = make_thread("p", "w");
    worker.nr_wakeups_affine = MonotonicCount(7);
    worker.nr_wakeups_affine_attempts = MonotonicCount(10);
    let baseline = snap_with(vec![worker.clone()]);
    let candidate = snap_with(vec![worker]);
    let diff = compare(&baseline, &candidate, &CompareOptions::default());
    let row = diff
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "affine_success_ratio")
        .expect("affine_success_ratio row present");
    assert_eq!(row.baseline, Some(DerivedValue::Scalar(0.7)));
    assert_eq!(row.candidate, Some(DerivedValue::Scalar(0.7)));
    assert!(row.is_ratio, "affine_success_ratio is a ratio");
}
/// avg_wait_ns = wait_sum / wait_count (1000 / 4 = 250 here).
#[test]
fn derived_avg_wait_ns_formula() {
    let mut worker = make_thread("p", "w");
    worker.wait_sum = MonotonicNs(1000);
    worker.wait_count = MonotonicCount(4);
    let baseline = snap_with(vec![worker.clone()]);
    let candidate = snap_with(vec![worker]);
    let diff = compare(&baseline, &candidate, &CompareOptions::default());
    let row = diff
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "avg_wait_ns")
        .expect("avg_wait_ns row present");
    assert_eq!(row.baseline, Some(DerivedValue::Scalar(250.0)));
}
/// voluntary_sleep_ns is a plain SumNs registry metric: the capture-side
/// value must survive aggregation unchanged.
#[test]
fn voluntary_sleep_ns_sums_through_registry() {
    let mut worker = make_thread("p", "w");
    worker.voluntary_sleep_ns = MonotonicNs(1000);
    let baseline = snap_with(vec![worker.clone()]);
    let candidate = snap_with(vec![worker]);
    let diff = compare(&baseline, &candidate, &CompareOptions::default());
    let row = diff
        .rows
        .iter()
        .find(|r| r.metric_name == "voluntary_sleep_ns")
        .expect("voluntary_sleep_ns row in diff");
    assert_eq!(
        row.baseline.numeric(),
        Some(1000.0),
        "voluntary_sleep_ns flows through SumNs aggregation \
         carrying the capture-side normalized value verbatim",
    );
}
/// Regression guard: the old `voluntary_sleep_sum` derived metric must stay
/// deleted — normalization now happens at capture time.
#[test]
fn voluntary_sleep_sum_derived_metric_is_removed() {
    let names: std::collections::BTreeSet<&'static str> = CTPROF_DERIVED_METRICS
        .iter()
        .map(|m| m.name)
        .collect();
    assert!(
        !names.contains("voluntary_sleep_sum"),
        "voluntary_sleep_sum derived metric must not exist — \
         the normalization moved to capture-side \
         `voluntary_sleep_ns` (see ThreadState field doc). \
         Got derived metrics: {names:?}",
    );
}
/// cpu_efficiency = run_time / (run_time + wait_time) (100 / 200 = 0.5).
#[test]
fn derived_cpu_efficiency_formula() {
    let mut worker = make_thread("p", "w");
    worker.run_time_ns = MonotonicNs(100);
    worker.wait_time_ns = MonotonicNs(100);
    let baseline = snap_with(vec![worker.clone()]);
    let candidate = snap_with(vec![worker]);
    let diff = compare(&baseline, &candidate, &CompareOptions::default());
    let row = diff
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "cpu_efficiency")
        .expect("cpu_efficiency row present");
    assert_eq!(row.baseline, Some(DerivedValue::Scalar(0.5)));
    assert!(row.is_ratio);
}
/// avg_slice_ns = run_time_ns / timeslices (4000 / 8 = 500).
#[test]
fn derived_avg_slice_ns_formula() {
    let mut worker = make_thread("p", "w");
    worker.run_time_ns = MonotonicNs(4000);
    worker.timeslices = MonotonicCount(8);
    let baseline = snap_with(vec![worker.clone()]);
    let candidate = snap_with(vec![worker]);
    let diff = compare(&baseline, &candidate, &CompareOptions::default());
    let row = diff
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "avg_slice_ns")
        .expect("avg_slice_ns row present");
    assert_eq!(row.baseline, Some(DerivedValue::Scalar(500.0)));
}
/// involuntary_csw_ratio = nonvoluntary / (voluntary + nonvoluntary)
/// (25 / 100 = 0.25).
#[test]
fn derived_involuntary_csw_ratio_formula() {
    let mut worker = make_thread("p", "w");
    worker.voluntary_csw = MonotonicCount(75);
    worker.nonvoluntary_csw = MonotonicCount(25);
    let baseline = snap_with(vec![worker.clone()]);
    let candidate = snap_with(vec![worker]);
    let diff = compare(&baseline, &candidate, &CompareOptions::default());
    let row = diff
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "involuntary_csw_ratio")
        .expect("involuntary_csw_ratio row present");
    assert_eq!(row.baseline, Some(DerivedValue::Scalar(0.25)));
    assert!(row.is_ratio);
}
/// disk_io_fraction = read_bytes / rchar (2_500 / 10_000 = 0.25).
#[test]
fn derived_disk_io_fraction_formula() {
    let mut worker = make_thread("p", "w");
    worker.rchar = Bytes(10_000);
    worker.read_bytes = Bytes(2_500);
    let baseline = snap_with(vec![worker.clone()]);
    let candidate = snap_with(vec![worker]);
    let diff = compare(&baseline, &candidate, &CompareOptions::default());
    let row = diff
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "disk_io_fraction")
        .expect("disk_io_fraction row present");
    assert_eq!(row.baseline, Some(DerivedValue::Scalar(0.25)));
    assert!(row.is_ratio);
}
/// live_heap_estimate = allocated - deallocated and may go negative
/// (1000 - 1500 = -500); it is a byte quantity, not a ratio.
#[test]
fn derived_live_heap_estimate_signed() {
    let mut worker = make_thread("p", "w");
    worker.allocated_bytes = Bytes(1000);
    worker.deallocated_bytes = Bytes(1500);
    let baseline = snap_with(vec![worker.clone()]);
    let candidate = snap_with(vec![worker]);
    let diff = compare(&baseline, &candidate, &CompareOptions::default());
    let row = diff
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "live_heap_estimate")
        .expect("live_heap_estimate row present");
    assert_eq!(row.baseline, Some(DerivedValue::Scalar(-500.0)));
    assert!(!row.is_ratio, "live_heap_estimate is a B-unit, not ratio");
}
/// avg_iowait_ns = iowait_sum / iowait_count (9000 / 3 = 3000).
#[test]
fn derived_avg_iowait_ns_formula() {
    let mut worker = make_thread("p", "w");
    worker.iowait_sum = MonotonicNs(9000);
    worker.iowait_count = MonotonicCount(3);
    let baseline = snap_with(vec![worker.clone()]);
    let candidate = snap_with(vec![worker]);
    let diff = compare(&baseline, &candidate, &CompareOptions::default());
    let row = diff
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "avg_iowait_ns")
        .expect("avg_iowait_ns row present");
    assert_eq!(row.baseline, Some(DerivedValue::Scalar(3000.0)));
}
/// Each `avg_*_delay_ns` derived metric must equal its source's
/// total_ns / count. Every (count, total) pair below yields a distinct
/// quotient (3000, 5000, ..., 17000) so swapped numerators/denominators
/// or crossed wiring between delay sources are caught.
#[test]
fn derived_avg_delay_ns_formulas_match_manual_division() {
    let mut t = make_thread("p", "w");
    t.cpu_delay_count = MonotonicCount(3);
    t.cpu_delay_total_ns = MonotonicNs(9_000);
    t.blkio_delay_count = MonotonicCount(4);
    t.blkio_delay_total_ns = MonotonicNs(20_000);
    t.swapin_delay_count = MonotonicCount(5);
    t.swapin_delay_total_ns = MonotonicNs(35_000);
    t.freepages_delay_count = MonotonicCount(6);
    t.freepages_delay_total_ns = MonotonicNs(54_000);
    t.thrashing_delay_count = MonotonicCount(7);
    t.thrashing_delay_total_ns = MonotonicNs(77_000);
    t.compact_delay_count = MonotonicCount(8);
    t.compact_delay_total_ns = MonotonicNs(104_000);
    t.wpcopy_delay_count = MonotonicCount(9);
    t.wpcopy_delay_total_ns = MonotonicNs(135_000);
    t.irq_delay_count = MonotonicCount(10);
    t.irq_delay_total_ns = MonotonicNs(170_000);
    // Identical snapshots on both sides; only the baseline is asserted.
    let diff = compare(
        &snap_with(vec![t.clone()]),
        &snap_with(vec![t]),
        &CompareOptions::default(),
    );
    for (name, expected) in [
        ("avg_cpu_delay_ns", 3_000.0),
        ("avg_blkio_delay_ns", 5_000.0),
        ("avg_swapin_delay_ns", 7_000.0),
        ("avg_freepages_delay_ns", 9_000.0),
        ("avg_thrashing_delay_ns", 11_000.0),
        ("avg_compact_delay_ns", 13_000.0),
        ("avg_wpcopy_delay_ns", 15_000.0),
        ("avg_irq_delay_ns", 17_000.0),
    ] {
        let row = diff
            .derived_rows
            .iter()
            .find(|r| r.metric_name == name)
            .unwrap_or_else(|| panic!("{name} row present"));
        assert_eq!(
            row.baseline,
            Some(DerivedValue::Scalar(expected)),
            "{name} formula mismatch — expected {expected}",
        );
    }
}
/// `total_offcpu_delay_ns` sums the delay sources but collapses the
/// overlapping swapin/thrashing pair to max(swapin, thrashing):
/// case (a): 10+20+30+40+60+70 = 230, plus max(200, 50) = 200 → 430;
/// case (b): same 230, plus max(75, 300) = 300 → 530.
/// Both orderings of the max are exercised so neither operand can be
/// silently dropped.
#[test]
fn derived_total_offcpu_delay_ns_sums_with_max_overlap() {
    // Case (a): swapin dominates thrashing.
    let mut t_a = make_thread("p", "w");
    t_a.cpu_delay_total_ns = MonotonicNs(10);
    t_a.blkio_delay_total_ns = MonotonicNs(20);
    t_a.swapin_delay_total_ns = MonotonicNs(200);
    t_a.freepages_delay_total_ns = MonotonicNs(30);
    t_a.thrashing_delay_total_ns = MonotonicNs(50);
    t_a.compact_delay_total_ns = MonotonicNs(40);
    t_a.wpcopy_delay_total_ns = MonotonicNs(60);
    t_a.irq_delay_total_ns = MonotonicNs(70);
    let diff_a = compare(
        &snap_with(vec![t_a.clone()]),
        &snap_with(vec![t_a]),
        &CompareOptions::default(),
    );
    let row_a = diff_a
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "total_offcpu_delay_ns")
        .expect("total_offcpu_delay_ns row present (case a)");
    assert_eq!(
        row_a.baseline,
        Some(DerivedValue::Scalar(430.0)),
        "case (a) swapin>thrashing: expected 430, got {:?}",
        row_a.baseline,
    );
    // Case (b): thrashing dominates swapin.
    let mut t_b = make_thread("p", "w");
    t_b.cpu_delay_total_ns = MonotonicNs(10);
    t_b.blkio_delay_total_ns = MonotonicNs(20);
    t_b.swapin_delay_total_ns = MonotonicNs(75);
    t_b.freepages_delay_total_ns = MonotonicNs(30);
    t_b.thrashing_delay_total_ns = MonotonicNs(300);
    t_b.compact_delay_total_ns = MonotonicNs(40);
    t_b.wpcopy_delay_total_ns = MonotonicNs(60);
    t_b.irq_delay_total_ns = MonotonicNs(70);
    let diff_b = compare(
        &snap_with(vec![t_b.clone()]),
        &snap_with(vec![t_b]),
        &CompareOptions::default(),
    );
    let row_b = diff_b
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "total_offcpu_delay_ns")
        .expect("total_offcpu_delay_ns row present (case b)");
    assert_eq!(
        row_b.baseline,
        Some(DerivedValue::Scalar(530.0)),
        "case (b) thrashing>swapin: expected 530, got {:?}",
        row_b.baseline,
    );
}
/// Derived `compute` closures must return None — never 0 or a partial
/// result — when a required input is absent from the metrics map.
#[test]
fn derived_avg_delay_ns_returns_none_on_missing_input() {
    // Resolve a derived-metric definition from the registry by name.
    let lookup = |name: &str| -> &DerivedMetricDef {
        CTPROF_DERIVED_METRICS
            .iter()
            .find(|d| d.name == name)
            .unwrap_or_else(|| panic!("{name} present in registry"))
    };
    // Each avg metric gets a map holding only its numerator; the missing
    // count denominator must force compute() to None.
    for (name, numerator) in [
        ("avg_cpu_delay_ns", "cpu_delay_total_ns"),
        ("avg_blkio_delay_ns", "blkio_delay_total_ns"),
        ("avg_swapin_delay_ns", "swapin_delay_total_ns"),
        ("avg_freepages_delay_ns", "freepages_delay_total_ns"),
        ("avg_thrashing_delay_ns", "thrashing_delay_total_ns"),
        ("avg_compact_delay_ns", "compact_delay_total_ns"),
        ("avg_wpcopy_delay_ns", "wpcopy_delay_total_ns"),
        ("avg_irq_delay_ns", "irq_delay_total_ns"),
    ] {
        let mut metrics: BTreeMap<String, Aggregated> = BTreeMap::new();
        metrics.insert(numerator.to_string(), Aggregated::Sum(123));
        let def = lookup(name);
        assert!(
            (def.compute)(&metrics).is_none(),
            "{name}: compute must return None when denominator is \
             missing from metrics map (only {numerator} present)",
        );
    }
    // For the total: populate every input EXCEPT compact_delay_total_ns.
    let mut partial: BTreeMap<String, Aggregated> = BTreeMap::new();
    for name in [
        "cpu_delay_total_ns",
        "blkio_delay_total_ns",
        "swapin_delay_total_ns",
        "freepages_delay_total_ns",
        "thrashing_delay_total_ns",
        "wpcopy_delay_total_ns",
        "irq_delay_total_ns",
    ] {
        partial.insert(name.to_string(), Aggregated::Sum(100));
    }
    let total_def = lookup("total_offcpu_delay_ns");
    assert!(
        (total_def.compute)(&partial).is_none(),
        "total_offcpu_delay_ns: compute must return None when ANY \
         input is missing — exercised here with compact_delay_total_ns \
         omitted from the metrics map",
    );
}
/// With all inputs zero, every quotient-style derived metric must come out
/// None (both baseline and delta) — but the pure sum
/// `total_offcpu_delay_ns` must stay Some(0.0): a genuine zero sum is
/// meaningful, an undefined quotient is not.
#[test]
fn derived_division_by_zero_returns_none() {
    // Zero out every field that feeds a quotient denominator or numerator.
    let mut t = make_thread("p", "w");
    t.nr_wakeups_affine = MonotonicCount(0);
    t.nr_wakeups_affine_attempts = MonotonicCount(0);
    t.wait_sum = MonotonicNs(0);
    t.wait_count = MonotonicCount(0);
    t.run_time_ns = MonotonicNs(0);
    t.wait_time_ns = MonotonicNs(0);
    t.timeslices = MonotonicCount(0);
    t.voluntary_csw = MonotonicCount(0);
    t.nonvoluntary_csw = MonotonicCount(0);
    t.rchar = Bytes(0);
    t.read_bytes = Bytes(0);
    t.iowait_sum = MonotonicNs(0);
    t.iowait_count = MonotonicCount(0);
    let diff = compare(
        &snap_with(vec![t.clone()]),
        &snap_with(vec![t]),
        &CompareOptions::default(),
    );
    // All quotient-style derived metrics: zero denominator → None.
    for name in [
        "affine_success_ratio",
        "avg_wait_ns",
        "cpu_efficiency",
        "avg_slice_ns",
        "involuntary_csw_ratio",
        "disk_io_fraction",
        "avg_iowait_ns",
        "avg_cpu_delay_ns",
        "avg_blkio_delay_ns",
        "avg_swapin_delay_ns",
        "avg_freepages_delay_ns",
        "avg_thrashing_delay_ns",
        "avg_compact_delay_ns",
        "avg_wpcopy_delay_ns",
        "avg_irq_delay_ns",
    ] {
        let row = diff
            .derived_rows
            .iter()
            .find(|r| r.metric_name == name)
            .unwrap_or_else(|| panic!("{name} row present"));
        assert!(
            row.baseline.is_none(),
            "{name} divides by zero — baseline must be None, got {:?}",
            row.baseline
        );
        assert!(
            row.delta.is_none(),
            "{name} delta must be None when inputs are zero"
        );
    }
    // The sum metric is the deliberate exception: zero is a real value.
    let total_row = diff
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "total_offcpu_delay_ns")
        .expect("total_offcpu_delay_ns row present");
    assert_eq!(
        total_row.baseline,
        Some(DerivedValue::Scalar(0.0)),
        "total_offcpu_delay_ns with all-zero inputs must be \
         Some(0.0), not None — genuine zero is meaningful for a sum",
    );
}
/// Derived ratio rows carry an absolute delta (0.6 - 0.5 = 0.1) but must
/// suppress delta_pct — a percent-of-a-ratio is not meaningful.
#[test]
fn write_diff_derived_ratio_suppresses_pct() {
    // Reformatted: the original jammed two statements per line.
    let mut a = make_thread("p", "w");
    a.nr_wakeups_affine = MonotonicCount(50);
    a.nr_wakeups_affine_attempts = MonotonicCount(100);
    let mut b = make_thread("p", "w");
    b.nr_wakeups_affine = MonotonicCount(60);
    b.nr_wakeups_affine_attempts = MonotonicCount(100);
    let diff = compare(
        &snap_with(vec![a]),
        &snap_with(vec![b]),
        &CompareOptions::default(),
    );
    let row = diff
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "affine_success_ratio")
        .expect("affine_success_ratio present");
    let delta = row.delta.expect("delta present when both sides defined");
    assert!(
        (delta - 0.1).abs() < 1e-10,
        "expected delta ~0.1 (0.6 - 0.5 in f64), got {delta}",
    );
    assert!(
        row.delta_pct.is_none(),
        "ratio row must suppress delta_pct, got {:?}",
        row.delta_pct
    );
}
/// Derived ns-unit rows (unlike ratios) must keep delta_pct:
/// 100 → 150 ns gives delta 50 and pct ~0.5.
#[test]
fn write_diff_derived_ns_keeps_pct() {
    // Reformatted: the original jammed two statements per line.
    let mut a = make_thread("p", "w");
    a.wait_sum = MonotonicNs(1000);
    a.wait_count = MonotonicCount(10);
    let mut b = make_thread("p", "w");
    b.wait_sum = MonotonicNs(1500);
    b.wait_count = MonotonicCount(10);
    let diff = compare(
        &snap_with(vec![a]),
        &snap_with(vec![b]),
        &CompareOptions::default(),
    );
    let row = diff
        .derived_rows
        .iter()
        .find(|r| r.metric_name == "avg_wait_ns")
        .expect("avg_wait_ns present");
    assert_eq!(row.baseline, Some(DerivedValue::Scalar(100.0)));
    assert_eq!(row.candidate, Some(DerivedValue::Scalar(150.0)));
    assert_eq!(row.delta, Some(50.0));
    assert!(row.delta_pct.is_some());
    let pct = row.delta_pct.unwrap();
    assert!(
        (pct - 0.5).abs() < 1e-9,
        "expected delta_pct ~0.5, got {pct}"
    );
}
/// The rendered diff must contain the derived-metrics section header and
/// at least one derived row.
#[test]
fn write_diff_emits_derived_section() {
    let mut worker = make_thread("p", "w");
    worker.run_time_ns = MonotonicNs(1000);
    worker.timeslices = MonotonicCount(4);
    let baseline = snap_with(vec![worker.clone()]);
    let candidate = snap_with(vec![worker]);
    let diff = compare(&baseline, &candidate, &CompareOptions::default());
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(
        out.contains("## Derived metrics"),
        "missing derived section header:\n{out}",
    );
    assert!(
        out.contains("avg_slice_ns"),
        "missing avg_slice_ns row in derived section:\n{out}",
    );
}
/// `--sort-by` must accept derived metric names; a bare name defaults to
/// descending order.
#[test]
fn parse_sort_by_accepts_derived_metric_name() {
    let keys = parse_sort_by("cpu_efficiency").expect("derived name parses");
    assert_eq!(keys.len(), 1);
    let key = &keys[0];
    assert_eq!(key.metric, "cpu_efficiency");
    assert!(key.descending);
}
/// The unknown-metric error message must advertise derived metric names
/// alongside the primary registry names.
#[test]
fn parse_sort_by_unknown_lists_derived_names() {
    let err = parse_sort_by("not_a_real_metric").unwrap_err();
    let msg = format!("{err:#}");
    let mentions_derived = ["affine_success_ratio", "cpu_efficiency", "avg_wait_ns"]
        .iter()
        .any(|name| msg.contains(name));
    assert!(
        mentions_derived,
        "error must list derived metric names alongside primary; got: {msg}",
    );
}
/// A derived metric name must never collide with a primary registry name.
#[test]
fn registry_and_derived_names_disjoint() {
    let primary: std::collections::BTreeSet<&str> = CTPROF_METRICS
        .iter()
        .map(|m| m.name)
        .collect();
    for derived in CTPROF_DERIVED_METRICS {
        assert!(
            !primary.contains(derived.name),
            "derived metric {} shadows primary registry name",
            derived.name,
        );
    }
}
/// Every derived metric must carry a description, a non-empty input list,
/// and inputs that all name primary registry metrics.
#[test]
fn registry_derived_metrics_well_formed() {
    // Hoisted out of the loop: the primary-name set is loop-invariant and
    // was previously rebuilt for every derived metric.
    let primary: std::collections::BTreeSet<&str> =
        CTPROF_METRICS.iter().map(|m| m.name).collect();
    for d in CTPROF_DERIVED_METRICS {
        assert!(
            !d.description.is_empty(),
            "derived metric {} has empty description",
            d.name,
        );
        assert!(
            !d.inputs.is_empty(),
            "derived metric {} has empty inputs list",
            d.name,
        );
        for input in d.inputs {
            assert!(
                primary.contains(input),
                "derived metric {} cites unknown input {input}",
                d.name,
            );
        }
    }
}
/// metric-list output must document every derived metric under its own
/// section header.
#[test]
fn write_metric_list_emits_derived_section() {
    let mut out = String::new();
    write_metric_list(&mut out).unwrap();
    assert!(
        out.contains("## Derived metrics"),
        "metric-list must emit a Derived metrics header:\n{out}",
    );
    for derived in CTPROF_DERIVED_METRICS {
        assert!(
            out.contains(derived.name),
            "derived metric {} missing from metric-list:\n{out}",
            derived.name,
        );
    }
}
/// metric-list output must emit the Sections vocabulary heading and name
/// every section's CLI identifier.
#[test]
fn write_metric_list_emits_sections_vocabulary() {
    let mut out = String::new();
    write_metric_list(&mut out).unwrap();
    assert!(
        out.contains("## Sections"),
        "metric-list must emit the Sections vocabulary heading:\n{out}",
    );
    for sec in Section::ALL {
        assert!(
            out.contains(sec.cli_name()),
            "section cli_name {} missing from Sections \
             vocabulary table:\n{out}",
            sec.cli_name(),
        );
    }
}
/// The Sections vocabulary must render before the Metrics table.
#[test]
fn write_metric_list_sections_precedes_metrics() {
    let mut out = String::new();
    write_metric_list(&mut out).unwrap();
    let sections_at = out.find("## Sections").expect("Sections heading must be present");
    let metrics_at = out.find("## Metrics").expect("Metrics heading must be present");
    assert!(
        sections_at < metrics_at,
        "Sections heading must precede Metrics heading; \
         got Sections@{sections_at} Metrics@{metrics_at}\n{out}",
    );
}
/// Ratio cells render with exactly three decimal places.
#[test]
fn format_derived_value_cell_ratio_three_decimals() {
    let rendered =
        format_derived_value_cell(DerivedValue::Scalar(0.873_5), ScaleLadder::None, true);
    assert_eq!(rendered, "0.874");
}
/// The ns ladder auto-scales 2.5e6 ns up to milliseconds.
#[test]
fn format_derived_value_cell_ns_auto_scales() {
    let rendered =
        format_derived_value_cell(DerivedValue::Scalar(2_500_000.0), ScaleLadder::Ns, false);
    assert_eq!(rendered, "2.500ms");
}
/// Sub-unit ns values keep their fractional digits rather than truncating.
#[test]
fn format_derived_value_cell_ns_preserves_fractional_precision() {
    let rendered =
        format_derived_value_cell(DerivedValue::Scalar(123.4), ScaleLadder::Ns, false);
    assert_eq!(rendered, "123.40ns");
}
/// Negative byte quantities carry the minus sign through IEC scaling.
#[test]
fn format_derived_value_cell_negative_bytes_signed() {
    let minus_two_kib = DerivedValue::Scalar(-(2.0 * 1024.0));
    let rendered = format_derived_value_cell(minus_two_kib, ScaleLadder::Bytes, false);
    assert_eq!(rendered, "-2.000KiB");
}
/// Positive ratio deltas render with an explicit leading `+`.
#[test]
fn format_derived_delta_cell_ratio_carries_sign() {
    let rendered = format_derived_delta_cell(0.1, ScaleLadder::None, true);
    assert_eq!(rendered, "+0.100");
}
/// A negative value large enough to hit the MiB step scales with the sign
/// intact (-2_000_000 B = -1.907 MiB).
#[test]
fn format_derived_value_cell_negative_bytes_at_mib_step() {
    let rendered =
        format_derived_value_cell(DerivedValue::Scalar(-2_000_000.0), ScaleLadder::Bytes, false);
    assert_eq!(rendered, "-1.907MiB");
}
/// Ratios above 1.0 render verbatim — no clamping to [0, 1].
#[test]
fn format_derived_value_cell_ratio_above_one_renders_verbatim() {
    let rendered =
        format_derived_value_cell(DerivedValue::Scalar(1.5), ScaleLadder::None, true);
    assert_eq!(rendered, "1.500");
}
/// `--sort-by avg_wait_ns` (descending) must rank derived rows so the group
/// with the larger change leads: `high` moves 100 → 300 ns while `low`
/// moves 100 → 150 ns, so `high` must come first.
#[test]
fn write_diff_sort_by_derived_metric_ranks_groups() {
    // "high" group: avg_wait 100 ns baseline, 300 ns candidate.
    let mut high_a = make_thread("p", "w");
    high_a.pcomm = "high".to_string();
    high_a.wait_sum = MonotonicNs(100);
    high_a.wait_count = MonotonicCount(1);
    let mut high_b = make_thread("p", "w");
    high_b.pcomm = "high".to_string();
    high_b.wait_sum = MonotonicNs(300);
    high_b.wait_count = MonotonicCount(1);
    // "low" group: avg_wait 100 ns baseline, 150 ns candidate.
    let mut low_a = make_thread("p", "w");
    low_a.pcomm = "low".to_string();
    low_a.wait_sum = MonotonicNs(100);
    low_a.wait_count = MonotonicCount(1);
    let mut low_b = make_thread("p", "w");
    low_b.pcomm = "low".to_string();
    low_b.wait_sum = MonotonicNs(150);
    low_b.wait_count = MonotonicCount(1);
    let opts = CompareOptions {
        sort_by: vec![SortKey {
            metric: "avg_wait_ns",
            descending: true,
        }],
        ..CompareOptions::default()
    };
    let diff = compare(
        &snap_with(vec![high_a, low_a]),
        &snap_with(vec![high_b, low_b]),
        &opts,
    );
    let first = &diff.derived_rows[0];
    assert_eq!(
        first.group_key, "high",
        "descending sort by avg_wait_ns must put `high` first; \
         got {:?}",
        first.group_key,
    );
}
/// metric-list output must include every capability/scheduler tag plus the
/// legend and metrics section headers. The tag checks share one message
/// template, so they collapse into a data-driven loop.
#[test]
fn write_metric_list_emits_full_tag_legend() {
    let mut out = String::new();
    write_metric_list(&mut out).unwrap();
    for tag in [
        "[cfs-only]",
        "[non-ext]",
        "[fair-policy]",
        "[SCHED_INFO]",
        "[SCHEDSTATS]",
        "[SCHED_CORE]",
        "[SCHED_CLASS_EXT]",
        "[TASK_DELAY_ACCT]",
        "[TASK_IO_ACCOUNTING]",
        "[TASKSTATS]",
        "[TASK_XACCT]",
        "[dead]",
    ] {
        assert!(out.contains(tag), "missing {tag} in legend:\n{out}");
    }
    assert!(
        out.contains("## Tag legend"),
        "missing Tag legend section header:\n{out}",
    );
    assert!(
        out.contains("## Metrics"),
        "missing Metrics section header:\n{out}",
    );
}
/// Every registered metric's name AND description must surface in the
/// metric-list output.
#[test]
fn write_metric_list_covers_every_registered_metric() {
    let mut out = String::new();
    write_metric_list(&mut out).unwrap();
    for metric in CTPROF_METRICS {
        assert!(
            out.contains(metric.name),
            "metric {} missing from metric-list output:\n{out}",
            metric.name,
        );
        assert!(
            out.contains(metric.description),
            "description for {} missing from metric-list output:\n{out}",
            metric.name,
        );
    }
}
/// The tags column must hold bare tags only — the metric name must never
/// leak into it.
#[test]
fn write_metric_list_tags_column_excludes_metric_name() {
    let mut out = String::new();
    write_metric_list(&mut out).unwrap();
    let has_bare_pair = out.contains("[cfs-only] [SCHEDSTATS]");
    assert!(
        has_bare_pair,
        "expected bare tag pair `[cfs-only] [SCHEDSTATS]` in tags column:\n{out}",
    );
    let name_leaked = out.contains("nr_wakeups_affine [cfs-only]");
    assert!(
        !name_leaked,
        "metric name must not leak into tags column:\n{out}",
    );
}
/// Primary registry descriptions must be non-empty and whitespace-trimmed.
#[test]
fn registry_descriptions_are_non_empty() {
    for metric in CTPROF_METRICS {
        assert!(
            !metric.description.is_empty(),
            "metric {} has empty description",
            metric.name,
        );
        assert_eq!(
            metric.description.trim(),
            metric.description,
            "metric {} description has leading/trailing whitespace",
            metric.name,
        );
    }
}
/// With a 1-vs-1 frequency tie, the Mode rule picks the lexicographically
/// smaller value (SCHED_FIFO < SCHED_OTHER).
#[test]
fn mode_rule_tie_break_is_lexicographic() {
    let mut fifo = make_thread("app", "w1");
    fifo.policy = "SCHED_FIFO".into();
    let mut other = make_thread("app", "w2");
    other.policy = "SCHED_OTHER".into();
    match aggregate(AggRule::Mode(|t| t.policy.clone()), &[&fifo, &other]) {
        Aggregated::Mode { value, count, .. } => {
            assert_eq!(value, "SCHED_FIFO");
            assert_eq!(count, 1);
        }
        unexpected => panic!("expected Mode, got {unexpected:?}"),
    }
}
/// Aggregating affinity over zero threads yields an all-zero summary with
/// no uniform set.
#[test]
fn affinity_aggregate_on_empty_threads_is_zero() {
    let no_threads: Vec<&ThreadState> = Vec::new();
    match aggregate(AggRule::Affinity(|t| t.cpu_affinity.clone()), &no_threads) {
        Aggregated::Affinity(summary) => {
            assert_eq!(summary.min_cpus, 0);
            assert_eq!(summary.max_cpus, 0);
            assert!(summary.uniform.is_none());
        }
        unexpected => panic!("expected Affinity, got {unexpected:?}"),
    }
}
/// A degenerate min==max range displays as a single number; a real spread
/// displays as `min..max`.
#[test]
fn ordinal_display_collapses_degenerate_range() {
    let degenerate = Aggregated::OrdinalRange { min: 0, max: 0 };
    assert_eq!(degenerate.to_string(), "0");
    let spread = Aggregated::OrdinalRange { min: -5, max: 10 };
    assert_eq!(spread.to_string(), "-5..10");
}
/// Mode display appends the `(count/total)` ratio only when the threads
/// disagree; a unanimous value renders bare.
#[test]
fn mode_display_hides_ratio_when_unanimous() {
    let unanimous = Aggregated::Mode {
        value: "SCHED_OTHER".into(),
        count: 4,
        total: 4,
    };
    assert_eq!(unanimous.to_string(), "SCHED_OTHER");
    let split = Aggregated::Mode {
        value: "SCHED_OTHER".into(),
        count: 3,
        total: 5,
    };
    assert_eq!(split.to_string(), "SCHED_OTHER (3/5)");
}
/// Default rendering must include every standard table header.
#[test]
fn write_diff_emits_expected_column_headers() {
    let baseline = snap_with(vec![make_thread("p", "w")]);
    let candidate = snap_with(vec![make_thread("p", "w")]);
    let diff = compare(&baseline, &candidate, &CompareOptions::default());
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    let expected_headers = [
        "pcomm",
        "threads",
        "metric",
        "baseline",
        "candidate",
        "delta",
        "%",
    ];
    for h in expected_headers {
        assert!(out.contains(h), "missing header {h}:\n{out}");
    }
}
/// The group column header must track the requested GroupBy variant.
#[test]
fn write_diff_header_switches_on_group_by() {
    let empty = CtprofDiff::default();
    // Shared render helper instead of two copy-pasted write_diff calls.
    let render = |group_by: GroupBy| {
        let mut out = String::new();
        write_diff(
            &mut out,
            &empty,
            Path::new("a"),
            Path::new("b"),
            group_by,
            &DisplayOptions::default(),
        )
        .unwrap();
        out
    };
    let cgroup_out = render(GroupBy::Cgroup);
    assert!(cgroup_out.contains("cgroup"));
    let comm_out = render(GroupBy::Comm);
    assert!(comm_out.contains("comm"));
    assert!(!comm_out.contains("pcomm"));
}
/// Groups present only in the baseline get their own section, which names
/// the baseline file path.
#[test]
fn write_diff_prints_only_baseline_section() {
    let diff = CtprofDiff {
        only_baseline: vec!["missing_proc".into()],
        ..CtprofDiff::default()
    };
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("/tmp/a.ctprof.zst"),
        Path::new("/tmp/b.ctprof.zst"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(out.contains("only in baseline"));
    assert!(out.contains("missing_proc"));
    // The baseline path (not the candidate's) must be cited.
    assert!(out.contains("/tmp/a.ctprof.zst"));
}
/// Groups present only in the candidate get their own section, which names
/// the candidate file path.
#[test]
fn write_diff_prints_only_candidate_section() {
    let diff = CtprofDiff {
        only_candidate: vec!["new_proc".into()],
        ..CtprofDiff::default()
    };
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("/tmp/a.ctprof.zst"),
        Path::new("/tmp/b.ctprof.zst"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(out.contains("only in candidate"));
    assert!(out.contains("new_proc"));
    // The candidate path (not the baseline's) must be cited.
    assert!(out.contains("/tmp/b.ctprof.zst"));
}
/// Cgroup mode must append the enrichment section with contiguous
/// `base → cand (delta)` triples, scaled per metric.
#[test]
fn write_diff_cgroup_enrichment_section_for_cgroup_mode() {
    let mut diff = CtprofDiff::default();
    let stats_a = simple_cgroup_stats(10, 0, 0, 100);
    let stats_b = simple_cgroup_stats(50, 0, 0, 200);
    diff.cgroup_stats_a.insert("/app".into(), stats_a);
    diff.cgroup_stats_b.insert("/app".into(), stats_b);
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Cgroup,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(
        out.contains("cpu_usage_usec"),
        "missing enrichment header:\n{out}"
    );
    assert!(
        out.contains("10µs → 50µs (+40µs)"),
        "missing contiguous scaled triple `10µs → 50µs (+40µs)`:\n{out}",
    );
    assert!(
        out.contains("100B → 200B (+100B)"),
        "missing contiguous scaled triple `100B → 200B (+100B)`:\n{out}",
    );
}
/// The cgroup enrichment section must not leak into pcomm-grouped output,
/// even when cgroup stats are present in the diff.
#[test]
fn write_diff_enrichment_section_absent_when_group_by_pcomm() {
    let mut diff = CtprofDiff::default();
    let stats = simple_cgroup_stats(10, 0, 0, 0);
    diff.cgroup_stats_a.insert("/app".into(), stats);
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(!out.contains("cpu_usage_usec"), "enrichment leaked:\n{out}");
}
/// Negative numeric deltas must render signed with a unit in the delta
/// cell and signed in the percent cell (100 → 50 ns here).
#[test]
fn write_diff_delta_cell_has_plus_minus_sign() {
    let mut baseline_thread = make_thread("app", "w");
    baseline_thread.run_time_ns = MonotonicNs(100);
    let mut candidate_thread = make_thread("app", "w");
    candidate_thread.run_time_ns = MonotonicNs(50);
    let diff = compare(
        &snap_with(vec![baseline_thread]),
        &snap_with(vec![candidate_thread]),
        &CompareOptions::default(),
    );
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(
        out.contains("-50ns"),
        "missing signed delta with unit:\n{out}",
    );
    assert!(out.contains("-50.0%"), "missing signed pct:\n{out}");
}
/// Categorical metrics whose value changes between snapshots must be
/// labeled `differs` in the rendered diff.
#[test]
fn write_diff_categorical_delta_labels_same_or_differs() {
    let mut baseline_thread = make_thread("app", "w");
    baseline_thread.policy = "SCHED_OTHER".into();
    let mut candidate_thread = make_thread("app", "w");
    candidate_thread.policy = "SCHED_FIFO".into();
    let diff = compare(
        &snap_with(vec![baseline_thread]),
        &snap_with(vec![candidate_thread]),
        &CompareOptions::default(),
    );
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(out.contains("differs"), "missing 'differs' label:\n{out}");
}
/// Full pipeline smoke test: build snapshots → write to disk → load back →
/// compare → render. run_time 1ms → 3ms must surface as a "+2.000ms"
/// delta, and the policy change as `differs`.
#[test]
fn load_compare_render_pipeline_end_to_end() {
    let mut a = make_thread("etoe_proc", "thread_a");
    a.run_time_ns = MonotonicNs(1_000_000);
    a.voluntary_csw = MonotonicCount(10);
    a.policy = "SCHED_OTHER".into();
    let snap_a = snap_with(vec![a]);
    let mut b = make_thread("etoe_proc", "thread_a");
    b.run_time_ns = MonotonicNs(3_000_000);
    b.voluntary_csw = MonotonicCount(30);
    b.policy = "SCHED_FIFO".into();
    let snap_b = snap_with(vec![b]);
    // Round-trip both snapshots through the on-disk format.
    let tmp_a = tempfile::NamedTempFile::new().unwrap();
    let tmp_b = tempfile::NamedTempFile::new().unwrap();
    snap_a.write(tmp_a.path()).unwrap();
    snap_b.write(tmp_b.path()).unwrap();
    let loaded_a = CtprofSnapshot::load(tmp_a.path()).unwrap();
    let loaded_b = CtprofSnapshot::load(tmp_b.path()).unwrap();
    let diff = compare(&loaded_a, &loaded_b, &CompareOptions::default());
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        tmp_a.path(),
        tmp_b.path(),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(out.contains("pcomm"));
    assert!(out.contains("metric"));
    assert!(out.contains("etoe_proc"));
    assert!(
        out.contains("+2.000ms"),
        "run_time delta missing in:\n{out}",
    );
    assert!(out.contains("differs"));
}
/// cgroup_cell branches: both sides present (positive and negative delta),
/// one-sided in either direction, and fully absent.
#[test]
fn cgroup_cell_renders_all_four_branches() {
    let cases = [
        (Some(10), Some(42), "10 → 42 (+32)"),
        (Some(50), Some(5), "50 → 5 (-45)"),
        (Some(7), None, "7 → -"),
        (None, Some(99), "- → 99"),
        (None, None, "-"),
    ];
    for (base, cand, expected) in cases {
        assert_eq!(cgroup_cell(base, cand, ScaleLadder::Unitless), expected);
    }
}
/// PSI avg cells (centi-percent inputs) across all presence branches.
#[test]
fn format_psi_avg_cell_renders_all_four_branches() {
    let cases = [
        (Some(1859), Some(2431), "18.59% → 24.31% (+5.72%)"),
        (Some(2431), Some(1859), "24.31% → 18.59% (-5.72%)"),
        (Some(750), None, "7.50% → -"),
        (None, Some(50), "- → 0.50%"),
        (None, None, "-"),
    ];
    for (base, cand, expected) in cases {
        assert_eq!(format_psi_avg_cell(base, cand), expected);
    }
}
/// Centi-percent formatting always zero-pads to two fraction digits.
#[test]
fn format_psi_avg_centi_percent_zero_pads_fraction() {
    let cases = [
        (0, "0.00%"),
        (5, "0.05%"),
        (50, "0.50%"),
        (100, "1.00%"),
        (101, "1.01%"),
        (10000, "100.00%"),
        (10099, "100.99%"),
    ];
    for (centi, expected) in cases {
        assert_eq!(format_psi_avg_centi_percent(centi), expected);
    }
}
/// All-zero PSI on both sides counts as "no data".
#[test]
fn psi_pair_has_data_returns_false_when_both_sides_zero() {
    let blank = Psi::default();
    assert!(!psi_pair_has_data(&blank, &blank));
}
/// A nonzero avg on either side counts as data, regardless of argument
/// order.
#[test]
fn psi_pair_has_data_returns_true_when_one_side_nonzero() {
    let blank = Psi::default();
    let mut loaded = Psi::default();
    loaded.cpu.some.avg10 = 1;
    assert!(psi_pair_has_data(&blank, &loaded));
    assert!(psi_pair_has_data(&loaded, &blank));
}
/// A nonzero total_usec alone (with every avg zero) still counts as data.
#[test]
fn psi_pair_has_data_detects_total_usec_only_data() {
    let blank = Psi::default();
    let mut loaded = Psi::default();
    loaded.io.full.total_usec = 1;
    assert!(psi_pair_has_data(&blank, &loaded));
    assert!(psi_pair_has_data(&loaded, &blank));
}
/// The µs ladder climbs to seconds for large values and stays at µs for
/// small ones.
#[test]
fn cgroup_cell_scales_microseconds_to_ms_or_s() {
    let cases = [
        (1_500_000, 3_000_000, "1.500s → 3.000s (+1.500s)"),
        (500, 900, "500µs → 900µs (+400µs)"),
    ];
    for (base, cand, expected) in cases {
        assert_eq!(cgroup_cell(Some(base), Some(cand), ScaleLadder::Us), expected);
    }
}
/// The byte ladder uses IEC (1024-based) prefixes.
#[test]
fn cgroup_cell_scales_bytes_to_iec_prefix() {
    let one_gib: u64 = 1024 * 1024 * 1024;
    assert_eq!(
        cgroup_cell(Some(one_gib), Some(2 * one_gib), ScaleLadder::Bytes),
        "1.000GiB → 2.000GiB (+1.000GiB)",
    );
}
/// The unitless ladder scales raw counts through the K, M, and G steps.
#[test]
fn cgroup_cell_scales_unitless_count_to_k_m_g() {
    let cases: [(u64, u64, &str); 3] = [
        (1_500, 2_500, "1.500K → 2.500K (+1.000K)"),
        (1_500_000, 2_500_000, "1.500M → 2.500M (+1.000M)"),
        (1_500_000_000, 2_500_000_000, "1.500G → 2.500G (+1.000G)"),
    ];
    for (base, cand, expected) in cases {
        assert_eq!(
            cgroup_cell(Some(base), Some(cand), ScaleLadder::Unitless),
            expected,
        );
    }
}
/// Enrichment rows must tolerate cgroup keys present on only one side,
/// rendering `-` for the missing side of the arrow cell.
#[test]
fn write_diff_enrichment_handles_one_sided_cgroup_keys() {
    let mut diff = CtprofDiff::default();
    diff.cgroup_stats_a
        .insert("/only-baseline".into(), simple_cgroup_stats(111, 0, 0, 0));
    diff.cgroup_stats_b
        .insert("/only-candidate".into(), simple_cgroup_stats(222, 0, 0, 0));
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Cgroup,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(
        out.contains("/only-baseline"),
        "baseline-only key missing:\n{out}",
    );
    assert!(
        out.contains("/only-candidate"),
        "candidate-only key missing:\n{out}",
    );
    assert!(
        out.contains("111µs → -"),
        "baseline-only row missing '111µs → -' cell:\n{out}",
    );
    assert!(
        out.contains("- → 222µs"),
        "candidate-only row missing '- → 222µs' cell:\n{out}",
    );
}
/// When two groups produce identical delta_pct (both go 1_000 → 2_000 ns,
/// i.e. +100% each), ordering must fall back to ascending group_key:
/// `alpha` before `bravo`.
#[test]
fn write_diff_stable_sort_tie_breaks_by_group_key_ascending() {
    let mut a1 = make_thread("alpha", "w");
    a1.run_time_ns = MonotonicNs(1_000);
    let mut a2 = make_thread("bravo", "w");
    a2.run_time_ns = MonotonicNs(1_000);
    let mut b1 = make_thread("alpha", "w");
    b1.run_time_ns = MonotonicNs(2_000);
    let mut b2 = make_thread("bravo", "w");
    b2.run_time_ns = MonotonicNs(2_000);
    let diff = compare(
        &snap_with(vec![a1, a2]),
        &snap_with(vec![b1, b2]),
        &CompareOptions::default(),
    );
    let run_rows: Vec<&DiffRow> = diff
        .rows
        .iter()
        .filter(|r| r.metric_name == "run_time_ns")
        .collect();
    assert_eq!(run_rows.len(), 2);
    // Sanity-check the fixture itself: the tie must actually exist.
    assert!(
        (run_rows[0].delta_pct.unwrap() - 1.0).abs() < 1e-9
            && (run_rows[1].delta_pct.unwrap() - 1.0).abs() < 1e-9,
        "test fixture must produce identical delta_pct for both groups",
    );
    assert_eq!(
        run_rows[0].group_key, "alpha",
        "ascending group_key tie-break expected alpha first",
    );
    assert_eq!(run_rows[1].group_key, "bravo");
}
// Rows with a zero delta must sort below rows with a nonzero delta for the
// same metric, regardless of alphabetical group-key order.
#[test]
fn sort_key_zero_delta_rows_sink_below_nonzero() {
    // "calm" is unchanged (500 → 500); "birth" grows from 0 → 100.
    let mut a1 = make_thread("calm", "w");
    a1.run_time_ns = MonotonicNs(500);
    let mut b1 = make_thread("calm", "w");
    b1.run_time_ns = MonotonicNs(500);
    let a2 = make_thread("birth", "w");
    let mut b2 = make_thread("birth", "w");
    b2.run_time_ns = MonotonicNs(100);
    let diff = compare(
        &snap_with(vec![a1, a2]),
        &snap_with(vec![b1, b2]),
        &CompareOptions::default(),
    );
    let run_rows: Vec<&DiffRow> = diff
        .rows
        .iter()
        .filter(|r| r.metric_name == "run_time_ns")
        .collect();
    // Changed group first, then the unchanged one.
    assert_eq!(run_rows[0].group_key, "birth");
    assert_eq!(run_rows[1].group_key, "calm");
    assert_eq!(run_rows[0].delta, Some(100.0));
    // Growth from a zero baseline has no meaningful percentage.
    assert!(run_rows[0].delta_pct.is_none());
    assert_eq!(run_rows[1].delta, Some(0.0));
    assert_eq!(run_rows[1].delta_pct, Some(0.0));
}
// Non-numeric (Mode) rows carry no delta; they must sort below numeric rows
// that do carry one.
#[test]
fn sort_key_none_delta_rows_sink_to_bottom() {
    let mut a = make_thread("app", "w");
    a.run_time_ns = MonotonicNs(100);
    a.policy = "SCHED_OTHER".into();
    let mut b = make_thread("app", "w");
    b.run_time_ns = MonotonicNs(200);
    // Policy changes between snapshots, but Mode rows still have delta = None.
    b.policy = "SCHED_FIFO".into();
    let diff = compare(
        &snap_with(vec![a]),
        &snap_with(vec![b]),
        &CompareOptions::default(),
    );
    let run_idx = diff
        .rows
        .iter()
        .position(|r| r.metric_name == "run_time_ns")
        .expect("run_time_ns row");
    let policy_idx = diff
        .rows
        .iter()
        .position(|r| r.metric_name == "policy")
        .expect("policy row");
    assert!(
        run_idx < policy_idx,
        "numeric row at {run_idx} must sort above Mode row at {policy_idx}",
    );
    assert!(diff.rows[policy_idx].delta.is_none());
}
// Aggregating an ordinal range over zero threads must yield a 0..0 range,
// not panic on an empty min/max computation.
#[test]
fn aggregate_ordinal_range_on_empty_threads_is_zero() {
    let empty: Vec<&ThreadState> = vec![];
    let v = aggregate(AggRule::RangeI32(|t| t.nice), &empty);
    match v {
        Aggregated::OrdinalRange { min, max } => {
            assert_eq!(min, 0);
            assert_eq!(max, 0);
        }
        other => panic!("expected OrdinalRange, got {other:?}"),
    }
}
// Mode over zero threads degrades to an empty value with zero counts.
#[test]
fn aggregate_mode_on_empty_threads_is_empty() {
    let empty: Vec<&ThreadState> = vec![];
    let v = aggregate(AggRule::Mode(|t| t.policy.clone()), &empty);
    match v {
        Aggregated::Mode {
            value,
            count,
            total,
        } => {
            assert!(value.is_empty());
            assert_eq!(count, 0);
            assert_eq!(total, 0);
        }
        other => panic!("expected Mode, got {other:?}"),
    }
}
// All three mode-flavored rules (Mode, ModeChar, ModeBool) must produce an
// Aggregated::Mode with the majority value stringified, plus count/total.
#[test]
fn mode_aggregate_helper_dispatches_all_three_arms() {
    use crate::metric_types::CategoricalString;
    // 2-vs-1 split per field so the majority is unambiguous.
    let mut t1 = make_thread("p", "w");
    let mut t2 = make_thread("p", "w");
    let mut t3 = make_thread("p", "w");
    t1.policy = CategoricalString::from("SCHED_OTHER");
    t2.policy = CategoricalString::from("SCHED_OTHER");
    t3.policy = CategoricalString::from("SCHED_FIFO");
    t1.state = 'R';
    t2.state = 'R';
    t3.state = 'S';
    t1.ext_enabled = true;
    t2.ext_enabled = true;
    t3.ext_enabled = false;
    let threads: Vec<&ThreadState> = vec![&t1, &t2, &t3];
    // String-valued arm.
    match aggregate(AggRule::Mode(|t| t.policy.clone()), &threads) {
        Aggregated::Mode {
            value,
            count,
            total,
        } => {
            assert_eq!(value, "SCHED_OTHER");
            assert_eq!(count, 2);
            assert_eq!(total, 3);
        }
        other => panic!("expected Mode for AggRule::Mode, got {other:?}"),
    }
    // Char-valued arm renders the char as a one-character string.
    match aggregate(AggRule::ModeChar(|t| t.state), &threads) {
        Aggregated::Mode {
            value,
            count,
            total,
        } => {
            assert_eq!(value, "R");
            assert_eq!(count, 2);
            assert_eq!(total, 3);
        }
        other => panic!("expected Mode for AggRule::ModeChar, got {other:?}"),
    }
    // Bool-valued arm renders "true"/"false".
    match aggregate(AggRule::ModeBool(|t| t.ext_enabled), &threads) {
        Aggregated::Mode {
            value,
            count,
            total,
        } => {
            assert_eq!(value, "true");
            assert_eq!(count, 2);
            assert_eq!(total, 3);
        }
        other => panic!("expected Mode for AggRule::ModeBool, got {other:?}"),
    }
}
// Sum over zero threads is the additive identity.
#[test]
fn aggregate_sum_on_empty_threads_is_zero() {
    let empty: Vec<&ThreadState> = vec![];
    let v = aggregate(AggRule::SumNs(|t| t.run_time_ns), &empty);
    match v {
        Aggregated::Sum(s) => assert_eq!(s, 0),
        other => panic!("expected Sum, got {other:?}"),
    }
}
// Max-type rules must take the group maximum; summing would produce a value
// the assertion message calls out explicitly.
#[test]
fn aggregate_max_picks_group_maximum_not_sum() {
    let mut a = make_thread("p", "w");
    let mut b = make_thread("p", "w");
    let mut c = make_thread("p", "w");
    a.wait_max = PeakNs(100);
    b.wait_max = PeakNs(999_999_999); c.wait_max = PeakNs(50);
    let v = aggregate(AggRule::MaxPeak(|t| t.wait_max), &[&a, &b, &c]);
    match v {
        Aggregated::Max(m) => {
            assert_eq!(
                m, 999_999_999,
                "Max must pick the largest value, not sum (sum \
                 would be 1_000_000_149)"
            );
        }
        other => panic!("expected Max, got {other:?}"),
    }
}
// Max over zero threads degrades to zero rather than panicking.
#[test]
fn aggregate_max_on_empty_threads_is_zero() {
    let empty: Vec<&ThreadState> = vec![];
    let v = aggregate(AggRule::MaxPeak(|t| t.wait_max), &empty);
    match v {
        Aggregated::Max(m) => assert_eq!(m, 0),
        other => panic!("expected Max, got {other:?}"),
    }
}
// A single-thread group's max is just that thread's value, preserved exactly
// (value chosen to exceed u32 to catch accidental narrowing).
#[test]
fn aggregate_max_single_thread_returns_thread_value() {
    let mut t = make_thread("p", "w");
    t.sleep_max = PeakNs(12_345_678_901);
    let v = aggregate(AggRule::MaxPeak(|t| t.sleep_max), &[&t]);
    match v {
        Aggregated::Max(m) => assert_eq!(m, 12_345_678_901),
        other => panic!("expected Max, got {other:?}"),
    }
}
// Guard against copy-paste bugs in the metric registry: each *_max metric's
// accessor must read the field of the same name. We set exactly one field to
// 1 and verify the registry's rule observes it.
#[test]
fn max_metric_accessors_read_expected_field() {
    type MetricSetter = fn(&mut ThreadState);
    let cases: &[(&str, MetricSetter)] = &[
        ("wait_max", |t| t.wait_max = PeakNs(1)),
        ("sleep_max", |t| t.sleep_max = PeakNs(1)),
        ("block_max", |t| t.block_max = PeakNs(1)),
        ("exec_max", |t| t.exec_max = PeakNs(1)),
        ("slice_max", |t| t.slice_max = PeakNs(1)),
    ];
    for (name, set) in cases {
        let mut t = make_thread("p", "w");
        set(&mut t);
        let def = CTPROF_METRICS
            .iter()
            .find(|m| m.name == *name)
            .unwrap_or_else(|| panic!("metric {name} not in registry"));
        let agg = aggregate(def.rule, &[&t]);
        match agg {
            Aggregated::Max(v) => {
                assert_eq!(v, 1, "accessor for {name} did not read the {name} field")
            }
            other => panic!("expected Max for {name}, got {other:?}"),
        }
    }
}
// Max exposes its raw value both numerically and via Display (unscaled).
#[test]
fn aggregated_max_numeric_and_display() {
    let m = Aggregated::Max(7_500_000);
    assert_eq!(m.numeric(), Some(7_500_000.0));
    assert_eq!(format!("{m}"), "7500000");
}
// Mode values are categorical, so numeric() has nothing to return.
#[test]
fn numeric_returns_none_for_mode() {
    let m = Aggregated::Mode {
        value: "SCHED_OTHER".into(),
        count: 4,
        total: 4,
    };
    assert!(m.numeric().is_none());
}
// Affinity summaries expose the midpoint of [min_cpus, max_cpus] as their
// numeric value; a degenerate range (min == max) is just that value.
#[test]
fn numeric_returns_midpoint_for_affinity_heterogeneous() {
    let a = Aggregated::Affinity(AffinitySummary {
        min_cpus: 2,
        max_cpus: 8,
        uniform: None,
    });
    assert_eq!(a.numeric(), Some(5.0));
    let b = Aggregated::Affinity(AffinitySummary {
        min_cpus: 4,
        max_cpus: 4,
        uniform: None,
    });
    assert_eq!(b.numeric(), Some(4.0));
}
// A uniform but non-contiguous CPU set renders as a comma-separated list.
#[test]
fn affinity_display_uniform_noncontiguous_renders_comma_separated() {
    let a = Aggregated::Affinity(AffinitySummary {
        min_cpus: 2,
        max_cpus: 2,
        uniform: Some(vec![0, 2]),
    });
    assert_eq!(a.to_string(), "2 cpus (0,2)");
}
// Same CPU count but differing sets (uniform = None) renders as "(mixed)".
#[test]
fn affinity_display_heterogeneous_same_count_renders_mixed() {
    let a = Aggregated::Affinity(AffinitySummary {
        min_cpus: 3,
        max_cpus: 3,
        uniform: None,
    });
    assert_eq!(a.to_string(), "3 cpus (mixed)");
}
// With no flatten patterns, flatten_cgroup_stats is a passthrough: keys and
// per-cgroup stats survive untouched.
#[test]
fn flatten_cgroup_stats_with_no_patterns_preserves_keys() {
    let mut stats = BTreeMap::new();
    stats.insert("/alpha".into(), simple_cgroup_stats(10, 1, 5, 100));
    stats.insert("/beta".into(), simple_cgroup_stats(20, 2, 15, 200));
    let out = flatten_cgroup_stats(&stats, &[], None);
    assert_eq!(out.len(), 2);
    assert_eq!(out["/alpha"].cpu.usage_usec, 10);
    assert_eq!(out["/alpha"].memory.current, 100);
    assert_eq!(out["/beta"].cpu.usage_usec, 20);
    assert_eq!(out["/beta"].memory.current, 200);
}
// Numeric tokens after any separator style (-, _, ., space) collapse to {N},
// including multiple occurrences within one comm.
#[test]
fn pattern_key_strips_trailing_digits() {
    let cases = [
        ("tokio-worker-12", "tokio-worker-{N}"),
        ("worker_5", "worker_{N}"),
        ("rayon.pool.7", "rayon.pool.{N}"),
        ("Chrome thread 4", "Chrome thread {N}"),
        ("pool-2-thread-7", "pool-{N}-thread-{N}"),
    ];
    for (input, want) in cases {
        assert_eq!(pattern_key(input), want);
    }
}
// A comm that is only a number normalizes to {N}; a dangling separator with
// no trailing token is left alone.
#[test]
fn pattern_key_bare_numeric_and_dangling_separator() {
    let cases = [("0", "{N}"), ("worker-", "worker-")];
    for (input, want) in cases {
        assert_eq!(pattern_key(input), want);
    }
}
// An alphabetic prefix fused directly to digits (no separator) still gets its
// digit run replaced; pure-alpha comms are untouched.
#[test]
fn pattern_key_alpha_prefix_groups_without_separator() {
    let cases = [
        ("CamelCaseWord0", "CamelCaseWord{N}"),
        ("CamelCaseWord175", "CamelCaseWord{N}"),
        ("worker7", "worker{N}"),
        ("cpu0", "cpu{N}"),
        ("init", "init"),
    ];
    for (input, want) in cases {
        assert_eq!(pattern_key(input), want);
    }
}
// Single-letter prefixes normalize too — except combinations that read as a
// hex token ("a0"), which become {H} instead.
#[test]
fn pattern_key_single_letter_alpha_prefix_normalizes() {
    let cases = [
        ("gadget-v2", "gadget-v{N}"),
        ("thingo-r2", "thingo-r{N}"),
        ("t1", "t{N}"),
        ("a0", "{H}"),
        ("t-1", "t-{N}"),
        ("ab_5", "ab_{N}"),
    ];
    for (input, want) in cases {
        assert_eq!(pattern_key(input), want);
    }
}
// Kernel worker names keep their structural separators (/ and :) while both
// numeric positions normalize; unbound ("u" prefix) and high-priority ("H"
// suffix) variants stay distinct, and workqueue decorations are preserved.
#[test]
fn pattern_key_kworker_shapes_under_token_normalizer() {
    assert_eq!(pattern_key("kworker/0:0"), "kworker/{N}:{N}");
    assert_eq!(pattern_key("kworker/3:2"), "kworker/{N}:{N}");
    assert_eq!(pattern_key("kworker/u8:3"), "kworker/u{N}:{N}");
    assert_eq!(pattern_key("kworker/u8:7"), "kworker/u{N}:{N}");
    assert_eq!(pattern_key("kworker/u16:0"), "kworker/u{N}:{N}");
    assert_eq!(
        pattern_key("kworker/0:0-wq_reclaim"),
        "kworker/{N}:{N}-wq_reclaim",
    );
    assert_eq!(
        pattern_key("kworker/47:2-wq_reclaim"),
        "kworker/{N}:{N}-wq_reclaim",
    );
    assert_eq!(pattern_key("kworker/0:1H"), "kworker/{N}:{N}H");
    assert_eq!(
        pattern_key("kworker/0:1H-wq_prio"),
        "kworker/{N}:{N}H-wq_prio",
    );
}
// Digits followed by an alpha suffix keep the suffix ("1H" → "{N}H") unless
// the whole token parses as hex ("1a", "0f", "42abc"), in which case it
// becomes {H}.
#[test]
fn classify_token_digits_alpha_suffix_rule_4() {
    assert_eq!(classify_token("1H"), "{N}H");
    assert_eq!(classify_token("0H"), "{N}H");
    assert_eq!(classify_token("100Hz"), "{N}Hz");
    assert_eq!(classify_token("3z"), "{N}z");
    assert_eq!(classify_token("1a"), "{H}");
    assert_eq!(classify_token("0f"), "{H}");
    assert_eq!(classify_token("42abc"), "{H}");
    assert_eq!(classify_token("1aZ"), "{N}aZ");
    assert_eq!(classify_token("42xyz"), "{N}xyz");
    assert_eq!(classify_token("42"), "{N}");
}
// Degenerate input: empty comm maps to empty key.
#[test]
fn pattern_key_empty_input_returns_empty() {
    assert_eq!(pattern_key(""), "");
}
// Multi-member comm families collapse into their {N} pattern bucket, while a
// lone comm ("main") stays literal — total bucket count is checked too.
#[test]
fn build_groups_comm_produces_pattern_buckets_and_singleton() {
    let mut threads = Vec::new();
    for i in 0..8 {
        threads.push(make_thread("app", &format!("worker-{i}")));
    }
    for i in 0..4 {
        threads.push(make_thread("app", &format!("rayon-pool-{i}")));
    }
    threads.push(make_thread("app", "main"));
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert!(
        groups.contains_key("worker-{N}"),
        "worker-{{N}} pattern bucket",
    );
    assert_eq!(groups["worker-{N}"].thread_count, 8);
    assert!(
        groups.contains_key("rayon-pool-{N}"),
        "rayon-pool-{{N}} pattern bucket",
    );
    assert_eq!(groups["rayon-pool-{N}"].thread_count, 4);
    assert!(
        groups.contains_key("main"),
        "singleton main reverts to literal comm",
    );
    assert_eq!(groups["main"].thread_count, 1);
    assert_eq!(groups.len(), 3);
}
// A pattern with exactly one member must not be normalized: the bucket key
// reverts to the literal comm.
#[test]
fn build_groups_comm_singleton_reverts_to_literal() {
    let snap = snap_with(vec![make_thread("app", "worker-0")]);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert!(
        groups.contains_key("worker-0"),
        "lone worker-0 stays literal",
    );
    assert!(
        !groups.contains_key("worker-{N}"),
        "no `worker-{{N}}` pattern key for a singleton",
    );
    assert_eq!(groups.len(), 1);
}
// "worker-N" and "worker-large-N" share a prefix but have different skeletons,
// so they must land in separate buckets.
#[test]
fn build_groups_comm_distinct_prefixes_do_not_merge() {
    let snap = snap_with(vec![
        make_thread("app", "worker-0"),
        make_thread("app", "worker-1"),
        make_thread("app", "worker-large-0"),
        make_thread("app", "worker-large-1"),
    ]);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups["worker-{N}"].thread_count, 2);
    assert_eq!(groups["worker-large-{N}"].thread_count, 2);
    assert_eq!(groups.len(), 2);
}
// CamelCase names with fused trailing digits cluster under one {N} bucket.
#[test]
fn build_groups_comm_alpha_prefix_clusters_camelcase() {
    let mut threads = Vec::new();
    for i in 0..6 {
        threads.push(make_thread("app", &format!("CamelCaseWord{i}")));
    }
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert!(
        groups.contains_key("CamelCaseWord{N}"),
        "CamelCaseWord{{N}} bucket",
    );
    assert_eq!(groups["CamelCaseWord{N}"].thread_count, 6);
    assert_eq!(groups.len(), 1);
}
// Per-CPU kworkers collapse across CPU/slot numbers, but distinct workqueue
// decorations keep their own buckets.
#[test]
fn build_groups_comm_kworker_workqueue_collapses_per_cpu() {
    let snap = snap_with(vec![
        make_thread("kworker", "kworker/42:7-mm_percpu_wq"),
        make_thread("kworker", "kworker/43:8-mm_percpu_wq"),
        make_thread("kworker", "kworker/44:9-mm_percpu_wq"),
        make_thread("kworker", "kworker/0:0-wq_reclaim"),
        make_thread("kworker", "kworker/1:0-wq_reclaim"),
    ]);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups["kworker/{N}:{N}-mm_percpu_wq"].thread_count, 3);
    assert_eq!(groups["kworker/{N}:{N}-wq_reclaim"].thread_count, 2);
    assert_eq!(groups.len(), 2);
}
// Undecorated kworkers collapse across both the CPU and slot numbers.
#[test]
fn build_groups_comm_kworker_bare_collapses_across_cpus() {
    let snap = snap_with(vec![
        make_thread("kworker", "kworker/0:0"),
        make_thread("kworker", "kworker/0:1"),
        make_thread("kworker", "kworker/1:0"),
        make_thread("kworker", "kworker/3:2"),
    ]);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups["kworker/{N}:{N}"].thread_count, 4);
    assert_eq!(groups.len(), 1);
}
// Unbound kworkers (uN pool prefix) must not merge with CPU-bound ones.
#[test]
fn build_groups_comm_kworker_unbound_separate_from_bound() {
    let snap = snap_with(vec![
        make_thread("kworker", "kworker/0:0"),
        make_thread("kworker", "kworker/3:2"),
        make_thread("kworker", "kworker/u8:3"),
        make_thread("kworker", "kworker/u8:7"),
        make_thread("kworker", "kworker/u16:0"),
    ]);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups["kworker/{N}:{N}"].thread_count, 2);
    assert_eq!(groups["kworker/u{N}:{N}"].thread_count, 3);
    assert_eq!(groups.len(), 2);
}
// Empty comms group under the empty key without panicking.
#[test]
fn build_groups_comm_empty_comm_does_not_panic() {
    let snap = snap_with(vec![make_thread("app", ""), make_thread("app", "")]);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups[""].thread_count, 2);
}
// Kernel-truncated comms (15-char limit) carry no digits, so they group by
// exact string match.
#[test]
fn build_groups_comm_truncated_comms_group_via_exact_match() {
    let snap = snap_with(vec![
        make_thread("app", "tokio-runtime-w"),
        make_thread("app", "tokio-runtime-w"),
    ]);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups["tokio-runtime-w"].thread_count, 2);
    assert_eq!(groups.len(), 1);
}
// Pattern bucketing must neither drop nor double-count: the sum of
// run_time_ns over all buckets equals the sum over all input threads.
#[test]
fn build_groups_comm_sum_conservation_across_buckets() {
    let mut threads = Vec::new();
    for i in 0..5 {
        let mut t = make_thread("app", &format!("worker-{i}"));
        t.run_time_ns = MonotonicNs(100 * (i as u64 + 1));
        threads.push(t);
    }
    for i in 0..3 {
        let mut t = make_thread("app", &format!("redis-bg-{i}"));
        t.run_time_ns = MonotonicNs(50 * (i as u64 + 1));
        threads.push(t);
    }
    let mut single = make_thread("app", "main");
    single.run_time_ns = MonotonicNs(999);
    threads.push(single);
    // Capture the ground-truth total before the snapshot takes ownership.
    let input_total: u64 = threads.iter().map(|t| t.run_time_ns.0).sum();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    let aggregated_total: u64 = groups
        .values()
        .map(|g| match g.metrics.get("run_time_ns") {
            Some(Aggregated::Sum(n)) => *n,
            _ => 0,
        })
        .sum();
    assert_eq!(
        aggregated_total, input_total,
        "pattern-aggregated sum must equal input sum",
    );
}
// CommExact must never normalize: identical literals merge, different
// literals stay apart.
#[test]
fn build_groups_comm_exact_preserves_literal_semantics() {
    let snap = snap_with(vec![
        make_thread("app", "worker-0"),
        make_thread("app", "worker-1"),
        make_thread("app", "worker-1"),
    ]);
    let groups = build_groups(&snap, GroupBy::CommExact, &[], None, None, false);
    assert_eq!(groups["worker-0"].thread_count, 1);
    assert_eq!(groups["worker-1"].thread_count, 2);
    assert_eq!(groups.len(), 2);
}
// Display labels: 0 or 1 members fall back to the key / lone member; 2+
// members get a grex-generated label that mentions the shared prefix.
#[test]
fn pattern_display_label_grex_for_multi_member_else_join_key() {
    let single = vec!["worker-0".to_string()];
    assert_eq!(pattern_display_label("worker-0", &single), "worker-0");
    let empty: Vec<String> = vec![];
    assert_eq!(pattern_display_label("worker", &empty), "worker");
    let multi = vec!["worker-0".to_string(), "worker-1".to_string()];
    let label = pattern_display_label("worker", &multi);
    assert!(
        label.contains("worker"),
        "grex label must mention the shared prefix; got {label:?}",
    );
}
// Diff rows join on the placeholder pattern key, while the display key is the
// grex label computed over the union of members from both snapshots.
#[test]
fn compare_comm_pattern_emits_prefix_join_key_and_grex_display() {
    let baseline = snap_with(vec![
        make_thread("app", "worker-0"),
        make_thread("app", "worker-1"),
    ]);
    let candidate = snap_with(vec![
        make_thread("app", "worker-2"),
        make_thread("app", "worker-3"),
    ]);
    let diff = compare(
        &baseline,
        &candidate,
        &CompareOptions {
            group_by: GroupBy::Comm.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: false,
            no_cg_normalize: false,
            sort_by: Vec::new(),
        },
    );
    let row = diff
        .rows
        .iter()
        .find(|r| r.metric_name == "run_time_ns" && r.group_key == "worker-{N}")
        .expect("worker-{N} row");
    assert_eq!(
        row.group_key, "worker-{N}",
        "join key is the placeholder pattern"
    );
    assert!(
        row.display_key.contains("worker"),
        "display key reflects grex over union; got {:?}",
        row.display_key,
    );
    assert_ne!(
        row.display_key, "worker-{N}",
        "≥2 members → grex regex, not the placeholder pattern",
    );
}
// A pool resize (1 worker → 3 workers, different indices) must still join on
// the shared worker-{N} pattern — no orphaned worker keys on either side.
#[test]
fn compare_comm_pattern_joins_across_asymmetric_resize() {
    let baseline = snap_with(vec![make_thread("app", "worker-7")]);
    let candidate = snap_with(vec![
        make_thread("app", "worker-0"),
        make_thread("app", "worker-1"),
        make_thread("app", "worker-2"),
    ]);
    let diff = compare(
        &baseline,
        &candidate,
        &CompareOptions {
            group_by: GroupBy::Comm.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: false,
            no_cg_normalize: false,
            sort_by: Vec::new(),
        },
    );
    let row = diff
        .rows
        .iter()
        .find(|r| r.metric_name == "run_time_ns" && r.group_key == "worker-{N}")
        .expect("worker-{N} row joined across asymmetric snapshots");
    assert_eq!(row.thread_count_a, 1, "baseline carries 1 worker");
    assert_eq!(row.thread_count_b, 3, "candidate carries 3 workers");
    // Neither side may report worker-* keys as present-on-one-side-only.
    let baseline_orphans: Vec<&String> = diff
        .only_baseline
        .iter()
        .filter(|k| k.starts_with("worker"))
        .collect();
    assert!(
        baseline_orphans.is_empty(),
        "no worker-prefixed orphans in only_baseline; got {baseline_orphans:?}",
    );
    let candidate_orphans: Vec<&String> = diff
        .only_candidate
        .iter()
        .filter(|k| k.starts_with("worker"))
        .collect();
    assert!(
        candidate_orphans.is_empty(),
        "no worker-prefixed orphans in only_candidate; got {candidate_orphans:?}",
    );
}
// All-digit tokens classify as {N} regardless of length.
#[test]
fn classify_token_pure_digits() {
    for input in ["0", "42", "999"] {
        assert_eq!(classify_token(input), "{N}");
    }
}
// Mixed digit/a-f tokens read as hex and classify as {H}; pure-alpha tokens
// stay literal, and "u8" is the digits-after-letter shape instead.
#[test]
fn classify_token_hex_like() {
    let cases = [
        ("a1234", "{H}"),
        ("abc123def456", "{H}"),
        ("7890ab", "{H}"),
        ("1a2", "{H}"),
        ("650ab12cd34ef", "{H}"),
        ("abc", "abc"),
        ("a", "a"),
        ("a1", "{H}"),
        ("u8", "u{N}"),
    ];
    for (input, want) in cases {
        assert_eq!(classify_token(input), want);
    }
}
// Alpha prefix + trailing digits keeps the prefix and replaces the digit run;
// digits embedded mid-token ("proto303handler") are left literal.
#[test]
fn classify_token_alpha_prefix_digits() {
    let cases = [
        ("worker7", "worker{N}"),
        ("CamelCaseWord175", "CamelCaseWord{N}"),
        ("u8", "u{N}"),
        ("u16", "u{N}"),
        ("v2", "v{N}"),
        ("r2", "r{N}"),
        ("1H", "{N}H"),
        ("3z", "{N}z"),
        ("proto303handler", "proto303handler"),
    ];
    for (input, want) in cases {
        assert_eq!(classify_token(input), want);
    }
}
// Tokens with no digits pass through unchanged.
#[test]
fn classify_token_literal_fallback() {
    for input in ["BPF", "CUBIC", "AUTO", "FLOWLABEL", "hamster", "zilch"] {
        assert_eq!(classify_token(input), input);
    }
}
// Degenerate input: empty token classifies as empty.
#[test]
fn classify_token_empty_returns_empty() {
    assert_eq!(classify_token(""), "");
}
// split_into_segments must alternate Token/Separator runs: consecutive
// separator characters merge into one Separator segment, a leading separator
// is allowed, and empty input yields no segments.
#[test]
fn split_into_segments_alternates_token_and_separator_runs() {
    assert!(split_into_segments("").is_empty());
    let segs = split_into_segments("hamster");
    assert_eq!(segs, vec![Segment::Token("hamster")]);
    let segs = split_into_segments("worker-7");
    assert_eq!(
        segs,
        vec![
            Segment::Token("worker"),
            Segment::Separator("-"),
            Segment::Token("7"),
        ],
    );
    // Adjacent separators collapse into a single run ("..").
    let segs = split_into_segments("a..b");
    assert_eq!(
        segs,
        vec![
            Segment::Token("a"),
            Segment::Separator(".."),
            Segment::Token("b"),
        ],
    );
    let segs = split_into_segments("/abc");
    assert_eq!(segs, vec![Segment::Separator("/"), Segment::Token("abc")],);
    // Mixed-character separator run ("._").
    let segs = split_into_segments("yy._650");
    assert_eq!(
        segs,
        vec![
            Segment::Token("yy"),
            Segment::Separator("._"),
            Segment::Token("650"),
        ],
    );
    // '+' (active-workqueue decoration) is a separator like '/' and ':'.
    let segs = split_into_segments("kworker/0:1+events");
    assert_eq!(
        segs,
        vec![
            Segment::Token("kworker"),
            Segment::Separator("/"),
            Segment::Token("0"),
            Segment::Separator(":"),
            Segment::Token("1"),
            Segment::Separator("+"),
            Segment::Token("events"),
        ],
    );
}
// '+' vs '-' decorations produce distinct skeletons, so active and inactive
// kworker variants land in different buckets.
#[test]
fn pattern_key_kworker_active_decoration_separator() {
    assert_eq!(pattern_key("kworker/0:1+events"), "kworker/{N}:{N}+events",);
    assert_eq!(pattern_key("kworker/1:0+events"), "kworker/{N}:{N}+events",);
    assert_ne!(
        pattern_key("kworker/0:1+events"),
        pattern_key("kworker/0:1-events"),
    );
    assert_eq!(pattern_key("kworker/0:1-events"), "kworker/{N}:{N}-events",);
}
// End-to-end spec fixture for thread-name grouping, kept verbatim from the
// grouping specification: every input's skeleton is pinned, then the same
// inputs are grouped and each bucket's membership count and the singleton
// fallbacks are verified.
#[test]
fn spec_thread_grouping_verbatim() {
    let inputs: &[&str] = &[
        "whirly-gig-0",
        "whirly-gig-1",
        "whirly-gig-2",
        "whirly-gig-15",
        "plonk_zap_0",
        "plonk_zap_1",
        "plonk_zap_7",
        "ksoftirqd/0",
        "ksoftirqd/1",
        "ksoftirqd/2",
        "ksoftirqd/99",
        "kworker/0:0",
        "kworker/0:1",
        "kworker/1:0",
        "kworker/3:2",
        "kworker/0:0-wq_reclaim",
        "kworker/1:0-wq_reclaim",
        "kworker/47:2-wq_reclaim",
        "kworker/u8:3",
        "kworker/u8:7",
        "kworker/u16:0",
        "kworker/0:1H-wq_prio",
        "kworker/1:0H-wq_prio",
        "kworker/2:1H-wq_prio",
        "FooBar0",
        "FooBar1",
        "FooBar2",
        "FooBar175",
        "BazQux0",
        "BazQux1",
        "BazQux42",
        "wonk0",
        "wonk1",
        "wonk9",
        "Grommet.Z0",
        "Grommet.Z1",
        "Grommet.Z999",
        "fizz-buzz-wham0",
        "fizz-buzz-wham1",
        "fizz-buzz-wham7",
        "rcu_exp_par_gp_kthread_worker/0",
        "rcu_exp_par_gp_kthread_worker/1",
        "migration/0",
        "migration/1",
        "bloop-tangler",
        "narf-bonker",
        "spork-wrangler",
        "hamster",
        "zilch",
        "gadget-v2",
        "thingo-r2",
        "cpu0",
        "blip0",
        "snorf0",
        "ptp0",
        "BPF_CUBIC",
        "AUTO_FLOWLABEL",
    ];
    // Phase 1: each input's normalized skeleton, pinned individually.
    let expected_keys: &[(&str, &str)] = &[
        ("whirly-gig-0", "whirly-gig-{N}"),
        ("whirly-gig-1", "whirly-gig-{N}"),
        ("whirly-gig-2", "whirly-gig-{N}"),
        ("whirly-gig-15", "whirly-gig-{N}"),
        ("plonk_zap_0", "plonk_zap_{N}"),
        ("plonk_zap_1", "plonk_zap_{N}"),
        ("plonk_zap_7", "plonk_zap_{N}"),
        ("ksoftirqd/0", "ksoftirqd/{N}"),
        ("ksoftirqd/1", "ksoftirqd/{N}"),
        ("ksoftirqd/2", "ksoftirqd/{N}"),
        ("ksoftirqd/99", "ksoftirqd/{N}"),
        ("kworker/0:0", "kworker/{N}:{N}"),
        ("kworker/0:1", "kworker/{N}:{N}"),
        ("kworker/1:0", "kworker/{N}:{N}"),
        ("kworker/3:2", "kworker/{N}:{N}"),
        ("kworker/0:0-wq_reclaim", "kworker/{N}:{N}-wq_reclaim"),
        ("kworker/1:0-wq_reclaim", "kworker/{N}:{N}-wq_reclaim"),
        ("kworker/47:2-wq_reclaim", "kworker/{N}:{N}-wq_reclaim"),
        ("kworker/u8:3", "kworker/u{N}:{N}"),
        ("kworker/u8:7", "kworker/u{N}:{N}"),
        ("kworker/u16:0", "kworker/u{N}:{N}"),
        ("kworker/0:1H-wq_prio", "kworker/{N}:{N}H-wq_prio"),
        ("kworker/1:0H-wq_prio", "kworker/{N}:{N}H-wq_prio"),
        ("kworker/2:1H-wq_prio", "kworker/{N}:{N}H-wq_prio"),
        ("FooBar0", "FooBar{N}"),
        ("FooBar1", "FooBar{N}"),
        ("FooBar2", "FooBar{N}"),
        ("FooBar175", "FooBar{N}"),
        ("BazQux0", "BazQux{N}"),
        ("BazQux1", "BazQux{N}"),
        ("BazQux42", "BazQux{N}"),
        ("wonk0", "wonk{N}"),
        ("wonk1", "wonk{N}"),
        ("wonk9", "wonk{N}"),
        ("Grommet.Z0", "Grommet.Z{N}"),
        ("Grommet.Z1", "Grommet.Z{N}"),
        ("Grommet.Z999", "Grommet.Z{N}"),
        ("fizz-buzz-wham0", "fizz-buzz-wham{N}"),
        ("fizz-buzz-wham1", "fizz-buzz-wham{N}"),
        ("fizz-buzz-wham7", "fizz-buzz-wham{N}"),
        (
            "rcu_exp_par_gp_kthread_worker/0",
            "rcu_exp_par_gp_kthread_worker/{N}",
        ),
        (
            "rcu_exp_par_gp_kthread_worker/1",
            "rcu_exp_par_gp_kthread_worker/{N}",
        ),
        ("migration/0", "migration/{N}"),
        ("migration/1", "migration/{N}"),
        ("bloop-tangler", "bloop-tangler"),
        ("narf-bonker", "narf-bonker"),
        ("spork-wrangler", "spork-wrangler"),
        ("hamster", "hamster"),
        ("zilch", "zilch"),
        ("gadget-v2", "gadget-v{N}"),
        ("thingo-r2", "thingo-r{N}"),
        ("cpu0", "cpu{N}"),
        ("blip0", "blip{N}"),
        ("snorf0", "snorf{N}"),
        ("ptp0", "ptp{N}"),
        ("BPF_CUBIC", "BPF_CUBIC"),
        ("AUTO_FLOWLABEL", "AUTO_FLOWLABEL"),
    ];
    for (input, expected) in expected_keys {
        assert_eq!(
            pattern_key(input),
            *expected,
            "pattern_key({input:?}) skeleton mismatch",
        );
    }
    // Phase 2: grouping — multi-member skeletons become buckets with these
    // exact membership counts.
    let threads: Vec<_> = inputs.iter().map(|c| make_thread("p", c)).collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    let expected_buckets: &[(&str, usize)] = &[
        ("whirly-gig-{N}", 4),
        ("plonk_zap_{N}", 3),
        ("ksoftirqd/{N}", 4),
        ("kworker/{N}:{N}", 4),
        ("kworker/{N}:{N}-wq_reclaim", 3),
        ("kworker/u{N}:{N}", 3),
        ("kworker/{N}:{N}H-wq_prio", 3),
        ("FooBar{N}", 4),
        ("BazQux{N}", 3),
        ("wonk{N}", 3),
        ("Grommet.Z{N}", 3),
        ("fizz-buzz-wham{N}", 3),
        ("rcu_exp_par_gp_kthread_worker/{N}", 2),
        ("migration/{N}", 2),
    ];
    for (key, count) in expected_buckets {
        let g = groups
            .get(*key)
            .unwrap_or_else(|| panic!("missing bucket {key:?}"));
        assert_eq!(
            g.thread_count, *count,
            "bucket {key:?} expected {count} members, got {}",
            g.thread_count,
        );
    }
    // Phase 3: every input without a multi-member pattern stays a literal
    // singleton bucket.
    for singleton in &[
        "bloop-tangler",
        "narf-bonker",
        "spork-wrangler",
        "hamster",
        "zilch",
        "gadget-v2",
        "thingo-r2",
        "cpu0",
        "blip0",
        "snorf0",
        "ptp0",
        "BPF_CUBIC",
        "AUTO_FLOWLABEL",
    ] {
        let g = groups
            .get(*singleton)
            .unwrap_or_else(|| panic!("missing singleton bucket {singleton:?}"));
        assert_eq!(
            g.thread_count, 1,
            "singleton {singleton:?} should have 1 member",
        );
    }
    // 14 pattern buckets + 13 singletons.
    assert_eq!(groups.len(), 14 + 13, "expected 27 buckets total");
}
// With no_thread_normalize set, comms that would normally collapse into a
// {N} bucket must stay as literal group keys.
#[test]
fn no_thread_normalize_uses_literal_comm() {
    let snap_a = snap_with(vec![
        make_thread("p", "worker-0"),
        make_thread("p", "worker-1"),
    ]);
    let snap_b = snap_with(vec![
        make_thread("p", "worker-0"),
        make_thread("p", "worker-1"),
    ]);
    let diff = compare(
        &snap_a,
        &snap_b,
        &CompareOptions {
            group_by: GroupBy::Comm.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: true,
            no_cg_normalize: false,
            sort_by: Vec::new(),
        },
    );
    let group_keys: std::collections::BTreeSet<&str> =
        diff.rows.iter().map(|r| r.group_key.as_str()).collect();
    assert!(
        group_keys.contains("worker-0"),
        "literal worker-0 missing: {group_keys:?}",
    );
    assert!(
        group_keys.contains("worker-1"),
        "literal worker-1 missing: {group_keys:?}",
    );
    assert!(
        !group_keys.contains("worker-{N}"),
        "no normalized bucket under no_thread_normalize: {group_keys:?}",
    );
}
// systemd template instances with a numeric-looking instance id
// (user@0.service) normalize to user@{I}.service; dotted-name instances
// (launcher@foo.bar.baz.service), non-template units, and the root path are
// passed through unchanged.
#[test]
fn apply_systemd_template_opaque_id_to_placeholder() {
    let cases = [
        (
            "/user.slice/user-0.slice/user@0.service/boot.scope",
            "/user.slice/user-0.slice/user@{I}.service/boot.scope",
        ),
        (
            "/user.slice/user-1001.slice/user@1001.service/boot.scope",
            "/user.slice/user-1001.slice/user@{I}.service/boot.scope",
        ),
        (
            "/critical.slice/launcher@foo.bar.baz.service",
            "/critical.slice/launcher@foo.bar.baz.service",
        ),
        (
            "/system.slice/crond.service",
            "/system.slice/crond.service",
        ),
        ("/", "/"),
    ];
    for (input, want) in cases {
        assert_eq!(apply_systemd_template(input), want);
    }
}
// End-to-end spec fixture for cgroup-path grouping, kept verbatim from the
// grouping specification: identical snapshots are compared so every group
// key survives into a row, then the expected normalized buckets and the
// literal singletons are asserted.
#[test]
fn spec_cgroup_grouping_verbatim() {
    let cgroups: &[&str] = &[
        "/",
        "/boot.scope",
        "/critical.slice/emitd.service",
        "/critical.slice/remoted.service",
        "/critical.slice/launcher@foo.bar.baz.service",
        "/critical.slice/launcher@foo.qux.quux.service",
        "/critical.slice/launcher@foo.waldo.grault.service",
        "/system.slice/crond.service",
        "/system.slice/ntpd.service",
        "/system.slice/tpl.slice/launcher@foo.garply.plugh.service",
        "/system.slice/tpl.slice/launcher@foo.corge.xyzzy.service",
        "/system.slice/tpl.slice/launcher@foo.thud.fred.service",
        "/user.slice/user-0.slice/session-a1234.scope",
        "/user.slice/user-0.slice/user@0.service/boot.scope",
        "/user.slice/user-1001.slice/session-b5678.scope",
        "/user.slice/user-1001.slice/user@1001.service/boot.scope",
        // Three allocation-style workload trees with hex-heavy path segments;
        // each contributes a helper-logs leaf plus three nested leaves.
        "/apps.slice/wl-foo.slice/wl-foo-abc123def456.7890ab.alloc.slice/v2_acme.prod_widget_sprocket_run_17.400_fluxcap9000.01.zz3_650ab12cd34ef_1a2.run.yy._650ab34ef56cd_1b3.run.exec.service/helper-logs",
        "/apps.slice/wl-foo.slice/wl-foo-abc123def456.7890ab.alloc.slice/v2_acme.prod_widget_sprocket_run_17.400_fluxcap9000.01.zz3_650ab12cd34ef_1a2.run.yy._650ab34ef56cd_1b3.run.exec.service/nested/boot.scope",
        "/apps.slice/wl-foo.slice/wl-foo-abc123def456.7890ab.alloc.slice/v2_acme.prod_widget_sprocket_run_17.400_fluxcap9000.01.zz3_650ab12cd34ef_1a2.run.yy._650ab34ef56cd_1b3.run.exec.service/nested/system.slice/remoted.service",
        "/apps.slice/wl-foo.slice/wl-foo-abc123def456.7890ab.alloc.slice/v2_acme.prod_widget_sprocket_run_17.400_fluxcap9000.01.zz3_650ab12cd34ef_1a2.run.yy._650ab34ef56cd_1b3.run.exec.service/nested/system.slice/emitd.service",
        "/apps.slice/wl-foo.slice/wl-foo-def789abc012.3456cd.alloc.slice/v2_acme.prod_widget_sprocket_run_22.401_fluxcap9000.01.zz3_650ab12cd78ef_1a3.run.yy._650ab34ef90cd_1b4.run.exec.service/helper-logs",
        "/apps.slice/wl-foo.slice/wl-foo-def789abc012.3456cd.alloc.slice/v2_acme.prod_widget_sprocket_run_22.401_fluxcap9000.01.zz3_650ab12cd78ef_1a3.run.yy._650ab34ef90cd_1b4.run.exec.service/nested/boot.scope",
        "/apps.slice/wl-foo.slice/wl-foo-def789abc012.3456cd.alloc.slice/v2_acme.prod_widget_sprocket_run_22.401_fluxcap9000.01.zz3_650ab12cd78ef_1a3.run.yy._650ab34ef90cd_1b4.run.exec.service/nested/system.slice/remoted.service",
        "/apps.slice/wl-foo.slice/wl-foo-def789abc012.3456cd.alloc.slice/v2_acme.prod_widget_sprocket_run_22.401_fluxcap9000.01.zz3_650ab12cd78ef_1a3.run.yy._650ab34ef90cd_1b4.run.exec.service/nested/system.slice/emitd.service",
        "/apps.slice/wl-foo.slice/wl-foo-fedcba987654.abcdef.alloc.slice/v2_acme.prod_widget_gizmo_run_5.399_fluxcap2000.03.zz7_650ab12cdaaef_2c1.run.yy._650ab34efbbcd_2c2.run.exec.service/helper-logs",
        "/apps.slice/wl-foo.slice/wl-foo-fedcba987654.abcdef.alloc.slice/v2_acme.prod_widget_gizmo_run_5.399_fluxcap2000.03.zz7_650ab12cdaaef_2c1.run.yy._650ab34efbbcd_2c2.run.exec.service/nested/boot.scope",
        "/apps.slice/wl-foo.slice/wl-foo-fedcba987654.abcdef.alloc.slice/v2_acme.prod_widget_gizmo_run_5.399_fluxcap2000.03.zz7_650ab12cdaaef_2c1.run.yy._650ab34efbbcd_2c2.run.exec.service/nested/system.slice/remoted.service",
        "/apps.slice/wl-foo.slice/wl-foo-fedcba987654.abcdef.alloc.slice/v2_acme.prod_widget_gizmo_run_5.399_fluxcap2000.03.zz7_650ab12cdaaef_2c1.run.yy._650ab34efbbcd_2c2.run.exec.service/nested/system.slice/emitd.service",
        "/apps.slice/wl-bar.slice/relay.service",
        "/apps.slice/wl-bar.slice/cache.service",
    ];
    // One thread per cgroup path; identical snapshots on both sides so every
    // group appears as a joined row.
    let threads: Vec<_> = cgroups
        .iter()
        .enumerate()
        .map(|(i, cg)| {
            let mut t = make_thread("p", &format!("t{i}"));
            t.cgroup = (*cg).into();
            t
        })
        .collect();
    let snap_a = snap_with(threads.clone());
    let snap_b = snap_with(threads);
    let diff = compare(
        &snap_a,
        &snap_b,
        &CompareOptions {
            group_by: GroupBy::Cgroup.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: false,
            no_cg_normalize: false,
            sort_by: Vec::new(),
        },
    );
    let group_keys: std::collections::BTreeSet<String> =
        diff.rows.iter().map(|r| r.group_key.clone()).collect();
    // The two user sessions merge under one skeleton (uid → {N}, opaque
    // session id → {H}), and the two user@UID.service trees under another.
    let user_session_skel = "/user.slice/user-{N}.slice/session-{H}.scope";
    assert!(
        group_keys.contains(user_session_skel),
        "missing user-session bucket; got {group_keys:?}",
    );
    let user_service_skel = "/user.slice/user-{N}.slice/user@{I}.service/boot.scope";
    assert!(
        group_keys.contains(user_service_skel),
        "missing user@.service bucket; got {group_keys:?}",
    );
    // Paths with no sibling sharing their skeleton keep their literal key.
    for singleton in &[
        "/",
        "/boot.scope",
        "/critical.slice/emitd.service",
        "/critical.slice/remoted.service",
        "/critical.slice/launcher@foo.bar.baz.service",
        "/critical.slice/launcher@foo.qux.quux.service",
        "/critical.slice/launcher@foo.waldo.grault.service",
        "/system.slice/crond.service",
        "/system.slice/ntpd.service",
        "/system.slice/tpl.slice/launcher@foo.garply.plugh.service",
        "/system.slice/tpl.slice/launcher@foo.corge.xyzzy.service",
        "/system.slice/tpl.slice/launcher@foo.thud.fred.service",
        "/apps.slice/wl-bar.slice/relay.service",
        "/apps.slice/wl-bar.slice/cache.service",
    ] {
        assert!(
            group_keys.contains(*singleton),
            "missing singleton bucket {singleton}; got {group_keys:?}",
        );
    }
}
// Normalization must only generalize tokens that actually vary across group
// members: here only the run number differs (17 vs 22), so the constant
// "fluxcap9000_01.zz3" suffix must stay literal in the tightened key.
#[test]
fn cgroup_tighten_recovers_constant_tokens() {
    let path_1 = "/apps.slice/run-17.fluxcap9000_01.zz3";
    let path_2 = "/apps.slice/run-22.fluxcap9000_01.zz3";
    let snap = snap_with(vec![
        {
            let mut t = make_thread("p", "ta");
            t.cgroup = path_1.into();
            t
        },
        {
            let mut t = make_thread("p", "tb");
            t.cgroup = path_2.into();
            t
        },
    ]);
    // Self-compare: both sides see the same two paths.
    let diff = compare(
        &snap,
        &snap,
        &CompareOptions {
            group_by: GroupBy::Cgroup.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: false,
            no_cg_normalize: false,
            sort_by: Vec::new(),
        },
    );
    let group_keys: std::collections::BTreeSet<String> =
        diff.rows.iter().map(|r| r.group_key.clone()).collect();
    let expected = "/apps.slice/run-{N}.fluxcap9000_01.zz3";
    assert!(
        group_keys.contains(expected),
        "tightened key {expected:?} missing; got {group_keys:?}",
    );
}
// With no_cg_normalize=false the user-N/user@N tokens collapse into one
// "{N}"/"{I}" skeleton bucket shared by both snapshots; with
// no_cg_normalize=true the literal (post-flatten) paths are kept, so the
// two sides share no key and each path lands in only_baseline /
// only_candidate.
#[test]
fn no_cg_normalize_uses_literal_post_flatten_path() {
    let mut ta = make_thread("p", "ta");
    ta.cgroup = "/user.slice/user-0.slice/user@0.service/boot.scope".into();
    let mut tb = make_thread("p", "tb");
    tb.cgroup = "/user.slice/user-1001.slice/user@1001.service/boot.scope".into();
    let snap_a = snap_with(vec![ta]);
    let snap_b = snap_with(vec![tb]);
    // Normalization on: both literal paths resolve to the same skeleton key.
    let diff_on = compare(
        &snap_a,
        &snap_b,
        &CompareOptions {
            group_by: GroupBy::Cgroup.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: false,
            no_cg_normalize: false,
            sort_by: Vec::new(),
        },
    );
    let normalized_key = "/user.slice/user-{N}.slice/user@{I}.service/boot.scope";
    assert!(
        diff_on.rows.iter().any(|r| r.group_key == normalized_key),
        "expected normalized key {normalized_key:?} when no_cg_normalize=false",
    );
    // Normalization off: no join is possible, so both paths must orphan.
    let diff_off = compare(
        &snap_a,
        &snap_b,
        &CompareOptions {
            group_by: GroupBy::Cgroup.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: false,
            no_cg_normalize: true,
            sort_by: Vec::new(),
        },
    );
    assert!(
        diff_off
            .only_baseline
            .contains(&"/user.slice/user-0.slice/user@0.service/boot.scope".to_string()),
        "literal baseline path missing under no_cg_normalize: only_baseline={:?}",
        diff_off.only_baseline,
    );
    assert!(
        diff_off
            .only_candidate
            .contains(&"/user.slice/user-1001.slice/user@1001.service/boot.scope".to_string()),
        "literal candidate path missing under no_cg_normalize: only_candidate={:?}",
        diff_off.only_candidate,
    );
}
// Bracketed 8-char hex session ids must normalize to "[{H}]". The test
// checks each layer in turn — the skeleton function, the key map, and the
// end-to-end compare() bucketing — so a failure pinpoints which layer broke.
#[test]
fn cgroup_normalize_collapses_bracketed_hex_session_ids() {
    let mut ta = make_thread("p", "ta");
    ta.cgroup = "/user.slice/session-[a1b2c3d4]/scope".into();
    let mut tb = make_thread("p", "tb");
    tb.cgroup = "/user.slice/session-[dead1234]/scope".into();
    let snap_a = snap_with(vec![ta]);
    let snap_b = snap_with(vec![tb]);
    let (skel_a, post_a, _) = cgroup_normalize_skeleton("/user.slice/session-[a1b2c3d4]/scope");
    let (skel_b, post_b, _) = cgroup_normalize_skeleton("/user.slice/session-[dead1234]/scope");
    assert_eq!(
        skel_a, "/user.slice/session-[{H}]/scope",
        "Layer-2 skeleton for path1 mismatch; got {skel_a:?}",
    );
    assert_eq!(
        skel_b, "/user.slice/session-[{H}]/scope",
        "Layer-2 skeleton for path2 mismatch; got {skel_b:?}",
    );
    // The post-flatten path stays literal: only the skeleton carries "{H}".
    assert_eq!(post_a, "/user.slice/session-[a1b2c3d4]/scope");
    assert_eq!(post_b, "/user.slice/session-[dead1234]/scope");
    let key_map = build_cgroup_key_map(&snap_a, &snap_b, &[]);
    assert_eq!(
        key_map.get("/user.slice/session-[a1b2c3d4]/scope"),
        Some(&"/user.slice/session-[{H}]/scope".to_string()),
        "key_map must resolve path1 to the tightened skeleton",
    );
    assert_eq!(
        key_map.get("/user.slice/session-[dead1234]/scope"),
        Some(&"/user.slice/session-[{H}]/scope".to_string()),
        "key_map must resolve path2 to the tightened skeleton",
    );
    let diff = compare(
        &snap_a,
        &snap_b,
        &CompareOptions {
            group_by: GroupBy::Cgroup.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: false,
            no_cg_normalize: false,
            sort_by: Vec::new(),
        },
    );
    let group_keys: std::collections::BTreeSet<String> =
        diff.rows.iter().map(|r| r.group_key.clone()).collect();
    let expected = "/user.slice/session-[{H}]/scope";
    assert!(
        group_keys.contains(expected),
        "missing bracketed-hex cgroup bucket {expected:?}; got {group_keys:?}; \
        diff.only_baseline={:?}; diff.only_candidate={:?}",
        diff.only_baseline,
        diff.only_candidate,
    );
    // Both paths map to the same bucket, so neither side may orphan.
    assert!(
        diff.only_baseline.is_empty(),
        "no orphans under bracketed-hex collapse, got {:?}",
        diff.only_baseline,
    );
    assert!(
        diff.only_candidate.is_empty(),
        "no orphans under bracketed-hex collapse, got {:?}",
        diff.only_candidate,
    );
}
// Values strictly below 1000 stay in the base "ns" unit; the ladder takes
// its first step up exactly at 1000.
#[test]
fn auto_scale_ns_boundary_stays_at_base_below_threshold() {
    for (input, expected) in [
        (0.0, (0.0, "ns")),
        (999.0, (999.0, "ns")),
        (1000.0, (1.0, "µs")),
    ] {
        assert_eq!(auto_scale(input, ScaleLadder::Ns), expected);
    }
}
// Each factor-of-1000 step on the ns ladder moves one unit up
// (ns → µs → ms → s) while the mantissa stays 1.5.
#[test]
fn auto_scale_ns_ladder_steps_up_at_powers_of_ten() {
    let steps = [
        (1_500.0, "µs"),
        (1_500_000.0, "ms"),
        (1_500_000_000.0, "s"),
    ];
    for (raw, unit) in steps {
        let (scaled, u) = auto_scale(raw, ScaleLadder::Ns);
        assert_eq!(u, unit);
        assert!((scaled - 1.5).abs() < 1e-9);
    }
}
// The byte ladder is binary (IEC): it steps at powers of 1024, not 1000,
// and uses KiB/MiB/GiB suffixes.
#[test]
fn auto_scale_byte_iec_ladder_uses_1024() {
    assert_eq!(auto_scale(1023.0, ScaleLadder::Bytes), (1023.0, "B"));
    let kib = 1024.0;
    for (raw, unit) in [(kib, "KiB"), (kib * kib, "MiB"), (kib * kib * kib, "GiB")] {
        let (scaled, u) = auto_scale(raw, ScaleLadder::Bytes);
        assert_eq!(u, unit);
        assert!((scaled - 1.0).abs() < 1e-9);
    }
}
// Clock ticks scale with decimal SI-style prefixes (Kticks, Mticks),
// stepping at powers of 1000.
#[test]
fn auto_scale_ticks_ladder_uses_decimal_prefixes() {
    assert_eq!(auto_scale(999.0, ScaleLadder::Ticks), (999.0, "ticks"));
    for (raw, unit, mantissa) in [(1_500.0, "Kticks", 1.5), (2_000_000.0, "Mticks", 2.0)] {
        let (scaled, u) = auto_scale(raw, ScaleLadder::Ticks);
        assert_eq!(u, unit);
        assert!((scaled - mantissa).abs() < 1e-9);
    }
}
// Unitless counts get bare SI prefixes (K/M/G) with an empty base unit.
#[test]
fn auto_scale_unitless_ladder_uses_si_prefixes() {
    assert_eq!(auto_scale(999.0, ScaleLadder::Unitless), (999.0, ""));
    let steps = [
        (1_500.0, "K", 1.5),
        (2_500_000.0, "M", 2.5),
        (3_000_000_000.0, "G", 3.0),
    ];
    for (raw, unit, mantissa) in steps {
        let (scaled, u) = auto_scale(raw, ScaleLadder::Unitless);
        assert_eq!(u, unit);
        assert!((scaled - mantissa).abs() < 1e-9);
    }
}
// Negative magnitudes pick their unit from the absolute value but keep
// the sign on the scaled mantissa.
#[test]
fn auto_scale_preserves_sign_on_negative_input() {
    let (scaled, unit) = auto_scale(-2_000_000.0, ScaleLadder::Ns);
    assert_eq!(unit, "ms");
    assert!((scaled + 2.0).abs() < 1e-9);
    let (scaled, unit) = auto_scale(-5_000.0, ScaleLadder::Bytes);
    assert_eq!(unit, "KiB");
    assert!((scaled + 5000.0 / 1024.0).abs() < 1e-9);
}
// Sum aggregates render through the auto-scaler: whole base units print
// without decimals, scaled values print with three.
#[test]
fn format_value_cell_renders_sum_at_appropriate_scale() {
    let cases = [
        (50, "50ns"),
        (999, "999ns"),
        (1_500, "1.500µs"),
        (2_000_000, "2.000ms"),
    ];
    for (raw, rendered) in cases {
        assert_eq!(
            format_value_cell(&Aggregated::Sum(raw), ScaleLadder::Ns),
            rendered
        );
    }
}
// Max aggregates go through the same scaling path as sums.
#[test]
fn format_value_cell_renders_max_at_appropriate_scale() {
    for (raw, rendered) in [(100, "100ns"), (7_500_000, "7.500ms")] {
        assert_eq!(
            format_value_cell(&Aggregated::Max(raw), ScaleLadder::Ns),
            rendered
        );
    }
}
// Non-numeric aggregates (mode strings, ordinal ranges) bypass scaling
// and render verbatim.
#[test]
fn format_value_cell_passes_non_numeric_aggregates_through() {
    let mode = Aggregated::Mode {
        value: "SCHED_OTHER".into(),
        count: 4,
        total: 4,
    };
    assert_eq!(format_value_cell(&mode, ScaleLadder::None), "SCHED_OTHER");
    let range = Aggregated::OrdinalRange { min: -5, max: 10 };
    assert_eq!(format_value_cell(&range, ScaleLadder::None), "-5..10");
}
// Delta cells always carry an explicit sign (including "+0") and scale
// like any other ns value.
#[test]
fn format_delta_cell_renders_signed_scaled_value() {
    let cases = [
        (-50.0, "-50ns"),
        (50.0, "+50ns"),
        (0.0, "+0ns"),
        (50.5, "+50.500ns"),
        (2_000_000.0, "+2.000ms"),
        (-2_000_000.0, "-2.000ms"),
    ];
    for (delta, rendered) in cases {
        assert_eq!(format_delta_cell(delta, ScaleLadder::Ns), rendered);
    }
}
// Sorting must happen on raw deltas, not on display-scaled values: the
// "big" group's +1ms beats "small"'s +10ns even though both would render
// with small mantissas after scaling.
#[test]
fn auto_scale_does_not_affect_sort_order() {
    let mut a_small = make_thread("small", "w");
    a_small.run_time_ns = MonotonicNs(100);
    let mut a_big = make_thread("big", "w");
    a_big.run_time_ns = MonotonicNs(1_000_000);
    let mut b_small = make_thread("small", "w");
    b_small.run_time_ns = MonotonicNs(110);
    let mut b_big = make_thread("big", "w");
    b_big.run_time_ns = MonotonicNs(2_000_000);
    let diff = compare(
        &snap_with(vec![a_small, a_big]),
        &snap_with(vec![b_small, b_big]),
        &CompareOptions::default(),
    );
    let run_rows: Vec<&DiffRow> = diff
        .rows
        .iter()
        .filter(|r| r.metric_name == "run_time_ns")
        .collect();
    assert_eq!(run_rows[0].group_key, "big");
    assert_eq!(run_rows[1].group_key, "small");
}
// write_diff must render ns-family cells through the auto-scaler: a
// 5ms/8ms baseline/candidate pair appears as "5.000ms"/"8.000ms" with a
// signed "+3.000ms" delta, never as raw nanosecond integers.
// (Also fixes rustfmt-violating joined statements from the original.)
#[test]
fn write_diff_renders_auto_scaled_cells_for_ns_metric() {
    let mut ta = make_thread("p", "w");
    ta.run_time_ns = MonotonicNs(5_000_000);
    let mut tb = make_thread("p", "w");
    tb.run_time_ns = MonotonicNs(8_000_000);
    let diff = compare(
        &snap_with(vec![ta]),
        &snap_with(vec![tb]),
        &CompareOptions::default(),
    );
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(out.contains("5.000ms"), "missing baseline ms:\n{out}");
    assert!(out.contains("8.000ms"), "missing candidate ms:\n{out}");
    assert!(out.contains("+3.000ms"), "missing delta ms:\n{out}");
}
// Both clock-tick metrics in the registry must advertise the Ticks
// ladder so they render with tick prefixes, not time units.
#[test]
fn registry_utime_stime_carry_ticks_unit() {
    for name in ["utime_clock_ticks", "stime_clock_ticks"] {
        let def = CTPROF_METRICS
            .iter()
            .find(|m| m.name == name)
            .unwrap_or_else(|| panic!("{name} in registry"));
        assert_eq!(def.rule.ladder(), ScaleLadder::Ticks);
    }
}
// An empty spec is valid and yields no sort keys.
#[test]
fn parse_sort_by_empty_returns_empty_vec() {
    let parsed = parse_sort_by("").expect("empty parses");
    assert_eq!(parsed.len(), 0);
}
// A bare metric name defaults to descending order.
#[test]
fn parse_sort_by_single_field_defaults_to_desc() {
    let parsed = parse_sort_by("wait_sum").expect("parse");
    let summary: Vec<(&str, bool)> = parsed.iter().map(|k| (k.metric, k.descending)).collect();
    assert_eq!(summary, vec![("wait_sum", true)]);
}
// Surrounding whitespace on a bare metric (no colon) must be trimmed.
#[test]
fn parse_sort_by_bare_metric_with_whitespace_no_colon() {
    let parsed = parse_sort_by(" wait_sum ").expect("bare-metric whitespace must parse");
    let summary: Vec<(&str, bool)> = parsed.iter().map(|k| (k.metric, k.descending)).collect();
    assert_eq!(summary, vec![("wait_sum", true)]);
}
// Explicit :asc / :desc suffixes override the descending default.
#[test]
fn parse_sort_by_explicit_directions() {
    let parsed = parse_sort_by("wait_sum:asc,run_time_ns:desc").expect("parse");
    let summary: Vec<(&str, bool)> = parsed.iter().map(|k| (k.metric, k.descending)).collect();
    assert_eq!(summary, vec![("wait_sum", false), ("run_time_ns", true)]);
}
// Whitespace around comma-separated entries must be ignored.
#[test]
fn parse_sort_by_trims_whitespace_between_entries() {
    let parsed = parse_sort_by(" wait_sum:desc , run_time_ns:asc ").expect("parse");
    let summary: Vec<(&str, bool)> = parsed.iter().map(|k| (k.metric, k.descending)).collect();
    assert_eq!(summary, vec![("wait_sum", true), ("run_time_ns", false)]);
}
// Whitespace on either side of the colon must also be tolerated.
#[test]
fn parse_sort_by_trims_whitespace_around_colon() {
    let desc_side = parse_sort_by("wait_sum : desc").expect("trimmed colon parse");
    let desc_summary: Vec<(&str, bool)> =
        desc_side.iter().map(|k| (k.metric, k.descending)).collect();
    assert_eq!(desc_summary, vec![("wait_sum", true)]);
    let asc_side = parse_sort_by("run_time_ns: asc ").expect("trimmed asc-side parse");
    let asc_summary: Vec<(&str, bool)> =
        asc_side.iter().map(|k| (k.metric, k.descending)).collect();
    assert_eq!(asc_summary, vec![("run_time_ns", false)]);
}
// Direction tokens are matched case-insensitively in both directions.
#[test]
fn parse_sort_by_direction_is_case_insensitive() {
    let cases = [
        ("wait_sum:DESC", true),
        ("wait_sum:Desc", true),
        ("wait_sum:dEsC", true),
        ("wait_sum:ASC", false),
        ("wait_sum:Asc", false),
        ("wait_sum:aSc", false),
    ];
    for (spec, descending) in cases {
        let keys = parse_sort_by(spec).unwrap_or_else(|e| panic!("{spec} must parse: {e}"));
        assert_eq!(keys.len(), 1, "{spec}");
        assert_eq!(keys[0].descending, descending, "{spec}");
    }
}
// An unknown metric must produce an error that names the offender, lists
// the valid names, and hints that bare metric names are expected.
#[test]
fn parse_sort_by_rejects_unknown_metric() {
    let err = parse_sort_by("not_a_real_metric").unwrap_err();
    let msg = format!("{err:#}");
    let required = [
        ("not_a_real_metric", "error must cite offending metric name"),
        (
            "must be one of",
            "error must include the 'must be one of' preamble that introduces the valid-name list",
        ),
        (
            "run_time_ns",
            "error must list at least one canonical metric name from the registry",
        ),
        ("bare metric name", "error must hint at bare-metric-name usage"),
    ];
    for (needle, why) in required {
        assert!(msg.contains(needle), "{why}, got: {msg}");
    }
}
// Pasting a tagged table cell (metric plus bracket tags) must still
// surface the bare-metric-name hint.
#[test]
fn parse_sort_by_unknown_with_tag_suffix_carries_hint() {
    let parse_err = parse_sort_by("wait_sum [non-ext] [SCHEDSTATS]").unwrap_err();
    let rendered = format!("{parse_err:#}");
    assert!(
        rendered.contains("bare metric name"),
        "tagged-cell paste must produce the bare-name hint, got: {rendered}",
    );
}
// A direction token other than asc/desc is rejected and cited by name.
#[test]
fn parse_sort_by_rejects_invalid_direction() {
    let rendered = format!("{:#}", parse_sort_by("wait_sum:sideways").unwrap_err());
    assert!(
        rendered.contains("sideways"),
        "error must cite offending direction, got: {rendered}"
    );
}
// A double comma leaves an empty entry, which must be rejected.
#[test]
fn parse_sort_by_rejects_empty_entry() {
    let rendered = format!("{:#}", parse_sort_by("wait_sum,,run_time_ns").unwrap_err());
    assert!(
        rendered.contains("empty entry"),
        "error must mention empty entry, got: {rendered}"
    );
}
// A trailing comma is treated as an empty entry, not silently dropped.
#[test]
fn parse_sort_by_rejects_trailing_comma() {
    let rendered = format!("{:#}", parse_sort_by("wait_sum,").unwrap_err());
    assert!(
        rendered.contains("empty entry"),
        "trailing comma must surface as empty-entry error, got: {rendered}"
    );
}
// A leading comma likewise produces an empty entry.
#[test]
fn parse_sort_by_rejects_leading_comma() {
    let rendered = format!("{:#}", parse_sort_by(",wait_sum").unwrap_err());
    assert!(
        rendered.contains("empty entry"),
        "leading comma must surface as empty-entry error, got: {rendered}"
    );
}
// A lone ":" has an empty direction side and is classified as an
// invalid direction (not an empty entry).
#[test]
fn parse_sort_by_rejects_bare_colon() {
    let rendered = format!("{:#}", parse_sort_by(":").unwrap_err());
    assert!(
        rendered.contains("invalid direction"),
        "bare colon must surface as invalid-direction error, got: {rendered}"
    );
}
// "metric:" with nothing after the colon is an invalid direction too.
#[test]
fn parse_sort_by_rejects_metric_colon_no_direction() {
    let rendered = format!("{:#}", parse_sort_by("wait_sum:").unwrap_err());
    assert!(
        rendered.contains("invalid direction"),
        "metric-colon-no-direction must surface as invalid-direction error, got: {rendered}"
    );
}
// Categorical (Mode-aggregated) metrics have no numeric delta and must
// be rejected as sort keys. A premise guard first confirms "policy" is
// still Mode-aggregated so the test fails loudly if the registry drifts.
#[test]
fn parse_sort_by_rejects_categorical_metric() {
    let policy_def = CTPROF_METRICS
        .iter()
        .find(|m| m.name == "policy")
        .expect("policy must be in CTPROF_METRICS");
    // Guard against test-premise drift, not the behavior under test.
    assert!(
        matches!(policy_def.rule, AggRule::Mode(_)),
        "test premise drift: policy is no longer Mode-aggregated; \
        pick a different categorical metric for this test",
    );
    let err = parse_sort_by("policy").unwrap_err();
    let msg = format!("{err:#}");
    assert!(
        msg.contains("categorical"),
        "categorical metric error must label the failure mode, got: {msg}"
    );
    assert!(
        msg.contains("policy"),
        "categorical metric error must name the offending metric, got: {msg}"
    );
}
// Repeating a metric is rejected, even when the two mentions carry
// different directions.
#[test]
fn parse_sort_by_rejects_duplicate_metric() {
    let plain = format!("{:#}", parse_sort_by("wait_sum,wait_sum").unwrap_err());
    assert!(
        plain.contains("duplicate"),
        "duplicate-metric error must label the failure mode, got: {plain}"
    );
    assert!(
        plain.contains("wait_sum"),
        "duplicate-metric error must name the offending metric, got: {plain}"
    );
    let mixed = format!("{:#}", parse_sort_by("wait_sum:asc,wait_sum:desc").unwrap_err());
    assert!(
        mixed.contains("duplicate"),
        "duplicate metric across different directions must still reject, got: {mixed}"
    );
}
// The valid-name list in the unknown-metric error must be alphabetically
// sorted (checked via substring positions) and rendered as a plain
// comma-separated list, not a BTreeSet Debug dump.
#[test]
fn parse_sort_by_unknown_metric_lists_valid_names_sorted() {
    let err = parse_sort_by("not_a_real_metric").unwrap_err();
    let msg = format!("{err:#}");
    // Byte offsets of three registry names; alphabetical listing implies
    // nice < policy < run_time_ns in the rendered message.
    let nice_at = msg
        .find("nice")
        .expect("error must list 'nice' from the registry");
    let policy_at = msg
        .find("policy")
        .expect("error must list 'policy' from the registry");
    let run_time_at = msg
        .find("run_time_ns")
        .expect("error must list 'run_time_ns' from the registry");
    assert!(
        nice_at < policy_at,
        "names must appear in alphabetical order: \
        nice@{nice_at} < policy@{policy_at}\nmsg: {msg}",
    );
    assert!(
        policy_at < run_time_at,
        "names must appear in alphabetical order: \
        policy@{policy_at} < run_time_ns@{run_time_at}\nmsg: {msg}",
    );
    // `{"` would indicate a Debug-formatted set leaked into the message.
    assert!(
        !msg.contains("{\""),
        "error must use comma-separated list, not BTreeSet debug dump:\n{msg}"
    );
}
// Multiple keys come back in spec order with their own directions.
#[test]
fn parse_sort_by_multi_key_preserves_order() {
    let parsed =
        parse_sort_by("run_time_ns:desc,nr_wakeups:asc,wait_time_ns:desc").expect("parse");
    let summary: Vec<(&str, bool)> = parsed.iter().map(|k| (k.metric, k.descending)).collect();
    assert_eq!(
        summary,
        vec![
            ("run_time_ns", true),
            ("nr_wakeups", false),
            ("wait_time_ns", true),
        ],
    );
}
// The primary sort key ranks whole groups by that metric's delta
// (run_time_ns desc ⇒ A:1000 > B:100 > C:50); all rows of a group stay
// adjacent even when another metric (wait_sum) would rank differently.
#[test]
fn sort_diff_rows_by_keys_ranks_by_first_key_first() {
    // Minimal DiffRow fixture: only group/metric/delta vary per row.
    let mk_row = |group: &str, metric: &'static str, delta: f64| DiffRow {
        group_key: group.into(),
        thread_count_a: 1,
        thread_count_b: 1,
        metric_name: metric,
        metric_ladder: ScaleLadder::None,
        baseline: Aggregated::Sum(0),
        candidate: Aggregated::Sum(0),
        delta: Some(delta),
        delta_pct: None,
        display_key: group.into(),
        uptime_pct: None,
        sort_by_cell: None,
        sort_by_delta: None,
    };
    let mut rows = vec![
        mk_row("A", "run_time_ns", 1000.0),
        mk_row("A", "wait_sum", 100.0),
        mk_row("B", "run_time_ns", 100.0),
        mk_row("B", "wait_sum", 1000.0),
        mk_row("C", "run_time_ns", 50.0),
        mk_row("C", "wait_sum", 50.0),
    ];
    sort_diff_rows_by_keys(
        &mut rows,
        &mut Vec::new(),
        &[SortKey {
            metric: "run_time_ns",
            descending: true,
        }],
    );
    let groups_in_order: Vec<&str> = rows.iter().map(|r| r.group_key.as_str()).collect();
    assert_eq!(
        groups_in_order,
        vec!["A", "A", "B", "B", "C", "C"],
        "groups should rank by run_time_ns delta desc",
    );
    let metrics_first_two: Vec<&str> = rows.iter().take(2).map(|r| r.metric_name).collect();
    assert_eq!(metrics_first_two, vec!["run_time_ns", "wait_sum"]);
}
// When the first key ties (both groups at run_time_ns delta 500), the
// second key (wait_sum desc) decides: B (200) ranks above A (100).
#[test]
fn sort_diff_rows_by_keys_breaks_ties_with_second_key() {
    // Minimal DiffRow fixture: only group/metric/delta vary per row.
    let mk_row = |group: &str, metric: &'static str, delta: f64| DiffRow {
        group_key: group.into(),
        thread_count_a: 1,
        thread_count_b: 1,
        metric_name: metric,
        metric_ladder: ScaleLadder::None,
        baseline: Aggregated::Sum(0),
        candidate: Aggregated::Sum(0),
        delta: Some(delta),
        delta_pct: None,
        display_key: group.into(),
        uptime_pct: None,
        sort_by_cell: None,
        sort_by_delta: None,
    };
    let mut rows = vec![
        mk_row("A", "run_time_ns", 500.0),
        mk_row("A", "wait_sum", 100.0),
        mk_row("B", "run_time_ns", 500.0),
        mk_row("B", "wait_sum", 200.0),
    ];
    sort_diff_rows_by_keys(
        &mut rows,
        &mut Vec::new(),
        &[
            SortKey {
                metric: "run_time_ns",
                descending: true,
            },
            SortKey {
                metric: "wait_sum",
                descending: true,
            },
        ],
    );
    let groups_in_order: Vec<&str> = rows.iter().map(|r| r.group_key.as_str()).collect();
    assert_eq!(groups_in_order, vec!["B", "B", "A", "A"]);
}
// descending=false must invert the ranking: smallest delta first
// (B:100 < C:500 < A:1000).
#[test]
fn sort_diff_rows_by_keys_respects_ascending_direction() {
    // Minimal DiffRow fixture: only group/metric/delta vary per row.
    let mk_row = |group: &str, metric: &'static str, delta: f64| DiffRow {
        group_key: group.into(),
        thread_count_a: 1,
        thread_count_b: 1,
        metric_name: metric,
        metric_ladder: ScaleLadder::None,
        baseline: Aggregated::Sum(0),
        candidate: Aggregated::Sum(0),
        delta: Some(delta),
        delta_pct: None,
        display_key: group.into(),
        uptime_pct: None,
        sort_by_cell: None,
        sort_by_delta: None,
    };
    let mut rows = vec![
        mk_row("A", "run_time_ns", 1000.0),
        mk_row("B", "run_time_ns", 100.0),
        mk_row("C", "run_time_ns", 500.0),
    ];
    sort_diff_rows_by_keys(
        &mut rows,
        &mut Vec::new(),
        &[SortKey {
            metric: "run_time_ns",
            descending: false,
        }],
    );
    let groups_in_order: Vec<&str> = rows.iter().map(|r| r.group_key.as_str()).collect();
    assert_eq!(groups_in_order, vec!["B", "C", "A"]);
}
// With an explicit sort_by, ranking uses the absolute run_time_ns delta
// (alpha: +500) rather than the default percent-delta sort that would
// favor bravo's +100%.
// (Also fixes rustfmt-violating joined statements from the original.)
#[test]
fn compare_uses_sort_by_when_set() {
    let mut a_pre = make_thread("alpha", "w");
    a_pre.run_time_ns = MonotonicNs(1_000_000_000);
    let mut a_post = make_thread("alpha", "w");
    a_post.run_time_ns = MonotonicNs(1_000_000_500);
    let mut b_pre = make_thread("bravo", "w");
    b_pre.run_time_ns = MonotonicNs(100);
    let mut b_post = make_thread("bravo", "w");
    b_post.run_time_ns = MonotonicNs(200);
    let diff = compare(
        &snap_with(vec![a_pre, b_pre]),
        &snap_with(vec![a_post, b_post]),
        &CompareOptions {
            group_by: GroupBy::Pcomm.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: false,
            no_cg_normalize: false,
            sort_by: vec![SortKey {
                metric: "run_time_ns",
                descending: true,
            }],
        },
    );
    let run_rows: Vec<&DiffRow> = diff
        .rows
        .iter()
        .filter(|r| r.metric_name == "run_time_ns")
        .collect();
    assert_eq!(
        run_rows[0].group_key, "alpha",
        "sort_by abs delta picks alpha"
    );
    assert_eq!(run_rows[1].group_key, "bravo");
}
// When every sort key ties exactly, ordering must fall back to ascending
// group_key so the output is deterministic.
#[test]
fn sort_diff_rows_by_keys_falls_back_to_ascending_group_key_on_full_tie() {
    // Minimal DiffRow fixture: only group/metric/delta vary per row.
    let mk_row = |group: &str, metric: &'static str, delta: f64| DiffRow {
        group_key: group.into(),
        thread_count_a: 1,
        thread_count_b: 1,
        metric_name: metric,
        metric_ladder: ScaleLadder::None,
        baseline: Aggregated::Sum(0),
        candidate: Aggregated::Sum(0),
        delta: Some(delta),
        delta_pct: None,
        display_key: group.into(),
        uptime_pct: None,
        sort_by_cell: None,
        sort_by_delta: None,
    };
    let mut rows = vec![
        mk_row("charlie", "run_time_ns", 100.0),
        mk_row("bravo", "run_time_ns", 100.0),
        mk_row("alpha", "run_time_ns", 100.0),
    ];
    sort_diff_rows_by_keys(
        &mut rows,
        &mut Vec::new(),
        &[SortKey {
            metric: "run_time_ns",
            descending: true,
        }],
    );
    let order: Vec<&str> = rows.iter().map(|r| r.group_key.as_str()).collect();
    assert_eq!(
        order,
        vec!["alpha", "bravo", "charlie"],
        "full sort-key tie must fall back to ascending group_key",
    );
}
// A group with no row for the sort metric (bravo lacks run_time_ns) must
// sink to the bottom under descending order, regardless of how large its
// other deltas are.
#[test]
fn sort_diff_rows_by_keys_missing_metric_sinks_under_desc() {
    // Fixture takes Option<f64> so a row can model an absent delta.
    let mk_row = |group: &str, metric: &'static str, delta: Option<f64>| DiffRow {
        group_key: group.into(),
        thread_count_a: 1,
        thread_count_b: 1,
        metric_name: metric,
        metric_ladder: ScaleLadder::None,
        baseline: Aggregated::Sum(0),
        candidate: Aggregated::Sum(0),
        delta,
        delta_pct: None,
        display_key: group.into(),
        uptime_pct: None,
        sort_by_cell: None,
        sort_by_delta: None,
    };
    let mut rows = vec![
        mk_row("alpha", "run_time_ns", Some(100.0)),
        mk_row("bravo", "wait_time_ns", Some(999_999.0)),
    ];
    sort_diff_rows_by_keys(
        &mut rows,
        &mut Vec::new(),
        &[SortKey {
            metric: "run_time_ns",
            descending: true,
        }],
    );
    // Collect first-appearance order of each group.
    let mut order: Vec<&str> = Vec::new();
    for r in &rows {
        if !order.contains(&r.group_key.as_str()) {
            order.push(r.group_key.as_str());
        }
    }
    assert_eq!(
        order,
        vec!["alpha", "bravo"],
        "missing metric under desc must sink the group (NEG_INFINITY)",
    );
}
// Under ascending order a group missing the sort metric must also sink —
// the sentinel flips to +INFINITY so "missing" never wins either way.
#[test]
fn sort_diff_rows_by_keys_missing_metric_sinks_under_asc() {
    // Fixture takes Option<f64> so a row can model an absent delta.
    let mk_row = |group: &str, metric: &'static str, delta: Option<f64>| DiffRow {
        group_key: group.into(),
        thread_count_a: 1,
        thread_count_b: 1,
        metric_name: metric,
        metric_ladder: ScaleLadder::None,
        baseline: Aggregated::Sum(0),
        candidate: Aggregated::Sum(0),
        delta,
        delta_pct: None,
        display_key: group.into(),
        uptime_pct: None,
        sort_by_cell: None,
        sort_by_delta: None,
    };
    let mut rows = vec![
        mk_row("alpha", "run_time_ns", Some(100.0)),
        mk_row("bravo", "wait_time_ns", Some(50.0)),
    ];
    sort_diff_rows_by_keys(
        &mut rows,
        &mut Vec::new(),
        &[SortKey {
            metric: "run_time_ns",
            descending: false,
        }],
    );
    // Collect first-appearance order of each group.
    let mut order: Vec<&str> = Vec::new();
    for r in &rows {
        if !order.contains(&r.group_key.as_str()) {
            order.push(r.group_key.as_str());
        }
    }
    assert_eq!(
        order,
        vec!["alpha", "bravo"],
        "missing metric under asc must sink the group (INFINITY)",
    );
}
// Groups whose rows are all categorical (Mode aggregate, delta=None)
// provide no numeric sort value at all; they must survive the sort
// without panicking and fall back to ascending group_key.
#[test]
fn sort_diff_rows_by_keys_categorical_only_group_does_not_panic() {
    // Fixture with Mode aggregates on both sides and no delta.
    let mk_row = |group: &str, metric: &'static str| DiffRow {
        group_key: group.into(),
        thread_count_a: 1,
        thread_count_b: 1,
        metric_name: metric,
        metric_ladder: ScaleLadder::None,
        baseline: Aggregated::Mode {
            value: "SCHED_OTHER".into(),
            count: 1,
            total: 1,
        },
        candidate: Aggregated::Mode {
            value: "SCHED_OTHER".into(),
            count: 1,
            total: 1,
        },
        delta: None,
        delta_pct: None,
        display_key: group.into(),
        uptime_pct: None,
        sort_by_cell: None,
        sort_by_delta: None,
    };
    let mut rows = vec![mk_row("alpha", "policy"), mk_row("bravo", "policy")];
    sort_diff_rows_by_keys(
        &mut rows,
        &mut Vec::new(),
        &[SortKey {
            metric: "run_time_ns",
            descending: true,
        }],
    );
    let order: Vec<&str> = rows.iter().map(|r| r.group_key.as_str()).collect();
    assert_eq!(
        order,
        vec!["alpha", "bravo"],
        "categorical-only groups must survive the sort and fall to ascending group_key",
    );
}
// Inside a group, rows are reordered to the metric registry's canonical
// order — not the order they arrived in and not the sort-spec order —
// even when the sort key is one of the later metrics.
#[test]
fn sort_diff_rows_by_keys_within_group_uses_registry_order() {
    // Minimal DiffRow fixture: only group/metric/delta vary per row.
    let mk_row = |group: &str, metric: &'static str, delta: f64| DiffRow {
        group_key: group.into(),
        thread_count_a: 1,
        thread_count_b: 1,
        metric_name: metric,
        metric_ladder: ScaleLadder::None,
        baseline: Aggregated::Sum(0),
        candidate: Aggregated::Sum(0),
        delta: Some(delta),
        delta_pct: None,
        display_key: group.into(),
        uptime_pct: None,
        sort_by_cell: None,
        sort_by_delta: None,
    };
    // Deliberately scrambled input order within the single group.
    let mut rows = vec![
        mk_row("alpha", "nr_wakeups", 4.0),
        mk_row("alpha", "timeslices", 3.0),
        mk_row("alpha", "wait_time_ns", 999.0),
        mk_row("alpha", "run_time_ns", 1.0),
    ];
    sort_diff_rows_by_keys(
        &mut rows,
        &mut Vec::new(),
        &[SortKey {
            metric: "wait_time_ns",
            descending: true,
        }],
    );
    let metric_order: Vec<&str> = rows.iter().map(|r| r.metric_name).collect();
    assert_eq!(
        metric_order,
        vec!["run_time_ns", "wait_time_ns", "timeslices", "nr_wakeups"],
        "within-group order must be registry, not sort-spec, order",
    );
}
// NaN deltas must not crash the comparator (f64 comparisons on NaN are
// not a total order) nor drop/duplicate rows; the exact ranking of NaN
// groups is unspecified, so only set membership is asserted.
#[test]
fn sort_diff_rows_by_keys_nan_delta_does_not_panic() {
    // Minimal DiffRow fixture: only group/metric/delta vary per row.
    let mk_row = |group: &str, metric: &'static str, delta: f64| DiffRow {
        group_key: group.into(),
        thread_count_a: 1,
        thread_count_b: 1,
        metric_name: metric,
        metric_ladder: ScaleLadder::None,
        baseline: Aggregated::Sum(0),
        candidate: Aggregated::Sum(0),
        delta: Some(delta),
        delta_pct: None,
        display_key: group.into(),
        uptime_pct: None,
        sort_by_cell: None,
        sort_by_delta: None,
    };
    let mut rows = vec![
        mk_row("alpha", "run_time_ns", f64::NAN),
        mk_row("bravo", "run_time_ns", 100.0),
        mk_row("charlie", "run_time_ns", f64::NAN),
    ];
    sort_diff_rows_by_keys(
        &mut rows,
        &mut Vec::new(),
        &[SortKey {
            metric: "run_time_ns",
            descending: true,
        }],
    );
    // Normalize to a sorted, deduped set: position is irrelevant here.
    let mut groups: Vec<&str> = rows.iter().map(|r| r.group_key.as_str()).collect();
    groups.sort();
    groups.dedup();
    assert_eq!(
        groups,
        vec!["alpha", "bravo", "charlie"],
        "NaN delta must not drop or duplicate any group",
    );
}
// Empty sort_by must take the default delta-percent path (bravo's +100%
// beats alpha's tiny relative gain), while a non-empty sort_by switches
// to the multi-key absolute-delta path (alpha's +500 beats bravo's
// +100) — and the two orderings must actually differ on this fixture.
#[test]
fn compare_uses_default_sort_when_sort_by_empty() {
    let mut a_pre = make_thread("alpha", "w");
    a_pre.run_time_ns = MonotonicNs(1_000_000_000);
    let mut a_post = make_thread("alpha", "w");
    a_post.run_time_ns = MonotonicNs(1_000_000_500);
    let mut b_pre = make_thread("bravo", "w");
    b_pre.run_time_ns = MonotonicNs(100);
    let mut b_post = make_thread("bravo", "w");
    b_post.run_time_ns = MonotonicNs(200);
    // Case 1: empty sort_by → default percent-delta ranking.
    let diff_default = compare(
        &snap_with(vec![a_pre.clone(), b_pre.clone()]),
        &snap_with(vec![a_post.clone(), b_post.clone()]),
        &CompareOptions {
            group_by: GroupBy::Pcomm.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: false,
            no_cg_normalize: false,
            sort_by: Vec::new(),
        },
    );
    let default_order: Vec<&str> = diff_default
        .rows
        .iter()
        .filter(|r| r.metric_name == "run_time_ns")
        .map(|r| r.group_key.as_str())
        .collect();
    assert_eq!(
        default_order,
        vec!["bravo", "alpha"],
        "empty sort_by must use default delta_pct desc sort \
        (bravo's +100% beats alpha's +5e-5 %)",
    );
    // Case 2: explicit sort_by → absolute-delta multi-key ranking.
    let diff_sort = compare(
        &snap_with(vec![a_pre, b_pre]),
        &snap_with(vec![a_post, b_post]),
        &CompareOptions {
            group_by: GroupBy::Pcomm.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: false,
            no_cg_normalize: false,
            sort_by: vec![SortKey {
                metric: "run_time_ns",
                descending: true,
            }],
        },
    );
    let sort_order: Vec<&str> = diff_sort
        .rows
        .iter()
        .filter(|r| r.metric_name == "run_time_ns")
        .map(|r| r.group_key.as_str())
        .collect();
    assert_eq!(
        sort_order,
        vec!["alpha", "bravo"],
        "non-empty sort_by must use multi-key path (alpha's +500 abs beats bravo's +100)",
    );
    assert_ne!(
        default_order, sort_order,
        "empty vs non-empty sort_by must produce different orderings on this fixture",
    );
}
// Zero renders at each family's base unit with no scaling or decimals.
#[test]
fn format_scaled_u64_zero_renders_at_base_unit_for_all_families() {
    let cases = [
        (ScaleLadder::Ns, "0ns"),
        (ScaleLadder::Us, "0µs"),
        (ScaleLadder::Bytes, "0B"),
        (ScaleLadder::Ticks, "0ticks"),
        (ScaleLadder::Unitless, "0"),
    ];
    for (ladder, rendered) in cases {
        assert_eq!(format_scaled_u64(0, ladder), rendered);
    }
}
// A negative µs-family delta large enough to reach seconds keeps its
// sign through the scaling.
#[test]
fn format_delta_cell_negative_microseconds_scales_to_seconds() {
    let rendered = format_delta_cell(-1_500_000.0, ScaleLadder::Us);
    assert_eq!(rendered, "-1.500s");
}
// A negative two-gibibyte delta scales to GiB with its sign intact.
#[test]
fn format_delta_cell_negative_bytes_scales_to_gib() {
    let gib = 1024.0 * 1024.0 * 1024.0;
    assert_eq!(format_delta_cell(-2.0 * gib, ScaleLadder::Bytes), "-2.000GiB");
}
// Baseline, candidate, and delta in a cgroup cell each pick their own
// prefix rather than sharing the largest one.
#[test]
fn cgroup_cell_each_cell_scales_independently() {
    let rendered = cgroup_cell(Some(999), Some(2000), ScaleLadder::Us);
    assert_eq!(
        rendered, "999µs → 2.000ms (+1.001ms)",
        "asymmetric scaling: each cell must pick its own prefix",
    );
}
// Bracketed numeric suffixes normalize to "[{N}]"; bracketed hex-looking
// tokens normalize to "[{H}]".
#[test]
fn pattern_key_normalizes_bracketed_digits() {
    let cases = [
        ("worker[42]", "worker[{N}]"),
        ("systemd-network[105904]", "systemd-network[{N}]"),
        ("bash[4242]", "bash[{N}]"),
        ("dev[1ab]", "dev[{H}]"),
    ];
    for (raw, normalized) in cases {
        assert_eq!(pattern_key(raw), normalized);
    }
}
// '[' and ']' split tokens like any other separator, and adjacent
// separator characters ("-[") merge into one Separator segment.
#[test]
fn split_into_segments_treats_brackets_as_separators() {
    let expected_worker = vec![
        Segment::Token("worker"),
        Segment::Separator("["),
        Segment::Token("42"),
        Segment::Separator("]"),
    ];
    assert_eq!(split_into_segments("worker[42]"), expected_worker);
    let expected_dash_bracket = vec![
        Segment::Token("a"),
        Segment::Separator("-["),
        Segment::Token("1"),
        Segment::Separator("]"),
    ];
    assert_eq!(split_into_segments("a-[1]"), expected_dash_bracket);
}
// Both square brackets count as token separators.
#[test]
fn is_token_separator_includes_brackets() {
    for bracket in ['[', ']'] {
        assert!(is_token_separator(bracket));
    }
}
// Per-CPU kworker names (kworker/<cpu>:<idx>) collapse into a single
// "kworker/{N}:{N}" pattern bucket under Pcomm grouping.
#[test]
fn build_groups_pcomm_kworker_collapses_across_cpus() {
    let threads = ["kworker/0:0", "kworker/1:0", "kworker/3:2"]
        .into_iter()
        .enumerate()
        .map(|(i, name)| make_thread(name, &format!("t{i}")))
        .collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert_eq!(groups["kworker/{N}:{N}"].thread_count, 3);
    assert_eq!(groups.len(), 1);
}
// A pcomm pattern with only one member must fall back to the literal
// name: "{N}" buckets exist only when at least two names share them.
#[test]
fn build_groups_pcomm_singleton_reverts_to_literal() {
    let snap = snap_with(vec![make_thread("worker-7", "t0")]);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert!(
        groups.contains_key("worker-7"),
        "lone worker-7 stays literal under Pcomm normalization",
    );
    assert!(
        !groups.contains_key("worker-{N}"),
        "no `worker-{{N}}` pattern key for a singleton pcomm",
    );
    assert_eq!(groups.len(), 1);
}
// no_thread_normalize=true must disable pcomm pattern collapsing
// entirely: worker-7 and worker-15 stay as two literal buckets and no
// "worker-{N}" bucket appears.
#[test]
fn no_thread_normalize_uses_literal_pcomm() {
    let snap_a = snap_with(vec![
        make_thread("worker-7", "t0"),
        make_thread("worker-15", "t1"),
    ]);
    let snap_b = snap_with(vec![
        make_thread("worker-7", "t0"),
        make_thread("worker-15", "t1"),
    ]);
    let diff = compare(
        &snap_a,
        &snap_b,
        &CompareOptions {
            group_by: GroupBy::Pcomm.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: true,
            no_cg_normalize: false,
            sort_by: Vec::new(),
        },
    );
    let group_keys: std::collections::BTreeSet<&str> =
        diff.rows.iter().map(|r| r.group_key.as_str()).collect();
    assert!(
        group_keys.contains("worker-7"),
        "literal worker-7 missing under no_thread_normalize: {group_keys:?}",
    );
    assert!(
        group_keys.contains("worker-15"),
        "literal worker-15 missing under no_thread_normalize: {group_keys:?}",
    );
    assert!(
        !group_keys.contains("worker-{N}"),
        "no normalized bucket under no_thread_normalize on Pcomm: {group_keys:?}",
    );
}
// A worker pool that resized between snapshots (1 thread → 2, with
// disjoint literal names) must still join on the shared "worker-{N}"
// pattern key, with per-side thread counts preserved and no
// worker-prefixed orphans on either side.
#[test]
fn compare_pcomm_pattern_joins_across_asymmetric_resize() {
    let baseline = snap_with(vec![make_thread("worker-7", "t0")]);
    let candidate = snap_with(vec![
        make_thread("worker-0", "t0"),
        make_thread("worker-1", "t1"),
    ]);
    let diff = compare(
        &baseline,
        &candidate,
        &CompareOptions {
            group_by: GroupBy::Pcomm.into(),
            cgroup_flatten: vec![],
            no_thread_normalize: false,
            no_cg_normalize: false,
            sort_by: Vec::new(),
        },
    );
    let row = diff
        .rows
        .iter()
        .find(|r| r.metric_name == "run_time_ns" && r.group_key == "worker-{N}")
        .expect("worker-{N} pcomm row joined across asymmetric snapshots");
    assert_eq!(row.thread_count_a, 1, "baseline carries 1 worker process");
    assert_eq!(
        row.thread_count_b, 2,
        "candidate carries 2 worker processes"
    );
    // Joined groups must not also show up as one-sided orphans.
    let baseline_orphans: Vec<&String> = diff
        .only_baseline
        .iter()
        .filter(|k| k.starts_with("worker"))
        .collect();
    assert!(
        baseline_orphans.is_empty(),
        "no worker-prefixed pcomm orphans in only_baseline; got {baseline_orphans:?}",
    );
    let candidate_orphans: Vec<&String> = diff
        .only_candidate
        .iter()
        .filter(|k| k.starts_with("worker"))
        .collect();
    assert!(
        candidate_orphans.is_empty(),
        "no worker-prefixed pcomm orphans in only_candidate; got {candidate_orphans:?}",
    );
}
#[test]
fn compare_pcomm_pattern_emits_prefix_join_key_and_grex_display() {
    // Disjoint worker ids on either side share the worker-{N} pattern, so
    // the row joins on the placeholder while the display label is the
    // grex-derived regex over the union of member names.
    let baseline = snap_with(vec![
        make_thread("worker-0", "t0"),
        make_thread("worker-1", "t1"),
    ]);
    let candidate = snap_with(vec![
        make_thread("worker-2", "t0"),
        make_thread("worker-3", "t1"),
    ]);
    let opts = CompareOptions {
        group_by: GroupBy::Pcomm.into(),
        cgroup_flatten: vec![],
        no_thread_normalize: false,
        no_cg_normalize: false,
        sort_by: Vec::new(),
    };
    let diff = compare(&baseline, &candidate, &opts);
    let joined = diff
        .rows
        .iter()
        .find(|r| r.metric_name == "run_time_ns" && r.group_key == "worker-{N}")
        .expect("worker-{N} pcomm row");
    assert_eq!(
        joined.group_key, "worker-{N}",
        "join key is the placeholder pattern under Pcomm normalization",
    );
    assert!(
        joined.display_key.contains("worker"),
        "display key reflects grex over union; got {:?}",
        joined.display_key,
    );
    assert_ne!(
        joined.display_key, "worker-{N}",
        "≥2 members → grex regex, not the placeholder pattern",
    );
}
#[test]
fn build_groups_pcomm_sum_conservation_across_buckets() {
    // Five workers, three redis-bg threads, and one singleton: however the
    // pattern bucketing splits them, the per-bucket sums must conserve the
    // input total exactly.
    let mut threads: Vec<_> = (0..5u64)
        .map(|i| {
            let mut t = make_thread(&format!("worker-{i}"), "t");
            t.run_time_ns = MonotonicNs(100 * (i + 1));
            t
        })
        .chain((0..3u64).map(|i| {
            let mut t = make_thread(&format!("redis-bg-{i}"), "t");
            t.run_time_ns = MonotonicNs(50 * (i + 1));
            t
        }))
        .collect();
    let mut lone = make_thread("init", "t");
    lone.run_time_ns = MonotonicNs(999);
    threads.push(lone);
    let input_total: u64 = threads.iter().map(|t| t.run_time_ns.0).sum();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    let aggregated_total: u64 = groups
        .values()
        .map(|g| {
            if let Some(Aggregated::Sum(n)) = g.metrics.get("run_time_ns") {
                *n
            } else {
                0
            }
        })
        .sum();
    assert_eq!(
        aggregated_total, input_total,
        "Pcomm pattern-aggregated sum must equal input sum",
    );
}
/// Fixture: a thread-group leader (tid == tgid) carrying an smaps rollup
/// with just the Rss and Pss fields, rooted at cgroup "/".
fn smaps_thread(pcomm: &str, tgid: u32, rss_kb: u64, pss_kb: u64) -> ThreadState {
    let mut state = ThreadState {
        tid: tgid,
        tgid,
        pcomm: pcomm.into(),
        comm: pcomm.into(),
        cgroup: "/".into(),
        ..ThreadState::default()
    };
    for (field, kb) in [("Rss", rss_kb), ("Pss", pss_kb)] {
        state.smaps_rollup_kb.insert(field.into(), kb);
    }
    state
}
#[test]
fn collect_smaps_rollup_normalizes_and_sums_across_pids() {
    // Three distinct PIDs under the same worker-{N} pattern must collapse
    // into one bucket whose fields are byte sums (kB * 1024) over all PIDs.
    let specs = [(100u32, 1024u64, 512u64), (200, 2048, 1024), (300, 4096, 2048)];
    let threads: Vec<ThreadState> = specs
        .iter()
        .enumerate()
        .map(|(i, &(tgid, rss, pss))| smaps_thread(&format!("worker-{i}"), tgid, rss, pss))
        .collect();
    let snap = snap_with(threads);
    let out = collect_smaps_rollup(&snap, false);
    assert_eq!(out.len(), 1, "three PIDs collapse into one bucket: {out:?}");
    let bucket = out
        .get("worker-{N}")
        .expect("bucket key is pattern_key(pcomm) — no `[tgid]` suffix");
    assert_eq!(
        bucket.get("Rss").copied(),
        Some((1024 + 2048 + 4096) * 1024),
        "Rss SUMs across the three collapsed PIDs",
    );
    assert_eq!(
        bucket.get("Pss").copied(),
        Some((512 + 1024 + 2048) * 1024),
        "Pss SUMs across the three collapsed PIDs",
    );
}
#[test]
fn collect_smaps_rollup_no_singleton_revert_when_normalizing() {
    // Unlike the grouping axis, the smaps axis performs no singleton-revert:
    // a lone worker-7 still normalizes to the worker-{N} pattern key.
    let snap = snap_with(vec![smaps_thread("worker-7", 99, 1024, 512)]);
    let out = collect_smaps_rollup(&snap, false);
    assert_eq!(out.len(), 1);
    let keys: Vec<_> = out.keys().collect();
    assert!(
        out.contains_key("worker-{N}"),
        "lone worker-7 must STILL normalize to worker-{{N}} for smaps; \
        singleton-revert is intentionally skipped on the smaps axis: \
        got {:?}",
        keys,
    );
    assert!(
        !out.contains_key("worker-7"),
        "literal singleton key must NOT appear under default smaps \
        normalization: got {:?}",
        keys,
    );
}
#[test]
fn collect_smaps_rollup_no_normalize_preserves_literal_pid_keys() {
    // With normalization off, each PID keeps its own `pcomm[tgid]` bucket.
    let snap = snap_with(vec![
        smaps_thread("worker-0", 100, 1024, 512),
        smaps_thread("worker-1", 200, 2048, 1024),
        smaps_thread("worker-2", 300, 4096, 2048),
    ]);
    let out = collect_smaps_rollup(&snap, true);
    assert_eq!(
        out.len(),
        3,
        "no_normalize keeps three distinct PID buckets"
    );
    for (key, rss_kb) in [
        ("worker-0[100]", 1024u64),
        ("worker-1[200]", 2048),
        ("worker-2[300]", 4096),
    ] {
        assert_eq!(out[key]["Rss"], rss_kb * 1024);
    }
}
#[test]
fn collect_smaps_rollup_empty_snapshot_returns_empty_map() {
    // No threads → no buckets, in both normalized and literal modes.
    let snap = snap_with(Vec::new());
    for literal in [false, true] {
        assert!(collect_smaps_rollup(&snap, literal).is_empty());
    }
}
#[test]
fn collect_smaps_rollup_skips_non_leader_threads() {
    // A thread with tid != tgid is not a thread-group leader; its (empty)
    // smaps_rollup_kb must not perturb the leader's bucket in either
    // normalized or literal mode.
    let leader = smaps_thread("worker-0", 100, 1024, 512);
    let non_leader = ThreadState {
        tid: 101,
        tgid: 100,
        pcomm: "worker-0".into(),
        comm: "worker-0".into(),
        cgroup: "/".into(),
        ..ThreadState::default()
    };
    // Default-constructed threads carry no smaps data; assert the fixture
    // invariant instead of silently enforcing it (the former `.clear()` was
    // a no-op right after this assertion and required a needless `mut`).
    assert!(non_leader.smaps_rollup_kb.is_empty());
    let snap = snap_with(vec![leader, non_leader]);
    let out_norm = collect_smaps_rollup(&snap, false);
    assert_eq!(out_norm.len(), 1);
    assert!(out_norm.contains_key("worker-{N}"));
    let out_lit = collect_smaps_rollup(&snap, true);
    assert_eq!(out_lit.len(), 1);
    assert!(out_lit.contains_key("worker-0[100]"));
}
#[test]
fn collect_smaps_rollup_merge_carries_every_field_seen() {
    // worker-1 contributes a field (Private_Clean) that worker-0 lacks; the
    // merged bucket must carry the union of fields, summing where both exist.
    let first = smaps_thread("worker-0", 100, 1024, 512);
    assert!(!first.smaps_rollup_kb.contains_key("Private_Clean"));
    let mut second = ThreadState {
        tid: 200,
        tgid: 200,
        pcomm: "worker-1".into(),
        comm: "worker-1".into(),
        cgroup: "/".into(),
        ..ThreadState::default()
    };
    for (field, kb) in [("Rss", 2048u64), ("Private_Clean", 256)] {
        second.smaps_rollup_kb.insert(field.into(), kb);
    }
    let snap = snap_with(vec![first, second]);
    let out = collect_smaps_rollup(&snap, false);
    let bucket = out.get("worker-{N}").expect("merged bucket");
    assert_eq!(bucket.get("Rss").copied(), Some((1024 + 2048) * 1024));
    assert_eq!(bucket.get("Pss").copied(), Some(512 * 1024));
    assert_eq!(bucket.get("Private_Clean").copied(), Some(256 * 1024));
}
#[test]
fn write_diff_smaps_orders_processes_by_rss_desc() {
    // Keys are chosen so lexicographic order (a_light < b_heavy) is the
    // opposite of Rss order — if the section were sorted by key, this test
    // would catch it. (Original had multiple statements squashed per line;
    // reformatted to one statement per line and deduped via a builder.)
    let mem = |rss: u64, pss: u64| {
        let mut m = BTreeMap::new();
        m.insert("Rss".to_string(), rss);
        m.insert("Pss".to_string(), pss);
        m
    };
    let mut diff = CtprofDiff::default();
    diff.smaps_rollup_a
        .insert("a_light[1]".to_string(), mem(1024, 512));
    diff.smaps_rollup_b
        .insert("a_light[1]".to_string(), mem(2048, 1024));
    diff.smaps_rollup_a
        .insert("b_heavy[2]".to_string(), mem(100 * 1024 * 1024, 50 * 1024 * 1024));
    diff.smaps_rollup_b
        .insert("b_heavy[2]".to_string(), mem(200 * 1024 * 1024, 100 * 1024 * 1024));
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    let smaps_at = out
        .find("## smaps_rollup")
        .expect("smaps section must render");
    let after_header = &out[smaps_at..];
    let heavy_pos = after_header
        .find("b_heavy[2]")
        .expect("b_heavy must appear");
    let light_pos = after_header
        .find("a_light[1]")
        .expect("a_light must appear");
    assert!(
        heavy_pos < light_pos,
        "process with larger Rss must render first; \
        b_heavy@{heavy_pos} must precede a_light@{light_pos}",
    );
}
#[test]
fn collect_smaps_rollup_saturating_add_does_not_panic_on_overflow() {
    // Two u64::MAX Rss contributions would overflow a plain add; the merge
    // must saturate instead of panicking in debug builds.
    let snap = snap_with(vec![
        smaps_thread("worker-0", 100, u64::MAX, 1),
        smaps_thread("worker-1", 200, u64::MAX, 1),
    ]);
    let out = collect_smaps_rollup(&snap, false);
    let bucket = out.get("worker-{N}").expect("merged bucket");
    let clamped = bucket
        .get("Rss")
        .copied()
        .expect("Rss key present after overflow");
    assert_eq!(
        clamped,
        u64::MAX,
        "saturating_add must clamp to u64::MAX, not panic",
    );
}
#[test]
fn pattern_key_bracket_alpha_token_stays_literal() {
    // Purely alphabetic bracket tokens carry no numeric suffix to
    // normalize, so pattern_key is the identity on them.
    for name in ["foo[bar]", "a[b]", "dev[abc]"] {
        assert_eq!(pattern_key(name), name);
    }
}
#[test]
fn build_groups_pcomm_distinct_prefixes_do_not_merge() {
    // `worker` and `worker-large` differ before the numeric suffix, so they
    // must land in two separate pattern buckets.
    let mut threads = Vec::new();
    for prefix in ["worker", "worker-large"] {
        for i in 0..2 {
            threads.push(make_thread(&format!("{prefix}-{i}"), "t"));
        }
    }
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert_eq!(groups["worker-{N}"].thread_count, 2);
    assert_eq!(groups["worker-large-{N}"].thread_count, 2);
    assert_eq!(groups.len(), 2);
}
#[test]
fn collect_smaps_rollup_singleton_drops_tgid_suffix() {
    // `bash` has no numeric suffix, so pattern_key("bash") == "bash"; the
    // singleton bucket must use the bare name with no `[tgid]` suffix.
    let snap = snap_with(vec![smaps_thread("bash", 42, 4096, 1024)]);
    let out = collect_smaps_rollup(&snap, false);
    assert_eq!(out.len(), 1);
    let keys: Vec<_> = out.keys().collect();
    assert!(
        out.contains_key("bash"),
        "singleton bash key must equal pattern_key(\"bash\") = \"bash\"; \
        got {:?}",
        keys,
    );
    assert!(
        !out.contains_key("bash[42]"),
        "singleton must NOT carry the tgid suffix under \
        default normalization: got {:?}",
        keys,
    );
}
#[test]
fn write_diff_smaps_emits_row_for_each_process_key() {
    // Every process key present in the diff must get a row in the rendered
    // smaps section, regardless of magnitude.
    let rss_map = |bytes: u64| {
        let mut m = BTreeMap::new();
        m.insert("Rss".to_string(), bytes);
        m
    };
    let mut diff = CtprofDiff::default();
    diff.smaps_rollup_a
        .insert("firefox".into(), rss_map(100 * 1024 * 1024));
    diff.smaps_rollup_b
        .insert("firefox".into(), rss_map(200 * 1024 * 1024));
    diff.smaps_rollup_a.insert("bash".into(), rss_map(1024));
    diff.smaps_rollup_b.insert("bash".into(), rss_map(2048));
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    let smaps_at = out
        .find("## smaps_rollup")
        .expect("smaps section must render");
    let smaps_section = &out[smaps_at..];
    assert!(
        smaps_section.contains("firefox"),
        "process key `firefox` must appear in smaps section body:\n{smaps_section}",
    );
    assert!(
        smaps_section.contains("bash"),
        "process key `bash` must appear in smaps section body:\n{smaps_section}",
    );
}
#[test]
fn pattern_display_label_handles_bracket_member_names() {
    // Bracketed member names must survive grex: the label keeps the shared
    // `worker` prefix and is itself a compilable regex.
    let members: Vec<String> = (0..3).map(|i| format!("worker[{i}]")).collect();
    let label = pattern_display_label("worker[{N}]", &members);
    assert!(
        label.contains("worker"),
        "grex must produce a label that contains the shared `worker` prefix; got {label:?}",
    );
    let _: Regex = Regex::new(&label)
        .unwrap_or_else(|e| panic!("grex output {label:?} is not a valid regex: {e}"));
}
#[test]
fn build_groups_pcomm_empty_pcomm_collapses_under_normalization() {
    // Empty pcomms are legal; both threads land in a single empty-key bucket.
    let threads: Vec<_> = (0..2).map(|i| make_thread("", &format!("t{i}"))).collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert_eq!(groups[""].thread_count, 2);
    assert_eq!(groups.len(), 1);
}
#[test]
fn cgroup_skeleton_tokens_handles_bracketed_segments() {
    // Brackets act as separators for tokenization but survive verbatim in
    // the skeleton string.
    let (skeleton, tokens) = cgroup_skeleton_tokens("/runner-[xyz]/scope");
    let expected: Vec<String> = ["runner", "xyz", "scope"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    assert_eq!(
        tokens,
        expected,
        "bracket separators must split tokens cleanly; got {tokens:?}",
    );
    assert_eq!(
        skeleton, "/runner-[xyz]/scope",
        "skeleton must preserve separators including brackets; got {skeleton:?}",
    );
}
#[test]
fn build_groups_pcomm_bracketed_pcomms_collapse() {
    // Bracket-wrapped kernel-style names still pattern-collapse on the
    // trailing numeric suffix.
    let threads: Vec<_> = (0..3)
        .map(|i| make_thread(&format!("[stress-ng-{i}]"), &format!("t{i}")))
        .collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert_eq!(
        groups["[stress-ng-{N}]"].thread_count,
        3,
        "all three bracketed pcomms must collapse into one bucket; got {:?}",
        groups.keys().collect::<Vec<_>>(),
    );
    assert_eq!(groups.len(), 1);
}
#[test]
fn build_groups_pcomm_truncated_pcomms_group_via_exact_match() {
    // 15-char comm truncation yields identical pcomms with no numeric
    // suffix; they group via the literal-pcomm branch, not the pattern one.
    let threads: Vec<_> = (0..2)
        .map(|i| make_thread("tokio-runtime-w", &format!("t{i}")))
        .collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert_eq!(
        groups["tokio-runtime-w"].thread_count, 2,
        "identical truncated pcomms collapse via literal-pcomm branch",
    );
    assert_eq!(groups.len(), 1);
}
#[test]
fn collect_smaps_rollup_independent_of_group_by_axis() {
    // Scatter the three workers across distinct cgroups: smaps keying is by
    // pcomm pattern only and must ignore the cgroup axis entirely.
    let mut threads = Vec::new();
    for (i, (tgid, rss, pss, cg)) in [
        (100u32, 1024u64, 512u64, "/cg-a"),
        (200, 2048, 1024, "/cg-b"),
        (300, 4096, 2048, "/cg-c"),
    ]
    .into_iter()
    .enumerate()
    {
        let mut t = smaps_thread(&format!("worker-{i}"), tgid, rss, pss);
        t.cgroup = cg.into();
        threads.push(t);
    }
    let snap = snap_with(threads);
    let out = collect_smaps_rollup(&snap, false);
    assert_eq!(
        out.len(),
        1,
        "smaps keying must collapse pcomm-pattern siblings \
        regardless of cgroup distribution: got {:?}",
        out.keys().collect::<Vec<_>>(),
    );
    let bucket = out.get("worker-{N}").expect("merged worker bucket");
    assert_eq!(
        bucket.get("Rss").copied(),
        Some((1024 + 2048 + 4096) * 1024)
    );
}
#[test]
fn write_diff_smaps_pss_breaks_tie_when_rss_equal() {
    // Both processes tie on Rss (100 MiB baseline, 120 MiB candidate); only
    // Pss distinguishes them, so `zoomed` (higher Pss) must render first.
    // (Original had multiple statements squashed per line; reformatted to
    // one statement per line and deduped via a builder closure.)
    let mem = |rss_mib: u64, pss_mib: u64| {
        let mut m = BTreeMap::new();
        m.insert("Rss".to_string(), rss_mib * 1024 * 1024);
        m.insert("Pss".to_string(), pss_mib * 1024 * 1024);
        m
    };
    let mut diff = CtprofDiff::default();
    diff.smaps_rollup_a.insert("alpha_proc".into(), mem(100, 30));
    diff.smaps_rollup_b.insert("alpha_proc".into(), mem(120, 35));
    diff.smaps_rollup_a.insert("zoomed".into(), mem(100, 80));
    diff.smaps_rollup_b.insert("zoomed".into(), mem(120, 90));
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    let smaps_at = out
        .find("## smaps_rollup")
        .expect("smaps section must render");
    let after = &out[smaps_at..];
    let zoomed_pos = after.find("zoomed").expect("zoomed key must appear");
    let alpha_pos = after
        .find("alpha_proc")
        .expect("alpha_proc key must appear");
    assert!(
        zoomed_pos < alpha_pos,
        "Pss tiebreaker must place higher-Pss process (zoomed) ahead of \
        lower-Pss process (alpha_proc) when Rss ties; got zoomed@{zoomed_pos} \
        alpha_proc@{alpha_pos}",
    );
}
#[test]
fn collect_smaps_rollup_empty_pcomm_collapses_under_normalization() {
    // Two leaders with empty pcomms merge into the single empty-string
    // bucket, with fields summed in bytes.
    let snap = snap_with(vec![
        smaps_thread("", 100, 1024, 512),
        smaps_thread("", 200, 2048, 1024),
    ]);
    let out = collect_smaps_rollup(&snap, false);
    assert_eq!(
        out.len(),
        1,
        "two empty-pcomm leaders must merge into one bucket; got {:?}",
        out.keys().collect::<Vec<_>>(),
    );
    let bucket = out.get("").expect("empty-key bucket");
    for (field, kb_sum) in [("Rss", 1024u64 + 2048), ("Pss", 512 + 1024)] {
        assert_eq!(bucket.get(field).copied(), Some(kb_sum * 1024));
    }
}
#[test]
fn write_diff_smaps_literal_mode_renders_pcomm_tgid_keys() {
    // In literal mode (no_thread_normalize) smaps keys must be pcomm[tgid]
    // end to end: in the diff maps AND in the rendered table.
    let leader = |pss_kb: u64| {
        let mut t = make_thread("worker", "worker");
        t.tid = 4242;
        t.tgid = 4242;
        t.smaps_rollup_kb.insert("Rss".into(), 4096);
        t.smaps_rollup_kb.insert("Pss".into(), pss_kb);
        t
    };
    let snap_a = snap_with(vec![leader(1024)]);
    let snap_b = snap_with(vec![leader(2048)]);
    let opts = CompareOptions {
        group_by: GroupBy::Pcomm.into(),
        cgroup_flatten: vec![],
        no_thread_normalize: true,
        no_cg_normalize: false,
        sort_by: Vec::new(),
    };
    let diff = compare(&snap_a, &snap_b, &opts);
    assert!(
        diff.smaps_rollup_a.contains_key("worker[4242]"),
        "literal-mode baseline key must be `worker[4242]`; got {:?}",
        diff.smaps_rollup_a.keys().collect::<Vec<_>>(),
    );
    assert!(
        diff.smaps_rollup_b.contains_key("worker[4242]"),
        "literal-mode candidate key must be `worker[4242]`; got {:?}",
        diff.smaps_rollup_b.keys().collect::<Vec<_>>(),
    );
    assert!(
        !diff.smaps_rollup_a.contains_key("worker"),
        "literal-mode must NOT carry the normalized `worker` key",
    );
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    let smaps_at = out
        .find("## smaps_rollup")
        .expect("smaps section must render");
    let after = &out[smaps_at..];
    assert!(
        after.contains("worker[4242]"),
        "literal-mode rendered table must show `worker[4242]` key:\n{after}",
    );
}
}