#![allow(unused_imports)]
#![allow(clippy::field_reassign_with_default)]
use std::collections::BTreeMap;
use std::path::Path;
use super::aggregate::{format_cpu_range, merge_aggregated_into};
use super::cgroup_merge::{
merge_cgroup_cpu, merge_cgroup_memory, merge_cgroup_pids, merge_kv_counters, merge_max_option,
merge_memory_stat, merge_min_option, merge_psi,
};
use super::columns::{compare_columns_for, format_cgroup_only_section_warning};
use super::compare::sort_diff_rows_by_keys;
use super::groups::build_row;
use super::pattern::{
Segment, apply_systemd_template, cgroup_normalize_skeleton, cgroup_skeleton_tokens,
classify_token, is_token_separator, pattern_counts_union, pattern_key, split_into_segments,
tighten_group,
};
use super::render::psi_pair_has_data;
use super::scale::{auto_scale, format_delta_cell};
use super::tests_fixtures::*;
use super::*;
use crate::ctprof::{CgroupStats, CtprofSnapshot, Psi, ThreadState};
use crate::metric_types::{
Bytes, CategoricalString, CpuSet, MonotonicCount, MonotonicNs, OrdinalI32, PeakNs,
};
use regex::Regex;
#[test]
fn flatten_cgroup_path_collapses_via_pattern() {
    // A path matching a compiled flatten pattern is replaced by the pattern itself.
    let patterns = compile_flatten_patterns(&["/kubepods/*/workload".into()]);
    let flattened = flatten_cgroup_path("/kubepods/pod-abc-123/workload", &patterns);
    assert_eq!(flattened, "/kubepods/*/workload");
}
#[test]
fn flatten_cgroup_path_falls_through_unmatched() {
    // Paths that match no pattern must come back unchanged.
    let patterns = compile_flatten_patterns(&["/kubepods/*/workload".into()]);
    let unchanged = flatten_cgroup_path("/system.slice/sshd.service", &patterns);
    assert_eq!(unchanged, "/system.slice/sshd.service");
}
#[test]
fn group_by_cgroup_applies_flatten_patterns() {
    // Two threads in sibling pod cgroups should land in one flattened bucket,
    // so neither side reports an unmatched group.
    let mut baseline = make_thread("app", "w1");
    baseline.cgroup = "/kubepods/pod-xxx/workload".into();
    baseline.run_time_ns = MonotonicNs(1_000);
    let mut candidate = make_thread("app", "w1");
    candidate.cgroup = "/kubepods/pod-yyy/workload".into();
    candidate.run_time_ns = MonotonicNs(2_000);
    let opts = CompareOptions {
        group_by: GroupBy::Cgroup.into(),
        cgroup_flatten: vec!["/kubepods/*/workload".into()],
        no_thread_normalize: false,
        no_cg_normalize: false,
        sort_by: Vec::new(),
    };
    let diff = compare(&snap_with(vec![baseline]), &snap_with(vec![candidate]), &opts);
    assert!(diff.only_baseline.is_empty(), "{:?}", diff.only_baseline);
    assert!(diff.only_candidate.is_empty(), "{:?}", diff.only_candidate);
    let row_keys: Vec<_> = diff.rows.iter().map(|r| &r.group_key).collect();
    assert!(
        row_keys.iter().any(|k| k.as_str() == "/kubepods/*/workload"),
        "rows={:?}",
        row_keys,
    );
}
#[test]
fn group_by_cgroup_surfaces_enrichment_on_diff() {
    // Cgroup-keyed stats from both snapshots must be carried through into the diff.
    let mut thread_a = make_thread("app", "w1");
    thread_a.cgroup = "/app".into();
    let mut thread_b = make_thread("app", "w1");
    thread_b.cgroup = "/app".into();
    let mut snap_a = snap_with(vec![thread_a]);
    snap_a
        .cgroup_stats
        .insert("/app".into(), simple_cgroup_stats(100, 1, 50, 1 << 20));
    let mut snap_b = snap_with(vec![thread_b]);
    snap_b
        .cgroup_stats
        .insert("/app".into(), simple_cgroup_stats(500, 3, 250, 2 << 20));
    let opts = CompareOptions {
        group_by: GroupBy::Cgroup.into(),
        cgroup_flatten: vec![],
        no_thread_normalize: false,
        no_cg_normalize: false,
        sort_by: Vec::new(),
    };
    let diff = compare(&snap_a, &snap_b, &opts);
    assert_eq!(diff.cgroup_stats_a["/app"].cpu.usage_usec, 100);
    assert_eq!(diff.cgroup_stats_b["/app"].cpu.usage_usec, 500);
}
#[test]
fn group_by_comm_aggregates_across_processes() {
    // Threads sharing a comm but living in different processes fold into one row.
    let mk = |pcomm: &str, ns: u64| {
        let mut t = make_thread(pcomm, "worker");
        t.run_time_ns = MonotonicNs(ns);
        t
    };
    let baseline = snap_with(vec![mk("procA", 100), mk("procB", 200)]);
    let candidate = snap_with(vec![mk("procA", 500), mk("procB", 500)]);
    let opts = CompareOptions {
        group_by: GroupBy::Comm.into(),
        cgroup_flatten: vec![],
        no_thread_normalize: false,
        no_cg_normalize: false,
        sort_by: Vec::new(),
    };
    let diff = compare(&baseline, &candidate, &opts);
    let row = diff
        .rows
        .iter()
        .find(|r| r.metric_name == "run_time_ns" && r.group_key == "worker")
        .expect("worker row");
    assert_eq!(row.thread_count_a, 2);
    assert_eq!(row.thread_count_b, 2);
    // (500 + 500) - (100 + 200)
    assert_eq!(row.delta, Some(700.0));
}
#[test]
fn flatten_first_match_wins_over_later_pattern() {
    // The earlier, more specific pattern wins even though the glob also matches.
    let patterns =
        compile_flatten_patterns(&["/kubepods/*/workload".into(), "/kubepods/**".into()]);
    let flattened = flatten_cgroup_path("/kubepods/pod-abc/workload", &patterns);
    assert_eq!(flattened, "/kubepods/*/workload");
}
#[test]
fn compile_flatten_patterns_skips_malformed() {
    // An unparseable pattern is dropped; compilation carries on with the rest.
    let specs: Vec<String> = vec!["[invalid".into(), "/ok/*".into()];
    let patterns = compile_flatten_patterns(&specs);
    assert_eq!(patterns.len(), 1);
    assert_eq!(patterns[0].as_str(), "/ok/*");
}
#[test]
fn write_diff_enrichment_section_absent_when_group_by_pcomm() {
    // Cgroup enrichment output must be suppressed when not grouping by cgroup,
    // even if enrichment data is present in the diff.
    let mut diff = CtprofDiff::default();
    diff.cgroup_stats_a
        .insert("/app".into(), simple_cgroup_stats(10, 0, 0, 0));
    let mut out = String::new();
    write_diff(
        &mut out,
        &diff,
        Path::new("a"),
        Path::new("b"),
        GroupBy::Pcomm,
        &DisplayOptions::default(),
    )
    .unwrap();
    assert!(!out.contains("cpu_usage_usec"), "enrichment leaked:\n{out}");
}
#[test]
fn build_groups_comm_singleton_reverts_to_literal() {
    // A pattern bucket that would hold exactly one comm reverts to the literal name.
    let snap = snap_with(vec![make_thread("app", "worker-0")]);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups.len(), 1);
    assert!(
        groups.contains_key("worker-0"),
        "lone worker-0 stays literal",
    );
    assert!(
        !groups.contains_key("worker-{N}"),
        "no `worker-{{N}}` pattern key for a singleton",
    );
}
#[test]
fn build_groups_comm_distinct_prefixes_do_not_merge() {
    // "worker-N" and "worker-large-N" have different stems and must stay apart.
    let comms = ["worker-0", "worker-1", "worker-large-0", "worker-large-1"];
    let threads: Vec<_> = comms.iter().map(|c| make_thread("app", c)).collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups.len(), 2);
    assert_eq!(groups["worker-{N}"].thread_count, 2);
    assert_eq!(groups["worker-large-{N}"].thread_count, 2);
}
#[test]
fn build_groups_comm_alpha_prefix_clusters_camelcase() {
    // Six CamelCaseWord{i} comms should collapse into a single {N} bucket.
    let threads: Vec<_> = (0..6)
        .map(|i| make_thread("app", &format!("CamelCaseWord{i}")))
        .collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert!(
        groups.contains_key("CamelCaseWord{N}"),
        "CamelCaseWord{{N}} bucket",
    );
    assert_eq!(groups["CamelCaseWord{N}"].thread_count, 6);
    assert_eq!(groups.len(), 1);
}
#[test]
fn build_groups_comm_kworker_workqueue_collapses_per_cpu() {
    // Per-CPU kworkers serving one workqueue merge; distinct workqueues stay split.
    let comms = [
        "kworker/42:7-mm_percpu_wq",
        "kworker/43:8-mm_percpu_wq",
        "kworker/44:9-mm_percpu_wq",
        "kworker/0:0-wq_reclaim",
        "kworker/1:0-wq_reclaim",
    ];
    let threads: Vec<_> = comms.iter().map(|c| make_thread("kworker", c)).collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups.len(), 2);
    assert_eq!(groups["kworker/{N}:{N}-mm_percpu_wq"].thread_count, 3);
    assert_eq!(groups["kworker/{N}:{N}-wq_reclaim"].thread_count, 2);
}
#[test]
fn build_groups_comm_kworker_bare_collapses_across_cpus() {
    // kworker/<cpu>:<seq> with no workqueue suffix normalizes to one bucket.
    let comms = ["kworker/0:0", "kworker/0:1", "kworker/1:0", "kworker/3:2"];
    let threads: Vec<_> = comms.iter().map(|c| make_thread("kworker", c)).collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups["kworker/{N}:{N}"].thread_count, 4);
    assert_eq!(groups.len(), 1);
}
#[test]
fn build_groups_comm_kworker_unbound_separate_from_bound() {
    // The "u" (unbound) marker keeps unbound kworkers apart from bound ones.
    let comms = [
        "kworker/0:0",
        "kworker/3:2",
        "kworker/u8:3",
        "kworker/u8:7",
        "kworker/u16:0",
    ];
    let threads: Vec<_> = comms.iter().map(|c| make_thread("kworker", c)).collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups.len(), 2);
    assert_eq!(groups["kworker/{N}:{N}"].thread_count, 2);
    assert_eq!(groups["kworker/u{N}:{N}"].thread_count, 3);
}
#[test]
fn build_groups_comm_empty_comm_does_not_panic() {
    // Empty comm strings are legal input and both land under the empty key.
    let threads = vec![make_thread("app", ""), make_thread("app", "")];
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups[""].thread_count, 2);
}
#[test]
fn build_groups_comm_truncated_comms_group_via_exact_match() {
    // Identical (truncated) comms merge through plain equality, no pattern needed.
    let mk = || make_thread("app", "tokio-runtime-w");
    let snap = snap_with(vec![mk(), mk()]);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    assert_eq!(groups.len(), 1);
    assert_eq!(groups["tokio-runtime-w"].thread_count, 2);
}
#[test]
fn build_groups_comm_sum_conservation_across_buckets() {
    // Pattern bucketing must neither lose nor double-count run_time_ns:
    // the sum over all buckets equals the sum over all input threads.
    let mut threads: Vec<_> = (0..5)
        .map(|i| {
            let mut t = make_thread("app", &format!("worker-{i}"));
            t.run_time_ns = MonotonicNs(100 * (i as u64 + 1));
            t
        })
        .collect();
    threads.extend((0..3).map(|i| {
        let mut t = make_thread("app", &format!("redis-bg-{i}"));
        t.run_time_ns = MonotonicNs(50 * (i as u64 + 1));
        t
    }));
    let mut single = make_thread("app", "main");
    single.run_time_ns = MonotonicNs(999);
    threads.push(single);
    let input_total: u64 = threads.iter().map(|t| t.run_time_ns.0).sum();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Comm, &[], None, None, false);
    let aggregated_total: u64 = groups
        .values()
        .map(|g| {
            if let Some(Aggregated::Sum(n)) = g.metrics.get("run_time_ns") {
                *n
            } else {
                0
            }
        })
        .sum();
    assert_eq!(
        aggregated_total, input_total,
        "pattern-aggregated sum must equal input sum",
    );
}
#[test]
fn build_groups_comm_exact_preserves_literal_semantics() {
    // CommExact never pattern-normalizes: each literal comm is its own bucket.
    let threads = vec![
        make_thread("app", "worker-0"),
        make_thread("app", "worker-1"),
        make_thread("app", "worker-1"),
    ];
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::CommExact, &[], None, None, false);
    assert_eq!(groups.len(), 2);
    assert_eq!(groups["worker-0"].thread_count, 1);
    assert_eq!(groups["worker-1"].thread_count, 2);
}
#[test]
fn build_groups_pcomm_kworker_collapses_across_cpus() {
    // kworker process names differing only in CPU/seq digits share one bucket.
    let threads: Vec<_> = ["kworker/0:0", "kworker/1:0", "kworker/3:2"]
        .iter()
        .enumerate()
        .map(|(i, p)| make_thread(p, &format!("t{i}")))
        .collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert_eq!(groups["kworker/{N}:{N}"].thread_count, 3);
    assert_eq!(groups.len(), 1);
}
#[test]
fn build_groups_pcomm_singleton_reverts_to_literal() {
    // A lone pcomm must not be rewritten into a {N} pattern bucket.
    let snap = snap_with(vec![make_thread("worker-7", "t0")]);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert_eq!(groups.len(), 1);
    assert!(
        groups.contains_key("worker-7"),
        "lone worker-7 stays literal under Pcomm normalization",
    );
    assert!(
        !groups.contains_key("worker-{N}"),
        "no `worker-{{N}}` pattern key for a singleton pcomm",
    );
}
#[test]
fn no_thread_normalize_uses_literal_pcomm() {
    // With no_thread_normalize set, the Pcomm axis must keep literal names
    // and produce no {N} pattern buckets.
    let build = || {
        snap_with(vec![
            make_thread("worker-7", "t0"),
            make_thread("worker-15", "t1"),
        ])
    };
    let opts = CompareOptions {
        group_by: GroupBy::Pcomm.into(),
        cgroup_flatten: vec![],
        no_thread_normalize: true,
        no_cg_normalize: false,
        sort_by: Vec::new(),
    };
    let diff = compare(&build(), &build(), &opts);
    let group_keys: std::collections::BTreeSet<&str> =
        diff.rows.iter().map(|r| r.group_key.as_str()).collect();
    assert!(
        group_keys.contains("worker-7"),
        "literal worker-7 missing under no_thread_normalize: {group_keys:?}",
    );
    assert!(
        group_keys.contains("worker-15"),
        "literal worker-15 missing under no_thread_normalize: {group_keys:?}",
    );
    assert!(
        !group_keys.contains("worker-{N}"),
        "no normalized bucket under no_thread_normalize on Pcomm: {group_keys:?}",
    );
}
#[test]
fn build_groups_pcomm_sum_conservation_across_buckets() {
    // Bucketing on the Pcomm axis must conserve the total run_time_ns.
    let mut threads: Vec<_> = (0..5)
        .map(|i| {
            let mut t = make_thread(&format!("worker-{i}"), "t");
            t.run_time_ns = MonotonicNs(100 * (i as u64 + 1));
            t
        })
        .collect();
    threads.extend((0..3).map(|i| {
        let mut t = make_thread(&format!("redis-bg-{i}"), "t");
        t.run_time_ns = MonotonicNs(50 * (i as u64 + 1));
        t
    }));
    let mut single = make_thread("init", "t");
    single.run_time_ns = MonotonicNs(999);
    threads.push(single);
    let input_total: u64 = threads.iter().map(|t| t.run_time_ns.0).sum();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    let aggregated_total: u64 = groups
        .values()
        .map(|g| {
            if let Some(Aggregated::Sum(n)) = g.metrics.get("run_time_ns") {
                *n
            } else {
                0
            }
        })
        .sum();
    assert_eq!(
        aggregated_total, input_total,
        "Pcomm pattern-aggregated sum must equal input sum",
    );
}
#[test]
fn collect_smaps_rollup_normalizes_and_sums_across_pids() {
    // Three worker-{i} leaders collapse into one pattern bucket; Rss/Pss sum.
    let snap = snap_with(vec![
        smaps_thread("worker-0", 100, 1024, 512),
        smaps_thread("worker-1", 200, 2048, 1024),
        smaps_thread("worker-2", 300, 4096, 2048),
    ]);
    let out = collect_smaps_rollup(&snap, false);
    assert_eq!(out.len(), 1, "three PIDs collapse into one bucket: {out:?}");
    let bucket = out
        .get("worker-{N}")
        .expect("bucket key is pattern_key(pcomm) — no `[tgid]` suffix");
    let expected_rss_kb: u64 = 1024 + 2048 + 4096;
    let expected_pss_kb: u64 = 512 + 1024 + 2048;
    assert_eq!(
        bucket.get("Rss").copied(),
        Some(expected_rss_kb * 1024),
        "Rss SUMs across the three collapsed PIDs",
    );
    assert_eq!(
        bucket.get("Pss").copied(),
        Some(expected_pss_kb * 1024),
        "Pss SUMs across the three collapsed PIDs",
    );
}
#[test]
fn collect_smaps_rollup_no_singleton_revert_when_normalizing() {
    // Unlike build_groups, the smaps axis normalizes even a lone leader.
    let snap = snap_with(vec![smaps_thread("worker-7", 99, 1024, 512)]);
    let out = collect_smaps_rollup(&snap, false);
    assert_eq!(out.len(), 1);
    let keys: Vec<_> = out.keys().collect();
    assert!(
        out.contains_key("worker-{N}"),
        "lone worker-7 must STILL normalize to worker-{{N}} for smaps; \
         singleton-revert is intentionally skipped on the smaps axis: \
         got {:?}",
        keys,
    );
    assert!(
        !out.contains_key("worker-7"),
        "literal singleton key must NOT appear under default smaps \
         normalization: got {:?}",
        keys,
    );
}
#[test]
fn collect_smaps_rollup_no_normalize_preserves_literal_pid_keys() {
    // no_normalize=true keys each leader as "pcomm[tgid]" with no merging.
    let snap = snap_with(vec![
        smaps_thread("worker-0", 100, 1024, 512),
        smaps_thread("worker-1", 200, 2048, 1024),
        smaps_thread("worker-2", 300, 4096, 2048),
    ]);
    let out = collect_smaps_rollup(&snap, true);
    assert_eq!(
        out.len(),
        3,
        "no_normalize keeps three distinct PID buckets"
    );
    for (key, rss_kb) in [
        ("worker-0[100]", 1024_u64),
        ("worker-1[200]", 2048),
        ("worker-2[300]", 4096),
    ] {
        assert_eq!(out[key]["Rss"], rss_kb * 1024);
    }
}
#[test]
fn collect_smaps_rollup_empty_snapshot_returns_empty_map() {
    // An empty snapshot yields an empty rollup on both normalization paths.
    let snap = snap_with(Vec::new());
    for no_normalize in [false, true] {
        assert!(collect_smaps_rollup(&snap, no_normalize).is_empty());
    }
}
#[test]
fn collect_smaps_rollup_skips_non_leader_threads() {
    // A thread with tid != tgid is not a process leader; it must not create a
    // smaps bucket of its own on either normalization path.
    let leader = smaps_thread("worker-0", 100, 1024, 512);
    let non_leader = ThreadState {
        tid: 101,
        tgid: 100,
        pcomm: "worker-0".into(),
        comm: "worker-0".into(),
        cgroup: "/".into(),
        ..ThreadState::default()
    };
    // Default-constructed threads carry no smaps data, so the assert below is
    // sufficient; the previous explicit `.clear()` after it was dead code and
    // the binding no longer needs to be `mut`.
    assert!(non_leader.smaps_rollup_kb.is_empty());
    let snap = snap_with(vec![leader, non_leader]);
    let out_norm = collect_smaps_rollup(&snap, false);
    assert_eq!(out_norm.len(), 1);
    assert!(out_norm.contains_key("worker-{N}"));
    let out_lit = collect_smaps_rollup(&snap, true);
    assert_eq!(out_lit.len(), 1);
    assert!(out_lit.contains_key("worker-0[100]"));
}
#[test]
fn collect_smaps_rollup_merge_carries_every_field_seen() {
    // Merging is a union of keys: a field present in only one PID still survives.
    let leader_a = smaps_thread("worker-0", 100, 1024, 512);
    let mut leader_b = ThreadState {
        tid: 200,
        tgid: 200,
        pcomm: "worker-1".into(),
        comm: "worker-1".into(),
        cgroup: "/".into(),
        ..ThreadState::default()
    };
    leader_b.smaps_rollup_kb.insert("Rss".into(), 2048);
    leader_b.smaps_rollup_kb.insert("Private_Clean".into(), 256);
    assert!(!leader_a.smaps_rollup_kb.contains_key("Private_Clean"));
    let snap = snap_with(vec![leader_a, leader_b]);
    let out = collect_smaps_rollup(&snap, false);
    let bucket = out.get("worker-{N}").expect("merged bucket");
    assert_eq!(bucket.get("Rss").copied(), Some((1024 + 2048) * 1024));
    assert_eq!(bucket.get("Pss").copied(), Some(512 * 1024));
    assert_eq!(bucket.get("Private_Clean").copied(), Some(256 * 1024));
}
#[test]
fn collect_smaps_rollup_saturating_add_does_not_panic_on_overflow() {
    // Two u64::MAX Rss values would overflow a plain add; the merge saturates.
    let snap = snap_with(vec![
        smaps_thread("worker-0", 100, u64::MAX, 1),
        smaps_thread("worker-1", 200, u64::MAX, 1),
    ]);
    let out = collect_smaps_rollup(&snap, false);
    let rss = out
        .get("worker-{N}")
        .expect("merged bucket")
        .get("Rss")
        .copied()
        .expect("Rss key present after overflow");
    assert_eq!(
        rss,
        u64::MAX,
        "saturating_add must clamp to u64::MAX, not panic",
    );
}
#[test]
fn build_groups_pcomm_distinct_prefixes_do_not_merge() {
    // Different alpha stems must not share a {N} bucket on the Pcomm axis.
    let pcomms = ["worker-0", "worker-1", "worker-large-0", "worker-large-1"];
    let threads: Vec<_> = pcomms.iter().map(|p| make_thread(p, "t")).collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert_eq!(groups.len(), 2);
    assert_eq!(groups["worker-{N}"].thread_count, 2);
    assert_eq!(groups["worker-large-{N}"].thread_count, 2);
}
#[test]
fn collect_smaps_rollup_singleton_drops_tgid_suffix() {
    // Normalized keys come from pattern_key(pcomm); the "[tgid]" suffix only
    // exists on the no_normalize path.
    let snap = snap_with(vec![smaps_thread("bash", 42, 4096, 1024)]);
    let out = collect_smaps_rollup(&snap, false);
    assert_eq!(out.len(), 1);
    let keys: Vec<_> = out.keys().collect();
    assert!(
        out.contains_key("bash"),
        "singleton bash key must equal pattern_key(\"bash\") = \"bash\"; \
         got {:?}",
        keys,
    );
    assert!(
        !out.contains_key("bash[42]"),
        "singleton must NOT carry the tgid suffix under \
         default normalization: got {:?}",
        keys,
    );
}
#[test]
fn build_groups_pcomm_empty_pcomm_collapses_under_normalization() {
    // Empty pcomms are tolerated and all land under the empty-string key.
    let snap = snap_with(vec![make_thread("", "t0"), make_thread("", "t1")]);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert_eq!(groups.len(), 1);
    assert_eq!(groups[""].thread_count, 2);
}
#[test]
fn build_groups_pcomm_bracketed_pcomms_collapse() {
    // Bracketed (kernel-style) names normalize inside the brackets too.
    let threads: Vec<_> = (0..3)
        .map(|i| make_thread(&format!("[stress-ng-{i}]"), &format!("t{i}")))
        .collect();
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert_eq!(
        groups["[stress-ng-{N}]"].thread_count,
        3,
        "all three bracketed pcomms must collapse into one bucket; got {:?}",
        groups.keys().collect::<Vec<_>>(),
    );
    assert_eq!(groups.len(), 1);
}
#[test]
fn build_groups_pcomm_truncated_pcomms_group_via_exact_match() {
    // Identical truncated pcomms merge via literal equality, not via a pattern.
    let threads = vec![
        make_thread("tokio-runtime-w", "t0"),
        make_thread("tokio-runtime-w", "t1"),
    ];
    let snap = snap_with(threads);
    let groups = build_groups(&snap, GroupBy::Pcomm, &[], None, None, false);
    assert_eq!(groups.len(), 1);
    assert_eq!(
        groups["tokio-runtime-w"].thread_count, 2,
        "identical truncated pcomms collapse via literal-pcomm branch",
    );
}
#[test]
fn collect_smaps_rollup_independent_of_group_by_axis() {
    // smaps keys ignore cgroup placement: pcomm-pattern siblings merge
    // even when spread across three different cgroups.
    let mut threads = vec![
        smaps_thread("worker-0", 100, 1024, 512),
        smaps_thread("worker-1", 200, 2048, 1024),
        smaps_thread("worker-2", 300, 4096, 2048),
    ];
    for (t, cg) in threads.iter_mut().zip(["/cg-a", "/cg-b", "/cg-c"]) {
        t.cgroup = cg.into();
    }
    let snap = snap_with(threads);
    let out = collect_smaps_rollup(&snap, false);
    assert_eq!(
        out.len(),
        1,
        "smaps keying must collapse pcomm-pattern siblings \
         regardless of cgroup distribution: got {:?}",
        out.keys().collect::<Vec<_>>(),
    );
    let bucket = out.get("worker-{N}").expect("merged worker bucket");
    assert_eq!(
        bucket.get("Rss").copied(),
        Some((1024 + 2048 + 4096) * 1024)
    );
}
#[test]
fn collect_smaps_rollup_empty_pcomm_collapses_under_normalization() {
    // Empty pcomms still merge: both leaders land under the "" key and sum.
    let snap = snap_with(vec![
        smaps_thread("", 100, 1024, 512),
        smaps_thread("", 200, 2048, 1024),
    ]);
    let out = collect_smaps_rollup(&snap, false);
    assert_eq!(
        out.len(),
        1,
        "two empty-pcomm leaders must merge into one bucket; got {:?}",
        out.keys().collect::<Vec<_>>(),
    );
    let bucket = out.get("").expect("empty-key bucket");
    assert_eq!(bucket.get("Rss").copied(), Some((1024 + 2048) * 1024));
    assert_eq!(bucket.get("Pss").copied(), Some((512 + 1024) * 1024));
}
#[test]
fn fudge_only_runs_under_group_by_all() {
    // Cgroup grouping must leave the fudge pass inert even when the two
    // snapshots differ only by cgroup path.
    let opts = CompareOptions {
        group_by: GroupBy::Cgroup.into(),
        cgroup_flatten: vec![],
        no_thread_normalize: false,
        no_cg_normalize: false,
        sort_by: Vec::new(),
    };
    let diff = compare(
        &fudge_snap("/cg-alpha", 10, "worker"),
        &fudge_snap("/cg-beta", 10, "worker"),
        &opts,
    );
    assert!(
        diff.fudged_pairs.is_empty(),
        "GroupBy::Cgroup must not activate fudge; got {} pair(s)",
        diff.fudged_pairs.len(),
    );
}