use super::super::bpf_map::{
BPF_MAP_TYPE_BLOOM_FILTER, BPF_MAP_TYPE_CGROUP_STORAGE, BPF_MAP_TYPE_HASH,
BPF_MAP_TYPE_INSN_ARRAY, BPF_MAP_TYPE_LPM_TRIE, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_PERCPU_HASH, BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK,
};
use super::*;
use crate::monitor::btf_render::MemReader;
use crate::monitor::test_util::name_from_str;
#[test]
fn hex_dump_basic() {
    // Pin the empty, single-byte, and multi-byte space-separated formats.
    for (input, expected) in [
        (vec![], ""),
        (vec![0], "00"),
        (vec![0x12, 0x34, 0xab], "12 34 ab"),
    ] {
        assert_eq!(hex_dump(&input), expected);
    }
}
#[test]
fn render_sparkline_edge_cases() {
    // Degenerate inputs: empty series, single sample, and flat series
    // (all-zero maps to the lowest glyph, equal non-zero to the middle).
    for (samples, expected) in [
        (vec![], ""),
        (vec![42], "▅"),
        (vec![0, 0, 0], "▁▁▁"),
        (vec![5, 5, 5], "▅▅▅"),
    ] {
        assert_eq!(render_sparkline(&samples), expected);
    }
}
#[test]
fn render_sparkline_monotonic_scales_to_full_range() {
    // A strictly increasing ramp must span the whole glyph range,
    // one glyph per sample.
    let rendered = render_sparkline(&[0, 1, 2, 3, 4, 5, 6, 7]);
    let glyphs: Vec<char> = rendered.chars().collect();
    assert_eq!(glyphs.len(), 8);
    assert_eq!(
        glyphs.first().copied(),
        Some('▁'),
        "min must map to lowest glyph: {rendered}"
    );
    assert_eq!(
        glyphs.last().copied(),
        Some('█'),
        "max must map to highest glyph: {rendered}"
    );
}
#[test]
fn render_sparkline_i64_clamps_negatives() {
    // Negative samples must neither panic nor drop glyphs: the output
    // still carries exactly one glyph per input sample.
    let samples = [-5i64, 0, 5, 10];
    let rendered = render_sparkline_i64(&samples);
    assert_eq!(rendered.chars().count(), samples.len());
}
#[test]
fn event_counter_sample_sums_across_cpus() {
    use super::super::{CpuSnapshot, MonitorSample, ScxEventCounters};
    // Fixture: a CPU snapshot carrying only the two counters under test.
    let cpu = |fallback, bypass| CpuSnapshot {
        event_counters: Some(ScxEventCounters {
            select_cpu_fallback: fallback,
            bypass_dispatch: bypass,
            ..Default::default()
        }),
        ..Default::default()
    };
    let sample = MonitorSample {
        elapsed_ms: 100,
        cpus: vec![cpu(5, 100), cpu(7, 50)],
        prog_stats: None,
    };
    let folded = EventCounterSample::from_monitor_sample(&sample)
        .expect("at least one CPU has event_counters");
    // Per-CPU counters are summed; elapsed_ms carries through unchanged.
    assert_eq!(folded.elapsed_ms, 100);
    assert_eq!(folded.select_cpu_fallback, 12);
    assert_eq!(folded.bypass_dispatch, 150);
}
#[test]
fn event_counter_sample_returns_none_when_no_cpu_has_counters() {
    use super::super::{CpuSnapshot, MonitorSample};
    // With every CPU lacking counters, folding must yield None rather
    // than a misleading all-zero sample.
    let sample = MonitorSample {
        elapsed_ms: 200,
        cpus: vec![CpuSnapshot {
            event_counters: None,
            ..Default::default()
        }],
        prog_stats: None,
    };
    assert!(EventCounterSample::from_monitor_sample(&sample).is_none());
}
#[test]
fn event_counter_sample_serde_roundtrip() {
    // Extreme values (i64::MAX and a negative counter) must survive JSON.
    let original = EventCounterSample {
        elapsed_ms: 123_456,
        select_cpu_fallback: i64::MAX,
        insert_not_owned: -1,
        ..Default::default()
    };
    let json = serde_json::to_string(&original).unwrap();
    let loaded: EventCounterSample = serde_json::from_str(&json).unwrap();
    assert_eq!(loaded.elapsed_ms, 123_456);
    assert_eq!(loaded.select_cpu_fallback, i64::MAX);
    assert_eq!(loaded.insert_not_owned, -1);
}
#[test]
fn report_serde_roundtrip() {
    // A report holding one minimal ARRAY map must survive a JSON round
    // trip. Default::default() supplies schema (SCHEMA_SINGLE) and every
    // other empty section — see dual_report_schema_distinguishes_from_single.
    let report = FailureDumpReport {
        maps: vec![FailureDumpMap {
            name: "scx_demo.bss".into(),
            map_type: BPF_MAP_TYPE_ARRAY,
            value_size: 8,
            max_entries: 1,
            value: Some(RenderedValue::Uint { bits: 32, value: 42 }),
            entries: Vec::new(),
            percpu_entries: Vec::new(),
            percpu_hash_entries: Vec::new(),
            arena: None,
            ringbuf: None,
            stack_trace: None,
            fd_array: None,
            error: None,
        }],
        ..Default::default()
    };
    let json = serde_json::to_string(&report).unwrap();
    let parsed: FailureDumpReport = serde_json::from_str(&json).unwrap();
    assert_eq!(parsed.maps.len(), 1);
    assert_eq!(parsed.maps[0].name, "scx_demo.bss");
    assert_eq!(parsed.maps[0].max_entries, 1);
}
#[test]
fn empty_report_serde() {
    // A fully-default report serializes and parses back with no maps.
    let json = serde_json::to_string(&FailureDumpReport::default()).unwrap();
    let parsed: FailureDumpReport = serde_json::from_str(&json).unwrap();
    assert!(parsed.maps.is_empty());
}
/// Builds a one-entry ARRAY map whose value is a single-member struct
/// (`task_ctx { weight = 1024 }`) — the shared fixture for the Display
/// tests below.
fn make_simple_map() -> FailureDumpMap {
    let weight = super::super::btf_render::RenderedMember {
        name: "weight".into(),
        value: RenderedValue::Uint { bits: 32, value: 1024 },
    };
    FailureDumpMap {
        name: "scx_demo.bss".into(),
        map_type: BPF_MAP_TYPE_ARRAY,
        value_size: 8,
        max_entries: 1,
        value: Some(RenderedValue::Struct {
            type_name: Some("task_ctx".into()),
            members: vec![weight],
        }),
        entries: Vec::new(),
        percpu_entries: Vec::new(),
        percpu_hash_entries: Vec::new(),
        arena: None,
        ringbuf: None,
        stack_trace: None,
        fd_array: None,
        error: None,
    }
}
#[test]
fn report_display_empty() {
    // A fully-default report renders the explicit empty-dump marker.
    let rendered = FailureDumpReport::default().to_string();
    assert_eq!(rendered, "(empty failure dump)");
}
#[test]
fn report_display_one_map_with_value() {
    // Header line, struct body, member rendering, and the trailing brace
    // must all appear for a single-map report.
    let report = FailureDumpReport {
        maps: vec![make_simple_map()],
        ..Default::default()
    };
    let out = report.to_string();
    assert!(
        out.starts_with("map scx_demo.bss (type="),
        "missing header: {out}"
    );
    assert!(out.contains("task_ctx{"), "missing struct: {out}");
    assert!(out.contains("weight=1024"), "missing member: {out}");
    assert!(out.ends_with('}'), "missing closing brace: {out}");
}
#[test]
fn report_display_multiple_maps_separated() {
    // Exactly one blank line must separate the two map sections.
    let report = FailureDumpReport {
        maps: vec![make_simple_map(), make_simple_map()],
        ..Default::default()
    };
    let out = report.to_string();
    assert_eq!(
        out.matches("\n\n").count(),
        1,
        "expected one blank-line separator between two maps: {out}"
    );
}
#[test]
fn map_display_includes_error_marker() {
    // A map with no rendered value but a recorded error must surface the
    // bracketed error marker in its Display output.
    let mut map = make_simple_map();
    map.value = None;
    map.error = Some("ARRAY value region unreadable".into());
    let out = map.to_string();
    assert!(
        out.contains("[error: ARRAY value region unreadable]"),
        "missing error marker: {out}"
    );
}
#[test]
fn entry_display_renders_key_and_value() {
    // When BTF-rendered key and value are both present, both appear in
    // typed (non-hex) form.
    let entry = FailureDumpEntry {
        key: Some(RenderedValue::Uint { bits: 32, value: 7 }),
        key_hex: "07 00 00 00".into(),
        value: Some(RenderedValue::Uint { bits: 32, value: 99 }),
        value_hex: "63 00 00 00".into(),
        payload: None,
    };
    let out = entry.to_string();
    assert!(out.contains("key=7"), "missing key: {out}");
    assert!(out.contains("value: 99"), "missing value: {out}");
}
#[test]
fn entry_display_falls_back_to_hex_when_no_btf() {
    // Without BTF rendering, the raw hex plus a "(raw)" marker is shown
    // for both key and value.
    let entry = FailureDumpEntry {
        key: None,
        key_hex: "ab cd".into(),
        value: None,
        value_hex: "ef".into(),
        payload: None,
    };
    let out = entry.to_string();
    for (needle, what) in [("ab cd (raw)", "key hex"), ("ef (raw)", "value hex")] {
        assert!(out.contains(needle), "missing {what}: {out}");
    }
}
#[test]
fn percpu_entry_display_shows_each_cpu() {
    // Mapped CPUs render their value; unmapped slots render <unmapped>.
    let entry = FailureDumpPercpuEntry {
        key: 0,
        per_cpu: vec![
            Some(RenderedValue::Uint { bits: 32, value: 1 }),
            None,
            Some(RenderedValue::Uint { bits: 32, value: 3 }),
        ],
    };
    let out = entry.to_string();
    for needle in ["key 0:", "cpu 0: 1", "cpu 1: <unmapped>", "cpu 2: 3"] {
        assert!(out.contains(needle), "missing {needle:?}: {out}");
    }
}
#[test]
fn report_display_includes_vcpu_regs_section() {
    // Fixture: a register snapshot with only the three mandatory fields.
    let regs = |ip, sp, root| {
        Some(VcpuRegSnapshot {
            instruction_pointer: ip,
            stack_pointer: sp,
            page_table_root: root,
            user_page_table_root: None,
            tcr_el1: None,
        })
    };
    let report = FailureDumpReport {
        vcpu_regs: vec![regs(0x1, 0x2, 0x3), None, regs(0xa, 0xb, 0xc)],
        ..Default::default()
    };
    let out = report.to_string();
    // Section header first, then one row per vcpu slot; absent slots
    // render the <unavailable> marker.
    assert!(out.starts_with("vcpu_regs:"), "missing header: {out}");
    assert!(out.contains("vcpu 0: ip=0x"), "missing vcpu 0: {out}");
    assert!(
        out.contains("vcpu 1: <unavailable>"),
        "missing vcpu 1 marker: {out}"
    );
    assert!(out.contains("vcpu 2: ip=0x"), "missing vcpu 2: {out}");
}
#[test]
fn report_display_pairs_maps_and_vcpu_regs_with_blank_line() {
    // When maps and vcpu_regs are both present, the two sections are
    // joined by a single blank line.
    let report = FailureDumpReport {
        maps: vec![make_simple_map()],
        vcpu_regs: vec![Some(VcpuRegSnapshot {
            instruction_pointer: 0x1,
            stack_pointer: 0x2,
            page_table_root: 0x3,
            user_page_table_root: None,
            tcr_el1: None,
        })],
        ..Default::default()
    };
    assert!(report.to_string().contains("\n\nvcpu_regs:"));
}
#[test]
fn report_display_empty_with_only_vcpu_regs_does_not_say_empty_dump() {
    // One unavailable vcpu slot still counts as content: the Display
    // output is exactly the vcpu_regs section, never the empty fallback.
    let report = FailureDumpReport {
        vcpu_regs: vec![None],
        ..Default::default()
    };
    assert_eq!(report.to_string(), "vcpu_regs:\n vcpu 0: <unavailable>");
}
#[test]
fn report_display_partial_with_populated_regs_and_empty_maps() {
    // Populated regs with empty maps: Display must render the regs
    // section (not the empty fallback), and JSON must keep `maps` as an
    // explicit empty array alongside the vcpu_regs key.
    let report = FailureDumpReport {
        vcpu_regs: vec![Some(VcpuRegSnapshot {
            instruction_pointer: 0xdead,
            stack_pointer: 0xbeef,
            page_table_root: 0xcafe,
            user_page_table_root: None,
            tcr_el1: None,
        })],
        ..Default::default()
    };
    let out = report.to_string();
    assert!(
        out.contains("vcpu_regs:"),
        "Display must contain the vcpu_regs section: {out}"
    );
    assert!(
        out.contains("vcpu 0: ip=0x"),
        "Display must render the BSP register row: {out}"
    );
    assert!(
        !out.contains("(empty failure dump)"),
        "Display must NOT fall through to empty fallback when \
         vcpu_regs is populated: {out}"
    );
    let json = serde_json::to_string(&report).expect("serialize");
    assert!(
        json.contains("\"maps\":[]"),
        "JSON must carry empty `maps` array (not skip): {json}"
    );
    assert!(
        json.contains("\"vcpu_regs\""),
        "JSON must carry vcpu_regs key: {json}"
    );
}
#[test]
fn dual_report_serde_roundtrip_with_early() {
    // Early snapshot gets one vcpu slot, late gets two, so the halves
    // stay distinguishable after the round trip.
    let early = FailureDumpReport {
        vcpu_regs: vec![None],
        ..Default::default()
    };
    let late = FailureDumpReport {
        vcpu_regs: vec![None, None],
        ..Default::default()
    };
    let dual = DualFailureDumpReport {
        schema: SCHEMA_DUAL.to_string(),
        early: Some(early),
        late,
        early_max_age_jiffies: 1234,
        early_threshold_jiffies: 600,
        early_skipped_reason: None,
    };
    let json = serde_json::to_string(&dual).unwrap();
    let parsed: DualFailureDumpReport = serde_json::from_str(&json).unwrap();
    assert_eq!(parsed.schema, SCHEMA_DUAL);
    assert!(parsed.early.is_some(), "early must roundtrip: {json}");
    assert_eq!(parsed.early_max_age_jiffies, 1234);
    assert_eq!(parsed.early_threshold_jiffies, 600);
    assert_eq!(parsed.late.vcpu_regs.len(), 2);
}
#[test]
fn dual_report_serde_skips_zero_jiffies_fields() {
    // Zero jiffies values must not hit the wire at all (serde skip).
    let dual = DualFailureDumpReport {
        schema: SCHEMA_DUAL.to_string(),
        early: None,
        late: FailureDumpReport::default(),
        early_max_age_jiffies: 0,
        early_threshold_jiffies: 0,
        early_skipped_reason: None,
    };
    let json = serde_json::to_string(&dual).unwrap();
    for field in ["early_max_age_jiffies", "early_threshold_jiffies"] {
        assert!(!json.contains(field), "zero {field} must skip: {json}");
    }
}
#[test]
fn dual_report_serde_emits_nonzero_jiffies_fields() {
    // Non-zero jiffies values must land on the wire verbatim.
    let dual = DualFailureDumpReport {
        schema: SCHEMA_DUAL.to_string(),
        early: Some(FailureDumpReport::default()),
        late: FailureDumpReport::default(),
        early_max_age_jiffies: 4096,
        early_threshold_jiffies: 2048,
        early_skipped_reason: None,
    };
    let json = serde_json::to_string(&dual).unwrap();
    for (needle, why) in [
        ("\"early_max_age_jiffies\":4096", "non-zero max_age must serialize"),
        ("\"early_threshold_jiffies\":2048", "non-zero threshold must serialize"),
    ] {
        assert!(json.contains(needle), "{why}: {json}");
    }
}
#[test]
fn dual_report_schema_distinguishes_from_single() {
    // The two report shapes must carry distinct schema tags on the wire.
    let single_json = serde_json::to_string(&FailureDumpReport::default()).unwrap();
    let single_tag = format!("\"schema\":\"{SCHEMA_SINGLE}\"");
    assert!(
        single_json.contains(&single_tag),
        "single carries schema='single': {single_json}"
    );
    let dual = DualFailureDumpReport {
        schema: SCHEMA_DUAL.to_string(),
        early: None,
        late: FailureDumpReport::default(),
        early_max_age_jiffies: 0,
        early_threshold_jiffies: 0,
        early_skipped_reason: None,
    };
    let dual_json = serde_json::to_string(&dual).unwrap();
    let dual_tag = format!("\"schema\":\"{SCHEMA_DUAL}\"");
    assert!(
        dual_json.contains(&dual_tag),
        "dual carries schema='dual': {dual_json}"
    );
    assert_ne!(SCHEMA_SINGLE, SCHEMA_DUAL);
}
#[test]
fn dual_report_display_present_carries_jiffies() {
    // Presence marker plus both jiffies figures must be surfaced.
    let dual = DualFailureDumpReport {
        schema: SCHEMA_DUAL.to_string(),
        early: Some(FailureDumpReport::default()),
        late: FailureDumpReport::default(),
        early_max_age_jiffies: 9001,
        early_threshold_jiffies: 4500,
        early_skipped_reason: None,
    };
    let s = dual.to_string();
    for (needle, why) in [
        ("early=present", "Display must say early=present"),
        ("max_age=9001j", "Display must surface max_age"),
        ("threshold=4500j", "Display must surface threshold"),
    ] {
        assert!(s.contains(needle), "{why}: {s}");
    }
}
#[test]
fn dual_report_display_absent_names_both_causes() {
    // An absent early dump must explain itself: both plausible causes and
    // the logging knob that reveals which one happened.
    let dual = DualFailureDumpReport {
        schema: SCHEMA_DUAL.to_string(),
        early: None,
        late: FailureDumpReport::default(),
        early_max_age_jiffies: 0,
        early_threshold_jiffies: 0,
        early_skipped_reason: None,
    };
    let s = dual.to_string();
    for (needle, why) in [
        ("early=absent", "Display must say early=absent"),
        (
            "stall fired before half-way threshold",
            "Display must name the threshold-not-reached cause",
        ),
        (
            "runnable_at scan setup failed",
            "Display must name the scan-setup-failure cause",
        ),
        (
            "RUST_LOG=ktstr=debug",
            "Display must point at the RUST_LOG knob for diagnostics",
        ),
    ] {
        assert!(s.contains(needle), "{why}: {s}");
    }
}
#[test]
fn report_any_dispatch_branches() {
    // schema=single dispatches to the Single variant.
    let single_json =
        serde_json::to_string(&FailureDumpReport::default()).expect("serialize single");
    assert!(
        matches!(
            FailureDumpReportAny::from_json(&single_json),
            Some(FailureDumpReportAny::Single(_))
        ),
        "schema=single must map to Single"
    );
    // schema=dual dispatches to the Dual variant.
    let dual = DualFailureDumpReport {
        schema: SCHEMA_DUAL.to_string(),
        early: None,
        late: FailureDumpReport::default(),
        early_max_age_jiffies: 0,
        early_threshold_jiffies: 0,
        early_skipped_reason: None,
    };
    let dual_json = serde_json::to_string(&dual).expect("serialize dual");
    assert!(
        matches!(
            FailureDumpReportAny::from_json(&dual_json),
            Some(FailureDumpReportAny::Dual(_))
        ),
        "schema=dual must map to Dual"
    );
    // An absent schema key defaults to Single (legacy wire format).
    let absent = r#"{"maps":[],"vcpu_regs":[],"sdt_allocations":[]}"#;
    assert!(
        matches!(
            FailureDumpReportAny::from_json(absent),
            Some(FailureDumpReportAny::Single(_))
        ),
        "absent schema must default to Single"
    );
    // Unknown schema and garbage input both reject with None.
    let unknown = r#"{"schema":"triple","maps":[],"vcpu_regs":[],"sdt_allocations":[]}"#;
    assert!(
        FailureDumpReportAny::from_json(unknown).is_none(),
        "unknown schema must return None, not silent fallback"
    );
    assert!(
        FailureDumpReportAny::from_json("not json").is_none(),
        "garbage input must return None"
    );
}
#[test]
fn report_any_preserves_prog_runtime_stats() {
    use super::super::bpf_prog::ProgRuntimeStats;
    // One ordinary entry plus one saturated-at-u64::MAX entry.
    let report = FailureDumpReport {
        prog_runtime_stats: vec![
            ProgRuntimeStats {
                name: "ktstr_enqueue".to_string(),
                cnt: 1_500,
                nsecs: 7_500_000,
                misses: 2,
            },
            ProgRuntimeStats {
                name: "ktstr_dispatch".to_string(),
                cnt: u64::MAX,
                nsecs: u64::MAX,
                misses: u64::MAX,
            },
        ],
        ..Default::default()
    };
    let json = serde_json::to_string(&report).expect("serialize");
    match FailureDumpReportAny::from_json(&json) {
        Some(FailureDumpReportAny::Single(loaded)) => {
            let stats = &loaded.prog_runtime_stats;
            assert_eq!(stats.len(), 2);
            assert_eq!(stats[0].name, "ktstr_enqueue");
            assert_eq!(stats[0].cnt, 1_500);
            assert_eq!(stats[0].nsecs, 7_500_000);
            assert_eq!(stats[0].misses, 2);
            assert_eq!(stats[1].name, "ktstr_dispatch");
            assert_eq!(stats[1].cnt, u64::MAX);
            assert_eq!(stats[1].nsecs, u64::MAX);
            assert_eq!(stats[1].misses, u64::MAX);
        }
        other => panic!(
            "populated single report must round-trip Single, got {:?}",
            other.is_some()
        ),
    }
}
#[test]
fn report_any_display_matches_underlying() {
    // Wrapping in FailureDumpReportAny must not alter Display output for
    // either variant.
    let single = FailureDumpReport::default();
    let single_direct = single.to_string();
    let single_via_any = FailureDumpReportAny::Single(Box::new(single)).to_string();
    assert_eq!(single_direct, single_via_any);
    let dual = DualFailureDumpReport {
        schema: SCHEMA_DUAL.to_string(),
        early: Some(FailureDumpReport::default()),
        late: FailureDumpReport::default(),
        early_max_age_jiffies: 42,
        early_threshold_jiffies: 21,
        early_skipped_reason: None,
    };
    let dual_direct = dual.to_string();
    let dual_via_any = FailureDumpReportAny::Dual(Box::new(dual)).to_string();
    assert_eq!(dual_direct, dual_via_any);
}
#[test]
fn prog_runtime_stats_serde_roundtrip_with_saturation() {
    use super::super::bpf_prog::ProgRuntimeStats;
    // One ordinary entry plus one saturated-at-u64::MAX entry must both
    // survive a JSON round trip exactly.
    let report = FailureDumpReport {
        prog_runtime_stats: vec![
            ProgRuntimeStats {
                name: "dispatch".to_string(),
                cnt: 12345,
                nsecs: 67890,
                misses: 3,
            },
            ProgRuntimeStats {
                name: "saturated".to_string(),
                cnt: u64::MAX,
                nsecs: u64::MAX,
                misses: u64::MAX,
            },
        ],
        ..Default::default()
    };
    let json = serde_json::to_string(&report).expect("serialize");
    let parsed: FailureDumpReport = serde_json::from_str(&json).expect("deserialize");
    let stats = &parsed.prog_runtime_stats;
    assert_eq!(stats.len(), 2);
    assert_eq!(stats[0].name, "dispatch");
    assert_eq!(stats[0].cnt, 12345);
    assert_eq!(stats[0].nsecs, 67890);
    assert_eq!(stats[0].misses, 3);
    assert_eq!(stats[1].cnt, u64::MAX);
    assert_eq!(stats[1].nsecs, u64::MAX);
    assert_eq!(stats[1].misses, u64::MAX);
}
#[test]
fn prog_runtime_stats_empty_skips_serialization() {
    // An empty stats vec is skipped entirely, keeping the wire compact.
    let json = serde_json::to_string(&FailureDumpReport::default()).expect("serialize");
    assert!(
        !json.contains("prog_runtime_stats"),
        "empty prog_runtime_stats must be skipped: {json}"
    );
}
#[test]
fn report_display_renders_prog_runtime_stats() {
    use super::super::bpf_prog::ProgRuntimeStats;
    // Section header plus one formatted line per program.
    let report = FailureDumpReport {
        prog_runtime_stats: vec![
            ProgRuntimeStats {
                name: "dispatch".to_string(),
                cnt: 5,
                nsecs: 1234,
                misses: 0,
            },
            ProgRuntimeStats {
                name: "enqueue".to_string(),
                cnt: 99,
                nsecs: 9999,
                misses: 7,
            },
        ],
        ..Default::default()
    };
    let out = report.to_string();
    assert!(
        out.contains("prog_runtime_stats:"),
        "Display must render the prog_runtime_stats section: {out}"
    );
    assert!(
        out.contains("dispatch: cnt=5 nsecs=1234 misses=0"),
        "Display must render first program line: {out}"
    );
    assert!(
        out.contains("enqueue: cnt=99 nsecs=9999 misses=7"),
        "Display must render second program line: {out}"
    );
}
#[test]
fn report_display_only_prog_runtime_stats_does_not_say_empty_dump() {
    use super::super::bpf_prog::ProgRuntimeStats;
    // A report whose only content is prog_runtime_stats must lead with
    // that section and never print the empty-dump fallback.
    let report = FailureDumpReport {
        prog_runtime_stats: vec![ProgRuntimeStats {
            name: "lone".to_string(),
            cnt: 1,
            nsecs: 2,
            misses: 0,
        }],
        ..Default::default()
    };
    let out = report.to_string();
    assert!(
        !out.contains("(empty failure dump)"),
        "Display must NOT fall through to empty fallback when \
         prog_runtime_stats is populated: {out}"
    );
    assert!(
        out.starts_with("prog_runtime_stats:"),
        "Display must lead with prog_runtime_stats section when \
         only that field is populated: {out}"
    );
}
#[test]
fn reason_no_struct_ops_loaded_string_pinned() {
    // Wire-format pin: downstream consumers match this text verbatim.
    let pinned = "no struct_ops programs loaded";
    assert_eq!(REASON_NO_STRUCT_OPS_LOADED, pinned);
}
#[test]
fn reason_prog_accessor_unavailable_string_pinned() {
    // Wire-format pin: downstream consumers match this text verbatim.
    let pinned = "prog accessor unavailable";
    assert_eq!(REASON_PROG_ACCESSOR_UNAVAILABLE, pinned);
}
#[test]
fn reason_task_walker_zero_tasks_string_pinned() {
    // Wire-format pin: downstream consumers match this text verbatim.
    let pinned = "task walker yielded zero tasks";
    assert_eq!(REASON_TASK_WALKER_ZERO_TASKS, pinned);
}
#[test]
fn reason_no_task_walker_string_pinned() {
    // Wire-format pin: downstream consumers match this text verbatim.
    let pinned = "no task walker available";
    assert_eq!(REASON_NO_TASK_WALKER, pinned);
}
#[test]
fn reason_scx_walker_no_state_string_pinned() {
    // Wire-format pin: downstream consumers match this text verbatim.
    let pinned = "scx walker reached no state";
    assert_eq!(REASON_SCX_WALKER_NO_STATE, pinned);
}
#[test]
fn reason_scx_root_null_string_pinned() {
    // Wire-format pin: downstream consumers match this text verbatim.
    let pinned = "scx_root is NULL (no scheduler attached)";
    assert_eq!(REASON_SCX_ROOT_NULL, pinned);
}
#[test]
fn reason_no_scx_walker_string_pinned() {
    // Wire-format pin: downstream consumers match this text verbatim.
    let pinned = "no scx walker capture";
    assert_eq!(REASON_NO_SCX_WALKER, pinned);
}
#[test]
fn reason_strings_round_trip_through_serde() {
    let report = FailureDumpReport {
        prog_runtime_stats_unavailable: Some(REASON_NO_STRUCT_OPS_LOADED.to_string()),
        task_enrichments_unavailable: Some(REASON_TASK_WALKER_ZERO_TASKS.to_string()),
        scx_walker_unavailable: Some(REASON_SCX_WALKER_NO_STATE.to_string()),
        ..Default::default()
    };
    let json = serde_json::to_string(&report).expect("serialize");
    // Each reason string must appear verbatim on the wire...
    for (reason, what) in [
        (REASON_NO_STRUCT_OPS_LOADED, "prog"),
        (REASON_TASK_WALKER_ZERO_TASKS, "task"),
        (REASON_SCX_WALKER_NO_STATE, "scx"),
    ] {
        assert!(
            json.contains(reason),
            "JSON must contain {what} reason verbatim: {json}"
        );
    }
    // ...and parse back into the same Option fields.
    let loaded: FailureDumpReport = serde_json::from_str(&json).expect("deserialize");
    assert_eq!(
        loaded.prog_runtime_stats_unavailable.as_deref(),
        Some(REASON_NO_STRUCT_OPS_LOADED),
    );
    assert_eq!(
        loaded.task_enrichments_unavailable.as_deref(),
        Some(REASON_TASK_WALKER_ZERO_TASKS),
    );
    assert_eq!(
        loaded.scx_walker_unavailable.as_deref(),
        Some(REASON_SCX_WALKER_NO_STATE),
    );
}
#[test]
fn failure_dump_report_strict_schema_maps_required() {
    // `maps` is the only required field: deleting it from otherwise-valid
    // JSON must make deserialization fail with an error naming the field.
    let mut full = match serde_json::to_value(FailureDumpReport::default()).unwrap() {
        serde_json::Value::Object(obj) => obj,
        other => panic!("expected object, got {other:?}"),
    };
    assert!(
        full.remove("maps").is_some(),
        "FailureDumpReport must emit `maps` for this test to be \
         meaningful — the field has been renamed or removed",
    );
    let json = serde_json::Value::Object(full).to_string();
    let err = serde_json::from_str::<FailureDumpReport>(&json)
        .expect_err("deserialize must reject FailureDumpReport with `maps` removed");
    let msg = err.to_string();
    assert!(
        msg.contains("maps"),
        "missing-field error for `maps` must name the field; got: {msg}",
    );
}
#[test]
fn failure_dump_report_optional_fields_round_trip_when_omitted() {
    // Minimal wire form: only `maps`. Every other field must default —
    // collections empty, Options None, schema filled in by serde default.
    let report: FailureDumpReport = serde_json::from_value(serde_json::json!({ "maps": [] }))
        .expect("deserialize must accept FailureDumpReport with only `maps`");
    assert_eq!(
        report.schema, SCHEMA_SINGLE,
        "absent `schema` field must default to SCHEMA_SINGLE \
         (default_schema_single fn); got: {:?}",
        report.schema,
    );
    assert!(report.maps.is_empty());
    assert!(report.vcpu_regs.is_empty());
    assert!(report.sdt_allocations.is_empty());
    assert!(report.prog_runtime_stats.is_empty());
    assert!(report.prog_runtime_stats_unavailable.is_none());
    assert!(report.per_cpu_time.is_empty());
    assert!(report.per_node_numa.is_empty());
    assert!(report.per_node_numa_unavailable.is_none());
    assert!(report.task_enrichments.is_empty());
    assert!(report.task_enrichments_unavailable.is_none());
    assert!(report.event_counter_timeline.is_empty());
    assert!(report.rq_scx_states.is_empty());
    assert!(report.dsq_states.is_empty());
    assert!(report.scx_sched_state.is_none());
    assert!(report.scx_walker_unavailable.is_none());
    assert!(report.vcpu_perf_at_freeze.is_empty());
}
#[test]
fn pinned_error_arena_btf_offsets_unavailable() {
    // NOTE(review): both sides of this comparison are literals defined in
    // this test, so it cannot detect drift in the production string —
    // presumably it exists to force a conscious edit here whenever the
    // message changes; confirm against the renderer's actual literal.
    let rendered: String = "arena BTF offsets unavailable (kernel lacks struct bpf_arena?)".into();
    assert_eq!(
        rendered, "arena BTF offsets unavailable (kernel lacks struct bpf_arena?)",
        "arena-unavailable error string drifted from pin",
    );
}
#[test]
fn pinned_error_multi_entry_array_truncation() {
    // Pin the truncation message template for multi-entry ARRAY maps.
    let n: u32 = 7;
    let expected = "multi-entry ARRAY: only key 0 of 7 shown";
    let rendered = format!("multi-entry ARRAY: only key 0 of {n} shown");
    assert_eq!(
        rendered, expected,
        "multi-entry ARRAY truncation string drifted from pin",
    );
}
#[test]
fn pinned_error_hash_map_truncation() {
    // Pins both the message template and MAX_HASH_ENTRIES == 4096.
    let expected = "hash map truncated at 4096 entries";
    let rendered = format!("hash map truncated at {MAX_HASH_ENTRIES} entries");
    assert_eq!(
        rendered, expected,
        "hash map truncation string OR MAX_HASH_ENTRIES drifted from pin",
    );
}
#[test]
fn pinned_error_percpu_array_truncation() {
    // Pins both the message template and MAX_PERCPU_KEYS == 256.
    let max_entries: u32 = 999;
    let expected = "PERCPU_ARRAY truncated at 256 keys (max_entries=999)";
    let rendered =
        format!("PERCPU_ARRAY truncated at {MAX_PERCPU_KEYS} keys (max_entries={max_entries})");
    assert_eq!(
        rendered, expected,
        "PERCPU_ARRAY truncation string OR MAX_PERCPU_KEYS drifted from pin",
    );
}
#[test]
fn pinned_error_unknown_map_type() {
    // Pins the fallback message for map types newer than the renderer.
    let other: u32 = 42;
    let expected =
        "unknown map_type 42 (kernel newer than dump renderer; update render_map dispatch)";
    let rendered = format!(
        "unknown map_type {other} (kernel newer than dump renderer; \
         update render_map dispatch)"
    );
    assert_eq!(rendered, expected, "unknown-map-type string drifted from pin",);
}
#[test]
fn pinned_error_local_storage_truncation() {
    // Pins the local_storage truncation template and MAX_HASH_ENTRIES == 4096.
    let expected = "local_storage map truncated at 4096 entries";
    let rendered = format!("local_storage map truncated at {MAX_HASH_ENTRIES} entries");
    assert_eq!(
        rendered, expected,
        "local_storage truncation string OR MAX_HASH_ENTRIES drifted",
    );
}
#[test]
fn per_node_numa_stats_serde_roundtrip() {
    // Every counter field must survive a JSON round trip unchanged.
    let original = PerNodeNumaStats {
        node: 1,
        numa_hit: 1_000_000,
        numa_miss: 100,
        numa_foreign: 50,
        numa_interleave_hit: 200,
        numa_local: 999_900,
        numa_other: 100,
    };
    let json = serde_json::to_string(&original).unwrap();
    let parsed: PerNodeNumaStats = serde_json::from_str(&json).unwrap();
    assert_eq!(parsed.node, original.node);
    assert_eq!(parsed.numa_hit, original.numa_hit);
    assert_eq!(parsed.numa_miss, original.numa_miss);
    assert_eq!(parsed.numa_foreign, original.numa_foreign);
    assert_eq!(parsed.numa_interleave_hit, original.numa_interleave_hit);
    assert_eq!(parsed.numa_local, original.numa_local);
    assert_eq!(parsed.numa_other, original.numa_other);
}
#[test]
fn per_node_numa_empty_skips_serialization() {
    // An empty per_node_numa vec must not appear on the wire at all.
    let json = serde_json::to_string(&FailureDumpReport::default()).unwrap();
    assert!(
        !json.contains("per_node_numa"),
        "empty per_node_numa must be skipped: {json}",
    );
}
#[test]
fn per_node_numa_populated_lands_in_wire() {
    // Once populated, the key appears on the wire and the counter value
    // survives serialization.
    let report = FailureDumpReport {
        per_node_numa: vec![PerNodeNumaStats {
            node: 0,
            numa_hit: 42,
            ..Default::default()
        }],
        ..Default::default()
    };
    let json = serde_json::to_string(&report).unwrap();
    assert!(
        json.contains("\"per_node_numa\""),
        "populated per_node_numa must appear on wire: {json}",
    );
    assert!(
        json.contains("\"numa_hit\":42"),
        "field value must round-trip: {json}",
    );
}
#[test]
fn reason_no_numa_walker_string_pinned() {
    // Wire-format pin: downstream consumers match this text verbatim.
    let pinned = "no NUMA walker (host-side walker pending)";
    assert_eq!(REASON_NO_NUMA_WALKER, pinned);
}
#[test]
fn percpu_hash_entry_display_shows_key_and_cpus() {
    // Header carries the BTF-rendered key; each CPU slot renders its
    // value, or <unmapped> when the slot is None.
    let entry = FailureDumpPercpuHashEntry {
        key: Some(RenderedValue::Uint { bits: 32, value: 7 }),
        key_hex: "07 00 00 00".into(),
        per_cpu: vec![
            Some(RenderedValue::Uint { bits: 32, value: 100 }),
            None,
            Some(RenderedValue::Uint { bits: 32, value: 300 }),
        ],
    };
    let out = entry.to_string();
    assert!(out.starts_with("entry: key="), "entry header: {out}");
    for (needle, what) in [
        ("entry: key=7", "rendered key"),
        ("cpu 0: 100", "cpu 0 value"),
        ("cpu 1: <unmapped>", "cpu 1 unmapped"),
        ("cpu 2: 300", "cpu 2 value"),
    ] {
        assert!(out.contains(needle), "{what}: {out}");
    }
}
// With no BTF-rendered key (key: None), Display must fall back to the raw
// hex dump and mark it with "(raw)".
#[test]
fn percpu_hash_entry_display_falls_back_to_hex_when_no_btf() {
let entry = FailureDumpPercpuHashEntry {
key: None,
key_hex: "ab cd ef 01".into(),
per_cpu: vec![Some(RenderedValue::Uint { bits: 32, value: 1 })],
};
let out = format!("{entry}");
assert!(
out.contains("ab cd ef 01 (raw)"),
"raw hex with (raw) marker: {out}",
);
}
// Every CPU slot being None must still render one <unmapped> line per CPU.
#[test]
fn percpu_hash_entry_display_all_unmapped_cpus() {
let entry = FailureDumpPercpuHashEntry {
key: Some(RenderedValue::Uint { bits: 32, value: 0 }),
key_hex: "00 00 00 00".into(),
per_cpu: vec![None, None, None],
};
let out = format!("{entry}");
assert!(out.contains("cpu 0: <unmapped>"));
assert!(out.contains("cpu 1: <unmapped>"));
assert!(out.contains("cpu 2: <unmapped>"));
}
// An empty per_cpu vector renders only the header and no "cpu " lines.
#[test]
fn percpu_hash_entry_display_empty_per_cpu() {
let entry = FailureDumpPercpuHashEntry {
key: Some(RenderedValue::Uint { bits: 32, value: 0 }),
key_hex: "00 00 00 00".into(),
per_cpu: vec![],
};
let out = format!("{entry}");
assert!(out.starts_with("entry: key="), "header: {out}");
assert!(!out.contains("cpu "), "no cpu lines: {out}");
}
// FailureDumpPercpuHashEntry must round-trip through JSON, preserving the
// Some/None shape of each per-CPU slot.
#[test]
fn percpu_hash_entry_serde_roundtrip() {
let entry = FailureDumpPercpuHashEntry {
key: Some(RenderedValue::Uint {
bits: 32,
value: 42,
}),
key_hex: "2a 00 00 00".into(),
per_cpu: vec![
Some(RenderedValue::Uint {
bits: 32,
value: 100,
}),
None,
],
};
let json = serde_json::to_string(&entry).expect("serialize");
let parsed: FailureDumpPercpuHashEntry = serde_json::from_str(&json).expect("deserialize");
assert!(parsed.key.is_some());
assert_eq!(parsed.key_hex, "2a 00 00 00");
assert_eq!(parsed.per_cpu.len(), 2);
assert!(parsed.per_cpu[0].is_some());
assert!(parsed.per_cpu[1].is_none());
}
// A None key must be elided from the JSON wire format entirely.
#[test]
fn percpu_hash_entry_key_skips_when_none() {
let entry = FailureDumpPercpuHashEntry {
key: None,
key_hex: "00".into(),
per_cpu: vec![],
};
let json = serde_json::to_string(&entry).unwrap();
assert!(
!json.contains("\"key\":"),
"None key must skip on wire: {json}",
);
}
// Display for a whole PERCPU_HASH map: the map header plus each entry's
// key and per-CPU value lines must appear in the output.
#[test]
fn map_display_percpu_hash_entries_render() {
let m = FailureDumpMap {
name: "percpu_hash".into(),
map_type: BPF_MAP_TYPE_PERCPU_HASH,
value_size: 4,
max_entries: 100,
value: None,
entries: Vec::new(),
percpu_entries: Vec::new(),
percpu_hash_entries: vec![FailureDumpPercpuHashEntry {
key: Some(RenderedValue::Uint { bits: 32, value: 1 }),
key_hex: "01 00 00 00".into(),
per_cpu: vec![
Some(RenderedValue::Uint {
bits: 32,
value: 10,
}),
Some(RenderedValue::Uint {
bits: 32,
value: 20,
}),
],
}],
arena: None,
ringbuf: None,
stack_trace: None,
fd_array: None,
error: None,
};
let out = format!("{m}");
assert!(out.contains("map percpu_hash (type="), "header: {out}");
assert!(out.contains("entry: key=1"), "key surfaces: {out}");
assert!(out.contains("cpu 0: 10"), "cpu 0: {out}");
assert!(out.contains("cpu 1: 20"), "cpu 1: {out}");
}
// Pins the percpu-hash truncation message wording together with
// MAX_HASH_ENTRIES, mirroring pinned_error_local_storage_truncation.
#[test]
fn pinned_error_percpu_hash_truncation() {
let rendered = format!("percpu hash map truncated at {MAX_HASH_ENTRIES} entries");
assert_eq!(
rendered, "percpu hash map truncated at 4096 entries",
"percpu hash truncation string OR MAX_HASH_ENTRIES drifted",
);
}
// NOTE(review): the six tests below assign the expected literal to
// `rendered` via `.into()` and then compare it with (a copy of) itself, so
// they are tautologies that can never fail and do not actually pin the
// production renderer's strings. To make them meaningful, compare against
// the constant or code path in render_map that produces each message —
// TODO confirm where those strings live and wire them in.
#[test]
fn pinned_error_struct_ops_offsets_unresolved() {
let expected = "STRUCT_OPS value unreadable: bpf_struct_ops_map BTF offsets unresolved \
(kernel without struct_ops support, or vmlinux BTF stripped of \
bpf_struct_ops_map / bpf_struct_ops_value).";
let rendered: String = expected.into();
assert_eq!(
rendered, expected,
"STRUCT_OPS offsets-unresolved error string drifted",
);
}
#[test]
fn pinned_error_struct_ops_region_unmapped() {
let expected = "STRUCT_OPS value unreadable: value region unmapped. Live-host \
backend reads via BPF_MAP_LOOKUP_ELEM at key=0.";
let rendered: String = expected.into();
assert_eq!(
rendered, expected,
"STRUCT_OPS region-unmapped error string drifted",
);
}
#[test]
fn pinned_error_cgroup_storage_deprecated() {
let rendered: String =
"deprecated cgroup-attached storage; use CGRP_STORAGE on newer kernels".into();
assert_eq!(
rendered,
"deprecated cgroup-attached storage; use CGRP_STORAGE on newer kernels",
);
}
#[test]
fn pinned_error_queue_stack_destructive() {
let expected = "QUEUE/STACK are destructive (peek shows only the head; pop consumes); \
no enumeration API";
let rendered: String = expected.into();
assert_eq!(rendered, expected);
}
#[test]
fn pinned_error_bloom_filter() {
let rendered: String =
"BLOOM_FILTER is a probabilistic set; no key enumeration is possible".into();
assert_eq!(
rendered,
"BLOOM_FILTER is a probabilistic set; no key enumeration is possible",
);
}
#[test]
fn pinned_error_lpm_trie() {
let expected = "LPM_TRIE walker not implemented (keyed by prefixlen + data); \
use bpf(2) BPF_MAP_GET_NEXT_KEY for live-host iteration";
let rendered: String = expected.into();
assert_eq!(rendered, expected);
}
// NOTE(review): both sides of this assertion are built inside the test, so
// it exercises only `format!` interpolation, not the renderer's actual
// unknown-map-type fallback. Consider producing `rendered` via the
// render_map dispatch itself to pin the real string.
#[test]
fn pinned_error_unknown_map_type_format() {
let other: u32 = 99;
let rendered = format!(
"unknown map_type {other} (kernel newer than dump renderer; \
update render_map dispatch)"
);
assert_eq!(
rendered,
"unknown map_type 99 (kernel newer than dump renderer; \
update render_map dispatch)",
);
}
// An empty percpu_hash_entries vector must be elided from the serialized
// map JSON.
#[test]
fn map_percpu_hash_entries_skips_when_empty() {
let m = FailureDumpMap {
name: "test".into(),
map_type: BPF_MAP_TYPE_HASH,
value_size: 4,
max_entries: 1,
value: None,
entries: Vec::new(),
percpu_entries: Vec::new(),
percpu_hash_entries: Vec::new(),
arena: None,
ringbuf: None,
stack_trace: None,
fd_array: None,
error: None,
};
let json = serde_json::to_string(&m).unwrap();
assert!(
!json.contains("percpu_hash_entries"),
"empty must skip: {json}",
);
}
// A populated percpu_hash_entries vector must serialize and deserialize
// back with its contents intact.
#[test]
fn map_percpu_hash_entries_round_trip_when_populated() {
let m = FailureDumpMap {
name: "ph".into(),
map_type: BPF_MAP_TYPE_PERCPU_HASH,
value_size: 4,
max_entries: 1,
value: None,
entries: Vec::new(),
percpu_entries: Vec::new(),
percpu_hash_entries: vec![FailureDumpPercpuHashEntry {
key: Some(RenderedValue::Uint { bits: 32, value: 1 }),
key_hex: "01 00 00 00".into(),
per_cpu: vec![Some(RenderedValue::Uint {
bits: 32,
value: 99,
})],
}],
arena: None,
ringbuf: None,
stack_trace: None,
fd_array: None,
error: None,
};
let json = serde_json::to_string(&m).expect("serialize");
assert!(
json.contains("percpu_hash_entries"),
"populated must serialize: {json}",
);
let parsed: FailureDumpMap = serde_json::from_str(&json).expect("deserialize");
assert_eq!(parsed.percpu_hash_entries.len(), 1);
assert_eq!(parsed.percpu_hash_entries[0].key_hex, "01 00 00 00");
assert_eq!(parsed.percpu_hash_entries[0].per_cpu.len(), 1);
}
// Contract check via a local stub: with no arena snapshot at all,
// is_arena_addr must reject every address.
// NOTE(review): the stub re-implements the accessor's is_arena_addr logic
// inline; it tests the intended contract, not the production impl — keep
// in sync with the real accessor.
#[test]
fn accessor_mem_reader_no_snapshot_rejects_all_addrs() {
struct StubReader<'a> {
arena_snapshot: Option<&'a super::super::arena::ArenaSnapshot>,
}
impl super::super::btf_render::MemReader for StubReader<'_> {
fn read_kva(&self, _: u64, _: usize) -> Option<Vec<u8>> {
None
}
fn is_arena_addr(&self, addr: u64) -> bool {
let Some(snap) = self.arena_snapshot else {
return false;
};
if snap.user_vm_start == 0 {
return false;
}
// Arena window is 4 GiB starting at user_vm_start.
addr >= snap.user_vm_start && addr < snap.user_vm_start.wrapping_add(1 << 32)
}
}
let r = StubReader {
arena_snapshot: None,
};
assert!(!r.is_arena_addr(0));
assert!(!r.is_arena_addr(0x10000));
assert!(!r.is_arena_addr(u64::MAX));
}
// Contract check via a local stub: a snapshot with user_vm_start == 0
// (no anchor) must also reject every address.
#[test]
fn accessor_mem_reader_zero_user_vm_start_rejects_all() {
use super::super::arena::ArenaSnapshot;
let snap = ArenaSnapshot {
user_vm_start: 0,
..ArenaSnapshot::default()
};
struct StubReader<'a> {
arena_snapshot: Option<&'a ArenaSnapshot>,
}
impl super::super::btf_render::MemReader for StubReader<'_> {
fn read_kva(&self, _: u64, _: usize) -> Option<Vec<u8>> {
None
}
fn is_arena_addr(&self, addr: u64) -> bool {
let Some(snap) = self.arena_snapshot else {
return false;
};
if snap.user_vm_start == 0 {
return false;
}
addr >= snap.user_vm_start && addr < snap.user_vm_start.wrapping_add(1 << 32)
}
}
let r = StubReader {
arena_snapshot: Some(&snap),
};
assert!(!r.is_arena_addr(0));
assert!(!r.is_arena_addr(0x100000));
}
// Contract check via a local stub: the arena window is the half-open
// 4 GiB range [user_vm_start, user_vm_start + 2^32) — both boundaries
// are probed.
#[test]
fn accessor_mem_reader_arena_addr_range_via_snapshot() {
use super::super::arena::ArenaSnapshot;
let snap = ArenaSnapshot {
user_vm_start: 0x10_0000_0000,
..ArenaSnapshot::default()
};
struct StubReader<'a> {
arena_snapshot: Option<&'a ArenaSnapshot>,
}
impl super::super::btf_render::MemReader for StubReader<'_> {
fn read_kva(&self, _: u64, _: usize) -> Option<Vec<u8>> {
None
}
fn is_arena_addr(&self, addr: u64) -> bool {
let Some(snap) = self.arena_snapshot else {
return false;
};
if snap.user_vm_start == 0 {
return false;
}
addr >= snap.user_vm_start && addr < snap.user_vm_start.wrapping_add(1 << 32)
}
}
let r = StubReader {
arena_snapshot: Some(&snap),
};
assert!(!r.is_arena_addr(0));
// One below the window start.
assert!(!r.is_arena_addr(0xf_ffff_ffff));
// Inclusive lower bound and last in-window byte.
assert!(r.is_arena_addr(0x10_0000_0000));
assert!(r.is_arena_addr(0x10_0000_0000 + (1 << 32) - 1));
// Exclusive upper bound.
assert!(!r.is_arena_addr(0x10_0000_0000 + (1 << 32)));
}
// Contract check via a local stub: read_arena must return None when no
// arena snapshot is present.
#[test]
fn accessor_mem_reader_read_arena_none_when_no_snapshot() {
struct StubReader<'a> {
arena_snapshot: Option<&'a super::super::arena::ArenaSnapshot>,
}
impl super::super::btf_render::MemReader for StubReader<'_> {
fn read_kva(&self, _: u64, _: usize) -> Option<Vec<u8>> {
None
}
fn read_arena(&self, _: u64, _: usize) -> Option<Vec<u8>> {
// `?` short-circuits to None when the snapshot is absent.
self.arena_snapshot?;
None
}
}
let r = StubReader {
arena_snapshot: None,
};
assert!(r.read_arena(0x1234, 8).is_none());
}
// Contract check via a local stub: read_arena resolves the containing
// 4 KiB page by masking the address, serves page-aligned and offset
// reads, and refuses reads that would cross the page boundary.
#[test]
fn accessor_mem_reader_read_arena_page_hit() {
use super::super::arena::{ArenaPage, ArenaSnapshot};
let snap = ArenaSnapshot {
pages: vec![ArenaPage {
user_addr: 0x1000,
// Fill the page with the repeating byte pattern 0x00..=0xff.
bytes: (0..=0xffu8).cycle().take(4096).collect(),
}],
..ArenaSnapshot::default()
};
struct StubReader<'a> {
arena_snapshot: &'a ArenaSnapshot,
}
impl super::super::btf_render::MemReader for StubReader<'_> {
fn read_kva(&self, _: u64, _: usize) -> Option<Vec<u8>> {
None
}
fn read_arena(&self, addr: u64, len: usize) -> Option<Vec<u8>> {
let page_addr = addr & !0xFFF;
let offset = (addr & 0xFFF) as usize;
let page = self
.arena_snapshot
.pages
.iter()
.find(|p| p.user_addr == page_addr)?;
if offset + len > page.bytes.len() {
return None;
}
Some(page.bytes[offset..offset + len].to_vec())
}
}
let r = StubReader {
arena_snapshot: &snap,
};
let bytes = r.read_arena(0x1000, 8).expect("page-aligned hit");
assert_eq!(bytes, vec![0, 1, 2, 3, 4, 5, 6, 7]);
let bytes = r.read_arena(0x1000 + 100, 8).expect("offset hit");
assert_eq!(bytes[0], 100);
assert!(
r.read_arena(0x1000 + 4090, 100).is_none(),
"cross-page read must return None per MemReader::read_arena contract",
);
}
// Contract check via a local stub: a read whose containing page is not in
// the snapshot must return None.
#[test]
fn accessor_mem_reader_read_arena_page_miss_returns_none() {
use super::super::arena::{ArenaPage, ArenaSnapshot};
let snap = ArenaSnapshot {
pages: vec![ArenaPage {
user_addr: 0x1000,
bytes: vec![0u8; 4096],
}],
..ArenaSnapshot::default()
};
struct StubReader<'a> {
arena_snapshot: &'a ArenaSnapshot,
}
impl super::super::btf_render::MemReader for StubReader<'_> {
fn read_kva(&self, _: u64, _: usize) -> Option<Vec<u8>> {
None
}
fn read_arena(&self, addr: u64, len: usize) -> Option<Vec<u8>> {
let page_addr = addr & !0xFFF;
let offset = (addr & 0xFFF) as usize;
let page = self
.arena_snapshot
.pages
.iter()
.find(|p| p.user_addr == page_addr)?;
if offset + len > page.bytes.len() {
return None;
}
Some(page.bytes[offset..offset + len].to_vec())
}
}
let r = StubReader {
arena_snapshot: &snap,
};
// 0x2000 is not a snapshotted page.
assert!(r.read_arena(0x2000, 8).is_none());
}
// MemReader's provided (default) method bodies: a type implementing only
// read_kva must report no arena addresses and fail every arena read.
#[test]
fn mem_reader_default_impls_skip_arena() {
struct MinReader;
impl super::super::btf_render::MemReader for MinReader {
fn read_kva(&self, _: u64, _: usize) -> Option<Vec<u8>> {
None
}
}
let r = MinReader;
assert!(!r.is_arena_addr(0));
assert!(!r.is_arena_addr(u64::MAX));
assert!(r.read_arena(0x1234, 8).is_none());
}
// user_vm_start must serialize as a plain JSON integer and round-trip.
// 0x1234_5678_0000_0000 == 1311768464867721216 decimal.
#[test]
fn arena_snapshot_user_vm_start_round_trips() {
use super::super::arena::ArenaSnapshot;
let snap = ArenaSnapshot {
user_vm_start: 0x1234_5678_0000_0000,
..ArenaSnapshot::default()
};
let json = serde_json::to_string(&snap).expect("serialize");
assert!(
json.contains("\"user_vm_start\":1311768464867721216"),
"user_vm_start in JSON: {json}",
);
let parsed: ArenaSnapshot = serde_json::from_str(&json).expect("deserialize");
assert_eq!(parsed.user_vm_start, 0x1234_5678_0000_0000);
}
// A defaulted snapshot carries user_vm_start == 0, the "no anchor" value
// the address-range checks treat as reject-everything.
#[test]
fn arena_snapshot_default_user_vm_start_is_zero() {
use super::super::arena::ArenaSnapshot;
let snap = ArenaSnapshot::default();
assert_eq!(
snap.user_vm_start, 0,
"default snapshot's user_vm_start is 0 (no anchor)",
);
}
// Shared MemReader stub that maps no kernel memory at all; used by the
// render_value_or_hex / render_key_optional tests in this module.
struct EmptyReader;
impl super::super::btf_render::MemReader for EmptyReader {
fn read_kva(&self, _: u64, _: usize) -> Option<Vec<u8>> {
None
}
}
// Without BTF, render_value_or_hex must fall back to a Bytes variant whose
// hex matches hex_dump's formatting.
#[test]
fn render_value_or_hex_falls_back_to_hex_when_btf_none() {
let bytes = [0x12u8, 0x34, 0x56];
let reader = EmptyReader;
let rendered = render_value_or_hex(None, 0, &bytes, &reader);
match rendered {
RenderedValue::Bytes { hex } => {
assert_eq!(hex, "12 34 56", "hex must match hex_dump output");
}
other => panic!("expected Bytes, got {other:?}"),
}
}
// Even with real vmlinux BTF loaded, type_id == 0 (BTF void) must fall
// back to the hex rendering. Skips silently when no test vmlinux exists.
#[test]
fn render_value_or_hex_falls_back_to_hex_when_type_id_zero() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
let Ok(btf) = crate::monitor::btf_offsets::load_btf_from_path(&path) else {
crate::report::test_skip("could not parse vmlinux BTF");
return;
};
let bytes = [0xABu8, 0xCD];
let reader = EmptyReader;
let rendered = render_value_or_hex(Some(&btf), 0, &bytes, &reader);
match rendered {
RenderedValue::Bytes { hex } => {
assert_eq!(hex, "ab cd", "type_id=0 must surface hex even with btf");
}
other => panic!("expected Bytes, got {other:?}"),
}
}
// With vmlinux BTF and the resolved 'int' type id, render_value_or_hex
// must decode the little-endian bytes into an Int{bits:32} value.
#[test]
fn render_value_or_hex_renders_via_btf_when_present() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
let Ok(btf) = crate::monitor::btf_offsets::load_btf_from_path(&path) else {
crate::report::test_skip("could not parse vmlinux BTF");
return;
};
let Ok(ids) = btf.resolve_ids_by_name("int") else {
crate::report::test_skip("BTF missing 'int' type");
return;
};
let Some(&id) = ids.first() else {
crate::report::test_skip("BTF resolved 'int' to empty id list");
return;
};
let bytes = 0x42i32.to_le_bytes();
let reader = EmptyReader;
let rendered = render_value_or_hex(Some(&btf), id, &bytes, &reader);
match rendered {
RenderedValue::Int { bits: 32, value } => {
assert_eq!(value, 0x42, "BTF render must surface the decoded int value");
}
other => panic!("expected Int{{bits:32}}, got {other:?}"),
}
}
// Unlike render_value_or_hex, render_key_optional has no hex fallback:
// missing BTF yields None.
#[test]
fn render_key_optional_returns_none_when_btf_none() {
let bytes = [0x07u8, 0x00, 0x00, 0x00];
let reader = EmptyReader;
let rendered = render_key_optional(None, 0, &bytes, &reader);
assert!(
rendered.is_none(),
"None btf must surface as None: {rendered:?}"
);
}
// type_id == 0 must yield None even when real BTF is available.
#[test]
fn render_key_optional_returns_none_when_type_id_zero() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
let Ok(btf) = crate::monitor::btf_offsets::load_btf_from_path(&path) else {
crate::report::test_skip("could not parse vmlinux BTF");
return;
};
let bytes = [0x07u8, 0x00, 0x00, 0x00];
let reader = EmptyReader;
let rendered = render_key_optional(Some(&btf), 0, &bytes, &reader);
assert!(
rendered.is_none(),
"type_id=0 must surface as None even with btf: {rendered:?}",
);
}
// Happy path: with BTF and the 'int' type id, the key bytes decode to
// Some(Int{bits:32}) carrying the little-endian value.
#[test]
fn render_key_optional_returns_some_via_btf() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
let Ok(btf) = crate::monitor::btf_offsets::load_btf_from_path(&path) else {
crate::report::test_skip("could not parse vmlinux BTF");
return;
};
let Ok(ids) = btf.resolve_ids_by_name("int") else {
crate::report::test_skip("BTF missing 'int' type");
return;
};
let Some(&id) = ids.first() else {
crate::report::test_skip("BTF resolved 'int' to empty id list");
return;
};
let bytes = 0x99i32.to_le_bytes();
let reader = EmptyReader;
let rendered = render_key_optional(Some(&btf), id, &bytes, &reader);
match rendered {
Some(RenderedValue::Int { bits: 32, value }) => {
assert_eq!(value, 0x99, "must surface the decoded int value");
}
other => panic!("expected Some(Int{{bits:32}}), got {other:?}"),
}
}
// type_id == 0 must short-circuit to None without consulting the BTF.
#[test]
fn find_sdt_data_field_offset_zero_type_id_short_circuits() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
let Ok(btf) = crate::monitor::btf_offsets::load_btf_from_path(&path) else {
crate::report::test_skip("could not parse vmlinux BTF");
return;
};
assert_eq!(
super::render_map::find_sdt_data_field_offset(&btf, 0),
None,
"type_id=0 must short-circuit to None",
);
}
// A well-known kernel struct that is not an sdt_data container
// (task_struct) must not match the field-offset predicate.
#[test]
fn find_sdt_data_field_offset_none_for_unrelated_struct() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
let Ok(btf) = crate::monitor::btf_offsets::load_btf_from_path(&path) else {
crate::report::test_skip("could not parse vmlinux BTF");
return;
};
let Ok(ids) = btf.resolve_ids_by_name("task_struct") else {
crate::report::test_skip("vmlinux BTF missing 'task_struct'");
return;
};
let Some(&id) = ids.first() else {
crate::report::test_skip("'task_struct' resolved to empty id list");
return;
};
assert_eq!(
super::render_map::find_sdt_data_field_offset(&btf, id),
None,
"task_struct must not match the sdt_data pointee predicate",
);
}
// Exhaustive prerequisite matrix for chase_sdt_data_payload: each required
// input is invalidated in turn (starting from a known-good baseline) and
// the function must yield None for every single missing prerequisite.
#[test]
fn chase_sdt_data_payload_returns_none_on_missing_prereqs() {
use super::super::btf_render::MemReader;
use super::render_map::{SdtAllocMeta, chase_sdt_data_payload};
// Reader that "maps" everything as zeroed memory.
struct StubReader;
impl MemReader for StubReader {
fn read_kva(&self, _: u64, len: usize) -> Option<Vec<u8>> {
Some(vec![0u8; len])
}
}
let reader = StubReader;
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
let Ok(btf) = crate::monitor::btf_offsets::load_btf_from_path(&path) else {
crate::report::test_skip("could not parse vmlinux BTF");
return;
};
let placeholder_type_id: u32 = 1;
// Baseline: all prerequisites satisfied.
let valid_meta = SdtAllocMeta {
allocator_name: "scx_test_allocator".into(),
elem_size: 32,
header_size: 8,
payload_btf_type_id: placeholder_type_id,
kern_vm_start: 0xFFFF_8000_0000_0000,
};
// Non-zero data pointer stored at field offset 16.
let mut value_bytes = vec![0u8; 24];
value_bytes[16..24].copy_from_slice(&0x1_0000_0000u64.to_le_bytes());
assert!(
chase_sdt_data_payload(None, Some(16), Some(&valid_meta), &value_bytes, &reader).is_none(),
"missing btf must yield None",
);
assert!(
chase_sdt_data_payload(Some(&btf), None, Some(&valid_meta), &value_bytes, &reader)
.is_none(),
"missing field offset must yield None",
);
assert!(
chase_sdt_data_payload(Some(&btf), Some(16), None, &value_bytes, &reader).is_none(),
"missing allocator metadata must yield None",
);
// Degenerate metadata variants.
let zero_payload = SdtAllocMeta {
payload_btf_type_id: 0,
..valid_meta.clone()
};
assert!(
chase_sdt_data_payload(
Some(&btf),
Some(16),
Some(&zero_payload),
&value_bytes,
&reader,
)
.is_none(),
"payload_btf_type_id=0 must yield None",
);
let small_elem = SdtAllocMeta {
elem_size: 8,
..valid_meta.clone()
};
assert!(
chase_sdt_data_payload(
Some(&btf),
Some(16),
Some(&small_elem),
&value_bytes,
&reader,
)
.is_none(),
"elem_size <= header_size must yield None",
);
let no_kern_vm = SdtAllocMeta {
kern_vm_start: 0,
..valid_meta.clone()
};
assert!(
chase_sdt_data_payload(
Some(&btf),
Some(16),
Some(&no_kern_vm),
&value_bytes,
&reader,
)
.is_none(),
"kern_vm_start=0 must yield None",
);
// Degenerate value-bytes variants: null data pointer, then a buffer too
// short to even contain the pointer at offset 16.
let mut zero_value_bytes = vec![0u8; 24];
zero_value_bytes[16..24].copy_from_slice(&0u64.to_le_bytes());
assert!(
chase_sdt_data_payload(
Some(&btf),
Some(16),
Some(&valid_meta),
&zero_value_bytes,
&reader,
)
.is_none(),
"data_ptr=0 must yield None",
);
let short_value_bytes = vec![0u8; 20];
assert!(
chase_sdt_data_payload(
Some(&btf),
Some(16),
Some(&valid_meta),
&short_value_bytes,
&reader,
)
.is_none(),
"value bytes too short for pointer read must yield None",
);
}
// Even with BTF, a valid field offset, valid allocator metadata, and a
// non-zero data pointer, chase_sdt_data_payload must yield None when the
// kernel virtual address cannot be read.
#[test]
fn chase_sdt_data_payload_yields_none_on_unmapped_kva() {
    use super::super::btf_render::MemReader;
    use super::render_map::{SdtAllocMeta, chase_sdt_data_payload};
    // Reader that maps no kernel virtual addresses at all.
    struct UnmappedReader;
    impl MemReader for UnmappedReader {
        fn read_kva(&self, _: u64, _: usize) -> Option<Vec<u8>> {
            None
        }
    }
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    let Ok(btf) = crate::monitor::btf_offsets::load_btf_from_path(&path) else {
        crate::report::test_skip("could not parse vmlinux BTF");
        return;
    };
    // Otherwise-valid metadata plus a non-zero data pointer at offset 16.
    let meta = SdtAllocMeta {
        allocator_name: "scx_test_allocator".into(),
        elem_size: 32,
        header_size: 8,
        payload_btf_type_id: 1,
        kern_vm_start: 0xFFFF_8000_0000_0000,
    };
    let mut value_bytes = vec![0u8; 24];
    value_bytes[16..24].copy_from_slice(&0x1_0000_0000u64.to_le_bytes());
    let reader = UnmappedReader;
    let result = chase_sdt_data_payload(
        Some(&btf),
        Some(16),
        Some(&meta),
        &value_bytes,
        &reader,
    );
    assert!(
        result.is_none(),
        "unmapped kva must yield None even with all other prereqs satisfied",
    );
}
// A populated payload field must serialize and round-trip with its exact
// bits/value intact.
#[test]
fn failure_dump_entry_payload_serde_roundtrip() {
let entry = FailureDumpEntry {
key: None,
key_hex: "00 11 22 33 44 55 66 77".into(),
value: None,
value_hex: "AA BB".into(),
payload: Some(RenderedValue::Uint {
bits: 64,
value: 0xDEAD_BEEF,
}),
};
let json = serde_json::to_string(&entry).unwrap();
assert!(
json.contains("\"payload\""),
"populated payload must appear in JSON: {json}",
);
let parsed: FailureDumpEntry = serde_json::from_str(&json).unwrap();
match parsed.payload {
Some(RenderedValue::Uint { bits: 64, value }) => {
assert_eq!(value, 0xDEAD_BEEF, "value must round-trip");
}
other => panic!("payload didn't round-trip cleanly: {other:?}"),
}
}
// A None payload must be elided from the JSON wire format.
#[test]
fn failure_dump_entry_payload_skipped_when_none() {
let entry = FailureDumpEntry {
key: None,
key_hex: "00".into(),
value: None,
value_hex: "00".into(),
payload: None,
};
let json = serde_json::to_string(&entry).unwrap();
assert!(
!json.contains("\"payload\""),
"None payload must be skipped: {json}",
);
}
// Display for an entry with key, value, and decoded payload: the payload
// is rendered on its own ".data" line, placed after the "value:" label.
#[test]
fn failure_dump_entry_display_renders_payload_after_value() {
    let entry = FailureDumpEntry {
        key: Some(RenderedValue::Uint { bits: 64, value: 0 }),
        key_hex: "00".into(),
        value: Some(RenderedValue::Uint { bits: 32, value: 99 }),
        value_hex: "63".into(),
        payload: Some(RenderedValue::Uint { bits: 64, value: 0xCAFEBABE }),
    };
    let out = format!("{entry}");
    assert!(
        out.contains("\n .data "),
        "Display must label .data: {out}"
    );
    // "value:" must come before ".data " in the rendered text.
    let value_pos = out.find("value:").expect("value label present");
    let payload_pos = out.find(".data ").expect(".data label present");
    assert!(
        value_pos < payload_pos,
        "Display must order value before .data: {out}",
    );
    // 0xCAFEBABE == 3405691582 decimal.
    assert!(
        out.contains("3405691582"),
        "rendered payload value must appear in Display: {out}",
    );
}
// With payload: None, Display must not mention a payload at all.
#[test]
fn failure_dump_entry_display_omits_payload_when_none() {
let entry = FailureDumpEntry {
key: None,
key_hex: "ab".into(),
value: None,
value_hex: "cd".into(),
payload: None,
};
let out = format!("{entry}");
assert!(
!out.contains("payload"),
"Display must not surface payload when None: {out}",
);
}
// Every map type that has no walker must have a dedicated explanation in
// MAP_TYPE_EXPLANATIONS; otherwise the renderer falls through to the
// generic "unknown map_type" wildcard, which would mislead users.
#[test]
fn map_type_explanations_covers_every_non_walker_type() {
let non_walker: &[(u32, &str)] = &[
(BPF_MAP_TYPE_LPM_TRIE, "LPM_TRIE"),
(BPF_MAP_TYPE_CGROUP_STORAGE, "CGROUP_STORAGE"),
(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, "PERCPU_CGROUP_STORAGE"),
(BPF_MAP_TYPE_QUEUE, "QUEUE"),
(BPF_MAP_TYPE_STACK, "STACK"),
(BPF_MAP_TYPE_BLOOM_FILTER, "BLOOM_FILTER"),
(BPF_MAP_TYPE_INSN_ARRAY, "INSN_ARRAY"),
];
for (discriminant, name) in non_walker {
let found = MAP_TYPE_EXPLANATIONS.iter().any(|(t, _)| t == discriminant);
assert!(
found,
"MAP_TYPE_EXPLANATIONS missing entry for {name} ({discriminant}); \
wildcard arm would surface the generic 'unknown map_type' fallback",
);
}
}
// Explanations must be non-empty and free of placeholder phrases, so every
// string shipped to users is actionable.
#[test]
fn map_type_explanations_strings_are_actionable() {
for (discriminant, msg) in MAP_TYPE_EXPLANATIONS {
assert!(
!msg.is_empty(),
"explanation for discriminant {discriminant} is empty",
);
for placeholder in ["not yet supported", "TODO", "FIXME", "unimplemented"] {
assert!(
!msg.contains(placeholder),
"explanation for discriminant {discriminant} contains placeholder \
{placeholder:?}: {msg:?}",
);
}
}
}
// FailureDumpRingbuf must round-trip through JSON with every position and
// size field intact.
#[test]
fn failure_dump_ringbuf_roundtrip() {
let original = FailureDumpRingbuf {
capacity: 0x1_0000,
consumer_pos: 0x100,
producer_pos: 0x200,
pending_pos: 0x180,
pending_bytes: 0x100,
};
let json = serde_json::to_string(&original).expect("ringbuf serialize");
let restored: FailureDumpRingbuf = serde_json::from_str(&json).expect("ringbuf deserialize");
assert_eq!(restored.capacity, original.capacity);
assert_eq!(restored.consumer_pos, original.consumer_pos);
assert_eq!(restored.producer_pos, original.producer_pos);
assert_eq!(restored.pending_pos, original.pending_pos);
assert_eq!(restored.pending_bytes, original.pending_bytes);
}
// An empty stack trace must round-trip, and truncated=false must be elided
// from the wire entirely (neither "true" nor "false" may appear).
#[test]
fn failure_dump_stack_trace_empty_roundtrip() {
let original = FailureDumpStackTrace {
n_buckets: 0,
entries: Vec::new(),
truncated: false,
};
let json = serde_json::to_string(&original).expect("stack_trace serialize");
assert!(
!json.contains("\"truncated\":true") && !json.contains("\"truncated\":false"),
"skip_serializing_if must elide truncated when false; JSON: {json}",
);
let restored: FailureDumpStackTrace =
serde_json::from_str(&json).expect("stack_trace deserialize");
assert_eq!(restored.n_buckets, 0);
assert!(restored.entries.is_empty());
assert!(!restored.truncated);
}
// A populated stack trace (two buckets, truncated=true) must round-trip
// with bucket ids, PC lists, and raw data preserved; truncated=true must
// be emitted explicitly.
#[test]
fn failure_dump_stack_trace_populated_roundtrip() {
let original = FailureDumpStackTrace {
n_buckets: 4,
entries: vec![
FailureDumpStackTraceEntry {
bucket_id: 0,
nr: 3,
pcs: vec![
0xFFFF_FFFF_8100_0000,
0xFFFF_FFFF_8100_0010,
0xFFFF_FFFF_8100_0020,
],
data_hex: "00 10 20".into(),
},
FailureDumpStackTraceEntry {
bucket_id: 2,
nr: 1,
pcs: vec![0xFFFF_FFFF_8200_0000],
data_hex: "ff".into(),
},
],
truncated: true,
};
let json = serde_json::to_string(&original).expect("stack_trace serialize");
assert!(
json.contains("\"truncated\":true"),
"truncated=true must appear in JSON: {json}",
);
let restored: FailureDumpStackTrace =
serde_json::from_str(&json).expect("stack_trace deserialize");
assert_eq!(restored.n_buckets, 4);
assert_eq!(restored.entries.len(), 2);
assert_eq!(restored.entries[0].bucket_id, 0);
assert_eq!(restored.entries[0].nr, 3);
assert_eq!(restored.entries[0].pcs.len(), 3);
assert_eq!(restored.entries[0].pcs[0], 0xFFFF_FFFF_8100_0000);
assert_eq!(restored.entries[0].data_hex, "00 10 20");
assert_eq!(restored.entries[1].bucket_id, 2);
assert!(restored.truncated);
}
// Build-id mode entry: no PCs, only 32 raw bytes of data as hex. Empty pcs
// must be elided on the wire; everything else round-trips.
#[test]
fn failure_dump_stack_trace_entry_build_id_roundtrip() {
    let original = FailureDumpStackTraceEntry {
        bucket_id: 7,
        nr: 1,
        pcs: Vec::new(),
        // 32 hex byte tokens; the `\` continuation joins the two lines.
        data_hex: "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f \
                   10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f"
            .into(),
    };
    let json = serde_json::to_string(&original).expect("entry serialize");
    assert!(
        !json.contains("\"pcs\""),
        "empty pcs must be elided in build-id mode; JSON: {json}",
    );
    let restored: FailureDumpStackTraceEntry =
        serde_json::from_str(&json).expect("entry deserialize");
    assert_eq!(restored.bucket_id, 7);
    assert_eq!(restored.nr, 1);
    assert!(restored.pcs.is_empty());
    // 32 bytes * 2 hex chars + 31 separating spaces = 95 characters.
    assert_eq!(restored.data_hex.len(), 95);
}
// An empty fd_array must round-trip, with both false truncation flags
// elided from the wire.
#[test]
fn failure_dump_fd_array_empty_roundtrip() {
let original = FailureDumpFdArray {
populated: 0,
scanned: 0,
indices: Vec::new(),
truncated: false,
indices_truncated: false,
};
let json = serde_json::to_string(&original).expect("fd_array serialize");
assert!(
!json.contains("\"truncated\""),
"truncated=false must be elided; JSON: {json}",
);
assert!(
!json.contains("\"indices_truncated\""),
"indices_truncated=false must be elided; JSON: {json}",
);
let restored: FailureDumpFdArray = serde_json::from_str(&json).expect("fd_array deserialize");
assert_eq!(restored.populated, 0);
assert_eq!(restored.scanned, 0);
assert!(restored.indices.is_empty());
assert!(!restored.truncated);
}
// A truncated fd_array (1500 populated slots, only the first 1024 indices
// retained) must round-trip with both truncation flags emitted and intact.
#[test]
fn failure_dump_fd_array_populated_roundtrip() {
    let original = FailureDumpFdArray {
        populated: 1500,
        scanned: 4096,
        indices: (0..1024).collect(),
        truncated: true,
        indices_truncated: true,
    };
    let json = serde_json::to_string(&original).expect("fd_array serialize");
    assert!(
        json.contains("\"indices_truncated\":true"),
        "indices_truncated=true must be emitted; JSON: {json}",
    );
    let restored: FailureDumpFdArray = serde_json::from_str(&json).expect("fd_array deserialize");
    assert_eq!(restored.populated, 1500);
    assert_eq!(restored.scanned, 4096);
    assert_eq!(restored.indices.len(), 1024);
    assert_eq!(restored.indices[0], 0);
    assert!(
        restored.indices_truncated,
        "indices_truncated must roundtrip"
    );
    assert_eq!(restored.indices[1023], 1023);
    assert!(restored.truncated);
}
// Deserializing minimal JSON (only required keys) must succeed, with every
// elided field falling back to its serde default.
#[test]
fn failure_dump_minimal_deserialize_uses_defaults() {
let json =
r#"{"capacity":0,"consumer_pos":0,"producer_pos":0,"pending_pos":0,"pending_bytes":0}"#;
let rb: FailureDumpRingbuf = serde_json::from_str(json).expect("ringbuf minimal deserialize");
assert_eq!(rb.capacity, 0);
let json = r#"{"n_buckets":0,"entries":[]}"#;
let st: FailureDumpStackTrace =
serde_json::from_str(json).expect("stack_trace minimal deserialize");
assert_eq!(st.n_buckets, 0);
assert!(st.entries.is_empty());
assert!(!st.truncated, "truncated must default to false");
let json = r#"{"populated":0,"scanned":0}"#;
let fa: FailureDumpFdArray = serde_json::from_str(json).expect("fd_array minimal deserialize");
assert_eq!(fa.populated, 0);
assert_eq!(fa.scanned, 0);
assert!(fa.indices.is_empty(), "indices must default to []");
assert!(!fa.truncated, "truncated must default to false");
}
// Header branch check: when BOTH jiffies fields are zero (jiffies never
// captured), Display must say "(jiffies not captured)" and must not print
// max_age=0j / threshold=0j.
#[test]
fn dual_dump_display_zero_jiffies_uses_jiffies_not_captured_branch() {
let dual = DualFailureDumpReport {
schema: SCHEMA_DUAL.to_string(),
early: Some(FailureDumpReport::default()),
late: FailureDumpReport::default(),
early_max_age_jiffies: 0,
early_threshold_jiffies: 0,
early_skipped_reason: None,
};
let rendered = format!("{dual}");
assert!(
rendered.contains("early=present (jiffies not captured)"),
"zero-jiffies branch must surface a distinct phrase; got: {rendered}",
);
assert!(
!rendered.contains("max_age=0j"),
"zero-jiffies header must NOT print max_age=0j; got: {rendered}",
);
assert!(
!rendered.contains("threshold=0j"),
"zero-jiffies header must NOT print threshold=0j; got: {rendered}",
);
}
// With both jiffies non-zero, the legacy "max_age=Nj, threshold=Mj" format
// must be used, never the not-captured phrase.
#[test]
fn dual_dump_display_nonzero_jiffies_preserves_max_age_format() {
let dual = DualFailureDumpReport {
schema: SCHEMA_DUAL.to_string(),
early: Some(FailureDumpReport::default()),
late: FailureDumpReport::default(),
early_max_age_jiffies: 1234,
early_threshold_jiffies: 5678,
early_skipped_reason: None,
};
let rendered = format!("{dual}");
assert!(
rendered.contains("max_age=1234j, threshold=5678j"),
"non-zero jiffies must preserve the max_age/threshold format; got: {rendered}",
);
assert!(
!rendered.contains("jiffies not captured"),
"non-zero jiffies must NOT use the not-captured phrase; got: {rendered}",
);
}
// The not-captured branch is only for BOTH jiffies being zero: if exactly
// one is zero (in either position), the legacy format must be used.
#[test]
fn dual_dump_display_one_zero_one_nonzero_uses_legacy_format() {
let dual = DualFailureDumpReport {
schema: SCHEMA_DUAL.to_string(),
early: Some(FailureDumpReport::default()),
late: FailureDumpReport::default(),
early_max_age_jiffies: 0,
early_threshold_jiffies: 5,
early_skipped_reason: None,
};
let rendered = format!("{dual}");
assert!(
rendered.contains("max_age=0j, threshold=5j"),
"single-zero case must use legacy format; got: {rendered}",
);
assert!(
!rendered.contains("jiffies not captured"),
"single-zero case must NOT use the not-captured phrase; got: {rendered}",
);
let dual2 = DualFailureDumpReport {
schema: SCHEMA_DUAL.to_string(),
early: Some(FailureDumpReport::default()),
late: FailureDumpReport::default(),
early_max_age_jiffies: 5,
early_threshold_jiffies: 0,
early_skipped_reason: None,
};
let rendered2 = format!("{dual2}");
assert!(
rendered2.contains("max_age=5j, threshold=0j"),
"single-zero case must use legacy format; got: {rendered2}",
);
assert!(
!rendered2.contains("jiffies not captured"),
"single-zero case must NOT use the not-captured phrase; got: {rendered2}",
);
}
// When the early dump is absent, the header must say "early=absent" and
// carry no jiffies information at all, even if the fields are set.
#[test]
fn dual_dump_display_early_absent_omits_jiffies_lines() {
let dual = DualFailureDumpReport {
schema: SCHEMA_DUAL.to_string(),
early: None,
late: FailureDumpReport::default(),
early_max_age_jiffies: 999,
early_threshold_jiffies: 100,
early_skipped_reason: None,
};
let rendered = format!("{dual}");
assert!(
rendered.contains("early=absent"),
"absent branch must surface 'early=absent'; got: {rendered}",
);
assert!(
!rendered.contains("max_age=999j"),
"absent branch must NOT surface jiffies values; got: {rendered}",
);
assert!(
!rendered.contains("jiffies not captured"),
"absent branch must NOT surface the not-captured phrase; got: {rendered}",
);
}
#[test]
fn dual_dump_display_early_absent_renders_structured_reason() {
    // A structured skip reason replaces both the generic legacy text and the
    // RUST_LOG debugging hint in the absent-early header.
    let report = DualFailureDumpReport {
        schema: SCHEMA_DUAL.to_string(),
        early: None,
        late: FailureDumpReport::default(),
        early_max_age_jiffies: 0,
        early_threshold_jiffies: 0,
        early_skipped_reason: Some("scx_tick stall — no per-task runnable_at data".to_string()),
    };
    let rendered = report.to_string();
    assert!(
        rendered.contains("scx_tick stall"),
        "structured reason must appear in absent header; got: {rendered}",
    );
    assert!(
        !rendered.contains("RUST_LOG=ktstr=debug"),
        "RUST_LOG hint must NOT appear when reason is structured; got: {rendered}",
    );
    assert!(
        !rendered.contains("stall fired before half-way threshold"),
        "legacy generic text must NOT appear when reason is structured; got: {rendered}",
    );
}
#[test]
fn dual_dump_display_early_absent_falls_back_when_reason_absent() {
    // No structured reason: Display must fall back to the legacy generic
    // text plus the RUST_LOG hint.
    let report = DualFailureDumpReport {
        schema: SCHEMA_DUAL.to_string(),
        early: None,
        late: FailureDumpReport::default(),
        early_max_age_jiffies: 0,
        early_threshold_jiffies: 0,
        early_skipped_reason: None,
    };
    let rendered = report.to_string();
    assert!(
        rendered.contains("stall fired before half-way threshold"),
        "legacy generic text must appear when reason is absent; got: {rendered}",
    );
    assert!(
        rendered.contains("RUST_LOG=ktstr=debug"),
        "RUST_LOG hint must appear when reason is absent; got: {rendered}",
    );
}
#[test]
fn failure_dump_display_per_cpu_time_summary() {
    // Three default per-CPU rows (only the cpu id differs) must surface as a
    // "3 CPUs captured" summary line.
    let report = FailureDumpReport {
        per_cpu_time: (0..3)
            .map(|cpu| super::PerCpuTimeStats {
                cpu,
                ..super::PerCpuTimeStats::default()
            })
            .collect(),
        ..FailureDumpReport::default()
    };
    let rendered = report.to_string();
    assert!(
        rendered.contains("per_cpu_time: 3 CPUs captured"),
        "per_cpu_time section must surface CPU count; got: {rendered}",
    );
}
#[test]
fn failure_dump_display_per_node_numa_summary() {
    // Two default NUMA-node rows must surface a node-count summary, and the
    // empty per_cpu_time section must be elided entirely.
    let report = FailureDumpReport {
        per_node_numa: (0..2)
            .map(|node| super::PerNodeNumaStats {
                node,
                ..super::PerNodeNumaStats::default()
            })
            .collect(),
        ..FailureDumpReport::default()
    };
    let rendered = report.to_string();
    assert!(
        rendered.contains("per_node_numa: 2 nodes captured"),
        "per_node_numa section must surface node count; got: {rendered}",
    );
    assert!(
        !rendered.contains("per_cpu_time:"),
        "per_cpu_time must be elided when empty; got: {rendered}",
    );
}
#[test]
fn failure_dump_display_per_node_numa_unavailable() {
    // An unavailability reason must be echoed inline in the section header.
    let report = FailureDumpReport {
        per_node_numa_unavailable: Some("no NUMA walker".into()),
        ..FailureDumpReport::default()
    };
    let rendered = report.to_string();
    assert!(
        rendered.contains("per_node_numa: <unavailable: no NUMA walker>"),
        "per_node_numa_unavailable must surface the reason inline; got: {rendered}",
    );
}
#[test]
fn failure_dump_display_scx_walker_all_present() {
    use crate::monitor::scx_walker::{DsqState, RqScxState, ScxSchedState};
    // All three scx_walker captures populated: the summary line must show
    // both counts and "sched=captured".
    let report = FailureDumpReport {
        scx_sched_state: Some(ScxSchedState::default()),
        rq_scx_states: vec![RqScxState::default(); 4],
        dsq_states: vec![DsqState::default(); 2],
        ..FailureDumpReport::default()
    };
    let rendered = report.to_string();
    assert!(
        rendered.contains("scx_walker: rq_scx=4 dsq=2 sched=captured"),
        "scx_walker present-everywhere must surface counts and 'captured'; got: {rendered}",
    );
}
#[test]
fn failure_dump_display_scx_walker_partial() {
    use crate::monitor::scx_walker::RqScxState;
    // Only rq_scx captured: the missing sched state must read "sched=absent".
    let report = FailureDumpReport {
        rq_scx_states: vec![RqScxState::default()],
        ..FailureDumpReport::default()
    };
    let rendered = report.to_string();
    assert!(
        rendered.contains("scx_walker: rq_scx=1 dsq=0 sched=absent"),
        "partial scx_walker must show 'sched=absent'; got: {rendered}",
    );
}
#[test]
fn failure_dump_display_scx_walker_unavailable() {
    // An unavailability reason must be echoed inline in the section header.
    let report = FailureDumpReport {
        scx_walker_unavailable: Some("scx_sched offsets unresolved".into()),
        ..FailureDumpReport::default()
    };
    let rendered = report.to_string();
    assert!(
        rendered.contains("scx_walker: <unavailable: scx_sched offsets unresolved>"),
        "scx_walker_unavailable must surface the reason inline; got: {rendered}",
    );
}
/// Synthetic guest-memory scene shared by the render tests: a flat byte
/// buffer standing in for guest physical memory, plus the translation base
/// and BTF offsets the map accessor needs.
struct RenderScene {
    // Backing "guest physical memory"; a physical address is an index here.
    buf: Vec<u8>,
    // Linear-map base used to translate PA <-> KVA in these tests.
    page_offset: u64,
    // BTF-derived struct offsets handed to GuestMemMapAccessor.
    offsets: crate::monitor::btf_offsets::BpfMapOffsets,
}
/// Translate a synthetic guest physical address into a kernel virtual
/// address by adding the linear-map base (wrapping add, mirroring how the
/// kernel's direct map works in these tests).
fn pa_to_kva(pa: u64, page_offset: u64) -> u64 {
    pa.wrapping_add(page_offset)
}
/// Fabricated BTF offsets for the ringbuf structs: fields spaced 64 bytes
/// apart so every write in the synthetic scene lands in a distinct slot.
fn synth_ringbuf_offsets() -> super::super::btf_offsets::BpfRingbufOffsets {
    super::super::btf_offsets::BpfRingbufOffsets {
        rbm_rb: 0,
        rb_mask: 0,
        rb_consumer_pos: 64,
        rb_producer_pos: 128,
        rb_pending_pos: 192,
    }
}
/// Map-level offsets with only the ringbuf portion populated; everything
/// else stays at the EMPTY baseline.
fn synth_ringbuf_map_offsets() -> crate::monitor::btf_offsets::BpfMapOffsets {
    crate::monitor::btf_offsets::BpfMapOffsets {
        ringbuf_offsets: Some(synth_ringbuf_offsets()),
        ..crate::monitor::btf_offsets::BpfMapOffsets::EMPTY
    }
}
/// Build a synthetic ringbuf scene: a guest-memory buffer with a
/// `bpf_ringbuf_map` at PA 0x1000 whose rb pointer targets a `bpf_ringbuf`
/// at PA 0x10_0000, populated with the given mask and positions.
///
/// `rb_kva_override` substitutes a bogus rb pointer (NULL / unmapped KVA)
/// for the error-path tests. Returns the scene plus the map's KVA.
fn build_ringbuf_scene(
    mask: u64,
    consumer_pos: u64,
    producer_pos: u64,
    pending_pos: u64,
    rb_kva_override: Option<u64>,
) -> (RenderScene, u64) {
    let rb_offs = synth_ringbuf_offsets();
    let offsets = synth_ringbuf_map_offsets();
    let page_offset = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
    let map_pa: u64 = 0x1000;
    let rb_pa: u64 = 0x10_0000;
    // Buffer must cover the ringbuf struct plus a page of slack for its fields.
    let buf_size: usize = (rb_pa as usize) + 0x1000;
    let mut buf = vec![0u8; buf_size];
    // Native-endian write, matching how the production reader decodes fields.
    let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
        let off = pa as usize;
        buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
    };
    let rb_kva = rb_kva_override.unwrap_or_else(|| pa_to_kva(rb_pa, page_offset));
    // map->rb pointer first, then the ringbuf's own fields.
    write_u64(&mut buf, map_pa + rb_offs.rbm_rb as u64, rb_kva);
    write_u64(&mut buf, rb_pa + rb_offs.rb_mask as u64, mask);
    write_u64(
        &mut buf,
        rb_pa + rb_offs.rb_consumer_pos as u64,
        consumer_pos,
    );
    write_u64(
        &mut buf,
        rb_pa + rb_offs.rb_producer_pos as u64,
        producer_pos,
    );
    write_u64(&mut buf, rb_pa + rb_offs.rb_pending_pos as u64, pending_pos);
    let map_kva = pa_to_kva(map_pa, page_offset);
    (
        RenderScene {
            buf,
            page_offset,
            offsets,
        },
        map_kva,
    )
}
/// Minimal `BpfMapInfo` describing a RINGBUF map named "test_ringbuf" at the
/// given KVA; all other fields are zeroed/absent since the ringbuf renderer
/// does not consult them.
fn ringbuf_map_info(map_kva: u64) -> super::super::bpf_map::BpfMapInfo {
    let (name_bytes, name_len) = name_from_str("test_ringbuf");
    super::super::bpf_map::BpfMapInfo {
        map_pa: 0,
        map_kva,
        name_bytes,
        name_len,
        map_type: super::super::bpf_map::BPF_MAP_TYPE_RINGBUF,
        map_flags: 0,
        key_size: 0,
        value_size: 0,
        max_entries: 0,
        value_kva: None,
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    }
}
#[test]
fn render_ringbuf_no_offsets_returns_err() {
    // Missing ringbuf BTF offsets must surface a clear error rather than a
    // garbage read.
    let (scene, map_kva) = build_ringbuf_scene(0xFFFF, 0x100, 0x200, 0x180, None);
    let info = ringbuf_map_info(map_kva);
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): the pointer round-trip launders the borrow's lifetime so
    // `kernel` is not tied to the `mem` binding; sound only because `mem`
    // and `kernel` outlive every use below. Do not reorder these bindings.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    // Strip the ringbuf offsets to trigger the error path.
    let mut offsets = scene.offsets;
    offsets.ringbuf_offsets = None;
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &offsets, 0);
    let result = render_ringbuf_state(&accessor, &info);
    assert!(matches!(result, Err(ref s) if s.contains("BTF lacks bpf_ringbuf_map")));
}
#[test]
fn render_ringbuf_unmapped_map_kva_returns_err() {
    // A map KVA that falls outside the backing buffer must be reported as
    // unmapped, not read out of bounds.
    let (scene, _map_kva) = build_ringbuf_scene(0xFFFF, 0x100, 0x200, 0x180, None);
    // 16 MiB past the linear-map base: well beyond the scene's buffer.
    let bogus_map_kva = scene.page_offset + 0x100_0000;
    let info = ringbuf_map_info(bogus_map_kva);
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; see render_ringbuf_no_offsets test
    // for why the binding order matters here.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let result = render_ringbuf_state(&accessor, &info);
    assert!(matches!(result, Err(ref s) if s.contains("RINGBUF map_kva unmapped")));
}
#[test]
fn render_ringbuf_null_rb_returns_err() {
    // A NULL map->rb pointer must produce the dedicated NULL error.
    let (scene, map_kva) = build_ringbuf_scene(0xFFFF, 0x100, 0x200, 0x180, Some(0));
    let info = ringbuf_map_info(map_kva);
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let result = render_ringbuf_state(&accessor, &info);
    assert!(matches!(result, Err(ref s) if s.contains("rb pointer NULL")));
}
#[test]
fn render_ringbuf_unmapped_rb_returns_err() {
    // A non-NULL rb pointer that targets unmapped memory must fail on the
    // first field read (rb->mask).
    let (scene, map_kva) = build_ringbuf_scene(
        0xFFFF,
        0x100,
        0x200,
        0x180,
        // Valid-looking KVA, but 16 MiB past the end of the scene buffer.
        Some(crate::monitor::symbols::DEFAULT_PAGE_OFFSET + 0x100_0000),
    );
    let info = ringbuf_map_info(map_kva);
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let result = render_ringbuf_state(&accessor, &info);
    assert!(matches!(result, Err(ref s) if s.contains("rb->mask unmapped")));
}
#[test]
fn render_ringbuf_basic_capacity_and_pending() {
    // Happy path: capacity = mask + 1, positions echoed verbatim, and
    // pending_bytes = producer - consumer.
    let (scene, map_kva) = build_ringbuf_scene(0xFFFF, 0x100, 0x300, 0x180, None);
    let info = ringbuf_map_info(map_kva);
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let rb = render_ringbuf_state(&accessor, &info).expect("happy-path render");
    assert_eq!(rb.capacity, 0x10000);
    assert_eq!(rb.consumer_pos, 0x100);
    assert_eq!(rb.producer_pos, 0x300);
    assert_eq!(rb.pending_pos, 0x180);
    assert_eq!(rb.pending_bytes, 0x200);
}
#[test]
fn render_ringbuf_wraparound_pending_bytes() {
    // Consumer ahead of producer (wrapped counters): pending_bytes must use
    // wrapping subtraction, matching the production semantics.
    let consumer = 200u64;
    let producer = 100u64;
    let (scene, map_kva) = build_ringbuf_scene(0xFFFF, consumer, producer, producer, None);
    let info = ringbuf_map_info(map_kva);
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let rb = render_ringbuf_state(&accessor, &info).expect("wraparound render");
    assert_eq!(
        rb.pending_bytes,
        producer.wrapping_sub(consumer),
        "wraparound subtraction must match production semantics",
    );
}
#[test]
fn render_ringbuf_mask_max_returns_err() {
    // mask == u64::MAX would overflow capacity (mask + 1); the renderer must
    // reject it instead of wrapping to zero.
    let (scene, map_kva) = build_ringbuf_scene(u64::MAX, 0, 0, 0, None);
    let info = ringbuf_map_info(map_kva);
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let result = render_ringbuf_state(&accessor, &info);
    assert!(matches!(result, Err(ref s) if s.contains("mask = u64::MAX")));
}
/// Fabricated BTF offsets for `bpf_stack_map` / its buckets: n_buckets at
/// the struct start, the bucket-pointer array 16 bytes in; within a bucket,
/// nr at 0 and the PC data 16 bytes in.
fn synth_stackmap_offsets() -> super::super::btf_offsets::BpfStackmapOffsets {
    super::super::btf_offsets::BpfStackmapOffsets {
        smap_n_buckets: 0,
        smap_buckets: 16,
        smb_nr: 0,
        smb_data: 16,
    }
}
/// Map-level offsets with only the stackmap portion populated; everything
/// else stays at the EMPTY baseline.
fn synth_stackmap_map_offsets() -> crate::monitor::btf_offsets::BpfMapOffsets {
    crate::monitor::btf_offsets::BpfMapOffsets {
        stackmap_offsets: Some(synth_stackmap_offsets()),
        ..crate::monitor::btf_offsets::BpfMapOffsets::EMPTY
    }
}
/// Build a synthetic stack-map scene: the `bpf_stack_map` header at PA
/// 0x1000 holds `n_buckets` and a bucket-pointer array; each non-empty
/// bucket gets its own page holding its entry count and PC list.
///
/// `bucket_pc_lists[i]` is the PC list for bucket `i`; an empty list leaves
/// a NULL bucket pointer in that slot. Returns the scene plus a STACK_TRACE
/// `BpfMapInfo` named "test_stack" carrying `map_flags`.
fn build_stackmap_scene(
    bucket_pc_lists: &[Vec<u64>],
    map_flags: u32,
) -> (RenderScene, super::super::bpf_map::BpfMapInfo) {
    let sm_offs = synth_stackmap_offsets();
    let offsets = synth_stackmap_map_offsets();
    let page_offset = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
    let map_pa: u64 = 0x1000;
    let n_buckets = bucket_pc_lists.len() as u32;
    // One page per bucket, starting well past the map header.
    let bucket_stride: u64 = 0x1000;
    let buckets_start: u64 = 0x1_0000;
    let buf_size: usize = (buckets_start + bucket_stride * (n_buckets as u64 + 1)) as usize;
    let mut buf = vec![0u8; buf_size];
    // Native-endian writes, matching how the production reader decodes fields.
    let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
        let off = pa as usize;
        buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
    };
    let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
        let off = pa as usize;
        buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
    };
    write_u32(&mut buf, map_pa + sm_offs.smap_n_buckets as u64, n_buckets);
    for (i, pcs) in bucket_pc_lists.iter().enumerate() {
        let slot_pa = map_pa + sm_offs.smap_buckets as u64 + (i as u64) * 8;
        if pcs.is_empty() {
            // Empty bucket: leave a NULL pointer in the slot.
            write_u64(&mut buf, slot_pa, 0);
            continue;
        }
        let bucket_pa = buckets_start + (i as u64) * bucket_stride;
        write_u64(&mut buf, slot_pa, pa_to_kva(bucket_pa, page_offset));
        write_u32(
            &mut buf,
            bucket_pa + sm_offs.smb_nr as u64,
            pcs.len() as u32,
        );
        for (j, pc) in pcs.iter().enumerate() {
            write_u64(
                &mut buf,
                bucket_pa + sm_offs.smb_data as u64 + (j as u64) * 8,
                *pc,
            );
        }
    }
    let map_kva = pa_to_kva(map_pa, page_offset);
    let (name_bytes, name_len) = name_from_str("test_stack");
    let info = super::super::bpf_map::BpfMapInfo {
        map_pa: 0,
        map_kva,
        name_bytes,
        name_len,
        map_type: super::super::bpf_map::BPF_MAP_TYPE_STACK_TRACE,
        map_flags,
        key_size: 0,
        value_size: 0,
        max_entries: n_buckets,
        value_kva: None,
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    (
        RenderScene {
            buf,
            page_offset,
            offsets,
        },
        info,
    )
}
#[test]
fn render_stack_traces_no_offsets_returns_err() {
    // Missing stackmap BTF offsets must surface a clear error.
    let (mut scene, info) = build_stackmap_scene(&[vec![]], 0);
    scene.offsets.stackmap_offsets = None;
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let result = render_stack_traces(&accessor, &info);
    assert!(matches!(result, Err(ref s) if s.contains("BTF lacks bpf_stack_map")));
}
#[test]
fn render_stack_traces_unmapped_map_kva_returns_err() {
    // A map KVA outside the backing buffer must be reported as unmapped.
    let (scene, _info) = build_stackmap_scene(&[vec![]], 0);
    let mut info = _info;
    // 16 MiB past the linear-map base: well beyond the scene's buffer.
    info.map_kva = scene.page_offset + 0x100_0000;
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let result = render_stack_traces(&accessor, &info);
    assert!(matches!(result, Err(ref s) if s.contains("STACK_TRACE map_kva unmapped")));
}
#[test]
fn render_stack_traces_empty_returns_no_entries() {
    // Four buckets, all NULL: bucket count is reported, no entries, not
    // truncated.
    let (scene, info) = build_stackmap_scene(&[vec![], vec![], vec![], vec![]], 0);
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let st = render_stack_traces(&accessor, &info).expect("empty render");
    assert_eq!(st.n_buckets, 4);
    assert!(st.entries.is_empty());
    assert!(!st.truncated);
}
#[test]
fn render_stack_traces_populated_pcs() {
    // Two of four buckets populated: entries must carry the right bucket
    // ids, counts, and PC lists, in bucket order.
    let (scene, info) = build_stackmap_scene(
        &[
            vec![],
            vec![0xFFFF_FFFF_8100_1000, 0xFFFF_FFFF_8100_2000],
            vec![],
            vec![0xFFFF_FFFF_8200_3000],
        ],
        0,
    );
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let st = render_stack_traces(&accessor, &info).expect("populated render");
    assert_eq!(st.n_buckets, 4);
    assert_eq!(st.entries.len(), 2);
    assert_eq!(st.entries[0].bucket_id, 1);
    assert_eq!(st.entries[0].nr, 2);
    assert_eq!(
        st.entries[0].pcs,
        vec![0xFFFF_FFFF_8100_1000, 0xFFFF_FFFF_8100_2000]
    );
    assert_eq!(st.entries[1].bucket_id, 3);
    assert_eq!(st.entries[1].pcs, vec![0xFFFF_FFFF_8200_3000]);
    assert!(!st.truncated);
}
#[test]
fn render_stack_traces_build_id_mode_pcs_empty() {
    // Local copy of the uapi flag (1 << 5); in build-id mode the bucket data
    // is bpf_stack_build_id records, not raw u64 PCs.
    const BPF_F_STACK_BUILD_ID: u32 = 1 << 5;
    let (scene, info) = build_stackmap_scene(&[vec![0xDEAD_BEEFu64]], BPF_F_STACK_BUILD_ID);
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let st = render_stack_traces(&accessor, &info).expect("build-id render");
    assert_eq!(st.entries.len(), 1);
    assert!(
        st.entries[0].pcs.is_empty(),
        "build-id mode must NOT populate pcs (entry shape is bpf_stack_build_id, not u64)"
    );
}
/// Map-level offsets with only the array value offset set (slots begin 16
/// bytes into the map struct); everything else stays at the EMPTY baseline.
fn synth_fd_array_offsets() -> crate::monitor::btf_offsets::BpfMapOffsets {
    crate::monitor::btf_offsets::BpfMapOffsets {
        array_value: 16,
        ..crate::monitor::btf_offsets::BpfMapOffsets::EMPTY
    }
}
/// Build a synthetic fd-array scene: a map at PA 0x1000 whose value slots
/// start at `array_value`; each index in `populated_indices` (that falls
/// within the renderer's scan window) gets a distinct non-zero pointer.
fn build_fd_array_scene(
    map_type: u32,
    max_entries: u32,
    populated_indices: &[u32],
) -> (RenderScene, super::super::bpf_map::BpfMapInfo) {
    let offsets = synth_fd_array_offsets();
    let page_offset = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
    let map_pa: u64 = 0x1000;
    // Mirror the renderer's scan cap so the buffer covers exactly what it reads.
    let scan = max_entries.min(super::render_map::MAX_FD_ARRAY_SLOTS);
    let buf_size =
        (map_pa as usize) + (offsets.array_value as usize) + (scan as usize) * 8 + 0x1000;
    let mut buf = vec![0u8; buf_size];
    let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
        let off = pa as usize;
        buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
    };
    for &idx in populated_indices {
        // Indices beyond the scan window have no slot to write.
        if idx >= scan {
            continue;
        }
        let slot_pa = map_pa + offsets.array_value as u64 + (idx as u64) * 8;
        // Distinct, recognizably kernel-looking pointer per slot.
        write_u64(&mut buf, slot_pa, 0xFFFF_8000_0000_0000 + (idx as u64));
    }
    let map_kva = pa_to_kva(map_pa, page_offset);
    let (name_bytes, name_len) = name_from_str("test_fd_array");
    let info = super::super::bpf_map::BpfMapInfo {
        map_pa: 0,
        map_kva,
        name_bytes,
        name_len,
        map_type,
        map_flags: 0,
        key_size: 0,
        value_size: 0,
        max_entries,
        value_kva: None,
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    (
        RenderScene {
            buf,
            page_offset,
            offsets,
        },
        info,
    )
}
#[test]
fn render_fd_array_populated_indices() {
    // Three populated slots out of 16: counts, scanned total, and the exact
    // index list must all be reported, with no truncation flags.
    let (scene, info) = build_fd_array_scene(
        super::super::bpf_map::BPF_MAP_TYPE_PROG_ARRAY,
        16,
        &[0, 5, 10],
    );
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let fa = render_fd_array_slots(&accessor, &info);
    assert_eq!(fa.populated, 3);
    assert_eq!(fa.scanned, 16);
    assert_eq!(fa.indices, vec![0, 5, 10]);
    assert!(!fa.truncated);
    assert!(
        !fa.indices_truncated,
        "populated == indices.len() must NOT set indices_truncated",
    );
}
#[test]
fn render_fd_array_hash_shaped_returns_empty() {
    // SOCKHASH is not array-shaped: the renderer must bail out early with an
    // all-empty result rather than scanning slots that do not exist.
    let (scene, info) = build_fd_array_scene(
        super::super::bpf_map::BPF_MAP_TYPE_SOCKHASH,
        16,
        &[0, 5, 10],
    );
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let fa = render_fd_array_slots(&accessor, &info);
    assert_eq!(fa.populated, 0);
    assert_eq!(fa.scanned, 0);
    assert!(fa.indices.is_empty());
    assert!(!fa.truncated);
    assert!(
        !fa.indices_truncated,
        "hash-shaped early exit must NOT set indices_truncated",
    );
}
#[test]
fn render_fd_array_max_entries_truncation() {
    // max_entries one past the scan cap: the scan must stop at
    // MAX_FD_ARRAY_SLOTS and set `truncated` (but not `indices_truncated`,
    // since every populated slot still fit in the index list).
    let (scene, mut info) = build_fd_array_scene(
        super::super::bpf_map::BPF_MAP_TYPE_PROG_ARRAY,
        super::render_map::MAX_FD_ARRAY_SLOTS,
        &[],
    );
    // Bump max_entries past the cap after building, so the backing buffer
    // still only covers the capped scan window.
    info.max_entries = super::render_map::MAX_FD_ARRAY_SLOTS + 1;
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let fa = render_fd_array_slots(&accessor, &info);
    assert!(
        fa.truncated,
        "max_entries above MAX_FD_ARRAY_SLOTS must set truncated"
    );
    assert_eq!(fa.scanned, super::render_map::MAX_FD_ARRAY_SLOTS);
    assert!(
        !fa.indices_truncated,
        "scan-size truncation must NOT set indices_truncated when populated == indices.len()",
    );
}
#[test]
fn render_map_struct_ops_no_offsets_returns_error() {
    // Without struct_ops BTF offsets, render_map must report the value as
    // unreadable with an explicit "offsets unresolved" explanation.
    let mut offsets = crate::monitor::btf_offsets::BpfMapOffsets::EMPTY;
    offsets.struct_ops_offsets = None;
    let buf = vec![0u8; 0x4000];
    let page_offset = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
    // SAFETY: buf stays alive for the whole test.
    let mem =
        unsafe { super::super::reader::GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &offsets, 0);
    let (name_bytes, name_len) = name_from_str("test_struct_ops");
    let info = super::super::bpf_map::BpfMapInfo {
        map_pa: 0,
        map_kva: pa_to_kva(0x1000, page_offset),
        name_bytes,
        name_len,
        map_type: super::super::bpf_map::BPF_MAP_TYPE_STRUCT_OPS,
        map_flags: 0,
        key_size: 0,
        value_size: 256,
        max_entries: 1,
        value_kva: None,
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    // Minimal render context: no BTF, no arena, single CPU.
    let arena_page_index = super::render_map::ArenaPageIndex::new();
    let sdt_alloc_metas: Vec<super::render_map::SdtAllocMeta> = Vec::new();
    let ctx = super::render_map::RenderMapCtx {
        accessor: &accessor,
        btf: None,
        num_cpus: 1,
        arena_offsets: None,
        shared_arena: None,
        arena_page_index: &arena_page_index,
        sdt_alloc_metas: &sdt_alloc_metas,
    };
    let rendered = super::render_map::render_map(&ctx, &info);
    let err = rendered
        .error
        .expect("STRUCT_OPS no-offsets must surface error");
    assert!(
        err.contains("STRUCT_OPS value unreadable") && err.contains("BTF offsets unresolved"),
        "STRUCT_OPS no-offsets error must explain the resolution failure; got: {err}",
    );
}
#[test]
fn render_map_struct_ops_unmapped_value_returns_error() {
    // Offsets resolve, but the precomputed value KVA points past the backing
    // buffer: render_map must report the value region as unmapped.
    let mut offsets = crate::monitor::btf_offsets::BpfMapOffsets::EMPTY;
    offsets.struct_ops_offsets = Some(super::super::btf_offsets::StructOpsOffsets {
        kvalue: 64,
        value_data: 8,
    });
    let buf = vec![0u8; 0x4000];
    let page_offset = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
    // SAFETY: buf stays alive for the whole test.
    let mem =
        unsafe { super::super::reader::GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &offsets, 0);
    let (name_bytes, name_len) = name_from_str("test_struct_ops");
    let info = super::super::bpf_map::BpfMapInfo {
        map_pa: 0,
        map_kva: pa_to_kva(0x1000, page_offset),
        name_bytes,
        name_len,
        map_type: super::super::bpf_map::BPF_MAP_TYPE_STRUCT_OPS,
        map_flags: 0,
        key_size: 0,
        value_size: 256,
        max_entries: 1,
        // Plausible KVA, but 16 MiB past the end of the 0x4000-byte buffer.
        value_kva: Some(page_offset + 0x100_0000),
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    // Minimal render context: no BTF, no arena, single CPU.
    let arena_page_index = super::render_map::ArenaPageIndex::new();
    let sdt_alloc_metas: Vec<super::render_map::SdtAllocMeta> = Vec::new();
    let ctx = super::render_map::RenderMapCtx {
        accessor: &accessor,
        btf: None,
        num_cpus: 1,
        arena_offsets: None,
        shared_arena: None,
        arena_page_index: &arena_page_index,
        sdt_alloc_metas: &sdt_alloc_metas,
    };
    let rendered = super::render_map::render_map(&ctx, &info);
    let err = rendered
        .error
        .expect("STRUCT_OPS unmapped-value must surface error");
    assert!(
        err.contains("STRUCT_OPS value unreadable") && err.contains("value region unmapped"),
        "STRUCT_OPS unmapped-value error must mention the unmapped region; got: {err}",
    );
}
#[test]
fn struct_ops_value_kva_math_kvalue_plus_data() {
    // Pin the offset arithmetic: value KVA = map KVA + offsetof(kvalue)
    // + offsetof(kvalue.data), computed with wrapping adds.
    let so = super::super::btf_offsets::StructOpsOffsets {
        kvalue: 0x40,
        value_data: 0x10,
    };
    let map_kva = 0xFFFF_8888_0000_0000u64;
    let mut value_kva = map_kva;
    value_kva = value_kva.wrapping_add(so.kvalue as u64);
    value_kva = value_kva.wrapping_add(so.value_data as u64);
    assert_eq!(value_kva, 0xFFFF_8888_0000_0050);
}
#[test]
fn render_fd_array_indices_capped_at_max_indices() {
    // More populated slots than MAX_FD_ARRAY_INDICES: the full count is
    // reported, the index list is capped, and indices_truncated is set.
    let cap = super::render_map::MAX_FD_ARRAY_INDICES as u32;
    let pop: Vec<u32> = (0..cap + 5).collect();
    let (scene, info) = build_fd_array_scene(
        super::super::bpf_map::BPF_MAP_TYPE_PROG_ARRAY,
        cap + 5,
        &pop,
    );
    // SAFETY: buf is owned by `scene`, which outlives `mem` in this scope.
    let mem = unsafe {
        super::super::reader::GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64)
    };
    // NOTE(review): lifetime laundering; binding order is load-bearing.
    let mem_ref: &super::super::reader::GuestMem = unsafe { &*(&mem as *const _) };
    let kernel = super::super::guest::GuestKernel::new_for_test(
        mem_ref,
        std::collections::HashMap::new(),
        scene.page_offset,
        0,
        false,
    );
    let kernel_ref = unsafe { &*(&kernel as *const _) };
    let accessor =
        super::super::bpf_map::GuestMemMapAccessor::new_for_test(kernel_ref, &scene.offsets, 0);
    let fa = render_fd_array_slots(&accessor, &info);
    assert_eq!(
        fa.populated,
        cap + 5,
        "populated counts every non-zero slot"
    );
    assert_eq!(
        fa.indices.len() as u32,
        cap,
        "indices vector caps at MAX_FD_ARRAY_INDICES",
    );
    assert!(
        fa.indices_truncated,
        "populated > indices.len() must set indices_truncated",
    );
}
#[test]
fn report_dump_truncated_at_us_serde() {
    // None must be skipped on serialize and round-trip back to None.
    let default_report = FailureDumpReport::default();
    let json = serde_json::to_string(&default_report).unwrap();
    assert!(
        !json.contains("dump_truncated_at_us"),
        "None must skip-serialize: {json}"
    );
    let parsed: FailureDumpReport = serde_json::from_str(&json).unwrap();
    assert_eq!(parsed.dump_truncated_at_us, None);
    // Some must serialize as a plain integer field and round-trip intact.
    let truncated_report = FailureDumpReport {
        dump_truncated_at_us: Some(15_000),
        ..FailureDumpReport::default()
    };
    let json = serde_json::to_string(&truncated_report).unwrap();
    assert!(
        json.contains("\"dump_truncated_at_us\":15000"),
        "Some must serialize: {json}"
    );
    let parsed: FailureDumpReport = serde_json::from_str(&json).unwrap();
    assert_eq!(parsed.dump_truncated_at_us, Some(15_000));
}
#[test]
fn max_enriched_tasks_constant_is_reasonable() {
    // Pin the enrichment cap so accidental budget changes are caught.
    let expected = 4096;
    assert_eq!(super::MAX_ENRICHED_TASKS, expected);
}
/// Build a `RenderedValue::Struct` with the given type name whose members
/// are 64-bit unsigned fields taken from `(name, value)` pairs.
fn make_small_struct(type_name: &str, fields: &[(&str, u64)]) -> RenderedValue {
    let mut members = Vec::with_capacity(fields.len());
    for (name, val) in fields {
        members.push(super::super::btf_render::RenderedMember {
            name: (*name).into(),
            value: RenderedValue::Uint {
                bits: 64,
                value: *val,
            },
        });
    }
    RenderedValue::Struct {
        type_name: Some(type_name.into()),
        members,
    }
}
#[test]
fn entry_display_renders_inline_struct_key_and_value() {
    // Small structs render inline as `name{field=val, ...}` (no `struct`
    // keyword); the value gets its own indented line below the key.
    let dump_entry = FailureDumpEntry {
        key: Some(make_small_struct(
            "cgroup_llc_id",
            &[("cgrp_id", 1), ("llc_id", 5)],
        )),
        key_hex: "01 05".into(),
        value: Some(make_small_struct(
            "cbw_llc_entry",
            &[("llcx", 17_592_186_046_336)],
        )),
        value_hex: "00".into(),
        payload: None,
    };
    let out = dump_entry.to_string();
    assert!(
        out.starts_with("entry: key="),
        "missing entry header: {out}",
    );
    assert!(out.contains("cgroup_llc_id{"), "key inline form: {out}");
    assert!(out.contains("cgrp_id=1"), "key field cgrp_id: {out}");
    assert!(out.contains("llc_id=5"), "key field llc_id: {out}");
    assert!(
        out.contains("\n value: "),
        "missing indented value line: {out}",
    );
    assert!(out.contains("cbw_llc_entry{"), "value inline form: {out}");
    assert!(out.contains("llcx=17592186046336"), "value field: {out}");
    assert!(
        !out.contains("struct cgroup_llc_id"),
        "inline form must drop `struct` prefix: {out}",
    );
    assert!(
        !out.contains("struct cbw_llc_entry"),
        "inline form must drop `struct` prefix: {out}",
    );
}
#[test]
fn entry_display_inline_zero_fields_dropped_silently() {
    // Zero-valued fields vanish from the inline form, with no leftover
    // "N fields zero" summary text anywhere.
    let dump_entry = FailureDumpEntry {
        key: Some(make_small_struct(
            "k",
            &[("real", 7), ("zero1", 0), ("zero2", 0)],
        )),
        key_hex: "07".into(),
        value: Some(make_small_struct("v", &[("real", 3)])),
        value_hex: "03".into(),
        payload: None,
    };
    let out = dump_entry.to_string();
    assert!(out.contains("real=7"), "non-zero key field present: {out}");
    assert!(
        out.contains("real=3"),
        "non-zero value field present: {out}"
    );
    assert!(!out.contains("zero1"), "zero fields are suppressed: {out}",);
    assert!(
        !out.contains("fields zero"),
        "no zero-count summary anywhere: {out}",
    );
}
#[test]
fn entry_display_value_falls_to_multi_line_when_too_wide() {
    // A value with many long fields overflows the inline width budget and
    // must fall back to the multi-line breadcrumb form; the small key stays
    // inline.
    let mut members = Vec::new();
    for i in 0..15 {
        members.push(super::super::btf_render::RenderedMember {
            name: format!("very_long_field_name_{i}"),
            value: RenderedValue::Uint {
                bits: 64,
                value: 0x1234_5678_9abc_def0u64.wrapping_add(i as u64),
            },
        });
    }
    let big_value = RenderedValue::Struct {
        type_name: Some("v".into()),
        members,
    };
    let wide_entry = FailureDumpEntry {
        key: Some(make_small_struct("k", &[("only", 1)])),
        key_hex: "01".into(),
        value: Some(big_value),
        value_hex: "01".into(),
        payload: None,
    };
    let out = wide_entry.to_string();
    assert!(
        out.contains("\n value: v:"),
        "multi-line value uses breadcrumb form: {out}",
    );
    assert!(out.contains("k{only=1}"), "key inline form: {out}");
}
#[test]
fn entry_display_payload_renders_below_value() {
    // An attached payload gets its own `.data` line beneath the value line,
    // with the rendered number (0xDEAD_BEEF == 3735928559) visible.
    let entry = FailureDumpEntry {
        payload: Some(RenderedValue::Uint {
            bits: 64,
            value: 0xDEAD_BEEF,
        }),
        key: Some(make_small_struct("k", &[("a", 1)])),
        key_hex: "01".into(),
        value: Some(make_small_struct("v", &[("b", 2)])),
        value_hex: "02".into(),
    };
    let out = entry.to_string();
    assert!(out.starts_with("entry: key="), "entry header: {out}");
    assert!(out.contains("\n value: v{b=2}"), "value inline: {out}");
    assert!(out.contains("\n .data "), ".data label must appear: {out}");
    assert!(out.contains("3735928559"), "rendered payload value must appear: {out}");
}
#[test]
fn map_display_table_for_homogeneous_entries() {
    // Three entries sharing the same key/value struct types (and carrying no
    // payloads) must collapse into a single aligned table — column headers,
    // a ` | ` key/value separator, and no per-entry blocks.
    let row = |key_hex: &str, cgrp_id, llc_id, llcx| FailureDumpEntry {
        key: Some(make_small_struct(
            "cgroup_llc_id",
            &[("cgrp_id", cgrp_id), ("llc_id", llc_id)],
        )),
        key_hex: key_hex.into(),
        value: Some(make_small_struct("cbw_llc_entry", &[("llcx", llcx)])),
        value_hex: "00".into(),
        payload: None,
    };
    let m = FailureDumpMap {
        name: "cbw".into(),
        map_type: BPF_MAP_TYPE_HASH,
        value_size: 8,
        max_entries: 64,
        value: None,
        entries: vec![
            row("01 05", 1, 5, 17_592_186_046_336),
            row("3d 03", 61, 3, 17_592_186_047_616),
            row("29 01", 41, 1, 17_592_186_047_040),
        ],
        percpu_entries: vec![],
        percpu_hash_entries: vec![],
        arena: None,
        ringbuf: None,
        stack_trace: None,
        fd_array: None,
        error: None,
    };
    let out = m.to_string();
    assert!(out.contains("cgrp_id"), "key column header missing: {out}");
    assert!(out.contains("llc_id"), "key column header missing: {out}");
    assert!(out.contains("llcx"), "value column header missing: {out}");
    assert!(out.contains(" | "), "key/value separator missing: {out}");
    assert!(out.contains("17592186046336"), "row 0 value: {out}");
    assert!(out.contains("17592186047616"), "row 1 value: {out}");
    assert!(out.contains("17592186047040"), "row 2 value: {out}");
    assert!(
        !out.contains("entry {"),
        "table form must replace per-entry blocks: {out}",
    );
}
#[test]
fn map_display_skips_table_for_single_entry() {
    // A lone entry is not worth a table: the per-entry rendering is kept and
    // the table's ` | ` separator never appears.
    let m = FailureDumpMap {
        name: "single".into(),
        map_type: BPF_MAP_TYPE_HASH,
        value_size: 8,
        max_entries: 64,
        value: None,
        entries: vec![FailureDumpEntry {
            payload: None,
            key: Some(make_small_struct("k", &[("a", 1)])),
            key_hex: "01".into(),
            value: Some(make_small_struct("v", &[("b", 2)])),
            value_hex: "02".into(),
        }],
        percpu_entries: vec![],
        percpu_hash_entries: vec![],
        arena: None,
        ringbuf: None,
        stack_trace: None,
        fd_array: None,
        error: None,
    };
    let out = m.to_string();
    assert!(
        out.contains("entry: key="),
        "single entry must keep per-entry rendering: {out}",
    );
    assert!(!out.contains(" | "), "single entry must not use table form: {out}");
}
#[test]
fn map_display_skips_table_when_payload_present() {
    // One payload-bearing entry disqualifies the whole batch from table
    // form; the `.data` line must still surface in per-entry output.
    let with_payload = FailureDumpEntry {
        key: Some(make_small_struct("k", &[("a", 1)])),
        key_hex: "01".into(),
        value: Some(make_small_struct("v", &[("b", 2)])),
        value_hex: "02".into(),
        payload: Some(RenderedValue::Uint { bits: 64, value: 99 }),
    };
    let plain = FailureDumpEntry {
        key: Some(make_small_struct("k", &[("a", 3)])),
        key_hex: "03".into(),
        value: Some(make_small_struct("v", &[("b", 4)])),
        value_hex: "04".into(),
        payload: None,
    };
    let m = FailureDumpMap {
        name: "with_payload".into(),
        map_type: BPF_MAP_TYPE_HASH,
        value_size: 8,
        max_entries: 64,
        value: None,
        entries: vec![with_payload, plain],
        percpu_entries: vec![],
        percpu_hash_entries: vec![],
        arena: None,
        ringbuf: None,
        stack_trace: None,
        fd_array: None,
        error: None,
    };
    let out = m.to_string();
    assert!(
        out.contains("entry: key="),
        "payload-bearing batch must use per-entry form: {out}",
    );
    assert!(out.contains("\n .data "), ".data still surfaces: {out}");
}
#[test]
fn map_display_skips_table_for_heterogeneous_types() {
    // Entries whose keys render as different struct types (k1 vs k2) cannot
    // share table columns, so per-entry form must be used.
    let mk = |key_type: &str, a, key_hex: &str, b, value_hex: &str| FailureDumpEntry {
        key: Some(make_small_struct(key_type, &[("a", a)])),
        key_hex: key_hex.into(),
        value: Some(make_small_struct("v", &[("b", b)])),
        value_hex: value_hex.into(),
        payload: None,
    };
    let m = FailureDumpMap {
        name: "het".into(),
        map_type: BPF_MAP_TYPE_HASH,
        value_size: 8,
        max_entries: 64,
        value: None,
        entries: vec![mk("k1", 1, "01", 2, "02"), mk("k2", 3, "03", 4, "04")],
        percpu_entries: vec![],
        percpu_hash_entries: vec![],
        arena: None,
        ringbuf: None,
        stack_trace: None,
        fd_array: None,
        error: None,
    };
    let out = m.to_string();
    assert!(
        out.contains("entry: key="),
        "heterogeneous types must use per-entry form: {out}",
    );
}
#[test]
fn map_display_skips_table_when_entry_has_no_btf_render() {
    // An entry with no BTF-rendered key/value forces per-entry form for the
    // whole batch, and its raw hex fallback must still be shown.
    let raw_only = FailureDumpEntry {
        key: None,
        key_hex: "ab".into(),
        value: None,
        value_hex: "cd".into(),
        payload: None,
    };
    let rendered = FailureDumpEntry {
        key: Some(make_small_struct("k", &[("a", 1)])),
        key_hex: "01".into(),
        value: Some(make_small_struct("v", &[("b", 2)])),
        value_hex: "02".into(),
        payload: None,
    };
    let m = FailureDumpMap {
        name: "no_btf".into(),
        map_type: BPF_MAP_TYPE_HASH,
        value_size: 8,
        max_entries: 64,
        value: None,
        entries: vec![raw_only, rendered],
        percpu_entries: vec![],
        percpu_hash_entries: vec![],
        arena: None,
        ringbuf: None,
        stack_trace: None,
        fd_array: None,
        error: None,
    };
    let out = m.to_string();
    assert!(
        out.contains("entry: key="),
        "missing BTF render disqualifies table: {out}",
    );
    assert!(out.contains("ab (raw)"), "hex fallback must still surface: {out}");
}