use std::borrow::Cow;
use std::collections::HashSet;
use serde::{Deserialize, Serialize};
use btf_rs::{Btf, BtfType, Member, Struct, Type};
use super::dump::hex_dump;
// Hard cap when peeling typedef/const/volatile chains — guards against
// malformed BTF with modifier cycles (presumably used by peel_modifiers*;
// the helpers are defined elsewhere in this file).
const MAX_MODIFIER_DEPTH: u32 = 32;
// Maximum number of array elements materialized per array value.
const MAX_ARRAY_ELEMS: usize = 4096;
// Maximum nesting depth while rendering (structs, arrays, pointer chases).
const MAX_RENDER_DEPTH: u32 = 32;
/// A fully materialized rendering of a kernel value decoded through BTF.
///
/// Produced by `render_value` / `render_value_with_mem`; serialized with a
/// `kind` tag for machine consumers and pretty-printed via `Display`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "kind", rename_all = "snake_case")]
#[allow(dead_code)]
pub enum RenderedValue {
    /// Signed integer, `bits` wide.
    Int { bits: u32, value: i64 },
    /// Unsigned integer, `bits` wide.
    Uint { bits: u32, value: u64 },
    /// BTF bool (any non-zero byte counts as true, see `render_int`).
    Bool { value: bool },
    /// Single byte char; displayed quoted when printable ASCII.
    Char { value: u8 },
    /// IEEE float widened to f64 (4- and 8-byte sources supported).
    Float { bits: u32, value: f64 },
    /// Enum value plus the variant name when one matched the raw value.
    Enum {
        bits: u32,
        value: i64,
        variant: Option<String>,
    },
    /// Struct or union; `type_name` is `None` for anonymous types.
    Struct {
        type_name: Option<String>,
        members: Vec<RenderedMember>,
    },
    /// Fixed-size array; `elements` may be shorter than `len` when capped
    /// or truncated.
    Array {
        len: usize,
        elements: Vec<RenderedValue>,
    },
    /// Decoded cpumask, e.g. "0-3,8".
    CpuList { cpus: String },
    /// Pointer value, optionally dereferenced (chased) into `deref`.
    Ptr {
        value: u64,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        deref: Option<Box<RenderedValue>>,
        /// Human-readable reason when a chase was attempted but skipped.
        #[serde(default, skip_serializing_if = "Option::is_none")]
        deref_skipped_reason: Option<String>,
        /// Short tag describing a cast-based reinterpretation (e.g. "sdt_alloc").
        #[serde(default, skip_serializing_if = "Option::is_none")]
        cast_annotation: Option<Cow<'static, str>>,
    },
    /// Raw bytes shown as a hex dump (oversized ints, truncation partials).
    Bytes { hex: String },
    /// Value for which fewer bytes were available than the type requires.
    Truncated {
        needed: usize,
        had: usize,
        partial: Box<RenderedValue>,
    },
    /// Types this renderer cannot decode (fwd decls, functions, void, ...).
    Unsupported { reason: String },
}
/// One named member of a rendered struct/union. `name` is empty for
/// anonymous members that were not flattened into the parent.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct RenderedMember {
    pub name: String,
    pub value: RenderedValue,
}
/// Human-readable pretty-printing, starting at indentation depth 0.
impl std::fmt::Display for RenderedValue {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write_rendered_value(f, self, 0)
    }
}
// One indentation unit per nesting depth level.
const INDENT: &str = " ";
/// Crate-internal entry point so sibling modules can render a value starting
/// at a non-zero indentation depth (e.g. when embedding in other output).
pub(crate) fn write_value_at_depth(
    f: &mut std::fmt::Formatter<'_>,
    v: &RenderedValue,
    depth: usize,
) -> std::fmt::Result {
    write_rendered_value(f, v, depth)
}
/// Core pretty-printer: writes `v` to `f`, indenting nested lines by `depth`
/// units of `INDENT`.
///
/// Array rendering applies several compaction strategies in order:
/// byte arrays that look like text become quoted strings (multi-line text
/// becomes a `|`-prefixed block); scalar arrays collapse zero elements into
/// runs (`[i..j]={...}` or `[all N zero]`); arrays of similar structs are
/// folded into a template via `try_write_struct_template`.
fn write_rendered_value(
    f: &mut std::fmt::Formatter<'_>,
    v: &RenderedValue,
    depth: usize,
) -> std::fmt::Result {
    match v {
        // itoa sidesteps the fmt machinery for hot integer paths.
        RenderedValue::Int { value, .. } => {
            let mut buf = itoa::Buffer::new();
            f.write_str(buf.format(*value))
        }
        RenderedValue::Uint { value, .. } => {
            let mut buf = itoa::Buffer::new();
            f.write_str(buf.format(*value))
        }
        RenderedValue::Bool { value } => f.write_str(if *value { "true" } else { "false" }),
        RenderedValue::Char { value } => {
            // Printable ASCII renders quoted; everything else as hex.
            if (0x20..=0x7e).contains(value) {
                f.write_str("'")?;
                f.write_str(core::str::from_utf8(&[*value]).unwrap_or("?"))?;
                f.write_str("'")
            } else {
                write!(f, "0x{value:02x}")
            }
        }
        RenderedValue::Float { value, .. } => write!(f, "{value}"),
        // "Name (raw)" when the variant resolved, bare number otherwise.
        RenderedValue::Enum { value, variant, .. } => match variant {
            Some(name) => {
                f.write_str(name)?;
                f.write_str(" (")?;
                let mut buf = itoa::Buffer::new();
                f.write_str(buf.format(*value))?;
                f.write_str(")")
            }
            None => {
                let mut buf = itoa::Buffer::new();
                f.write_str(buf.format(*value))
            }
        },
        RenderedValue::CpuList { cpus } => write!(f, "cpus={{{cpus}}}"),
        RenderedValue::Ptr {
            value,
            deref,
            deref_skipped_reason,
            cast_annotation,
            ..
        } => {
            write!(f, "0x{value:x}")?;
            if let Some(tag) = cast_annotation {
                write!(f, " ({tag})")?;
            }
            if let Some(inner) = deref {
                // Chased pointer: show the pointee after an arrow.
                f.write_str(" → ")?;
                write_rendered_value(f, inner, depth)?;
            } else if let Some(reason) = deref_skipped_reason {
                // Cycle reasons are common; compress them to a fixed tag.
                if reason.starts_with("cycle ") {
                    f.write_str(" [cycle]")?;
                } else {
                    write!(f, " [chase: {reason}]")?;
                }
            }
            Ok(())
        }
        RenderedValue::Bytes { hex } => f.write_str(hex),
        RenderedValue::Truncated {
            needed,
            had,
            partial,
        } => {
            // Nothing was captured at all: stay silent rather than print an
            // empty truncation marker.
            if *had == 0 {
                return Ok(());
            }
            write!(f, "<truncated needed={needed} had={had}> ")?;
            write_rendered_value(f, partial, depth)
        }
        RenderedValue::Unsupported { reason } => write!(f, "<unsupported: {reason}>"),
        RenderedValue::Array { len, elements } => {
            if elements.is_empty() {
                return write!(f, "[]");
            }
            // Heuristic: treat a byte array as text when the first byte is a
            // non-NUL printable character and every element is text-like.
            let first_byte = match &elements[0] {
                RenderedValue::Int { bits: 8, value } => Some(*value as u8),
                RenderedValue::Uint { bits: 8, value } => Some(*value as u8),
                RenderedValue::Char { value } => Some(*value),
                _ => None,
            };
            let is_string = first_byte.is_some_and(|b| b != 0 && is_text_byte(b))
                && elements.len() >= 2
                && elements.iter().all(|e| match e {
                    RenderedValue::Int { bits: 8, value } => is_text_byte(*value as u8),
                    RenderedValue::Uint { bits: 8, value } => is_text_byte(*value as u8),
                    RenderedValue::Char { value } => is_text_byte(*value),
                    _ => false,
                });
            if is_string {
                // Collect up to the first NUL terminator.
                let mut s = String::new();
                for e in elements {
                    let ch = match e {
                        RenderedValue::Int { value, .. } => *value as u8,
                        RenderedValue::Uint { value, .. } => *value as u8,
                        RenderedValue::Char { value } => *value,
                        _ => 0,
                    };
                    if ch == 0 {
                        break;
                    }
                    s.push(ch as char);
                }
                if s.contains('\n') {
                    // Multi-line text: YAML-ish literal block, one indented
                    // line per source line, blank lines dropped.
                    f.write_str("|\n")?;
                    for line in s.split('\n') {
                        if line.is_empty() {
                            continue;
                        }
                        write_indent(f, depth + 1)?;
                        f.write_str(line)?;
                        f.write_str("\n")?;
                    }
                    write_indent(f, depth)?;
                } else {
                    write!(f, "\"{s}\"")?;
                }
                return Ok(());
            }
            let inline = elements.iter().all(is_inline_scalar);
            if inline {
                // Group consecutive non-zero elements into runs of
                // (start_index, end_index, values).
                let mut runs: Vec<(usize, usize, Vec<&RenderedValue>)> = Vec::new();
                for (i, e) in elements.iter().enumerate() {
                    if is_zero(e) {
                        continue;
                    }
                    if let Some(last) = runs.last_mut()
                        && last.1 + 1 == i
                    {
                        last.1 = i;
                        last.2.push(e);
                    } else {
                        runs.push((i, i, vec![e]));
                    }
                }
                if runs.is_empty() {
                    return write!(f, "[all {len} zero]");
                }
                // Wide unsigned scalars read better in hex.
                let render_elem = |e: &RenderedValue| -> String {
                    use std::fmt::Write;
                    let mut s = String::new();
                    match e {
                        RenderedValue::Uint { value, bits } if *bits >= 32 => {
                            let _ = write!(s, "{value:#x}");
                        }
                        _ => {
                            let _ = write!(s, "{e}");
                        }
                    }
                    s
                };
                // Single run covering the whole array: plain comma list.
                if runs.len() == 1
                    && runs[0].0 == 0
                    && runs[0].1 + 1 == elements.len()
                    && elements.len() == *len
                {
                    let strs: Vec<String> = runs[0].2.iter().map(|e| render_elem(e)).collect();
                    write_inline_list_wrapped(f, "[", "]", &strs, ", ", depth)?;
                    return Ok(());
                }
                // Sparse array: "[i]=v" / "[i..j]={...}" segments.
                let run_strs: Vec<String> = runs
                    .iter()
                    .map(|(start, end, vals)| {
                        if start == end {
                            format!("[{start}]={}", render_elem(vals[0]))
                        } else {
                            let inner: Vec<String> = vals.iter().map(|v| render_elem(v)).collect();
                            format!("[{start}..{end}]={{{}}}", inner.join(", "))
                        }
                    })
                    .collect();
                write_inline_list_wrapped(f, "[", "]", &run_strs, " ", depth)?;
                if elements.len() < *len {
                    write!(f, " /* {} of {len} shown */", elements.len())?;
                }
                Ok(())
            } else {
                // Compound elements: one indexed entry per line, with runs of
                // identical values collapsed to "[start-end]".
                f.write_str("[")?;
                let mut groups: Vec<(usize, usize, &RenderedValue)> = Vec::new();
                for (i, e) in elements.iter().enumerate() {
                    if is_zero(e) {
                        continue;
                    }
                    if let Some(g) = groups.last_mut()
                        && g.2 == e
                    {
                        g.1 = i;
                        continue;
                    }
                    groups.push((i, i, e));
                }
                if groups.is_empty() {
                    return write!(f, "all {len} zero]");
                }
                let mut i = 0;
                while i < groups.len() {
                    let (start, end, val) = &groups[i];
                    // Look ahead for a run of single-index structs that are
                    // similar enough (< 8 differing members pairwise vs the
                    // first) to fold into a shared template.
                    if start == end
                        && let RenderedValue::Struct {
                            members: first_m, ..
                        } = val
                    {
                        let mut run_end = i;
                        'scan: while run_end + 1 < groups.len() {
                            let (ns, ne, nv) = &groups[run_end + 1];
                            if ns != ne {
                                break;
                            }
                            if let RenderedValue::Struct {
                                members: next_m, ..
                            } = nv
                            {
                                if next_m.len() != first_m.len() {
                                    break;
                                }
                                let diffs = first_m
                                    .iter()
                                    .zip(next_m.iter())
                                    .filter(|(a, b)| a.value != b.value)
                                    .count();
                                if diffs >= 8 {
                                    break 'scan;
                                }
                            } else {
                                break;
                            }
                            run_end += 1;
                        }
                        if run_end > i {
                            let run = &groups[i..=run_end];
                            if try_write_struct_template(f, run, depth + 1)? {
                                i = run_end + 1;
                                continue;
                            }
                        }
                    }
                    // Fallback: render this group on its own line.
                    f.write_str("\n")?;
                    write_indent(f, depth + 1)?;
                    if start == end {
                        write!(f, "[{start}] ")?;
                    } else {
                        write!(f, "[{start}-{end}] ")?;
                    }
                    write_rendered_value(f, val, depth + 1)?;
                    i += 1;
                }
                f.write_str("\n")?;
                write_indent(f, depth)?;
                f.write_str("]")?;
                if elements.len() < *len {
                    write!(f, " /* {} of {len} shown */", elements.len())?;
                }
                Ok(())
            }
        }
        RenderedValue::Struct { type_name, members } => {
            write_struct(f, type_name.as_deref(), members, depth)
        }
    }
}
fn write_struct(
f: &mut std::fmt::Formatter<'_>,
type_name: Option<&str>,
members: &[RenderedMember],
depth: usize,
) -> std::fmt::Result {
let mut flat_members;
let members = if members
.iter()
.any(|m| m.name.is_empty() && matches!(m.value, RenderedValue::Struct { .. }))
{
flat_members = Vec::with_capacity(members.len());
for m in members {
if m.name.is_empty()
&& let RenderedValue::Struct {
members: ref inner, ..
} = m.value
{
flat_members.extend_from_slice(inner);
continue;
}
flat_members.push(m.clone());
}
flat_members.as_slice()
} else {
members
};
let any_anon = members.iter().any(|m| m.name.is_empty());
let sibling_scalar_pool: Option<std::collections::HashSet<u64>> = if any_anon {
Some(build_sibling_scalar_pool(members))
} else {
None
};
let mut visible_rendered: Vec<(&RenderedMember, Option<String>)> =
Vec::with_capacity(members.len());
for m in members {
if is_deeply_zero(&m.value) {
continue;
}
if (m.name.contains("___fmt") || m.name.contains("____fmt")) && is_string_value(&m.value) {
continue;
}
if m.name.is_empty()
&& let Some(pool) = sibling_scalar_pool.as_ref()
&& anon_duplicates_pool(&m.value, pool)
{
continue;
}
let single_line = if is_flat_scalar(&m.value) {
Some(format!("{}", m.value))
} else if matches!(m.value, RenderedValue::Struct { .. }) {
let s = format!("{}", m.value);
if s.contains('\n') { None } else { Some(s) }
} else {
None
};
visible_rendered.push((m, single_line));
}
if let Some(inline) = try_inline_from_rendered(type_name, &visible_rendered) {
return f.write_str(&inline);
}
if let Some(name) = type_name {
f.write_str(name)?;
}
if visible_rendered.is_empty() {
f.write_str("{}")?;
return Ok(());
}
f.write_str(":")?;
let mut scalar_cells: Vec<(String, String)> = Vec::new();
let mut compound_members: Vec<&RenderedMember> = Vec::new();
for (m, rendered) in &visible_rendered {
if is_flat_scalar(&m.value) {
let value_str = rendered.clone().expect(
"is_flat_scalar guarantees a single-line rendering; \
visible_rendered must carry Some(string) for flat scalars",
);
let name = if m.name.is_empty() {
"<anon>".to_string()
} else {
m.name.clone()
};
scalar_cells.push((name, value_str));
} else {
compound_members.push(m);
}
}
if !scalar_cells.is_empty() {
let cells_per_row = 3;
let n = scalar_cells.len();
let n_rows = n.div_ceil(cells_per_row);
let mut name_max = vec![0usize; cells_per_row];
let mut name_min = vec![usize::MAX; cells_per_row];
for row in 0..n_rows {
for col in 0..cells_per_row {
let idx = row * cells_per_row + col;
if idx >= n {
break;
}
let nl = scalar_cells[idx].0.len();
if nl > name_max[col] {
name_max[col] = nl;
}
if nl < name_min[col] {
name_min[col] = nl;
}
}
}
let pad_eq: Vec<bool> = (0..cells_per_row)
.map(|col| {
if n_rows < 3 {
return false;
}
let max = name_max[col];
let min = name_min[col];
if min == usize::MAX {
return false;
}
max.saturating_sub(min) >= 4
})
.collect();
let mut cell_widths = vec![0usize; cells_per_row];
for row in 0..n_rows {
for col in 0..cells_per_row {
let idx = row * cells_per_row + col;
if idx >= n {
break;
}
let (name, value) = &scalar_cells[idx];
let cl = if pad_eq[col] {
name_max[col] + 3 + value.len() } else {
name.len() + 1 + value.len() };
if cl > cell_widths[col] {
cell_widths[col] = cl;
}
}
}
for row in 0..n_rows {
f.write_str("\n")?;
write_indent(f, depth + 1)?;
for col in 0..cells_per_row {
let idx = row * cells_per_row + col;
if idx >= n {
break;
}
let (name, value) = &scalar_cells[idx];
f.write_str(name)?;
if pad_eq[col] {
for _ in 0..name_max[col].saturating_sub(name.len()) {
f.write_str(" ")?;
}
f.write_str(" = ")?;
} else {
f.write_str("=")?;
}
f.write_str(value)?;
if col + 1 < cells_per_row && (row * cells_per_row + col + 1) < n {
let cell_len = if pad_eq[col] {
name_max[col] + 3 + value.len()
} else {
name.len() + 1 + value.len()
};
let pad = cell_widths[col].saturating_sub(cell_len) + 3;
for _ in 0..pad {
f.write_str(" ")?;
}
}
}
}
}
for m in compound_members {
f.write_str("\n")?;
write_indent(f, depth + 1)?;
if m.name.is_empty() {
f.write_str("<anon> ")?;
} else {
write!(f, "{} ", m.name)?;
}
write_rendered_value(f, &m.value, depth + 1)?;
}
Ok(())
}
// Maximum width for a struct rendered entirely on one line.
const STRUCT_INLINE_WIDTH_BUDGET: usize = 120;
// Soft line-width limit when wrapping inline element lists.
const INLINE_LIST_WRAP_BUDGET: usize = 120;
/// Write `parts` joined by `sep` between `open` and `close`, soft-wrapping
/// onto indented continuation lines once the running width would exceed
/// `INLINE_LIST_WRAP_BUDGET`. Short lists stay on a single line.
fn write_inline_list_wrapped(
    f: &mut std::fmt::Formatter<'_>,
    open: &str,
    close: &str,
    parts: &[String],
    sep: &str,
    depth: usize,
) -> std::fmt::Result {
    if parts.is_empty() {
        f.write_str(open)?;
        return f.write_str(close);
    }
    // Width the list would occupy if kept on one line.
    let body_len: usize =
        parts.iter().map(String::len).sum::<usize>() + sep.len() * (parts.len() - 1);
    let total = open.len() + close.len() + body_len;
    f.write_str(open)?;
    if total <= INLINE_LIST_WRAP_BUDGET {
        // Fits: plain separator-joined output.
        let mut first = true;
        for part in parts {
            if !first {
                f.write_str(sep)?;
            }
            first = false;
            f.write_str(part)?;
        }
        return f.write_str(close);
    }
    // Too wide: greedy fill, breaking before the part that would overflow.
    let indent = INDENT.repeat(depth + 1);
    let mut col = open.len();
    let mut iter = parts.iter();
    if let Some(head) = iter.next() {
        f.write_str(head)?;
        col += head.len();
    }
    for part in iter {
        let appended = sep.len() + part.len();
        if col + appended > INLINE_LIST_WRAP_BUDGET {
            // Keep the separator (sans trailing space) at the end of the line.
            f.write_str(sep.trim_end())?;
            f.write_str("\n")?;
            f.write_str(&indent)?;
            f.write_str(part)?;
            col = indent.len() + part.len();
        } else {
            f.write_str(sep)?;
            f.write_str(part)?;
            col += appended;
        }
    }
    f.write_str(close)
}
/// Attempt to fit the whole struct on one line as `Name{a=1, b=2}`.
///
/// Returns `None` if any member lacks a pre-rendered single-line form or the
/// result would exceed `STRUCT_INLINE_WIDTH_BUDGET`.
fn try_inline_from_rendered(
    type_name: Option<&str>,
    visible_rendered: &[(&RenderedMember, Option<String>)],
) -> Option<String> {
    // Accept only candidates that fit the width budget.
    let fits = |s: String| (s.len() <= STRUCT_INLINE_WIDTH_BUDGET).then_some(s);
    if visible_rendered.is_empty() {
        return fits(match type_name {
            Some(n) => format!("{n}{{}}"),
            None => "{}".to_string(),
        });
    }
    let mut fields = Vec::with_capacity(visible_rendered.len());
    for (member, rendered) in visible_rendered {
        // A member without a single-line rendering kills inlining outright.
        let value = rendered.as_deref()?;
        let name = if member.name.is_empty() {
            "<anon>"
        } else {
            member.name.as_str()
        };
        fields.push(format!("{name}={value}"));
    }
    let body = fields.join(", ");
    fits(match type_name {
        Some(n) => format!("{n}{{{body}}}"),
        None => format!("{{{body}}}"),
    })
}
/// Shallow zero-check: true when this scalar-like value is its type's zero
/// (0, false, empty cpu list, null pointer). Aggregates always return false;
/// use `is_deeply_zero` for those.
pub fn is_zero(v: &RenderedValue) -> bool {
    match *v {
        RenderedValue::Int { value, .. } => value == 0,
        RenderedValue::Uint { value, .. } => value == 0,
        RenderedValue::Bool { value } => !value,
        RenderedValue::Char { value } => value == 0,
        RenderedValue::Float { value, .. } => value == 0.0,
        RenderedValue::Enum { value, .. } => value == 0,
        RenderedValue::CpuList { ref cpus } => cpus.is_empty(),
        RenderedValue::Ptr { value, .. } => value == 0,
        _ => false,
    }
}
/// Numeric fingerprint of a scalar value (signed values are bit-cast to u64);
/// `None` for aggregates and other non-numeric kinds.
fn scalar_numeric_value(v: &RenderedValue) -> Option<u64> {
    let n = match v {
        RenderedValue::Int { value, .. } => *value as u64,
        RenderedValue::Uint { value, .. } => *value,
        RenderedValue::Bool { value } => u64::from(*value),
        RenderedValue::Char { value } => u64::from(*value),
        RenderedValue::Enum { value, .. } => *value as u64,
        RenderedValue::Ptr { value, .. } => *value,
        _ => return None,
    };
    Some(n)
}
/// Collect the non-zero scalar fingerprints of `members`, descending one
/// level into struct members. Used to detect anonymous union views that only
/// duplicate sibling data.
fn build_sibling_scalar_pool(members: &[RenderedMember]) -> std::collections::HashSet<u64> {
    let mut pool: std::collections::HashSet<u64> = std::collections::HashSet::new();
    for member in members {
        match scalar_numeric_value(&member.value) {
            Some(n) => {
                if n != 0 {
                    pool.insert(n);
                }
            }
            None => {
                // One level deep: scalars of a directly nested struct.
                if let RenderedValue::Struct { members: nested, .. } = &member.value {
                    pool.extend(
                        nested
                            .iter()
                            .filter_map(|sub| scalar_numeric_value(&sub.value))
                            .filter(|&n| n != 0),
                    );
                }
            }
        }
    }
    pool
}
/// True when every scalar member of the anonymous struct `anon` is either
/// zero or already present in `pool` — i.e. the anonymous view adds no new
/// information over its named siblings.
fn anon_duplicates_pool(anon: &RenderedValue, pool: &std::collections::HashSet<u64>) -> bool {
    let members = match anon {
        RenderedValue::Struct { members, .. } => members,
        _ => return false,
    };
    if members.is_empty() || pool.is_empty() {
        return false;
    }
    members.iter().all(|m| match scalar_numeric_value(&m.value) {
        Some(0) => true, // zero carries no information
        Some(n) => pool.contains(&n),
        None => false, // non-scalar member: keep the anonymous view
    })
}
/// Recursive zero-check: true when the value and everything it contains is
/// zero. Bails out (returns false) beyond 16 levels of nesting; Bytes,
/// Truncated and Unsupported are never considered zero.
pub(crate) fn is_deeply_zero(v: &RenderedValue) -> bool {
    const MAX_DEPTH: u32 = 16;
    fn walk(v: &RenderedValue, depth: u32) -> bool {
        if depth >= MAX_DEPTH {
            return false;
        }
        match v {
            RenderedValue::Struct { members, .. } => {
                members.iter().all(|m| walk(&m.value, depth + 1))
            }
            RenderedValue::Array { elements, .. } => {
                elements.iter().all(|e| walk(e, depth + 1))
            }
            RenderedValue::Bytes { .. }
            | RenderedValue::Truncated { .. }
            | RenderedValue::Unsupported { .. } => false,
            other => is_zero(other),
        }
    }
    walk(v, 0)
}
/// Render a run of consecutive, similar structs as one template: the shared
/// (non-varying, non-zero) members once, then each varying member as an
/// indexed series. Returns `Ok(false)` without writing anything when the run
/// does not qualify (fewer than 3 structs, differing member counts, no
/// varying members or more than 3, or non-consecutive indices).
fn try_write_struct_template(
    f: &mut std::fmt::Formatter<'_>,
    groups: &[(usize, usize, &RenderedValue)],
    depth: usize,
) -> Result<bool, std::fmt::Error> {
    // Only single-index struct groups participate.
    let structs: Vec<(usize, &[RenderedMember])> = groups
        .iter()
        .filter_map(|(start, end, val)| {
            if start != end {
                return None;
            }
            match val {
                RenderedValue::Struct { members, .. } => Some((*start, members.as_slice())),
                _ => None,
            }
        })
        .collect();
    if structs.len() != groups.len() || structs.len() < 3 {
        return Ok(false);
    }
    let member_count = structs[0].1.len();
    if structs.iter().any(|(_, m)| m.len() != member_count) {
        return Ok(false);
    }
    // Member positions whose value differs from the first struct anywhere.
    let first = structs[0].1;
    let mut varying: Vec<usize> = Vec::new();
    for i in 0..member_count {
        if structs[1..]
            .iter()
            .any(|(_, m)| m[i].value != first[i].value)
        {
            varying.push(i);
        }
    }
    if varying.is_empty() || varying.len() > 3 {
        return Ok(false);
    }
    // Indices must be strictly consecutive to express an "[a-b]" range.
    if !structs.windows(2).all(|pair| pair[1].0 == pair[0].0 + 1) {
        return Ok(false);
    }
    let type_name = match groups[0].2 {
        RenderedValue::Struct { type_name, .. } => type_name.as_deref(),
        _ => None,
    };
    let idx_range = format!("[{}-{}]", structs[0].0, structs.last().unwrap().0);
    f.write_str("\n")?;
    write_indent(f, depth)?;
    match type_name {
        Some(name) => write!(f, "{idx_range} {name}:")?,
        None => write!(f, "{idx_range}:")?,
    }
    // Shared members (taken from the first struct), zero values elided.
    for (i, m) in first.iter().enumerate() {
        if varying.contains(&i) {
            continue;
        }
        if is_deeply_zero(&m.value) {
            continue;
        }
        f.write_str("\n")?;
        write_indent(f, depth + 1)?;
        write!(f, "{}=", m.name)?;
        write_rendered_value(f, &m.value, depth + 1)?;
    }
    // Varying members: "name: [i]=v [j]=v ..." per member.
    for &vi in &varying {
        f.write_str("\n")?;
        write_indent(f, depth + 1)?;
        write!(f, "{}: ", first[vi].name)?;
        for (idx, members) in &structs {
            write!(f, "[{idx}]=")?;
            write_rendered_value(f, &members[vi].value, depth + 1)?;
            f.write_str(" ")?;
        }
    }
    Ok(true)
}
/// Best-effort decode of a raw cpumask bit array into a `CpuList`.
///
/// Words are read as little-endian u64s; bits at or beyond `max_cpus` are
/// ignored. Returns `None` only when fewer than 8 bytes are available.
fn try_render_cpumask_bits(bytes: &[u8], max_cpus: u32) -> Option<RenderedValue> {
    if bytes.len() < 8 {
        return None;
    }
    let n_words = bytes.len() / 8;
    let mut set_cpus: Vec<u32> = Vec::new();
    for word_idx in 0..n_words {
        let off = word_idx * 8;
        // Defensive re-check; n_words already guarantees this never trips.
        if off + 8 > bytes.len() {
            break;
        }
        let word_first_cpu = (word_idx * 64) as u64;
        if word_first_cpu >= max_cpus as u64 {
            break;
        }
        let word = u64::from_le_bytes(bytes[off..off + 8].try_into().unwrap());
        if word == 0 {
            continue;
        }
        // Heuristic bail-out: a word with high bits set after >64 cpus were
        // already collected is treated as garbage past the real mask.
        // NOTE(review): threshold looks empirical — confirm against callers'
        // buffer sizing before relying on it.
        if word > 0xFFFF_FFFF && set_cpus.len() > 64 {
            break;
        }
        for bit in 0..64 {
            let cpu = (word_idx * 64 + bit) as u32;
            if cpu >= max_cpus {
                break;
            }
            if word & (1u64 << bit) != 0 {
                set_cpus.push(cpu);
            }
        }
    }
    Some(RenderedValue::CpuList {
        cpus: format_cpu_list(&set_cpus),
    })
}
/// Format a sorted list of cpu ids as a kernel-style range list,
/// e.g. `[0, 1, 2, 5]` -> `"0-2,5"`. An empty slice yields an empty string.
fn format_cpu_list(cpus: &[u32]) -> String {
    use std::fmt::Write;
    let Some((&first, rest)) = cpus.split_first() else {
        return String::new();
    };
    let mut out = String::new();
    // Current contiguous (lo, hi) range being accumulated.
    let mut range = (first, first);
    let mut emit = |out: &mut String, (lo, hi): (u32, u32)| {
        if !out.is_empty() {
            out.push(',');
        }
        if lo == hi {
            let _ = write!(out, "{lo}");
        } else {
            let _ = write!(out, "{lo}-{hi}");
        }
    };
    for &cpu in rest {
        if cpu == range.1 + 1 {
            range.1 = cpu; // extend the current run
        } else {
            emit(&mut out, range);
            range = (cpu, cpu);
        }
    }
    emit(&mut out, range);
    out
}
/// True for bytes allowed inside a rendered "string": NUL (terminator),
/// newline, or printable ASCII.
fn is_text_byte(b: u8) -> bool {
    matches!(b, 0 | b'\n' | 0x20..=0x7e)
}
/// True when `v` is a byte array (>= 2 elements) whose every element is a
/// text-like byte — i.e. it would render as a quoted string.
fn is_string_value(v: &RenderedValue) -> bool {
    let RenderedValue::Array { elements, .. } = v else {
        return false;
    };
    if elements.len() < 2 {
        return false;
    }
    elements.iter().all(|e| match e {
        RenderedValue::Int { bits: 8, value } => is_text_byte(*value as u8),
        RenderedValue::Uint { bits: 8, value } => is_text_byte(*value as u8),
        RenderedValue::Char { value } => is_text_byte(*value),
        _ => false,
    })
}
pub(crate) fn is_inline_scalar(v: &RenderedValue) -> bool {
match v {
RenderedValue::Int { .. }
| RenderedValue::Uint { .. }
| RenderedValue::Bool { .. }
| RenderedValue::Char { .. }
| RenderedValue::Float { .. }
| RenderedValue::Enum { .. }
| RenderedValue::Bytes { .. }
| RenderedValue::Unsupported { .. } => true,
RenderedValue::Ptr { deref, .. } => deref.is_none(),
_ => false,
}
}
pub(crate) fn is_flat_scalar(v: &RenderedValue) -> bool {
match v {
RenderedValue::Int { .. }
| RenderedValue::Uint { .. }
| RenderedValue::Bool { .. }
| RenderedValue::Char { .. }
| RenderedValue::Float { .. }
| RenderedValue::Enum { .. } => true,
RenderedValue::Ptr {
deref: None,
deref_skipped_reason: None,
..
} => true,
_ => false,
}
}
/// Emit `depth` units of `INDENT` to the formatter.
fn write_indent(f: &mut std::fmt::Formatter<'_>, depth: usize) -> std::fmt::Result {
    (0..depth).try_for_each(|_| f.write_str(INDENT))
}
pub use super::cast_analysis::CastHit;
/// Result of resolving the type behind an arena address.
/// `header_skip` is a byte offset to skip before the value proper —
/// presumably an allocator header; confirm against the implementors of
/// `MemReader::resolve_arena_type`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct ArenaResolveHit {
    pub target_type_id: u32,
    pub header_skip: usize,
}
/// Whether a BTF forward declaration names a struct or a union.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FwdKind {
    Struct,
    Union,
}

impl FwdKind {
    /// Map the boolean "is struct" convention onto the enum
    /// (`true` -> `Struct`, `false` -> `Union`).
    pub fn from_is_struct(is_struct: bool) -> Self {
        match is_struct {
            true => FwdKind::Struct,
            false => FwdKind::Union,
        }
    }
}
/// A type reference resolved in a different BTF blob (e.g. a kernel-module
/// BTF), pairing the foreign `Btf` handle with a type id valid in it.
#[derive(Copy, Clone)]
pub struct CrossBtfRef<'a> {
    pub btf: &'a Btf,
    pub type_id: u32,
}
/// Abstraction over target memory and metadata lookups used while chasing
/// pointers. Every method except `read_kva` has a conservative default so
/// minimal implementations only need raw kernel-address reads.
pub trait MemReader {
    /// Read `len` bytes at kernel virtual address `kva`; `None` on fault.
    fn read_kva(&self, kva: u64, len: usize) -> Option<Vec<u8>>;
    /// Whether `addr` lies in a BPF arena (default: never).
    fn is_arena_addr(&self, _addr: u64) -> bool {
        false
    }
    /// Read from arena memory (default: unavailable).
    fn read_arena(&self, _addr: u64, _len: usize) -> Option<Vec<u8>> {
        None
    }
    /// Number of possible cpus; default u32::MAX means "no limit known".
    fn nr_cpu_ids(&self) -> u32 {
        u32::MAX
    }
    /// Look up a recorded cast for a member at `(parent type, byte offset)`.
    fn cast_lookup(&self, _parent_type_id: u32, _member_byte_offset: u32) -> Option<CastHit> {
        None
    }
    /// Resolve the concrete type stored at an arena address.
    fn resolve_arena_type(&self, _addr: u64) -> Option<ArenaResolveHit> {
        None
    }
    /// Resolve a forward declaration by name in another BTF blob.
    fn cross_btf_resolve_fwd(&self, _name: &str, _kind: FwdKind) -> Option<CrossBtfRef<'_>> {
        None
    }
    /// Fallback arena type resolution — presumably via allocator metadata;
    /// confirm against the implementor.
    fn resolve_arena_type_meta_fallback(&self, _addr: u64) -> Option<ArenaResolveHit> {
        None
    }
    /// Whether this address was already rendered elsewhere (dedup hint).
    fn is_already_rendered(&self, _addr: u64) -> bool {
        false
    }
    /// Known (allocation size -> type name) pairs for size-based guessing.
    fn alloc_size_types(&self) -> &[(u64, String)] {
        &[]
    }
}
/// Render `bytes` as BTF type `type_id` without any memory access: pointers
/// are shown as raw addresses, never chased.
#[allow(dead_code)]
pub fn render_value(btf: &Btf, type_id: u32, bytes: &[u8]) -> RenderedValue {
    let mut visited: HashSet<u64> = HashSet::new();
    render_value_inner(btf, type_id, bytes, 0, None::<&dyn MemReader>, &mut visited)
}
/// Render `bytes` as BTF type `type_id`, chasing pointers through `mem`.
/// The fresh `visited` set breaks pointer cycles within this one rendering.
pub fn render_value_with_mem(
    btf: &Btf,
    type_id: u32,
    bytes: &[u8],
    mem: &dyn MemReader,
) -> RenderedValue {
    let mut visited: HashSet<u64> = HashSet::new();
    render_value_inner(btf, type_id, bytes, 0, Some(mem), &mut visited)
}
/// Recursive renderer: peels modifiers off `type_id`, then dispatches on the
/// concrete BTF kind. `depth` bounds recursion; `visited` holds addresses of
/// pointers already chased (cycle protection); `mem` enables pointer chasing
/// when present.
fn render_value_inner(
    btf: &Btf,
    type_id: u32,
    bytes: &[u8],
    depth: u32,
    mem: Option<&dyn MemReader>,
    visited: &mut HashSet<u64>,
) -> RenderedValue {
    if depth >= MAX_RENDER_DEPTH {
        return RenderedValue::Unsupported {
            reason: format!("render depth {MAX_RENDER_DEPTH} exceeded"),
        };
    }
    // Strip typedef/const/volatile/... down to the concrete type.
    let Some((ty, peeled_type_id)) = peel_modifiers_with_id(btf, type_id) else {
        return RenderedValue::Unsupported {
            reason: format!("could not peel modifiers from type id {type_id}"),
        };
    };
    match ty {
        Type::Int(int) => render_int(&int, bytes),
        Type::Float(float) => render_float(float.size(), bytes),
        Type::Enum(e) => {
            let needed = e.size();
            if bytes.len() < needed {
                return RenderedValue::Truncated {
                    needed,
                    had: bytes.len(),
                    partial: Box::new(RenderedValue::Bytes {
                        hex: hex_dump(bytes),
                    }),
                };
            }
            let raw = read_uint_le(&bytes[..needed]);
            let signed = e.is_signed();
            let value = if signed {
                sign_extend(raw, needed * 8) as i64
            } else {
                raw as i64
            };
            // NOTE(review): `m.val() as u64` sign-extends negative variant
            // values while `raw` is zero-extended from `needed` bytes —
            // negative variants may fail to match; confirm with btf-rs.
            let variant = e
                .members
                .iter()
                .find(|m| m.val() as u64 == raw)
                .and_then(|m| btf.resolve_name(m).ok());
            RenderedValue::Enum {
                bits: (needed * 8) as u32,
                value,
                variant,
            }
        }
        Type::Enum64(e) => {
            let needed = e.size();
            if bytes.len() < needed {
                return RenderedValue::Truncated {
                    needed,
                    had: bytes.len(),
                    partial: Box::new(RenderedValue::Bytes {
                        hex: hex_dump(bytes),
                    }),
                };
            }
            let raw = read_uint_le(&bytes[..needed]);
            let signed = e.is_signed();
            let value = if signed {
                sign_extend(raw, needed * 8) as i64
            } else {
                raw as i64
            };
            let variant = e
                .members
                .iter()
                .find(|m| m.val() == raw)
                .and_then(|m| btf.resolve_name(m).ok());
            RenderedValue::Enum {
                bits: (needed * 8) as u32,
                value,
                variant,
            }
        }
        Type::Ptr(ptr) => {
            // Pointers are assumed 8 bytes (64-bit target).
            if bytes.len() < 8 {
                return RenderedValue::Truncated {
                    needed: 8,
                    had: bytes.len(),
                    partial: Box::new(RenderedValue::Bytes {
                        hex: hex_dump(bytes),
                    }),
                };
            }
            let val = u64::from_le_bytes(bytes[..8].try_into().unwrap());
            let mut deref_skipped_reason: Option<String> = None;
            let mut cast_annotation: Option<Cow<'static, str>> = None;
            // chase_gate (defined elsewhere) decides whether this address may
            // be followed (null/cycle/depth checks).
            let deref = match chase_gate(val, depth, visited) {
                ChaseGate::Skip { reason } => {
                    deref_skipped_reason = reason;
                    None
                }
                ChaseGate::Proceed => mem.and_then(|m| {
                    let pointee_type_id = ptr.get_type_id().ok()?;
                    // Arena pointers take a dedicated chase path.
                    if m.is_arena_addr(val) {
                        let outcome =
                            chase_arena_pointer(btf, pointee_type_id, None, val, m, depth, visited);
                        if outcome.reason.is_some() {
                            deref_skipped_reason = outcome.reason;
                        }
                        if outcome.sdt_alloc_resolved {
                            cast_annotation = Some(Cow::Borrowed("sdt_alloc"));
                        }
                        return outcome.deref;
                    }
                    let (pointee_ty, _) = peel_modifiers_resolving_fwd(btf, pointee_type_id)?;
                    let btf_size = type_size(btf, &pointee_ty)?;
                    if btf_size == 0 {
                        deref_skipped_reason =
                            Some("pointee BTF size is 0 (incomplete type)".to_string());
                        return None;
                    }
                    // cpumask pointers get special decoding into a cpu list.
                    let is_cpumask_ptr = match &pointee_ty {
                        Type::Struct(s) => {
                            let n = btf.resolve_name(s).unwrap_or_default();
                            n == "bpf_cpumask" || n == "cpumask"
                        }
                        _ => false,
                    };
                    if is_cpumask_ptr {
                        const CPUMASK_READ_CAP: usize = 1024;
                        let Some(bits_bytes) = m.read_kva(val, CPUMASK_READ_CAP) else {
                            deref_skipped_reason = Some(format!(
                                "cpumask kptr read_kva failed at 0x{val:x} \
                                (unmapped page or no PTE)"
                            ));
                            return None;
                        };
                        if bits_bytes.len() < 8 {
                            deref_skipped_reason = Some(format!(
                                "cpumask kptr read returned {} bytes; need at least 8",
                                bits_bytes.len()
                            ));
                            return None;
                        }
                        let max_cpus = m.nr_cpu_ids();
                        let bits0 = u64::from_le_bytes(bits_bytes[..8].try_into().ok()?);
                        // Plausibility gate: an all-ones top byte in the first
                        // word is treated as a freed/poisoned object.
                        if bits0 >> 56 != 0xff {
                            let mut cpus = Vec::new();
                            'walk: for word_idx in 0..(bits_bytes.len() / 8) {
                                let off = word_idx * 8;
                                let word_first_cpu = (word_idx * 64) as u64;
                                if word_first_cpu >= max_cpus as u64 {
                                    break;
                                }
                                let word =
                                    u64::from_le_bytes(bits_bytes[off..off + 8].try_into().ok()?);
                                // Same gate per word: stop at implausible data.
                                if word >> 56 == 0xff {
                                    break;
                                }
                                for bit in 0..64u32 {
                                    let cpu = (word_idx * 64) as u32 + bit;
                                    if cpu >= max_cpus {
                                        break 'walk;
                                    }
                                    if word & (1u64 << bit) != 0 {
                                        cpus.push(cpu);
                                    }
                                }
                            }
                            return Some(Box::new(RenderedValue::CpuList {
                                cpus: format_cpu_list(&cpus),
                            }));
                        } else {
                            deref_skipped_reason = Some(format!(
                                "cpumask kptr plausibility gate rejected: bits[0] top \
                                byte is 0xff at 0x{val:x} (likely freed slab object)"
                            ));
                        }
                    }
                    None
                }),
            };
            RenderedValue::Ptr {
                value: val,
                deref,
                deref_skipped_reason,
                cast_annotation,
            }
        }
        Type::Struct(s) | Type::Union(s) => {
            render_struct(btf, &s, peeled_type_id, bytes, depth, mem, visited)
        }
        Type::Array(arr) => {
            let len = arr.len();
            let Ok(elem_type_id) = arr.get_type_id() else {
                return RenderedValue::Unsupported {
                    reason: "array element type id not resolvable".to_string(),
                };
            };
            let Ok(elem_ty) = btf.resolve_chained_type(&arr) else {
                return RenderedValue::Unsupported {
                    reason: "array element type not resolvable".to_string(),
                };
            };
            let Some(elem_size) = type_size(btf, &elem_ty) else {
                return RenderedValue::Unsupported {
                    reason: "array element size not resolvable".to_string(),
                };
            };
            // Flexible array member: BTF says len 0 but bytes exist at the
            // site; the real length is unknowable from BTF alone.
            if len == 0 && elem_size > 0 && !bytes.is_empty() {
                return RenderedValue::Unsupported {
                    reason: format!(
                        "flex array (BTF len=0); runtime length not \
                        representable in BTF, {} bytes available at site",
                        bytes.len()
                    ),
                };
            }
            let cap = len.min(MAX_ARRAY_ELEMS);
            let mut elements = Vec::with_capacity(cap);
            for i in 0..cap {
                let start = i * elem_size;
                let end = start + elem_size;
                // Ran out of bytes: record one truncated tail and stop.
                if end > bytes.len() {
                    let avail = &bytes[start.min(bytes.len())..];
                    elements.push(RenderedValue::Truncated {
                        needed: elem_size,
                        had: avail.len(),
                        partial: Box::new(RenderedValue::Bytes {
                            hex: hex_dump(avail),
                        }),
                    });
                    break;
                }
                elements.push(render_value_inner(
                    btf,
                    elem_type_id,
                    &bytes[start..end],
                    depth + 1,
                    mem,
                    visited,
                ));
            }
            RenderedValue::Array { len, elements }
        }
        Type::Fwd(_) => RenderedValue::Unsupported {
            reason: "forward declaration: type body not in BTF".to_string(),
        },
        Type::Func(_) | Type::FuncProto(_) => RenderedValue::Unsupported {
            reason: "function type: no value bytes to render".to_string(),
        },
        Type::Datasec(ds) => {
            render_datasec(btf, &ds, peeled_type_id, bytes, depth, mem, visited)
        }
        Type::Var(var) => {
            // A Var wraps its underlying type; render through to it.
            let Ok(inner_id) = var.get_type_id() else {
                return RenderedValue::Unsupported {
                    reason: "var type id not resolvable".to_string(),
                };
            };
            render_value_inner(btf, inner_id, bytes, depth + 1, mem, visited)
        }
        Type::Void => RenderedValue::Unsupported {
            reason: "void: no value bytes to render".to_string(),
        },
        // peel_modifiers_with_id should have removed these; reaching here
        // implies a modifier cycle or peel failure.
        Type::Volatile(_)
        | Type::Const(_)
        | Type::Restrict(_)
        | Type::Typedef(_)
        | Type::TypeTag(_)
        | Type::DeclTag(_) => RenderedValue::Unsupported {
            reason: "unpeeled modifier (BTF cycle?)".to_string(),
        },
    }
}
/// Decode a BTF integer from little-endian `bytes`.
///
/// Bools treat any non-zero byte as true; 1-byte chars become `Char`;
/// integers wider than 8 bytes fall back to a hex dump; otherwise the value
/// is sign-extended (or not) per the BTF signedness flag.
fn render_int(int: &btf_rs::Int, bytes: &[u8]) -> RenderedValue {
    let needed = int.size();
    // Not enough bytes: report truncation with a hex dump of what we have.
    let Some(data) = bytes.get(..needed) else {
        return RenderedValue::Truncated {
            needed,
            had: bytes.len(),
            partial: Box::new(RenderedValue::Bytes {
                hex: hex_dump(bytes),
            }),
        };
    };
    if int.is_bool() && needed >= 1 {
        return RenderedValue::Bool {
            value: data.iter().any(|&b| b != 0),
        };
    }
    if int.is_char() && needed == 1 {
        return RenderedValue::Char { value: data[0] };
    }
    // Wider than u64: no integer representation, dump raw bytes.
    if needed > 8 {
        return RenderedValue::Bytes {
            hex: hex_dump(data),
        };
    }
    let raw = read_uint_le(data);
    let bits = (needed * 8) as u32;
    if int.is_signed() {
        RenderedValue::Int {
            bits,
            value: sign_extend(raw, needed * 8) as i64,
        }
    } else {
        RenderedValue::Uint { bits, value: raw }
    }
}
/// Decode a little-endian IEEE float of `size` bytes (4 or 8) into an f64;
/// other sizes are reported as unsupported.
fn render_float(size: usize, bytes: &[u8]) -> RenderedValue {
    let Some(data) = bytes.get(..size) else {
        return RenderedValue::Truncated {
            needed: size,
            had: bytes.len(),
            partial: Box::new(RenderedValue::Bytes {
                hex: hex_dump(bytes),
            }),
        };
    };
    let value = if size == 4 {
        f32::from_le_bytes(data.try_into().unwrap()) as f64
    } else if size == 8 {
        f64::from_le_bytes(data.try_into().unwrap())
    } else {
        return RenderedValue::Unsupported {
            reason: format!("unsupported float size {size}"),
        };
    };
    RenderedValue::Float {
        bits: (size * 8) as u32,
        value,
    }
}
/// Render a struct/union body: special-cases known cpumask types, renders
/// each member, flattens anonymous struct members, and wraps the result in
/// `Truncated` when fewer bytes than the BTF size were supplied.
fn render_struct(
    btf: &Btf,
    s: &Struct,
    parent_type_id: u32,
    bytes: &[u8],
    depth: u32,
    mem: Option<&dyn MemReader>,
    visited: &mut HashSet<u64>,
) -> RenderedValue {
    let type_name = btf.resolve_name(s).ok().filter(|n| !n.is_empty());
    let max_cpus = mem.map(|m| m.nr_cpu_ids()).unwrap_or(u32::MAX);
    // Well-known cpumask-carrying types decode directly into a cpu list.
    if let Some(ref name) = type_name {
        match name.as_str() {
            "cpumask" | "cpumask_t" => {
                if let Some(cpu_list) = try_render_cpumask_bits(bytes, max_cpus) {
                    return cpu_list;
                }
            }
            "bpf_cpumask" => {
                if let Some(cpu_list) = try_render_cpumask_bits(bytes, max_cpus) {
                    return cpu_list;
                }
            }
            // scx_bitmap: the mask words start after an 8-byte header —
            // NOTE(review): presumably a length/capacity field; confirm
            // against the scx_bitmap definition.
            "scx_bitmap" => {
                if bytes.len() >= 16
                    && let Some(cpu_list) = try_render_cpumask_bits(&bytes[8..], max_cpus)
                {
                    return cpu_list;
                }
            }
            _ => {}
        }
    }
    let truncated = bytes.len() < s.size();
    let mut members = Vec::with_capacity(s.members.len());
    for m in &s.members {
        let bit_off = m.bit_offset() as usize;
        let byte_off = bit_off / 8;
        // Member starts past the captured bytes of a truncated struct: skip.
        if byte_off >= bytes.len() && bytes.len() < s.size() {
            continue;
        }
        let name = btf.resolve_name(m).unwrap_or_default();
        let value = render_member(btf, m, Some(parent_type_id), bytes, depth, mem, visited);
        // Flatten anonymous struct members into the parent's member list.
        if name.is_empty()
            && let RenderedValue::Struct { members: inner, .. } = value
        {
            members.extend(inner);
            continue;
        }
        members.push(RenderedMember { name, value });
    }
    let rendered = RenderedValue::Struct { type_name, members };
    if truncated {
        RenderedValue::Truncated {
            needed: s.size(),
            had: bytes.len(),
            partial: Box::new(rendered),
        }
    } else {
        rendered
    }
}
/// If the member at `cast_key` = (parent type id, byte offset) has a recorded
/// cast and is stored as a plain unsigned 64-bit integer, reinterpret its
/// value as a pointer via `render_cast_pointer`. Returns `None` when no
/// reader, no cast hit, wrong storage type, or too few bytes.
fn try_cast_intercept(
    btf: &Btf,
    cast_key: (u32, usize),
    peeled: &Type,
    field_bytes: &[u8],
    depth: u32,
    mem: Option<&dyn MemReader>,
    visited: &mut HashSet<u64>,
) -> Option<RenderedValue> {
    let reader = mem?;
    // Only a bare u64 field can hold a cast-annotated pointer.
    let int = match peeled {
        Type::Int(i) => i,
        _ => return None,
    };
    if int.size() != 8 || int.is_signed() || int.is_bool() || int.is_char() {
        return None;
    }
    let (parent_type_id, byte_off) = cast_key;
    let hit = reader.cast_lookup(parent_type_id, u32::try_from(byte_off).ok()?)?;
    let raw: [u8; 8] = field_bytes.get(..8)?.try_into().ok()?;
    Some(render_cast_pointer(
        btf,
        hit,
        u64::from_le_bytes(raw),
        depth,
        reader,
        visited,
    ))
}
/// Render a BTF DATASEC (e.g. `.data`, `.bss`) as a pseudo-struct: one
/// member per contained variable, placed at its recorded offset. Variables
/// that cannot be resolved become `Unsupported` members; out-of-range
/// offsets produce `Truncated` members.
fn render_datasec(
    btf: &Btf,
    ds: &btf_rs::Datasec,
    parent_type_id: u32,
    bytes: &[u8],
    depth: u32,
    mem: Option<&dyn MemReader>,
    visited: &mut HashSet<u64>,
) -> RenderedValue {
    let type_name = btf.resolve_name(ds).ok().filter(|n| !n.is_empty());
    let mut members = Vec::with_capacity(ds.variables.len());
    for var_info in &ds.variables {
        let offset = var_info.offset() as usize;
        let size = var_info.size();
        let chained = match btf.resolve_chained_type(var_info) {
            Ok(t) => t,
            Err(_) => {
                members.push(RenderedMember {
                    name: String::new(),
                    value: RenderedValue::Unsupported {
                        reason: "datasec var type not resolvable".to_string(),
                    },
                });
                continue;
            }
        };
        // Datasec entries must resolve to Var types.
        let var = match chained {
            Type::Var(v) => v,
            other => {
                members.push(RenderedMember {
                    name: String::new(),
                    value: RenderedValue::Unsupported {
                        reason: format!("datasec entry resolved to non-Var ({})", other.name()),
                    },
                });
                continue;
            }
        };
        let var_name = btf.resolve_name(&var).unwrap_or_default();
        let inner_id = match var.get_type_id() {
            Ok(id) => id,
            Err(_) => {
                members.push(RenderedMember {
                    name: var_name,
                    value: RenderedValue::Unsupported {
                        reason: "var underlying type id not resolvable".to_string(),
                    },
                });
                continue;
            }
        };
        // A recorded cast for this (datasec, offset) slot takes priority over
        // plain rendering of the stored integer.
        let cast_intercept = peel_modifiers(btf, inner_id).and_then(|inner_ty| {
            let field_bytes = bytes.get(offset..).unwrap_or_default();
            try_cast_intercept(
                btf,
                (parent_type_id, offset),
                &inner_ty,
                field_bytes,
                depth,
                mem,
                visited,
            )
        });
        if let Some(rv) = cast_intercept {
            members.push(RenderedMember {
                name: var_name,
                value: rv,
            });
            continue;
        }
        // checked_add guards against offset+size overflow on hostile BTF.
        let end = offset.checked_add(size);
        let value = match end {
            Some(end) if end <= bytes.len() => {
                render_value_inner(btf, inner_id, &bytes[offset..end], depth + 1, mem, visited)
            }
            _ => {
                // Variable extends past the captured section bytes: render
                // what is available and mark it truncated.
                let avail_start = offset.min(bytes.len());
                let avail = &bytes[avail_start..];
                let partial = render_value_inner(btf, inner_id, avail, depth + 1, mem, visited);
                RenderedValue::Truncated {
                    needed: size,
                    had: avail.len(),
                    partial: Box::new(partial),
                }
            }
        };
        members.push(RenderedMember {
            name: var_name,
            value,
        });
    }
    RenderedValue::Struct { type_name, members }
}
/// Renders a single struct/union member from its parent's raw bytes.
///
/// Handles, in order: bitfields (delegated to `render_bitfield`), the
/// cast-intercept path for u64 fields flagged by cast analysis, a special
/// path for arrays of plain u64 where any element has a cast hit (each
/// element is then chased individually), and finally a plain slice-and-render
/// with `Truncated` wrapping when the parent buffer is too short.
/// `parent_type_id` is `None` when there is no cast-lookup context.
fn render_member(
    btf: &Btf,
    m: &Member,
    parent_type_id: Option<u32>,
    parent_bytes: &[u8],
    depth: u32,
    mem: Option<&dyn MemReader>,
    visited: &mut HashSet<u64>,
) -> RenderedValue {
    let bit_off = m.bit_offset() as usize;
    let Ok(member_type_id) = m.get_type_id() else {
        return RenderedValue::Unsupported {
            reason: "member has no type id".to_string(),
        };
    };
    // Bitfields carry an explicit width; extract bit-by-bit.
    if let Some(width) = m.bitfield_size()
        && width > 0
    {
        return render_bitfield(btf, member_type_id, parent_bytes, bit_off, width as usize);
    }
    // Non-bitfield members must start on a byte boundary.
    if !bit_off.is_multiple_of(8) {
        return RenderedValue::Unsupported {
            reason: format!("non-bitfield member at non-byte bit offset {bit_off}"),
        };
    }
    let byte_off = bit_off / 8;
    let Some(member_ty) = peel_modifiers(btf, member_type_id) else {
        return RenderedValue::Unsupported {
            reason: "member type modifiers unresolvable".to_string(),
        };
    };
    let Some(size) = type_size(btf, &member_ty) else {
        return RenderedValue::Unsupported {
            reason: "member type size unresolvable".to_string(),
        };
    };
    // A u64 field with a cast-analysis hit is rendered as a typed pointer.
    let cast_intercept = parent_type_id.and_then(|parent| {
        let field_bytes = parent_bytes.get(byte_off..).unwrap_or_default();
        try_cast_intercept(
            btf,
            (parent, byte_off),
            &member_ty,
            field_bytes,
            depth,
            mem,
            visited,
        )
    });
    if let Some(rv) = cast_intercept {
        return rv;
    }
    // Array-of-plain-u64 special case: if cast analysis flagged ANY element
    // offset, render element-wise so each slot can be chased as a pointer.
    // NOTE(review): elem_size is computed from a second peel of the same
    // element type id and then re-peeled as elem_term — redundant but harmless.
    if let Some(parent) = parent_type_id
        && let Type::Array(arr) = &member_ty
        && let (Ok(elem_tid), Some(elem_size)) = (
            arr.get_type_id(),
            peel_modifiers(btf, arr.get_type_id().unwrap_or(0)).and_then(|t| type_size(btf, &t)),
        )
        && elem_size == 8
        && let Some(elem_term) = peel_modifiers(btf, elem_tid)
        && matches!(
            elem_term,
            Type::Int(ref i) if i.size() == 8 && !i.is_signed() && !i.is_bool() && !i.is_char()
        )
    {
        let arr_len = arr.len();
        let has_any_cast = mem.is_some_and(|m| {
            (0..arr_len).any(|i| {
                let elem_off = (byte_off + i * 8) as u32;
                m.cast_lookup(parent, elem_off).is_some()
            })
        });
        if has_any_cast {
            // Cap the rendered element count; `len` still reports the real length.
            let cap = arr_len.min(MAX_ARRAY_ELEMS);
            let mut elements = Vec::with_capacity(cap);
            for i in 0..cap {
                let elem_off = byte_off + i * 8;
                let elem_bytes = parent_bytes.get(elem_off..elem_off + 8).unwrap_or_default();
                if let Some(rv) = try_cast_intercept(
                    btf,
                    (parent, elem_off),
                    &elem_term,
                    elem_bytes,
                    depth + 1,
                    mem,
                    visited,
                ) {
                    elements.push(rv);
                } else {
                    // No cast hit for this slot: render as a plain value.
                    elements.push(render_value_inner(
                        btf,
                        elem_tid,
                        elem_bytes,
                        depth + 1,
                        mem,
                        visited,
                    ));
                }
            }
            return RenderedValue::Array {
                len: arr_len,
                elements,
            };
        }
    }
    // Plain member: slice out [byte_off, byte_off + size) or wrap in Truncated.
    let end = byte_off.checked_add(size);
    match end {
        Some(end) if end <= parent_bytes.len() => render_value_inner(
            btf,
            member_type_id,
            &parent_bytes[byte_off..end],
            depth + 1,
            mem,
            visited,
        ),
        _ => {
            let avail_start = byte_off.min(parent_bytes.len());
            let avail = &parent_bytes[avail_start..];
            let partial = render_value_inner(btf, member_type_id, avail, depth + 1, mem, visited);
            RenderedValue::Truncated {
                needed: size,
                had: avail.len(),
                partial: Box::new(partial),
            }
        }
    }
}
/// Upper bound, in bytes, on any single pointer-chase read (arena or kernel).
const POINTER_CHASE_CAP: usize = 4096;
/// Decision returned by `chase_gate`: whether a pointer value may be dereferenced.
enum ChaseGate {
    /// Do not dereference. `reason: None` means skip silently (null pointer or
    /// depth limit); `Some(..)` carries a user-visible explanation (e.g. a cycle).
    Skip { reason: Option<String> },
    /// Safe to dereference.
    Proceed,
}
/// Gates a pointer dereference: null pointers and depth-limited chases are
/// skipped silently, revisited addresses are skipped with a cycle message,
/// and everything else may proceed. The check order matters — a null value
/// never reports a cycle, and a cycle message wins over the depth limit.
fn chase_gate(val: u64, depth: u32, visited: &HashSet<u64>) -> ChaseGate {
    if val == 0 {
        ChaseGate::Skip { reason: None }
    } else if visited.contains(&val) {
        ChaseGate::Skip {
            reason: Some(format!("cycle → 0x{val:x}")),
        }
    } else if depth >= MAX_RENDER_DEPTH {
        ChaseGate::Skip { reason: None }
    } else {
        ChaseGate::Proceed
    }
}
/// Produces a human-readable explanation for why a pointer-chase target type
/// has no computable storage size, keyed on the target's BTF kind.
/// `kind_label` identifies the chase flavor (e.g. "arena chase", "kernel cast").
fn unsizable_chase_reason(
    btf: &Btf,
    kind_label: &'static str,
    target_type_id: u32,
    target_ty: &Type,
) -> String {
    match target_ty {
        // Forward declaration: the struct/union body lives in some other BTF blob.
        Type::Fwd(fwd) => {
            let aggregate = if fwd.is_union() { "union" } else { "struct" };
            let name = btf.resolve_name(fwd).ok().filter(|n| !n.is_empty());
            match name {
                Some(n) => format!(
                    "{kind_label} target {aggregate} {n} (type id \
                     {target_type_id}) is a forward declaration; \
                     body not in this BTF"
                ),
                None => format!(
                    "{kind_label} target type id {target_type_id} \
                     is an anonymous {aggregate} forward declaration; \
                     body not in this BTF"
                ),
            }
        }
        Type::Func(_) => format!(
            "{kind_label} target type id {target_type_id} is a \
             function (BTF_KIND_FUNC); functions have no storage size"
        ),
        Type::FuncProto(_) => format!(
            "{kind_label} target type id {target_type_id} is a \
             function prototype (BTF_KIND_FUNC_PROTO); prototypes \
             have no storage size"
        ),
        Type::Datasec(_) => format!(
            "{kind_label} target type id {target_type_id} is a \
             datasec (BTF_KIND_DATASEC); not a pointer chase target"
        ),
        Type::Var(_) => format!(
            "{kind_label} target type id {target_type_id} is a \
             var (BTF_KIND_VAR); not a pointer chase target"
        ),
        Type::Void => format!(
            "{kind_label} target type id {target_type_id} is void; \
             chasing a void* requires runtime type info"
        ),
        // Modifiers are peeled before sizing, so a surviving DeclTag means
        // the chain itself is malformed.
        Type::DeclTag(_) => format!(
            "{kind_label} target type id {target_type_id} is a \
             decl-tag (BTF_KIND_DECL_TAG); modifiers should have \
             peeled (malformed BTF chain?)"
        ),
        _ => format!(
            "{kind_label} target type id {target_type_id} has \
             unresolvable size"
        ),
    }
}
/// Result of attempting to dereference an arena pointer.
struct ArenaChaseOutcome {
    // Rendered pointee on success; `None` when the chase was skipped or failed.
    deref: Option<Box<RenderedValue>>,
    // Human-readable explanation when `deref` is `None`.
    reason: Option<String>,
    // True when the target type was discovered via the sdt_alloc machinery
    // (affects the cast annotation on the resulting Ptr).
    sdt_alloc_resolved: bool,
}
/// For a forward-declared target type, asks the sdt_alloc bridge whether it
/// knows the concrete type behind arena address `val`: the primary index
/// first, then the metadata fallback. Non-Fwd targets never consult the
/// bridge.
fn try_sdt_alloc_bridge(
    mem: &dyn MemReader,
    val: u64,
    target_ty: &Type,
) -> Option<ArenaResolveHit> {
    match target_ty {
        Type::Fwd(_) => mem
            .resolve_arena_type(val)
            .or_else(|| mem.resolve_arena_type_meta_fallback(val)),
        _ => None,
    }
}
/// Drops the first `header_skip` bytes (sdt_alloc header) from a raw read.
/// Returns `None` when the buffer is shorter than the requested skip.
fn apply_header_skip(raw_bytes: &[u8], header_skip: usize) -> Option<&[u8]> {
    if header_skip > raw_bytes.len() {
        None
    } else {
        Some(&raw_bytes[header_skip..])
    }
}
/// Thin `MemReader` wrapper installed while recursing into a type that was
/// resolved in a *sibling* BTF (cross-BTF Fwd resolution).
///
/// NOTE(review): only the methods below are forwarded to the inner reader;
/// any other `MemReader` methods fall back to their trait defaults —
/// presumably because lookups keyed on type ids from the original BTF (e.g.
/// cast/arena-type lookups) would be meaningless against sibling-BTF ids.
/// TODO confirm against the `MemReader` trait definition.
struct CrossBtfMemReader<'a> {
    inner: &'a dyn MemReader,
}
impl MemReader for CrossBtfMemReader<'_> {
    fn read_kva(&self, kva: u64, len: usize) -> Option<Vec<u8>> {
        self.inner.read_kva(kva, len)
    }
    fn is_arena_addr(&self, addr: u64) -> bool {
        self.inner.is_arena_addr(addr)
    }
    fn read_arena(&self, addr: u64, len: usize) -> Option<Vec<u8>> {
        self.inner.read_arena(addr, len)
    }
    fn nr_cpu_ids(&self) -> u32 {
        self.inner.nr_cpu_ids()
    }
    fn cross_btf_resolve_fwd(&self, name: &str, kind: FwdKind) -> Option<CrossBtfRef<'_>> {
        self.inner.cross_btf_resolve_fwd(name, kind)
    }
    fn is_already_rendered(&self, addr: u64) -> bool {
        self.inner.is_already_rendered(addr)
    }
}
/// A fully resolved pointer-chase target: the BTF to render with, the
/// effective (modifier-peeled, Fwd-resolved) type id, its size, and any
/// sdt_alloc header bytes to skip before the payload begins.
struct ResolvedTarget<'a> {
    effective_type_id: u32,
    // BTF in which `effective_type_id` is valid (a sibling BTF after a
    // cross-BTF Fwd resolution).
    current_btf: &'a Btf,
    // Target type's size in bytes, per `type_size`.
    btf_size: usize,
    // Bytes to skip at the front of the raw read (sdt_alloc header); 0 otherwise.
    header_skip: usize,
    // True when the type came from the sdt_alloc bridge.
    sdt_alloc_resolved: bool,
    // Present when resolved in a sibling BTF; triggers the CrossBtfMemReader wrap.
    cross_btf_hit: Option<CrossBtfRef<'a>>,
}
/// Outcome of `resolve_chase_target`: a target ready to read, or a skip with
/// a user-visible reason.
enum ChaseResolve<'a> {
    Ready(ResolvedTarget<'a>),
    Skip {
        reason: String,
        sdt_alloc_resolved: bool,
    },
}
/// Resolves the BTF type behind a pointer-chase target id into a
/// `ResolvedTarget`.
///
/// Resolution order: peel modifiers (resolving Fwd within this BTF) → if
/// still a Fwd, consult the sdt_alloc bridge → if still a Fwd, look for the
/// full body in a sibling BTF → compute and validate the size. Any failure
/// yields `ChaseResolve::Skip` with an explanatory reason.
fn resolve_chase_target<'a>(
    btf: &'a Btf,
    mem: &'a dyn MemReader,
    val: u64,
    target_type_id: u32,
    kind_label: &'static str,
) -> ChaseResolve<'a> {
    let Some((mut target_ty, mut effective_type_id)) =
        peel_modifiers_resolving_fwd(btf, target_type_id)
    else {
        return ChaseResolve::Skip {
            reason: format!("{kind_label} target type id {target_type_id} unresolvable"),
            sdt_alloc_resolved: false,
        };
    };
    // sdt_alloc bridge: for Fwd targets, the allocator index may know the
    // concrete type and how many header bytes precede the payload.
    let bridge = try_sdt_alloc_bridge(mem, val, &target_ty);
    let (sdt_alloc_resolved, header_skip) = match &bridge {
        Some(hit) => {
            if let Some((resolved_ty, resolved_id)) =
                peel_modifiers_resolving_fwd(btf, hit.target_type_id)
            {
                target_ty = resolved_ty;
                effective_type_id = resolved_id;
            }
            (true, hit.header_skip)
        }
        None => (false, 0usize),
    };
    // Still a Fwd after the bridge: try to resolve the body in a sibling BTF.
    let cross_btf_hit = if matches!(target_ty, Type::Fwd(_)) {
        try_cross_btf_fwd_resolve(mem, btf, &target_ty)
    } else {
        None
    };
    let current_btf: &Btf = match cross_btf_hit {
        Some(hit) => {
            target_ty = match hit.btf.resolve_type_by_id(hit.type_id) {
                Ok(ty) => ty,
                Err(_) => {
                    tracing::debug!(
                        kind_label,
                        hit_type_id = hit.type_id,
                        "cross-BTF Fwd resolve returned a type_id that does not resolve \
                         in the sibling BTF",
                    );
                    return ChaseResolve::Skip {
                        reason: format!(
                            "{kind_label}: cross-BTF Fwd type id {} unresolved in sibling BTF",
                            hit.type_id
                        ),
                        sdt_alloc_resolved,
                    };
                }
            };
            effective_type_id = hit.type_id;
            hit.btf
        }
        None => btf,
    };
    // Size must be known and non-zero to read the target.
    let btf_size = {
        let Some(sz) = type_size(current_btf, &target_ty) else {
            return ChaseResolve::Skip {
                reason: unsizable_chase_reason(current_btf, kind_label, target_type_id, &target_ty),
                sdt_alloc_resolved,
            };
        };
        sz
    };
    if btf_size == 0 {
        return ChaseResolve::Skip {
            reason: format!(
                "{kind_label} target type id {target_type_id} BTF size is 0 (incomplete type)"
            ),
            sdt_alloc_resolved,
        };
    }
    ChaseResolve::Ready(ResolvedTarget {
        effective_type_id,
        current_btf,
        btf_size,
        header_skip,
        sdt_alloc_resolved,
        cross_btf_hit,
    })
}
/// Renders the pointee bytes of a resolved chase target, maintaining the
/// cycle-detection set around the recursion.
///
/// `val` is inserted into `visited` for the duration of the recursive render
/// and removed afterwards, so the same address can appear on independent
/// branches but never within its own subtree. For cross-BTF targets, the
/// memory reader is wrapped in `CrossBtfMemReader` for the recursion.
/// `truncated_at_cap` wraps the result in `Truncated` when the read was
/// capped below the type's full size.
fn recurse_into_target(
    resolved: &ResolvedTarget<'_>,
    target_bytes: &[u8],
    val: u64,
    depth: u32,
    mem: &dyn MemReader,
    visited: &mut HashSet<u64>,
    truncated_at_cap: bool,
) -> Box<RenderedValue> {
    // Mark this address while rendering its pointee (cycle guard).
    visited.insert(val);
    let cross_btf_wrap = resolved
        .cross_btf_hit
        .as_ref()
        .map(|_| CrossBtfMemReader { inner: mem });
    let recurse_mem: &dyn MemReader = match &cross_btf_wrap {
        Some(w) => w,
        None => mem,
    };
    let inner = render_value_inner(
        resolved.current_btf,
        resolved.effective_type_id,
        target_bytes,
        depth + 1,
        Some(recurse_mem),
        visited,
    );
    // Unmark so sibling branches may render this address again.
    visited.remove(&val);
    if truncated_at_cap {
        Box::new(RenderedValue::Truncated {
            needed: resolved.btf_size,
            had: target_bytes.len(),
            partial: Box::new(inner),
        })
    } else {
        Box::new(inner)
    }
}
/// Dereferences a pointer into BPF arena memory and renders the pointee.
///
/// `target_type_id == 0` means the cast analyzer deferred type resolution to
/// runtime (STX-flow path); in that case the type is discovered, in order,
/// via: the arena-type bridge (`resolve_arena_type`), then — with or without
/// a supplied `alloc_size` — per-size candidates from `alloc_size_types()`
/// (cross-BTF struct resolution first, then `discover_payload_btf_id`).
/// Otherwise the target is resolved through the generic
/// `resolve_chase_target` path. The final read is capped at
/// `POINTER_CHASE_CAP` and skips any sdt_alloc header bytes.
fn chase_arena_pointer(
    btf: &Btf,
    target_type_id: u32,
    alloc_size: Option<u64>,
    val: u64,
    mem: &dyn MemReader,
    depth: u32,
    visited: &mut HashSet<u64>,
) -> ArenaChaseOutcome {
    // Avoid duplicating objects already emitted in the sdt_allocations view.
    if mem.is_already_rendered(val) {
        return ArenaChaseOutcome {
            deref: None,
            reason: Some("already rendered in sdt_allocations".to_string()),
            sdt_alloc_resolved: false,
        };
    }
    let resolved = if target_type_id == 0 {
        // Deferred-resolve path: ask the allocator bridge for the type first.
        let bridge_hit = mem.resolve_arena_type(val);
        let (effective_target_id, header_skip, resolution_source) = match bridge_hit {
            Some(hit) => (hit.target_type_id, hit.header_skip, "bridge"),
            None => {
                let size = match alloc_size {
                    Some(s) => s,
                    None => {
                        // No bridge entry and no allocation size: probe every
                        // known (size, struct-name) candidate against the arena.
                        for (candidate_size, struct_name) in mem.alloc_size_types() {
                            // Candidate must at least be readable at its full size.
                            if mem.read_arena(val, *candidate_size as usize).is_none() {
                                continue;
                            }
                            // First preference: the named struct from a sibling BTF.
                            if let Some(cross_ref) =
                                mem.cross_btf_resolve_fwd(struct_name, FwdKind::Struct)
                                && let Ok(ty) = cross_ref.btf.resolve_type_by_id(cross_ref.type_id)
                                && let Some(btf_size) = type_size(cross_ref.btf, &ty)
                                && btf_size > 0
                                && btf_size <= *candidate_size as usize
                            {
                                let read_size = btf_size.min(POINTER_CHASE_CAP);
                                let truncated = btf_size > POINTER_CHASE_CAP;
                                if let Some(raw_bytes) = mem.read_arena(val, read_size) {
                                    let payload = recurse_into_target(
                                        &ResolvedTarget {
                                            effective_type_id: cross_ref.type_id,
                                            current_btf: cross_ref.btf,
                                            btf_size,
                                            header_skip: 0,
                                            sdt_alloc_resolved: false,
                                            cross_btf_hit: Some(CrossBtfRef {
                                                btf: cross_ref.btf,
                                                type_id: cross_ref.type_id,
                                            }),
                                        },
                                        &raw_bytes,
                                        val,
                                        depth,
                                        mem,
                                        visited,
                                        truncated,
                                    );
                                    return ArenaChaseOutcome {
                                        deref: Some(payload),
                                        reason: None,
                                        sdt_alloc_resolved: false,
                                    };
                                }
                            }
                            // Second preference: size-based discovery in the program BTF.
                            let choice = super::sdt_alloc::discover_payload_btf_id(
                                btf,
                                *candidate_size as usize,
                                "",
                            );
                            if choice.target_type_id != 0
                                && let Some((resolved_ty, resolved_id)) =
                                    peel_modifiers_resolving_fwd(btf, choice.target_type_id)
                                && let Some(btf_size) = type_size(btf, &resolved_ty)
                                && btf_size > 0
                            {
                                let read_size = btf_size.min(POINTER_CHASE_CAP);
                                let truncated = btf_size > POINTER_CHASE_CAP;
                                if let Some(raw_bytes) = mem.read_arena(val, read_size) {
                                    let payload = recurse_into_target(
                                        &ResolvedTarget {
                                            effective_type_id: resolved_id,
                                            current_btf: btf,
                                            btf_size,
                                            header_skip: 0,
                                            sdt_alloc_resolved: true,
                                            cross_btf_hit: None,
                                        },
                                        &raw_bytes,
                                        val,
                                        depth,
                                        mem,
                                        visited,
                                        truncated,
                                    );
                                    return ArenaChaseOutcome {
                                        deref: Some(payload),
                                        reason: None,
                                        sdt_alloc_resolved: true,
                                    };
                                }
                            }
                        }
                        // All candidates exhausted.
                        tracing::debug!(
                            val = format_args!("{:#x}", val),
                            "arena chase: STX-flow tagged slot as Arena (target_type_id=0, \
                             deferred resolve), but resolve_arena_type had no entry and \
                             no alloc_size was supplied; allocator pre-pass may not have \
                             populated the index for this allocator",
                        );
                        return ArenaChaseOutcome {
                            deref: None,
                            reason: Some(format!(
                                "arena chase: STX-flow path tagged slot as Arena with \
                                 deferred resolve; bridge had no entry for 0x{val:x}"
                            )),
                            sdt_alloc_resolved: false,
                        };
                    }
                };
                // alloc_size is known: try size-based discovery first.
                let choice = super::sdt_alloc::discover_payload_btf_id(btf, size as usize, "");
                if choice.target_type_id != 0 {
                    (choice.target_type_id, 0usize, "alloc_size")
                } else {
                    // Fall back to cross-BTF candidates matching this exact size.
                    for (candidate_size, struct_name) in mem.alloc_size_types() {
                        if *candidate_size != size {
                            continue;
                        }
                        if mem.read_arena(val, *candidate_size as usize).is_none() {
                            continue;
                        }
                        if let Some(cross_ref) =
                            mem.cross_btf_resolve_fwd(struct_name, FwdKind::Struct)
                            && let Ok(ty) = cross_ref.btf.resolve_type_by_id(cross_ref.type_id)
                            && let Some(btf_size) = type_size(cross_ref.btf, &ty)
                            && btf_size > 0
                            && btf_size <= *candidate_size as usize
                        {
                            let read_size = btf_size.min(POINTER_CHASE_CAP);
                            let truncated = btf_size > POINTER_CHASE_CAP;
                            if let Some(raw_bytes) = mem.read_arena(val, read_size) {
                                let payload = recurse_into_target(
                                    &ResolvedTarget {
                                        effective_type_id: cross_ref.type_id,
                                        current_btf: cross_ref.btf,
                                        btf_size,
                                        header_skip: 0,
                                        sdt_alloc_resolved: false,
                                        cross_btf_hit: Some(CrossBtfRef {
                                            btf: cross_ref.btf,
                                            type_id: cross_ref.type_id,
                                        }),
                                    },
                                    &raw_bytes,
                                    val,
                                    depth,
                                    mem,
                                    visited,
                                    truncated,
                                );
                                return ArenaChaseOutcome {
                                    deref: Some(payload),
                                    reason: None,
                                    sdt_alloc_resolved: false,
                                };
                            }
                        }
                    }
                    tracing::debug!(
                        val = format_args!("{:#x}", val),
                        alloc_size = size,
                        reason = %choice.reason,
                        "arena chase: STX-flow tagged slot as Arena (target_type_id=0, \
                         deferred resolve), bridge returned no entry, and alloc_size \
                         fallback could not resolve a unique BTF type",
                    );
                    return ArenaChaseOutcome {
                        deref: None,
                        reason: Some(format!(
                            "arena chase: STX-flow path tagged slot as Arena with \
                             deferred resolve; alloc_size={size} fallback unresolved \
                             ({reason})",
                            reason = choice.reason
                        )),
                        sdt_alloc_resolved: false,
                    };
                }
            }
        };
        // Turn the discovered id into a sized ResolvedTarget.
        let Some((resolved_ty, resolved_id)) =
            peel_modifiers_resolving_fwd(btf, effective_target_id)
        else {
            tracing::debug!(
                source = resolution_source,
                effective_target_id,
                "arena chase: resolution source returned target_type_id but the type \
                 does not resolve in the program BTF",
            );
            return ArenaChaseOutcome {
                deref: None,
                reason: Some(format!(
                    "arena chase: type id {effective_target_id} unresolved in BTF"
                )),
                sdt_alloc_resolved: true,
            };
        };
        let Some(btf_size) = type_size(btf, &resolved_ty) else {
            return ArenaChaseOutcome {
                deref: None,
                reason: Some(unsizable_chase_reason(
                    btf,
                    "arena chase",
                    effective_target_id,
                    &resolved_ty,
                )),
                sdt_alloc_resolved: true,
            };
        };
        if btf_size == 0 {
            return ArenaChaseOutcome {
                deref: None,
                reason: Some(format!(
                    "arena chase target type id {effective_target_id} \
                     BTF size is 0 (incomplete type)"
                )),
                sdt_alloc_resolved: true,
            };
        }
        ResolvedTarget {
            effective_type_id: resolved_id,
            current_btf: btf,
            btf_size,
            header_skip,
            sdt_alloc_resolved: true,
            cross_btf_hit: None,
        }
    } else {
        // Analyzer supplied a concrete type id: generic resolution path.
        match resolve_chase_target(btf, mem, val, target_type_id, "arena chase") {
            ChaseResolve::Ready(r) => r,
            ChaseResolve::Skip {
                reason,
                sdt_alloc_resolved,
            } => {
                return ArenaChaseOutcome {
                    deref: None,
                    reason: Some(reason),
                    sdt_alloc_resolved,
                };
            }
        }
    };
    // Read header + payload, capped; skip the header before rendering.
    let total_needed = resolved.header_skip.saturating_add(resolved.btf_size);
    let read_size = total_needed.min(POINTER_CHASE_CAP);
    let truncated_at_cap = total_needed > POINTER_CHASE_CAP;
    let Some(raw_bytes) = mem.read_arena(val, read_size) else {
        return ArenaChaseOutcome {
            deref: None,
            reason: Some(format!(
                "arena read failed (cross-page boundary or unmapped \
                 page); needed {read_size} bytes from 0x{val:x}"
            )),
            sdt_alloc_resolved: resolved.sdt_alloc_resolved,
        };
    };
    let Some(target_bytes) = apply_header_skip(&raw_bytes, resolved.header_skip) else {
        return ArenaChaseOutcome {
            deref: None,
            reason: Some(format!(
                "arena read at 0x{val:x} returned {} bytes; \
                 sdt_alloc bridge needs at least {} for header skip",
                raw_bytes.len(),
                resolved.header_skip
            )),
            sdt_alloc_resolved: resolved.sdt_alloc_resolved,
        };
    };
    let payload = recurse_into_target(
        &resolved,
        target_bytes,
        val,
        depth,
        mem,
        visited,
        truncated_at_cap,
    );
    ArenaChaseOutcome {
        deref: Some(payload),
        reason: None,
        sdt_alloc_resolved: resolved.sdt_alloc_resolved,
    }
}
/// Attempts to resolve a forward-declared struct/union in a sibling BTF by
/// name. Returns `None` for non-Fwd types, anonymous Fwds, or when no sibling
/// BTF provides the body.
fn try_cross_btf_fwd_resolve<'a>(
    mem: &'a dyn MemReader,
    entry_btf: &Btf,
    target_ty: &Type,
) -> Option<CrossBtfRef<'a>> {
    match target_ty {
        Type::Fwd(fwd) => {
            let name = entry_btf.resolve_name(fwd).ok().filter(|n| !n.is_empty())?;
            let kind = FwdKind::from_is_struct(fwd.is_struct());
            mem.cross_btf_resolve_fwd(&name, kind)
        }
        _ => None,
    }
}
/// Builds a `RenderedValue::Ptr` carrying the cast annotation for the given
/// address space and sdt_alloc provenance.
fn cast_ptr(
    value: u64,
    deref: Option<Box<RenderedValue>>,
    reason: Option<String>,
    addr_space: super::cast_analysis::AddrSpace,
    sdt_alloc_resolved: bool,
) -> RenderedValue {
    let annotation = cast_annotation_for(addr_space, sdt_alloc_resolved);
    RenderedValue::Ptr {
        value,
        deref,
        deref_skipped_reason: reason,
        cast_annotation: Some(Cow::Borrowed(annotation)),
    }
}
/// Static annotation text describing where a cast pointer points (arena vs
/// kernel) and whether its type came from the sdt_alloc machinery.
fn cast_annotation_for(
    addr_space: super::cast_analysis::AddrSpace,
    sdt_alloc_resolved: bool,
) -> &'static str {
    use super::cast_analysis::AddrSpace;
    match addr_space {
        AddrSpace::Arena if sdt_alloc_resolved => "cast→arena (sdt_alloc)",
        AddrSpace::Arena => "cast→arena",
        AddrSpace::Kernel if sdt_alloc_resolved => "cast→kernel (sdt_alloc)",
        AddrSpace::Kernel => "cast→kernel",
    }
}
/// Renders a cast-analysis pointer hit: gates the chase, dispatches to the
/// arena path when the value lies inside the arena window, otherwise chases
/// it as a kernel virtual address with several plausibility guards (page
/// boundary clamp, 0xff-top-byte freelist heuristic).
fn render_cast_pointer(
    btf: &Btf,
    hit: CastHit,
    value: u64,
    depth: u32,
    mem: &dyn MemReader,
    visited: &mut HashSet<u64>,
) -> RenderedValue {
    // Null / cycle / depth gate first — no memory access before this.
    if let ChaseGate::Skip { reason } = chase_gate(value, depth, visited) {
        return cast_ptr(value, None, reason, hit.addr_space, false);
    }
    if mem.is_arena_addr(value) {
        let outcome = chase_arena_pointer(
            btf,
            hit.target_type_id,
            hit.alloc_size,
            value,
            mem,
            depth,
            visited,
        );
        return cast_ptr(
            value,
            outcome.deref,
            outcome.reason,
            super::cast_analysis::AddrSpace::Arena,
            outcome.sdt_alloc_resolved,
        );
    }
    // target_type_id == 0 only makes sense for arena values (deferred resolve);
    // a kernel address with it cannot be chased.
    if hit.target_type_id == 0 {
        return cast_ptr(
            value,
            None,
            Some(format!(
                "kernel cast target unresolved (analyzer hinted Arena \
                 with deferred resolve, but runtime value 0x{value:x} \
                 fell outside the arena window)"
            )),
            super::cast_analysis::AddrSpace::Kernel,
            false,
        );
    }
    let resolved = match resolve_chase_target(btf, mem, value, hit.target_type_id, "kernel cast") {
        ChaseResolve::Ready(r) => r,
        ChaseResolve::Skip {
            reason,
            sdt_alloc_resolved,
        } => {
            return cast_ptr(
                value,
                None,
                Some(reason),
                super::cast_analysis::AddrSpace::Kernel,
                sdt_alloc_resolved,
            );
        }
    };
    // Clamp kernel reads to the current page in addition to the global cap.
    const PAGE_SIZE: u64 = 4096;
    let page_remaining = (PAGE_SIZE - (value % PAGE_SIZE)) as usize;
    let total_needed = resolved.header_skip.saturating_add(resolved.btf_size);
    let read_size = total_needed.min(POINTER_CHASE_CAP).min(page_remaining);
    let truncated_at_cap = total_needed > read_size;
    let Some(raw_bytes) = mem.read_kva(value, read_size) else {
        let suffix = if matches!(hit.addr_space, super::cast_analysis::AddrSpace::Arena) {
            " (cast analysis may have flagged a non-pointer field)"
        } else {
            ""
        };
        return cast_ptr(
            value,
            None,
            Some(format!(
                "kernel read_kva failed at 0x{value:x} \
                 (unmapped page or no PTE); needed {read_size} bytes{suffix}"
            )),
            super::cast_analysis::AddrSpace::Kernel,
            resolved.sdt_alloc_resolved,
        );
    };
    let Some(target_bytes) = apply_header_skip(&raw_bytes, resolved.header_skip) else {
        return cast_ptr(
            value,
            None,
            Some(format!(
                "kernel read_kva at 0x{value:x} returned {} bytes; \
                 sdt_alloc bridge needs at least {} for header skip",
                raw_bytes.len(),
                resolved.header_skip
            )),
            super::cast_analysis::AddrSpace::Kernel,
            resolved.sdt_alloc_resolved,
        );
    };
    // Plausibility heuristic: a first qword whose top byte is 0xff looks like
    // a kernel address stored by the slab freelist in a freed object.
    if target_bytes.len() >= 8 {
        let first_qword = u64::from_le_bytes(target_bytes[..8].try_into().unwrap());
        if first_qword >> 56 == 0xff {
            return cast_ptr(
                value,
                None,
                Some(format!(
                    "kernel cast plausibility gate rejected: first qword \
                     top byte is 0xff at 0x{value:x} (likely freed slab \
                     object freelist pointer)"
                )),
                super::cast_analysis::AddrSpace::Kernel,
                resolved.sdt_alloc_resolved,
            );
        }
    }
    let deref_payload = recurse_into_target(
        &resolved,
        target_bytes,
        value,
        depth,
        mem,
        visited,
        truncated_at_cap,
    );
    cast_ptr(
        value,
        Some(deref_payload),
        None,
        super::cast_analysis::AddrSpace::Kernel,
        resolved.sdt_alloc_resolved,
    )
}
/// Extracts and renders a bitfield of `width` bits starting at absolute bit
/// offset `bit_off` within `parent_bytes` (little-endian bit packing).
/// Signedness follows the underlying int/enum type; signed fields are
/// sign-extended before rendering.
fn render_bitfield(
    btf: &Btf,
    member_type_id: u32,
    parent_bytes: &[u8],
    bit_off: usize,
    width: usize,
) -> RenderedValue {
    // BTF bitfields are at most 64 bits wide.
    if width == 0 || width > 64 {
        return RenderedValue::Unsupported {
            reason: format!("bitfield width {width} out of range"),
        };
    }
    let byte_start = bit_off / 8;
    let bit_shift = bit_off % 8;
    let bits_needed = bit_shift + width;
    let bytes_needed = bits_needed.div_ceil(8);
    // Not enough source bytes: emit a hex dump of whatever is available.
    if byte_start + bytes_needed > parent_bytes.len() {
        let avail_start = byte_start.min(parent_bytes.len());
        let avail = &parent_bytes[avail_start..];
        return RenderedValue::Truncated {
            needed: bytes_needed,
            had: avail.len(),
            partial: Box::new(RenderedValue::Bytes {
                hex: hex_dump(avail),
            }),
        };
    }
    // Pack up to 9 bytes (7-bit shift + 64-bit width) little-endian into a
    // u128, then shift and mask the field out. The u128 width makes the
    // `packed >> bit_shift` and `1 << width` operations safe for width == 64.
    let mut buf = [0u8; 16];
    buf[..bytes_needed].copy_from_slice(&parent_bytes[byte_start..byte_start + bytes_needed]);
    let mut packed: u128 = 0;
    for (i, b) in buf[..bytes_needed].iter().enumerate() {
        packed |= (*b as u128) << (i * 8);
    }
    let raw = ((packed >> bit_shift) & ((1u128 << width) - 1)) as u64;
    let Some(member_ty) = peel_modifiers(btf, member_type_id) else {
        return RenderedValue::Unsupported {
            reason: "bitfield type modifiers unresolvable".to_string(),
        };
    };
    // Signedness comes from the peeled underlying type; anything that is not
    // an int/enum is treated as unsigned.
    let signed = match &member_ty {
        Type::Int(i) => i.is_signed(),
        Type::Enum(e) => e.is_signed(),
        Type::Enum64(e) => e.is_signed(),
        _ => false,
    };
    if signed {
        let value = sign_extend(raw, width) as i64;
        RenderedValue::Int {
            bits: width as u32,
            value,
        }
    } else {
        RenderedValue::Uint {
            bits: width as u32,
            value: raw,
        }
    }
}
/// Convenience wrapper around `peel_modifiers_with_id` that discards the
/// resolved type id and returns only the terminal type.
pub(crate) fn peel_modifiers(btf: &Btf, type_id: u32) -> Option<Type> {
    let (ty, _id) = peel_modifiers_with_id(btf, type_id)?;
    Some(ty)
}
/// Peels modifiers like `peel_modifiers_with_id`, and additionally tries to
/// replace a terminal forward declaration with the full struct/union body of
/// the same name from the SAME BTF, when one exists. Falls back to the Fwd
/// itself when no matching definition is found.
pub(crate) fn peel_modifiers_resolving_fwd(btf: &Btf, type_id: u32) -> Option<(Type, u32)> {
    let (ty, tid) = peel_modifiers_with_id(btf, type_id)?;
    let fwd = match &ty {
        Type::Fwd(f) => f,
        _ => return Some((ty, tid)),
    };
    let kind = FwdKind::from_is_struct(fwd.is_struct());
    // Anonymous or unresolvable names cannot be matched against definitions.
    let name = match btf.resolve_name(fwd) {
        Ok(n) if !n.is_empty() => n,
        _ => return Some((ty, tid)),
    };
    let candidate_ids = match btf.resolve_ids_by_name(&name) {
        Ok(ids) => ids,
        Err(_) => return Some((ty, tid)),
    };
    // First same-name candidate (other than the Fwd itself) whose peeled form
    // is an aggregate of the matching kind wins.
    let definition = candidate_ids
        .into_iter()
        .filter(|&cid| cid != tid)
        .filter_map(|cid| peel_modifiers_with_id(btf, cid))
        .find(|(candidate_ty, _)| {
            matches!(
                (candidate_ty, kind),
                (Type::Struct(_), FwdKind::Struct) | (Type::Union(_), FwdKind::Union)
            )
        });
    definition.or(Some((ty, tid)))
}
/// Strips modifier/typedef/tag wrappers from an already-resolved `Type`,
/// following the chain at most `MAX_MODIFIER_DEPTH` links. Returns `None`
/// when the chain is too deep (cycle guard) or a link fails to resolve.
pub(crate) fn peel_modifiers_from_type(btf: &Btf, start: Type) -> Option<Type> {
    let mut current = start;
    let mut budget = MAX_MODIFIER_DEPTH;
    loop {
        if budget == 0 {
            // Chain longer than the cap: treat as malformed.
            return None;
        }
        budget -= 1;
        let next = match &current {
            Type::Volatile(inner) => btf.resolve_chained_type(inner).ok()?,
            Type::Const(inner) => btf.resolve_chained_type(inner).ok()?,
            Type::Restrict(inner) => btf.resolve_chained_type(inner).ok()?,
            Type::Typedef(inner) => btf.resolve_chained_type(inner).ok()?,
            Type::TypeTag(inner) => btf.resolve_chained_type(inner).ok()?,
            Type::DeclTag(inner) => btf.resolve_chained_type(inner).ok()?,
            _ => return Some(current),
        };
        current = next;
    }
}
/// Resolves `type_id` and strips modifier/typedef/tag wrappers, following at
/// most `MAX_MODIFIER_DEPTH` links. Returns the terminal type together with
/// the id it was resolved from, or `None` on overly deep chains (cycle guard)
/// or resolution failure.
pub(crate) fn peel_modifiers_with_id(btf: &Btf, mut type_id: u32) -> Option<(Type, u32)> {
    let mut steps_left = MAX_MODIFIER_DEPTH;
    while steps_left > 0 {
        steps_left -= 1;
        let ty = btf.resolve_type_by_id(type_id).ok()?;
        type_id = match &ty {
            Type::Volatile(t) => t.get_type_id().ok()?,
            Type::Const(t) => t.get_type_id().ok()?,
            Type::Restrict(t) => t.get_type_id().ok()?,
            Type::Typedef(t) => t.get_type_id().ok()?,
            Type::TypeTag(t) => t.get_type_id().ok()?,
            Type::DeclTag(t) => t.get_type_id().ok()?,
            // Terminal type: report it with the id it resolved from.
            _ => return Some((ty, type_id)),
        };
    }
    None
}
/// Computes the storage size in bytes of a BTF type, following modifier and
/// typedef chains and multiplying out array dimensions.
///
/// Returns `None` for kinds without a storage size (Fwd, Func, FuncProto,
/// Void, Datasec, Var, DeclTag, ...) and for unresolvable chains. The array
/// size uses `checked_mul` so a malformed BTF with a huge element count
/// yields `None` instead of overflowing (in release builds the previous
/// `len * elem_size` would wrap silently and report a bogus size).
pub(crate) fn type_size(btf: &Btf, ty: &Type) -> Option<usize> {
    match ty {
        Type::Int(int) => Some(int.size()),
        Type::Float(f) => Some(f.size()),
        Type::Enum(e) => Some(e.size()),
        Type::Enum64(e) => Some(e.size()),
        Type::Struct(s) | Type::Union(s) => Some(s.size()),
        // Pointers are assumed 8 bytes (64-bit kernel target).
        Type::Ptr(_) => Some(8),
        Type::Array(arr) => {
            let len = arr.len();
            let elem_peeled = peel_modifiers(btf, arr.get_type_id().ok()?)?;
            let elem_size = type_size(btf, &elem_peeled)?;
            // Guard against overflow on hostile/malformed BTF array lengths.
            len.checked_mul(elem_size)
        }
        // Modifiers and typedefs are transparent for sizing.
        // NOTE(review): this recursion has no explicit depth cap; presumably
        // btf-rs rejects cyclic chains at parse time — TODO confirm.
        Type::Volatile(t) | Type::Const(t) | Type::Restrict(t) => {
            let inner = btf.resolve_chained_type(t).ok()?;
            type_size(btf, &inner)
        }
        Type::Typedef(t) | Type::TypeTag(t) => {
            let inner = btf.resolve_chained_type(t).ok()?;
            type_size(btf, &inner)
        }
        _ => None,
    }
}
/// Reads up to the first 8 bytes of `bytes` as a little-endian unsigned
/// integer; missing high bytes are treated as zero (an empty slice yields 0).
fn read_uint_le(bytes: &[u8]) -> u64 {
    bytes
        .iter()
        .take(8)
        .enumerate()
        .fold(0u64, |acc, (i, &b)| acc | (u64::from(b) << (8 * i)))
}
/// Sign-extends the low `bits` bits of `raw` to a full 64-bit value.
/// `bits == 0` or `bits >= 64` returns `raw` unchanged; otherwise bits above
/// the field are discarded before extension.
fn sign_extend(raw: u64, bits: usize) -> u64 {
    if !(1..64).contains(&bits) {
        return raw;
    }
    let field_mask = (1u64 << bits) - 1;
    let field = raw & field_mask;
    let sign_bit = 1u64 << (bits - 1);
    if field & sign_bit != 0 {
        // Negative: fill every bit above the field with ones.
        field | !field_mask
    } else {
        field
    }
}
#[cfg(test)]
mod tests;