use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use btf_rs::{Btf, BtfType, Type};
use libbpf_rs::libbpf_sys as bs;
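/// One eBPF instruction, laid out like the kernel's `struct bpf_insn`:
/// opcode byte, packed dst/src register nibbles, 16-bit offset, and a
/// 32-bit immediate.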
#[repr(C)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct BpfInsn {
pub code: u8,
regs: u8,
pub off: i16,
pub imm: i32,
}
impl BpfInsn {
#[cfg(test)]
pub const fn new(code: u8, dst: u8, src: u8, off: i16, imm: i32) -> Self {
Self {
code,
regs: (dst & 0x0f) | ((src & 0x0f) << 4),
off,
imm,
}
}
pub fn from_le_bytes(buf: [u8; 8]) -> Self {
let off = i16::from_le_bytes([buf[2], buf[3]]);
let imm = i32::from_le_bytes([buf[4], buf[5], buf[6], buf[7]]);
Self {
code: buf[0],
regs: buf[1],
off,
imm,
}
}
#[inline]
pub const fn dst_reg(&self) -> u8 {
self.regs & 0x0f
}
#[inline]
pub const fn src_reg(&self) -> u8 {
(self.regs >> 4) & 0x0f
}
#[inline]
pub(crate) fn set_src_reg(&mut self, src: u8) {
self.regs = (self.regs & 0x0f) | ((src & 0x0f) << 4);
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct InitialReg {
pub reg: u8,
pub struct_type_id: u32,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct FuncEntry {
pub insn_offset: usize,
pub func_proto_id: u32,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct SubprogReturn {
pub insn_offset: usize,
pub alloc_size: Option<u64>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct DatasecPointer {
pub insn_offset: usize,
pub datasec_type_id: u32,
pub base_offset: u32,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AddrSpace {
Arena,
Kernel,
}
impl std::fmt::Display for AddrSpace {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match self {
AddrSpace::Arena => "arena",
AddrSpace::Kernel => "kernel",
};
f.write_str(s)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct CastHit {
pub target_type_id: u32,
pub addr_space: AddrSpace,
pub alloc_size: Option<u64>,
}
pub type CastMap = BTreeMap<(u32, u32), CastHit>;
const CANDIDATE_SEARCH_SLACK: u32 = 65_536;
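/// Abstract value tracked for each register (and, via spills, each stack
/// slot) while walking the instruction stream.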
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum RegState {
Unknown,
Pointer {
struct_type_id: u32,
},
LoadedU64Field {
source_struct_id: u32,
field_offset: u32,
},
DatasecPointer {
datasec_type_id: u32,
base_offset: u32,
},
ArenaU64FromAlloc {
source: Option<(u32, u32)>,
alloc_size: Option<u64>,
},
FrameAddr {
offset: i16,
},
}
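/// One observed memory access (byte offset and size) made through a loaded
/// u64 field; accumulated per field as shape evidence for type inference.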
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Access {
offset: u32,
size: u32,
}
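/// Analyzes `insns` for pointer casts hidden behind u64 fields.
///
/// The abstract interpreter runs for up to `MAX_PASSES` passes so that
/// arena STX findings, captured allocation sizes, and caller argument
/// types discovered late in one pass can seed earlier program points in
/// the next. Once that carryover stabilizes, a final pass produces the
/// `CastMap`, keyed by `(parent type id, field byte offset)`.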
pub fn analyze_casts(
insns: &[BpfInsn],
btf: &Btf,
initial_regs: &[InitialReg],
func_entries: &[FuncEntry],
datasec_pointers: &[DatasecPointer],
subprog_returns: &[SubprogReturn],
) -> CastMap {
let targets = jump_targets(insns);
let mut caller_args: CallerArgTypes = HashMap::new();
let mut arena_stx: ArenaStxFindings = BTreeMap::new();
let mut alloc_size_idx: ArenaAllocSizeIndex = BTreeMap::new();
for pass in 0..MAX_PASSES {
let mut a = if pass == 0 {
Analyzer::new(btf)
} else {
Analyzer::with_carryover(
btf,
caller_args.clone(),
arena_stx.clone(),
alloc_size_idx.clone(),
)
};
a.seed(initial_regs);
a.run(
insns,
&targets,
func_entries,
datasec_pointers,
subprog_returns,
);
let (next_args, next_stx, next_alloc_size) = a.into_carryover();
if next_stx == arena_stx && next_alloc_size == alloc_size_idx && pass > 0 {
let mut final_a = Analyzer::with_carryover(btf, next_args, next_stx, next_alloc_size);
final_a.seed(initial_regs);
final_a.run(
insns,
&targets,
func_entries,
datasec_pointers,
subprog_returns,
);
return final_a.finalize();
}
caller_args = next_args;
arena_stx = next_stx;
alloc_size_idx = next_alloc_size;
}
let mut final_a = Analyzer::with_carryover(btf, caller_args, arena_stx, alloc_size_idx);
final_a.seed(initial_regs);
final_a.run(
insns,
&targets,
func_entries,
datasec_pointers,
subprog_returns,
);
final_a.finalize()
}
const MAX_PASSES: usize = 8;
type CallerArgTypes = HashMap<usize, [RegState; 5]>;
type ArenaStxFindings = BTreeMap<(u32, u32), ArenaStxEntry>;
type ArenaAllocSizeIndex = BTreeMap<(u32, u32), Option<u64>>;
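/// Single-pass abstract interpreter. The carryover fields
/// (`caller_arg_types`, `arena_stx_findings`, `arena_alloc_size_index`)
/// survive between passes through `into_carryover` / `with_carryover`.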
struct Analyzer<'a> {
btf: &'a Btf,
regs: [RegState; 11],
patterns: BTreeMap<(u32, u32), BTreeSet<Access>>,
kptr_findings: BTreeMap<(u32, u32), KptrEntry>,
stack_slots: BTreeMap<i16, RegState>,
arena_confirmed: BTreeSet<(u32, u32)>,
    arena_stx_findings: ArenaStxFindings,
    arena_alloc_size_index: ArenaAllocSizeIndex,
    max_seen_type_id: u32,
    alloc_seeds_applied: u32,
    caller_arg_types: CallerArgTypes,
    bridge_slot_origins: HashMap<i16, (u32, u32)>,
    func_has_alloc: bool,
    branch_source_regs: HashMap<usize, [RegState; 11]>,
}
#[derive(Debug, Clone, Copy)]
enum KptrEntry {
Single(u32),
Conflicting,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ArenaStxEntry {
Pending,
#[allow(dead_code)]
Conflicting,
}
#[derive(Debug, Clone, Copy)]
enum StxValueKind {
Kptr { target: u32 },
Arena { alloc_size: Option<u64> },
Unknown,
}
impl<'a> Analyzer<'a> {
fn new(btf: &'a Btf) -> Self {
Self {
btf,
regs: [RegState::Unknown; 11],
patterns: BTreeMap::new(),
kptr_findings: BTreeMap::new(),
stack_slots: BTreeMap::new(),
arena_confirmed: BTreeSet::new(),
arena_stx_findings: BTreeMap::new(),
arena_alloc_size_index: BTreeMap::new(),
max_seen_type_id: 0,
alloc_seeds_applied: 0,
caller_arg_types: HashMap::new(),
bridge_slot_origins: HashMap::new(),
func_has_alloc: false,
branch_source_regs: HashMap::new(),
}
}
fn with_carryover(
btf: &'a Btf,
caller_arg_types: CallerArgTypes,
arena_stx_findings: ArenaStxFindings,
arena_alloc_size_index: ArenaAllocSizeIndex,
) -> Self {
let max_seen_type_id = arena_stx_findings
.keys()
.map(|(parent, _)| *parent)
.max()
.unwrap_or(0);
Self {
btf,
regs: [RegState::Unknown; 11],
patterns: BTreeMap::new(),
kptr_findings: BTreeMap::new(),
stack_slots: BTreeMap::new(),
arena_confirmed: BTreeSet::new(),
arena_stx_findings,
arena_alloc_size_index,
max_seen_type_id,
alloc_seeds_applied: 0,
caller_arg_types,
bridge_slot_origins: HashMap::new(),
func_has_alloc: false,
branch_source_regs: HashMap::new(),
}
}
fn into_carryover(self) -> (CallerArgTypes, ArenaStxFindings, ArenaAllocSizeIndex) {
(
self.caller_arg_types,
self.arena_stx_findings,
self.arena_alloc_size_index,
)
}
fn seed(&mut self, initial_regs: &[InitialReg]) {
for seed in initial_regs {
if (seed.reg as usize) >= BPF_REG_R10 {
continue;
}
let Some(sid) = super::bpf_map::resolve_to_struct_id(self.btf, seed.struct_type_id)
else {
continue;
};
self.regs[seed.reg as usize] = RegState::Pointer {
struct_type_id: sid,
};
self.note_type_id(sid);
}
}
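    /// Resets per-function state at a subprogram entry and seeds R1..R5
    /// from the prototype's pointer parameters. An arena allocation in R0
    /// with a known size is preserved across the reset.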
fn seed_from_func_proto(&mut self, func_proto_id: u32) {
let saved_r0 = match self.regs[0] {
r0 @ RegState::ArenaU64FromAlloc {
alloc_size: Some(_),
..
} => Some(r0),
_ => None,
};
self.regs = [RegState::Unknown; 11];
if let Some(r0) = saved_r0 {
self.regs[0] = r0;
}
self.stack_slots.clear();
self.bridge_slot_origins.clear();
self.func_has_alloc = false;
let proto = match self.btf.resolve_type_by_id(func_proto_id) {
Ok(Type::FuncProto(fp)) => fp,
Ok(Type::Func(f)) => match f.get_type_id() {
Ok(pid) => match self.btf.resolve_type_by_id(pid) {
Ok(Type::FuncProto(fp)) => fp,
_ => return,
},
Err(_) => return,
},
_ => return,
};
for (i, param) in proto.parameters.iter().enumerate().take(5) {
if param.is_variadic() {
break;
}
let Ok(tid) = param.get_type_id() else {
continue;
};
if let Some(sid) = super::bpf_map::resolve_to_struct_id(self.btf, tid) {
                let reg_idx = i + 1;
                self.regs[reg_idx] = RegState::Pointer {
struct_type_id: sid,
};
self.note_type_id(sid);
}
}
}
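    /// Walks the instruction stream once, updating register and stack-slot
    /// state. Register state is cleared after an exit or unconditional
    /// jump unless the next instruction is a known jump target.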
fn run(
&mut self,
insns: &[BpfInsn],
jump_targets: &BTreeSet<usize>,
func_entries: &[FuncEntry],
datasec_pointers: &[DatasecPointer],
subprog_returns: &[SubprogReturn],
) {
let mut skip_next = false;
        let mut entries_by_pc: HashMap<usize, Vec<u32>> =
            HashMap::with_capacity(func_entries.len());
for fe in func_entries {
entries_by_pc
.entry(fe.insn_offset)
.or_default()
.push(fe.func_proto_id);
}
        let mut datasec_by_pc: HashMap<usize, (u32, u32)> =
            HashMap::with_capacity(datasec_pointers.len());
for dp in datasec_pointers {
datasec_by_pc.insert(dp.insn_offset, (dp.datasec_type_id, dp.base_offset));
}
        let mut subprog_returns_by_pc: HashMap<usize, Option<u64>> =
            HashMap::with_capacity(subprog_returns.len());
for sr in subprog_returns {
subprog_returns_by_pc.insert(sr.insn_offset, sr.alloc_size);
}
for (pc, insn) in insns.iter().enumerate() {
if let Some(src_regs) = self.branch_source_regs.get(&pc) {
for (i, src_reg) in src_regs.iter().enumerate().take(11) {
match (self.regs[i], *src_reg) {
(a, b) if a == b => {}
(RegState::ArenaU64FromAlloc { .. }, _) => {
}
(_, typed @ RegState::ArenaU64FromAlloc { .. }) => {
self.regs[i] = typed;
}
(_, typed) if !matches!(typed, RegState::Unknown) => {
self.regs[i] = typed;
}
_ => {}
}
}
}
if skip_next {
skip_next = false;
continue;
}
if let Some(protos) = entries_by_pc.get(&pc) {
for proto_id in protos {
self.seed_from_func_proto(*proto_id);
}
if let Some(caller_args) = self.caller_arg_types.get(&pc) {
let args = *caller_args;
for (i, &caller_state) in args.iter().enumerate() {
                        let reg_idx = i + 1;
                        if matches!(self.regs[reg_idx], RegState::Unknown) {
if let RegState::Pointer { struct_type_id } = caller_state {
self.regs[reg_idx] = RegState::Pointer { struct_type_id };
self.note_type_id(struct_type_id);
} else if let r @ RegState::ArenaU64FromAlloc { .. } = caller_state {
self.regs[reg_idx] = r;
}
}
}
}
}
let datasec_hit = datasec_by_pc.get(&pc).copied();
let alloc_seed = subprog_returns_by_pc.get(&pc).copied();
if insn.code == (BPF_CLASS_JMP | BPF_OP_CALL) && insn.src_reg() == BPF_PSEUDO_CALL {
let callee_pc = (pc as i64 + 1 + insn.imm as i64) as usize;
let new_args = [
self.regs[1],
self.regs[2],
self.regs[3],
self.regs[4],
self.regs[5],
];
self.caller_arg_types
.entry(callee_pc)
.and_modify(|existing| {
for (slot, new) in existing.iter_mut().zip(new_args.iter()) {
match (*slot, *new) {
(RegState::Unknown, _) => *slot = *new,
(_, RegState::Unknown) => {}
(a, b) if a == b => {}
_ => *slot = RegState::Unknown,
}
}
})
.or_insert(new_args);
}
self.step(*insn, &mut skip_next, datasec_hit, alloc_seed);
let class = insn.code & 0x07;
let op = insn.code & 0xf0;
            if (class == BPF_CLASS_JMP || class == BPF_CLASS_JMP32)
                && op != BPF_OP_CALL
                // op 0x00 is the BPF_JA opcode; code 0x06 is the JMP32 long jump.
                && op != 0x00
                && insn.code != 0x06
            {
let target = (pc as i64 + 1 + insn.off as i64) as usize;
self.branch_source_regs
.entry(target)
.and_modify(|existing| {
for (i, new) in existing.iter_mut().zip(self.regs.iter()) {
match (*i, *new) {
(RegState::Unknown, typed) => *i = typed,
(_, RegState::Unknown) => {}
(a, b) if a == b => {}
_ => *i = RegState::Unknown,
}
}
})
.or_insert(self.regs);
}
let unconditional_ja =
(class == BPF_CLASS_JMP || class == BPF_CLASS_JMP32) && op == 0x00;
let is_exit = class == BPF_CLASS_JMP && op == BPF_OP_EXIT;
if (is_exit || unconditional_ja) && !jump_targets.contains(&(pc + 1)) {
self.regs = [RegState::Unknown; 11];
self.stack_slots.clear();
self.bridge_slot_origins.clear();
self.func_has_alloc = false;
}
}
}
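    /// Interprets one instruction. `datasec_hit` marks an `lddw` that loads
    /// a datasec pointer at this pc; `alloc_seed` marks a call whose return
    /// value is an arena allocation, with its size when known.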
fn step(
&mut self,
insn: BpfInsn,
skip_next: &mut bool,
datasec_hit: Option<(u32, u32)>,
alloc_seed: Option<Option<u64>>,
) {
let class = insn.code & 0x07;
let dst = insn.dst_reg() as usize;
let src = insn.src_reg() as usize;
if dst >= self.regs.len() || src >= self.regs.len() {
return;
}
match class {
BPF_CLASS_LDX => {
let mode = insn.code & 0xe0;
let size = insn.code & 0x18;
if mode != BPF_MODE_MEM {
self.set_reg(dst, RegState::Unknown);
return;
}
self.handle_ldx(dst, src, size, insn.off as i32);
}
BPF_CLASS_STX => {
let mode = insn.code & 0xe0;
let size = insn.code & 0x18;
if mode == BPF_MODE_ATOMIC {
self.handle_atomic(dst, src, insn.imm, insn.off);
return;
}
if mode != BPF_MODE_MEM {
return;
}
self.handle_stx(dst, src, size, insn.off);
}
BPF_CLASS_ST => {
let mode = insn.code & 0xe0;
if mode == BPF_MODE_MEM && dst == BPF_REG_R10 {
self.stack_slots.remove(&insn.off);
}
}
BPF_CLASS_LD => {
if insn.code == (BPF_CLASS_LD | BPF_SIZE_DW | BPF_MODE_IMM) {
if let Some((datasec_type_id, base_offset)) = datasec_hit {
self.set_reg(
dst,
RegState::DatasecPointer {
datasec_type_id,
base_offset,
},
);
self.note_type_id(datasec_type_id);
} else {
self.set_reg(dst, RegState::Unknown);
}
*skip_next = true;
} else {
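                    // Legacy BPF_ABS/BPF_IND loads deposit their result in R0.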
self.set_reg(0, RegState::Unknown);
}
}
BPF_CLASS_ALU64 | BPF_CLASS_ALU => {
let op = insn.code & 0xf0;
let src_kind = insn.code & 0x08;
if op == BPF_OP_MOV && src_kind == BPF_SRC_X {
if class == BPF_CLASS_ALU64 {
if dst == BPF_REG_R10 {
return;
}
match insn.off {
0 => {
if src == BPF_REG_R10 {
self.regs[dst] = RegState::FrameAddr { offset: 0 };
} else {
self.regs[dst] = self.regs[src];
}
}
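                            // off == 1 is BPF_ADDR_SPACE_CAST: imm 1 casts an arena
                            // pointer into the kernel address space, imm 1 << 16
                            // casts it back into the arena (user-visible) space.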
1 => {
if insn.imm == 1 {
if let RegState::LoadedU64Field {
source_struct_id,
field_offset,
} = self.regs[src]
{
self.arena_confirmed
.insert((source_struct_id, field_offset));
}
self.regs[dst] = self.regs[src];
} else if insn.imm == (1 << 16) {
if let RegState::LoadedU64Field {
source_struct_id,
field_offset,
} = self.regs[src]
{
self.arena_confirmed
.insert((source_struct_id, field_offset));
self.regs[dst] = self.regs[src];
} else if matches!(
self.regs[src],
RegState::Pointer { .. }
| RegState::ArenaU64FromAlloc { .. }
) {
self.regs[dst] = self.regs[src];
} else {
self.set_reg(dst, RegState::Unknown);
}
} else {
self.set_reg(dst, RegState::Unknown);
}
}
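                            // off 8/16/32 is a sign-extending move (movsx); the
                            // result is treated as an untyped scalar.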
8 | 16 | 32 => {
self.set_reg(dst, RegState::Unknown);
}
_ => {
self.set_reg(dst, RegState::Unknown);
}
}
} else {
self.set_reg(dst, RegState::Unknown);
}
} else if class == BPF_CLASS_ALU64
&& op == BPF_OP_ADD
&& (insn.code & 0x08) == 0
&& let RegState::FrameAddr { offset } = self.regs[dst]
{
let new_off = (offset as i32).saturating_add(insn.imm);
if let Ok(narrowed) = i16::try_from(new_off) {
self.regs[dst] = RegState::FrameAddr { offset: narrowed };
} else {
self.set_reg(dst, RegState::Unknown);
}
} else {
self.set_reg(dst, RegState::Unknown);
}
}
BPF_CLASS_JMP | BPF_CLASS_JMP32 => {
let op = insn.code & 0xf0;
if op == BPF_OP_CALL {
let pre_call_r1 = self.regs[1];
let pre_call_r3 = self.regs[3];
for r in 0..=5 {
self.set_reg(r, RegState::Unknown);
}
let pseudo = insn.src_reg();
if pseudo == BPF_PSEUDO_KFUNC_CALL {
self.handle_kfunc_call(insn.imm);
} else if pseudo == 0
&& (insn.imm == BPF_FUNC_MAP_LOOKUP_ELEM
|| insn.imm == BPF_FUNC_MAP_LOOKUP_PERCPU_ELEM)
{
if let RegState::DatasecPointer {
datasec_type_id,
base_offset,
} = pre_call_r1
&& let Some(sid) =
map_value_struct_id(self.btf, datasec_type_id, base_offset)
{
self.regs[0] = RegState::Pointer {
struct_type_id: sid,
};
self.note_type_id(sid);
}
} else if pseudo == 0
&& insn.imm == BPF_FUNC_MAP_UPDATE_ELEM
&& let RegState::DatasecPointer {
datasec_type_id,
base_offset,
} = pre_call_r1
&& let Some(value_sid) =
map_value_struct_id(self.btf, datasec_type_id, base_offset)
&& let RegState::FrameAddr { offset: r3_base } = pre_call_r3
{
self.bridge_map_value_spill(value_sid, r3_base);
}
if let Some(captured_alloc_size) = alloc_seed
&& matches!(self.regs[0], RegState::Unknown)
{
self.regs[0] = RegState::ArenaU64FromAlloc {
source: None,
alloc_size: captured_alloc_size,
};
self.alloc_seeds_applied = self.alloc_seeds_applied.saturating_add(1);
self.func_has_alloc = true;
}
}
}
_ => {
self.set_reg(dst, RegState::Unknown);
}
}
}
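    /// Models a BPF_LDX load: restores spilled state from R10 slots,
    /// follows pointer and datasec bases into their BTF members, and
    /// records access patterns made through previously loaded u64 fields.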
fn handle_ldx(&mut self, dst: usize, src: usize, size: u8, off: i32) {
if dst >= self.regs.len() || src >= self.regs.len() {
return;
}
if dst == BPF_REG_R10 {
return;
}
let size_bytes = ldx_size_bytes(size);
if src == BPF_REG_R10 {
if size != BPF_SIZE_DW || off >= 0 {
self.set_reg(dst, RegState::Unknown);
return;
}
let Ok(slot_off) = i16::try_from(off) else {
self.set_reg(dst, RegState::Unknown);
return;
};
let restored = self.stack_slots.get(&slot_off).copied();
self.set_reg(dst, restored.unwrap_or(RegState::Unknown));
return;
}
let typed_base: Option<(u32, u32)> = match self.regs[src] {
RegState::Pointer { struct_type_id } => Some((struct_type_id, 0)),
RegState::DatasecPointer {
datasec_type_id,
base_offset,
} => Some((datasec_type_id, base_offset)),
_ => None,
};
if let Some((parent_btf_id, base_offset)) = typed_base {
let insn_off = match field_byte_offset(off) {
Some(o) => o,
None => {
self.set_reg(dst, RegState::Unknown);
return;
}
};
let Some(field_off) = base_offset.checked_add(insn_off) else {
self.set_reg(dst, RegState::Unknown);
return;
};
if let Some(member) = struct_member_at(self.btf, parent_btf_id, field_off) {
let member_type_id = member.member_type_id();
let resolved = super::btf_render::peel_modifiers(self.btf, member_type_id);
let (canonical_parent, canonical_field_off) = match &member {
MemberAt::Struct {
resolved_parent_type_id,
resolved_member_offset,
..
} => (*resolved_parent_type_id, *resolved_member_offset),
MemberAt::Datasec {
var_byte_offset, ..
} => (parent_btf_id, *var_byte_offset),
};
match (size_bytes, resolved) {
(Some(8), Some(Type::Ptr(p))) => {
if let Ok(pointee) = p.get_type_id()
&& let Some(sid) =
super::bpf_map::resolve_to_struct_id(self.btf, pointee)
{
self.set_reg(
dst,
RegState::Pointer {
struct_type_id: sid,
},
);
self.note_type_id(sid);
return;
}
self.set_reg(dst, RegState::Unknown);
}
(Some(8), Some(Type::Int(int))) => {
if int.size() == 8 && !int.is_signed() && !int.is_bool() && !int.is_char() {
let dst_state = if self
.arena_stx_findings
.contains_key(&(canonical_parent, canonical_field_off))
{
let inherited_size = self
.arena_alloc_size_index
.get(&(canonical_parent, canonical_field_off))
.copied()
.flatten();
RegState::ArenaU64FromAlloc {
source: Some((canonical_parent, canonical_field_off)),
alloc_size: inherited_size,
}
} else {
RegState::LoadedU64Field {
source_struct_id: canonical_parent,
field_offset: canonical_field_off,
}
};
self.set_reg(dst, dst_state);
self.note_type_id(canonical_parent);
self.patterns
.entry((canonical_parent, canonical_field_off))
.or_default();
} else {
self.set_reg(dst, RegState::Unknown);
}
}
_ => {
self.set_reg(dst, RegState::Unknown);
}
}
} else {
self.set_reg(dst, RegState::Unknown);
}
return;
}
match self.regs[src] {
RegState::LoadedU64Field {
source_struct_id,
field_offset,
} => {
let target_off = match field_byte_offset(off) {
Some(o) => o,
None => {
self.set_reg(dst, RegState::Unknown);
return;
}
};
if let Some(sz) = size_bytes {
self.patterns
.entry((source_struct_id, field_offset))
.or_default()
.insert(Access {
offset: target_off,
size: sz,
});
}
self.set_reg(dst, RegState::Unknown);
}
RegState::ArenaU64FromAlloc { source, .. } => {
if let Some((source_struct_id, field_offset)) = source {
let target_off = match field_byte_offset(off) {
Some(o) => o,
None => {
self.set_reg(dst, RegState::Unknown);
return;
}
};
if let Some(sz) = size_bytes {
self.patterns
.entry((source_struct_id, field_offset))
.or_default()
.insert(Access {
offset: target_off,
size: sz,
});
}
}
self.set_reg(dst, RegState::Unknown);
}
RegState::Unknown | RegState::FrameAddr { .. } => {
self.set_reg(dst, RegState::Unknown);
}
RegState::Pointer { .. } | RegState::DatasecPointer { .. } => unreachable!(),
}
}
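    /// Models a BPF_STX store: spills register state through R10, and
    /// records kptr or arena evidence when a DW store targets an unsigned
    /// 64-bit member of a typed base.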
fn handle_stx(&mut self, dst: usize, src: usize, size: u8, off: i16) {
if dst >= self.regs.len() || src >= self.regs.len() {
return;
}
if dst == BPF_REG_R10 {
if size != BPF_SIZE_DW || off >= 0 {
self.stack_slots.remove(&off);
return;
}
self.stack_slots.insert(off, self.regs[src]);
return;
}
if size != BPF_SIZE_DW {
return;
}
let (parent_btf_id, base_offset) = match self.regs[dst] {
RegState::Pointer {
struct_type_id: pid,
} => (pid, 0u32),
RegState::DatasecPointer {
datasec_type_id,
base_offset,
} => (datasec_type_id, base_offset),
_ => return,
};
let value_state = match self.regs[src] {
RegState::Pointer {
struct_type_id: tid,
} => StxValueKind::Kptr { target: tid },
RegState::ArenaU64FromAlloc { alloc_size, .. } => StxValueKind::Arena { alloc_size },
RegState::Unknown => StxValueKind::Unknown,
_ => return,
};
let Some(insn_off) = field_byte_offset(off as i32) else {
return;
};
let Some(field_off) = base_offset.checked_add(insn_off) else {
return;
};
let Some(member) = struct_member_at(self.btf, parent_btf_id, field_off) else {
return;
};
let member_type_id = member.member_type_id();
let Some(terminal) = super::btf_render::peel_modifiers(self.btf, member_type_id) else {
return;
};
let Type::Int(int) = terminal else { return };
if int.size() != 8 || int.is_signed() || int.is_bool() || int.is_char() {
return;
}
let (canonical_parent, canonical_field_off) = match &member {
MemberAt::Struct {
resolved_parent_type_id,
resolved_member_offset,
..
} => (*resolved_parent_type_id, *resolved_member_offset),
MemberAt::Datasec {
var_byte_offset, ..
} => (parent_btf_id, *var_byte_offset),
};
self.note_type_id(canonical_parent);
let key = (canonical_parent, canonical_field_off);
match value_state {
StxValueKind::Kptr { target } => {
if canonical_parent == target {
return;
}
self.note_type_id(target);
match self.kptr_findings.get(&key).copied() {
None => {
self.kptr_findings.insert(key, KptrEntry::Single(target));
}
Some(KptrEntry::Single(prev)) if prev == target => {
}
Some(_) => {
self.kptr_findings.insert(key, KptrEntry::Conflicting);
}
}
}
StxValueKind::Arena { alloc_size } => {
match self.arena_stx_findings.get(&key).copied() {
None => {
self.arena_stx_findings.insert(key, ArenaStxEntry::Pending);
self.arena_alloc_size_index.insert(key, alloc_size);
}
Some(ArenaStxEntry::Pending) => {
match (self.arena_alloc_size_index.get(&key).copied(), alloc_size) {
(None, _) => {
self.arena_alloc_size_index.insert(key, alloc_size);
}
(Some(None), Some(_)) => {
self.arena_alloc_size_index.insert(key, alloc_size);
}
(Some(None), None) => {
}
(Some(Some(_)), None) => {
}
(Some(Some(prev)), Some(new)) if prev == new => {
}
(Some(Some(_)), Some(_)) => {
self.arena_alloc_size_index.insert(key, None);
}
}
}
Some(ArenaStxEntry::Conflicting) => {
unreachable!(
"arena_stx_findings cannot hold Conflicting: \
only the StxValueKind::Arena arm inserts, \
and it only inserts Pending"
);
}
}
}
            StxValueKind::Unknown => {
                // A plain (non-arena, non-kptr) DW store to this slot withdraws
                // its direct arena confirmation.
                self.arena_confirmed.remove(&key);
            }
}
}
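    /// Models an atomic STX: a load-acquire clobbers the destination,
    /// fetching forms clobber the source register, and cmpxchg also
    /// clobbers R0.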
fn handle_atomic(&mut self, dst: usize, src: usize, imm: i32, off: i16) {
if dst >= self.regs.len() || src >= self.regs.len() {
return;
}
if dst == BPF_REG_R10 && imm != BPF_LOAD_ACQ_IMM {
self.stack_slots.remove(&off);
}
if imm == BPF_LOAD_ACQ_IMM {
self.set_reg(dst, RegState::Unknown);
return;
}
if imm == BPF_STORE_REL_IMM {
return;
}
let top = imm & 0xf0;
let has_fetch = (imm & BPF_FETCH) != 0;
if top == BPF_CMPXCHG_TOP && has_fetch {
self.set_reg(0, RegState::Unknown);
}
if has_fetch {
self.set_reg(src, RegState::Unknown);
}
}
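    /// Resolves a kfunc call through BTF: a struct-pointer return types R0,
    /// while a `void *` return from a known arena allocator marks R0 as an
    /// arena allocation.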
fn handle_kfunc_call(&mut self, imm: i32) {
if imm <= 0 {
return;
}
let func_btf_id = imm as u32;
let (proto, func_name) = match self.btf.resolve_type_by_id(func_btf_id) {
Ok(Type::Func(f)) => match f.get_type_id() {
Ok(pid) => match self.btf.resolve_type_by_id(pid) {
Ok(Type::FuncProto(fp)) => {
let name = self.btf.resolve_name(&f).ok();
(fp, name)
}
_ => return,
},
Err(_) => return,
},
Ok(Type::FuncProto(fp)) => (fp, None),
_ => return,
};
let ret_id = proto.return_type_id();
if ret_id == 0 {
return;
}
if let Some(sid) = super::bpf_map::resolve_to_struct_id(self.btf, ret_id) {
self.regs[0] = RegState::Pointer {
struct_type_id: sid,
};
self.note_type_id(sid);
return;
}
if return_peels_to_ptr_void(self.btf, ret_id)
&& let Some(name) = func_name.as_deref()
&& ARENA_ALLOC_KFUNC_NAMES.contains(&name)
{
self.regs[0] = RegState::ArenaU64FromAlloc {
source: None,
alloc_size: None,
};
self.alloc_seeds_applied = self.alloc_seeds_applied.saturating_add(1);
self.func_has_alloc = true;
}
}
fn set_reg(&mut self, idx: usize, state: RegState) {
if idx == BPF_REG_R10 {
return;
}
if idx < self.regs.len() {
self.regs[idx] = state;
}
}
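    /// When a map value built in a stack buffer is passed to
    /// bpf_map_update_elem, propagate the states spilled into that buffer
    /// onto the matching u64 members of the value struct.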
fn bridge_map_value_spill(&mut self, value_struct_id: u32, r3_base: i16) {
let (t, _peeled_id) =
match super::btf_render::peel_modifiers_with_id(self.btf, value_struct_id) {
Some(v) => v,
None => return,
};
let s = match t {
Type::Struct(s) | Type::Union(s) => s,
_ => return,
};
for m in &s.members {
if matches!(m.bitfield_size(), Some(b) if b > 0) {
continue;
}
let bit_off = m.bit_offset();
if bit_off % 8 != 0 {
continue;
}
let member_off = bit_off / 8;
let Ok(member_tid) = m.get_type_id() else {
continue;
};
let Some(terminal) = super::btf_render::peel_modifiers(self.btf, member_tid) else {
continue;
};
let Type::Int(int) = terminal else { continue };
if int.size() != 8 || int.is_signed() || int.is_bool() || int.is_char() {
continue;
}
            let Some(slot_off) = i16::try_from(member_off)
.ok()
.and_then(|o| r3_base.checked_add(o))
else {
continue;
};
let ever_arena = self.func_has_alloc;
let slot = match self.stack_slots.get(&slot_off).copied() {
Some(s) => s,
None if ever_arena => RegState::ArenaU64FromAlloc {
source: None,
alloc_size: None,
},
None => continue,
};
let key = (value_struct_id, member_off);
match slot {
_ if ever_arena => {
self.note_type_id(value_struct_id);
let captured = match slot {
RegState::ArenaU64FromAlloc { alloc_size, .. } => alloc_size,
RegState::LoadedU64Field {
source_struct_id,
field_offset,
} => self
.arena_alloc_size_index
.get(&(source_struct_id, field_offset))
.copied()
.flatten(),
_ => None,
};
if let std::collections::btree_map::Entry::Vacant(e) =
self.arena_stx_findings.entry(key)
{
e.insert(ArenaStxEntry::Pending);
self.arena_alloc_size_index.insert(key, captured);
self.bridge_slot_origins.insert(slot_off, key);
} else {
match (self.arena_alloc_size_index.get(&key).copied(), captured) {
(None, _) => {
self.arena_alloc_size_index.insert(key, captured);
}
(Some(None), Some(_)) => {
self.arena_alloc_size_index.insert(key, captured);
}
(Some(Some(prev)), Some(new)) if prev != new => {
self.arena_alloc_size_index.insert(key, None);
}
_ => {}
}
}
}
RegState::Pointer {
struct_type_id: target,
} => {
if value_struct_id == target {
continue;
}
self.note_type_id(value_struct_id);
self.note_type_id(target);
match self.kptr_findings.get(&key).copied() {
None => {
self.kptr_findings.insert(key, KptrEntry::Single(target));
}
Some(KptrEntry::Single(prev)) if prev == target => {}
Some(_) => {
self.kptr_findings.insert(key, KptrEntry::Conflicting);
}
}
}
_ => {}
}
}
}
fn note_type_id(&mut self, id: u32) {
if id > self.max_seen_type_id {
self.max_seen_type_id = id;
}
}
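    /// Converts accumulated findings into the final `CastMap`: arena STX
    /// hits (refined by shape inference where possible), arena hits merged
    /// with forward-declared kptr targets, shape-only candidates backed by
    /// direct arena evidence, and plain kernel kptr hits.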
fn finalize(self) -> CastMap {
let mut out = CastMap::new();
let max_id = self
.max_seen_type_id
.saturating_add(CANDIDATE_SEARCH_SLACK)
.min(super::sdt_alloc::MAX_BTF_ID_PROBE);
if self.max_seen_type_id.saturating_add(CANDIDATE_SEARCH_SLACK)
> super::sdt_alloc::MAX_BTF_ID_PROBE
{
tracing::warn!(
max_seen_type_id = self.max_seen_type_id,
slack = CANDIDATE_SEARCH_SLACK,
cap = super::sdt_alloc::MAX_BTF_ID_PROBE,
"cast_analysis: candidate-search slack capped at MAX_BTF_ID_PROBE; \
shape-inference candidates above the cap are invisible"
);
}
let layout = build_layout_index(self.btf, max_id);
let mut arena_kptr_merged: BTreeMap<(u32, u32), u32> = BTreeMap::new();
let conflicting: BTreeSet<(u32, u32)> = self
.patterns
.iter()
.filter(|(_, accesses)| !accesses.is_empty())
.map(|(k, _)| *k)
.chain(self.arena_confirmed.iter().copied())
.chain(self.arena_stx_findings.keys().copied())
.filter(|k| self.kptr_findings.contains_key(k))
.filter(|k| {
if let Some(KptrEntry::Single(tid)) = self.kptr_findings.get(k)
&& let Ok(ty) = self.btf.resolve_type_by_id(*tid)
&& matches!(ty, Type::Fwd(_))
{
arena_kptr_merged.insert(*k, *tid);
return false;
}
                true
            })
.collect();
for (key, entry) in &self.arena_stx_findings {
if !matches!(entry, ArenaStxEntry::Pending) {
continue;
}
if conflicting.contains(key) {
continue;
}
let inferred_target = self.patterns.get(key).and_then(|accesses| {
if accesses.is_empty() {
return None;
}
let mut iter = accesses.iter();
let first = iter.next()?;
let empty = HashSet::new();
let mut candidates: HashSet<u32> = layout
.get(&(first.offset, first.size))
.cloned()
.unwrap_or_default();
for acc in iter {
let next = layout.get(&(acc.offset, acc.size)).unwrap_or(&empty);
candidates.retain(|c| next.contains(c));
if candidates.is_empty() {
break;
}
}
candidates.remove(&key.0);
if candidates.len() == 1 {
candidates.into_iter().next()
} else {
None
}
});
let target_type_id = inferred_target.unwrap_or(0);
let alloc_size = self.arena_alloc_size_index.get(key).copied().flatten();
tracing::debug!(
parent = key.0,
offset = key.1,
target = target_type_id,
alloc_size = ?alloc_size,
"cast_analysis: arena STX-flow hit emitted"
);
out.insert(
*key,
CastHit {
target_type_id,
addr_space: AddrSpace::Arena,
alloc_size,
},
);
}
for (key, kptr_target) in &arena_kptr_merged {
let alloc_size = self.arena_alloc_size_index.get(key).copied().flatten();
tracing::debug!(
parent = key.0,
offset = key.1,
target = kptr_target,
alloc_size = ?alloc_size,
"cast_analysis: arena+kptr merged hit emitted"
);
out.insert(
*key,
CastHit {
target_type_id: *kptr_target,
addr_space: AddrSpace::Arena,
alloc_size,
},
);
}
for ((source, field_off), accesses) in &self.patterns {
if accesses.is_empty() {
continue;
}
if conflicting.contains(&(*source, *field_off)) {
continue;
}
let key = (*source, *field_off);
if out.contains_key(&key) {
continue;
}
let has_direct_evidence =
self.arena_confirmed.contains(&key) || self.arena_stx_findings.contains_key(&key);
if !has_direct_evidence {
tracing::debug!(
parent_type_id = source,
field_offset = field_off,
accesses = accesses.len(),
"cast_analysis: shape-inference candidate without direct evidence; dropped (F1 mitigation)"
);
continue;
}
let mut iter = accesses.iter();
let first = iter.next().expect("non-empty checked above");
let empty = HashSet::new();
let mut candidates: HashSet<u32> = layout
.get(&(first.offset, first.size))
.cloned()
.unwrap_or_default();
for acc in iter {
let next = layout.get(&(acc.offset, acc.size)).unwrap_or(&empty);
candidates.retain(|c| next.contains(c));
if candidates.is_empty() {
break;
}
}
candidates.remove(source);
if candidates.len() == 1 {
let target = candidates.into_iter().next().unwrap();
let alloc_size = self
.arena_alloc_size_index
.get(&(*source, *field_off))
.copied()
.flatten();
tracing::debug!(
parent = source,
offset = field_off,
target,
accesses = accesses.len(),
alloc_size = ?alloc_size,
"cast_analysis: shape-inference hit emitted"
);
out.insert(
(*source, *field_off),
CastHit {
target_type_id: target,
addr_space: AddrSpace::Arena,
alloc_size,
},
);
}
}
if self.alloc_seeds_applied > 0 && self.arena_stx_findings.is_empty() {
tracing::warn!(
alloc_seeds_applied = self.alloc_seeds_applied,
"cast_analysis: allocator seeds applied but no slot got an arena \
STX tag; allocator helpers may need __always_inline so the \
returned R0 reaches a typed-slot STX without crossing a \
BPF-to-BPF call boundary"
);
}
for (key, entry) in self.kptr_findings {
let KptrEntry::Single(target) = entry else {
continue;
};
if conflicting.contains(&key) {
continue;
}
if arena_kptr_merged.contains_key(&key) {
continue;
}
out.insert(
key,
CastHit {
target_type_id: target,
addr_space: AddrSpace::Kernel,
alloc_size: None,
},
);
}
let arena_count = out
.values()
.filter(|h| h.addr_space == AddrSpace::Arena)
.count();
let kernel_count = out
.values()
.filter(|h| h.addr_space == AddrSpace::Kernel)
.count();
tracing::debug!(
total = out.len(),
arena = arena_count,
kernel = kernel_count,
"cast_analysis: finalize summary"
);
out
}
}
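/// Collects the instruction indices targeted by any jump, skipping the
/// second half of `lddw` pairs.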
fn jump_targets(insns: &[BpfInsn]) -> BTreeSet<usize> {
let mut targets = BTreeSet::new();
let mut skip_next = false;
for (pc, insn) in insns.iter().enumerate() {
if skip_next {
skip_next = false;
continue;
}
let class = insn.code & 0x07;
if class == BPF_CLASS_LD && insn.code == (BPF_CLASS_LD | BPF_SIZE_DW | BPF_MODE_IMM) {
skip_next = true;
continue;
}
if class != BPF_CLASS_JMP && class != BPF_CLASS_JMP32 {
continue;
}
let op = insn.code & 0xf0;
if op == BPF_OP_EXIT || op == BPF_OP_CALL {
continue;
}
let jump_off = if class == BPF_CLASS_JMP32 && op == 0x00 {
insn.imm as i64
} else {
insn.off as i64
};
let next = pc as i64 + 1 + jump_off;
if next >= 0 && (next as usize) < insns.len() {
targets.insert(next as usize);
}
}
targets
}
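/// Builds an index from `(member byte offset, member size)` to the struct
/// and union type ids that contain such a member, probing BTF ids up to
/// `max_id`. Used to narrow shape-inference candidates.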
fn build_layout_index(btf: &Btf, max_id: u32) -> HashMap<(u32, u32), HashSet<u32>> {
let mut out: HashMap<(u32, u32), HashSet<u32>> = HashMap::new();
let mut size_cache: HashMap<u32, Option<u32>> = HashMap::new();
let mut consecutive_fail: u32 = 0;
const CONSECUTIVE_FAIL_CAP: u32 = 256;
let mut tid: u32 = 1;
while tid <= max_id {
match btf.resolve_type_by_id(tid) {
Ok(Type::Struct(s)) | Ok(Type::Union(s)) => {
consecutive_fail = 0;
index_aggregate_members(btf, tid, &s.members, 0, &mut out, &mut size_cache, 0);
}
Ok(_) => {
consecutive_fail = 0;
}
Err(_) => {
consecutive_fail += 1;
if consecutive_fail >= CONSECUTIVE_FAIL_CAP {
break;
}
}
}
tid += 1;
}
out
}
const LAYOUT_INDEX_MAX_DEPTH: u32 = 8;
fn index_aggregate_members(
btf: &Btf,
parent_tid: u32,
members: &[btf_rs::Member],
base_offset: u32,
out: &mut HashMap<(u32, u32), HashSet<u32>>,
size_cache: &mut HashMap<u32, Option<u32>>,
depth: u32,
) {
if depth >= LAYOUT_INDEX_MAX_DEPTH {
return;
}
for m in members {
let bit_off = m.bit_offset();
if bit_off % 8 != 0 {
continue;
}
if matches!(m.bitfield_size(), Some(s) if s > 0) {
continue;
}
let off = base_offset + bit_off / 8;
if let Some(size) = cached_member_size(btf, m, size_cache) {
out.entry((off, size)).or_default().insert(parent_tid);
}
let Ok(member_tid) = m.get_type_id() else {
continue;
};
let Some(peeled) = super::btf_render::peel_modifiers(btf, member_tid) else {
continue;
};
let inner = match peeled {
Type::Struct(s) | Type::Union(s) => s,
_ => continue,
};
let Ok(name) = btf.resolve_name(&inner) else {
continue;
};
if !name.is_empty() {
continue;
}
index_aggregate_members(
btf,
parent_tid,
&inner.members,
off,
out,
size_cache,
depth + 1,
);
}
}
fn cached_member_size(
btf: &Btf,
m: &btf_rs::Member,
cache: &mut HashMap<u32, Option<u32>>,
) -> Option<u32> {
let tid = m.get_type_id().ok()?;
*cache
.entry(tid)
.or_insert_with(|| member_size_bytes(btf, m))
}
fn member_size_bytes(btf: &Btf, m: &btf_rs::Member) -> Option<u32> {
let tid = m.get_type_id().ok()?;
let terminal = super::btf_render::peel_modifiers(btf, tid)?;
super::btf_render::type_size(btf, &terminal).map(|s| s as u32)
}
#[derive(Debug, Clone)]
enum MemberAt {
Struct {
member_type_id: u32,
resolved_parent_type_id: u32,
resolved_member_offset: u32,
},
Datasec {
var_underlying_type_id: u32,
var_byte_offset: u32,
},
}
impl MemberAt {
fn member_type_id(&self) -> u32 {
match self {
Self::Struct { member_type_id, .. } => *member_type_id,
Self::Datasec {
var_underlying_type_id,
..
} => *var_underlying_type_id,
}
}
}
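/// Finds the member at `byte_offset` inside `parent_type_id`, descending
/// through nested aggregates, array elements, and datasec variables.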
fn struct_member_at(btf: &Btf, parent_type_id: u32, byte_offset: u32) -> Option<MemberAt> {
let (t, parent_type_id) = super::btf_render::peel_modifiers_with_id(btf, parent_type_id)?;
match t {
Type::Struct(s) | Type::Union(s) => {
for m in &s.members {
if matches!(m.bitfield_size(), Some(s) if s > 0) {
continue;
}
let bit_off = m.bit_offset();
if bit_off % 8 != 0 {
continue;
}
let member_off = bit_off / 8;
let member_type_id = m.get_type_id().ok()?;
if member_off == byte_offset {
if let Some(terminal) = super::btf_render::peel_modifiers(btf, member_type_id)
&& matches!(terminal, Type::Struct(_) | Type::Union(_))
{
return struct_member_at(btf, member_type_id, 0);
}
return Some(MemberAt::Struct {
member_type_id,
resolved_parent_type_id: parent_type_id,
resolved_member_offset: byte_offset,
});
}
if member_off < byte_offset
&& let Some(terminal) = super::btf_render::peel_modifiers(btf, member_type_id)
{
match &terminal {
Type::Array(arr) => {
let elem_tid = arr.get_type_id().ok()?;
let elem_size = super::btf_render::type_size(btf, &{
super::btf_render::peel_modifiers(btf, elem_tid)?
})? as u32;
if elem_size > 0 {
let arr_len = arr.len() as u32;
let arr_byte_size = elem_size * arr_len;
let rel = byte_offset - member_off;
if rel < arr_byte_size && rel.is_multiple_of(elem_size) {
return Some(MemberAt::Struct {
member_type_id: elem_tid,
resolved_parent_type_id: parent_type_id,
resolved_member_offset: byte_offset,
});
}
}
}
Type::Struct(_) | Type::Union(_) => {
let member_size = super::btf_render::type_size(btf, &terminal)? as u32;
let rel = byte_offset - member_off;
if rel < member_size {
return struct_member_at(btf, member_type_id, rel);
}
}
_ => {}
}
}
}
None
}
Type::Datasec(ds) => {
for var_info in &ds.variables {
let off = var_info.offset();
let size = var_info.size() as u32;
let end = off.checked_add(size)?;
if byte_offset < off || byte_offset >= end {
continue;
}
let chained = btf.resolve_chained_type(var_info).ok()?;
let var = match chained {
Type::Var(v) => v,
_ => return None,
};
let var_underlying_type_id = var.get_type_id().ok()?;
let rel = byte_offset - off;
if let Some(terminal) =
super::btf_render::peel_modifiers(btf, var_underlying_type_id)
&& matches!(terminal, Type::Struct(_) | Type::Union(_))
&& let Some(inner) = struct_member_at(btf, var_underlying_type_id, rel)
{
return Some(inner);
}
return Some(MemberAt::Datasec {
var_underlying_type_id,
var_byte_offset: off,
});
}
None
}
_ => None,
}
}
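/// True when `ret_id` peels through modifiers to a `void *`.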
fn return_peels_to_ptr_void(btf: &Btf, ret_id: u32) -> bool {
let Some(peeled) = super::btf_render::peel_modifiers(btf, ret_id) else {
return false;
};
let Type::Ptr(p) = peeled else {
return false;
};
p.get_type_id().map(|id| id == 0).unwrap_or(false)
}
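/// For a variable in the `.maps` datasec, resolves the struct behind the
/// map definition's `value` pointer member.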
fn map_value_struct_id(btf: &Btf, datasec_id: u32, var_offset: u32) -> Option<u32> {
let ty = btf.resolve_type_by_id(datasec_id).ok()?;
let datasec = match ty {
Type::Datasec(d) => d,
_ => return None,
};
let name = btf.resolve_name(&datasec).ok()?;
if name != ".maps" {
return None;
}
let var_info = datasec
.variables
.iter()
.find(|v| v.offset() == var_offset)?;
let chained = btf.resolve_chained_type(var_info).ok()?;
let var = match chained {
Type::Var(v) => v,
_ => return None,
};
let var_type_id = var.get_type_id().ok()?;
let map_def_terminal = super::btf_render::peel_modifiers(btf, var_type_id)?;
let map_def = match map_def_terminal {
Type::Struct(s) => s,
_ => return None,
};
for member in &map_def.members {
let Ok(mname) = btf.resolve_name(member) else {
continue;
};
if mname != "value" {
continue;
}
let mtype_id = member.get_type_id().ok()?;
let mterminal = super::btf_render::peel_modifiers(btf, mtype_id)?;
let ptr = match mterminal {
Type::Ptr(p) => p,
_ => return None,
};
let pointee = ptr.get_type_id().ok()?;
return super::bpf_map::resolve_to_struct_id(btf, pointee);
}
None
}
pub(crate) const ARENA_ALLOC_KFUNC_NAMES: &[&str] = &[
"bpf_arena_alloc_pages",
];
fn ldx_size_bytes(size_bits: u8) -> Option<u32> {
match size_bits {
BPF_SIZE_DW => Some(8),
BPF_SIZE_W => Some(4),
BPF_SIZE_H => Some(2),
BPF_SIZE_B => Some(1),
_ => None,
}
}
fn field_byte_offset(off: i32) -> Option<u32> {
if off < 0 { None } else { Some(off as u32) }
}
const BPF_CLASS_LD: u8 = bs::BPF_LD as u8;
const BPF_CLASS_LDX: u8 = bs::BPF_LDX as u8;
const BPF_CLASS_ST: u8 = bs::BPF_ST as u8;
const BPF_CLASS_STX: u8 = bs::BPF_STX as u8;
const BPF_CLASS_ALU: u8 = bs::BPF_ALU as u8;
const BPF_CLASS_JMP: u8 = bs::BPF_JMP as u8;
const BPF_CLASS_JMP32: u8 = bs::BPF_JMP32 as u8;
const BPF_CLASS_ALU64: u8 = bs::BPF_ALU64 as u8;
const BPF_SIZE_W: u8 = bs::BPF_W as u8;
const BPF_SIZE_H: u8 = bs::BPF_H as u8;
const BPF_SIZE_B: u8 = bs::BPF_B as u8;
const BPF_SIZE_DW: u8 = bs::BPF_DW as u8;
const BPF_MODE_IMM: u8 = bs::BPF_IMM as u8;
const BPF_MODE_MEM: u8 = bs::BPF_MEM as u8;
const BPF_MODE_ATOMIC: u8 = bs::BPF_ATOMIC as u8;
const BPF_OP_MOV: u8 = bs::BPF_MOV as u8;
const BPF_OP_CALL: u8 = bs::BPF_CALL as u8;
const BPF_OP_EXIT: u8 = bs::BPF_EXIT as u8;
const BPF_SRC_X: u8 = bs::BPF_X as u8;
const BPF_FETCH: i32 = bs::BPF_FETCH as i32;
const BPF_CMPXCHG_TOP: i32 = (bs::BPF_CMPXCHG as i32) & !BPF_FETCH;
const BPF_LOAD_ACQ_IMM: i32 = bs::BPF_LOAD_ACQ as i32;
const BPF_STORE_REL_IMM: i32 = bs::BPF_STORE_REL as i32;
const BPF_REG_R10: usize = bs::BPF_REG_10 as usize;
pub(crate) const BPF_PSEUDO_KFUNC_CALL: u8 = bs::BPF_PSEUDO_KFUNC_CALL as u8;
const BPF_FUNC_MAP_LOOKUP_ELEM: i32 = bs::BPF_FUNC_map_lookup_elem as i32;
const BPF_FUNC_MAP_UPDATE_ELEM: i32 = bs::BPF_FUNC_map_update_elem as i32;
const BPF_FUNC_MAP_LOOKUP_PERCPU_ELEM: i32 = bs::BPF_FUNC_map_lookup_percpu_elem as i32;
const BPF_OP_ADD: u8 = bs::BPF_ADD as u8;
pub(crate) const BPF_PSEUDO_CALL: u8 = bs::BPF_PSEUDO_CALL as u8;
#[cfg(test)]
mod tests;