use super::*;
use crate::monitor::btf_offsets::TaskStorageOffsets;
/// Canonical `TaskStorageOffsets` fixture used by every local-storage test.
///
/// Values describe the minimal synthetic layout built by
/// `build_storage_scene`: the buckets pointer sits at smap offset 0, the
/// bucket-log u32 at smap offset 8, each bucket is 16 bytes wide, and the
/// selem's local_storage/sdata fields sit at offsets 16/24.
fn test_task_storage_offsets() -> TaskStorageOffsets {
    TaskStorageOffsets {
        // bpf_local_storage_map header fields.
        smap_buckets: 0,
        smap_bucket_log: 8,
        // Bucket array / hlist plumbing.
        bucket_size: 16,
        bucket_list: 0,
        hlist_head_first: 0,
        hlist_node_next: 0,
        // bpf_local_storage_elem fields.
        elem_local_storage: 16,
        elem_sdata: 24,
        sdata_data: 0,
        // bpf_local_storage container field.
        ls_owner: 0,
    }
}
/// `BpfMapOffsets` fixture whose only populated sub-table is
/// `task_storage_offsets`; all other per-map-kind offset tables are `None`
/// and the generic fields use plausible placeholder values.
fn test_local_storage_map_offsets() -> BpfMapOffsets {
    BpfMapOffsets {
        // Generic bpf_map header offsets (placeholders for these tests).
        map_type: 24,
        map_flags: 28,
        map_name: 32,
        key_size: 44,
        value_size: 48,
        max_entries: 52,
        array_value: 256,
        // IDR / xarray walking offsets (unused here).
        xa_node_slots: 16,
        xa_node_shift: 0,
        idr_xa_head: 8,
        idr_next: 20,
        // BTF-related offsets (unused here).
        map_btf: 0,
        map_btf_value_type_id: 0,
        map_btf_vmlinux_value_type_id: 0,
        map_btf_key_type_id: 0,
        btf_data: 0,
        btf_data_size: 0,
        btf_base_btf: 0,
        // Only the local-storage sub-table participates in these tests.
        task_storage_offsets: Some(test_task_storage_offsets()),
        htab_offsets: None,
        struct_ops_offsets: None,
        ringbuf_offsets: None,
        stackmap_offsets: None,
    }
}
/// Build a `BpfMapInfo` named `test_storage` located at `map_kva` with the
/// given value size and map type; every field the local-storage walker does
/// not consult is zeroed/defaulted.
fn make_storage_map(map_kva: u64, value_size: u32, map_type: u32) -> BpfMapInfo {
    let (name_bytes, name_len) = super::name_from_str("test_storage");
    BpfMapInfo {
        name_bytes,
        name_len,
        map_kva,
        map_pa: 0,
        map_type,
        map_flags: 0,
        key_size: 0,
        value_size,
        max_entries: 0,
        value_kva: None,
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    }
}
/// Self-contained fixture for local-storage iteration tests: a fake guest
/// physical-memory image plus everything needed to walk it.
struct StorageScene {
    // Simulated guest physical memory; byte indices into `buf` act as PAs.
    buf: Vec<u8>,
    // Direct-map base: KVA = page_offset + PA for pointers planted in `buf`.
    page_offset: u64,
    // The storage map under test, whose map_kva points at the smap in `buf`.
    map: BpfMapInfo,
    // Offsets table to hand to `lookup_ctx`.
    offsets: BpfMapOffsets,
    // Physical address of each selem, in allocation order across buckets.
    elem_pas: Vec<u64>,
}
/// Construct a synthetic guest-memory image containing one
/// `bpf_local_storage_map` (smap), its bucket array, the selem chains, and a
/// per-elem `bpf_local_storage` container, ready for
/// `iter_local_storage_entries` to walk.
///
/// Layout inside `buf` (PA == index into `buf`; KVA = page_offset + PA):
///   0x0000    smap (bucket_log u32 + buckets pointer)
///   0x1000    bucket array (`n_buckets` x `bucket_size` bytes)
///   0x2000    selems, allocated sequentially across buckets
///   0x10_0000 bpf_local_storage containers, one `ls_size` slot per selem
///
/// `entries_per_bucket[i]` lists `(value_bytes, owner_kva, ls_kva_override)`
/// tuples for bucket `i`; `Some(kva)` plants that raw pointer in the selem's
/// local_storage field instead of the real container's KVA.
#[allow(clippy::type_complexity)]
fn build_storage_scene(
    n_buckets: u32,
    bucket_log: u32,
    entries_per_bucket: &[Vec<(Vec<u8>, u64, Option<u64>)>],
    value_size: u32,
    map_type: u32,
) -> StorageScene {
    assert!(n_buckets.is_power_of_two() || n_buckets == 0);
    assert_eq!(entries_per_bucket.len(), n_buckets as usize);
    let ts = test_task_storage_offsets();
    let offsets = test_local_storage_map_offsets();
    let page_offset: u64 = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
    // Direct-map style translation used for every pointer written into `buf`.
    let pa_to_kva = |pa: u64| -> u64 { page_offset.wrapping_add(pa) };
    let smap_pa: u64 = 0x0000;
    let buckets_pa: u64 = 0x1000;
    let elems_start: u64 = 0x2000;
    let ls_start: u64 = 0x10_0000;
    // Each selem slot must at least hold the sdata payload; 64 keeps slots
    // comfortably apart for the small values these tests use.
    let elem_size = (ts.elem_sdata + ts.sdata_data + value_size as usize).max(64);
    let ls_size: usize = 64;
    let total_entries: usize = entries_per_bucket.iter().map(|e| e.len()).sum();
    // Trailing 0x1000 of slack so the last ls container write stays in bounds.
    let buf_size = (ls_start as usize) + total_entries * ls_size + 0x1000;
    let mut buf = vec![0u8; buf_size];
    let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
        let off = pa as usize;
        buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
    };
    let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
        let off = pa as usize;
        buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
    };
    // smap header: bucket_log and the KVA of the bucket array.
    write_u32(&mut buf, smap_pa + ts.smap_bucket_log as u64, bucket_log);
    write_u64(
        &mut buf,
        smap_pa + ts.smap_buckets as u64,
        pa_to_kva(buckets_pa),
    );
    let mut elem_pas: Vec<u64> = Vec::with_capacity(total_entries);
    let mut next_elem_idx: usize = 0;
    for (bucket_idx, bucket_entries) in entries_per_bucket.iter().enumerate() {
        let bucket_pa = buckets_pa + (bucket_idx as u64) * (ts.bucket_size as u64);
        if bucket_entries.is_empty() {
            // Empty bucket: NULL hlist head.
            write_u64(
                &mut buf,
                bucket_pa + ts.bucket_list as u64 + ts.hlist_head_first as u64,
                0,
            );
            continue;
        }
        // Reserve PAs for this bucket's selems up front so chain links can
        // reference the following slot before it is populated.
        let bucket_elem_start = next_elem_idx;
        for _ in 0..bucket_entries.len() {
            let elem_pa = elems_start + (next_elem_idx as u64) * (elem_size as u64);
            elem_pas.push(elem_pa);
            next_elem_idx += 1;
        }
        // Bucket head points at the first selem of the chain.
        write_u64(
            &mut buf,
            bucket_pa + ts.bucket_list as u64 + ts.hlist_head_first as u64,
            pa_to_kva(elem_pas[bucket_elem_start]),
        );
        for (slot_idx, (value, owner, ls_override)) in bucket_entries.iter().enumerate() {
            let elem_idx = bucket_elem_start + slot_idx;
            let elem_pa = elem_pas[elem_idx];
            // sdata value payload lives inside the selem itself.
            let value_off = elem_pa + ts.elem_sdata as u64 + ts.sdata_data as u64;
            assert!(
                value.len() <= value_size as usize,
                "value bytes ({}) exceed declared value_size ({})",
                value.len(),
                value_size,
            );
            for (i, b) in value.iter().enumerate() {
                buf[value_off as usize + i] = *b;
            }
            // Per-elem bpf_local_storage container carrying the owner pointer.
            let ls_pa = ls_start + (elem_idx as u64) * (ls_size as u64);
            write_u64(&mut buf, ls_pa + ts.ls_owner as u64, *owner);
            let ls_kva = match ls_override {
                Some(v) => *v,
                None => pa_to_kva(ls_pa),
            };
            write_u64(&mut buf, elem_pa + ts.elem_local_storage as u64, ls_kva);
            // Chain link: next selem in this bucket, or NULL at the tail.
            let next_kva = if slot_idx + 1 < bucket_entries.len() {
                pa_to_kva(elem_pas[elem_idx + 1])
            } else {
                0
            };
            write_u64(&mut buf, elem_pa + ts.hlist_node_next as u64, next_kva);
        }
    }
    let map = make_storage_map(pa_to_kva(smap_pa), value_size, map_type);
    StorageScene {
        buf,
        page_offset,
        map,
        offsets,
        elem_pas,
    }
}
/// A map whose type is not one of the *_STORAGE kinds must be rejected
/// before any guest memory is read.
#[test]
fn iter_local_storage_non_storage_map_returns_empty() {
    let buckets = [vec![(vec![0u8; 4], 0xDEAD_BEEFu64, None)]];
    let scene = build_storage_scene(1, 0, &buckets, 4, BPF_MAP_TYPE_HASH);
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert!(
        entries.is_empty(),
        "non-storage map types must short-circuit"
    );
}
/// Two buckets, both with NULL hlist heads: the walk visits every bucket and
/// yields nothing.
#[test]
fn iter_local_storage_empty_buckets() {
    let scene = build_storage_scene(2, 1, &[vec![], vec![]], 4, BPF_MAP_TYPE_TASK_STORAGE);
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert!(entries.is_empty(), "no live elems => no entries");
}
/// One bucket holding a single selem: the entry surfaces with the owner KVA
/// (little-endian bytes) as its key and the sdata payload as its value.
#[test]
fn iter_local_storage_single_selem() {
    let value = vec![0xAA, 0xBB, 0xCC, 0xDD];
    let owner = 0xFFFF_8880_1234_0000u64;
    let buckets = [vec![(value.clone(), owner, None)]];
    let scene = build_storage_scene(1, 0, &buckets, 4, BPF_MAP_TYPE_TASK_STORAGE);
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert_eq!(entries.len(), 1);
    assert_eq!(entries[0].0, owner.to_le_bytes().to_vec());
    assert_eq!(entries[0].1, value);
}
/// Three selems chained through hlist_node_next inside one bucket must
/// surface in chain order with matching owners and values.
#[test]
fn iter_local_storage_chain_of_three() {
    let values = [vec![1u8, 0, 0, 0], vec![2u8, 0, 0, 0], vec![3u8, 0, 0, 0]];
    let owners = [
        0x1111_1111_1111_1111u64,
        0x2222_2222_2222_2222u64,
        0x3333_3333_3333_3333u64,
    ];
    let chain: Vec<(Vec<u8>, u64, Option<u64>)> = values
        .iter()
        .zip(owners.iter())
        .map(|(v, o)| (v.clone(), *o, None))
        .collect();
    let scene = build_storage_scene(1, 0, &[chain], 4, BPF_MAP_TYPE_TASK_STORAGE);
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert_eq!(entries.len(), 3);
    for (i, entry) in entries.iter().enumerate() {
        assert_eq!(entry.0, owners[i].to_le_bytes().to_vec());
        assert_eq!(entry.1, values[i]);
    }
}
/// Populated buckets separated by empty ones: the walk crosses bucket
/// boundaries and yields entries in bucket order.
#[test]
fn iter_local_storage_multi_bucket() {
    let v_a = vec![10u8, 0, 0, 0];
    let v_b = vec![20u8, 0, 0, 0];
    let o_a = 0xAAAA_AAAA_AAAA_AAAAu64;
    let o_b = 0xBBBB_BBBB_BBBB_BBBBu64;
    let buckets = [
        vec![(v_a.clone(), o_a, None)],
        vec![],
        vec![(v_b.clone(), o_b, None)],
        vec![],
    ];
    let scene = build_storage_scene(4, 2, &buckets, 4, BPF_MAP_TYPE_INODE_STORAGE);
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert_eq!(entries.len(), 2);
    assert_eq!(entries[0].0, o_a.to_le_bytes().to_vec());
    assert_eq!(entries[0].1, v_a);
    assert_eq!(entries[1].0, o_b.to_le_bytes().to_vec());
    assert_eq!(entries[1].1, v_b);
}
/// A NULL bpf_local_storage pointer in the selem degrades the owner to 0
/// instead of dropping the entry.
#[test]
fn iter_local_storage_null_local_storage_yields_owner_zero() {
    let value = vec![0x77u8, 0, 0, 0];
    // Override the selem's local_storage pointer with NULL.
    let buckets = [vec![(value.clone(), 0xDEAD_BEEFu64, Some(0))]];
    let scene = build_storage_scene(1, 0, &buckets, 4, BPF_MAP_TYPE_SK_STORAGE);
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert_eq!(entries.len(), 1);
    assert_eq!(entries[0].0, 0u64.to_le_bytes().to_vec());
    assert_eq!(entries[0].1, value);
}
/// A local_storage pointer that cannot be translated (outside the backing
/// buffer) degrades the owner to 0 instead of dropping the entry.
#[test]
fn iter_local_storage_unmapped_local_storage_yields_owner_zero() {
    let value = vec![0xEEu8, 0xFF, 0, 0];
    // A KVA 1 GiB past the direct-map base: valid-looking, but unbacked.
    let unmapped_kva = crate::monitor::symbols::DEFAULT_PAGE_OFFSET + (1u64 << 30);
    let buckets = [vec![(value.clone(), 0xDEAD_BEEFu64, Some(unmapped_kva))]];
    let scene = build_storage_scene(1, 0, &buckets, 4, BPF_MAP_TYPE_CGRP_STORAGE);
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert_eq!(entries.len(), 1);
    assert_eq!(entries[0].0, 0u64.to_le_bytes().to_vec());
    assert_eq!(entries[0].1, value);
}
/// An untranslatable next pointer terminates the chain walk; entries read
/// before the bad link are kept.
#[test]
fn iter_local_storage_unmapped_elem_breaks_chain() {
    let v1 = vec![1u8, 0, 0, 0];
    let v2 = vec![2u8, 0, 0, 0];
    let v3 = vec![3u8, 0, 0, 0];
    let chain: Vec<(Vec<u8>, u64, Option<u64>)> = vec![
        (v1.clone(), 0x1111u64, None),
        (v2.clone(), 0x2222u64, None),
        (v3.clone(), 0x3333u64, None),
    ];
    let mut scene = build_storage_scene(1, 0, &[chain], 4, BPF_MAP_TYPE_TASK_STORAGE);
    // Corrupt elem 0's next link so it points outside the mapped buffer.
    let unmapped_kva = scene.page_offset + (1u64 << 30);
    let ts = test_task_storage_offsets();
    let next_off = scene.elem_pas[0] as usize + ts.hlist_node_next;
    scene.buf[next_off..next_off + 8].copy_from_slice(&unmapped_kva.to_ne_bytes());
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert_eq!(
        entries.len(),
        1,
        "chain breaks at first untranslatable elem"
    );
    assert_eq!(entries[0].1, v1);
}
/// An untranslatable bucket head skips only that bucket; the remaining
/// buckets are still walked.
#[test]
fn iter_local_storage_unmapped_bucket_continues() {
    let v_a = vec![0xAAu8, 0, 0, 0];
    let v_b = vec![0xBBu8, 0, 0, 0];
    let buckets = [
        vec![(v_a.clone(), 0xA1u64, None)],
        vec![(vec![0u8; 4], 0xB1u64, None)],
        vec![(v_b.clone(), 0xC1u64, None)],
        vec![],
    ];
    let mut scene = build_storage_scene(4, 2, &buckets, 4, BPF_MAP_TYPE_TASK_STORAGE);
    // Point bucket 1's hlist head at an unbacked KVA. The bucket array lives
    // at PA 0x1000 in the scene layout, so bucket 1 starts one bucket_size in.
    let ts = test_task_storage_offsets();
    let bucket1_first_off = (0x1000u64
        + (ts.bucket_size as u64)
        + ts.bucket_list as u64
        + ts.hlist_head_first as u64) as usize;
    let unmapped_kva = scene.page_offset + (1u64 << 30);
    scene.buf[bucket1_first_off..bucket1_first_off + 8]
        .copy_from_slice(&unmapped_kva.to_ne_bytes());
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert_eq!(entries.len(), 2);
    assert_eq!(entries[0].1, v_a);
    assert_eq!(entries[1].1, v_b);
}
/// A bucket_log of 32 would imply 2^32 buckets; the reader must refuse the
/// whole map rather than attempt the walk.
#[test]
fn iter_local_storage_bucket_log_32_returns_empty() {
    let mut scene = build_storage_scene(1, 0, &[vec![]], 4, BPF_MAP_TYPE_TASK_STORAGE);
    // Overwrite the smap's bucket_log field in place with an absurd shift.
    let ts = test_task_storage_offsets();
    scene.buf[ts.smap_bucket_log..ts.smap_bucket_log + 4].copy_from_slice(&32u32.to_ne_bytes());
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert!(
        entries.is_empty(),
        "bucket_log >= 32 must drop the read entirely"
    );
}
/// A bucket_log within u32 range but above the implementation's safety cap
/// (2^17 buckets) must also drop the read.
#[test]
fn iter_local_storage_bucket_log_17_returns_empty() {
    let mut scene = build_storage_scene(1, 0, &[vec![]], 4, BPF_MAP_TYPE_TASK_STORAGE);
    // Overwrite the smap's bucket_log field in place.
    let ts = test_task_storage_offsets();
    scene.buf[ts.smap_bucket_log..ts.smap_bucket_log + 4].copy_from_slice(&17u32.to_ne_bytes());
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert!(
        entries.is_empty(),
        "bucket count above the safety cap must drop the read entirely"
    );
}
/// Without `TaskStorageOffsets` the walker cannot interpret the smap at all
/// and must return no entries.
#[test]
fn iter_local_storage_no_offsets_returns_empty() {
    let buckets = [vec![(vec![0u8; 4], 0xDEAD_BEEFu64, None)]];
    let scene = build_storage_scene(1, 0, &buckets, 4, BPF_MAP_TYPE_TASK_STORAGE);
    // Strip the local-storage offsets table from the otherwise valid scene.
    let mut offsets = scene.offsets;
    offsets.task_storage_offsets = None;
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert!(
        entries.is_empty(),
        "missing TaskStorageOffsets must short-circuit"
    );
}
/// A NULL buckets pointer in the smap header must abort the walk.
#[test]
fn iter_local_storage_null_buckets_returns_empty() {
    let mut scene = build_storage_scene(1, 0, &[vec![]], 4, BPF_MAP_TYPE_TASK_STORAGE);
    // Zero the smap's buckets pointer in place.
    let ts = test_task_storage_offsets();
    scene.buf[ts.smap_buckets..ts.smap_buckets + 8].copy_from_slice(&0u64.to_ne_bytes());
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &scene.map);
    assert!(
        entries.is_empty(),
        "NULL buckets pointer must short-circuit"
    );
}
/// A declared value_size above MAX_VALUE_SIZE must abort before any value
/// bytes are read.
#[test]
fn iter_local_storage_value_size_cap_returns_empty() {
    let scene = build_storage_scene(1, 0, &[vec![]], 4, BPF_MAP_TYPE_TASK_STORAGE);
    // Clone the map and inflate value_size just past the cap.
    let mut hostile = scene.map.clone();
    hostile.value_size = (super::super::MAX_VALUE_SIZE + 1) as u32;
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let ctx = lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false);
    let entries = iter_local_storage_entries(&ctx, &hostile);
    assert!(
        entries.is_empty(),
        "value_size > MAX_VALUE_SIZE must short-circuit"
    );
}
/// Two selems that share one bpf_local_storage container whose owner field
/// is 0: both entries must still surface with all-zero key bytes — an
/// owner-lookup cache keyed on the ls KVA must not treat the cached 0 as
/// "already emitted" and drop the second entry.
#[test]
fn iter_local_storage_owner_zero_surfaces_zero_for_shared_entries() {
    let v1 = vec![0xAAu8, 0xAA, 0, 0];
    let v2 = vec![0xBBu8, 0xBB, 0, 0];
    let mut scene = build_storage_scene(
        1,
        0,
        &[vec![(v1.clone(), 0u64, None), (v2.clone(), 0u64, None)]],
        4,
        BPF_MAP_TYPE_TASK_STORAGE,
    );
    let ts = test_task_storage_offsets();
    // The first ls container in the scene layout (see build_storage_scene).
    let ls_start: u64 = 0x10_0000;
    let shared_ls_kva = scene.page_offset.wrapping_add(ls_start);
    // Force the shared container's owner field to 0.
    let owner_pa = ls_start + ts.ls_owner as u64;
    scene.buf[owner_pa as usize..owner_pa as usize + 8].copy_from_slice(&0u64.to_ne_bytes());
    // Redirect elem 1's local_storage pointer at elem 0's container so both
    // selems reference the same bpf_local_storage.
    let elem1_ls_off = scene.elem_pas[1] as usize + ts.elem_local_storage;
    scene.buf[elem1_ls_off..elem1_ls_off + 8].copy_from_slice(&shared_ls_kva.to_ne_bytes());
    // Sanity-check the premise: elem 0 already points at that same container.
    let elem0_ls_off = scene.elem_pas[0] as usize + ts.elem_local_storage;
    let elem0_ls_kva = u64::from_ne_bytes(
        scene.buf[elem0_ls_off..elem0_ls_off + 8]
            .try_into()
            .unwrap(),
    );
    assert_eq!(
        elem0_ls_kva, shared_ls_kva,
        "test premise: both elems must address the same ls container",
    );
    // SAFETY: `scene.buf` outlives `mem` for the duration of this test.
    let mem = unsafe { GuestMem::new(scene.buf.as_ptr() as *mut u8, scene.buf.len() as u64) };
    let entries = iter_local_storage_entries(
        &lookup_ctx(&mem, 0, scene.page_offset, &scene.offsets, false),
        &scene.map,
    );
    assert_eq!(
        entries.len(),
        2,
        "both selems must surface; the cache short-circuit must NOT drop the second entry",
    );
    assert_eq!(
        entries[0].0,
        0u64.to_le_bytes().to_vec(),
        "first elem's owner field is 0 → owner_kva_le_bytes is all-zero",
    );
    assert_eq!(entries[0].1, v1);
    assert_eq!(
        entries[1].0,
        0u64.to_le_bytes().to_vec(),
        "second elem hits the cached 0; owner_kva_le_bytes stays all-zero",
    );
    assert_eq!(entries[1].1, v2);
}