use crate::{
ebpf,
error::EbpfError,
vm::{Config, ProgramResult},
};
use std::{array, cell::UnsafeCell, fmt, ops::Range};
/// Memory region for bounds checking and address translation
#[derive(Clone, PartialEq, Eq, Default)]
#[repr(C, align(32))]
pub struct MemoryRegion {
/// start host address
pub host_addr: u64,
/// start virtual address
pub vm_addr: u64,
/// Length in bytes
pub len: u64,
/// Size of regular gaps as bit shift (63 means this region is continuous)
pub vm_gap_shift: u8,
/// Is also writable (otherwise it is readonly)
pub is_writable: bool,
}
impl MemoryRegion {
fn new(slice: &[u8], vm_addr: u64, vm_gap_size: u64, is_writable: bool) -> Self {
// 63 marks the region as continuous (no gaps).
let mut vm_gap_shift = (std::mem::size_of::<u64>() as u8)
.saturating_mul(8)
.saturating_sub(1);
if vm_gap_size > 0 {
// Store the gap size as a bit shift; it must be a power of two for
// the mask arithmetic in vm_to_host() to be correct.
vm_gap_shift = vm_gap_shift.saturating_sub(vm_gap_size.leading_zeros() as u8);
debug_assert_eq!(Some(vm_gap_size), 1_u64.checked_shl(vm_gap_shift as u32));
}
MemoryRegion {
host_addr: slice.as_ptr() as u64,
vm_addr,
len: slice.len() as u64,
vm_gap_shift,
is_writable,
}
}
/// Only to be used in tests and benches
pub fn new_for_testing(
slice: &[u8],
vm_addr: u64,
vm_gap_size: u64,
is_writable: bool,
) -> Self {
Self::new(slice, vm_addr, vm_gap_size, is_writable)
}
/// Creates a new readonly MemoryRegion from a slice
pub fn new_readonly(slice: &[u8], vm_addr: u64) -> Self {
Self::new(slice, vm_addr, 0, false)
}
/// Creates a new writable MemoryRegion from a mutable slice
pub fn new_writable(slice: &mut [u8], vm_addr: u64) -> Self {
Self::new(slice, vm_addr, 0, true)
}
/// Creates a new writable gapped MemoryRegion from a mutable slice
pub fn new_writable_gapped(slice: &mut [u8], vm_addr: u64, vm_gap_size: u64) -> Self {
Self::new(slice, vm_addr, vm_gap_size, true)
}
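/// Converts a virtual memory address within this region to the
/// corresponding host address, bounds checking the access and accounting
/// for gaps.
///
/// A minimal sketch of how gapped translation behaves (illustrative only,
/// not a doctest; the base address 0x100 is hypothetical):
///
/// ```ignore
/// let mut mem = [0u8; 4];
/// // In vm space, 2-byte data chunks alternate with 2-byte unmapped gaps.
/// let region = MemoryRegion::new_writable_gapped(&mut mem, 0x100, 2);
/// // vm 0x104 lands in the second data chunk, i.e. host_addr + 2.
/// assert!(matches!(region.vm_to_host(0x104, 2), ProgramResult::Ok(_)));
/// // vm 0x102 falls in a gap and is rejected.
/// assert!(matches!(region.vm_to_host(0x102, 2), ProgramResult::Err(_)));
/// ```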
pub fn vm_to_host(&self, vm_addr: u64, len: u64) -> ProgramResult {
if vm_addr < self.vm_addr {
return ProgramResult::Err(EbpfError::InvalidVirtualAddress(vm_addr));
}
let begin_offset = vm_addr.saturating_sub(self.vm_addr);
// In a gapped region, data chunks of 2^vm_gap_shift bytes alternate
// with unmapped gaps of the same size, so the bit at position
// vm_gap_shift tells whether the offset falls into a gap. Continuous
// regions have vm_gap_shift == 63, so the branch is effectively
// unreachable for them.
let is_in_gap = (begin_offset
.checked_shr(self.vm_gap_shift as u32)
.unwrap_or(0)
& 1)
== 1;
// Collapse the gaps out of the offset: halve the chunk-index part (the
// bits at and above vm_gap_shift) and keep the within-chunk part.
let gap_mask = (-1i64).checked_shl(self.vm_gap_shift as u32).unwrap_or(0) as u64;
let gapped_offset =
(begin_offset & gap_mask).checked_shr(1).unwrap_or(0) | (begin_offset & !gap_mask);
if let Some(end_offset) = gapped_offset.checked_add(len) {
if end_offset <= self.len && !is_in_gap {
return ProgramResult::Ok(self.host_addr.saturating_add(gapped_offset));
}
}
ProgramResult::Err(EbpfError::InvalidVirtualAddress(vm_addr))
}
}
impl fmt::Debug for MemoryRegion {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"host_addr: {:#x?}-{:#x?}, vm_addr: {:#x?}-{:#x?}, len: {}",
self.host_addr,
self.host_addr.saturating_add(self.len),
self.vm_addr,
self.vm_addr.saturating_add(self.len),
self.len
)
}
}
impl std::cmp::PartialOrd for MemoryRegion {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl std::cmp::Ord for MemoryRegion {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.vm_addr.cmp(&other.vm_addr)
}
}
/// The kind of memory access
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum AccessType {
/// Read
Load,
/// Write
Store,
}
/// Memory mapping for unaligned memory regions, based on a branchless
/// eytzinger binary search
#[derive(Debug)]
pub struct UnalignedMemoryMapping<'a> {
/// Mapped memory regions, stored in eytzinger (breadth-first) order
regions: Box<[MemoryRegion]>,
/// Copy of the regions' start addresses, kept separate for better cache
/// density during the search
region_addresses: Box<[u64]>,
/// Cache of the most recently mapped regions
cache: UnsafeCell<MappingCache>,
/// VM configuration
config: &'a Config,
}
impl<'a> UnalignedMemoryMapping<'a> {
/// Fills `self.regions` and `self.region_addresses` with the regions of
/// `ascending_regions` rearranged into eytzinger (breadth-first) order, by
/// running an in-order traversal of the implicit binary search tree.
fn construct_eytzinger_order(
&mut self,
ascending_regions: &[MemoryRegion],
mut in_index: usize,
out_index: usize,
) -> usize {
if out_index >= self.regions.len() {
return in_index;
}
in_index = self.construct_eytzinger_order(
ascending_regions,
in_index,
out_index.saturating_mul(2).saturating_add(1),
);
self.regions[out_index] = ascending_regions[in_index].clone();
self.region_addresses[out_index] = ascending_regions[in_index].vm_addr;
self.construct_eytzinger_order(
ascending_regions,
in_index.saturating_add(1),
out_index.saturating_mul(2).saturating_add(2),
)
}
/// Creates a new UnalignedMemoryMapping structure from the given regions
pub fn new(mut regions: Vec<MemoryRegion>, config: &'a Config) -> Result<Self, EbpfError> {
regions.sort();
// After sorting by start address, adjacent regions must not overlap.
for index in 1..regions.len() {
let first = &regions[index.saturating_sub(1)];
let second = &regions[index];
if first.vm_addr.saturating_add(first.len) > second.vm_addr {
return Err(EbpfError::InvalidMemoryRegion(index));
}
}
let mut result = Self {
regions: vec![MemoryRegion::default(); regions.len()].into_boxed_slice(),
region_addresses: vec![0; regions.len()].into_boxed_slice(),
cache: UnsafeCell::new(MappingCache::new()),
config,
};
result.construct_eytzinger_order(&regions, 0, 0);
Ok(result)
}
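/// Given a list of regions, maps a virtual memory access to the host
/// address of the region containing it, or generates an access violation.
///
/// Lookup is a branchless eytzinger search over the region start addresses.
/// Illustrative layout (hypothetical regions): seven regions with ascending
/// starts r0..r6 are stored as [r3, r1, r5, r0, r2, r4, r6], a breadth-first
/// binary search tree rooted at the median r3.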
#[allow(clippy::integer_arithmetic)]
pub fn map(&self, access_type: AccessType, vm_addr: u64, len: u64, pc: usize) -> ProgramResult {
// Safety: the cache reference never outlives this call, and UnsafeCell
// makes the mapping !Sync, so no concurrent borrow can exist.
let cache = unsafe { &mut *self.cache.get() };
let (cache_miss, index) = if let Some(region) = cache.find(vm_addr) {
(false, region)
} else {
// Branchless binary search over the eytzinger-ordered start
// addresses: descend right whenever the probed start is <= vm_addr.
let mut index = 1;
while index <= self.region_addresses.len() {
// Safety: 1 <= index <= region_addresses.len() here, so
// index - 1 is always in bounds.
index = (index << 1)
+ unsafe { *self.region_addresses.get_unchecked(index - 1) <= vm_addr }
as usize;
}
// Recover the (1-based) position of the deepest node where the
// search went right, i.e. the last region starting at or before
// vm_addr; 0 means vm_addr precedes every region.
index >>= index.trailing_zeros() + 1;
if index == 0 {
return generate_access_violation(self.config, access_type, vm_addr, len, pc);
}
(true, index)
};
// Safety: index comes from the cache or the search above, both of which
// guarantee 1 <= index <= regions.len().
let region = unsafe { self.regions.get_unchecked(index - 1) };
if access_type == AccessType::Load || region.is_writable {
if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, len) {
if cache_miss {
cache.insert(
region.vm_addr..region.vm_addr.saturating_add(region.len),
index,
);
}
return ProgramResult::Ok(host_addr);
}
}
generate_access_violation(self.config, access_type, vm_addr, len, pc)
}
/// Returns the `MemoryRegion`s in this mapping
pub fn get_regions(&self) -> &[MemoryRegion] {
&self.regions
}
/// Replaces the `MemoryRegion` at the given index
pub fn replace_region(&mut self, index: usize, region: MemoryRegion) -> Result<(), EbpfError> {
// The replacement must keep the same vm_addr so the eytzinger order
// stays valid; the cache is flushed as it may hold the old region.
if index >= self.regions.len() || self.regions[index].vm_addr != region.vm_addr {
return Err(EbpfError::InvalidMemoryRegion(index));
}
self.regions[index] = region;
self.cache.get_mut().flush();
Ok(())
}
}
/// Memory mapping which uses the upper address bits to identify the
/// underlying memory region directly
#[derive(Debug)]
pub struct AlignedMemoryMapping<'a> {
/// Mapped memory regions, indexed by their address bucket
regions: Box<[MemoryRegion]>,
/// VM configuration
config: &'a Config,
}
impl<'a> AlignedMemoryMapping<'a> {
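/// Creates a new AlignedMemoryMapping structure from the given regions.
///
/// Region `i` must start in address bucket `i`, i.e.
/// `region.vm_addr >> ebpf::VIRTUAL_ADDRESS_BITS == i`. A minimal sketch
/// (illustrative, not a doctest; it mirrors the layout used in the tests
/// below):
///
/// ```ignore
/// let config = Config::default();
/// let prog = [0u8; 8];  // bucket 1: ebpf::MM_PROGRAM_START
/// let stack = [0u8; 8]; // bucket 2: ebpf::MM_STACK_START
/// let m = AlignedMemoryMapping::new(
///     vec![
///         MemoryRegion::new_readonly(&prog, ebpf::MM_PROGRAM_START),
///         MemoryRegion::new_readonly(&stack, ebpf::MM_STACK_START),
///     ],
///     &config,
/// )?;
/// ```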
pub fn new(mut regions: Vec<MemoryRegion>, config: &'a Config) -> Result<Self, EbpfError> {
// Bucket 0 never maps; reserve it with an empty null region.
regions.insert(0, MemoryRegion::new_readonly(&[], 0));
regions.sort();
// Every region must start in the bucket matching its index.
for (index, region) in regions.iter().enumerate() {
if region
.vm_addr
.checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
.unwrap_or(0)
!= index as u64
{
return Err(EbpfError::InvalidMemoryRegion(index));
}
}
Ok(Self {
regions: regions.into_boxed_slice(),
config,
})
}
/// Given a list of regions, maps a virtual memory access to the
/// corresponding host address, or generates an access violation.
pub fn map(&self, access_type: AccessType, vm_addr: u64, len: u64, pc: usize) -> ProgramResult {
// The upper address bits select the region bucket directly; bucket 0
// is the empty null region and never maps.
let index = vm_addr
.checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
.unwrap_or(0) as usize;
if (1..self.regions.len()).contains(&index) {
let region = &self.regions[index];
if access_type == AccessType::Load || region.is_writable {
if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, len) {
return ProgramResult::Ok(host_addr);
}
}
}
generate_access_violation(self.config, access_type, vm_addr, len, pc)
}
/// Returns the `MemoryRegion`s in this mapping
pub fn get_regions(&self) -> &[MemoryRegion] {
&self.regions
}
/// Replaces the `MemoryRegion` at the given index
pub fn replace_region(&mut self, index: usize, region: MemoryRegion) -> Result<(), EbpfError> {
if index >= self.regions.len() {
return Err(EbpfError::InvalidMemoryRegion(index));
}
// The replacement must begin and end inside the bucket it replaces.
let begin_index = region
.vm_addr
.checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
.unwrap_or(0) as usize;
let end_index = region
.vm_addr
.saturating_add(region.len.saturating_sub(1))
.checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
.unwrap_or(0) as usize;
if begin_index != index || end_index != index {
return Err(EbpfError::InvalidMemoryRegion(index));
}
self.regions[index] = region;
Ok(())
}
}
/// Maps virtual memory to host memory
#[derive(Debug)]
pub enum MemoryMapping<'a> {
/// Aligned memory mapping which uses the upper address bits to identify
/// the underlying memory region
Aligned(AlignedMemoryMapping<'a>),
/// Memory mapping that allows mapping unaligned memory regions
Unaligned(UnalignedMemoryMapping<'a>),
}
impl<'a> MemoryMapping<'a> {
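/// Creates the appropriate memory mapping for the given config: aligned
/// if `config.aligned_memory_mapping` is set, unaligned otherwise.
///
/// A minimal construction sketch (illustrative, not a doctest; the region
/// layout mirrors the tests below):
///
/// ```ignore
/// let config = Config::default();
/// let prog = [0u8; 8];
/// let mut stack = vec![0u8; 64];
/// let mapping = MemoryMapping::new(
///     vec![
///         MemoryRegion::new_readonly(&prog, ebpf::MM_PROGRAM_START),
///         MemoryRegion::new_writable(&mut stack, ebpf::MM_STACK_START),
///     ],
///     &config,
/// )?;
/// // Translate a store to the start of the stack region.
/// let host_addr = mapping.map(AccessType::Store, ebpf::MM_STACK_START, 8, 0);
/// ```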
pub fn new(regions: Vec<MemoryRegion>, config: &'a Config) -> Result<Self, EbpfError> {
if config.aligned_memory_mapping {
AlignedMemoryMapping::new(regions, config).map(MemoryMapping::Aligned)
} else {
UnalignedMemoryMapping::new(regions, config).map(MemoryMapping::Unaligned)
}
}
/// Maps a virtual memory access to the host address of the region
/// containing it, or generates an access violation.
pub fn map(&self, access_type: AccessType, vm_addr: u64, len: u64, pc: usize) -> ProgramResult {
match self {
MemoryMapping::Aligned(m) => m.map(access_type, vm_addr, len, pc),
MemoryMapping::Unaligned(m) => m.map(access_type, vm_addr, len, pc),
}
}
/// Returns the `MemoryRegion`s in this mapping
pub fn get_regions(&self) -> &[MemoryRegion] {
match self {
MemoryMapping::Aligned(m) => m.get_regions(),
MemoryMapping::Unaligned(m) => m.get_regions(),
}
}
/// Replaces the `MemoryRegion` at the given index
pub fn replace_region(&mut self, index: usize, region: MemoryRegion) -> Result<(), EbpfError> {
match self {
MemoryMapping::Aligned(m) => m.replace_region(index, region),
MemoryMapping::Unaligned(m) => m.replace_region(index, region),
}
}
}
/// Helper for map() to generate the proper error when an access is rejected
fn generate_access_violation(
config: &Config,
access_type: AccessType,
vm_addr: u64,
len: u64,
pc: usize,
) -> ProgramResult {
// Compute which stack frame (if any) the faulting address falls into, so
// stack violations can be reported with the offending frame.
let stack_frame = (vm_addr as i64)
.saturating_sub(ebpf::MM_STACK_START as i64)
.checked_div(config.stack_frame_size as i64)
.unwrap_or(0);
if !config.dynamic_stack_frames
&& (-1..(config.max_call_depth as i64).saturating_add(1)).contains(&stack_frame)
{
ProgramResult::Err(EbpfError::StackAccessViolation(
pc,
access_type,
vm_addr,
len,
stack_frame,
))
} else {
// Keep only the address bits at and above MM_PROGRAM_START, i.e. the
// region selector, to name the region the access targeted.
let region_name = match vm_addr & (!ebpf::MM_PROGRAM_START.saturating_sub(1)) {
ebpf::MM_PROGRAM_START => "program",
ebpf::MM_STACK_START => "stack",
ebpf::MM_HEAP_START => "heap",
ebpf::MM_INPUT_START => "input",
_ => "unknown",
};
ProgramResult::Err(EbpfError::AccessViolation(
pc,
access_type,
vm_addr,
len,
region_name,
))
}
}
/// Small cache of the most recently mapped regions.
///
/// Exploits temporal locality: programs tend to touch the same few regions
/// repeatedly, so a short linear scan usually beats a fresh tree search.
#[derive(Debug)]
struct MappingCache {
/// The cached (vm address range, region index) entries
entries: [(Range<u64>, usize); MappingCache::SIZE as usize],
/// Index of the most recently inserted entry; find() scans forward from
/// here, so newer entries are probed first
head: isize,
}
impl MappingCache {
const SIZE: isize = 4;
fn new() -> MappingCache {
MappingCache {
entries: array::from_fn(|_| (0..0, 0)),
head: 0,
}
}
#[allow(clippy::integer_arithmetic)]
#[inline]
fn find(&self, vm_addr: u64) -> Option<usize> {
// Scan forward from the most recently inserted entry.
for i in 0..Self::SIZE {
let index = (self.head + i) % Self::SIZE;
// Safety: 0 <= index < SIZE.
let (vm_range, region_index) = unsafe { self.entries.get_unchecked(index as usize) };
if vm_range.contains(&vm_addr) {
return Some(*region_index);
}
}
None
}
#[allow(clippy::integer_arithmetic)]
#[inline]
fn insert(&mut self, vm_range: Range<u64>, region_index: usize) {
// Step head backwards so the new entry is probed first by find().
self.head = (self.head - 1).rem_euclid(Self::SIZE);
// Safety: rem_euclid(SIZE) guarantees 0 <= head < SIZE.
unsafe { *self.entries.get_unchecked_mut(self.head as usize) = (vm_range, region_index) };
}
/// Clears all cached entries
#[inline]
fn flush(&mut self) {
self.entries = array::from_fn(|_| (0..0, 0));
self.head = 0;
}
}
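// A minimal sketch of the cache discipline (illustrative, hypothetical
// values): after insert(10..20, 1) followed by insert(20..30, 2), find(25)
// hits on the first probe and find(15) on the second, since find() scans
// forward from the most recent insertion.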
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_mapping_cache() {
let mut cache = MappingCache::new();
assert_eq!(cache.find(0), None);
let mut ranges = vec![10u64..20, 20..30, 30..40, 40..50];
for (region, range) in ranges.iter().cloned().enumerate() {
cache.insert(range, region);
}
for (region, range) in ranges.iter().enumerate() {
if region > 0 {
assert_eq!(cache.find(range.start - 1), Some(region - 1));
} else {
assert_eq!(cache.find(range.start - 1), None);
}
assert_eq!(cache.find(range.start), Some(region));
assert_eq!(cache.find(range.start + 1), Some(region));
assert_eq!(cache.find(range.end - 1), Some(region));
if region < 3 {
assert_eq!(cache.find(range.end), Some(region + 1));
} else {
assert_eq!(cache.find(range.end), None);
}
}
cache.insert(50..60, 4);
ranges.push(50..60);
for (region, range) in ranges.iter().enumerate() {
if region == 0 {
assert_eq!(cache.find(range.start), None);
continue;
}
if region > 1 {
assert_eq!(cache.find(range.start - 1), Some(region - 1));
} else {
assert_eq!(cache.find(range.start - 1), None);
}
assert_eq!(cache.find(range.start), Some(region));
assert_eq!(cache.find(range.start + 1), Some(region));
assert_eq!(cache.find(range.end - 1), Some(region));
if region < 4 {
assert_eq!(cache.find(range.end), Some(region + 1));
} else {
assert_eq!(cache.find(range.end), None);
}
}
}
#[test]
fn test_mapping_cache_flush() {
let mut cache = MappingCache::new();
assert_eq!(cache.find(0), None);
cache.insert(0..10, 0);
assert_eq!(cache.find(0), Some(0));
cache.flush();
assert_eq!(cache.find(0), None);
}
#[test]
fn test_map_empty() {
let config = Config::default();
let m = UnalignedMemoryMapping::new(vec![], &config).unwrap();
assert!(matches!(
m.map(AccessType::Load, ebpf::MM_INPUT_START, 8, 0),
ProgramResult::Err(EbpfError::AccessViolation(..))
));
let m = AlignedMemoryMapping::new(vec![], &config).unwrap();
assert!(matches!(
m.map(AccessType::Load, ebpf::MM_INPUT_START, 8, 0),
ProgramResult::Err(EbpfError::AccessViolation(..))
));
}
#[test]
fn test_unaligned_map_overlap() {
let config = Config::default();
let mem1 = [1, 2, 3, 4];
let mem2 = [5, 6];
assert!(matches!(
UnalignedMemoryMapping::new(
vec![
MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64 - 1),
],
&config,
)
.unwrap_err(),
EbpfError::InvalidMemoryRegion(1),
));
assert!(UnalignedMemoryMapping::new(
vec![
MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
],
&config,
)
.is_ok());
}
#[test]
fn test_unaligned_map() {
let config = Config::default();
let mem1 = [11];
let mem2 = [22, 22];
let mem3 = [33];
let mem4 = [44, 44];
let m = UnalignedMemoryMapping::new(
vec![
MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
MemoryRegion::new_readonly(
&mem3,
ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64,
),
MemoryRegion::new_readonly(
&mem4,
ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64,
),
],
&config,
)
.unwrap();
assert_eq!(
m.map(AccessType::Load, ebpf::MM_INPUT_START, 1, 0).unwrap(),
mem1.as_ptr() as u64
);
assert_eq!(
m.map(
AccessType::Load,
ebpf::MM_INPUT_START + mem1.len() as u64,
1,
0,
)
.unwrap(),
mem2.as_ptr() as u64
);
assert_eq!(
m.map(
AccessType::Load,
ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64,
1,
0,
)
.unwrap(),
mem3.as_ptr() as u64
);
assert_eq!(
m.map(
AccessType::Load,
ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64,
1,
0,
)
.unwrap(),
mem4.as_ptr() as u64
);
assert!(matches!(
m.map(
AccessType::Load,
ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len() + mem4.len()) as u64,
1,
0,
),
ProgramResult::Err(EbpfError::AccessViolation(..))
));
}
#[test]
fn test_unaligned_map_replace_region() {
let config = Config::default();
let mem1 = [11];
let mem2 = [22, 22];
let mem3 = [33];
let mut m = UnalignedMemoryMapping::new(
vec![
MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
],
&config,
)
.unwrap();
assert_eq!(
m.map(AccessType::Load, ebpf::MM_INPUT_START, 1, 0).unwrap(),
mem1.as_ptr() as u64
);
assert_eq!(
m.map(
AccessType::Load,
ebpf::MM_INPUT_START + mem1.len() as u64,
1,
0,
)
.unwrap(),
mem2.as_ptr() as u64
);
assert!(matches!(
m.replace_region(
2,
MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64)
),
Err(EbpfError::InvalidMemoryRegion(2))
));
let region_index = m
.get_regions()
.iter()
.position(|mem| mem.vm_addr == ebpf::MM_INPUT_START + mem1.len() as u64)
.unwrap();
assert!(matches!(
m.replace_region(
region_index,
MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64 + 1)
),
Err(EbpfError::InvalidMemoryRegion(i)) if i == region_index
));
m.replace_region(
region_index,
MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64),
)
.unwrap();
assert_eq!(
m.map(
AccessType::Load,
ebpf::MM_INPUT_START + mem1.len() as u64,
1,
0,
)
.unwrap(),
mem3.as_ptr() as u64
);
}
#[test]
fn test_aligned_map_replace_region() {
let config = Config::default();
let mem1 = [11];
let mem2 = [22, 22];
let mem3 = [33, 33];
let mut m = AlignedMemoryMapping::new(
vec![
MemoryRegion::new_readonly(&mem1, ebpf::MM_PROGRAM_START),
MemoryRegion::new_readonly(&mem2, ebpf::MM_STACK_START),
],
&config,
)
.unwrap();
assert_eq!(
m.map(AccessType::Load, ebpf::MM_STACK_START, 1, 0).unwrap(),
mem2.as_ptr() as u64
);
assert!(matches!(
m.replace_region(3, MemoryRegion::new_readonly(&mem3, ebpf::MM_STACK_START)),
Err(EbpfError::InvalidMemoryRegion(3))
));
assert!(matches!(
m.replace_region(2, MemoryRegion::new_readonly(&mem3, ebpf::MM_HEAP_START)),
Err(EbpfError::InvalidMemoryRegion(2))
));
assert!(matches!(
m.replace_region(
2,
MemoryRegion::new_readonly(&mem3, ebpf::MM_HEAP_START - 1)
),
Err(EbpfError::InvalidMemoryRegion(2))
));
m.replace_region(2, MemoryRegion::new_readonly(&mem3, ebpf::MM_STACK_START))
.unwrap();
assert_eq!(
m.map(AccessType::Load, ebpf::MM_STACK_START, 1, 0).unwrap(),
mem3.as_ptr() as u64
);
}
}