#[cfg(any(
target_os = "linux",
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
))]
use std::io;
use std::{collections::BTreeMap, ffi::c_void};
use backtrace::Backtrace;
use frida_gum::{PageProtection, RangeDetails};
use hashbrown::HashMap;
use libafl::bolts::cli::FuzzerOptions;
#[cfg(any(
target_os = "linux",
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
))]
use libc::{sysconf, _SC_PAGESIZE};
use nix::{
libc::memset,
sys::mman::{mmap, MapFlags, ProtFlags},
};
use rangemap::RangeSet;
use serde::{Deserialize, Serialize};
use crate::asan::errors::{AsanError, AsanErrors};
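/// An allocator wrapper with binary-only address sanitization: it services
/// allocations from its own mappings and tracks ASAN-style shadow memory for them.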
#[derive(Debug)]
pub struct Allocator {
#[allow(dead_code)]
options: FuzzerOptions,
page_size: usize,
shadow_offset: usize,
shadow_bit: usize,
pre_allocated_shadow: bool,
allocations: HashMap<usize, AllocationMetadata>,
shadow_pages: RangeSet<usize>,
allocation_queue: BTreeMap<usize, Vec<AllocationMetadata>>,
largest_allocation: usize,
total_allocation_size: usize,
base_mapping_addr: usize,
current_mapping_addr: usize,
}
#[cfg(target_vendor = "apple")]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON;
#[cfg(not(target_vendor = "apple"))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;
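/// Translates an application address to its shadow address: one shadow bit tracks one
/// byte of memory, so the address is shifted right by 3, masked to the shadow region,
/// and offset by `shadow_offset`.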
macro_rules! map_to_shadow {
($self:expr, $address:expr) => {
$self.shadow_offset + (($address >> 3) & ((1 << ($self.shadow_bit + 1)) - 1))
};
}
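/// Metadata describing a single allocation made by the [`Allocator`].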
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct AllocationMetadata {
pub address: usize,
pub size: usize,
pub actual_size: usize,
pub allocation_site_backtrace: Option<Backtrace>,
pub release_site_backtrace: Option<Backtrace>,
pub freed: bool,
pub is_malloc_zero: bool,
}
impl Allocator {
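/// Creates a new [`Allocator`] (not supported on this platform).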
#[cfg(not(any(
target_os = "linux",
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
)))]
#[must_use]
pub fn new(_: FuzzerOptions) -> Self {
todo!("Shadow region not yet supported for this platform!");
}
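/// Creates a new [`Allocator`], probing the existing mappings for a shadow region
/// that does not collide with them and reserving it.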
#[cfg(any(
target_os = "linux",
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
))]
#[must_use]
#[allow(clippy::too_many_lines)]
pub fn new(options: FuzzerOptions) -> Self {
let ret = unsafe { sysconf(_SC_PAGESIZE) };
assert!(
ret >= 0,
"Failed to read pagesize {:?}",
io::Error::last_os_error()
);
#[allow(clippy::cast_sign_loss)]
let page_size = ret as usize;
let mut shadow_bit = 0;
let mut occupied_ranges: Vec<(usize, usize)> = vec![];
let mut userspace_max: usize = 0;
for prot in [
PageProtection::Read,
PageProtection::Write,
PageProtection::Execute,
] {
RangeDetails::enumerate_with_prot(prot, &mut |details| {
let start = details.memory_range().base_address().0 as usize;
let end = start + details.memory_range().size();
occupied_ranges.push((start, end));
let base: usize = 2;
#[cfg(target_arch = "x86_64")]
if end <= base.pow(48) && end > userspace_max {
userspace_max = end;
}
#[cfg(target_arch = "aarch64")]
if end <= base.pow(52) && end > userspace_max {
userspace_max = end;
}
true
});
}
let mut maxbit = 0;
for power in 1..64 {
let base: usize = 2;
if base.pow(power) > userspace_max {
maxbit = power;
break;
}
}
// Probe candidate shadow bits and take the first one whose shadow region neither
// overlaps an existing mapping nor fails to be reserved with mmap.
for try_shadow_bit in &[maxbit - 4, maxbit - 3, maxbit - 2] {
let addr: usize = 1 << try_shadow_bit;
let shadow_start = addr;
let shadow_end = addr + addr + addr;
let mut suitable = true;
for (start, end) in &occupied_ranges {
if (shadow_start <= *end) && (*start <= shadow_end) {
println!("shadow_bit {try_shadow_bit:x} is not suitable");
suitable = false;
break;
}
}
if !suitable {
continue;
}
if unsafe {
mmap(
addr as *mut c_void,
page_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_PRIVATE
| ANONYMOUS_FLAG
| MapFlags::MAP_FIXED
| MapFlags::MAP_NORESERVE,
-1,
0,
)
}
.is_ok()
{
shadow_bit = (*try_shadow_bit).try_into().unwrap();
break;
}
}
println!("shadow_bit {shadow_bit:x} is suitable");
assert!(shadow_bit != 0);
let addr: usize = 1 << shadow_bit;
let pre_allocated_shadow = unsafe {
mmap(
addr as *mut c_void,
addr + addr,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
ANONYMOUS_FLAG
| MapFlags::MAP_FIXED
| MapFlags::MAP_PRIVATE
| MapFlags::MAP_NORESERVE,
-1,
0,
)
}
.is_ok();
Self {
options,
page_size,
pre_allocated_shadow,
shadow_offset: 1 << shadow_bit,
shadow_bit,
allocations: HashMap::new(),
shadow_pages: RangeSet::new(),
allocation_queue: BTreeMap::new(),
largest_allocation: 0,
total_allocation_size: 0,
base_mapping_addr: addr + addr + addr,
current_mapping_addr: addr + addr + addr,
}
}
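/// Retrieves the shadow bit used by this allocator.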
#[must_use]
pub fn shadow_bit(&self) -> u32 {
self.shadow_bit as u32
}
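/// Rounds `size` up to the next multiple of the page size; sizes that are already
/// page-aligned are bumped by one extra page.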
#[inline]
#[must_use]
fn round_up_to_page(&self, size: usize) -> usize {
((size + self.page_size) / self.page_size) * self.page_size
}
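/// Rounds `value` down to the nearest page boundary.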
#[inline]
#[must_use]
fn round_down_to_page(&self, value: usize) -> usize {
(value / self.page_size) * self.page_size
}
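/// Pops the smallest previously freed allocation whose backing region is at least
/// `size` bytes, if any.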
fn find_smallest_fit(&mut self, size: usize) -> Option<AllocationMetadata> {
for (current_size, list) in &mut self.allocation_queue {
if *current_size >= size {
if let Some(metadata) = list.pop() {
return Some(metadata);
}
}
}
None
}
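/// Allocates `size` bytes (the requested `_alignment` is currently ignored), returning
/// a pointer into a fresh or recycled mapping surrounded by poisoned guard pages.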
#[must_use]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn alloc(&mut self, size: usize, _alignment: usize) -> *mut c_void {
let mut is_malloc_zero = false;
let size = if size == 0 {
is_malloc_zero = true;
16
} else {
size
};
if size > self.options.max_allocation {
#[allow(clippy::manual_assert)]
if self.options.max_allocation_panics {
panic!("ASAN: Allocation is too large: 0x{size:x}");
}
return std::ptr::null_mut();
}
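// Round the request up to whole pages and add one page on each side as a poisoned red zone.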
let rounded_up_size = self.round_up_to_page(size) + 2 * self.page_size;
if self.total_allocation_size + rounded_up_size > self.options.max_total_allocation {
return std::ptr::null_mut();
}
self.total_allocation_size += rounded_up_size;
let metadata = if let Some(mut metadata) = self.find_smallest_fit(rounded_up_size) {
metadata.is_malloc_zero = is_malloc_zero;
metadata.size = size;
if self.options.allocation_backtraces {
metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved());
}
metadata
} else {
let mapping = match mmap(
self.current_mapping_addr as *mut c_void,
rounded_up_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
ANONYMOUS_FLAG
| MapFlags::MAP_PRIVATE
| MapFlags::MAP_FIXED
| MapFlags::MAP_NORESERVE,
-1,
0,
) {
Ok(mapping) => mapping as usize,
Err(err) => {
println!("An error occurred while mapping memory: {err:?}");
return std::ptr::null_mut();
}
};
self.current_mapping_addr += rounded_up_size;
self.map_shadow_for_region(mapping, mapping + rounded_up_size, false);
let mut metadata = AllocationMetadata {
address: mapping,
size,
actual_size: rounded_up_size,
..AllocationMetadata::default()
};
if self.options.allocation_backtraces {
metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved());
}
metadata
};
self.largest_allocation = std::cmp::max(self.largest_allocation, metadata.actual_size);
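// Unpoison only the user-visible region; the surrounding guard pages stay poisoned.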
Self::unpoison(
map_to_shadow!(self, metadata.address + self.page_size),
size,
);
let address = (metadata.address + self.page_size) as *mut c_void;
self.allocations
.insert(metadata.address + self.page_size, metadata);
address
}
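/// Releases the allocation behind `ptr`, reporting unallocated frees and double frees
/// to [`AsanErrors`], and poisons the freed region.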
#[allow(clippy::missing_safety_doc)]
pub unsafe fn release(&mut self, ptr: *mut c_void) {
let Some(metadata) = self.allocations.get_mut(&(ptr as usize)) else {
if !ptr.is_null() {
AsanErrors::get_mut()
.report_error(AsanError::UnallocatedFree((ptr as usize, Backtrace::new())));
}
return;
};
if metadata.freed {
AsanErrors::get_mut().report_error(AsanError::DoubleFree((
ptr as usize,
metadata.clone(),
Backtrace::new(),
)));
}
let shadow_mapping_start = map_to_shadow!(self, ptr as usize);
metadata.freed = true;
if self.options.allocation_backtraces {
metadata.release_site_backtrace = Some(Backtrace::new_unresolved());
}
Self::poison(shadow_mapping_start, metadata.size);
}
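/// Finds the allocation metadata whose address is closest to `ptr`, using `hint_base`
/// as a hint for the expected allocation base.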
pub fn find_metadata(
&mut self,
ptr: usize,
hint_base: usize,
) -> Option<&mut AllocationMetadata> {
let mut metadatas: Vec<&mut AllocationMetadata> = self.allocations.values_mut().collect();
metadatas.sort_by(|a, b| a.address.cmp(&b.address));
let mut offset_to_closest = i64::MAX;
let mut closest = None;
for metadata in metadatas {
let new_offset = if hint_base == metadata.address {
(ptr as i64 - metadata.address as i64).abs()
} else {
std::cmp::min(
offset_to_closest,
(ptr as i64 - metadata.address as i64).abs(),
)
};
if new_offset < offset_to_closest {
offset_to_closest = new_offset;
closest = Some(metadata);
}
}
closest
}
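/// Resets the allocator between runs: live allocations are kept, freed ones are
/// poisoned and queued for reuse, and the total allocation size counter is cleared.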
pub fn reset(&mut self) {
let mut tmp_allocations = Vec::new();
for (address, mut allocation) in self.allocations.drain() {
if !allocation.freed {
tmp_allocations.push(allocation);
continue;
}
Self::poison(map_to_shadow!(self, address), allocation.size);
allocation.size = 0;
allocation.freed = false;
allocation.allocation_site_backtrace = None;
allocation.release_site_backtrace = None;
self.allocation_queue
.entry(allocation.actual_size)
.or_default()
.push(allocation);
}
for allocation in tmp_allocations {
self.allocations
.insert(allocation.address + self.page_size, allocation);
}
self.total_allocation_size = 0;
}
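/// Returns the usable size of the allocation behind `ptr`; panics if `ptr` was not
/// allocated by this allocator.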
pub fn get_usable_size(&self, ptr: *mut c_void) -> usize {
match self.allocations.get(&(ptr as usize)) {
Some(metadata) => metadata.size,
None => {
panic!(
"Attempted to get_usable_size on a pointer ({ptr:?}) which was not allocated!"
);
}
}
}
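/// Marks the shadow for a `size`-byte region, whose shadow starts at `start`, as addressable.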
fn unpoison(start: usize, size: usize) {
unsafe {
memset(start as *mut c_void, 0xff, size / 8);
let remainder = size % 8;
if remainder > 0 {
memset(
(start + size / 8) as *mut c_void,
(0xff << (8 - remainder)) & 0xff,
1,
);
}
}
}
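/// Marks the shadow for a `size`-byte region, whose shadow starts at `start`, as poisoned.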
pub fn poison(start: usize, size: usize) {
unsafe {
memset(start as *mut c_void, 0x00, size / 8);
let remainder = size % 8;
if remainder > 0 {
memset((start + size / 8) as *mut c_void, 0x00, 1);
}
}
}
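/// Maps shadow memory for the region `[start, end)`, optionally unpoisoning it, and
/// returns the shadow start address and the shadow length in bytes.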
pub fn map_shadow_for_region(
&mut self,
start: usize,
end: usize,
unpoison: bool,
) -> (usize, usize) {
let shadow_mapping_start = map_to_shadow!(self, start);
if !self.pre_allocated_shadow {
let shadow_start = self.round_down_to_page(shadow_mapping_start);
let shadow_end =
self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start;
for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
unsafe {
mmap(
range.start as *mut c_void,
range.end - range.start,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE,
-1,
0,
)
.expect("An error occurred while mapping shadow memory");
}
}
self.shadow_pages.insert(shadow_start..shadow_end);
}
if unpoison {
Self::unpoison(shadow_mapping_start, end - start);
}
(shadow_mapping_start, (end - start) / 8)
}
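/// Maps an application address to its shadow address.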
#[inline]
#[must_use]
pub fn map_to_shadow(&self, start: usize) -> usize {
map_to_shadow!(self, start)
}
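/// Returns `true` if `ptr` lies within the mappings handed out by this allocator.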
#[inline]
pub fn is_managed(&self, ptr: *mut c_void) -> bool {
self.base_mapping_addr <= ptr as usize && (ptr as usize) < self.current_mapping_addr
}
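/// Reports every allocation that has not been freed as a leak to [`AsanErrors`].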
pub fn check_for_leaks(&self) {
for metadata in self.allocations.values() {
if !metadata.freed {
AsanErrors::get_mut()
.report_error(AsanError::Leak((metadata.address, metadata.clone())));
}
}
}
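/// Unpoisons every currently mapped range that is readable or writable, mapping shadow
/// memory for it as needed.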
pub fn unpoison_all_existing_memory(&mut self) {
RangeDetails::enumerate_with_prot(PageProtection::NoAccess, &mut |range: &RangeDetails| {
if range.protection() as u32 & PageProtection::ReadWrite as u32 != 0 {
let start = range.memory_range().base_address().0 as usize;
let end = start + range.memory_range().size();
if self.pre_allocated_shadow && start == 1 << self.shadow_bit {
return true;
}
self.map_shadow_for_region(start, end, true);
}
true
});
}
}