#![deny(missing_docs)]
//! Mockalloc wraps an existing [`GlobalAlloc`](std::alloc::GlobalAlloc) and
//! records every allocation and free made by a closure, allowing tests to
//! detect leaks, double frees, and frees with a mismatched pointer, size or
//! alignment.
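//!
//! A minimal usage sketch (it assumes this crate is published under the name
//! `mockalloc`; any inner allocator implementing `GlobalAlloc` can be wrapped):
//!
//! ```
//! use mockalloc::Mockalloc;
//! use std::alloc::System;
//!
//! #[global_allocator]
//! static ALLOCATOR: Mockalloc<System> = Mockalloc(System);
//!
//! fn main() {
//!     let info = mockalloc::record_allocs(|| {
//!         let _data = Box::new(42u32);
//!     });
//!     // The closure freed everything it allocated.
//!     info.result().unwrap();
//!     assert_eq!(info.num_allocs(), info.num_frees());
//! }
//! ```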
use std::alloc::{GlobalAlloc, Layout};
use std::cell::{Cell, RefCell};
use std::thread_local;
#[cfg(feature = "tracing")]
pub mod tracing;
/// Mixes an address, size or alignment into a pseudo-random 128-bit value so
/// that matching alloc/free pairs cancel exactly in the accumulators below.
fn hash_fn(p: usize) -> u128 {
const PRIME1: u128 = 257343791756393576901679996513787191589;
const PRIME2: u128 = 271053192961985756828288246809453504189;
let mut p = (p as u128).wrapping_add(PRIME2);
p = p.wrapping_mul(PRIME1);
p = p ^ (p >> 64);
p = p.wrapping_mul(PRIME2);
p = p ^ (p >> 42);
p = p.wrapping_mul(PRIME1);
p = p ^ (p >> 25);
p
}
/// Per-thread statistics accumulated while recording is enabled.
#[derive(Default)]
struct LocalState {
    // Each accumulator adds a hash on alloc and subtracts the same hash on
    // free; a nonzero residue at the end reveals which part of the layout
    // (pointer, size or alignment) did not match up.
    ptr_accum: u128,
    ptr_size_accum: u128,
    ptr_align_accum: u128,
num_allocs: u64,
num_frees: u64,
mem_allocated: u64,
mem_freed: u64,
peak_mem: u64,
peak_mem_allocs: u64,
#[cfg(feature = "tracing")]
tracing: tracing::TracingState,
}
impl LocalState {
fn record_alloc(&mut self, ptr: *const u8, layout: Layout) {
if ptr.is_null() {
return;
}
let ptr_hash = hash_fn(ptr as usize);
let size_hash = hash_fn(layout.size());
let align_hash = hash_fn(layout.align());
self.ptr_accum = self.ptr_accum.wrapping_add(ptr_hash);
self.ptr_size_accum = self
.ptr_size_accum
.wrapping_add(ptr_hash.wrapping_mul(size_hash));
self.ptr_align_accum = self
.ptr_align_accum
.wrapping_add(ptr_hash.wrapping_mul(align_hash));
self.num_allocs += 1;
self.mem_allocated += layout.size() as u64;
        // Track the high-water mark of live memory and the number of live
        // allocations at that point.
        if self.mem_allocated > self.mem_freed {
let mem_usage = self.mem_allocated - self.mem_freed;
if mem_usage > self.peak_mem {
self.peak_mem = mem_usage;
self.peak_mem_allocs = self.num_allocs.saturating_sub(self.num_frees);
}
}
#[cfg(feature = "tracing")]
self.tracing.record_alloc(ptr, layout);
}
fn record_free(&mut self, ptr: *const u8, layout: Layout) {
let ptr_hash = hash_fn(ptr as usize);
let size_hash = hash_fn(layout.size());
let align_hash = hash_fn(layout.align());
self.ptr_accum = self.ptr_accum.wrapping_sub(ptr_hash);
self.ptr_size_accum = self
.ptr_size_accum
.wrapping_sub(ptr_hash.wrapping_mul(size_hash));
self.ptr_align_accum = self
.ptr_align_accum
.wrapping_sub(ptr_hash.wrapping_mul(align_hash));
self.num_frees += 1;
self.mem_freed += layout.size() as u64;
#[cfg(feature = "tracing")]
self.tracing.record_free(ptr, layout);
}
fn start(&mut self) {
*self = Default::default();
#[cfg(feature = "tracing")]
self.tracing.start();
}
fn finish(&mut self) -> AllocInfo {
        // Counter mismatches (leaks, double frees) are reported first; after
        // that, the hashed accumulators reveal whether a pointer, size or
        // alignment failed to match up between an allocation and its free.
        let result = if self.num_allocs > self.num_frees {
Err(AllocError::Leak)
} else if self.num_allocs < self.num_frees {
Err(AllocError::DoubleFree)
} else if self.num_allocs == 0 {
Err(AllocError::NoData)
} else if self.ptr_accum != 0 {
Err(AllocError::BadPtr)
} else {
match (self.ptr_size_accum != 0, self.ptr_align_accum != 0) {
(true, true) => Err(AllocError::BadLayout),
(true, false) => Err(AllocError::BadSize),
(false, true) => Err(AllocError::BadAlignment),
(false, false) => Ok(()),
}
};
AllocInfo {
result,
num_allocs: self.num_allocs,
num_frees: self.num_frees,
mem_allocated: self.mem_allocated,
mem_freed: self.mem_freed,
peak_mem: self.peak_mem,
peak_mem_allocs: self.peak_mem_allocs,
#[cfg(feature = "tracing")]
tracing: self.tracing.finish(),
}
}
}
thread_local! {
static ENABLED: Cell<bool> = Cell::new(false);
static LOCAL_STATE: RefCell<LocalState> = RefCell::new(LocalState::default());
}
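/// A wrapper allocator which forwards to an inner [`GlobalAlloc`] and records
/// every allocation and free made through it while recording is enabled on
/// the current thread.
///
/// A typical setup (a sketch; shown wrapping the [`System`](std::alloc::System)
/// allocator, but any `GlobalAlloc` works):
///
/// ```no_run
/// use mockalloc::Mockalloc;
/// use std::alloc::System;
///
/// #[global_allocator]
/// static ALLOCATOR: Mockalloc<System> = Mockalloc(System);
/// # fn main() {}
/// ```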
pub struct Mockalloc<T: GlobalAlloc>(pub T);
unsafe impl<T: GlobalAlloc> GlobalAlloc for Mockalloc<T> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let ptr = self.0.alloc(layout);
with_local_state(|state| {
state.record_alloc(ptr, layout);
});
ptr
}
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
with_local_state(|state| {
state.record_free(ptr, layout);
});
self.0.dealloc(ptr, layout);
}
    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
        let new_ptr = self.0.realloc(ptr, layout, new_size);
        with_local_state(|state| {
            // A failed realloc leaves the original block untouched, so only
            // record the free/alloc pair when the reallocation succeeded.
            if !new_ptr.is_null() {
                state.record_free(ptr, layout);
                state.record_alloc(new_ptr, new_layout);
            }
        });
        new_ptr
    }
unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
let ptr = self.0.alloc_zeroed(layout);
with_local_state(|state| {
state.record_alloc(ptr, layout);
});
ptr
}
}
/// The different failure modes which can be detected from a recording.
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub enum AllocError {
    /// No allocations were made at all, so nothing could be checked.
    NoData,
    /// More allocations than frees: at least one allocation was leaked.
    Leak,
    /// More frees than allocations: at least one pointer was freed twice.
    DoubleFree,
    /// A pointer was freed which does not match any recorded allocation.
    BadPtr,
    /// An allocation was freed with a different size than it was allocated with.
    BadSize,
    /// An allocation was freed with a different alignment than it was allocated with.
    BadAlignment,
    /// An allocation was freed with a different size and alignment than it was
    /// allocated with.
    BadLayout,
}
/// Summary of the allocations observed during a recording, returned by
/// [`record_allocs`].
#[derive(Debug, Clone)]
pub struct AllocInfo {
num_allocs: u64,
num_frees: u64,
mem_allocated: u64,
mem_freed: u64,
peak_mem: u64,
peak_mem_allocs: u64,
result: Result<(), AllocError>,
#[cfg(feature = "tracing")]
tracing: tracing::TracingInfo,
}
impl AllocInfo {
    /// Returns the total number of allocations made while recording.
    pub fn num_allocs(&self) -> u64 {
        self.num_allocs
    }
    /// Returns the total number of frees made while recording.
    pub fn num_frees(&self) -> u64 {
        self.num_frees
    }
    /// Returns the number of allocations which were never freed.
    pub fn num_leaks(&self) -> u64 {
        // Saturate so that an over-free (double free) does not underflow.
        self.num_allocs.saturating_sub(self.num_frees)
    }
    /// Returns the total number of bytes allocated while recording.
    pub fn mem_allocated(&self) -> u64 {
        self.mem_allocated
    }
    /// Returns the number of allocated bytes which were never freed.
    pub fn mem_leaked(&self) -> u64 {
        self.mem_allocated.saturating_sub(self.mem_freed)
    }
    /// Returns the total number of bytes freed while recording.
    pub fn mem_freed(&self) -> u64 {
        self.mem_freed
    }
    /// Returns the peak number of bytes allocated at any one time.
    pub fn peak_mem(&self) -> u64 {
        self.peak_mem
    }
    /// Returns the number of live allocations at the point of peak memory usage.
    pub fn peak_mem_allocs(&self) -> u64 {
        self.peak_mem_allocs
    }
    /// Returns the overall result of the recording: `Ok(())` if no allocation
    /// errors were detected, or an [`AllocError`] describing the problem.
    pub fn result(&self) -> Result<(), AllocError> {
        self.result.clone()
    }
    /// Returns the detailed trace gathered while recording (requires the
    /// `tracing` feature).
    #[cfg(feature = "tracing")]
    pub fn tracing(&self) -> &tracing::TracingInfo {
        &self.tracing
    }
}
/// Guard which enables recording on creation and disables it again when
/// finished, or on drop if the recorded closure panics.
struct AllocChecker(bool);
impl AllocChecker {
fn new() -> Self {
LOCAL_STATE.with(|rc| rc.borrow_mut().start());
ENABLED.with(|c| {
assert!(!c.get(), "Mockalloc already recording");
c.set(true);
});
Self(true)
}
fn finish(mut self) -> AllocInfo {
self.0 = false;
ENABLED.with(|c| c.set(false));
LOCAL_STATE.with(|rc| rc.borrow_mut().finish())
}
}
impl Drop for AllocChecker {
fn drop(&mut self) {
if self.0 {
ENABLED.with(|c| c.set(false));
LOCAL_STATE.with(|rc| rc.borrow_mut().finish());
}
}
}
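/// Records all allocations and frees made by `f` on the current thread and
/// returns an [`AllocInfo`] summarising what was observed.
///
/// Panics if a recording is already in progress on this thread.
///
/// A sketch of typical use, assuming a [`Mockalloc`] wrapper has already been
/// installed as the `#[global_allocator]`:
///
/// ```no_run
/// let info = mockalloc::record_allocs(|| {
///     let _v: Vec<u8> = Vec::with_capacity(16);
/// });
/// info.result().unwrap();
/// assert_eq!(info.num_allocs(), info.num_frees());
/// ```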
pub fn record_allocs(f: impl FnOnce()) -> AllocInfo {
let checker = AllocChecker::new();
f();
checker.finish()
}
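/// Runs `f` and panics if any allocation error is detected; on failure with
/// the `tracing` feature enabled, the recorded trace is printed to stderr
/// first. Checking is skipped entirely when running under Miri.
///
/// A sketch of typical use, again assuming a [`Mockalloc`] global allocator:
///
/// ```no_run
/// mockalloc::assert_allocs(|| {
///     let _boxed = Box::new([0u8; 32]);
/// });
/// ```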
pub fn assert_allocs(f: impl FnOnce()) {
if cfg!(miri) {
f();
} else {
let info = record_allocs(f);
#[cfg(feature = "tracing")]
if info.result.is_err() {
eprintln!("# Mockalloc trace:\n\n{:#?}", info.tracing);
}
info.result.unwrap();
}
}
/// Returns `true` if allocations are currently being recorded on this thread.
pub fn is_recording() -> bool {
ENABLED.with(|c| c.get())
}
/// Runs `f` with the thread-local state, temporarily disabling recording so
/// that allocations made while updating the state are not themselves recorded.
fn with_local_state(f: impl FnOnce(&mut LocalState)) {
if !is_recording() {
return;
}
ENABLED.with(|c| c.set(false));
LOCAL_STATE.with(|rc| f(&mut rc.borrow_mut()));
ENABLED.with(|c| c.set(true));
}
/// Re-export of the `test` macro from the companion `mockalloc_macros` crate.
pub use mockalloc_macros::test;
#[cfg(test)]
mod tests {
use super::{is_recording, record_allocs, AllocError, Mockalloc};
use std::alloc::{GlobalAlloc, Layout, System};
use std::{cmp, mem, ptr};
    /// Test allocator: while recording it never actually releases memory, so
    /// tests which deliberately double-free or corrupt pointers cannot damage
    /// the real heap.
    struct LeakingAllocator(System);
unsafe impl GlobalAlloc for LeakingAllocator {
unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
self.0.alloc_zeroed(layout)
}
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
if is_recording() {
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
let new_ptr = self.alloc(new_layout);
if !new_ptr.is_null() {
ptr::copy_nonoverlapping(ptr, new_ptr, cmp::min(layout.size(), new_size));
self.dealloc(ptr, layout);
}
new_ptr
} else {
self.0.realloc(ptr, layout, new_size)
}
}
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
self.0.alloc(layout)
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if !is_recording() {
self.0.dealloc(ptr, layout);
}
}
}
#[global_allocator]
static A: Mockalloc<LeakingAllocator> = Mockalloc(LeakingAllocator(System));
    /// Makes a batch of background allocations; half are freed when `b` goes
    /// out of scope and the other half are returned to the caller.
    fn do_some_allocations() -> Vec<Box<i32>> {
let mut a = Vec::new();
let mut b = Vec::new();
for i in 0..32 {
let p = Box::new(i);
if i % 2 == 0 {
a.push(p);
} else {
b.push(p);
}
}
a
}
#[test]
fn it_works() {
let alloc_info = record_allocs(|| {
let _p = Box::new(42);
});
alloc_info.result().unwrap();
assert_eq!(alloc_info.num_allocs(), 1);
assert_eq!(alloc_info.num_frees(), 1);
assert_eq!(alloc_info.peak_mem(), 4);
assert_eq!(alloc_info.peak_mem_allocs(), 1);
}
#[test]
fn it_detects_leak() {
let alloc_info = record_allocs(|| {
mem::forget(Box::new(42));
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::Leak);
assert_eq!(alloc_info.num_allocs(), 1);
assert_eq!(alloc_info.num_frees(), 0);
}
#[test]
fn it_detects_bad_layout() {
let alloc_info = record_allocs(|| unsafe {
mem::transmute::<_, Box<f64>>(Box::new(42u32));
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadLayout);
assert_eq!(alloc_info.num_allocs(), 1);
assert_eq!(alloc_info.num_frees(), 1);
}
#[test]
fn it_detects_no_data() {
let alloc_info = record_allocs(|| ());
assert_eq!(alloc_info.result().unwrap_err(), AllocError::NoData);
assert_eq!(alloc_info.num_allocs(), 0);
assert_eq!(alloc_info.num_frees(), 0);
}
#[test]
fn it_detects_bad_alignment() {
let alloc_info = record_allocs(|| unsafe {
mem::transmute::<_, Box<[u8; 4]>>(Box::new(42u32));
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadAlignment);
assert_eq!(alloc_info.num_allocs(), 1);
assert_eq!(alloc_info.num_frees(), 1);
}
#[test]
fn it_detects_bad_size() {
let alloc_info = record_allocs(|| unsafe {
mem::transmute::<_, Box<[u32; 2]>>(Box::new(42u32));
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadSize);
assert_eq!(alloc_info.num_allocs(), 1);
assert_eq!(alloc_info.num_frees(), 1);
}
#[test]
fn it_detects_double_free() {
let alloc_info = record_allocs(|| unsafe {
let mut x = mem::ManuallyDrop::new(Box::new(42));
mem::ManuallyDrop::drop(&mut x);
mem::ManuallyDrop::drop(&mut x);
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::DoubleFree);
assert_eq!(alloc_info.num_allocs(), 1);
assert_eq!(alloc_info.num_frees(), 2);
}
#[test]
fn it_detects_bad_ptr() {
let alloc_info = record_allocs(|| unsafe {
let mut x = Box::new(42);
*mem::transmute::<_, &mut usize>(&mut x) += 1;
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadPtr);
assert_eq!(alloc_info.num_allocs(), 1);
assert_eq!(alloc_info.num_frees(), 1);
}
#[test]
fn it_works_amongst_many() {
let alloc_info = record_allocs(|| {
let _unused = do_some_allocations();
let _p = Box::new(42);
let _unused = do_some_allocations();
});
alloc_info.result().unwrap();
assert_eq!(alloc_info.peak_mem(), 580);
assert_eq!(alloc_info.peak_mem_allocs(), 52);
}
#[test]
fn it_detects_leak_amongst_many() {
let alloc_info = record_allocs(|| {
let _unused = do_some_allocations();
let p = Box::new(42);
let _unused = do_some_allocations();
mem::forget(p);
let _unused = do_some_allocations();
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::Leak);
}
#[test]
fn it_detects_bad_layout_amongst_many() {
let alloc_info = record_allocs(|| unsafe {
let _unused = do_some_allocations();
let p = Box::new(42u32);
let _unused = do_some_allocations();
mem::transmute::<_, Box<f64>>(p);
let _unused = do_some_allocations();
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadLayout);
}
#[test]
fn it_detects_bad_alignment_amongst_many() {
let alloc_info = record_allocs(|| unsafe {
let _unused = do_some_allocations();
let p = Box::new(42u32);
let _unused = do_some_allocations();
mem::transmute::<_, Box<[u8; 4]>>(p);
let _unused = do_some_allocations();
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadAlignment);
}
#[test]
fn it_detects_bad_size_amongst_many() {
let alloc_info = record_allocs(|| unsafe {
let _unused = do_some_allocations();
let p = Box::new(42u32);
let _unused = do_some_allocations();
mem::transmute::<_, Box<[u32; 2]>>(p);
let _unused = do_some_allocations();
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadSize);
}
#[test]
fn it_detects_double_free_amongst_many() {
let alloc_info = record_allocs(|| unsafe {
let _unused = do_some_allocations();
let mut x = mem::ManuallyDrop::new(Box::new(42));
let _unused = do_some_allocations();
mem::ManuallyDrop::drop(&mut x);
let _unused = do_some_allocations();
mem::ManuallyDrop::drop(&mut x);
let _unused = do_some_allocations();
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::DoubleFree);
}
#[test]
fn it_detects_bad_ptr_amongst_many() {
let alloc_info = record_allocs(|| unsafe {
let _unused = do_some_allocations();
let mut x = Box::new(42);
let _unused = do_some_allocations();
*mem::transmute::<_, &mut usize>(&mut x) += 1;
let _unused = do_some_allocations();
});
assert_eq!(alloc_info.result().unwrap_err(), AllocError::BadPtr);
}
}