#![no_std]
#![deny(missing_docs)]
#![deny(clippy::all)]
#[cfg(feature = "fmt")]
mod fmt;
mod histogram;
mod stats;
pub use histogram::Histogram;
pub use stats::Stats;
#[cfg(feature = "fmt")]
extern crate alloc;
use core::{
alloc::{GlobalAlloc, Layout},
cmp,
ops::Deref,
sync::atomic::{AtomicU64, AtomicUsize, Ordering},
};
/// Pads its contents out to a 64-byte boundary so each statistics cluster
/// occupies its own cache line, keeping unrelated atomic counters from
/// false-sharing a line under concurrent updates.
#[repr(align(64))]
struct CacheAligned<T>(T);

impl<T> Deref for CacheAligned<T> {
    type Target = T;

    /// Transparent read-only access to the wrapped value.
    fn deref(&self) -> &T {
        let CacheAligned(inner) = self;
        inner
    }
}
/// Counters for one class of allocator event: how many events occurred
/// (`count`) and the total number of bytes involved across them (`sum`).
struct AllocCluster {
    count: AtomicUsize,
    sum: AtomicU64,
}
/// Live-usage tracking: bytes currently outstanding (`curr`) and the
/// observed high-water mark (`max`).
struct UseCluster {
    curr: AtomicUsize,
    max: AtomicUsize,
}
// Each cluster lives in its own `CacheAligned` wrapper so that counters
// updated on different allocator paths do not share a cache line.

// Successful allocations: call count and total bytes requested.
static ALLOC: CacheAligned<AllocCluster> = CacheAligned(AllocCluster {
    count: AtomicUsize::new(0),
    sum: AtomicU64::new(0),
});
// Deallocations: call count and total bytes released.
static DEALLOC: CacheAligned<AllocCluster> = CacheAligned(AllocCluster {
    count: AtomicUsize::new(0),
    sum: AtomicU64::new(0),
});
// Reallocations that grew (or kept) the block: count and bytes added.
static REALLOC_GROWTH: CacheAligned<AllocCluster> = CacheAligned(AllocCluster {
    count: AtomicUsize::new(0),
    sum: AtomicU64::new(0),
});
// Reallocations that shrank the block: count and bytes removed.
static REALLOC_SHRINK: CacheAligned<AllocCluster> = CacheAligned(AllocCluster {
    count: AtomicUsize::new(0),
    sum: AtomicU64::new(0),
});
// Reallocations that moved the block: count and bytes copied.
static REALLOC_MOVE: CacheAligned<AllocCluster> = CacheAligned(AllocCluster {
    count: AtomicUsize::new(0),
    sum: AtomicU64::new(0),
});
// Current live usage and its high-water mark.
static USE: CacheAligned<UseCluster> = CacheAligned(UseCluster {
    curr: AtomicUsize::new(0),
    max: AtomicUsize::new(0),
});
// Power-of-two size histograms: bucket `i` counts sizes in [2^i, 2^(i+1)).
static ALLOC_BUCKETS: CacheAligned<[AtomicUsize; 64]> =
    CacheAligned([const { AtomicUsize::new(0) }; 64]);
static REALLOC_GROWTH_BUCKETS: CacheAligned<[AtomicUsize; 64]> =
    CacheAligned([const { AtomicUsize::new(0) }; 64]);
static REALLOC_SHRINK_BUCKETS: CacheAligned<[AtomicUsize; 64]> =
    CacheAligned([const { AtomicUsize::new(0) }; 64]);
// Requests the inner allocator refused (returned null).
static ALLOC_FAIL_COUNT: AtomicUsize = AtomicUsize::new(0);
static REALLOC_FAIL_COUNT: AtomicUsize = AtomicUsize::new(0);
/// A [`GlobalAlloc`] adapter that forwards every request to the wrapped
/// allocator `A` while recording global statistics (counts, byte totals,
/// size histograms, live usage, and failure counts).
///
/// The statistics are stored in global atomics, so they are shared by all
/// instances; intended for use as the `#[global_allocator]`.
// NOTE: this type was previously undocumented, which fails to compile under
// the crate's `#![deny(missing_docs)]`.
#[derive(Debug, Default, Clone, Copy)]
pub struct Heapster<A: GlobalAlloc>(A);
/// Takes a point-in-time copy of a 64-bucket histogram.
///
/// Each slot is loaded individually with relaxed ordering, so the snapshot
/// is not a single atomic picture of all buckets — adequate for statistics.
fn bucket_snapshot(buckets: &[AtomicUsize; 64]) -> Histogram {
    let mut snapshot = [0usize; 64];
    for (slot, bucket) in snapshot.iter_mut().zip(buckets.iter()) {
        *slot = bucket.load(Ordering::Relaxed);
    }
    Histogram { buckets: snapshot }
}
// NOTE: every `pub fn` here was undocumented, which fails to compile under
// the crate's `#![deny(missing_docs)]`; the five `*_avg` methods also
// repeated the same body, now shared via the private `avg` helper.
impl<A: GlobalAlloc> Heapster<A> {
    /// Wraps `alloc` so every request routed through it is measured.
    pub const fn new(alloc: A) -> Self {
        Self(alloc)
    }

    /// Returns a reference to the wrapped allocator.
    pub const fn inner(&self) -> &A {
        &self.0
    }

    /// `sum / count` as a `usize`, or `None` when `count` is zero
    /// (`checked_div` turns the would-be division by zero into `None`).
    /// Shared by the `*_avg` accessors below.
    fn avg(sum: u64, count: usize) -> Option<usize> {
        sum.checked_div(count as u64).map(|avg| avg as usize)
    }

    /// Number of successful allocations recorded so far.
    #[inline]
    pub fn alloc_count(&self) -> usize {
        ALLOC.count.load(Ordering::Relaxed)
    }

    /// Total bytes requested by successful allocations.
    #[inline]
    pub fn alloc_sum(&self) -> u64 {
        ALLOC.sum.load(Ordering::Relaxed)
    }

    /// Snapshot of the allocation-size histogram (power-of-two buckets).
    #[inline]
    pub fn alloc_histogram(&self) -> Histogram {
        bucket_snapshot(&ALLOC_BUCKETS)
    }

    /// Number of allocation requests the inner allocator refused (returned null).
    #[inline]
    pub fn alloc_fail_count(&self) -> usize {
        ALLOC_FAIL_COUNT.load(Ordering::Relaxed)
    }

    /// Number of deallocations recorded so far.
    #[inline]
    pub fn dealloc_count(&self) -> usize {
        DEALLOC.count.load(Ordering::Relaxed)
    }

    /// Total bytes released by deallocations.
    #[inline]
    pub fn dealloc_sum(&self) -> u64 {
        DEALLOC.sum.load(Ordering::Relaxed)
    }

    /// Number of reallocations that grew (or kept) the allocation size.
    #[inline]
    pub fn realloc_growth_count(&self) -> usize {
        REALLOC_GROWTH.count.load(Ordering::Relaxed)
    }

    /// Total bytes added by growing reallocations.
    #[inline]
    pub fn realloc_growth_sum(&self) -> u64 {
        REALLOC_GROWTH.sum.load(Ordering::Relaxed)
    }

    /// Snapshot of the growth-size histogram (power-of-two buckets).
    #[inline]
    pub fn realloc_growth_histogram(&self) -> Histogram {
        bucket_snapshot(&REALLOC_GROWTH_BUCKETS)
    }

    /// Number of reallocations that shrank the allocation.
    #[inline]
    pub fn realloc_shrink_count(&self) -> usize {
        REALLOC_SHRINK.count.load(Ordering::Relaxed)
    }

    /// Total bytes removed by shrinking reallocations.
    #[inline]
    pub fn realloc_shrink_sum(&self) -> u64 {
        REALLOC_SHRINK.sum.load(Ordering::Relaxed)
    }

    /// Snapshot of the shrink-size histogram (power-of-two buckets).
    #[inline]
    pub fn realloc_shrink_histogram(&self) -> Histogram {
        bucket_snapshot(&REALLOC_SHRINK_BUCKETS)
    }

    /// Number of reallocations that moved the block to a new address.
    #[inline]
    pub fn realloc_move_count(&self) -> usize {
        REALLOC_MOVE.count.load(Ordering::Relaxed)
    }

    /// Total bytes recorded for moving reallocations.
    #[inline]
    pub fn realloc_move_sum(&self) -> u64 {
        REALLOC_MOVE.sum.load(Ordering::Relaxed)
    }

    /// Number of reallocation requests the inner allocator refused.
    #[inline]
    pub fn realloc_fail_count(&self) -> usize {
        REALLOC_FAIL_COUNT.load(Ordering::Relaxed)
    }

    /// Average allocation size, or `None` if nothing has been allocated.
    pub fn alloc_avg(&self) -> Option<usize> {
        Self::avg(self.alloc_sum(), self.alloc_count())
    }

    /// Average deallocation size, or `None` if nothing has been freed.
    pub fn dealloc_avg(&self) -> Option<usize> {
        Self::avg(self.dealloc_sum(), self.dealloc_count())
    }

    /// Average bytes added per growing reallocation, or `None` if none occurred.
    pub fn realloc_growth_avg(&self) -> Option<usize> {
        Self::avg(self.realloc_growth_sum(), self.realloc_growth_count())
    }

    /// Average bytes removed per shrinking reallocation, or `None` if none occurred.
    pub fn realloc_shrink_avg(&self) -> Option<usize> {
        Self::avg(self.realloc_shrink_sum(), self.realloc_shrink_count())
    }

    /// Average bytes per moving reallocation, or `None` if none occurred.
    pub fn realloc_move_avg(&self) -> Option<usize> {
        Self::avg(self.realloc_move_sum(), self.realloc_move_count())
    }

    /// Bytes currently allocated (live usage).
    #[inline]
    pub fn use_curr(&self) -> usize {
        USE.curr.load(Ordering::Relaxed)
    }

    /// High-water mark of live usage since startup or the last `reset`.
    #[inline]
    pub fn use_max(&self) -> usize {
        USE.max.load(Ordering::Relaxed)
    }

    /// Runs `f` and returns its result together with the statistics delta
    /// observed across the call.
    ///
    /// The counters are global, so allocator activity on other threads while
    /// `f` runs is included in the delta.
    pub fn measure<R>(&self, f: impl FnOnce() -> R) -> (R, Stats) {
        let before = self.stats();
        let r = f();
        let after = self.stats();
        (r, &after - &before)
    }

    /// Clears every counter and histogram, and re-seeds the usage high-water
    /// mark from the current live usage (live usage itself is preserved,
    /// since its matching deallocations are still outstanding).
    ///
    /// The reset is not atomic: events racing with it may be partially
    /// reflected in the cleared counters.
    pub fn reset(&self) {
        ALLOC.sum.store(0, Ordering::Relaxed);
        ALLOC.count.store(0, Ordering::Relaxed);
        for b in &*ALLOC_BUCKETS {
            b.store(0, Ordering::Relaxed);
        }
        ALLOC_FAIL_COUNT.store(0, Ordering::Relaxed);
        DEALLOC.sum.store(0, Ordering::Relaxed);
        DEALLOC.count.store(0, Ordering::Relaxed);
        REALLOC_GROWTH.count.store(0, Ordering::Relaxed);
        REALLOC_GROWTH.sum.store(0, Ordering::Relaxed);
        for b in &*REALLOC_GROWTH_BUCKETS {
            b.store(0, Ordering::Relaxed);
        }
        REALLOC_SHRINK.count.store(0, Ordering::Relaxed);
        REALLOC_SHRINK.sum.store(0, Ordering::Relaxed);
        for b in &*REALLOC_SHRINK_BUCKETS {
            b.store(0, Ordering::Relaxed);
        }
        REALLOC_MOVE.count.store(0, Ordering::Relaxed);
        REALLOC_MOVE.sum.store(0, Ordering::Relaxed);
        REALLOC_FAIL_COUNT.store(0, Ordering::Relaxed);
        USE.max.store(self.use_curr(), Ordering::Relaxed);
    }
}
/// Maps a non-zero byte size to its power-of-two histogram bucket: bucket
/// `i` holds sizes in `[2^i, 2^(i+1))`, i.e. `floor(log2(size))`.
///
/// `size` must be non-zero: `ilog2` panics on zero (the previous manual bit
/// arithmetic underflowed instead, producing a bogus out-of-range index in
/// release builds).
#[inline]
fn bucket_of(size: usize) -> usize {
    debug_assert!(size > 0);
    size.ilog2() as usize
}
/// Shared bookkeeping for `alloc` / `alloc_zeroed` results: on success,
/// bumps the allocation counters, live usage, high-water mark, and size
/// histogram; on a null return, bumps the failure counter instead.
#[inline]
fn record_alloc(ret: *mut u8, layout: Layout) {
    if ret.is_null() {
        ALLOC_FAIL_COUNT.fetch_add(1, Ordering::Relaxed);
        return;
    }
    let size = layout.size();
    ALLOC.sum.fetch_add(size as u64, Ordering::Relaxed);
    ALLOC.count.fetch_add(1, Ordering::Relaxed);
    let curr = USE.curr.fetch_add(size, Ordering::Relaxed) + size;
    USE.max.fetch_max(curr, Ordering::Relaxed);
    // `GlobalAlloc::alloc` forbids zero-sized layouts, so `size > 0` and
    // `bucket_of(size)` is well-defined here.
    ALLOC_BUCKETS[bucket_of(size)].fetch_add(1, Ordering::Relaxed);
}

unsafe impl<A: GlobalAlloc> GlobalAlloc for Heapster<A> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // SAFETY: the caller upholds the `GlobalAlloc::alloc` contract,
        // which we forward unchanged to the inner allocator.
        let ret = unsafe { self.0.alloc(layout) };
        record_alloc(ret, layout);
        ret
    }

    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        // SAFETY: the caller upholds the `GlobalAlloc::alloc_zeroed`
        // contract, which we forward unchanged to the inner allocator.
        let ret = unsafe { self.0.alloc_zeroed(layout) };
        record_alloc(ret, layout);
        ret
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // SAFETY: the caller upholds the `GlobalAlloc::dealloc` contract,
        // which we forward unchanged to the inner allocator.
        unsafe { self.0.dealloc(ptr, layout) };
        let size = layout.size();
        USE.curr.fetch_sub(size, Ordering::Relaxed);
        DEALLOC.sum.fetch_add(size as u64, Ordering::Relaxed);
        DEALLOC.count.fetch_add(1, Ordering::Relaxed);
    }

    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        // SAFETY: the caller upholds the `GlobalAlloc::realloc` contract,
        // which we forward unchanged to the inner allocator.
        let new_ptr = unsafe { self.0.realloc(ptr, layout, new_size) };
        if new_ptr.is_null() {
            REALLOC_FAIL_COUNT.fetch_add(1, Ordering::Relaxed);
            return new_ptr;
        }
        let old_size = layout.size();
        if new_size >= old_size {
            // Grew, or stayed the same size.
            let diff = new_size - old_size;
            REALLOC_GROWTH.count.fetch_add(1, Ordering::Relaxed);
            REALLOC_GROWTH.sum.fetch_add(diff as u64, Ordering::Relaxed);
            let curr = USE.curr.fetch_add(diff, Ordering::Relaxed) + diff;
            USE.max.fetch_max(curr, Ordering::Relaxed);
            // BUG FIX: a same-size realloc lands here with `diff == 0`, and
            // `bucket_of(0)` is undefined (the subtraction underflowed,
            // panicking in debug builds); only histogram real growth.
            if diff > 0 {
                REALLOC_GROWTH_BUCKETS[bucket_of(diff)].fetch_add(1, Ordering::Relaxed);
            }
        } else {
            // Shrank; `diff` is strictly positive in this branch, so the
            // histogram update is always safe.
            let diff = old_size - new_size;
            REALLOC_SHRINK.count.fetch_add(1, Ordering::Relaxed);
            REALLOC_SHRINK.sum.fetch_add(diff as u64, Ordering::Relaxed);
            USE.curr.fetch_sub(diff, Ordering::Relaxed);
            REALLOC_SHRINK_BUCKETS[bucket_of(diff)].fetch_add(1, Ordering::Relaxed);
        }
        if new_ptr != ptr {
            // The block moved: record the bytes the allocator had to copy
            // (the smaller of the old and new sizes).
            REALLOC_MOVE.count.fetch_add(1, Ordering::Relaxed);
            REALLOC_MOVE
                .sum
                .fetch_add(cmp::min(old_size, new_size) as u64, Ordering::Relaxed);
        }
        new_ptr
    }
}