//! Track the amount of allocated heap memory, and optionally the callstacks
//! responsible for medium and large allocations.

use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed};

use once_cell::sync::Lazy;
use parking_lot::Mutex;

use crate::{
    allocation_tracker::{AllocationTracker, CallstackStatistics, PtrHash},
    CountAndSize,
};
/// Allocations smaller than this many bytes are counted, but never callstack-tracked.
const SMALL_SIZE: usize = 128;

/// Allocations in `SMALL_SIZE..MEDIUM_SIZE` are stochastically sampled;
/// allocations of at least this many bytes are always tracked.
const MEDIUM_SIZE: usize = 4 * 1024;

/// Tracks every allocation of at least [`MEDIUM_SIZE`] bytes (stochastic rate 1).
static BIG_ALLOCATION_TRACKER: Lazy<Mutex<AllocationTracker>> =
    Lazy::new(|| Mutex::new(AllocationTracker::with_stochastic_rate(1)));

/// Tracks roughly one in 64 of the medium-sized allocations.
static MEDIUM_ALLOCATION_TRACKER: Lazy<Mutex<AllocationTracker>> =
    Lazy::new(|| Mutex::new(AllocationTracker::with_stochastic_rate(64)));
thread_local! {
    /// Set while the current thread is inside the allocation tracker,
    /// so that the tracker's own allocations are not recursively tracked.
    static IS_THREAD_IN_ALLOCATION_TRACKER: std::cell::Cell<bool> = const { std::cell::Cell::new(false) };
}
/// A [`CountAndSize`] that can be updated atomically from any thread.
struct AtomicCountAndSize {
    /// Number of allocations.
    pub count: AtomicUsize,
    /// Number of bytes.
    pub size: AtomicUsize,
}
impl AtomicCountAndSize {
    pub const fn zero() -> Self {
        Self {
            count: AtomicUsize::new(0),
            size: AtomicUsize::new(0),
        }
    }

    /// Snapshot the current counters.
    fn load(&self) -> CountAndSize {
        CountAndSize {
            count: self.count.load(Relaxed),
            size: self.size.load(Relaxed),
        }
    }

    /// Register an allocation of the given size.
    fn add(&self, size: usize) {
        self.count.fetch_add(1, Relaxed);
        self.size.fetch_add(size, Relaxed);
    }

    /// Register a deallocation of the given size.
    fn sub(&self, size: usize) {
        self.count.fetch_sub(1, Relaxed);
        self.size.fetch_sub(size, Relaxed);
    }
}
/// Global statistics, updated on every allocation and deallocation.
struct GlobalStats {
    /// All live allocations, regardless of size.
    pub live: AtomicCountAndSize,
    /// Whether we also track callstacks of medium and large allocations.
    pub track_callstacks: AtomicBool,
    /// Live allocations smaller than [`SMALL_SIZE`]: counted, but no callstacks.
    pub untracked: AtomicCountAndSize,
    /// Live allocations in `SMALL_SIZE..MEDIUM_SIZE`, sampled by [`MEDIUM_ALLOCATION_TRACKER`].
    pub stochastically_tracked: AtomicCountAndSize,
    /// Live allocations of at least [`MEDIUM_SIZE`], all tracked by [`BIG_ALLOCATION_TRACKER`].
    pub fully_tracked: AtomicCountAndSize,
    /// Allocations made by the allocation trackers themselves.
    pub overhead: AtomicCountAndSize,
}
static GLOBAL_STATS: GlobalStats = GlobalStats {
live: AtomicCountAndSize::zero(),
track_callstacks: AtomicBool::new(false),
untracked: AtomicCountAndSize::zero(),
stochastically_tracked: AtomicCountAndSize::zero(),
fully_tracked: AtomicCountAndSize::zero(),
overhead: AtomicCountAndSize::zero(),
};
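/// Total number and byte-size of all live allocations, or `None` if nothing is currently allocated.
///
/// A usage sketch (the module path `accounting_allocator` is an assumption about how this file is exposed):
///
/// ```ignore
/// if let Some(live) = accounting_allocator::global_allocs() {
///     eprintln!("{} live allocations, {} bytes", live.count, live.size);
/// }
/// ```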
pub fn global_allocs() -> Option<CountAndSize> {
let count_and_size = GLOBAL_STATS.live.load();
(count_and_size.count > 0).then_some(count_and_size)
}
/// Are we currently tracking the callstacks of medium and large allocations?
pub fn is_tracking_callstacks() -> bool {
    GLOBAL_STATS.track_callstacks.load(Relaxed)
}

/// Turn callstack tracking of medium and large allocations on or off.
///
/// Tracking has a run-time cost, so it is off by default.
pub fn set_tracking_callstacks(track: bool) {
    GLOBAL_STATS.track_callstacks.store(track, Relaxed);
}
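/// Turn on callstack tracking of medium and large allocations if the given
/// environment variable is set (to any value).
///
/// A minimal sketch; the module path and the variable name below are illustrative, not defined here:
///
/// ```ignore
/// accounting_allocator::turn_on_tracking_if_env_var("MY_APP_TRACK_ALLOCATIONS");
/// ```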
#[cfg(not(target_arch = "wasm32"))]
pub fn turn_on_tracking_if_env_var(env_var: &str) {
if std::env::var(env_var).is_ok() {
set_tracking_callstacks(true);
re_log::info!("{env_var} found - turning on tracking of all large allocations");
}
}
/// Maximum number of callstacks reported per tracker in [`TrackingStatistics::top_callstacks`].
const MAX_CALLSTACKS: usize = 128;
/// Statistics about callstack-tracked allocations.
pub struct TrackingStatistics {
    /// Allocations smaller than this many bytes are not callstack-tracked.
    pub track_size_threshold: usize,
    /// Live allocations smaller than [`Self::track_size_threshold`].
    pub untracked: CountAndSize,
    /// Live medium-sized allocations, of which only a sample is callstack-tracked.
    pub stochastically_tracked: CountAndSize,
    /// Live large allocations, all of which are callstack-tracked.
    pub fully_tracked: CountAndSize,
    /// Allocations made by the trackers themselves.
    pub overhead: CountAndSize,
    /// The callstacks responsible for the most live memory, largest first.
    pub top_callstacks: Vec<CallstackStatistics>,
}
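/// Gather statistics from the live callstack trackers.
///
/// Returns `None` unless callstack tracking has been turned on.
/// A usage sketch (the module path `accounting_allocator` is an assumption about how this file is exposed):
///
/// ```ignore
/// accounting_allocator::set_tracking_callstacks(true);
/// // ... run the workload you want to profile ...
/// if let Some(stats) = accounting_allocator::tracking_stats() {
///     for callstack in &stats.top_callstacks {
///         eprintln!("{} bytes live in {} allocations", callstack.extant.size, callstack.extant.count);
///     }
/// }
/// ```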
pub fn tracking_stats() -> Option<TrackingStatistics> {
    fn tracker_stats(
        allocation_tracker: &AllocationTracker,
    ) -> smallvec::SmallVec<[CallstackStatistics; MAX_CALLSTACKS]> {
        // Collect into a fixed-capacity `SmallVec` so we don't allocate while holding the tracker lock.
        let top_callstacks: smallvec::SmallVec<[CallstackStatistics; MAX_CALLSTACKS]> =
            allocation_tracker
                .top_callstacks(MAX_CALLSTACKS)
                .into_iter()
                .collect();
        assert!(
            !top_callstacks.spilled(),
            "We shouldn't have spilled to the heap, i.e. allocated"
        );
        top_callstacks
    }
    GLOBAL_STATS.track_callstacks.load(Relaxed).then(|| {
        IS_THREAD_IN_ALLOCATION_TRACKER.with(|is_thread_in_allocation_tracker| {
            // Mark this thread as "inside the tracker" so that any allocations we make
            // below are not routed back into the (already locked) trackers:
            is_thread_in_allocation_tracker.set(true);
            let mut top_big_callstacks = tracker_stats(&BIG_ALLOCATION_TRACKER.lock());
            let mut top_medium_callstacks = tracker_stats(&MEDIUM_ALLOCATION_TRACKER.lock());
            is_thread_in_allocation_tracker.set(false);

            // Merge the two trackers and sort by live ("extant") size, largest first:
            let mut top_callstacks: Vec<_> = top_big_callstacks
                .drain(..)
                .chain(top_medium_callstacks.drain(..))
                .collect();
            top_callstacks.sort_by_key(|c| -(c.extant.size as i64));
TrackingStatistics {
track_size_threshold: SMALL_SIZE,
untracked: GLOBAL_STATS.untracked.load(),
stochastically_tracked: GLOBAL_STATS.stochastically_tracked.load(),
fully_tracked: GLOBAL_STATS.fully_tracked.load(),
overhead: GLOBAL_STATS.overhead.load(),
top_callstacks,
}
})
})
}
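/// A global allocator that wraps another allocator and feeds the accounting above.
///
/// A minimal installation sketch, using the system allocator as the inner allocator;
/// the crate name `re_memory` and the re-export from the crate root are assumptions:
///
/// ```ignore
/// use re_memory::AccountingAllocator;
///
/// #[global_allocator]
/// static GLOBAL: AccountingAllocator<std::alloc::System> =
///     AccountingAllocator::new(std::alloc::System);
/// ```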
#[derive(Default)]
pub struct AccountingAllocator<InnerAllocator> {
allocator: InnerAllocator,
}
impl<InnerAllocator> AccountingAllocator<InnerAllocator> {
    /// Wrap the given allocator so that all allocations made through it are accounted for.
    pub const fn new(allocator: InnerAllocator) -> Self {
        Self { allocator }
    }
}
#[allow(unsafe_code)]
// SAFETY: all calls are forwarded to the wrapped allocator; we only do bookkeeping on the side.
unsafe impl<InnerAllocator: std::alloc::GlobalAlloc> std::alloc::GlobalAlloc
    for AccountingAllocator<InnerAllocator>
{
#[allow(clippy::let_and_return)]
unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 {
let ptr = unsafe { self.allocator.alloc(layout) };
note_alloc(ptr, layout.size());
ptr
}
unsafe fn alloc_zeroed(&self, layout: std::alloc::Layout) -> *mut u8 {
let ptr = unsafe { self.allocator.alloc_zeroed(layout) };
note_alloc(ptr, layout.size());
ptr
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: std::alloc::Layout) {
unsafe { self.allocator.dealloc(ptr, layout) };
note_dealloc(ptr, layout.size());
}
    unsafe fn realloc(
        &self,
        old_ptr: *mut u8,
        layout: std::alloc::Layout,
        new_size: usize,
    ) -> *mut u8 {
        // Account for a realloc as a dealloc of the old block followed by an alloc of the new one.
        note_dealloc(old_ptr, layout.size());
        let new_ptr = unsafe { self.allocator.realloc(old_ptr, layout, new_size) };
        note_alloc(new_ptr, new_size);
        new_ptr
    }
}
/// Record a new allocation in the global statistics, and (if enabled) in the callstack trackers.
#[inline]
fn note_alloc(ptr: *mut u8, size: usize) {
    GLOBAL_STATS.live.add(size);

    if GLOBAL_STATS.track_callstacks.load(Relaxed) {
        if size < SMALL_SIZE {
            // Too small to be worth the cost of tracking a callstack for:
            GLOBAL_STATS.untracked.add(size);
        } else {
            // The tracker may itself allocate, so guard against re-entrancy on this thread:
            IS_THREAD_IN_ALLOCATION_TRACKER.with(|is_thread_in_allocation_tracker| {
                if !is_thread_in_allocation_tracker.get() {
                    is_thread_in_allocation_tracker.set(true);

                    let ptr_hash = PtrHash::new(ptr);
                    if size < MEDIUM_SIZE {
                        // Medium-sized: stochastically sampled by the tracker.
                        GLOBAL_STATS.stochastically_tracked.add(size);
                        MEDIUM_ALLOCATION_TRACKER.lock().on_alloc(ptr_hash, size);
                    } else {
                        // Big: every allocation is tracked.
                        GLOBAL_STATS.fully_tracked.add(size);
                        BIG_ALLOCATION_TRACKER.lock().on_alloc(ptr_hash, size);
                    }

                    is_thread_in_allocation_tracker.set(false);
                } else {
                    // This allocation was made by the allocation tracker itself:
                    GLOBAL_STATS.overhead.add(size);
                }
            });
        }
    }
}
/// Record a deallocation, mirroring the bookkeeping done in [`note_alloc`].
#[inline]
fn note_dealloc(ptr: *mut u8, size: usize) {
    GLOBAL_STATS.live.sub(size);

    if GLOBAL_STATS.track_callstacks.load(Relaxed) {
        if size < SMALL_SIZE {
            GLOBAL_STATS.untracked.sub(size);
        } else {
            IS_THREAD_IN_ALLOCATION_TRACKER.with(|is_thread_in_allocation_tracker| {
                if !is_thread_in_allocation_tracker.get() {
                    is_thread_in_allocation_tracker.set(true);

                    let ptr_hash = PtrHash::new(ptr);
                    if size < MEDIUM_SIZE {
                        GLOBAL_STATS.stochastically_tracked.sub(size);
                        MEDIUM_ALLOCATION_TRACKER.lock().on_dealloc(ptr_hash, size);
                    } else {
                        GLOBAL_STATS.fully_tracked.sub(size);
                        BIG_ALLOCATION_TRACKER.lock().on_dealloc(ptr_hash, size);
                    }

                    is_thread_in_allocation_tracker.set(false);
                } else {
                    // This deallocation was made by the allocation tracker itself:
                    GLOBAL_STATS.overhead.sub(size);
                }
            });
        }
    }
}