use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};
/// Total number of bytes currently allocated through this allocator.
static CURRENT: AtomicUsize = AtomicUsize::new(0);
/// High-water mark: the largest value `CURRENT` has reached. Only ever
/// raised by allocations (via `fetch_max`), except when explicitly reset.
static PEAK: AtomicUsize = AtomicUsize::new(0);
/// Zero-sized allocator that forwards to `std::alloc::System` while
/// recording current and peak heap usage in global atomic counters;
/// intended to be installed with `#[global_allocator]`.
#[derive(Debug, Default, Copy, Clone)]
pub struct PeakAlloc;
impl PeakAlloc {
    /// Returns the number of bytes currently allocated through this
    /// allocator.
    pub fn current_usage(&self) -> usize {
        CURRENT.load(Ordering::Relaxed)
    }

    /// Returns the largest number of bytes that were simultaneously
    /// allocated since startup (or since the last [`Self::reset_peak_usage`]).
    pub fn peak_usage(&self) -> usize {
        PEAK.load(Ordering::Relaxed)
    }

    /// Current usage, expressed in kibibytes.
    pub fn current_usage_as_kb(&self) -> f32 {
        Self::to_unit(self.current_usage(), 1_024.0)
    }

    /// Current usage, expressed in mebibytes.
    pub fn current_usage_as_mb(&self) -> f32 {
        Self::to_unit(self.current_usage(), 1_048_576.0)
    }

    /// Current usage, expressed in gibibytes.
    pub fn current_usage_as_gb(&self) -> f32 {
        Self::to_unit(self.current_usage(), 1_073_741_824.0)
    }

    /// Peak usage, expressed in kibibytes.
    pub fn peak_usage_as_kb(&self) -> f32 {
        Self::to_unit(self.peak_usage(), 1_024.0)
    }

    /// Peak usage, expressed in mebibytes.
    pub fn peak_usage_as_mb(&self) -> f32 {
        Self::to_unit(self.peak_usage(), 1_048_576.0)
    }

    /// Peak usage, expressed in gibibytes.
    pub fn peak_usage_as_gb(&self) -> f32 {
        Self::to_unit(self.peak_usage(), 1_073_741_824.0)
    }

    /// Resets the peak to the current usage, discarding the old
    /// high-water mark. NOTE(review): the load and store are two separate
    /// relaxed operations, so a concurrent allocation can interleave —
    /// same behavior as before, just worth knowing.
    pub fn reset_peak_usage(&self) {
        PEAK.store(CURRENT.load(Ordering::Relaxed), Ordering::Relaxed);
    }

    /// Converts a byte count into the given unit (1024 / 1024² / 1024³
    /// bytes per unit). Powers of two up to 2³⁰ are exactly representable
    /// in f32, so this matches the previous per-unit divisions exactly.
    fn to_unit(bytes: usize, bytes_per_unit: f32) -> f32 {
        bytes as f32 / bytes_per_unit
    }

    /// Records `size` freshly allocated bytes and raises the peak if the
    /// new total exceeds it.
    fn add_memory(&self, size: usize) {
        let new_total = CURRENT.fetch_add(size, Ordering::Relaxed) + size;
        PEAK.fetch_max(new_total, Ordering::Relaxed);
    }

    /// Records `size` bytes as freed.
    fn sub_memory(&self, size: usize) {
        CURRENT.fetch_sub(size, Ordering::Relaxed);
    }
}
unsafe impl GlobalAlloc for PeakAlloc {
    /// Forwards to the system allocator and, on success, adds the
    /// requested size to the usage counters.
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ret = System.alloc(layout);
        if !ret.is_null() {
            self.add_memory(layout.size());
        }
        ret
    }

    /// Forwards to the system allocator and subtracts the freed size.
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        System.dealloc(ptr, layout);
        self.sub_memory(layout.size());
    }

    /// Forwards to `System.alloc_zeroed` instead of `alloc` + manual
    /// `write_bytes`: the system allocator can hand out pages the OS has
    /// already zeroed (calloc), avoiding a redundant memset.
    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        let ret = System.alloc_zeroed(layout);
        if !ret.is_null() {
            self.add_memory(layout.size());
        }
        ret
    }

    /// Forwards to `System.realloc` instead of a hand-rolled
    /// alloc + copy + dealloc, so the allocation can grow or shrink in
    /// place when the allocator supports it, and the unchecked layout
    /// construction is no longer needed.
    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        let new_ptr = System.realloc(ptr, layout, new_size);
        if !new_ptr.is_null() {
            // Add before subtracting so PEAK transiently accounts for both
            // sizes, matching the previous implementation's worst-case
            // footprint during a moved reallocation.
            self.add_memory(new_size);
            self.sub_memory(layout.size());
        }
        new_ptr
    }
}
#[cfg(test)]
mod tests {
    use crate::{CURRENT, PEAK};
    use std::sync::atomic::Ordering;

    #[global_allocator]
    static PEAK_ALLOC: crate::PeakAlloc = crate::PeakAlloc;

    /// Regression test for issue #4: current usage must fall back to zero
    /// once an allocation is dropped, while the peak is retained.
    #[test]
    fn test_issue_4() {
        // Zero both counters first so allocations made by the test
        // harness before this point don't skew the expected values.
        CURRENT.store(0, Ordering::Relaxed);
        PEAK.store(0, Ordering::Relaxed);
        assert_eq!(PEAK_ALLOC.current_usage(), 0);
        assert_eq!(PEAK_ALLOC.peak_usage(), 0);
        {
            // 1000 u32 values = 4000 bytes live inside this scope.
            let mut values = vec![0_u32; 1000];
            assert_eq!(PEAK_ALLOC.current_usage(), 4000);
            assert_eq!(PEAK_ALLOC.peak_usage(), 4000);
            let mut expected_sum = 0;
            for (idx, slot) in values.iter_mut().enumerate() {
                *slot = idx as u32;
                expected_sum += idx as u32;
            }
            assert_eq!(values.iter().sum::<u32>(), expected_sum);
        }
        // The Vec has been dropped: live usage is back to zero...
        assert_eq!(PEAK_ALLOC.current_usage(), 0);
        // ...but the high-water mark still remembers the 4000-byte peak.
        assert_eq!(PEAK_ALLOC.peak_usage(), 4000);
    }
}