use std::{
alloc::{GlobalAlloc, Layout, System},
sync::atomic::{AtomicBool, AtomicIsize, Ordering},
};
/// Minimum alignment assumed for the system allocator (16 bytes is the
/// guaranteed minimum on common 64-bit targets).
pub const MIN_ALIGN: usize = 16;
/// Net bytes currently outstanding: incremented on alloc, decremented on
/// dealloc. Signed so the count can go transiently negative when counting
/// is enabled after some allocations already happened.
static ALLOCATED: AtomicIsize = AtomicIsize::new(0);
/// Global on/off switch for the bookkeeping; allocation still goes through
/// to `System` either way.
static ENABLE: AtomicBool = AtomicBool::new(false);
/// Zero-sized allocator that forwards every request to [`System`] while
/// (optionally) tracking the net allocated byte count.
pub struct CountingAllocator;
impl CountingAllocator {
    /// Memory ordering used for every counter/flag access.
    const ORD: Ordering = Ordering::SeqCst;

    /// Current net number of bytes recorded (allocations minus frees).
    pub fn get_allocated() -> isize {
        ALLOCATED.load(Self::ORD)
    }

    /// Whether allocation counting is currently switched on.
    pub fn is_enable() -> bool {
        ENABLE.load(Self::ORD)
    }

    /// Reset the byte counter back to zero.
    pub fn reset() {
        ALLOCATED.store(0, Self::ORD);
    }

    /// Turn counting on.
    pub fn enable() {
        ENABLE.store(true, Self::ORD);
    }

    /// Turn counting off.
    pub fn disable() {
        ENABLE.store(false, Self::ORD);
    }
}
unsafe impl GlobalAlloc for CountingAllocator {
    /// Forwards to `System::alloc`; on success, adds `layout.size()` to the
    /// counter while counting is enabled.
    #[inline]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ret = System.alloc(layout);
        if !ret.is_null() && Self::is_enable() {
            ALLOCATED.fetch_add(layout.size() as isize, Ordering::SeqCst);
        }
        ret
    }
    /// Forwards to `System::dealloc` and subtracts `layout.size()` while
    /// counting is enabled. (The pointer was valid for `layout`, so the
    /// subtraction mirrors the add done in `alloc`/`alloc_zeroed`.)
    #[inline]
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        System.dealloc(ptr, layout);
        if Self::is_enable() {
            ALLOCATED.fetch_sub(layout.size() as isize, Ordering::SeqCst);
        }
    }
    /// Forwards to `System::realloc`; on success, records the size delta
    /// (`new_size - layout.size()`), which may be negative for shrinks.
    ///
    /// BUG FIX: the previous guard additionally required
    /// `layout.align() <= MIN_ALIGN && layout.align() <= new_size` — a
    /// condition lifted from system-allocator code-path selection, not from
    /// accounting. It silently skipped bookkeeping for over-aligned
    /// reallocations even though their alloc/dealloc WERE counted, so
    /// `ALLOCATED` drifted. Every successful realloc must be counted.
    #[inline]
    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        let ret: *mut u8 = System.realloc(ptr, layout, new_size);
        if !ret.is_null() && Self::is_enable() {
            ALLOCATED.fetch_add(new_size as isize - layout.size() as isize, Ordering::SeqCst);
        }
        ret
    }
    /// Forwards to `System::alloc_zeroed`; same accounting as `alloc`.
    #[inline]
    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        let ret = System.alloc_zeroed(layout);
        if !ret.is_null() && Self::is_enable() {
            ALLOCATED.fetch_add(layout.size() as isize, Ordering::SeqCst);
        }
        ret
    }
}
// When the `allocation_counter` feature is enabled, install the counting
// allocator as the process-wide global allocator.
// BUG FIX: the initializer used a single colon (`perf_monitor: mem::...`),
// which is a syntax error (reads as type ascription); the path separator
// must be `::`.
#[cfg(feature = "allocation_counter")]
#[global_allocator]
static _COUNTER: perf_monitor::mem::CountingAllocator =
    perf_monitor::mem::CountingAllocator;