bench_rs/track_allocator.rs

use std::alloc::{GlobalAlloc, Layout};
use std::sync::atomic::{AtomicUsize, Ordering};

#[cfg(feature = "track-allocator")]
#[global_allocator]
pub static GLOBAL: TrackAllocator<std::alloc::System> = TrackAllocator {
    allocator: std::alloc::System,
    counter: AtomicUsize::new(0),
    peak: AtomicUsize::new(0)
};

/// Wraps a `GlobalAlloc` implementation and tracks the number of live heap
/// bytes (`counter`) together with the allocation high-water mark (`peak`).
pub struct TrackAllocator<A> where A: GlobalAlloc {
    pub allocator: A,
    pub counter: AtomicUsize,
    pub peak: AtomicUsize
}

impl<A> TrackAllocator<A> where A: GlobalAlloc {
    pub fn get(&self) -> usize {
        self.counter.load(Ordering::SeqCst)
    }

    pub fn counter(&'static self) -> &'static AtomicUsize {
        &self.counter
    }

    pub fn peak(&'static self) -> &'static AtomicUsize {
        &self.peak
    }

    #[inline]
    fn add(&self, u: usize) {
        self.counter.fetch_add(u, Ordering::SeqCst);
        self.check_peak();
    }

    #[inline]
    fn sub(&self, u: usize) {
        self.counter.fetch_sub(u, Ordering::SeqCst);
    }

    #[inline]
    fn check_peak(&self) {
        self.peak.fetch_max(self.get(), Ordering::SeqCst);
    }
}

unsafe impl<A> GlobalAlloc for TrackAllocator<A> where A: GlobalAlloc {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ret = self.allocator.alloc(layout);
        // Only count the bytes if the allocation actually succeeded.
        if !ret.is_null() {
            self.add(layout.size());
        }
        ret
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // Avoid underflowing the counter if it has been reset externally.
        if self.get() >= layout.size() {
            self.sub(layout.size());
        }
        self.allocator.dealloc(ptr, layout);
    }

    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        let ret = self.allocator.alloc_zeroed(layout);
        // As in `alloc`, only count the bytes on success.
        if !ret.is_null() {
            self.add(layout.size());
        }
        ret
    }

    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        let ret = self.allocator.realloc(ptr, layout, new_size);
        // Adjust the count by the size delta, but only if the reallocation succeeded.
        if !ret.is_null() {
            if new_size > layout.size() {
                self.add(new_size - layout.size());
            } else if new_size < layout.size() {
                self.sub(layout.size() - new_size);
            }
        }
        ret
    }
}
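
A minimal usage sketch, assuming the crate is built with the `track-allocator` feature and that `GLOBAL` above is in scope; the `report_allocations` helper is hypothetical and not part of track_allocator.rs:

// Hypothetical helper: report live and peak heap usage around a workload.
#[cfg(feature = "track-allocator")]
fn report_allocations<F: FnOnce()>(label: &str, work: F) {
    use std::sync::atomic::Ordering;

    // Snapshot the live-byte count and reset the high-water mark to it, so
    // `peak` reflects only the workload below.
    let before = GLOBAL.get();
    GLOBAL.peak().store(before, Ordering::SeqCst);

    work();

    let after = GLOBAL.get();
    let peak = GLOBAL.peak().load(Ordering::SeqCst);
    println!("{}: live {} -> {} bytes, peak {} bytes", label, before, after, peak);
}

For example, `report_allocations("collect", || { let v: Vec<u64> = (0..1_000).collect(); drop(v); })` would print the bytes still live after the closure returns and the peak reached while it ran.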