1#![no_std]
60#![deny(missing_docs)]
61#![deny(clippy::all)]
62
63#[cfg(feature = "fmt")]
64mod fmt;
65mod histogram;
66mod stats;
67
68pub use histogram::Histogram;
69pub use stats::Stats;
70
71#[cfg(feature = "fmt")]
72extern crate alloc;
73
74use core::{
75 alloc::{GlobalAlloc, Layout},
76 cmp,
77 ops::Deref,
78 sync::atomic::{AtomicU64, AtomicUsize, Ordering},
79};
80
/// Wrapper forcing 64-byte alignment on its contents so each counter
/// cluster starts on its own cache line — presumably to avoid false
/// sharing between the independently-updated atomic groups below.
#[repr(align(64))]
struct CacheAligned<T>(T);

impl<T> Deref for CacheAligned<T> {
    type Target = T;

    // Transparent read access to the wrapped value.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
91
/// Event counter pair: how many events occurred (`count`) and the total
/// number of bytes involved (`sum`). Used for alloc, dealloc, and the
/// three realloc categories.
struct AllocCluster {
    // Number of recorded events.
    count: AtomicUsize,
    // Total bytes across all recorded events (u64 to reduce overflow risk).
    sum: AtomicU64,
}

/// Live-usage tracker: current net allocated bytes and the high-water mark.
struct UseCluster {
    // Bytes currently allocated (allocations minus deallocations).
    curr: AtomicUsize,
    // Peak value ever observed for `curr`.
    max: AtomicUsize,
}
101
// Successful allocations (from both `alloc` and `alloc_zeroed`).
static ALLOC: CacheAligned<AllocCluster> = CacheAligned(AllocCluster {
    count: AtomicUsize::new(0),
    sum: AtomicU64::new(0),
});

// Deallocations; `sum` accumulates the `Layout` sizes passed to `dealloc`.
static DEALLOC: CacheAligned<AllocCluster> = CacheAligned(AllocCluster {
    count: AtomicUsize::new(0),
    sum: AtomicU64::new(0),
});

// Reallocations where the new size is >= the old size; `sum` is the total
// number of bytes grown by.
static REALLOC_GROWTH: CacheAligned<AllocCluster> = CacheAligned(AllocCluster {
    count: AtomicUsize::new(0),
    sum: AtomicU64::new(0),
});

// Reallocations where the new size is < the old size; `sum` is the total
// number of bytes shrunk by.
static REALLOC_SHRINK: CacheAligned<AllocCluster> = CacheAligned(AllocCluster {
    count: AtomicUsize::new(0),
    sum: AtomicU64::new(0),
});

// Reallocations that returned a different pointer; `sum` accumulates
// min(old, new) size, i.e. the bytes the allocator had to copy.
static REALLOC_MOVE: CacheAligned<AllocCluster> = CacheAligned(AllocCluster {
    count: AtomicUsize::new(0),
    sum: AtomicU64::new(0),
});

// Current and peak net heap usage in bytes.
static USE: CacheAligned<UseCluster> = CacheAligned(UseCluster {
    curr: AtomicUsize::new(0),
    max: AtomicUsize::new(0),
});

// Power-of-two size histograms: bucket i counts events whose size s
// satisfies floor(log2(s)) == i (see `bucket_of`).
static ALLOC_BUCKETS: CacheAligned<[AtomicUsize; 64]> =
    CacheAligned([const { AtomicUsize::new(0) }; 64]);
static REALLOC_GROWTH_BUCKETS: CacheAligned<[AtomicUsize; 64]> =
    CacheAligned([const { AtomicUsize::new(0) }; 64]);
static REALLOC_SHRINK_BUCKETS: CacheAligned<[AtomicUsize; 64]> =
    CacheAligned([const { AtomicUsize::new(0) }; 64]);

// Failed (null-returning) allocation and reallocation attempts.
static ALLOC_FAIL_COUNT: AtomicUsize = AtomicUsize::new(0);
static REALLOC_FAIL_COUNT: AtomicUsize = AtomicUsize::new(0);
141
/// A [`GlobalAlloc`] wrapper that records allocation statistics
/// (counts, byte totals, size histograms, live-usage high-water mark)
/// in global atomics while delegating all allocation work to the
/// wrapped allocator `A`.
///
/// NOTE(review): the stats live in `static`s, so all `Heapster`
/// instances share one set of counters. Required under the crate's
/// `#![deny(missing_docs)]`.
#[derive(Debug, Default, Clone, Copy)]
pub struct Heapster<A: GlobalAlloc>(A);
145
146fn bucket_snapshot(buckets: &[AtomicUsize; 64]) -> Histogram {
147 let mut out = [0usize; 64];
148 for (i, b) in buckets.iter().enumerate() {
149 out[i] = b.load(Ordering::Relaxed);
150 }
151 Histogram { buckets: out }
152}
153
impl<A: GlobalAlloc> Heapster<A> {
    /// Wraps `alloc` in a stats-recording [`Heapster`].
    pub const fn new(alloc: A) -> Self {
        Self(alloc)
    }

    /// Returns a reference to the wrapped allocator.
    pub const fn inner(&self) -> &A {
        &self.0
    }

    /// Number of successful allocations (via `alloc` or `alloc_zeroed`).
    #[inline]
    pub fn alloc_count(&self) -> usize {
        ALLOC.count.load(Ordering::Relaxed)
    }

    /// Total bytes requested by successful allocations.
    #[inline]
    pub fn alloc_sum(&self) -> u64 {
        ALLOC.sum.load(Ordering::Relaxed)
    }

    /// Snapshot of the power-of-two size histogram of successful
    /// allocations.
    #[inline]
    pub fn alloc_histogram(&self) -> Histogram {
        bucket_snapshot(&ALLOC_BUCKETS)
    }

    /// Number of allocation attempts that returned null.
    #[inline]
    pub fn alloc_fail_count(&self) -> usize {
        ALLOC_FAIL_COUNT.load(Ordering::Relaxed)
    }

    /// Number of deallocations.
    #[inline]
    pub fn dealloc_count(&self) -> usize {
        DEALLOC.count.load(Ordering::Relaxed)
    }

    /// Total bytes released by deallocations.
    #[inline]
    pub fn dealloc_sum(&self) -> u64 {
        DEALLOC.sum.load(Ordering::Relaxed)
    }

    /// Number of successful reallocations to an equal or larger size.
    #[inline]
    pub fn realloc_growth_count(&self) -> usize {
        REALLOC_GROWTH.count.load(Ordering::Relaxed)
    }

    /// Total bytes of growth across growing reallocations.
    #[inline]
    pub fn realloc_growth_sum(&self) -> u64 {
        REALLOC_GROWTH.sum.load(Ordering::Relaxed)
    }

    /// Snapshot of the power-of-two histogram of growth deltas.
    pub fn realloc_growth_histogram(&self) -> Histogram {
        bucket_snapshot(&REALLOC_GROWTH_BUCKETS)
    }

    /// Number of successful reallocations to a smaller size.
    #[inline]
    pub fn realloc_shrink_count(&self) -> usize {
        REALLOC_SHRINK.count.load(Ordering::Relaxed)
    }

    /// Total bytes of shrinkage across shrinking reallocations.
    #[inline]
    pub fn realloc_shrink_sum(&self) -> u64 {
        REALLOC_SHRINK.sum.load(Ordering::Relaxed)
    }

    /// Snapshot of the power-of-two histogram of shrink deltas.
    #[inline]
    pub fn realloc_shrink_histogram(&self) -> Histogram {
        bucket_snapshot(&REALLOC_SHRINK_BUCKETS)
    }

    /// Number of reallocations that moved the block to a new address.
    #[inline]
    pub fn realloc_move_count(&self) -> usize {
        REALLOC_MOVE.count.load(Ordering::Relaxed)
    }

    /// Total bytes copied by moving reallocations
    /// (min of old and new size per move).
    #[inline]
    pub fn realloc_move_sum(&self) -> u64 {
        REALLOC_MOVE.sum.load(Ordering::Relaxed)
    }

    /// Number of reallocation attempts that returned null.
    #[inline]
    pub fn realloc_fail_count(&self) -> usize {
        REALLOC_FAIL_COUNT.load(Ordering::Relaxed)
    }

    /// Mean allocation size, or `None` if nothing has been allocated
    /// (`checked_div` yields `None` on a zero count).
    pub fn alloc_avg(&self) -> Option<usize> {
        let sum = self.alloc_sum();
        let count = self.alloc_count();
        sum.checked_div(count as u64).map(|avg| avg as usize)
    }

    /// Mean deallocation size, or `None` if nothing has been deallocated.
    pub fn dealloc_avg(&self) -> Option<usize> {
        let sum = self.dealloc_sum();
        let count = self.dealloc_count();
        sum.checked_div(count as u64).map(|avg| avg as usize)
    }

    /// Mean growth delta, or `None` if no growing realloc occurred.
    pub fn realloc_growth_avg(&self) -> Option<usize> {
        let sum = self.realloc_growth_sum();
        let count = self.realloc_growth_count();
        sum.checked_div(count as u64).map(|avg| avg as usize)
    }

    /// Mean shrink delta, or `None` if no shrinking realloc occurred.
    pub fn realloc_shrink_avg(&self) -> Option<usize> {
        let sum = self.realloc_shrink_sum();
        let count = self.realloc_shrink_count();
        sum.checked_div(count as u64).map(|avg| avg as usize)
    }

    /// Mean bytes copied per moving realloc, or `None` if none occurred.
    pub fn realloc_move_avg(&self) -> Option<usize> {
        let sum = self.realloc_move_sum();
        let count = self.realloc_move_count();
        sum.checked_div(count as u64).map(|avg| avg as usize)
    }

    /// Net bytes currently allocated.
    #[inline]
    pub fn use_curr(&self) -> usize {
        USE.curr.load(Ordering::Relaxed)
    }

    /// Peak net bytes ever allocated (high-water mark).
    #[inline]
    pub fn use_max(&self) -> usize {
        USE.max.load(Ordering::Relaxed)
    }

    /// Runs `f` and returns its result together with the difference of
    /// the stats snapshots taken before and after the call.
    ///
    /// NOTE(review): `self.stats()` is defined elsewhere in this crate —
    /// presumably a full `Stats` snapshot; other threads allocating
    /// concurrently will also show up in the delta.
    pub fn measure<R>(&self, f: impl FnOnce() -> R) -> (R, Stats) {
        let before = self.stats();
        let r = f();
        let after = self.stats();
        (r, &after - &before)
    }

    /// Resets all counters, sums, histograms, and failure counts to
    /// zero, and resets the high-water mark to the current usage.
    ///
    /// Each counter is cleared with an independent relaxed store, so a
    /// reset is not atomic with respect to concurrent allocations.
    pub fn reset(&self) {
        ALLOC.sum.store(0, Ordering::Relaxed);
        ALLOC.count.store(0, Ordering::Relaxed);
        for b in &*ALLOC_BUCKETS {
            b.store(0, Ordering::Relaxed);
        }
        ALLOC_FAIL_COUNT.store(0, Ordering::Relaxed);

        DEALLOC.sum.store(0, Ordering::Relaxed);
        DEALLOC.count.store(0, Ordering::Relaxed);

        REALLOC_GROWTH.count.store(0, Ordering::Relaxed);
        REALLOC_GROWTH.sum.store(0, Ordering::Relaxed);
        for b in &*REALLOC_GROWTH_BUCKETS {
            b.store(0, Ordering::Relaxed);
        }

        REALLOC_SHRINK.count.store(0, Ordering::Relaxed);
        REALLOC_SHRINK.sum.store(0, Ordering::Relaxed);
        for b in &*REALLOC_SHRINK_BUCKETS {
            b.store(0, Ordering::Relaxed);
        }

        REALLOC_MOVE.count.store(0, Ordering::Relaxed);
        REALLOC_MOVE.sum.store(0, Ordering::Relaxed);
        REALLOC_FAIL_COUNT.store(0, Ordering::Relaxed);

        // `curr` is deliberately NOT cleared (live memory is still live);
        // the peak restarts from the current usage.
        USE.max.store(self.use_curr(), Ordering::Relaxed);
    }
}
352
/// Histogram bucket index for `size`: `floor(log2(size))`, i.e. the
/// position of the highest set bit. Callers must pass `size > 0`;
/// zero would underflow the subtraction below.
#[inline]
fn bucket_of(size: usize) -> usize {
    debug_assert!(size > 0);
    let highest_bit = usize::BITS - 1 - size.leading_zeros();
    highest_bit as usize
}
358
359unsafe impl<A: GlobalAlloc> GlobalAlloc for Heapster<A> {
360 unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
361 let ret = unsafe { self.0.alloc(layout) };
362 if !ret.is_null() {
363 let size = layout.size();
364 ALLOC.sum.fetch_add(size as u64, Ordering::Relaxed);
365 ALLOC.count.fetch_add(1, Ordering::Relaxed);
366 let curr = USE.curr.fetch_add(size, Ordering::Relaxed) + size;
367 USE.max.fetch_max(curr, Ordering::Relaxed);
368 ALLOC_BUCKETS[bucket_of(size)].fetch_add(1, Ordering::Relaxed);
369 } else {
370 ALLOC_FAIL_COUNT.fetch_add(1, Ordering::Relaxed);
371 }
372
373 ret
374 }
375
376 unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
377 unsafe { self.0.dealloc(ptr, layout) };
378 let size = layout.size();
379 USE.curr.fetch_sub(size, Ordering::Relaxed);
380 DEALLOC.sum.fetch_add(size as u64, Ordering::Relaxed);
381 DEALLOC.count.fetch_add(1, Ordering::Relaxed);
382 }
383
384 unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
385 let new_ptr = unsafe { self.0.realloc(ptr, layout, new_size) };
386 if !new_ptr.is_null() {
387 if new_size >= layout.size() {
388 let diff = new_size - layout.size();
389 REALLOC_GROWTH.count.fetch_add(1, Ordering::Relaxed);
390 REALLOC_GROWTH.sum.fetch_add(diff as u64, Ordering::Relaxed);
391 let curr = USE.curr.fetch_add(diff, Ordering::Relaxed) + diff;
392 USE.max.fetch_max(curr, Ordering::Relaxed);
393 REALLOC_GROWTH_BUCKETS[bucket_of(diff)].fetch_add(1, Ordering::Relaxed);
394 } else {
395 let diff = layout.size() - new_size;
396 REALLOC_SHRINK.count.fetch_add(1, Ordering::Relaxed);
397 REALLOC_SHRINK.sum.fetch_add(diff as u64, Ordering::Relaxed);
398 USE.curr.fetch_sub(diff, Ordering::Relaxed);
399 REALLOC_SHRINK_BUCKETS[bucket_of(diff)].fetch_add(1, Ordering::Relaxed);
400 }
401 if new_ptr != ptr {
402 REALLOC_MOVE.count.fetch_add(1, Ordering::Relaxed);
403 REALLOC_MOVE
404 .sum
405 .fetch_add(cmp::min(layout.size(), new_size) as u64, Ordering::Relaxed);
406 }
407 } else {
408 REALLOC_FAIL_COUNT.fetch_add(1, Ordering::Relaxed);
409 }
410
411 new_ptr
412 }
413
414 unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
415 let ret = unsafe { self.0.alloc_zeroed(layout) };
416 if !ret.is_null() {
417 let size = layout.size();
418 ALLOC.sum.fetch_add(size as u64, Ordering::Relaxed);
419 ALLOC.count.fetch_add(1, Ordering::Relaxed);
420 let curr = USE.curr.fetch_add(size, Ordering::Relaxed) + size;
421 USE.max.fetch_max(curr, Ordering::Relaxed);
422 ALLOC_BUCKETS[bucket_of(size)].fetch_add(1, Ordering::Relaxed);
423 } else {
424 ALLOC_FAIL_COUNT.fetch_add(1, Ordering::Relaxed);
425 }
426
427 ret
428 }
429}