zigzag_alloc/alloc/counting.rs
//! Instrumented allocator wrapper for diagnostics and testing.
//!
//! [`CountingAllocator`] wraps any [`Allocator`] and records per-call
//! statistics: the number of allocations and deallocations and the total
//! bytes involved. It is transparent (every call is forwarded unchanged to
//! the inner allocator) and adds negligible overhead: two `Cell<usize>`
//! updates per call.
//!
//! ## Typical Use Cases
//!
//! * **Unit tests**: assert that a data structure performs exactly the
//!   expected number of allocations.
//! * **Benchmarks**: measure the live byte footprint.
//! * **Debug builds**: detect memory leaks (non-zero `bytes_live` at the end
//!   of a scope); see the sketch below.
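//!
//! A minimal sketch of the leak-detection pattern; `SystemAllocator` stands
//! in for any concrete [`Allocator`] implementation, and `run_workload` is a
//! hypothetical function under test:
//!
//! ```rust,ignore
//! let counting = CountingAllocator::new(SystemAllocator);
//! run_workload(&counting);
//! assert_eq!(counting.stats().bytes_live, 0, "memory leak detected");
//! ```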

use core::{
    alloc::Layout,
    cell::Cell,
    ptr::NonNull,
};

use super::allocator::Allocator;

/// An [`Allocator`] wrapper that records allocation and deallocation statistics.
///
/// Wrap any existing allocator with `CountingAllocator::new(inner)` to
/// transparently instrument it. Statistics can be queried at any time via
/// [`stats`](Self::stats) and reset via [`reset_stats`](Self::reset_stats).
///
/// # Thread Safety
///
/// `CountingAllocator` uses [`Cell`] internally, which is **not** `Sync`,
/// so it is suitable for single-threaded use only. For multi-threaded
/// scenarios, use a counting wrapper whose counters are atomics instead.
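///
/// A hypothetical sketch of such a variant (not part of this crate), with
/// each `Cell` counter swapped for an atomic:
///
/// ```rust,ignore
/// use core::sync::atomic::AtomicUsize;
///
/// struct AtomicCountingAllocator<A> {
///     inner: A,
///     alloc_count: AtomicUsize,
///     // ...remaining counters likewise
/// }
/// ```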
pub struct CountingAllocator<A: Allocator> {
    /// The wrapped allocator that performs actual memory management.
    pub(crate) inner: A,
    /// Total number of successful `alloc` calls since last reset.
    alloc_count: Cell<usize>,
    /// Total number of `dealloc` calls since last reset.
    dealloc_count: Cell<usize>,
    /// Total bytes requested via `alloc` since last reset.
    bytes_allocated: Cell<usize>,
    /// Total bytes released via `dealloc` since last reset.
    bytes_freed: Cell<usize>,
}

impl<A: Allocator> CountingAllocator<A> {
    /// Creates a new `CountingAllocator` wrapping `inner`.
    ///
    /// All counters start at zero.
    pub fn new(inner: A) -> Self {
        Self {
            inner,
            alloc_count: Cell::new(0),
            dealloc_count: Cell::new(0),
            bytes_allocated: Cell::new(0),
            bytes_freed: Cell::new(0),
        }
    }

    /// Returns a snapshot of all statistics.
    ///
    /// Counters are cumulative since the last call to
    /// [`reset_stats`](Self::reset_stats).
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// let counting = CountingAllocator::new(SystemAllocator);
    /// let _ = unsafe { counting.alloc(Layout::new::<u64>()) };
    /// let stats = counting.stats();
    /// assert_eq!(stats.allocs, 1);
    /// assert_eq!(stats.bytes_allocated, 8);
    /// ```
    pub fn stats(&self) -> AllocStats {
        AllocStats {
            allocs: self.alloc_count.get(),
            deallocs: self.dealloc_count.get(),
            bytes_allocated: self.bytes_allocated.get(),
            bytes_freed: self.bytes_freed.get(),
            // `saturating_sub` prevents underflow in the pathological case where
            // a caller frees memory that was not tracked (e.g. from a different
            // allocator instance before wrapping).
            bytes_live: self.bytes_allocated.get()
                .saturating_sub(self.bytes_freed.get()),
        }
    }

    /// Resets all counters to zero.
    ///
    /// Does **not** affect the underlying allocator or any live allocations.
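    ///
    /// A minimal sketch of per-phase measurement; `run_phase_two` and
    /// `expected_allocs` are hypothetical:
    ///
    /// ```rust,ignore
    /// counting.reset_stats();
    /// run_phase_two(&counting);
    /// assert_eq!(counting.stats().allocs, expected_allocs);
    /// ```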
    pub fn reset_stats(&self) {
        self.alloc_count.set(0);
        self.dealloc_count.set(0);
        self.bytes_allocated.set(0);
        self.bytes_freed.set(0);
    }
}

impl<A: Allocator> Allocator for CountingAllocator<A> {
    /// Forwards the allocation to the inner allocator and records statistics.
    ///
    /// Only successful allocations are counted; if the inner allocator returns
    /// `None`, the counters are not updated.
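    ///
    /// For instance, with a hypothetical `FailingAllocator` that always
    /// returns `None`:
    ///
    /// ```rust,ignore
    /// let counting = CountingAllocator::new(FailingAllocator);
    /// assert!(unsafe { counting.alloc(Layout::new::<u8>()) }.is_none());
    /// assert_eq!(counting.stats().allocs, 0);
    /// ```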
    ///
    /// # Safety
    ///
    /// Inherits all safety requirements from [`A::alloc`](Allocator::alloc).
    unsafe fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
        // SAFETY: the call is forwarded unchanged; the caller satisfies the
        // preconditions of the inner allocator.
        let ptr = unsafe { self.inner.alloc(layout)? };
        self.alloc_count.set(self.alloc_count.get() + 1);
        self.bytes_allocated.set(self.bytes_allocated.get() + layout.size());
        Some(ptr)
    }

    /// Forwards the deallocation to the inner allocator and records statistics.
    ///
    /// # Safety
    ///
    /// Inherits all safety requirements from [`A::dealloc`](Allocator::dealloc).
    /// In particular, `ptr` must have been obtained from this allocator (and
    /// therefore from its inner allocator).
    unsafe fn dealloc(&self, ptr: NonNull<u8>, layout: Layout) {
        self.dealloc_count.set(self.dealloc_count.get() + 1);
        self.bytes_freed.set(self.bytes_freed.get() + layout.size());
        // SAFETY: the call is forwarded unchanged; the caller guarantees that
        // `ptr` and `layout` match the original allocation.
        unsafe { self.inner.dealloc(ptr, layout) };
    }
}

/// A snapshot of allocation statistics produced by [`CountingAllocator::stats`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct AllocStats {
    /// Total number of successful allocations recorded.
    pub allocs: usize,
    /// Total number of deallocations recorded.
    pub deallocs: usize,
    /// Cumulative bytes requested across all successful allocations.
    pub bytes_allocated: usize,
    /// Cumulative bytes released across all deallocations.
    pub bytes_freed: usize,
    /// Current live byte footprint: `bytes_allocated - bytes_freed` (saturating).
    pub bytes_live: usize,
}
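
// A minimal sketch of how the counters can be exercised under test.
// `StdAllocator` is a hypothetical test-only adapter over the global
// allocator; it assumes `std` is available in test builds and that the
// `Allocator` trait has exactly the `alloc`/`dealloc` signatures above.
#[cfg(test)]
mod tests {
    use super::*;

    /// Hypothetical test-only allocator backed by the global allocator.
    struct StdAllocator;

    impl Allocator for StdAllocator {
        unsafe fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
            // SAFETY: the caller guarantees `layout` has a non-zero size.
            NonNull::new(unsafe { std::alloc::alloc(layout) })
        }

        unsafe fn dealloc(&self, ptr: NonNull<u8>, layout: Layout) {
            // SAFETY: `ptr` was returned by `std::alloc::alloc` with `layout`.
            unsafe { std::alloc::dealloc(ptr.as_ptr(), layout) };
        }
    }

    #[test]
    fn counts_allocs_deallocs_and_live_bytes() {
        let counting = CountingAllocator::new(StdAllocator);
        let layout = Layout::new::<u64>();

        // One allocation: counters reflect a single 8-byte live allocation.
        let ptr = unsafe { counting.alloc(layout) }.expect("allocation failed");
        assert_eq!(counting.stats().allocs, 1);
        assert_eq!(counting.stats().bytes_live, 8);

        // Freeing it brings the live footprint back to zero.
        unsafe { counting.dealloc(ptr, layout) };
        let stats = counting.stats();
        assert_eq!(stats.deallocs, 1);
        assert_eq!(stats.bytes_freed, 8);
        assert_eq!(stats.bytes_live, 0);
    }
}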