use parking_lot::{Mutex, RwLock};
use std::alloc::{GlobalAlloc, Layout, System};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::time::{Duration, Instant};

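/// Process-wide allocation tracker, lazily initialised on first use.
/// `TrackedAllocator` reports every allocation and deallocation here.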
pub static GLOBAL_TRACKER: std::sync::LazyLock<AllocationTracker> =
    std::sync::LazyLock::new(AllocationTracker::new);

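/// Metadata recorded for a single live heap allocation.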
#[derive(Debug, Clone)]
pub struct AllocationRecord {
    /// Unique id assigned by the tracker.
    pub id: u64,
    /// Requested allocation size in bytes.
    pub size: usize,
    /// Requested alignment in bytes.
    pub align: usize,
    /// When the allocation was recorded.
    pub timestamp: Instant,
    /// Captured backtrace (debug builds only, and only while profiling is enabled).
    #[cfg(debug_assertions)]
    pub backtrace: Option<String>,
}

impl AllocationRecord {
    pub fn new(id: u64, size: usize, align: usize) -> Self {
        Self {
            id,
            size,
            align,
            timestamp: Instant::now(),
            // Backtrace capture is expensive, so it is gated on the profiling flag.
            #[cfg(debug_assertions)]
            backtrace: if super::is_profiling_enabled() {
                Some(format!("{:?}", std::backtrace::Backtrace::capture()))
            } else {
                None
            },
        }
    }

    /// How long ago this allocation was made.
    pub fn age(&self) -> Duration {
        self.timestamp.elapsed()
    }
}

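/// Central bookkeeping for heap allocations: aggregate counters, a map of
/// currently live allocations keyed by pointer address, and a size histogram.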
#[derive(Debug)]
pub struct AllocationTracker {
    /// Monotonically increasing id handed to each new allocation record.
    next_id: AtomicU64,
    /// Total number of allocations seen.
    total_allocations: AtomicU64,
    /// Total number of deallocations seen.
    total_deallocations: AtomicU64,
    /// Cumulative bytes allocated.
    total_bytes_allocated: AtomicUsize,
    /// Cumulative bytes deallocated.
    total_bytes_deallocated: AtomicUsize,
    /// Bytes currently outstanding.
    current_bytes: AtomicUsize,
    /// High-water mark of outstanding bytes.
    peak_bytes: AtomicUsize,
    /// Live allocations keyed by pointer address.
    active: RwLock<HashMap<usize, AllocationRecord>>,
    /// Histogram of allocation sizes.
    size_histogram: Mutex<SizeHistogram>,
    /// When this tracker was created.
    start_time: Instant,
}

impl AllocationTracker {
    pub fn new() -> Self {
        Self {
            next_id: AtomicU64::new(1),
            total_allocations: AtomicU64::new(0),
            total_deallocations: AtomicU64::new(0),
            total_bytes_allocated: AtomicUsize::new(0),
            total_bytes_deallocated: AtomicUsize::new(0),
            current_bytes: AtomicUsize::new(0),
            peak_bytes: AtomicUsize::new(0),
            active: RwLock::new(HashMap::new()),
            size_histogram: Mutex::new(SizeHistogram::new()),
            start_time: Instant::now(),
        }
    }

    pub fn record_alloc(&self, ptr: usize, size: usize, align: usize) {
        if !super::is_profiling_enabled() {
            return;
        }

        let id = self.next_id.fetch_add(1, Ordering::Relaxed);
        let record = AllocationRecord::new(id, size, align);

        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        self.total_bytes_allocated
            .fetch_add(size, Ordering::Relaxed);

        let current = self.current_bytes.fetch_add(size, Ordering::Relaxed) + size;

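        // Publish the new peak with a compare-exchange loop: retry until either
        // our value lands or another thread has already recorded a higher peak.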
        let mut peak = self.peak_bytes.load(Ordering::Relaxed);
        while current > peak {
            match self.peak_bytes.compare_exchange_weak(
                peak,
                current,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(p) => peak = p,
            }
        }

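        // Note: inserting into the map and recording the histogram can themselves
        // allocate. If this tracker is driven from a global allocator, the
        // enclosing module is assumed to guard against re-entrant tracking
        // (e.g. via the profiling flag or a thread-local guard).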
        self.active.write().insert(ptr, record);

        self.size_histogram.lock().record(size);
    }

    pub fn record_dealloc(&self, ptr: usize, size: usize) {
        if !super::is_profiling_enabled() {
            return;
        }

        self.total_deallocations.fetch_add(1, Ordering::Relaxed);
        self.total_bytes_deallocated
            .fetch_add(size, Ordering::Relaxed);
        self.current_bytes.fetch_sub(size, Ordering::Relaxed);

        self.active.write().remove(&ptr);
    }

    pub fn stats(&self) -> AllocationStats {
        AllocationStats {
            total_allocations: self.total_allocations.load(Ordering::Relaxed),
            total_deallocations: self.total_deallocations.load(Ordering::Relaxed),
            total_bytes_allocated: self.total_bytes_allocated.load(Ordering::Relaxed),
            total_bytes_deallocated: self.total_bytes_deallocated.load(Ordering::Relaxed),
            current_allocations: self.active.read().len() as u64,
            current_bytes: self.current_bytes.load(Ordering::Relaxed),
            peak_bytes: self.peak_bytes.load(Ordering::Relaxed),
            uptime: self.start_time.elapsed(),
        }
    }

    pub fn active_allocations(&self) -> Vec<AllocationRecord> {
        self.active.read().values().cloned().collect()
    }

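    /// Live allocations older than `threshold`; useful for spotting suspected leaks.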
    pub fn old_allocations(&self, threshold: Duration) -> Vec<AllocationRecord> {
        self.active
            .read()
            .values()
            .filter(|r| r.age() > threshold)
            .cloned()
            .collect()
    }

    pub fn histogram(&self) -> SizeHistogram {
        self.size_histogram.lock().clone()
    }

    pub fn reset(&self) {
        self.total_allocations.store(0, Ordering::Relaxed);
        self.total_deallocations.store(0, Ordering::Relaxed);
        self.total_bytes_allocated.store(0, Ordering::Relaxed);
        self.total_bytes_deallocated.store(0, Ordering::Relaxed);
        self.current_bytes.store(0, Ordering::Relaxed);
        self.peak_bytes.store(0, Ordering::Relaxed);
        self.active.write().clear();
        *self.size_histogram.lock() = SizeHistogram::new();
    }
}

impl Default for AllocationTracker {
    fn default() -> Self {
        Self::new()
    }
}

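/// Point-in-time snapshot of the tracker's counters.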
#[derive(Debug, Clone, Default)]
pub struct AllocationStats {
    /// Total allocations recorded.
    pub total_allocations: u64,
    /// Total deallocations recorded.
    pub total_deallocations: u64,
    /// Cumulative bytes allocated.
    pub total_bytes_allocated: usize,
    /// Cumulative bytes deallocated.
    pub total_bytes_deallocated: usize,
    /// Allocations still live at snapshot time.
    pub current_allocations: u64,
    /// Bytes still live at snapshot time.
    pub current_bytes: usize,
    /// High-water mark of live bytes.
    pub peak_bytes: usize,
    /// Time since the tracker was created.
    pub uptime: Duration,
}

impl AllocationStats {
    /// Allocations minus deallocations.
    pub fn net_allocations(&self) -> i64 {
        self.total_allocations as i64 - self.total_deallocations as i64
    }

    /// Allocations per second over the tracker's uptime.
    pub fn allocation_rate(&self) -> f64 {
        if self.uptime.as_secs_f64() > 0.0 {
            self.total_allocations as f64 / self.uptime.as_secs_f64()
        } else {
            0.0
        }
    }

    /// Mean allocation size in bytes.
    pub fn avg_allocation_size(&self) -> usize {
        if self.total_allocations > 0 {
            self.total_bytes_allocated / self.total_allocations as usize
        } else {
            0
        }
    }

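    /// Heuristic only: allocations that are live at snapshot time are not
    /// necessarily leaks, merely candidates worth inspecting.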
    pub fn has_potential_leaks(&self) -> bool {
        self.net_allocations() > 0 && self.current_bytes > 0
    }
}

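/// Histogram of allocation sizes, bucketed from 0-64B up to 64K+.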
#[derive(Debug, Clone, Default)]
pub struct SizeHistogram {
    /// Allocation counts per size bucket (see `bucket_labels`).
    buckets: [u64; 7],
    /// Total number of recorded allocations.
    total: u64,
}

impl SizeHistogram {
    pub fn new() -> Self {
        Self::default()
    }

    pub fn record(&mut self, size: usize) {
        let bucket = match size {
            0..=64 => 0,
            65..=256 => 1,
            257..=1024 => 2,
            1025..=4096 => 3,
            4097..=16384 => 4,
            16385..=65536 => 5,
            _ => 6,
        };
        self.buckets[bucket] += 1;
        self.total += 1;
    }

    pub fn buckets(&self) -> &[u64; 7] {
        &self.buckets
    }

    pub fn bucket_labels() -> &'static [&'static str; 7] {
        &[
            "0-64B", "64-256B", "256B-1K", "1K-4K", "4K-16K", "16K-64K", "64K+",
        ]
    }

    pub fn most_common_bucket(&self) -> (&'static str, u64) {
        let labels = Self::bucket_labels();
        let (idx, count) = self
            .buckets
            .iter()
            .enumerate()
            .max_by_key(|(_, c)| *c)
            .unwrap();
        (labels[idx], *count)
    }

    pub fn percentages(&self) -> [f64; 7] {
        if self.total == 0 {
            return [0.0; 7];
        }
        let mut pcts = [0.0; 7];
        for (i, &count) in self.buckets.iter().enumerate() {
            pcts[i] = (count as f64 / self.total as f64) * 100.0;
        }
        pcts
    }
}

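/// A `GlobalAlloc` wrapper around the system allocator that reports every
/// allocation, deallocation, and reallocation to [`GLOBAL_TRACKER`].
///
/// A minimal sketch of how it would typically be installed (the static name
/// here is illustrative, not part of this module):
///
/// ```ignore
/// #[global_allocator]
/// static ALLOCATOR: TrackedAllocator = TrackedAllocator::new();
/// ```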
pub struct TrackedAllocator {
    inner: System,
}

impl TrackedAllocator {
    pub const fn new() -> Self {
        Self { inner: System }
    }
}

impl Default for TrackedAllocator {
    fn default() -> Self {
        Self::new()
    }
}

unsafe impl GlobalAlloc for TrackedAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ptr = unsafe { self.inner.alloc(layout) };
        if !ptr.is_null() {
            GLOBAL_TRACKER.record_alloc(ptr as usize, layout.size(), layout.align());
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        GLOBAL_TRACKER.record_dealloc(ptr as usize, layout.size());
        unsafe { self.inner.dealloc(ptr, layout) };
    }

    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
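        // Treat realloc as a dealloc of the old block followed by an alloc of the
        // new one. If the underlying realloc fails (returns null) the old block is
        // still live, so the tracker briefly under-counts in that rare case.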
        GLOBAL_TRACKER.record_dealloc(ptr as usize, layout.size());

        let new_ptr = unsafe { self.inner.realloc(ptr, layout, new_size) };

        if !new_ptr.is_null() {
            GLOBAL_TRACKER.record_alloc(new_ptr as usize, new_size, layout.align());
        }

        new_ptr
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_allocation_record() {
        let record = AllocationRecord::new(1, 1024, 8);
        assert_eq!(record.id, 1);
        assert_eq!(record.size, 1024);
        assert_eq!(record.align, 8);
    }

    #[test]
    #[ignore = "flaky in CI due to global profiling state interference"]
    fn test_allocation_tracker() {
        super::super::enable_profiling();

        let tracker = AllocationTracker::new();

        tracker.record_alloc(0x1000, 100, 8);
        tracker.record_alloc(0x2000, 200, 8);

        let stats = tracker.stats();
        assert_eq!(stats.total_allocations, 2);
        assert_eq!(stats.total_bytes_allocated, 300);
        assert_eq!(stats.current_bytes, 300);

        tracker.record_dealloc(0x1000, 100);

        let stats = tracker.stats();
        assert_eq!(stats.total_deallocations, 1);
        assert_eq!(stats.current_bytes, 200);

        super::super::disable_profiling();
    }

    #[test]
    fn test_size_histogram() {
        let mut hist = SizeHistogram::new();

        hist.record(32);
        hist.record(128);
        hist.record(512);
        hist.record(32);

        assert_eq!(hist.buckets[0], 2);
        assert_eq!(hist.buckets[1], 1);
        assert_eq!(hist.buckets[2], 1);
    }

    #[test]
    fn test_stats_calculations() {
        let stats = AllocationStats {
            total_allocations: 100,
            total_deallocations: 80,
            total_bytes_allocated: 10000,
            total_bytes_deallocated: 8000,
            current_allocations: 20,
            current_bytes: 2000,
            peak_bytes: 5000,
            uptime: Duration::from_secs(10),
        };

        assert_eq!(stats.net_allocations(), 20);
        assert_eq!(stats.allocation_rate(), 10.0);
        assert_eq!(stats.avg_allocation_size(), 100);
        assert!(stats.has_potential_leaks());
    }
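
    // Additional coverage (a sketch): exercises the public histogram summary
    // helpers, assuming the bucket boundaries defined in `SizeHistogram::record`.
    #[test]
    fn test_histogram_summary() {
        let mut hist = SizeHistogram::new();
        hist.record(16);
        hist.record(16);
        hist.record(100_000);

        // Two of the three samples fall in the smallest bucket.
        let (label, count) = hist.most_common_bucket();
        assert_eq!(label, "0-64B");
        assert_eq!(count, 2);

        // Percentages reflect the 2:1 split between the smallest and largest buckets.
        let pcts = hist.percentages();
        assert!((pcts[0] - 66.67).abs() < 0.01);
        assert!((pcts[6] - 33.33).abs() < 0.01);
    }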
}