use parking_lot::{Mutex, RwLock};
use std::alloc::{GlobalAlloc, Layout, System};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::time::{Duration, Instant};

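/// Process-wide allocation tracker shared by [`TrackedAllocator`] and any
/// code that wants to inspect allocation statistics.
///
/// A minimal sketch of querying it (assuming profiling has been enabled via
/// the parent module's `enable_profiling`, as the tests below do):
///
/// ```ignore
/// let stats = GLOBAL_TRACKER.stats();
/// println!("live bytes: {}", stats.current_bytes);
/// ```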
pub static GLOBAL_TRACKER: std::sync::LazyLock<AllocationTracker> =
    std::sync::LazyLock::new(AllocationTracker::new);

/// A single tracked allocation, recorded at the moment it was made.
#[derive(Debug, Clone)]
pub struct AllocationRecord {
    /// Monotonically increasing allocation id.
    pub id: u64,
    /// Requested size in bytes.
    pub size: usize,
    /// Requested alignment in bytes.
    pub align: usize,
    /// When the allocation was recorded.
    pub timestamp: Instant,
    /// Captured backtrace, if profiling was enabled (debug builds only).
    #[cfg(debug_assertions)]
    pub backtrace: Option<String>,
}
impl AllocationRecord {
    /// Create a record for an allocation of `size` bytes with `align` alignment.
    pub fn new(id: u64, size: usize, align: usize) -> Self {
        Self {
            id,
            size,
            align,
            timestamp: Instant::now(),
            // Capturing a backtrace is expensive, so it only happens while
            // profiling is enabled, and the field only exists in debug builds.
            #[cfg(debug_assertions)]
            backtrace: if super::is_profiling_enabled() {
                Some(format!("{:?}", std::backtrace::Backtrace::capture()))
            } else {
                None
            },
        }
    }

    /// Time elapsed since this allocation was recorded.
    pub fn age(&self) -> Duration {
        self.timestamp.elapsed()
    }
}

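/// Tracks live allocations together with aggregate counters and a size
/// histogram.
///
/// A minimal usage sketch with hypothetical addresses and sizes
/// (`enable_profiling` lives in the parent module, as the tests below assume):
///
/// ```ignore
/// enable_profiling();
/// let tracker = AllocationTracker::new();
/// tracker.record_alloc(0x1000, 128, 8);
/// assert_eq!(tracker.stats().current_bytes, 128);
/// tracker.record_dealloc(0x1000, 128);
/// ```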
#[derive(Debug)]
pub struct AllocationTracker {
    /// Id handed to the next recorded allocation.
    next_id: AtomicU64,
    /// Count of all allocations ever recorded.
    total_allocations: AtomicU64,
    /// Count of all deallocations ever recorded.
    total_deallocations: AtomicU64,
    /// Total bytes ever allocated.
    total_bytes_allocated: AtomicUsize,
    /// Total bytes ever deallocated.
    total_bytes_deallocated: AtomicUsize,
    /// Bytes currently outstanding.
    current_bytes: AtomicUsize,
    /// High-water mark of `current_bytes`.
    peak_bytes: AtomicUsize,
    /// Live allocations keyed by pointer address.
    active: RwLock<HashMap<usize, AllocationRecord>>,
    /// Histogram of requested allocation sizes.
    size_histogram: Mutex<SizeHistogram>,
    /// When this tracker was created.
    start_time: Instant,
}

impl AllocationTracker {
    pub fn new() -> Self {
        Self {
            next_id: AtomicU64::new(1),
            total_allocations: AtomicU64::new(0),
            total_deallocations: AtomicU64::new(0),
            total_bytes_allocated: AtomicUsize::new(0),
            total_bytes_deallocated: AtomicUsize::new(0),
            current_bytes: AtomicUsize::new(0),
            peak_bytes: AtomicUsize::new(0),
            active: RwLock::new(HashMap::new()),
            size_histogram: Mutex::new(SizeHistogram::new()),
            start_time: Instant::now(),
        }
    }

    /// Record a new allocation at address `ptr`.
    pub fn record_alloc(&self, ptr: usize, size: usize, align: usize) {
        if !super::is_profiling_enabled() {
            return;
        }

        let id = self.next_id.fetch_add(1, Ordering::Relaxed);
        let record = AllocationRecord::new(id, size, align);

        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        self.total_bytes_allocated.fetch_add(size, Ordering::Relaxed);

        let current = self.current_bytes.fetch_add(size, Ordering::Relaxed) + size;

        // Update the peak with a CAS loop so concurrent allocations never
        // lower the recorded high-water mark.
        let mut peak = self.peak_bytes.load(Ordering::Relaxed);
        while current > peak {
            match self.peak_bytes.compare_exchange_weak(
                peak,
                current,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(p) => peak = p,
            }
        }

        self.active.write().insert(ptr, record);

        self.size_histogram.lock().record(size);
    }

    /// Record that the allocation at address `ptr` was freed.
    pub fn record_dealloc(&self, ptr: usize, size: usize) {
        if !super::is_profiling_enabled() {
            return;
        }

        // Only account for pointers we actually tracked; otherwise frees of
        // allocations made before profiling was enabled would underflow
        // `current_bytes`.
        if self.active.write().remove(&ptr).is_none() {
            return;
        }

        self.total_deallocations.fetch_add(1, Ordering::Relaxed);
        self.total_bytes_deallocated.fetch_add(size, Ordering::Relaxed);
        self.current_bytes.fetch_sub(size, Ordering::Relaxed);
    }

    /// Snapshot the current aggregate statistics.
    pub fn stats(&self) -> AllocationStats {
        AllocationStats {
            total_allocations: self.total_allocations.load(Ordering::Relaxed),
            total_deallocations: self.total_deallocations.load(Ordering::Relaxed),
            total_bytes_allocated: self.total_bytes_allocated.load(Ordering::Relaxed),
            total_bytes_deallocated: self.total_bytes_deallocated.load(Ordering::Relaxed),
            current_allocations: self.active.read().len() as u64,
            current_bytes: self.current_bytes.load(Ordering::Relaxed),
            peak_bytes: self.peak_bytes.load(Ordering::Relaxed),
            uptime: self.start_time.elapsed(),
        }
    }

    /// All currently live allocations.
    pub fn active_allocations(&self) -> Vec<AllocationRecord> {
        self.active.read().values().cloned().collect()
    }

    /// Live allocations older than `threshold`, useful for spotting leaks.
    pub fn old_allocations(&self, threshold: Duration) -> Vec<AllocationRecord> {
        self.active
            .read()
            .values()
            .filter(|r| r.age() > threshold)
            .cloned()
            .collect()
    }

    /// A copy of the size histogram.
    pub fn histogram(&self) -> SizeHistogram {
        self.size_histogram.lock().clone()
    }

    /// Reset every counter and forget all recorded allocations.
    pub fn reset(&self) {
        self.total_allocations.store(0, Ordering::Relaxed);
        self.total_deallocations.store(0, Ordering::Relaxed);
        self.total_bytes_allocated.store(0, Ordering::Relaxed);
        self.total_bytes_deallocated.store(0, Ordering::Relaxed);
        self.current_bytes.store(0, Ordering::Relaxed);
        self.peak_bytes.store(0, Ordering::Relaxed);
        self.active.write().clear();
        *self.size_histogram.lock() = SizeHistogram::new();
    }
}

impl Default for AllocationTracker {
    fn default() -> Self {
        Self::new()
    }
}

/// Point-in-time snapshot of the tracker's aggregate counters.
#[derive(Debug, Clone, Default)]
pub struct AllocationStats {
    /// Count of all allocations ever recorded.
    pub total_allocations: u64,
    /// Count of all deallocations ever recorded.
    pub total_deallocations: u64,
    /// Total bytes ever allocated.
    pub total_bytes_allocated: usize,
    /// Total bytes ever deallocated.
    pub total_bytes_deallocated: usize,
    /// Number of allocations still live.
    pub current_allocations: u64,
    /// Bytes currently outstanding.
    pub current_bytes: usize,
    /// High-water mark of outstanding bytes.
    pub peak_bytes: usize,
    /// Time since the tracker was created.
    pub uptime: Duration,
}

impl AllocationStats {
    /// Allocations minus deallocations; a positive value means allocations are still live.
    pub fn net_allocations(&self) -> i64 {
        self.total_allocations as i64 - self.total_deallocations as i64
    }

    /// Average allocations per second since the tracker was created.
    pub fn allocation_rate(&self) -> f64 {
        if self.uptime.as_secs_f64() > 0.0 {
            self.total_allocations as f64 / self.uptime.as_secs_f64()
        } else {
            0.0
        }
    }

    /// Mean allocation size in bytes (integer division; zero if nothing was allocated).
    pub fn avg_allocation_size(&self) -> usize {
        if self.total_allocations > 0 {
            self.total_bytes_allocated / self.total_allocations as usize
        } else {
            0
        }
    }

    /// Heuristic leak check: allocations outnumber deallocations and bytes
    /// are still outstanding.
    pub fn has_potential_leaks(&self) -> bool {
        self.net_allocations() > 0 && self.current_bytes > 0
    }
}

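/// Allocation-size histogram with seven fixed buckets from 64 bytes to 64 KiB,
/// plus an overflow bucket.
///
/// A small sketch of how it behaves (values chosen for illustration):
///
/// ```ignore
/// let mut hist = SizeHistogram::new();
/// hist.record(100); // falls in the 64-256B bucket
/// assert_eq!(hist.most_common_bucket(), ("64-256B", 1));
/// ```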
#[derive(Debug, Clone, Default)]
pub struct SizeHistogram {
    /// Counts per size bucket; see [`SizeHistogram::bucket_labels`].
    buckets: [u64; 7],
    /// Total number of recorded allocations.
    total: u64,
}

impl SizeHistogram {
    pub fn new() -> Self {
        Self::default()
    }

    /// Record an allocation of `size` bytes in the matching bucket.
    pub fn record(&mut self, size: usize) {
        let bucket = match size {
            0..=64 => 0,
            65..=256 => 1,
            257..=1024 => 2,
            1025..=4096 => 3,
            4097..=16384 => 4,
            16385..=65536 => 5,
            _ => 6,
        };
        self.buckets[bucket] += 1;
        self.total += 1;
    }

    /// Raw bucket counts, in the same order as [`SizeHistogram::bucket_labels`].
    pub fn buckets(&self) -> &[u64; 7] {
        &self.buckets
    }

    /// Human-readable labels for each bucket.
    pub fn bucket_labels() -> &'static [&'static str; 7] {
        &["0-64B", "64-256B", "256B-1K", "1K-4K", "4K-16K", "16K-64K", "64K+"]
    }

    /// The label and count of the bucket with the most recorded allocations.
    pub fn most_common_bucket(&self) -> (&'static str, u64) {
        let labels = Self::bucket_labels();
        // The bucket array always has seven entries, so `max_by_key` cannot fail.
        let (idx, count) = self
            .buckets
            .iter()
            .enumerate()
            .max_by_key(|(_, c)| *c)
            .unwrap();
        (labels[idx], *count)
    }

    /// Per-bucket share of all recorded allocations, in percent.
    pub fn percentages(&self) -> [f64; 7] {
        if self.total == 0 {
            return [0.0; 7];
        }
        let mut pcts = [0.0; 7];
        for (i, &count) in self.buckets.iter().enumerate() {
            pcts[i] = (count as f64 / self.total as f64) * 100.0;
        }
        pcts
    }
}

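/// A [`GlobalAlloc`] wrapper around the system allocator that reports every
/// allocation and deallocation to [`GLOBAL_TRACKER`].
///
/// A sketch of how it might be installed; this file does not register the
/// allocator itself, so the snippet below is an assumption about intended use:
///
/// ```ignore
/// #[global_allocator]
/// static ALLOC: TrackedAllocator = TrackedAllocator::new();
/// ```
///
/// Note that the tracking path itself allocates (it inserts into a `HashMap`
/// behind an `RwLock`), so when this is the global allocator the parent
/// module's profiling guard would need to prevent recursive tracking.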
pub struct TrackedAllocator {
    inner: System,
}

impl TrackedAllocator {
    pub const fn new() -> Self {
        Self { inner: System }
    }
}

impl Default for TrackedAllocator {
    fn default() -> Self {
        Self::new()
    }
}

unsafe impl GlobalAlloc for TrackedAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ptr = unsafe { self.inner.alloc(layout) };
        // Only record successful allocations.
        if !ptr.is_null() {
            GLOBAL_TRACKER.record_alloc(ptr as usize, layout.size(), layout.align());
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        GLOBAL_TRACKER.record_dealloc(ptr as usize, layout.size());
        unsafe { self.inner.dealloc(ptr, layout) };
    }

    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        let new_ptr = unsafe { self.inner.realloc(ptr, layout, new_size) };

        // Treat a realloc as a dealloc of the old block plus an alloc of the
        // new one. On failure the old block is still live, so only swap the
        // records when the reallocation actually succeeded.
        if !new_ptr.is_null() {
            GLOBAL_TRACKER.record_dealloc(ptr as usize, layout.size());
            GLOBAL_TRACKER.record_alloc(new_ptr as usize, new_size, layout.align());
        }

        new_ptr
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_allocation_record() {
        let record = AllocationRecord::new(1, 1024, 8);
        assert_eq!(record.id, 1);
        assert_eq!(record.size, 1024);
        assert_eq!(record.align, 8);
    }

    #[test]
    fn test_allocation_tracker() {
        super::super::enable_profiling();

        let tracker = AllocationTracker::new();

        tracker.record_alloc(0x1000, 100, 8);
        tracker.record_alloc(0x2000, 200, 8);

        let stats = tracker.stats();
        assert_eq!(stats.total_allocations, 2);
        assert_eq!(stats.total_bytes_allocated, 300);
        assert_eq!(stats.current_bytes, 300);

        tracker.record_dealloc(0x1000, 100);

        let stats = tracker.stats();
        assert_eq!(stats.total_deallocations, 1);
        assert_eq!(stats.current_bytes, 200);

        super::super::disable_profiling();
    }

    #[test]
    fn test_size_histogram() {
        let mut hist = SizeHistogram::new();

        hist.record(32);
        hist.record(128);
        hist.record(512);
        hist.record(32);

        assert_eq!(hist.buckets[0], 2);
        assert_eq!(hist.buckets[1], 1);
        assert_eq!(hist.buckets[2], 1);
    }

    #[test]
    fn test_stats_calculations() {
        let stats = AllocationStats {
            total_allocations: 100,
            total_deallocations: 80,
            total_bytes_allocated: 10000,
            total_bytes_deallocated: 8000,
            current_allocations: 20,
            current_bytes: 2000,
            peak_bytes: 5000,
            uptime: Duration::from_secs(10),
        };

        assert_eq!(stats.net_allocations(), 20);
        assert_eq!(stats.allocation_rate(), 10.0);
        assert_eq!(stats.avg_allocation_size(), 100);
        assert!(stats.has_potential_leaks());
    }
}