
prax_query/profiling/allocation.rs

//! Allocation tracking and statistics.
//!
//! Provides fine-grained tracking of memory allocations for leak detection
//! and performance analysis.

use parking_lot::{Mutex, RwLock};
use std::alloc::{GlobalAlloc, Layout, System};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::time::{Duration, Instant};

/// Global allocation tracker instance.
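///
/// A sketch of reading a snapshot (illustrative; the import path assumes
/// `GLOBAL_TRACKER` is re-exported from `prax_query::profiling` the same
/// way `TrackedAllocator` is below):
///
/// ```rust,ignore
/// use prax_query::profiling::GLOBAL_TRACKER;
///
/// let stats = GLOBAL_TRACKER.stats();
/// println!(
///     "live: {} allocations, {} bytes (peak {} bytes)",
///     stats.current_allocations, stats.current_bytes, stats.peak_bytes
/// );
/// ```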
pub static GLOBAL_TRACKER: std::sync::LazyLock<AllocationTracker> =
    std::sync::LazyLock::new(AllocationTracker::new);

// ============================================================================
// Allocation Record
// ============================================================================

/// Record of a single allocation.
#[derive(Debug, Clone)]
pub struct AllocationRecord {
    /// Unique allocation ID.
    pub id: u64,
    /// Size in bytes.
    pub size: usize,
    /// Memory alignment.
    pub align: usize,
    /// Timestamp when allocated.
    pub timestamp: Instant,
    /// Optional backtrace (expensive, only in debug mode).
    #[cfg(debug_assertions)]
    pub backtrace: Option<String>,
}

impl AllocationRecord {
    /// Create a new allocation record.
    pub fn new(id: u64, size: usize, align: usize) -> Self {
        Self {
            id,
            size,
            align,
            timestamp: Instant::now(),
            #[cfg(debug_assertions)]
            backtrace: if super::is_profiling_enabled() {
                Some(format!("{:?}", std::backtrace::Backtrace::capture()))
            } else {
                None
            },
        }
    }

    /// Get the age of this allocation.
    pub fn age(&self) -> Duration {
        self.timestamp.elapsed()
    }
}

// ============================================================================
// Allocation Tracker
// ============================================================================

/// Tracks memory allocations and deallocations.
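///
/// Manual usage sketch (mirrors the unit test at the bottom of this file;
/// note that `record_alloc`/`record_dealloc` are no-ops unless profiling
/// is enabled):
///
/// ```rust,ignore
/// let tracker = AllocationTracker::new();
/// tracker.record_alloc(0x1000, 100, 8); // address, size, align
/// tracker.record_dealloc(0x1000, 100);
/// assert_eq!(tracker.stats().total_allocations, 1);
/// assert_eq!(tracker.stats().current_bytes, 0);
/// ```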
#[derive(Debug)]
pub struct AllocationTracker {
    /// Counter for allocation IDs.
    next_id: AtomicU64,
    /// Total allocations.
    total_allocations: AtomicU64,
    /// Total deallocations.
    total_deallocations: AtomicU64,
    /// Total bytes allocated.
    total_bytes_allocated: AtomicUsize,
    /// Total bytes deallocated.
    total_bytes_deallocated: AtomicUsize,
    /// Current bytes allocated.
    current_bytes: AtomicUsize,
    /// Peak bytes allocated.
    peak_bytes: AtomicUsize,
    /// Active allocations (ptr -> record).
    active: RwLock<HashMap<usize, AllocationRecord>>,
    /// Allocation size histogram.
    size_histogram: Mutex<SizeHistogram>,
    /// Start time.
    start_time: Instant,
}

impl AllocationTracker {
    /// Create a new allocation tracker.
    pub fn new() -> Self {
        Self {
            next_id: AtomicU64::new(1),
            total_allocations: AtomicU64::new(0),
            total_deallocations: AtomicU64::new(0),
            total_bytes_allocated: AtomicUsize::new(0),
            total_bytes_deallocated: AtomicUsize::new(0),
            current_bytes: AtomicUsize::new(0),
            peak_bytes: AtomicUsize::new(0),
            active: RwLock::new(HashMap::new()),
            size_histogram: Mutex::new(SizeHistogram::new()),
            start_time: Instant::now(),
        }
    }

    /// Record an allocation.
    pub fn record_alloc(&self, ptr: usize, size: usize, align: usize) {
        if !super::is_profiling_enabled() {
            return;
        }

        let id = self.next_id.fetch_add(1, Ordering::Relaxed);
        let record = AllocationRecord::new(id, size, align);

        // Update counters
        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        self.total_bytes_allocated
            .fetch_add(size, Ordering::Relaxed);

        let current = self.current_bytes.fetch_add(size, Ordering::Relaxed) + size;

        // Update peak with a CAS loop: `compare_exchange_weak` may fail
        // spuriously or because another thread raced us, so retry with the
        // freshly observed peak until it is at least `current`.
        let mut peak = self.peak_bytes.load(Ordering::Relaxed);
        while current > peak {
            match self.peak_bytes.compare_exchange_weak(
                peak,
                current,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(p) => peak = p,
            }
        }

        // Record allocation
        self.active.write().insert(ptr, record);

        // Update histogram
        self.size_histogram.lock().record(size);
    }

    /// Record a deallocation.
    pub fn record_dealloc(&self, ptr: usize, size: usize) {
        if !super::is_profiling_enabled() {
            return;
        }

        self.total_deallocations.fetch_add(1, Ordering::Relaxed);
        self.total_bytes_deallocated
            .fetch_add(size, Ordering::Relaxed);
        self.current_bytes.fetch_sub(size, Ordering::Relaxed);

        // Remove from active
        self.active.write().remove(&ptr);
    }

    /// Get current statistics.
    pub fn stats(&self) -> AllocationStats {
        AllocationStats {
            total_allocations: self.total_allocations.load(Ordering::Relaxed),
            total_deallocations: self.total_deallocations.load(Ordering::Relaxed),
            total_bytes_allocated: self.total_bytes_allocated.load(Ordering::Relaxed),
            total_bytes_deallocated: self.total_bytes_deallocated.load(Ordering::Relaxed),
            current_allocations: self.active.read().len() as u64,
            current_bytes: self.current_bytes.load(Ordering::Relaxed),
            peak_bytes: self.peak_bytes.load(Ordering::Relaxed),
            uptime: self.start_time.elapsed(),
        }
    }

    /// Get active allocations.
    pub fn active_allocations(&self) -> Vec<AllocationRecord> {
        self.active.read().values().cloned().collect()
    }

    /// Get allocations older than a threshold (potential leaks).
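    ///
    /// Sketch of a simple leak sweep (illustrative; the 60-second
    /// threshold is an arbitrary choice, not a library default):
    ///
    /// ```rust,ignore
    /// for record in tracker.old_allocations(Duration::from_secs(60)) {
    ///     eprintln!("possible leak: {} bytes, alive {:?}", record.size, record.age());
    /// }
    /// ```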
    pub fn old_allocations(&self, threshold: Duration) -> Vec<AllocationRecord> {
        self.active
            .read()
            .values()
            .filter(|r| r.age() > threshold)
            .cloned()
            .collect()
    }

    /// Get allocation size histogram.
    pub fn histogram(&self) -> SizeHistogram {
        self.size_histogram.lock().clone()
    }

    /// Reset all tracking state.
    pub fn reset(&self) {
        self.total_allocations.store(0, Ordering::Relaxed);
        self.total_deallocations.store(0, Ordering::Relaxed);
        self.total_bytes_allocated.store(0, Ordering::Relaxed);
        self.total_bytes_deallocated.store(0, Ordering::Relaxed);
        self.current_bytes.store(0, Ordering::Relaxed);
        self.peak_bytes.store(0, Ordering::Relaxed);
        self.active.write().clear();
        *self.size_histogram.lock() = SizeHistogram::new();
    }
}

impl Default for AllocationTracker {
    fn default() -> Self {
        Self::new()
    }
}

// ============================================================================
// Allocation Statistics
// ============================================================================

/// Summary statistics for allocations.
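///
/// A sketch of the derived metrics (illustrative; `tracker` is any
/// `AllocationTracker`):
///
/// ```rust,ignore
/// let stats = tracker.stats();
/// let rate = stats.allocation_rate();    // allocations per second
/// let avg = stats.avg_allocation_size(); // bytes per allocation
/// let net = stats.net_allocations();     // allocs minus deallocs
/// ```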
#[derive(Debug, Clone, Default)]
pub struct AllocationStats {
    /// Total number of allocations.
    pub total_allocations: u64,
    /// Total number of deallocations.
    pub total_deallocations: u64,
    /// Total bytes ever allocated.
    pub total_bytes_allocated: usize,
    /// Total bytes ever deallocated.
    pub total_bytes_deallocated: usize,
    /// Current number of active allocations.
    pub current_allocations: u64,
    /// Current bytes allocated.
    pub current_bytes: usize,
    /// Peak bytes allocated.
    pub peak_bytes: usize,
    /// Time since tracking started.
    pub uptime: Duration,
}

impl AllocationStats {
    /// Get net allocations (allocs - deallocs).
    pub fn net_allocations(&self) -> i64 {
        self.total_allocations as i64 - self.total_deallocations as i64
    }

    /// Get allocation rate (allocations per second).
    pub fn allocation_rate(&self) -> f64 {
        if self.uptime.as_secs_f64() > 0.0 {
            self.total_allocations as f64 / self.uptime.as_secs_f64()
        } else {
            0.0
        }
    }

    /// Get average allocation size.
    pub fn avg_allocation_size(&self) -> usize {
        if self.total_allocations > 0 {
            self.total_bytes_allocated / self.total_allocations as usize
        } else {
            0
        }
    }

    /// Heuristically check for potential leaks: more allocations than
    /// deallocations, with bytes still outstanding.
    pub fn has_potential_leaks(&self) -> bool {
        self.net_allocations() > 0 && self.current_bytes > 0
    }
}

// ============================================================================
// Size Histogram
// ============================================================================

/// Histogram of allocation sizes.
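///
/// Example (mirrors the unit test at the bottom of this file):
///
/// ```rust,ignore
/// let mut hist = SizeHistogram::new();
/// hist.record(32);  // lands in the 0-64B bucket
/// hist.record(128); // lands in the 64-256B bucket
/// assert_eq!(hist.buckets()[0], 1);
/// assert_eq!(hist.buckets()[1], 1);
/// ```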
#[derive(Debug, Clone, Default)]
pub struct SizeHistogram {
    /// Buckets (upper bounds inclusive): <=64, <=256, <=1K, <=4K, <=16K,
    /// <=64K, and >64K.
    buckets: [u64; 7],
    /// Total allocations.
    total: u64,
}

impl SizeHistogram {
    /// Create a new histogram.
    pub fn new() -> Self {
        Self::default()
    }

    /// Record an allocation size.
    pub fn record(&mut self, size: usize) {
        let bucket = match size {
            0..=64 => 0,
            65..=256 => 1,
            257..=1024 => 2,
            1025..=4096 => 3,
            4097..=16384 => 4,
            16385..=65536 => 5,
            _ => 6,
        };
        self.buckets[bucket] += 1;
        self.total += 1;
    }

    /// Get bucket counts.
    pub fn buckets(&self) -> &[u64; 7] {
        &self.buckets
    }

    /// Get bucket labels.
    pub fn bucket_labels() -> &'static [&'static str; 7] {
        &[
            "0-64B", "64-256B", "256B-1K", "1K-4K", "4K-16K", "16K-64K", "64K+",
        ]
    }

    /// Get the most common bucket.
    pub fn most_common_bucket(&self) -> (&'static str, u64) {
        let labels = Self::bucket_labels();
        let (idx, count) = self
            .buckets
            .iter()
            .enumerate()
            .max_by_key(|(_, c)| *c)
            .unwrap();
        (labels[idx], *count)
    }

    /// Get percentage for each bucket.
    pub fn percentages(&self) -> [f64; 7] {
        if self.total == 0 {
            return [0.0; 7];
        }
        let mut pcts = [0.0; 7];
        for (i, &count) in self.buckets.iter().enumerate() {
            pcts[i] = (count as f64 / self.total as f64) * 100.0;
        }
        pcts
    }
}

// ============================================================================
// Tracked Allocator
// ============================================================================

/// A wrapper allocator that tracks allocations.
///
/// Use this as the global allocator to enable automatic tracking:
///
/// ```rust,ignore
/// use prax_query::profiling::TrackedAllocator;
///
/// #[global_allocator]
/// static ALLOC: TrackedAllocator = TrackedAllocator::new();
/// ```
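///
/// Caveat: with profiling enabled, `record_alloc` itself allocates (it
/// captures backtraces and inserts into a `HashMap` while holding a write
/// lock), so the tracking path can re-enter the allocator and deadlock. A
/// minimal sketch of one way to guard against that, using a hypothetical
/// thread-local flag that is not part of this module:
///
/// ```rust,ignore
/// use std::cell::Cell;
///
/// thread_local! {
///     // Set while the tracker is doing its own bookkeeping.
///     static IN_TRACKER: Cell<bool> = const { Cell::new(false) };
/// }
///
/// // At the top of record_alloc / record_dealloc:
/// if IN_TRACKER.with(|f| f.replace(true)) {
///     return; // re-entrant call from our own bookkeeping
/// }
/// // ... update counters, map, and histogram ...
/// IN_TRACKER.with(|f| f.set(false));
/// ```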
pub struct TrackedAllocator {
    inner: System,
}

impl TrackedAllocator {
    /// Create a new tracked allocator.
    pub const fn new() -> Self {
        Self { inner: System }
    }
}

impl Default for TrackedAllocator {
    fn default() -> Self {
        Self::new()
    }
}

unsafe impl GlobalAlloc for TrackedAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // SAFETY: caller upholds the `GlobalAlloc` contract for `layout`;
        // we forward the same layout to the System allocator.
        let ptr = unsafe { self.inner.alloc(layout) };
        if !ptr.is_null() {
            GLOBAL_TRACKER.record_alloc(ptr as usize, layout.size(), layout.align());
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        GLOBAL_TRACKER.record_dealloc(ptr as usize, layout.size());
        // SAFETY: caller guarantees `ptr` was allocated by this allocator
        // with `layout`; we forward both to the System allocator.
        unsafe { self.inner.dealloc(ptr, layout) };
    }

    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        // SAFETY: caller upholds the `realloc` contract (`ptr` from this
        // allocator with `layout`, `new_size` nonzero); forwarded as-is.
        let new_ptr = unsafe { self.inner.realloc(ptr, layout, new_size) };

        // Only update accounting on success: a failed `realloc` returns
        // null and leaves the original allocation (and its record) intact.
        if !new_ptr.is_null() {
            GLOBAL_TRACKER.record_dealloc(ptr as usize, layout.size());
            GLOBAL_TRACKER.record_alloc(new_ptr as usize, new_size, layout.align());
        }

        new_ptr
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_allocation_record() {
        let record = AllocationRecord::new(1, 1024, 8);
        assert_eq!(record.id, 1);
        assert_eq!(record.size, 1024);
        assert_eq!(record.align, 8);
    }

    #[test]
    #[ignore = "flaky in CI due to global profiling state interference"]
    fn test_allocation_tracker() {
        super::super::enable_profiling();

        let tracker = AllocationTracker::new();

        tracker.record_alloc(0x1000, 100, 8);
        tracker.record_alloc(0x2000, 200, 8);

        let stats = tracker.stats();
        assert_eq!(stats.total_allocations, 2);
        assert_eq!(stats.total_bytes_allocated, 300);
        assert_eq!(stats.current_bytes, 300);

        tracker.record_dealloc(0x1000, 100);

        let stats = tracker.stats();
        assert_eq!(stats.total_deallocations, 1);
        assert_eq!(stats.current_bytes, 200);

        super::super::disable_profiling();
    }

    #[test]
    fn test_size_histogram() {
        let mut hist = SizeHistogram::new();

        hist.record(32); // 0-64
        hist.record(128); // 64-256
        hist.record(512); // 256-1K
        hist.record(32); // 0-64

        assert_eq!(hist.buckets[0], 2); // 0-64
        assert_eq!(hist.buckets[1], 1); // 64-256
        assert_eq!(hist.buckets[2], 1); // 256-1K
    }

    #[test]
    fn test_stats_calculations() {
        let stats = AllocationStats {
            total_allocations: 100,
            total_deallocations: 80,
            total_bytes_allocated: 10000,
            total_bytes_deallocated: 8000,
            current_allocations: 20,
            current_bytes: 2000,
            peak_bytes: 5000,
            uptime: Duration::from_secs(10),
        };

        assert_eq!(stats.net_allocations(), 20);
        assert_eq!(stats.allocation_rate(), 10.0);
        assert_eq!(stats.avg_allocation_size(), 100);
        assert!(stats.has_potential_leaks());
    }
}