prax_query/profiling/
allocation.rs

//! Allocation tracking and statistics.
//!
//! Provides fine-grained tracking of memory allocations for leak detection
//! and performance analysis.
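//!
//! A minimal usage sketch, assuming `enable_profiling` is exposed by the
//! parent `profiling` module (as the tests below assume) and that this module
//! is reachable at the path shown:
//!
//! ```rust,ignore
//! use prax_query::profiling::{self, allocation::GLOBAL_TRACKER};
//!
//! profiling::enable_profiling();
//! // ... run the workload under measurement ...
//! let stats = GLOBAL_TRACKER.stats();
//! println!(
//!     "live: {} bytes, peak: {} bytes, {:.1} allocs/s",
//!     stats.current_bytes,
//!     stats.peak_bytes,
//!     stats.allocation_rate(),
//! );
//! ```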

use parking_lot::{Mutex, RwLock};
use std::alloc::{GlobalAlloc, Layout, System};
use std::cell::Cell;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::time::{Duration, Instant};
/// Global allocation tracker instance.
pub static GLOBAL_TRACKER: std::sync::LazyLock<AllocationTracker> =
    std::sync::LazyLock::new(AllocationTracker::new);

thread_local! {
    /// Reentrancy guard: set while the tracker performs its own bookkeeping
    /// (map inserts, record drops, backtrace capture). Without it, those
    /// internal allocations would recurse back into the tracker and deadlock
    /// on its locks when `TrackedAllocator` is the global allocator.
    static IN_TRACKER: Cell<bool> = const { Cell::new(false) };
}

// ============================================================================
// Allocation Record
// ============================================================================

/// Record of a single allocation.
#[derive(Debug, Clone)]
pub struct AllocationRecord {
    /// Unique allocation ID.
    pub id: u64,
    /// Size in bytes.
    pub size: usize,
    /// Memory alignment.
    pub align: usize,
    /// Timestamp when allocated.
    pub timestamp: Instant,
    /// Optional backtrace (expensive, only in debug mode).
    #[cfg(debug_assertions)]
    pub backtrace: Option<String>,
}

impl AllocationRecord {
    /// Create a new allocation record.
    pub fn new(id: u64, size: usize, align: usize) -> Self {
        Self {
            id,
            size,
            align,
            timestamp: Instant::now(),
            #[cfg(debug_assertions)]
            backtrace: if super::is_profiling_enabled() {
                Some(format!("{:?}", std::backtrace::Backtrace::capture()))
            } else {
                None
            },
        }
    }

    /// Get the age of this allocation.
    pub fn age(&self) -> Duration {
        self.timestamp.elapsed()
    }
}

// ============================================================================
// Allocation Tracker
// ============================================================================

/// Tracks memory allocations and deallocations.
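///
/// A minimal sketch of direct use, assuming profiling is enabled (see the
/// tests at the bottom of this file); the pointer values are illustrative:
///
/// ```rust,ignore
/// let tracker = AllocationTracker::new();
/// tracker.record_alloc(0x1000, 256, 8);
/// assert_eq!(tracker.stats().current_bytes, 256);
/// tracker.record_dealloc(0x1000, 256);
/// assert_eq!(tracker.stats().current_bytes, 0);
/// ```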
#[derive(Debug)]
pub struct AllocationTracker {
    /// Counter for allocation IDs.
    next_id: AtomicU64,
    /// Total allocations.
    total_allocations: AtomicU64,
    /// Total deallocations.
    total_deallocations: AtomicU64,
    /// Total bytes allocated.
    total_bytes_allocated: AtomicUsize,
    /// Total bytes deallocated.
    total_bytes_deallocated: AtomicUsize,
    /// Current bytes allocated.
    current_bytes: AtomicUsize,
    /// Peak bytes allocated.
    peak_bytes: AtomicUsize,
    /// Active allocations (ptr -> record).
    active: RwLock<HashMap<usize, AllocationRecord>>,
    /// Allocation size histogram.
    size_histogram: Mutex<SizeHistogram>,
    /// Start time.
    start_time: Instant,
}

impl AllocationTracker {
    /// Create a new allocation tracker.
    pub fn new() -> Self {
        Self {
            next_id: AtomicU64::new(1),
            total_allocations: AtomicU64::new(0),
            total_deallocations: AtomicU64::new(0),
            total_bytes_allocated: AtomicUsize::new(0),
            total_bytes_deallocated: AtomicUsize::new(0),
            current_bytes: AtomicUsize::new(0),
            peak_bytes: AtomicUsize::new(0),
            active: RwLock::new(HashMap::new()),
            size_histogram: Mutex::new(SizeHistogram::new()),
            start_time: Instant::now(),
        }
    }

    /// Record an allocation.
    pub fn record_alloc(&self, ptr: usize, size: usize, align: usize) {
        if !super::is_profiling_enabled() {
            return;
        }

        // Skip re-entrant calls: the bookkeeping below allocates (map growth,
        // backtrace capture), which would otherwise recurse back into this
        // method when tracking from a global allocator. `try_with` also
        // covers calls during thread-local destruction, when the guard is gone.
        if IN_TRACKER.try_with(|flag| flag.replace(true)).unwrap_or(true) {
            return;
        }

        let id = self.next_id.fetch_add(1, Ordering::Relaxed);
        let record = AllocationRecord::new(id, size, align);

        // Update counters
        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        self.total_bytes_allocated.fetch_add(size, Ordering::Relaxed);

        let current = self.current_bytes.fetch_add(size, Ordering::Relaxed) + size;

        // Update the peak with a CAS loop: another thread may raise the peak
        // between our load and the exchange, so retry until the stored peak
        // is at least `current`.
        let mut peak = self.peak_bytes.load(Ordering::Relaxed);
        while current > peak {
            match self.peak_bytes.compare_exchange_weak(
                peak,
                current,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(p) => peak = p,
            }
        }

        // Record allocation
        self.active.write().insert(ptr, record);

        // Update histogram
        self.size_histogram.lock().record(size);

        let _ = IN_TRACKER.try_with(|flag| flag.set(false));
    }

    /// Record a deallocation.
    pub fn record_dealloc(&self, ptr: usize, size: usize) {
        if !super::is_profiling_enabled() {
            return;
        }

        // Skip re-entrant calls: dropping the removed record deallocates its
        // backtrace string, which would recurse back into this method.
        if IN_TRACKER.try_with(|flag| flag.replace(true)).unwrap_or(true) {
            return;
        }

        self.total_deallocations.fetch_add(1, Ordering::Relaxed);
        self.total_bytes_deallocated.fetch_add(size, Ordering::Relaxed);
        // Saturating subtraction: frees of allocations made before tracking
        // started must not underflow the live-bytes counter.
        let _ = self
            .current_bytes
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |c| {
                Some(c.saturating_sub(size))
            });

        // Remove from active
        self.active.write().remove(&ptr);

        let _ = IN_TRACKER.try_with(|flag| flag.set(false));
    }

    /// Get current statistics.
    pub fn stats(&self) -> AllocationStats {
        AllocationStats {
            total_allocations: self.total_allocations.load(Ordering::Relaxed),
            total_deallocations: self.total_deallocations.load(Ordering::Relaxed),
            total_bytes_allocated: self.total_bytes_allocated.load(Ordering::Relaxed),
            total_bytes_deallocated: self.total_bytes_deallocated.load(Ordering::Relaxed),
            current_allocations: self.active.read().len() as u64,
            current_bytes: self.current_bytes.load(Ordering::Relaxed),
            peak_bytes: self.peak_bytes.load(Ordering::Relaxed),
            uptime: self.start_time.elapsed(),
        }
    }

    /// Get active allocations.
    pub fn active_allocations(&self) -> Vec<AllocationRecord> {
        self.active.read().values().cloned().collect()
    }

    /// Get allocations older than a threshold (potential leaks).
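    ///
    /// A sketch of a periodic leak sweep (the one-minute threshold is
    /// illustrative, not a recommendation):
    ///
    /// ```rust,ignore
    /// use std::time::Duration;
    ///
    /// for record in GLOBAL_TRACKER.old_allocations(Duration::from_secs(60)) {
    ///     eprintln!("possible leak: {} bytes, alive {:?}", record.size, record.age());
    /// }
    /// ```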
    pub fn old_allocations(&self, threshold: Duration) -> Vec<AllocationRecord> {
        self.active
            .read()
            .values()
            .filter(|r| r.age() > threshold)
            .cloned()
            .collect()
    }

    /// Get allocation size histogram.
    pub fn histogram(&self) -> SizeHistogram {
        self.size_histogram.lock().clone()
    }

    /// Reset all tracking state.
    pub fn reset(&self) {
        self.total_allocations.store(0, Ordering::Relaxed);
        self.total_deallocations.store(0, Ordering::Relaxed);
        self.total_bytes_allocated.store(0, Ordering::Relaxed);
        self.total_bytes_deallocated.store(0, Ordering::Relaxed);
        self.current_bytes.store(0, Ordering::Relaxed);
        self.peak_bytes.store(0, Ordering::Relaxed);
        self.active.write().clear();
        *self.size_histogram.lock() = SizeHistogram::new();
    }
}

impl Default for AllocationTracker {
    fn default() -> Self {
        Self::new()
    }
}

// ============================================================================
// Allocation Statistics
// ============================================================================

/// Summary statistics for allocations.
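///
/// A sketch of consuming a snapshot (field values come from
/// `AllocationTracker::stats`):
///
/// ```rust,ignore
/// let stats = GLOBAL_TRACKER.stats();
/// if stats.has_potential_leaks() {
///     eprintln!(
///         "{} live allocations holding {} bytes",
///         stats.current_allocations, stats.current_bytes,
///     );
/// }
/// ```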
#[derive(Debug, Clone, Default)]
pub struct AllocationStats {
    /// Total number of allocations.
    pub total_allocations: u64,
    /// Total number of deallocations.
    pub total_deallocations: u64,
    /// Total bytes ever allocated.
    pub total_bytes_allocated: usize,
    /// Total bytes ever deallocated.
    pub total_bytes_deallocated: usize,
    /// Current number of active allocations.
    pub current_allocations: u64,
    /// Current bytes allocated.
    pub current_bytes: usize,
    /// Peak bytes allocated.
    pub peak_bytes: usize,
    /// Time since tracking started.
    pub uptime: Duration,
}

impl AllocationStats {
    /// Get net allocations (allocs - deallocs).
    pub fn net_allocations(&self) -> i64 {
        self.total_allocations as i64 - self.total_deallocations as i64
    }

    /// Get allocation rate (allocations per second).
    pub fn allocation_rate(&self) -> f64 {
        if self.uptime.as_secs_f64() > 0.0 {
            self.total_allocations as f64 / self.uptime.as_secs_f64()
        } else {
            0.0
        }
    }

    /// Get average allocation size.
    pub fn avg_allocation_size(&self) -> usize {
        if self.total_allocations > 0 {
            self.total_bytes_allocated / self.total_allocations as usize
        } else {
            0
        }
    }

    /// Check if there are potential leaks. This is a heuristic: any live
    /// allocation counts, so long-lived caches also trigger it.
    pub fn has_potential_leaks(&self) -> bool {
        self.net_allocations() > 0 && self.current_bytes > 0
    }
}

// ============================================================================
// Size Histogram
// ============================================================================

/// Histogram of allocation sizes.
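///
/// A sketch of rendering the distribution (assumes sizes have been recorded):
///
/// ```rust,ignore
/// let hist = GLOBAL_TRACKER.histogram();
/// for (label, pct) in SizeHistogram::bucket_labels().iter().zip(hist.percentages()) {
///     println!("{label:>8}: {pct:5.1}%");
/// }
/// ```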
#[derive(Debug, Clone, Default)]
pub struct SizeHistogram {
    /// Buckets, in bytes: [0, 64], (64, 256], (256, 1K], (1K, 4K],
    /// (4K, 16K], (16K, 64K], and >64K.
    buckets: [u64; 7],
    /// Total allocations.
    total: u64,
}

impl SizeHistogram {
    /// Create a new histogram.
    pub fn new() -> Self {
        Self::default()
    }

    /// Record an allocation size.
    pub fn record(&mut self, size: usize) {
        let bucket = match size {
            0..=64 => 0,
            65..=256 => 1,
            257..=1024 => 2,
            1025..=4096 => 3,
            4097..=16384 => 4,
            16385..=65536 => 5,
            _ => 6,
        };
        self.buckets[bucket] += 1;
        self.total += 1;
    }

    /// Get bucket counts.
    pub fn buckets(&self) -> &[u64; 7] {
        &self.buckets
    }

    /// Get bucket labels.
    pub fn bucket_labels() -> &'static [&'static str; 7] {
        &["0-64B", "64-256B", "256B-1K", "1K-4K", "4K-16K", "16K-64K", "64K+"]
    }

    /// Get the most common bucket.
    pub fn most_common_bucket(&self) -> (&'static str, u64) {
        let labels = Self::bucket_labels();
        let (idx, count) = self
            .buckets
            .iter()
            .enumerate()
            .max_by_key(|(_, c)| *c)
            .unwrap();
        (labels[idx], *count)
    }

    /// Get percentage for each bucket.
    pub fn percentages(&self) -> [f64; 7] {
        if self.total == 0 {
            return [0.0; 7];
        }
        let mut pcts = [0.0; 7];
        for (i, &count) in self.buckets.iter().enumerate() {
            pcts[i] = (count as f64 / self.total as f64) * 100.0;
        }
        pcts
    }
}

// ============================================================================
// Tracked Allocator
// ============================================================================

/// A wrapper allocator that tracks allocations.
///
/// Use this as the global allocator to enable automatic tracking:
///
/// ```rust,ignore
/// use prax_query::profiling::TrackedAllocator;
///
/// #[global_allocator]
/// static ALLOC: TrackedAllocator = TrackedAllocator::new();
/// ```
///
/// Tracking adds lock and hash-map traffic to every allocation, so this is
/// intended for profiling builds rather than as a production default. The
/// tracker's own bookkeeping allocations are excluded by a per-thread
/// reentrancy guard (see `IN_TRACKER`).
pub struct TrackedAllocator {
    inner: System,
}

impl TrackedAllocator {
    /// Create a new tracked allocator.
    pub const fn new() -> Self {
        Self { inner: System }
    }
}

impl Default for TrackedAllocator {
    fn default() -> Self {
        Self::new()
    }
}

unsafe impl GlobalAlloc for TrackedAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // SAFETY: the caller upholds the `GlobalAlloc` contract for `layout`,
        // which is exactly what `System::alloc` requires.
        let ptr = unsafe { self.inner.alloc(layout) };
        if !ptr.is_null() {
            GLOBAL_TRACKER.record_alloc(ptr as usize, layout.size(), layout.align());
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        GLOBAL_TRACKER.record_dealloc(ptr as usize, layout.size());
        // SAFETY: the caller guarantees `ptr` was allocated by this allocator
        // with `layout`, which is what `System::dealloc` requires.
        unsafe { self.inner.dealloc(ptr, layout) };
    }

    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        // SAFETY: the caller's `realloc` contract is forwarded unchanged to
        // `System::realloc`.
        let new_ptr = unsafe { self.inner.realloc(ptr, layout, new_size) };

        // On failure the original block remains valid and untouched, so only
        // swap the tracking records when the reallocation succeeded.
        if !new_ptr.is_null() {
            GLOBAL_TRACKER.record_dealloc(ptr as usize, layout.size());
            GLOBAL_TRACKER.record_alloc(new_ptr as usize, new_size, layout.align());
        }

        new_ptr
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_allocation_record() {
        let record = AllocationRecord::new(1, 1024, 8);
        assert_eq!(record.id, 1);
        assert_eq!(record.size, 1024);
        assert_eq!(record.align, 8);
    }

    #[test]
    fn test_allocation_tracker() {
        super::super::enable_profiling();

        let tracker = AllocationTracker::new();

        tracker.record_alloc(0x1000, 100, 8);
        tracker.record_alloc(0x2000, 200, 8);

        let stats = tracker.stats();
        assert_eq!(stats.total_allocations, 2);
        assert_eq!(stats.total_bytes_allocated, 300);
        assert_eq!(stats.current_bytes, 300);

        tracker.record_dealloc(0x1000, 100);

        let stats = tracker.stats();
        assert_eq!(stats.total_deallocations, 1);
        assert_eq!(stats.current_bytes, 200);

        super::super::disable_profiling();
    }

    #[test]
    fn test_size_histogram() {
        let mut hist = SizeHistogram::new();

        hist.record(32); // 0-64
        hist.record(128); // 64-256
        hist.record(512); // 256-1K
        hist.record(32); // 0-64

        assert_eq!(hist.buckets[0], 2); // 0-64
        assert_eq!(hist.buckets[1], 1); // 64-256
        assert_eq!(hist.buckets[2], 1); // 256-1K
    }

    #[test]
    fn test_stats_calculations() {
        let stats = AllocationStats {
            total_allocations: 100,
            total_deallocations: 80,
            total_bytes_allocated: 10000,
            total_bytes_deallocated: 8000,
            current_allocations: 20,
            current_bytes: 2000,
            peak_bytes: 5000,
            uptime: Duration::from_secs(10),
        };

        assert_eq!(stats.net_allocations(), 20);
        assert_eq!(stats.allocation_rate(), 10.0);
        assert_eq!(stats.avg_allocation_size(), 100);
        assert!(stats.has_potential_leaks());
    }
}